path: root/deps/v8/src
author    Ryan Dahl <ry@tinyclouds.org>  2011-12-05 16:29:01 -0800
committer Ryan Dahl <ry@tinyclouds.org>  2011-12-05 16:29:01 -0800
commit    21d081fd7f83aa168ea0bef0870c7f1fed410a63 (patch)
tree      23a04eef49364b1cbee204a87fd0904be2430c2e /deps/v8/src
parent    e90623edc2befb06602ff8c3e01809ba0a21d593 (diff)
download  node-21d081fd7f83aa168ea0bef0870c7f1fed410a63.tar.gz
Upgrade V8 to 3.7.12
Diffstat (limited to 'deps/v8/src')
-rwxr-xr-x [-rw-r--r--]  deps/v8/src/SConscript | 7
-rw-r--r--  deps/v8/src/accessors.cc | 18
-rw-r--r--  deps/v8/src/allocation.h | 6
-rw-r--r--  deps/v8/src/api.cc | 220
-rw-r--r--  deps/v8/src/api.h | 18
-rw-r--r--  deps/v8/src/arm/assembler-arm-inl.h | 41
-rw-r--r--  deps/v8/src/arm/assembler-arm.cc | 12
-rw-r--r--  deps/v8/src/arm/assembler-arm.h | 16
-rw-r--r--  deps/v8/src/arm/builtins-arm.cc | 1206
-rw-r--r--  deps/v8/src/arm/code-stubs-arm.cc | 1148
-rw-r--r--  deps/v8/src/arm/code-stubs-arm.h | 321
-rw-r--r--  deps/v8/src/arm/codegen-arm.cc | 349
-rw-r--r--  deps/v8/src/arm/codegen-arm.h | 26
-rw-r--r--  deps/v8/src/arm/constants-arm.h | 47
-rw-r--r--  deps/v8/src/arm/debug-arm.cc | 88
-rw-r--r--  deps/v8/src/arm/deoptimizer-arm.cc | 116
-rw-r--r--  deps/v8/src/arm/frames-arm.h | 43
-rw-r--r--  deps/v8/src/arm/full-codegen-arm.cc | 781
-rw-r--r--  deps/v8/src/arm/ic-arm.cc | 259
-rw-r--r--  deps/v8/src/arm/lithium-arm.cc | 177
-rw-r--r--  deps/v8/src/arm/lithium-arm.h | 119
-rw-r--r--  deps/v8/src/arm/lithium-codegen-arm.cc | 835
-rw-r--r--  deps/v8/src/arm/lithium-codegen-arm.h | 64
-rw-r--r--  deps/v8/src/arm/macro-assembler-arm.cc | 865
-rw-r--r--  deps/v8/src/arm/macro-assembler-arm.h | 337
-rw-r--r--  deps/v8/src/arm/regexp-macro-assembler-arm.cc | 20
-rw-r--r--  deps/v8/src/arm/simulator-arm.cc | 29
-rw-r--r--  deps/v8/src/arm/simulator-arm.h | 7
-rw-r--r--  deps/v8/src/arm/stub-cache-arm.cc | 1655
-rw-r--r--  deps/v8/src/array.js | 188
-rw-r--r--  deps/v8/src/assembler.cc | 90
-rw-r--r--  deps/v8/src/assembler.h | 86
-rw-r--r--  deps/v8/src/ast-inl.h | 121
-rw-r--r--  deps/v8/src/ast.cc | 199
-rw-r--r--  deps/v8/src/ast.h | 300
-rw-r--r--  deps/v8/src/atomicops_internals_mips_gcc.h | 48
-rw-r--r--  deps/v8/src/bootstrapper.cc | 288
-rw-r--r--  deps/v8/src/builtins.cc | 178
-rw-r--r--  deps/v8/src/builtins.h | 47
-rw-r--r--  deps/v8/src/bytecodes-irregexp.h | 8
-rw-r--r--  deps/v8/src/cached-powers.cc | 12
-rw-r--r--  deps/v8/src/char-predicates-inl.h | 4
-rw-r--r--  deps/v8/src/checks.h | 121
-rw-r--r--  deps/v8/src/code-stubs.cc | 113
-rw-r--r--  deps/v8/src/code-stubs.h | 249
-rw-r--r--  deps/v8/src/codegen.cc | 2
-rw-r--r--  deps/v8/src/codegen.h | 15
-rw-r--r--  deps/v8/src/collection.js (renamed from deps/v8/src/weakmap.js) | 117
-rw-r--r--  deps/v8/src/compilation-cache.cc | 45
-rw-r--r--  deps/v8/src/compilation-cache.h | 34
-rw-r--r--  deps/v8/src/compiler-intrinsics.h | 77
-rw-r--r--  deps/v8/src/compiler.cc | 124
-rw-r--r--  deps/v8/src/compiler.h | 58
-rw-r--r--  deps/v8/src/contexts.cc | 206
-rw-r--r--  deps/v8/src/contexts.h | 105
-rw-r--r--  deps/v8/src/conversions-inl.h | 48
-rw-r--r--  deps/v8/src/conversions.h | 20
-rw-r--r--  deps/v8/src/d8-debug.cc | 5
-rw-r--r--  deps/v8/src/d8.cc | 78
-rw-r--r--  deps/v8/src/d8.gyp | 2
-rw-r--r--  deps/v8/src/d8.js | 109
-rw-r--r--  deps/v8/src/date.js | 51
-rw-r--r--  deps/v8/src/debug-debugger.js | 147
-rw-r--r--  deps/v8/src/debug.cc | 480
-rw-r--r--  deps/v8/src/debug.h | 99
-rw-r--r--  deps/v8/src/deoptimizer.cc | 97
-rw-r--r--  deps/v8/src/deoptimizer.h | 36
-rw-r--r--  deps/v8/src/disassembler.cc | 2
-rw-r--r--  deps/v8/src/double.h | 6
-rw-r--r--  deps/v8/src/dtoa.h | 4
-rw-r--r--  deps/v8/src/elements.cc | 344
-rw-r--r--  deps/v8/src/elements.h | 5
-rw-r--r--  deps/v8/src/execution.cc | 177
-rw-r--r--  deps/v8/src/execution.h | 13
-rw-r--r--  deps/v8/src/extensions/gc-extension.cc | 12
-rw-r--r--  deps/v8/src/factory.cc | 192
-rw-r--r--  deps/v8/src/factory.h | 55
-rw-r--r--  deps/v8/src/fast-dtoa.h | 4
-rw-r--r--  deps/v8/src/flag-definitions.h | 60
-rw-r--r--  deps/v8/src/frames-inl.h | 88
-rw-r--r--  deps/v8/src/frames.cc | 211
-rw-r--r--  deps/v8/src/frames.h | 93
-rw-r--r--  deps/v8/src/full-codegen.cc | 248
-rw-r--r--  deps/v8/src/full-codegen.h | 152
-rw-r--r--  deps/v8/src/gdb-jit.cc | 6
-rw-r--r--  deps/v8/src/globals.h | 42
-rw-r--r--  deps/v8/src/handles.cc | 156
-rw-r--r--  deps/v8/src/handles.h | 66
-rw-r--r--  deps/v8/src/hashmap.cc | 8
-rw-r--r--  deps/v8/src/hashmap.h | 9
-rw-r--r--  deps/v8/src/heap-inl.h | 152
-rw-r--r--  deps/v8/src/heap-profiler.cc | 1
-rw-r--r--  deps/v8/src/heap.cc | 2033
-rw-r--r--  deps/v8/src/heap.h | 648
-rw-r--r--  deps/v8/src/hydrogen-instructions.cc | 370
-rw-r--r--  deps/v8/src/hydrogen-instructions.h | 695
-rw-r--r--  deps/v8/src/hydrogen.cc | 1000
-rw-r--r--  deps/v8/src/hydrogen.h | 57
-rw-r--r--  deps/v8/src/ia32/assembler-ia32-inl.h | 43
-rw-r--r--  deps/v8/src/ia32/assembler-ia32.cc | 114
-rw-r--r--  deps/v8/src/ia32/assembler-ia32.h | 91
-rw-r--r--  deps/v8/src/ia32/builtins-ia32.cc | 1053
-rw-r--r--  deps/v8/src/ia32/code-stubs-ia32.cc | 1657
-rw-r--r--  deps/v8/src/ia32/code-stubs-ia32.h | 301
-rw-r--r--  deps/v8/src/ia32/codegen-ia32.cc | 411
-rw-r--r--  deps/v8/src/ia32/codegen-ia32.h | 16
-rw-r--r--  deps/v8/src/ia32/debug-ia32.cc | 101
-rw-r--r--  deps/v8/src/ia32/deoptimizer-ia32.cc | 213
-rw-r--r--  deps/v8/src/ia32/disasm-ia32.cc | 41
-rw-r--r--  deps/v8/src/ia32/frames-ia32.h | 20
-rw-r--r--  deps/v8/src/ia32/full-codegen-ia32.cc | 1037
-rw-r--r--  deps/v8/src/ia32/ic-ia32.cc | 308
-rw-r--r--  deps/v8/src/ia32/lithium-codegen-ia32.cc | 911
-rw-r--r--  deps/v8/src/ia32/lithium-codegen-ia32.h | 75
-rw-r--r--  deps/v8/src/ia32/lithium-ia32.cc | 587
-rw-r--r--  deps/v8/src/ia32/lithium-ia32.h | 158
-rw-r--r--  deps/v8/src/ia32/macro-assembler-ia32.cc | 1057
-rw-r--r--  deps/v8/src/ia32/macro-assembler-ia32.h | 305
-rw-r--r--  deps/v8/src/ia32/regexp-macro-assembler-ia32.cc | 121
-rw-r--r--  deps/v8/src/ia32/stub-cache-ia32.cc | 1714
-rw-r--r--  deps/v8/src/ic-inl.h | 2
-rw-r--r--  deps/v8/src/ic.cc | 1164
-rw-r--r--  deps/v8/src/ic.h | 215
-rw-r--r--  deps/v8/src/incremental-marking-inl.h | 133
-rw-r--r--  deps/v8/src/incremental-marking.cc | 920
-rw-r--r--  deps/v8/src/incremental-marking.h | 281
-rw-r--r--  deps/v8/src/interpreter-irregexp.cc | 37
-rw-r--r--  deps/v8/src/interpreter-irregexp.h | 12
-rw-r--r--  deps/v8/src/isolate-inl.h | 13
-rw-r--r--  deps/v8/src/isolate.cc | 45
-rw-r--r--  deps/v8/src/isolate.h | 53
-rw-r--r--  deps/v8/src/json-parser.h | 2
-rw-r--r--  deps/v8/src/json.js | 2
-rw-r--r--  deps/v8/src/jsregexp.cc | 23
-rw-r--r--  deps/v8/src/jsregexp.h | 5
-rw-r--r--  deps/v8/src/list-inl.h | 5
-rw-r--r--  deps/v8/src/list.h | 4
-rw-r--r--  deps/v8/src/lithium-allocator.cc | 31
-rw-r--r--  deps/v8/src/lithium-allocator.h | 5
-rw-r--r--  deps/v8/src/lithium.cc | 22
-rw-r--r--  deps/v8/src/lithium.h | 26
-rw-r--r--  deps/v8/src/liveedit-debugger.js | 47
-rw-r--r--  deps/v8/src/liveedit.cc | 50
-rw-r--r--  deps/v8/src/liveobjectlist.cc | 20
-rw-r--r--  deps/v8/src/log.cc | 16
-rw-r--r--  deps/v8/src/log.h | 7
-rw-r--r--  deps/v8/src/macro-assembler.h | 59
-rw-r--r--  deps/v8/src/macros.py | 7
-rw-r--r--  deps/v8/src/mark-compact-inl.h | 95
-rw-r--r--  deps/v8/src/mark-compact.cc | 3878
-rw-r--r--  deps/v8/src/mark-compact.h | 656
-rw-r--r--  deps/v8/src/math.js | 4
-rw-r--r--  deps/v8/src/messages.cc | 16
-rw-r--r--  deps/v8/src/messages.js | 168
-rw-r--r--  deps/v8/src/mips/assembler-mips-inl.h | 45
-rw-r--r--  deps/v8/src/mips/assembler-mips.cc | 12
-rw-r--r--  deps/v8/src/mips/assembler-mips.h | 2
-rw-r--r--  deps/v8/src/mips/builtins-mips.cc | 1311
-rw-r--r--  deps/v8/src/mips/code-stubs-mips.cc | 1397
-rw-r--r--  deps/v8/src/mips/code-stubs-mips.h | 322
-rw-r--r--  deps/v8/src/mips/codegen-mips.cc | 269
-rw-r--r--  deps/v8/src/mips/codegen-mips.h | 16
-rw-r--r--  deps/v8/src/mips/constants-mips.h | 170
-rw-r--r--  deps/v8/src/mips/debug-mips.cc | 89
-rw-r--r--  deps/v8/src/mips/deoptimizer-mips.cc | 715
-rw-r--r--  deps/v8/src/mips/frames-mips.h | 50
-rw-r--r--  deps/v8/src/mips/full-codegen-mips.cc | 829
-rw-r--r--  deps/v8/src/mips/ic-mips.cc | 293
-rw-r--r--  deps/v8/src/mips/lithium-codegen-mips.cc | 4651
-rw-r--r--  deps/v8/src/mips/lithium-codegen-mips.h | 390
-rw-r--r--  deps/v8/src/mips/lithium-gap-resolver-mips.cc | 309
-rw-r--r--  deps/v8/src/mips/lithium-gap-resolver-mips.h | 83
-rw-r--r--  deps/v8/src/mips/lithium-mips.cc | 2237
-rw-r--r--  deps/v8/src/mips/lithium-mips.h | 2254
-rw-r--r--  deps/v8/src/mips/macro-assembler-mips.cc | 1403
-rw-r--r--  deps/v8/src/mips/macro-assembler-mips.h | 468
-rw-r--r--  deps/v8/src/mips/regexp-macro-assembler-mips.cc | 31
-rw-r--r--  deps/v8/src/mips/simulator-mips.cc | 26
-rw-r--r--  deps/v8/src/mips/simulator-mips.h | 7
-rw-r--r--  deps/v8/src/mips/stub-cache-mips.cc | 1703
-rw-r--r--  deps/v8/src/mirror-debugger.js | 211
-rw-r--r--  deps/v8/src/mksnapshot.cc | 2
-rw-r--r--  deps/v8/src/objects-debug.cc | 53
-rw-r--r--  deps/v8/src/objects-inl.h | 1161
-rw-r--r--  deps/v8/src/objects-printer.cc | 85
-rw-r--r--  deps/v8/src/objects-visiting-inl.h | 151
-rw-r--r--  deps/v8/src/objects-visiting.cc | 15
-rw-r--r--  deps/v8/src/objects-visiting.h | 138
-rw-r--r--  deps/v8/src/objects.cc | 2822
-rw-r--r--  deps/v8/src/objects.h | 1334
-rw-r--r--  deps/v8/src/parser.cc | 1236
-rw-r--r--  deps/v8/src/parser.h | 139
-rw-r--r--  deps/v8/src/platform-freebsd.cc | 108
-rw-r--r--  deps/v8/src/platform-linux.cc | 173
-rw-r--r--  deps/v8/src/platform-macos.cc | 137
-rw-r--r--  deps/v8/src/platform-openbsd.cc | 415
-rw-r--r--  deps/v8/src/platform-posix.cc | 38
-rw-r--r--  deps/v8/src/platform-win32.cc | 91
-rw-r--r--  deps/v8/src/platform.h | 62
-rw-r--r--  deps/v8/src/preparse-data.h | 6
-rw-r--r--  deps/v8/src/preparser-api.cc | 4
-rw-r--r--  deps/v8/src/preparser.cc | 254
-rw-r--r--  deps/v8/src/preparser.h | 99
-rw-r--r--  deps/v8/src/prettyprinter.cc | 26
-rw-r--r--  deps/v8/src/profile-generator.cc | 166
-rw-r--r--  deps/v8/src/profile-generator.h | 14
-rw-r--r--  deps/v8/src/property-details.h | 182
-rw-r--r--  deps/v8/src/property.cc | 9
-rw-r--r--  deps/v8/src/property.h | 49
-rw-r--r--  deps/v8/src/proxy.js | 51
-rw-r--r--  deps/v8/src/regexp-macro-assembler-tracer.cc | 4
-rw-r--r--  deps/v8/src/regexp-macro-assembler.cc | 4
-rw-r--r--  deps/v8/src/regexp.js | 83
-rw-r--r--  deps/v8/src/rewriter.cc | 16
-rw-r--r--  deps/v8/src/runtime-profiler.cc | 11
-rw-r--r--  deps/v8/src/runtime.cc | 2499
-rw-r--r--  deps/v8/src/runtime.h | 50
-rw-r--r--  deps/v8/src/runtime.js | 36
-rw-r--r--  deps/v8/src/safepoint-table.cc | 60
-rw-r--r--  deps/v8/src/safepoint-table.h | 78
-rw-r--r--  deps/v8/src/scanner.cc | 99
-rw-r--r--  deps/v8/src/scanner.h | 270
-rw-r--r--  deps/v8/src/scopeinfo.cc | 689
-rw-r--r--  deps/v8/src/scopeinfo.h | 160
-rw-r--r--  deps/v8/src/scopes.cc | 676
-rw-r--r--  deps/v8/src/scopes.h | 219
-rw-r--r--  deps/v8/src/serialize.cc | 339
-rw-r--r--  deps/v8/src/serialize.h | 77
-rw-r--r--  deps/v8/src/spaces-inl.h | 510
-rw-r--r--  deps/v8/src/spaces.cc | 3070
-rw-r--r--  deps/v8/src/spaces.h | 2622
-rw-r--r--  deps/v8/src/splay-tree-inl.h | 6
-rw-r--r--  deps/v8/src/store-buffer-inl.h | 79
-rw-r--r--  deps/v8/src/store-buffer.cc | 696
-rw-r--r--  deps/v8/src/store-buffer.h | 248
-rw-r--r--  deps/v8/src/string-search.h | 16
-rw-r--r--  deps/v8/src/string-stream.cc | 37
-rw-r--r--  deps/v8/src/string.js | 47
-rw-r--r--  deps/v8/src/strtod.cc | 1
-rw-r--r--  deps/v8/src/stub-cache.cc | 1649
-rw-r--r--  deps/v8/src/stub-cache.h | 739
-rw-r--r--  deps/v8/src/token.h | 5
-rw-r--r--  deps/v8/src/type-info.cc | 136
-rw-r--r--  deps/v8/src/type-info.h | 22
-rw-r--r--  deps/v8/src/unicode.h | 4
-rw-r--r--  deps/v8/src/uri.js | 47
-rw-r--r--  deps/v8/src/utils.h | 73
-rw-r--r--  deps/v8/src/v8-counters.h | 9
-rw-r--r--  deps/v8/src/v8.cc | 32
-rw-r--r--  deps/v8/src/v8.h | 18
-rw-r--r--  deps/v8/src/v8conversions.h | 4
-rw-r--r--  deps/v8/src/v8globals.h | 166
-rw-r--r--  deps/v8/src/v8memory.h | 6
-rw-r--r--  deps/v8/src/v8natives.js | 220
-rw-r--r--  deps/v8/src/v8utils.h | 12
-rw-r--r--  deps/v8/src/variables.cc | 26
-rw-r--r--  deps/v8/src/variables.h | 73
-rw-r--r--  deps/v8/src/version.cc | 6
-rw-r--r--  deps/v8/src/win32-headers.h | 1
-rw-r--r--  deps/v8/src/x64/assembler-x64-inl.h | 44
-rw-r--r--  deps/v8/src/x64/assembler-x64.cc | 11
-rw-r--r--  deps/v8/src/x64/assembler-x64.h | 25
-rw-r--r--  deps/v8/src/x64/builtins-x64.cc | 1164
-rw-r--r--  deps/v8/src/x64/code-stubs-x64.cc | 989
-rw-r--r--  deps/v8/src/x64/code-stubs-x64.h | 287
-rw-r--r--  deps/v8/src/x64/codegen-x64.cc | 334
-rw-r--r--  deps/v8/src/x64/codegen-x64.h | 15
-rw-r--r--  deps/v8/src/x64/debug-x64.cc | 111
-rw-r--r--  deps/v8/src/x64/deoptimizer-x64.cc | 179
-rw-r--r--  deps/v8/src/x64/frames-x64.h | 20
-rw-r--r--  deps/v8/src/x64/full-codegen-x64.cc | 776
-rw-r--r--  deps/v8/src/x64/ic-x64.cc | 260
-rw-r--r--  deps/v8/src/x64/lithium-codegen-x64.cc | 840
-rw-r--r--  deps/v8/src/x64/lithium-codegen-x64.h | 69
-rw-r--r--  deps/v8/src/x64/lithium-x64.cc | 185
-rw-r--r--  deps/v8/src/x64/lithium-x64.h | 131
-rw-r--r--  deps/v8/src/x64/macro-assembler-x64.cc | 978
-rw-r--r--  deps/v8/src/x64/macro-assembler-x64.h | 354
-rw-r--r--  deps/v8/src/x64/regexp-macro-assembler-x64.cc | 27
-rw-r--r--  deps/v8/src/x64/stub-cache-x64.cc | 1591
-rw-r--r--  deps/v8/src/zone-inl.h | 8
-rw-r--r--  deps/v8/src/zone.h | 4
282 files changed, 59518 insertions, 31250 deletions
diff --git a/deps/v8/src/SConscript b/deps/v8/src/SConscript
index 52607f15c..42de36bc8 100644..100755
--- a/deps/v8/src/SConscript
+++ b/deps/v8/src/SConscript
@@ -84,6 +84,7 @@ SOURCES = {
hydrogen.cc
hydrogen-instructions.cc
ic.cc
+ incremental-marking.cc
inspector.cc
interpreter-irregexp.cc
isolate.cc
@@ -133,6 +134,7 @@ SOURCES = {
v8utils.cc
variables.cc
version.cc
+ store-buffer.cc
zone.cc
extensions/gc-extension.cc
extensions/externalize-string-extension.cc
@@ -170,6 +172,9 @@ SOURCES = {
mips/frames-mips.cc
mips/full-codegen-mips.cc
mips/ic-mips.cc
+ mips/lithium-codegen-mips.cc
+ mips/lithium-gap-resolver-mips.cc
+ mips/lithium-mips.cc
mips/macro-assembler-mips.cc
mips/regexp-macro-assembler-mips.cc
mips/stub-cache-mips.cc
@@ -319,7 +324,7 @@ debug-debugger.js
EXPERIMENTAL_LIBRARY_FILES = '''
proxy.js
-weakmap.js
+collection.js
'''.split()
diff --git a/deps/v8/src/accessors.cc b/deps/v8/src/accessors.cc
index 951209d96..e60f568a2 100644
--- a/deps/v8/src/accessors.cc
+++ b/deps/v8/src/accessors.cc
@@ -527,7 +527,9 @@ MaybeObject* Accessors::FunctionGetLength(Object* object, void*) {
// correctly yet. Compile it now and return the right length.
HandleScope scope;
Handle<JSFunction> handle(function);
- if (!CompileLazy(handle, KEEP_EXCEPTION)) return Failure::Exception();
+ if (!JSFunction::CompileLazy(handle, KEEP_EXCEPTION)) {
+ return Failure::Exception();
+ }
return Smi::FromInt(handle->shared()->length());
} else {
return Smi::FromInt(function->shared()->length());
@@ -619,8 +621,9 @@ MaybeObject* Accessors::FunctionGetArguments(Object* object, void*) {
if (!frame->is_optimized()) {
// If there is an arguments variable in the stack, we return that.
- Handle<SerializedScopeInfo> info(function->shared()->scope_info());
- int index = info->StackSlotIndex(isolate->heap()->arguments_symbol());
+ Handle<ScopeInfo> scope_info(function->shared()->scope_info());
+ int index = scope_info->StackSlotIndex(
+ isolate->heap()->arguments_symbol());
if (index >= 0) {
Handle<Object> arguments(frame->GetExpression(index), isolate);
if (!arguments->IsArgumentsMarker()) return *arguments;
@@ -672,7 +675,7 @@ static MaybeObject* CheckNonStrictCallerOrThrow(
Isolate* isolate,
JSFunction* caller) {
DisableAssertNoAllocation enable_allocation;
- if (caller->shared()->strict_mode()) {
+ if (!caller->shared()->is_classic_mode()) {
return isolate->Throw(
*isolate->factory()->NewTypeError("strict_caller",
HandleVector<Object>(NULL, 0)));
@@ -759,7 +762,12 @@ MaybeObject* Accessors::FunctionGetCaller(Object* object, void*) {
caller = potential_caller;
potential_caller = it.next();
}
-
+ // If caller is bound, return null. This is compatible with JSC, and
+ // allows us to make bound functions use the strict function map
+ // and its associated throwing caller and arguments.
+ if (caller->shared()->bound()) {
+ return isolate->heap()->null_value();
+ }
return CheckNonStrictCallerOrThrow(isolate, caller);
}
diff --git a/deps/v8/src/allocation.h b/deps/v8/src/allocation.h
index 75aba35d8..00c5664ea 100644
--- a/deps/v8/src/allocation.h
+++ b/deps/v8/src/allocation.h
@@ -1,4 +1,4 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -81,7 +81,7 @@ class AllStatic {
template <typename T>
-static T* NewArray(int size) {
+T* NewArray(int size) {
T* result = new T[size];
if (result == NULL) Malloced::FatalProcessOutOfMemory();
return result;
@@ -89,7 +89,7 @@ static T* NewArray(int size) {
template <typename T>
-static void DeleteArray(T* array) {
+void DeleteArray(T* array) {
delete[] array;
}
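
The change above drops 'static' from these header-defined templates, giving them ordinary linkage instead of a private copy per translation unit. A minimal standalone sketch of the same pattern, with std::nothrow and std::abort() standing in for V8's Malloced::FatalProcessOutOfMemory() hook (plain operator new would throw rather than return NULL):

    #include <cstdlib>
    #include <new>

    template <typename T>
    T* NewArray(int size) {
      // new (std::nothrow) returns NULL on failure, matching the explicit
      // NULL check used in allocation.h.
      T* result = new (std::nothrow) T[size];
      if (result == NULL) std::abort();  // stand-in for the fatal-OOM hook
      return result;
    }

    template <typename T>
    void DeleteArray(T* array) {
      delete[] array;
    }

    int main() {
      int* a = NewArray<int>(16);
      DeleteArray(a);
      return 0;
    }
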
diff --git a/deps/v8/src/api.cc b/deps/v8/src/api.cc
index 479be5af1..35b8aa0fc 100644
--- a/deps/v8/src/api.cc
+++ b/deps/v8/src/api.cc
@@ -185,7 +185,10 @@ void i::V8::FatalProcessOutOfMemory(const char* location, bool take_snapshot) {
int end_marker;
heap_stats.end_marker = &end_marker;
i::Isolate* isolate = i::Isolate::Current();
- isolate->heap()->RecordStats(&heap_stats, take_snapshot);
+ // BUG(1718):
+ // Don't use the take_snapshot since we don't support HeapIterator here
+ // without doing a special GC.
+ isolate->heap()->RecordStats(&heap_stats, false);
i::V8::SetFatalError();
FatalErrorCallback callback = GetFatalErrorHandler();
{
@@ -483,7 +486,7 @@ RegisteredExtension* RegisteredExtension::first_extension_ = NULL;
RegisteredExtension::RegisteredExtension(Extension* extension)
- : extension_(extension), state_(UNVISITED) { }
+ : extension_(extension) { }
void RegisteredExtension::Register(RegisteredExtension* that) {
@@ -501,9 +504,12 @@ void RegisterExtension(Extension* that) {
Extension::Extension(const char* name,
const char* source,
int dep_count,
- const char** deps)
+ const char** deps,
+ int source_length)
: name_(name),
- source_(source),
+ source_length_(source_length >= 0 ?
+ source_length : (source ? strlen(source) : 0)),
+ source_(source, source_length_),
dep_count_(dep_count),
deps_(deps),
auto_enable_(false) { }
@@ -1407,7 +1413,7 @@ void ObjectTemplate::SetInternalFieldCount(int value) {
ScriptData* ScriptData::PreCompile(const char* input, int length) {
i::Utf8ToUC16CharacterStream stream(
reinterpret_cast<const unsigned char*>(input), length);
- return i::ParserApi::PreParse(&stream, NULL, i::FLAG_harmony_block_scoping);
+ return i::ParserApi::PreParse(&stream, NULL, i::FLAG_harmony_scoping);
}
@@ -1416,10 +1422,10 @@ ScriptData* ScriptData::PreCompile(v8::Handle<String> source) {
if (str->IsExternalTwoByteString()) {
i::ExternalTwoByteStringUC16CharacterStream stream(
i::Handle<i::ExternalTwoByteString>::cast(str), 0, str->length());
- return i::ParserApi::PreParse(&stream, NULL, i::FLAG_harmony_block_scoping);
+ return i::ParserApi::PreParse(&stream, NULL, i::FLAG_harmony_scoping);
} else {
i::GenericStringUC16CharacterStream stream(str, 0, str->length());
- return i::ParserApi::PreParse(&stream, NULL, i::FLAG_harmony_block_scoping);
+ return i::ParserApi::PreParse(&stream, NULL, i::FLAG_harmony_scoping);
}
}
@@ -1781,7 +1787,7 @@ v8::Handle<v8::StackTrace> Message::GetStackTrace() const {
static i::Handle<i::Object> CallV8HeapFunction(const char* name,
i::Handle<i::Object> recv,
int argc,
- i::Object** argv[],
+ i::Handle<i::Object> argv[],
bool* has_pending_exception) {
i::Isolate* isolate = i::Isolate::Current();
i::Handle<i::String> fmt_str = isolate->factory()->LookupAsciiSymbol(name);
@@ -1798,10 +1804,10 @@ static i::Handle<i::Object> CallV8HeapFunction(const char* name,
static i::Handle<i::Object> CallV8HeapFunction(const char* name,
i::Handle<i::Object> data,
bool* has_pending_exception) {
- i::Object** argv[1] = { data.location() };
+ i::Handle<i::Object> argv[] = { data };
return CallV8HeapFunction(name,
i::Isolate::Current()->js_builtins_object(),
- 1,
+ ARRAY_SIZE(argv),
argv,
has_pending_exception);
}
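
The rewritten call site above replaces a hand-written argc of 1 with ARRAY_SIZE(argv), so the count can never drift from the array literal. A simplified sketch of that pattern (V8's real macro adds extra compile-time checking):

    #include <cstdio>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    int main() {
      const char* argv[] = { "recv", "other" };
      // The element count is derived from the array's static size.
      std::printf("argc = %u\n", static_cast<unsigned>(ARRAY_SIZE(argv)));
      return 0;
    }
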
@@ -2621,10 +2627,11 @@ bool Value::Equals(Handle<Value> that) const {
if (obj->IsJSObject() && other->IsJSObject()) {
return *obj == *other;
}
- i::Object** args[1] = { other.location() };
+ i::Handle<i::Object> args[] = { other };
EXCEPTION_PREAMBLE(isolate);
i::Handle<i::Object> result =
- CallV8HeapFunction("EQUALS", obj, 1, args, &has_pending_exception);
+ CallV8HeapFunction("EQUALS", obj, ARRAY_SIZE(args), args,
+ &has_pending_exception);
EXCEPTION_BAILOUT_CHECK(isolate, false);
return *result == i::Smi::FromInt(i::EQUAL);
}
@@ -2787,7 +2794,7 @@ Local<Value> v8::Object::Get(uint32_t index) {
ENTER_V8(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> result = i::GetElement(self, index);
+ i::Handle<i::Object> result = i::Object::GetElement(self, index);
has_pending_exception = result.is_null();
EXCEPTION_BAILOUT_CHECK(isolate, Local<Value>());
return Utils::ToLocal(result);
@@ -2867,8 +2874,10 @@ Local<Array> v8::Object::GetPropertyNames() {
ENTER_V8(isolate);
i::HandleScope scope(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
+ bool threw = false;
i::Handle<i::FixedArray> value =
- i::GetKeysInFixedArrayFor(self, i::INCLUDE_PROTOS);
+ i::GetKeysInFixedArrayFor(self, i::INCLUDE_PROTOS, &threw);
+ if (threw) return Local<v8::Array>();
// Because we use caching to speed up enumeration it is important
// to never change the result of the basic enumeration function so
// we clone the result.
@@ -2886,8 +2895,10 @@ Local<Array> v8::Object::GetOwnPropertyNames() {
ENTER_V8(isolate);
i::HandleScope scope(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
+ bool threw = false;
i::Handle<i::FixedArray> value =
- i::GetKeysInFixedArrayFor(self, i::LOCAL_ONLY);
+ i::GetKeysInFixedArrayFor(self, i::LOCAL_ONLY, &threw);
+ if (threw) return Local<v8::Array>();
// Because we use caching to speed up enumeration it is important
// to never change the result of the basic enumeration function so
// we clone the result.
@@ -3086,7 +3097,10 @@ static Local<Value> GetPropertyByLookup(i::Isolate* isolate,
// If the property being looked up is a callback, it can throw
// an exception.
EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> result = i::GetProperty(receiver, name, lookup);
+ PropertyAttributes ignored;
+ i::Handle<i::Object> result =
+ i::Object::GetProperty(receiver, receiver, lookup, name,
+ &ignored);
has_pending_exception = result.is_null();
EXCEPTION_BAILOUT_CHECK(isolate, Local<Value>());
@@ -3103,7 +3117,7 @@ Local<Value> v8::Object::GetRealNamedPropertyInPrototypeChain(
ENTER_V8(isolate);
i::Handle<i::JSObject> self_obj = Utils::OpenHandle(this);
i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
- i::LookupResult lookup;
+ i::LookupResult lookup(isolate);
self_obj->LookupRealNamedPropertyInPrototypes(*key_obj, &lookup);
return GetPropertyByLookup(isolate, self_obj, key_obj, &lookup);
}
@@ -3116,7 +3130,7 @@ Local<Value> v8::Object::GetRealNamedProperty(Handle<String> key) {
ENTER_V8(isolate);
i::Handle<i::JSObject> self_obj = Utils::OpenHandle(this);
i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
- i::LookupResult lookup;
+ i::LookupResult lookup(isolate);
self_obj->LookupRealNamedProperty(*key_obj, &lookup);
return GetPropertyByLookup(isolate, self_obj, key_obj, &lookup);
}
@@ -3204,21 +3218,10 @@ bool v8::Object::SetHiddenValue(v8::Handle<v8::String> key,
ENTER_V8(isolate);
i::HandleScope scope(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- i::Handle<i::Object> hidden_props(i::GetHiddenProperties(
- self,
- i::JSObject::ALLOW_CREATION));
- i::Handle<i::Object> key_obj = Utils::OpenHandle(*key);
+ i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
i::Handle<i::Object> value_obj = Utils::OpenHandle(*value);
- EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> obj = i::SetProperty(
- hidden_props,
- key_obj,
- value_obj,
- static_cast<PropertyAttributes>(None),
- i::kNonStrictMode);
- has_pending_exception = obj.is_null();
- EXCEPTION_BAILOUT_CHECK(isolate, false);
- return true;
+ i::Handle<i::Object> result = i::SetHiddenProperty(self, key_obj, value_obj);
+ return *result == *self;
}
@@ -3228,20 +3231,9 @@ v8::Local<v8::Value> v8::Object::GetHiddenValue(v8::Handle<v8::String> key) {
return Local<v8::Value>());
ENTER_V8(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- i::Handle<i::Object> hidden_props(i::GetHiddenProperties(
- self,
- i::JSObject::OMIT_CREATION));
- if (hidden_props->IsUndefined()) {
- return v8::Local<v8::Value>();
- }
i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
- EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> result = i::GetProperty(hidden_props, key_obj);
- has_pending_exception = result.is_null();
- EXCEPTION_BAILOUT_CHECK(isolate, v8::Local<v8::Value>());
- if (result->IsUndefined()) {
- return v8::Local<v8::Value>();
- }
+ i::Handle<i::Object> result(self->GetHiddenProperty(*key_obj));
+ if (result->IsUndefined()) return v8::Local<v8::Value>();
return Utils::ToLocal(result);
}
@@ -3252,15 +3244,9 @@ bool v8::Object::DeleteHiddenValue(v8::Handle<v8::String> key) {
ENTER_V8(isolate);
i::HandleScope scope(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- i::Handle<i::Object> hidden_props(i::GetHiddenProperties(
- self,
- i::JSObject::OMIT_CREATION));
- if (hidden_props->IsUndefined()) {
- return true;
- }
- i::Handle<i::JSObject> js_obj(i::JSObject::cast(*hidden_props));
i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
- return i::DeleteProperty(js_obj, key_obj)->IsTrue();
+ self->DeleteHiddenProperty(*key_obj);
+ return true;
}
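
The three methods above now route through the dedicated hidden-property store (SetHiddenProperty / GetHiddenProperty / DeleteHiddenProperty) instead of a lazily created hidden-properties object. An embedder-side sketch of the public API they back, assuming the v8.h shipped with this release (the key string is illustrative; error handling omitted):

    #include <v8.h>

    void TagObject(v8::Handle<v8::Object> obj) {
      v8::HandleScope scope;
      v8::Handle<v8::String> key = v8::String::New("embedder-tag");
      obj->SetHiddenValue(key, v8::Integer::New(42));       // invisible to script
      v8::Local<v8::Value> tag = obj->GetHiddenValue(key);  // Integer 42
      obj->DeleteHiddenValue(key);  // later GetHiddenValue returns an empty handle
      (void)tag;
    }
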
@@ -3310,22 +3296,12 @@ void PrepareExternalArrayElements(i::Handle<i::JSObject> object,
i::Handle<i::ExternalArray> array =
isolate->factory()->NewExternalArray(length, array_type, data);
- // If the object already has external elements, create a new, unique
- // map if the element type is now changing, because assumptions about
- // generated code based on the receiver's map will be invalid.
- i::Handle<i::HeapObject> elements(object->elements());
- bool cant_reuse_map =
- elements->map()->IsUndefined() ||
- !elements->map()->has_external_array_elements() ||
- elements->map() != isolate->heap()->MapForExternalArrayType(array_type);
- if (cant_reuse_map) {
- i::Handle<i::Map> external_array_map =
- isolate->factory()->GetElementsTransitionMap(
- i::Handle<i::Map>(object->map()),
- GetElementsKindFromExternalArrayType(array_type),
- object->HasFastProperties());
- object->set_map(*external_array_map);
- }
+ i::Handle<i::Map> external_array_map =
+ isolate->factory()->GetElementsTransitionMap(
+ object,
+ GetElementsKindFromExternalArrayType(array_type));
+
+ object->set_map(*external_array_map);
object->set_elements(*array);
}
@@ -3484,7 +3460,8 @@ bool v8::Object::IsCallable() {
}
-Local<v8::Value> Object::CallAsFunction(v8::Handle<v8::Object> recv, int argc,
+Local<v8::Value> Object::CallAsFunction(v8::Handle<v8::Object> recv,
+ int argc,
v8::Handle<v8::Value> argv[]) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ON_BAILOUT(isolate, "v8::Object::CallAsFunction()",
@@ -3495,7 +3472,7 @@ Local<v8::Value> Object::CallAsFunction(v8::Handle<v8::Object> recv, int argc,
i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
i::Handle<i::Object> recv_obj = Utils::OpenHandle(*recv);
STATIC_ASSERT(sizeof(v8::Handle<v8::Value>) == sizeof(i::Object**));
- i::Object*** args = reinterpret_cast<i::Object***>(argv);
+ i::Handle<i::Object>* args = reinterpret_cast<i::Handle<i::Object>*>(argv);
i::Handle<i::JSFunction> fun = i::Handle<i::JSFunction>();
if (obj->IsJSFunction()) {
fun = i::Handle<i::JSFunction>::cast(obj);
@@ -3525,7 +3502,7 @@ Local<v8::Value> Object::CallAsConstructor(int argc,
i::HandleScope scope(isolate);
i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
STATIC_ASSERT(sizeof(v8::Handle<v8::Value>) == sizeof(i::Object**));
- i::Object*** args = reinterpret_cast<i::Object***>(argv);
+ i::Handle<i::Object>* args = reinterpret_cast<i::Handle<i::Object>*>(argv);
if (obj->IsJSFunction()) {
i::Handle<i::JSFunction> fun = i::Handle<i::JSFunction>::cast(obj);
EXCEPTION_PREAMBLE(isolate);
@@ -3567,7 +3544,7 @@ Local<v8::Object> Function::NewInstance(int argc,
HandleScope scope;
i::Handle<i::JSFunction> function = Utils::OpenHandle(this);
STATIC_ASSERT(sizeof(v8::Handle<v8::Value>) == sizeof(i::Object**));
- i::Object*** args = reinterpret_cast<i::Object***>(argv);
+ i::Handle<i::Object>* args = reinterpret_cast<i::Handle<i::Object>*>(argv);
EXCEPTION_PREAMBLE(isolate);
i::Handle<i::Object> returned =
i::Execution::New(function, argc, args, &has_pending_exception);
@@ -3588,7 +3565,7 @@ Local<v8::Value> Function::Call(v8::Handle<v8::Object> recv, int argc,
i::Handle<i::JSFunction> fun = Utils::OpenHandle(this);
i::Handle<i::Object> recv_obj = Utils::OpenHandle(*recv);
STATIC_ASSERT(sizeof(v8::Handle<v8::Value>) == sizeof(i::Object**));
- i::Object*** args = reinterpret_cast<i::Object***>(argv);
+ i::Handle<i::Object>* args = reinterpret_cast<i::Handle<i::Object>*>(argv);
EXCEPTION_PREAMBLE(isolate);
i::Handle<i::Object> returned =
i::Execution::Call(fun, recv_obj, argc, args, &has_pending_exception);
@@ -3642,6 +3619,23 @@ int Function::GetScriptLineNumber() const {
}
+int Function::GetScriptColumnNumber() const {
+ i::Handle<i::JSFunction> func = Utils::OpenHandle(this);
+ if (func->shared()->script()->IsScript()) {
+ i::Handle<i::Script> script(i::Script::cast(func->shared()->script()));
+ return i::GetScriptColumnNumber(script, func->shared()->start_position());
+ }
+ return kLineOffsetNotFound;
+}
+
+Handle<Value> Function::GetScriptId() const {
+ i::Handle<i::JSFunction> func = Utils::OpenHandle(this);
+ if (!func->shared()->script()->IsScript())
+ return v8::Undefined();
+ i::Handle<i::Script> script(i::Script::cast(func->shared()->script()));
+ return Utils::ToLocal(i::Handle<i::Object>(script->id()));
+}
+
int String::Length() const {
i::Handle<i::String> str = Utils::OpenHandle(this);
if (IsDeadCheck(str->GetIsolate(), "v8::String::Length()")) return 0;
@@ -3664,13 +3658,30 @@ int String::WriteUtf8(char* buffer,
if (IsDeadCheck(isolate, "v8::String::WriteUtf8()")) return 0;
LOG_API(isolate, "String::WriteUtf8");
ENTER_V8(isolate);
- i::StringInputBuffer& write_input_buffer = *isolate->write_input_buffer();
i::Handle<i::String> str = Utils::OpenHandle(this);
+ if (str->IsAsciiRepresentation()) {
+ int len;
+ if (capacity == -1) {
+ capacity = str->length() + 1;
+ len = str->length();
+ } else {
+ len = i::Min(capacity, str->length());
+ }
+ i::String::WriteToFlat(*str, buffer, 0, len);
+ if (nchars_ref != NULL) *nchars_ref = len;
+ if (!(options & NO_NULL_TERMINATION) && capacity > len) {
+ buffer[len] = '\0';
+ return len + 1;
+ }
+ return len;
+ }
+
+ i::StringInputBuffer& write_input_buffer = *isolate->write_input_buffer();
isolate->string_tracker()->RecordWrite(str);
if (options & HINT_MANY_WRITES_EXPECTED) {
// Flatten the string for efficiency. This applies whether we are
// using StringInputBuffer or Get(i) to access the characters.
- str->TryFlatten();
+ FlattenString(str);
}
write_input_buffer.Reset(0, *str);
int len = str->length();
@@ -3799,10 +3810,11 @@ bool v8::String::IsExternalAscii() const {
void v8::String::VerifyExternalStringResource(
v8::String::ExternalStringResource* value) const {
i::Handle<i::String> str = Utils::OpenHandle(this);
- v8::String::ExternalStringResource* expected;
+ const v8::String::ExternalStringResource* expected;
if (i::StringShape(*str).IsExternalTwoByte()) {
- void* resource = i::Handle<i::ExternalTwoByteString>::cast(str)->resource();
- expected = reinterpret_cast<ExternalStringResource*>(resource);
+ const void* resource =
+ i::Handle<i::ExternalTwoByteString>::cast(str)->resource();
+ expected = reinterpret_cast<const ExternalStringResource*>(resource);
} else {
expected = NULL;
}
@@ -3810,7 +3822,7 @@ void v8::String::VerifyExternalStringResource(
}
-v8::String::ExternalAsciiStringResource*
+const v8::String::ExternalAsciiStringResource*
v8::String::GetExternalAsciiStringResource() const {
i::Handle<i::String> str = Utils::OpenHandle(this);
if (IsDeadCheck(str->GetIsolate(),
@@ -3818,8 +3830,9 @@ v8::String::ExternalAsciiStringResource*
return NULL;
}
if (i::StringShape(*str).IsExternalAscii()) {
- void* resource = i::Handle<i::ExternalAsciiString>::cast(str)->resource();
- return reinterpret_cast<ExternalAsciiStringResource*>(resource);
+ const void* resource =
+ i::Handle<i::ExternalAsciiString>::cast(str)->resource();
+ return reinterpret_cast<const ExternalAsciiStringResource*>(resource);
} else {
return NULL;
}
@@ -3989,6 +4002,15 @@ HeapStatistics::HeapStatistics(): total_heap_size_(0),
void v8::V8::GetHeapStatistics(HeapStatistics* heap_statistics) {
+ if (!i::Isolate::Current()->IsInitialized()) {
+ // Isolate is unitialized thus heap is not configured yet.
+ heap_statistics->set_total_heap_size(0);
+ heap_statistics->set_total_heap_size_executable(0);
+ heap_statistics->set_used_heap_size(0);
+ heap_statistics->set_heap_size_limit(0);
+ return;
+ }
+
i::Heap* heap = i::Isolate::Current()->heap();
heap_statistics->set_total_heap_size(heap->CommittedMemory());
heap_statistics->set_total_heap_size_executable(
@@ -3998,18 +4020,19 @@ void v8::V8::GetHeapStatistics(HeapStatistics* heap_statistics) {
}
-bool v8::V8::IdleNotification() {
+bool v8::V8::IdleNotification(int hint) {
// Returning true tells the caller that it need not
// continue to call IdleNotification.
- if (!i::Isolate::Current()->IsInitialized()) return true;
- return i::V8::IdleNotification();
+ i::Isolate* isolate = i::Isolate::Current();
+ if (isolate == NULL || !isolate->IsInitialized()) return true;
+ return i::V8::IdleNotification(hint);
}
void v8::V8::LowMemoryNotification() {
i::Isolate* isolate = i::Isolate::Current();
- if (!isolate->IsInitialized()) return;
- isolate->heap()->CollectAllGarbage(true);
+ if (isolate == NULL || !isolate->IsInitialized()) return;
+ isolate->heap()->CollectAllAvailableGarbage();
}
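
A minimal embedder sketch of the two notification entry points touched above: IdleNotification() now takes a work hint, and LowMemoryNotification() now triggers CollectAllAvailableGarbage(). The hint value below is illustrative, not taken from the diff:

    #include <v8.h>

    void GiveV8IdleTime() {
      const int kIdleHint = 1000;  // rough amount-of-work hint (illustrative)
      // Returns true once V8 has no further idle work, so stop calling it.
      while (!v8::V8::IdleNotification(kIdleHint)) {
        // keep granting idle time
      }
    }

    void OnMemoryPressure() {
      v8::V8::LowMemoryNotification();  // collects all available garbage
    }
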
@@ -4103,8 +4126,9 @@ Persistent<Context> v8::Context::New(
}
// Leave V8.
- if (env.is_null())
+ if (env.is_null()) {
return Persistent<Context>();
+ }
return Persistent<Context>(Utils::ToLocal(env));
}
@@ -4292,7 +4316,7 @@ static Local<External> ExternalNewImpl(void* data) {
}
static void* ExternalValueImpl(i::Handle<i::Object> obj) {
- return reinterpret_cast<void*>(i::Foreign::cast(*obj)->address());
+ return reinterpret_cast<void*>(i::Foreign::cast(*obj)->foreign_address());
}
@@ -4318,7 +4342,7 @@ void* v8::Object::SlowGetPointerFromInternalField(int index) {
if (value->IsSmi()) {
return i::Internals::GetExternalPointerFromSmi(value);
} else if (value->IsForeign()) {
- return reinterpret_cast<void*>(i::Foreign::cast(value)->address());
+ return reinterpret_cast<void*>(i::Foreign::cast(value)->foreign_address());
} else {
return NULL;
}
@@ -4528,15 +4552,13 @@ bool v8::String::MakeExternal(
bool v8::String::CanMakeExternal() {
+ if (!internal::FLAG_clever_optimizations) return false;
i::Handle<i::String> obj = Utils::OpenHandle(this);
i::Isolate* isolate = obj->GetIsolate();
if (IsDeadCheck(isolate, "v8::String::CanMakeExternal()")) return false;
- if (isolate->string_tracker()->IsFreshUnusedString(obj)) {
- return false;
- }
+ if (isolate->string_tracker()->IsFreshUnusedString(obj)) return false;
int size = obj->Size(); // Byte size of the original string.
- if (size < i::ExternalString::kSize)
- return false;
+ if (size < i::ExternalString::kShortSize) return false;
i::StringShape shape(*obj);
return !shape.IsExternal();
}
@@ -4870,7 +4892,7 @@ void V8::RemoveMessageListeners(MessageCallback that) {
NeanderObject listener(i::JSObject::cast(listeners.get(i)));
i::Handle<i::Foreign> callback_obj(i::Foreign::cast(listener.get(0)));
- if (callback_obj->address() == FUNCTION_ADDR(that)) {
+ if (callback_obj->foreign_address() == FUNCTION_ADDR(that)) {
listeners.set(i, isolate->heap()->undefined_value());
}
}
@@ -5480,6 +5502,12 @@ bool Debug::EnableAgent(const char* name, int port, bool wait_for_connection) {
wait_for_connection);
}
+
+void Debug::DisableAgent() {
+ return i::Isolate::Current()->debugger()->StopAgent();
+}
+
+
void Debug::ProcessDebugMessages() {
i::Execution::ProcessDebugMesssages(true);
}
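
The last hunk above adds Debug::DisableAgent() as the counterpart to the existing Debug::EnableAgent(). A hedged lifecycle sketch, assuming the v8-debug.h from this release (agent name and port are illustrative):

    #include <v8.h>
    #include <v8-debug.h>

    void RunWithDebugAgent() {
      // Start the TCP debug agent without blocking for a client connection.
      v8::Debug::EnableAgent("my-embedder", 5858, false);
      // ... compile and run scripts ...
      v8::Debug::DisableAgent();  // new in this upgrade: stops the agent
    }
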
diff --git a/deps/v8/src/api.h b/deps/v8/src/api.h
index 07723cb32..a825dd797 100644
--- a/deps/v8/src/api.h
+++ b/deps/v8/src/api.h
@@ -112,15 +112,16 @@ void NeanderObject::set(int offset, v8::internal::Object* value) {
}
-template <typename T> static inline T ToCData(v8::internal::Object* obj) {
+template <typename T> inline T ToCData(v8::internal::Object* obj) {
STATIC_ASSERT(sizeof(T) == sizeof(v8::internal::Address));
return reinterpret_cast<T>(
- reinterpret_cast<intptr_t>(v8::internal::Foreign::cast(obj)->address()));
+ reinterpret_cast<intptr_t>(
+ v8::internal::Foreign::cast(obj)->foreign_address()));
}
template <typename T>
-static inline v8::internal::Handle<v8::internal::Object> FromCData(T obj) {
+inline v8::internal::Handle<v8::internal::Object> FromCData(T obj) {
STATIC_ASSERT(sizeof(T) == sizeof(v8::internal::Address));
return FACTORY->NewForeign(
reinterpret_cast<v8::internal::Address>(reinterpret_cast<intptr_t>(obj)));
@@ -136,10 +137,6 @@ class ApiFunction {
};
-enum ExtensionTraversalState {
- UNVISITED, VISITED, INSTALLED
-};
-
class RegisteredExtension {
public:
@@ -148,14 +145,11 @@ class RegisteredExtension {
Extension* extension() { return extension_; }
RegisteredExtension* next() { return next_; }
RegisteredExtension* next_auto() { return next_auto_; }
- ExtensionTraversalState state() { return state_; }
- void set_state(ExtensionTraversalState value) { state_ = value; }
static RegisteredExtension* first_extension() { return first_extension_; }
private:
Extension* extension_;
RegisteredExtension* next_;
RegisteredExtension* next_auto_;
- ExtensionTraversalState state_;
static RegisteredExtension* first_extension_;
};
@@ -242,7 +236,7 @@ class Utils {
template <class T>
-static inline T* ToApi(v8::internal::Handle<v8::internal::Object> obj) {
+inline T* ToApi(v8::internal::Handle<v8::internal::Object> obj) {
return reinterpret_cast<T*>(obj.location());
}
@@ -483,7 +477,7 @@ class HandleScopeImplementer {
};
-static const int kHandleBlockSize = v8::internal::KB - 2; // fit in one page
+const int kHandleBlockSize = v8::internal::KB - 2; // fit in one page
void HandleScopeImplementer::SaveContext(Context* context) {
diff --git a/deps/v8/src/arm/assembler-arm-inl.h b/deps/v8/src/arm/assembler-arm-inl.h
index 3e19a4538..79f9c7bd2 100644
--- a/deps/v8/src/arm/assembler-arm-inl.h
+++ b/deps/v8/src/arm/assembler-arm-inl.h
@@ -64,7 +64,9 @@ Address RelocInfo::target_address() {
Address RelocInfo::target_address_address() {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
+ ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY
+ || rmode_ == EMBEDDED_OBJECT
+ || rmode_ == EXTERNAL_REFERENCE);
return reinterpret_cast<Address>(Assembler::target_address_address_at(pc_));
}
@@ -74,9 +76,14 @@ int RelocInfo::target_address_size() {
}
-void RelocInfo::set_target_address(Address target) {
+void RelocInfo::set_target_address(Address target, WriteBarrierMode mode) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
Assembler::set_target_address_at(pc_, target);
+ if (mode == UPDATE_WRITE_BARRIER && host() != NULL && IsCodeTarget(rmode_)) {
+ Object* target_code = Code::GetCodeFromTargetAddress(target);
+ host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
+ host(), this, HeapObject::cast(target_code));
+ }
}
@@ -98,9 +105,15 @@ Object** RelocInfo::target_object_address() {
}
-void RelocInfo::set_target_object(Object* target) {
+void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
Assembler::set_target_address_at(pc_, reinterpret_cast<Address>(target));
+ if (mode == UPDATE_WRITE_BARRIER &&
+ host() != NULL &&
+ target->IsHeapObject()) {
+ host()->GetHeap()->incremental_marking()->RecordWrite(
+ host(), &Memory::Object_at(pc_), HeapObject::cast(target));
+ }
}
@@ -127,10 +140,17 @@ JSGlobalPropertyCell* RelocInfo::target_cell() {
}
-void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell) {
+void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell,
+ WriteBarrierMode mode) {
ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
Address address = cell->address() + JSGlobalPropertyCell::kValueOffset;
Memory::Address_at(pc_) = address;
+ if (mode == UPDATE_WRITE_BARRIER && host() != NULL) {
+ // TODO(1550) We are passing NULL as a slot because cell can never be on
+ // evacuation candidate.
+ host()->GetHeap()->incremental_marking()->RecordWrite(
+ host(), NULL, cell);
+ }
}
@@ -147,6 +167,11 @@ void RelocInfo::set_call_address(Address target) {
ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
Memory::Address_at(pc_ + 2 * Assembler::kInstrSize) = target;
+ if (host() != NULL) {
+ Object* target_code = Code::GetCodeFromTargetAddress(target);
+ host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
+ host(), this, HeapObject::cast(target_code));
+ }
}
@@ -195,13 +220,13 @@ bool RelocInfo::IsPatchedDebugBreakSlotSequence() {
void RelocInfo::Visit(ObjectVisitor* visitor) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
- visitor->VisitPointer(target_object_address());
+ visitor->VisitEmbeddedPointer(this);
} else if (RelocInfo::IsCodeTarget(mode)) {
visitor->VisitCodeTarget(this);
} else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
visitor->VisitGlobalPropertyCell(this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
- visitor->VisitExternalReference(target_reference_address());
+ visitor->VisitExternalReference(this);
#ifdef ENABLE_DEBUGGER_SUPPORT
// TODO(isolates): Get a cached isolate below.
} else if (((RelocInfo::IsJSReturn(mode) &&
@@ -221,13 +246,13 @@ template<typename StaticVisitor>
void RelocInfo::Visit(Heap* heap) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
- StaticVisitor::VisitPointer(heap, target_object_address());
+ StaticVisitor::VisitEmbeddedPointer(heap, this);
} else if (RelocInfo::IsCodeTarget(mode)) {
StaticVisitor::VisitCodeTarget(heap, this);
} else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
StaticVisitor::VisitGlobalPropertyCell(heap, this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
- StaticVisitor::VisitExternalReference(target_reference_address());
+ StaticVisitor::VisitExternalReference(this);
#ifdef ENABLE_DEBUGGER_SUPPORT
} else if (heap->isolate()->debug()->has_break_points() &&
((RelocInfo::IsJSReturn(mode) &&
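
The RelocInfo setters above now notify the incremental marker whenever patched code starts referencing another heap object. An illustrative-only sketch of the invariant such a RecordWrite barrier maintains; all names here are hypothetical, not V8's:

    #include <cstddef>
    #include <vector>

    struct Obj {
      bool marked;  // already seen by the concurrent marker
      Obj() : marked(false) {}
    };

    std::vector<Obj*> marking_worklist;  // hypothetical marker state

    // Write barrier: if an already-marked host is mutated to point at an
    // unmarked value, re-queue the value so incremental marking cannot
    // miss it.
    void RecordWrite(Obj* host, Obj** slot, Obj* value) {
      *slot = value;
      if (host->marked && value != NULL && !value->marked) {
        value->marked = true;
        marking_worklist.push_back(value);
      }
    }
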
diff --git a/deps/v8/src/arm/assembler-arm.cc b/deps/v8/src/arm/assembler-arm.cc
index 0ec36921a..329493a34 100644
--- a/deps/v8/src/arm/assembler-arm.cc
+++ b/deps/v8/src/arm/assembler-arm.cc
@@ -78,7 +78,9 @@ static uint64_t CpuFeaturesImpliedByCompiler() {
void CpuFeatures::Probe() {
- ASSERT(!initialized_);
+ unsigned standard_features = (OS::CpuFeaturesImpliedByPlatform() |
+ CpuFeaturesImpliedByCompiler());
+ ASSERT(supported_ == 0 || supported_ == standard_features);
#ifdef DEBUG
initialized_ = true;
#endif
@@ -86,8 +88,7 @@ void CpuFeatures::Probe() {
// Get the features implied by the OS and the compiler settings. This is the
// minimal set of features which is also allowed for generated code in the
// snapshot.
- supported_ |= OS::CpuFeaturesImpliedByPlatform();
- supported_ |= CpuFeaturesImpliedByCompiler();
+ supported_ |= standard_features;
if (Serializer::enabled()) {
// No probing for features if we might serialize (generate snapshot).
@@ -2505,7 +2506,8 @@ void Assembler::dd(uint32_t data) {
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
- RelocInfo rinfo(pc_, rmode, data); // we do not try to reuse pool constants
+ // We do not try to reuse pool constants.
+ RelocInfo rinfo(pc_, rmode, data, NULL);
if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::DEBUG_BREAK_SLOT) {
// Adjust code for new modes.
ASSERT(RelocInfo::IsDebugBreakSlot(rmode)
@@ -2537,7 +2539,7 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
}
ASSERT(buffer_space() >= kMaxRelocSize); // too late to grow buffer here
if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
- RelocInfo reloc_info_with_ast_id(pc_, rmode, RecordedAstId());
+ RelocInfo reloc_info_with_ast_id(pc_, rmode, RecordedAstId(), NULL);
ClearRecordedAstId();
reloc_info_writer.Write(&reloc_info_with_ast_id);
} else {
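
Probe() above no longer asserts !initialized_; instead it recomputes the platform- and compiler-implied feature mask and asserts that any earlier probe produced the same baseline, making repeated calls safe. A standalone rendering of that pattern (names and the mask value are illustrative):

    #include <cassert>

    static unsigned supported_ = 0;

    unsigned StandardFeatures() { return 0x3; }  // stand-in for the OS/compiler mask

    void Probe() {
      unsigned standard_features = StandardFeatures();
      // A second call must observe the same baseline as the first.
      assert(supported_ == 0 || supported_ == standard_features);
      supported_ |= standard_features;
    }
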
diff --git a/deps/v8/src/arm/assembler-arm.h b/deps/v8/src/arm/assembler-arm.h
index 9a586936f..247479d73 100644
--- a/deps/v8/src/arm/assembler-arm.h
+++ b/deps/v8/src/arm/assembler-arm.h
@@ -304,9 +304,9 @@ const DwVfpRegister d14 = { 14 };
const DwVfpRegister d15 = { 15 };
// Aliases for double registers.
-const DwVfpRegister kFirstCalleeSavedDoubleReg = d8;
-const DwVfpRegister kLastCalleeSavedDoubleReg = d15;
-const DwVfpRegister kDoubleRegZero = d14;
+static const DwVfpRegister& kFirstCalleeSavedDoubleReg = d8;
+static const DwVfpRegister& kLastCalleeSavedDoubleReg = d15;
+static const DwVfpRegister& kDoubleRegZero = d14;
// Coprocessor register
@@ -1209,6 +1209,10 @@ class Assembler : public AssemblerBase {
PositionsRecorder* positions_recorder() { return &positions_recorder_; }
// Read/patch instructions
+ Instr instr_at(int pos) { return *reinterpret_cast<Instr*>(buffer_ + pos); }
+ void instr_at_put(int pos, Instr instr) {
+ *reinterpret_cast<Instr*>(buffer_ + pos) = instr;
+ }
static Instr instr_at(byte* pc) { return *reinterpret_cast<Instr*>(pc); }
static void instr_at_put(byte* pc, Instr instr) {
*reinterpret_cast<Instr*>(pc) = instr;
@@ -1263,12 +1267,6 @@ class Assembler : public AssemblerBase {
int buffer_space() const { return reloc_info_writer.pos() - pc_; }
- // Read/patch instructions
- Instr instr_at(int pos) { return *reinterpret_cast<Instr*>(buffer_ + pos); }
- void instr_at_put(int pos, Instr instr) {
- *reinterpret_cast<Instr*>(buffer_ + pos) = instr;
- }
-
// Decode branch instruction at pos and return branch target pos
int target_at(int pos);
diff --git a/deps/v8/src/arm/builtins-arm.cc b/deps/v8/src/arm/builtins-arm.cc
index 60d2081c2..d0136f550 100644
--- a/deps/v8/src/arm/builtins-arm.cc
+++ b/deps/v8/src/arm/builtins-arm.cc
@@ -86,12 +86,6 @@ static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) {
}
-// This constant has the same value as JSArray::kPreallocatedArrayElements and
-// if JSArray::kPreallocatedArrayElements is changed handling of loop unfolding
-// below should be reconsidered.
-static const int kLoopUnfoldLimit = 4;
-
-
// Allocate an empty JSArray. The allocated array is put into the result
// register. An elements backing store is allocated with size initial_capacity
// and filled with the hole values.
@@ -101,16 +95,19 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
Register scratch1,
Register scratch2,
Register scratch3,
- int initial_capacity,
Label* gc_required) {
- ASSERT(initial_capacity > 0);
+ const int initial_capacity = JSArray::kPreallocatedArrayElements;
+ STATIC_ASSERT(initial_capacity >= 0);
// Load the initial map from the array function.
__ ldr(scratch1, FieldMemOperand(array_function,
JSFunction::kPrototypeOrInitialMapOffset));
// Allocate the JSArray object together with space for a fixed array with the
// requested elements.
- int size = JSArray::kSize + FixedArray::SizeFor(initial_capacity);
+ int size = JSArray::kSize;
+ if (initial_capacity > 0) {
+ size += FixedArray::SizeFor(initial_capacity);
+ }
__ AllocateInNewSpace(size,
result,
scratch2,
@@ -130,6 +127,11 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
__ mov(scratch3, Operand(0, RelocInfo::NONE));
__ str(scratch3, FieldMemOperand(result, JSArray::kLengthOffset));
+ if (initial_capacity == 0) {
+ __ str(scratch1, FieldMemOperand(result, JSArray::kElementsOffset));
+ return;
+ }
+
// Calculate the location of the elements array and set elements array member
// of the JSArray.
// result: JSObject
@@ -138,7 +140,6 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
__ str(scratch1, FieldMemOperand(result, JSArray::kElementsOffset));
// Clear the heap tag on the elements array.
- STATIC_ASSERT(kSmiTag == 0);
__ sub(scratch1, scratch1, Operand(kHeapObjectTag));
// Initialize the FixedArray and fill it with holes. FixedArray length is
@@ -147,18 +148,29 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
// scratch1: elements array (untagged)
// scratch2: start of next object
__ LoadRoot(scratch3, Heap::kFixedArrayMapRootIndex);
- ASSERT_EQ(0 * kPointerSize, FixedArray::kMapOffset);
+ STATIC_ASSERT(0 * kPointerSize == FixedArray::kMapOffset);
__ str(scratch3, MemOperand(scratch1, kPointerSize, PostIndex));
__ mov(scratch3, Operand(Smi::FromInt(initial_capacity)));
- ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
+ STATIC_ASSERT(1 * kPointerSize == FixedArray::kLengthOffset);
__ str(scratch3, MemOperand(scratch1, kPointerSize, PostIndex));
- // Fill the FixedArray with the hole value.
- ASSERT_EQ(2 * kPointerSize, FixedArray::kHeaderSize);
- ASSERT(initial_capacity <= kLoopUnfoldLimit);
+ // Fill the FixedArray with the hole value. Inline the code if short.
+ STATIC_ASSERT(2 * kPointerSize == FixedArray::kHeaderSize);
__ LoadRoot(scratch3, Heap::kTheHoleValueRootIndex);
- for (int i = 0; i < initial_capacity; i++) {
+ static const int kLoopUnfoldLimit = 4;
+ if (initial_capacity <= kLoopUnfoldLimit) {
+ for (int i = 0; i < initial_capacity; i++) {
+ __ str(scratch3, MemOperand(scratch1, kPointerSize, PostIndex));
+ }
+ } else {
+ Label loop, entry;
+ __ add(scratch2, scratch1, Operand(initial_capacity * kPointerSize));
+ __ b(&entry);
+ __ bind(&loop);
__ str(scratch3, MemOperand(scratch1, kPointerSize, PostIndex));
+ __ bind(&entry);
+ __ cmp(scratch1, scratch2);
+ __ b(lt, &loop);
}
}
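
The stub above now fills the backing store with holes in straight-line code only when the capacity is at or below kLoopUnfoldLimit, and emits a compare-and-branch loop otherwise. A plain-C++ rendering of that decision (the real code emits ARM stores; this is not the generated code):

    void FillWithHoles(void** start, int capacity, void* the_hole) {
      static const int kLoopUnfoldLimit = 4;  // same threshold as the stub
      if (capacity <= kLoopUnfoldLimit) {
        // Small capacities: the stub emits one store per element, no loop.
        for (int i = 0; i < capacity; i++) start[i] = the_hole;
      } else {
        // Larger capacities: the stub emits a loop like the one above.
        void** end = start + capacity;
        while (start < end) *start++ = the_hole;
      }
    }
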
@@ -173,7 +185,7 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
// register elements_array_storage is scratched.
static void AllocateJSArray(MacroAssembler* masm,
Register array_function, // Array function.
- Register array_size, // As a smi.
+ Register array_size, // As a smi, cannot be 0.
Register result,
Register elements_array_storage,
Register elements_array_end,
@@ -181,32 +193,18 @@ static void AllocateJSArray(MacroAssembler* masm,
Register scratch2,
bool fill_with_hole,
Label* gc_required) {
- Label not_empty, allocated;
-
// Load the initial map from the array function.
__ ldr(elements_array_storage,
FieldMemOperand(array_function,
JSFunction::kPrototypeOrInitialMapOffset));
- // Check whether an empty sized array is requested.
- __ tst(array_size, array_size);
- __ b(ne, &not_empty);
-
- // If an empty array is requested allocate a small elements array anyway. This
- // keeps the code below free of special casing for the empty array.
- int size = JSArray::kSize +
- FixedArray::SizeFor(JSArray::kPreallocatedArrayElements);
- __ AllocateInNewSpace(size,
- result,
- elements_array_end,
- scratch1,
- gc_required,
- TAG_OBJECT);
- __ jmp(&allocated);
+ if (FLAG_debug_code) { // Assert that array size is not zero.
+ __ tst(array_size, array_size);
+ __ Assert(ne, "array size is unexpectedly 0");
+ }
// Allocate the JSArray object together with space for a FixedArray with the
// requested number of elements.
- __ bind(&not_empty);
STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
__ mov(elements_array_end,
Operand((JSArray::kSize + FixedArray::kHeaderSize) / kPointerSize));
@@ -226,7 +224,6 @@ static void AllocateJSArray(MacroAssembler* masm,
// result: JSObject
// elements_array_storage: initial map
// array_size: size of array (smi)
- __ bind(&allocated);
__ str(elements_array_storage, FieldMemOperand(result, JSObject::kMapOffset));
__ LoadRoot(elements_array_storage, Heap::kEmptyFixedArrayRootIndex);
__ str(elements_array_storage,
@@ -256,14 +253,6 @@ static void AllocateJSArray(MacroAssembler* masm,
ASSERT_EQ(0 * kPointerSize, FixedArray::kMapOffset);
__ str(scratch1, MemOperand(elements_array_storage, kPointerSize, PostIndex));
STATIC_ASSERT(kSmiTag == 0);
- __ tst(array_size, array_size);
- // Length of the FixedArray is the number of pre-allocated elements if
- // the actual JSArray has length 0 and the size of the JSArray for non-empty
- // JSArrays. The length of a FixedArray is stored as a smi.
- __ mov(array_size,
- Operand(Smi::FromInt(JSArray::kPreallocatedArrayElements)),
- LeaveCC,
- eq);
ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
__ str(array_size,
MemOperand(elements_array_storage, kPointerSize, PostIndex));
@@ -311,20 +300,20 @@ static void AllocateJSArray(MacroAssembler* masm,
static void ArrayNativeCode(MacroAssembler* masm,
Label* call_generic_code) {
Counters* counters = masm->isolate()->counters();
- Label argc_one_or_more, argc_two_or_more;
+ Label argc_one_or_more, argc_two_or_more, not_empty_array, empty_array;
// Check for array construction with zero arguments or one.
__ cmp(r0, Operand(0, RelocInfo::NONE));
__ b(ne, &argc_one_or_more);
// Handle construction of an empty array.
+ __ bind(&empty_array);
AllocateEmptyJSArray(masm,
r1,
r2,
r3,
r4,
r5,
- JSArray::kPreallocatedArrayElements,
call_generic_code);
__ IncrementCounter(counters->array_function_native(), 1, r3, r4);
// Setup return value, remove receiver from stack and return.
@@ -339,6 +328,13 @@ static void ArrayNativeCode(MacroAssembler* masm,
__ b(ne, &argc_two_or_more);
STATIC_ASSERT(kSmiTag == 0);
__ ldr(r2, MemOperand(sp)); // Get the argument from the stack.
+ __ tst(r2, r2);
+ __ b(ne, &not_empty_array);
+ __ Drop(1); // Adjust stack.
+ __ mov(r0, Operand(0)); // Treat this as a call with argc of zero.
+ __ b(&empty_array);
+
+ __ bind(&not_empty_array);
__ and_(r3, r2, Operand(kIntptrSignBit | kSmiTagMask), SetCC);
__ b(ne, call_generic_code);
@@ -582,10 +578,11 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
__ bind(&convert_argument);
__ push(function); // Preserve the function.
__ IncrementCounter(counters->string_ctor_conversions(), 1, r3, r4);
- __ EnterInternalFrame();
- __ push(r0);
- __ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION);
- __ LeaveInternalFrame();
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ push(r0);
+ __ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION);
+ }
__ pop(function);
__ mov(argument, r0);
__ b(&argument_is_string);
@@ -601,10 +598,11 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
// create a string wrapper.
__ bind(&gc_required);
__ IncrementCounter(counters->string_ctor_gc_required(), 1, r3, r4);
- __ EnterInternalFrame();
- __ push(argument);
- __ CallRuntime(Runtime::kNewStringWrapper, 1);
- __ LeaveInternalFrame();
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ push(argument);
+ __ CallRuntime(Runtime::kNewStringWrapper, 1);
+ }
__ Ret();
}
@@ -617,12 +615,12 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
// -- sp[...]: constructor arguments
// -----------------------------------
- Label non_function_call;
+ Label slow, non_function_call;
// Check that the function is not a smi.
__ JumpIfSmi(r1, &non_function_call);
// Check that the function is a JSFunction.
__ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
- __ b(ne, &non_function_call);
+ __ b(ne, &slow);
// Jump to the function-specific construct stub.
__ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
@@ -631,10 +629,19 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
// r0: number of arguments
// r1: called object
+ // r2: object type
+ Label do_call;
+ __ bind(&slow);
+ __ cmp(r2, Operand(JS_FUNCTION_PROXY_TYPE));
+ __ b(ne, &non_function_call);
+ __ GetBuiltinEntry(r3, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
+ __ jmp(&do_call);
+
__ bind(&non_function_call);
+ __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
+ __ bind(&do_call);
// Set expected number of arguments to zero (not changing r0).
__ mov(r2, Operand(0, RelocInfo::NONE));
- __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
__ SetCallKind(r5, CALL_AS_METHOD);
__ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
RelocInfo::CODE_TARGET);
@@ -650,321 +657,329 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
Isolate* isolate = masm->isolate();
// Enter a construct frame.
- __ EnterConstructFrame();
-
- // Preserve the two incoming parameters on the stack.
- __ mov(r0, Operand(r0, LSL, kSmiTagSize));
- __ push(r0); // Smi-tagged arguments count.
- __ push(r1); // Constructor function.
-
- // Try to allocate the object without transitioning into C code. If any of the
- // preconditions is not met, the code bails out to the runtime call.
- Label rt_call, allocated;
- if (FLAG_inline_new) {
- Label undo_allocation;
+ {
+ FrameScope scope(masm, StackFrame::CONSTRUCT);
+
+ // Preserve the two incoming parameters on the stack.
+ __ mov(r0, Operand(r0, LSL, kSmiTagSize));
+ __ push(r0); // Smi-tagged arguments count.
+ __ push(r1); // Constructor function.
+
+ // Try to allocate the object without transitioning into C code. If any of
+ // the preconditions is not met, the code bails out to the runtime call.
+ Label rt_call, allocated;
+ if (FLAG_inline_new) {
+ Label undo_allocation;
#ifdef ENABLE_DEBUGGER_SUPPORT
- ExternalReference debug_step_in_fp =
- ExternalReference::debug_step_in_fp_address(isolate);
- __ mov(r2, Operand(debug_step_in_fp));
- __ ldr(r2, MemOperand(r2));
- __ tst(r2, r2);
- __ b(ne, &rt_call);
+ ExternalReference debug_step_in_fp =
+ ExternalReference::debug_step_in_fp_address(isolate);
+ __ mov(r2, Operand(debug_step_in_fp));
+ __ ldr(r2, MemOperand(r2));
+ __ tst(r2, r2);
+ __ b(ne, &rt_call);
#endif
- // Load the initial map and verify that it is in fact a map.
- // r1: constructor function
- __ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
- __ JumpIfSmi(r2, &rt_call);
- __ CompareObjectType(r2, r3, r4, MAP_TYPE);
- __ b(ne, &rt_call);
-
- // Check that the constructor is not constructing a JSFunction (see comments
- // in Runtime_NewObject in runtime.cc). In which case the initial map's
- // instance type would be JS_FUNCTION_TYPE.
- // r1: constructor function
- // r2: initial map
- __ CompareInstanceType(r2, r3, JS_FUNCTION_TYPE);
- __ b(eq, &rt_call);
-
- if (count_constructions) {
- Label allocate;
- // Decrease generous allocation count.
- __ ldr(r3, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
- MemOperand constructor_count =
- FieldMemOperand(r3, SharedFunctionInfo::kConstructionCountOffset);
- __ ldrb(r4, constructor_count);
- __ sub(r4, r4, Operand(1), SetCC);
- __ strb(r4, constructor_count);
- __ b(ne, &allocate);
-
- __ Push(r1, r2);
-
- __ push(r1); // constructor
- // The call will replace the stub, so the countdown is only done once.
- __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
-
- __ pop(r2);
- __ pop(r1);
-
- __ bind(&allocate);
- }
+ // Load the initial map and verify that it is in fact a map.
+ // r1: constructor function
+ __ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
+ __ JumpIfSmi(r2, &rt_call);
+ __ CompareObjectType(r2, r3, r4, MAP_TYPE);
+ __ b(ne, &rt_call);
+
+      // Check that the constructor is not constructing a JSFunction (see
+      // comments in Runtime_NewObject in runtime.cc); in that case the
+      // initial map's instance type would be JS_FUNCTION_TYPE.
+ // r1: constructor function
+ // r2: initial map
+ __ CompareInstanceType(r2, r3, JS_FUNCTION_TYPE);
+ __ b(eq, &rt_call);
- // Now allocate the JSObject on the heap.
- // r1: constructor function
- // r2: initial map
- __ ldrb(r3, FieldMemOperand(r2, Map::kInstanceSizeOffset));
- __ AllocateInNewSpace(r3, r4, r5, r6, &rt_call, SIZE_IN_WORDS);
+ if (count_constructions) {
+ Label allocate;
+ // Decrease generous allocation count.
+ __ ldr(r3, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+ MemOperand constructor_count =
+ FieldMemOperand(r3, SharedFunctionInfo::kConstructionCountOffset);
+ __ ldrb(r4, constructor_count);
+ __ sub(r4, r4, Operand(1), SetCC);
+ __ strb(r4, constructor_count);
+ __ b(ne, &allocate);
+
+ __ Push(r1, r2);
+
+ __ push(r1); // constructor
+ // The call will replace the stub, so the countdown is only done once.
+ __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
+
+ __ pop(r2);
+ __ pop(r1);
+
+ __ bind(&allocate);
+ }
- // Allocated the JSObject, now initialize the fields. Map is set to initial
- // map and properties and elements are set to empty fixed array.
- // r1: constructor function
- // r2: initial map
- // r3: object size
- // r4: JSObject (not tagged)
- __ LoadRoot(r6, Heap::kEmptyFixedArrayRootIndex);
- __ mov(r5, r4);
- ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
- __ str(r2, MemOperand(r5, kPointerSize, PostIndex));
- ASSERT_EQ(1 * kPointerSize, JSObject::kPropertiesOffset);
- __ str(r6, MemOperand(r5, kPointerSize, PostIndex));
- ASSERT_EQ(2 * kPointerSize, JSObject::kElementsOffset);
- __ str(r6, MemOperand(r5, kPointerSize, PostIndex));
-
- // Fill all the in-object properties with the appropriate filler.
- // r1: constructor function
- // r2: initial map
- // r3: object size (in words)
- // r4: JSObject (not tagged)
- // r5: First in-object property of JSObject (not tagged)
- __ add(r6, r4, Operand(r3, LSL, kPointerSizeLog2)); // End of object.
- ASSERT_EQ(3 * kPointerSize, JSObject::kHeaderSize);
- { Label loop, entry;
+ // Now allocate the JSObject on the heap.
+ // r1: constructor function
+ // r2: initial map
+ __ ldrb(r3, FieldMemOperand(r2, Map::kInstanceSizeOffset));
+ __ AllocateInNewSpace(r3, r4, r5, r6, &rt_call, SIZE_IN_WORDS);
+
+    // Allocated the JSObject; now initialize the fields. The map is set to
+    // the initial map, and properties and elements are set to the empty
+    // fixed array.
+ // r1: constructor function
+ // r2: initial map
+ // r3: object size
+ // r4: JSObject (not tagged)
+ __ LoadRoot(r6, Heap::kEmptyFixedArrayRootIndex);
+ __ mov(r5, r4);
+ ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
+ __ str(r2, MemOperand(r5, kPointerSize, PostIndex));
+ ASSERT_EQ(1 * kPointerSize, JSObject::kPropertiesOffset);
+ __ str(r6, MemOperand(r5, kPointerSize, PostIndex));
+ ASSERT_EQ(2 * kPointerSize, JSObject::kElementsOffset);
+ __ str(r6, MemOperand(r5, kPointerSize, PostIndex));
+
+ // Fill all the in-object properties with the appropriate filler.
+ // r1: constructor function
+ // r2: initial map
+ // r3: object size (in words)
+ // r4: JSObject (not tagged)
+ // r5: First in-object property of JSObject (not tagged)
+ __ add(r6, r4, Operand(r3, LSL, kPointerSizeLog2)); // End of object.
+ ASSERT_EQ(3 * kPointerSize, JSObject::kHeaderSize);
+ __ LoadRoot(r7, Heap::kUndefinedValueRootIndex);
if (count_constructions) {
+ __ ldr(r0, FieldMemOperand(r2, Map::kInstanceSizesOffset));
+ __ Ubfx(r0, r0, Map::kPreAllocatedPropertyFieldsByte * kBitsPerByte,
+ kBitsPerByte);
+ __ add(r0, r5, Operand(r0, LSL, kPointerSizeLog2));
+ // r0: offset of first field after pre-allocated fields
+ if (FLAG_debug_code) {
+ __ cmp(r0, r6);
+ __ Assert(le, "Unexpected number of pre-allocated property fields.");
+ }
+ __ InitializeFieldsWithFiller(r5, r0, r7);
// To allow for truncation.
__ LoadRoot(r7, Heap::kOnePointerFillerMapRootIndex);
- } else {
- __ LoadRoot(r7, Heap::kUndefinedValueRootIndex);
}
- __ b(&entry);
- __ bind(&loop);
- __ str(r7, MemOperand(r5, kPointerSize, PostIndex));
- __ bind(&entry);
- __ cmp(r5, r6);
- __ b(lt, &loop);
- }
+ __ InitializeFieldsWithFiller(r5, r6, r7);
+
+ // Add the object tag to make the JSObject real, so that we can continue
+ // and jump into the continuation code at any time from now on. Any
+ // failures need to undo the allocation, so that the heap is in a
+ // consistent state and verifiable.
+ __ add(r4, r4, Operand(kHeapObjectTag));
+
+    // Check if a non-empty properties array is needed. Continue with the
+    // allocated object if not; fall through to the runtime call if it is.
+ // r1: constructor function
+ // r4: JSObject
+ // r5: start of next object (not tagged)
+ __ ldrb(r3, FieldMemOperand(r2, Map::kUnusedPropertyFieldsOffset));
+    // The instance-sizes field contains both the pre-allocated property
+    // fields and the in-object properties.
+ __ ldr(r0, FieldMemOperand(r2, Map::kInstanceSizesOffset));
+ __ Ubfx(r6, r0, Map::kPreAllocatedPropertyFieldsByte * kBitsPerByte,
+ kBitsPerByte);
+ __ add(r3, r3, Operand(r6));
+ __ Ubfx(r6, r0, Map::kInObjectPropertiesByte * kBitsPerByte,
+ kBitsPerByte);
+ __ sub(r3, r3, Operand(r6), SetCC);
+
+ // Done if no extra properties are to be allocated.
+ __ b(eq, &allocated);
+ __ Assert(pl, "Property allocation count failed.");
+
+ // Scale the number of elements by pointer size and add the header for
+ // FixedArrays to the start of the next object calculation from above.
+ // r1: constructor
+ // r3: number of elements in properties array
+ // r4: JSObject
+ // r5: start of next object
+ __ add(r0, r3, Operand(FixedArray::kHeaderSize / kPointerSize));
+ __ AllocateInNewSpace(
+ r0,
+ r5,
+ r6,
+ r2,
+ &undo_allocation,
+ static_cast<AllocationFlags>(RESULT_CONTAINS_TOP | SIZE_IN_WORDS));
+
+ // Initialize the FixedArray.
+ // r1: constructor
+ // r3: number of elements in properties array
+ // r4: JSObject
+ // r5: FixedArray (not tagged)
+ __ LoadRoot(r6, Heap::kFixedArrayMapRootIndex);
+ __ mov(r2, r5);
+ ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
+ __ str(r6, MemOperand(r2, kPointerSize, PostIndex));
+ ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
+ __ mov(r0, Operand(r3, LSL, kSmiTagSize));
+ __ str(r0, MemOperand(r2, kPointerSize, PostIndex));
+
+ // Initialize the fields to undefined.
+ // r1: constructor function
+ // r2: First element of FixedArray (not tagged)
+ // r3: number of elements in properties array
+ // r4: JSObject
+ // r5: FixedArray (not tagged)
+ __ add(r6, r2, Operand(r3, LSL, kPointerSizeLog2)); // End of object.
+ ASSERT_EQ(2 * kPointerSize, FixedArray::kHeaderSize);
+ { Label loop, entry;
+ if (count_constructions) {
+ __ LoadRoot(r7, Heap::kUndefinedValueRootIndex);
+ } else if (FLAG_debug_code) {
+ __ LoadRoot(r8, Heap::kUndefinedValueRootIndex);
+ __ cmp(r7, r8);
+ __ Assert(eq, "Undefined value not loaded.");
+ }
+ __ b(&entry);
+ __ bind(&loop);
+ __ str(r7, MemOperand(r2, kPointerSize, PostIndex));
+ __ bind(&entry);
+ __ cmp(r2, r6);
+ __ b(lt, &loop);
+ }
- // Add the object tag to make the JSObject real, so that we can continue and
- // jump into the continuation code at any time from now on. Any failures
- // need to undo the allocation, so that the heap is in a consistent state
- // and verifiable.
- __ add(r4, r4, Operand(kHeapObjectTag));
+ // Store the initialized FixedArray into the properties field of
+ // the JSObject
+ // r1: constructor function
+ // r4: JSObject
+ // r5: FixedArray (not tagged)
+ __ add(r5, r5, Operand(kHeapObjectTag)); // Add the heap tag.
+ __ str(r5, FieldMemOperand(r4, JSObject::kPropertiesOffset));
+
+      // Continue with the successfully allocated JSObject.
+ // r1: constructor function
+ // r4: JSObject
+ __ jmp(&allocated);
+
+ // Undo the setting of the new top so that the heap is verifiable. For
+ // example, the map's unused properties potentially do not match the
+      // allocated object's unused properties.
+ // r4: JSObject (previous new top)
+ __ bind(&undo_allocation);
+ __ UndoAllocationInNewSpace(r4, r5);
+ }
- // Check if a non-empty properties array is needed. Continue with allocated
- // object if not fall through to runtime call if it is.
+ // Allocate the new receiver object using the runtime call.
// r1: constructor function
+ __ bind(&rt_call);
+ __ push(r1); // argument for Runtime_NewObject
+ __ CallRuntime(Runtime::kNewObject, 1);
+ __ mov(r4, r0);
+
+ // Receiver for constructor call allocated.
// r4: JSObject
- // r5: start of next object (not tagged)
- __ ldrb(r3, FieldMemOperand(r2, Map::kUnusedPropertyFieldsOffset));
- // The field instance sizes contains both pre-allocated property fields and
- // in-object properties.
- __ ldr(r0, FieldMemOperand(r2, Map::kInstanceSizesOffset));
- __ Ubfx(r6, r0, Map::kPreAllocatedPropertyFieldsByte * 8, 8);
- __ add(r3, r3, Operand(r6));
- __ Ubfx(r6, r0, Map::kInObjectPropertiesByte * 8, 8);
- __ sub(r3, r3, Operand(r6), SetCC);
-
- // Done if no extra properties are to be allocated.
- __ b(eq, &allocated);
- __ Assert(pl, "Property allocation count failed.");
-
- // Scale the number of elements by pointer size and add the header for
- // FixedArrays to the start of the next object calculation from above.
- // r1: constructor
- // r3: number of elements in properties array
- // r4: JSObject
- // r5: start of next object
- __ add(r0, r3, Operand(FixedArray::kHeaderSize / kPointerSize));
- __ AllocateInNewSpace(
- r0,
- r5,
- r6,
- r2,
- &undo_allocation,
- static_cast<AllocationFlags>(RESULT_CONTAINS_TOP | SIZE_IN_WORDS));
-
- // Initialize the FixedArray.
- // r1: constructor
- // r3: number of elements in properties array
- // r4: JSObject
- // r5: FixedArray (not tagged)
- __ LoadRoot(r6, Heap::kFixedArrayMapRootIndex);
- __ mov(r2, r5);
- ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
- __ str(r6, MemOperand(r2, kPointerSize, PostIndex));
- ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
- __ mov(r0, Operand(r3, LSL, kSmiTagSize));
- __ str(r0, MemOperand(r2, kPointerSize, PostIndex));
-
- // Initialize the fields to undefined.
+ __ bind(&allocated);
+ __ push(r4);
+
+    // Push the function (reloaded from the stack) and the allocated receiver.
+ // sp[0]: receiver (newly allocated object)
+ // sp[1]: constructor function
+ // sp[2]: number of arguments (smi-tagged)
+ __ ldr(r1, MemOperand(sp, kPointerSize));
+ __ push(r1); // Constructor function.
+ __ push(r4); // Receiver.
+
+ // Reload the number of arguments from the stack.
// r1: constructor function
- // r2: First element of FixedArray (not tagged)
- // r3: number of elements in properties array
- // r4: JSObject
- // r5: FixedArray (not tagged)
- __ add(r6, r2, Operand(r3, LSL, kPointerSizeLog2)); // End of object.
- ASSERT_EQ(2 * kPointerSize, FixedArray::kHeaderSize);
- { Label loop, entry;
- if (count_constructions) {
- __ LoadRoot(r7, Heap::kUndefinedValueRootIndex);
- } else if (FLAG_debug_code) {
- __ LoadRoot(r8, Heap::kUndefinedValueRootIndex);
- __ cmp(r7, r8);
- __ Assert(eq, "Undefined value not loaded.");
- }
- __ b(&entry);
- __ bind(&loop);
- __ str(r7, MemOperand(r2, kPointerSize, PostIndex));
- __ bind(&entry);
- __ cmp(r2, r6);
- __ b(lt, &loop);
- }
-
- // Store the initialized FixedArray into the properties field of
- // the JSObject
+ // sp[0]: receiver
+ // sp[1]: constructor function
+ // sp[2]: receiver
+ // sp[3]: constructor function
+ // sp[4]: number of arguments (smi-tagged)
+ __ ldr(r3, MemOperand(sp, 4 * kPointerSize));
+
+    // Set up the pointer to the last argument.
+ __ add(r2, fp, Operand(StandardFrameConstants::kCallerSPOffset));
+
+    // Set up the number of arguments for the function call below.
+ __ mov(r0, Operand(r3, LSR, kSmiTagSize));
+
+ // Copy arguments and receiver to the expression stack.
+ // r0: number of arguments
+ // r2: address of last argument (caller sp)
// r1: constructor function
- // r4: JSObject
- // r5: FixedArray (not tagged)
- __ add(r5, r5, Operand(kHeapObjectTag)); // Add the heap tag.
- __ str(r5, FieldMemOperand(r4, JSObject::kPropertiesOffset));
+ // r3: number of arguments (smi-tagged)
+ // sp[0]: receiver
+ // sp[1]: constructor function
+ // sp[2]: receiver
+ // sp[3]: constructor function
+ // sp[4]: number of arguments (smi-tagged)
+ Label loop, entry;
+ __ b(&entry);
+ __ bind(&loop);
+ __ ldr(ip, MemOperand(r2, r3, LSL, kPointerSizeLog2 - 1));
+ __ push(ip);
+ __ bind(&entry);
+ __ sub(r3, r3, Operand(2), SetCC);
+ __ b(ge, &loop);
- // Continue with JSObject being successfully allocated
+ // Call the function.
+ // r0: number of arguments
// r1: constructor function
- // r4: JSObject
- __ jmp(&allocated);
-
- // Undo the setting of the new top so that the heap is verifiable. For
- // example, the map's unused properties potentially do not match the
- // allocated objects unused properties.
- // r4: JSObject (previous new top)
- __ bind(&undo_allocation);
- __ UndoAllocationInNewSpace(r4, r5);
- }
-
- // Allocate the new receiver object using the runtime call.
- // r1: constructor function
- __ bind(&rt_call);
- __ push(r1); // argument for Runtime_NewObject
- __ CallRuntime(Runtime::kNewObject, 1);
- __ mov(r4, r0);
-
- // Receiver for constructor call allocated.
- // r4: JSObject
- __ bind(&allocated);
- __ push(r4);
-
- // Push the function and the allocated receiver from the stack.
- // sp[0]: receiver (newly allocated object)
- // sp[1]: constructor function
- // sp[2]: number of arguments (smi-tagged)
- __ ldr(r1, MemOperand(sp, kPointerSize));
- __ push(r1); // Constructor function.
- __ push(r4); // Receiver.
-
- // Reload the number of arguments from the stack.
- // r1: constructor function
- // sp[0]: receiver
- // sp[1]: constructor function
- // sp[2]: receiver
- // sp[3]: constructor function
- // sp[4]: number of arguments (smi-tagged)
- __ ldr(r3, MemOperand(sp, 4 * kPointerSize));
-
- // Setup pointer to last argument.
- __ add(r2, fp, Operand(StandardFrameConstants::kCallerSPOffset));
-
- // Setup number of arguments for function call below
- __ mov(r0, Operand(r3, LSR, kSmiTagSize));
+ if (is_api_function) {
+ __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
+ Handle<Code> code =
+ masm->isolate()->builtins()->HandleApiCallConstruct();
+ ParameterCount expected(0);
+ __ InvokeCode(code, expected, expected,
+ RelocInfo::CODE_TARGET, CALL_FUNCTION, CALL_AS_METHOD);
+ } else {
+ ParameterCount actual(r0);
+ __ InvokeFunction(r1, actual, CALL_FUNCTION,
+ NullCallWrapper(), CALL_AS_METHOD);
+ }
- // Copy arguments and receiver to the expression stack.
- // r0: number of arguments
- // r2: address of last argument (caller sp)
- // r1: constructor function
- // r3: number of arguments (smi-tagged)
- // sp[0]: receiver
- // sp[1]: constructor function
- // sp[2]: receiver
- // sp[3]: constructor function
- // sp[4]: number of arguments (smi-tagged)
- Label loop, entry;
- __ b(&entry);
- __ bind(&loop);
- __ ldr(ip, MemOperand(r2, r3, LSL, kPointerSizeLog2 - 1));
- __ push(ip);
- __ bind(&entry);
- __ sub(r3, r3, Operand(2), SetCC);
- __ b(ge, &loop);
+ // Pop the function from the stack.
+    // sp[0]: constructor function
+    // sp[1]: receiver
+    // sp[2]: constructor function
+    // sp[3]: number of arguments (smi-tagged)
+ __ pop();
- // Call the function.
- // r0: number of arguments
- // r1: constructor function
- if (is_api_function) {
- __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
- Handle<Code> code =
- masm->isolate()->builtins()->HandleApiCallConstruct();
- ParameterCount expected(0);
- __ InvokeCode(code, expected, expected,
- RelocInfo::CODE_TARGET, CALL_FUNCTION, CALL_AS_METHOD);
- } else {
- ParameterCount actual(r0);
- __ InvokeFunction(r1, actual, CALL_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
+ // Restore context from the frame.
+ // r0: result
+ // sp[0]: receiver
+ // sp[1]: constructor function
+ // sp[2]: number of arguments (smi-tagged)
+ __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+
+ // If the result is an object (in the ECMA sense), we should get rid
+ // of the receiver and use the result; see ECMA-262 section 13.2.2-7
+ // on page 74.
+ Label use_receiver, exit;
+
+ // If the result is a smi, it is *not* an object in the ECMA sense.
+ // r0: result
+ // sp[0]: receiver (newly allocated object)
+ // sp[1]: constructor function
+ // sp[2]: number of arguments (smi-tagged)
+ __ JumpIfSmi(r0, &use_receiver);
+
+ // If the type of the result (stored in its map) is less than
+ // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense.
+ __ CompareObjectType(r0, r3, r3, FIRST_SPEC_OBJECT_TYPE);
+ __ b(ge, &exit);
+
+ // Throw away the result of the constructor invocation and use the
+ // on-stack receiver as the result.
+ __ bind(&use_receiver);
+ __ ldr(r0, MemOperand(sp));
+
+ // Remove receiver from the stack, remove caller arguments, and
+ // return.
+ __ bind(&exit);
+ // r0: result
+ // sp[0]: receiver (newly allocated object)
+ // sp[1]: constructor function
+ // sp[2]: number of arguments (smi-tagged)
+ __ ldr(r1, MemOperand(sp, 2 * kPointerSize));
+
+ // Leave construct frame.
}
- // Pop the function from the stack.
- // sp[0]: constructor function
- // sp[2]: receiver
- // sp[3]: constructor function
- // sp[4]: number of arguments (smi-tagged)
- __ pop();
-
- // Restore context from the frame.
- // r0: result
- // sp[0]: receiver
- // sp[1]: constructor function
- // sp[2]: number of arguments (smi-tagged)
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-
- // If the result is an object (in the ECMA sense), we should get rid
- // of the receiver and use the result; see ECMA-262 section 13.2.2-7
- // on page 74.
- Label use_receiver, exit;
-
- // If the result is a smi, it is *not* an object in the ECMA sense.
- // r0: result
- // sp[0]: receiver (newly allocated object)
- // sp[1]: constructor function
- // sp[2]: number of arguments (smi-tagged)
- __ JumpIfSmi(r0, &use_receiver);
-
- // If the type of the result (stored in its map) is less than
- // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense.
- __ CompareObjectType(r0, r3, r3, FIRST_SPEC_OBJECT_TYPE);
- __ b(ge, &exit);
-
- // Throw away the result of the constructor invocation and use the
- // on-stack receiver as the result.
- __ bind(&use_receiver);
- __ ldr(r0, MemOperand(sp));
-
- // Remove receiver from the stack, remove caller arguments, and
- // return.
- __ bind(&exit);
- // r0: result
- // sp[0]: receiver (newly allocated object)
- // sp[1]: constructor function
- // sp[2]: number of arguments (smi-tagged)
- __ ldr(r1, MemOperand(sp, 2 * kPointerSize));
- __ LeaveConstructFrame();
__ add(sp, sp, Operand(r1, LSL, kPointerSizeLog2 - 1));
__ add(sp, sp, Operand(kPointerSize));
__ IncrementCounter(isolate->counters()->constructed_objects(), 1, r1, r2);
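
The use_receiver/exit branches at the end of the construct stub implement the ECMA-262 §13.2.2 rule: the constructor's return value only replaces the freshly allocated receiver when it is an object in the spec sense, while smis and other primitives are discarded. A hedged model of that decision (types and the threshold constant are stand-ins):

    // Stand-in model, not V8 code. FIRST_SPEC_OBJECT mirrors the role of
    // FIRST_SPEC_OBJECT_TYPE in the CompareObjectType check above.
    struct Value { bool is_smi; int instance_type; };
    const int FIRST_SPEC_OBJECT = 0x80;  // illustrative threshold

    Value ConstructResult(Value result, Value receiver) {
      if (result.is_smi) return receiver;            // JumpIfSmi
      if (result.instance_type < FIRST_SPEC_OBJECT)  // not an object
        return receiver;                             // &use_receiver
      return result;                                 // &exit
    }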
@@ -997,63 +1012,64 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// r4: argv
// r5-r7, cp may be clobbered
- // Clear the context before we push it when entering the JS frame.
+ // Clear the context before we push it when entering the internal frame.
__ mov(cp, Operand(0, RelocInfo::NONE));
// Enter an internal frame.
- __ EnterInternalFrame();
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
- // Set up the context from the function argument.
- __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
+ // Set up the context from the function argument.
+ __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
- // Set up the roots register.
- ExternalReference roots_address =
- ExternalReference::roots_address(masm->isolate());
- __ mov(r10, Operand(roots_address));
+ // Set up the roots register.
+ ExternalReference roots_array_start =
+ ExternalReference::roots_array_start(masm->isolate());
+ __ mov(r10, Operand(roots_array_start));
- // Push the function and the receiver onto the stack.
- __ push(r1);
- __ push(r2);
+ // Push the function and the receiver onto the stack.
+ __ push(r1);
+ __ push(r2);
- // Copy arguments to the stack in a loop.
- // r1: function
- // r3: argc
- // r4: argv, i.e. points to first arg
- Label loop, entry;
- __ add(r2, r4, Operand(r3, LSL, kPointerSizeLog2));
- // r2 points past last arg.
- __ b(&entry);
- __ bind(&loop);
- __ ldr(r0, MemOperand(r4, kPointerSize, PostIndex)); // read next parameter
- __ ldr(r0, MemOperand(r0)); // dereference handle
- __ push(r0); // push parameter
- __ bind(&entry);
- __ cmp(r4, r2);
- __ b(ne, &loop);
-
- // Initialize all JavaScript callee-saved registers, since they will be seen
- // by the garbage collector as part of handlers.
- __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
- __ mov(r5, Operand(r4));
- __ mov(r6, Operand(r4));
- __ mov(r7, Operand(r4));
- if (kR9Available == 1) {
- __ mov(r9, Operand(r4));
- }
+ // Copy arguments to the stack in a loop.
+ // r1: function
+ // r3: argc
+ // r4: argv, i.e. points to first arg
+ Label loop, entry;
+ __ add(r2, r4, Operand(r3, LSL, kPointerSizeLog2));
+ // r2 points past last arg.
+ __ b(&entry);
+ __ bind(&loop);
+    __ ldr(r0, MemOperand(r4, kPointerSize, PostIndex));  // Read next parameter.
+    __ ldr(r0, MemOperand(r0));  // Dereference the handle.
+    __ push(r0);  // Push the parameter.
+ __ bind(&entry);
+ __ cmp(r4, r2);
+ __ b(ne, &loop);
- // Invoke the code and pass argc as r0.
- __ mov(r0, Operand(r3));
- if (is_construct) {
- __ Call(masm->isolate()->builtins()->JSConstructCall());
- } else {
- ParameterCount actual(r0);
- __ InvokeFunction(r1, actual, CALL_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
- }
+ // Initialize all JavaScript callee-saved registers, since they will be seen
+ // by the garbage collector as part of handlers.
+ __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
+ __ mov(r5, Operand(r4));
+ __ mov(r6, Operand(r4));
+ __ mov(r7, Operand(r4));
+ if (kR9Available == 1) {
+ __ mov(r9, Operand(r4));
+ }
- // Exit the JS frame and remove the parameters (except function), and return.
- // Respect ABI stack constraint.
- __ LeaveInternalFrame();
+ // Invoke the code and pass argc as r0.
+ __ mov(r0, Operand(r3));
+ if (is_construct) {
+ __ Call(masm->isolate()->builtins()->JSConstructCall());
+ } else {
+ ParameterCount actual(r0);
+ __ InvokeFunction(r1, actual, CALL_FUNCTION,
+ NullCallWrapper(), CALL_AS_METHOD);
+ }
+    // Exit the JS frame, remove the parameters (except the function), and
+    // return. Respect the ABI stack constraint.
+ }
__ Jump(lr);
// r0: result
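
The argument loop in the entry trampoline treats argv as an array of handle locations, so every parameter needs two loads: one to fetch the handle and one to dereference it. A minimal sketch of that double indirection with stand-in types:

    // Sketch only. 'Object' stands in for a tagged V8 pointer; a handle is
    // the address of a slot holding such a pointer.
    typedef void* Object;
    typedef Object* Handle;

    void PushArguments(Handle* argv, int argc, void (*push)(Object)) {
      Handle* end = argv + argc;   // r2 points past the last argument
      for (Handle* p = argv; p != end; ++p) {
        Handle h = *p;             // first ldr: read the next parameter
        push(*h);                  // second ldr: dereference, then push
      }
    }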
@@ -1072,26 +1088,27 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
void Builtins::Generate_LazyCompile(MacroAssembler* masm) {
// Enter an internal frame.
- __ EnterInternalFrame();
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
- // Preserve the function.
- __ push(r1);
- // Push call kind information.
- __ push(r5);
+ // Preserve the function.
+ __ push(r1);
+ // Push call kind information.
+ __ push(r5);
- // Push the function on the stack as the argument to the runtime function.
- __ push(r1);
- __ CallRuntime(Runtime::kLazyCompile, 1);
- // Calculate the entry point.
- __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
+ // Push the function on the stack as the argument to the runtime function.
+ __ push(r1);
+ __ CallRuntime(Runtime::kLazyCompile, 1);
+ // Calculate the entry point.
+ __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
- // Restore call kind information.
- __ pop(r5);
- // Restore saved function.
- __ pop(r1);
+ // Restore call kind information.
+ __ pop(r5);
+ // Restore saved function.
+ __ pop(r1);
- // Tear down temporary frame.
- __ LeaveInternalFrame();
+ // Tear down internal frame.
+ }
// Do a tail-call of the compiled function.
__ Jump(r2);
@@ -1100,26 +1117,27 @@ void Builtins::Generate_LazyCompile(MacroAssembler* masm) {
void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
// Enter an internal frame.
- __ EnterInternalFrame();
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
- // Preserve the function.
- __ push(r1);
- // Push call kind information.
- __ push(r5);
+ // Preserve the function.
+ __ push(r1);
+ // Push call kind information.
+ __ push(r5);
- // Push the function on the stack as the argument to the runtime function.
- __ push(r1);
- __ CallRuntime(Runtime::kLazyRecompile, 1);
- // Calculate the entry point.
- __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
+ // Push the function on the stack as the argument to the runtime function.
+ __ push(r1);
+ __ CallRuntime(Runtime::kLazyRecompile, 1);
+ // Calculate the entry point.
+ __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
- // Restore call kind information.
- __ pop(r5);
- // Restore saved function.
- __ pop(r1);
+ // Restore call kind information.
+ __ pop(r5);
+ // Restore saved function.
+ __ pop(r1);
- // Tear down temporary frame.
- __ LeaveInternalFrame();
+ // Tear down internal frame.
+ }
// Do a tail-call of the compiled function.
__ Jump(r2);
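
Both lazy-compile builtins finish by computing the code entry point by hand: the runtime returns a tagged Code object, and the first instruction sits kHeaderSize bytes into it, less the kHeapObjectTag bias that every tagged heap pointer carries. The arithmetic, with the tag value V8 uses and a hypothetical header size:

    #include <cstdint>

    const uintptr_t kHeapObjectTag = 1;    // tagged heap pointers are odd
    const uintptr_t kCodeHeaderSize = 32;  // hypothetical Code::kHeaderSize

    // r2 = r0 + (Code::kHeaderSize - kHeapObjectTag)
    uintptr_t EntryPoint(uintptr_t tagged_code) {
      return tagged_code + (kCodeHeaderSize - kHeapObjectTag);
    }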
@@ -1128,12 +1146,13 @@ void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
Deoptimizer::BailoutType type) {
- __ EnterInternalFrame();
- // Pass the function and deoptimization type to the runtime system.
- __ mov(r0, Operand(Smi::FromInt(static_cast<int>(type))));
- __ push(r0);
- __ CallRuntime(Runtime::kNotifyDeoptimized, 1);
- __ LeaveInternalFrame();
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+    // Pass the deoptimization type to the runtime system.
+ __ mov(r0, Operand(Smi::FromInt(static_cast<int>(type))));
+ __ push(r0);
+ __ CallRuntime(Runtime::kNotifyDeoptimized, 1);
+ }
// Get the full codegen state from the stack and untag it -> r6.
__ ldr(r6, MemOperand(sp, 0 * kPointerSize));
@@ -1173,9 +1192,10 @@ void Builtins::Generate_NotifyOSR(MacroAssembler* masm) {
// the registers without worrying about which of them contain
// pointers. This seems a bit fragile.
__ stm(db_w, sp, kJSCallerSaved | kCalleeSaved | lr.bit() | fp.bit());
- __ EnterInternalFrame();
- __ CallRuntime(Runtime::kNotifyOSR, 0);
- __ LeaveInternalFrame();
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kNotifyOSR, 0);
+ }
__ ldm(ia_w, sp, kJSCallerSaved | kCalleeSaved | lr.bit() | fp.bit());
__ Ret();
}
@@ -1191,10 +1211,11 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
// Lookup the function in the JavaScript frame and push it as an
// argument to the on-stack replacement function.
__ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ EnterInternalFrame();
- __ push(r0);
- __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
- __ LeaveInternalFrame();
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ push(r0);
+ __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
+ }
// If the result was -1 it means that we couldn't optimize the
// function. Just return and continue in the unoptimized version.
@@ -1276,17 +1297,23 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ b(ge, &shift_arguments);
__ bind(&convert_to_object);
- __ EnterInternalFrame(); // In order to preserve argument count.
- __ mov(r0, Operand(r0, LSL, kSmiTagSize)); // Smi-tagged.
- __ push(r0);
- __ push(r2);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
- __ mov(r2, r0);
+ {
+ // Enter an internal frame in order to preserve argument count.
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ mov(r0, Operand(r0, LSL, kSmiTagSize)); // Smi-tagged.
+ __ push(r0);
+
+ __ push(r2);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ mov(r2, r0);
+
+ __ pop(r0);
+ __ mov(r0, Operand(r0, ASR, kSmiTagSize));
+
+ // Exit the internal frame.
+ }
- __ pop(r0);
- __ mov(r0, Operand(r0, ASR, kSmiTagSize));
- __ LeaveInternalFrame();
// Restore the function to r1, and the flag to r4.
__ ldr(r1, MemOperand(sp, r0, LSL, kPointerSizeLog2));
__ mov(r4, Operand(0, RelocInfo::NONE));
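
The argument count above is smi-tagged before being pushed because TO_OBJECT can allocate: while the internal frame is live, the GC may scan every slot in it, and a raw integer could be mistaken for a pointer, whereas a smi is self-describing. The round trip is a pair of shifts (ARM32, where kSmiTagSize is 1):

    const int kSmiTagSizeBits = 1;  // ARM32 value of kSmiTagSize

    int SmiTag(int n)     { return n << kSmiTagSizeBits; }    // LSL before the push
    int SmiUntag(int smi) { return smi >> kSmiTagSizeBits; }  // ASR after the pop

    // SmiUntag(SmiTag(argc)) == argc for any argc that fits in 31 bits.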
@@ -1406,156 +1433,157 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
const int kRecvOffset = 3 * kPointerSize;
const int kFunctionOffset = 4 * kPointerSize;
- __ EnterInternalFrame();
+ {
+ FrameScope frame_scope(masm, StackFrame::INTERNAL);
- __ ldr(r0, MemOperand(fp, kFunctionOffset)); // get the function
- __ push(r0);
- __ ldr(r0, MemOperand(fp, kArgsOffset)); // get the args array
- __ push(r0);
- __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
-
- // Check the stack for overflow. We are not trying to catch
- // interruptions (e.g. debug break and preemption) here, so the "real stack
- // limit" is checked.
- Label okay;
- __ LoadRoot(r2, Heap::kRealStackLimitRootIndex);
- // Make r2 the space we have left. The stack might already be overflowed
- // here which will cause r2 to become negative.
- __ sub(r2, sp, r2);
- // Check if the arguments will overflow the stack.
- __ cmp(r2, Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ b(gt, &okay); // Signed comparison.
-
- // Out of stack space.
- __ ldr(r1, MemOperand(fp, kFunctionOffset));
- __ push(r1);
- __ push(r0);
- __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
- // End of stack check.
+ __ ldr(r0, MemOperand(fp, kFunctionOffset)); // get the function
+ __ push(r0);
+ __ ldr(r0, MemOperand(fp, kArgsOffset)); // get the args array
+ __ push(r0);
+ __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
+
+ // Check the stack for overflow. We are not trying to catch
+ // interruptions (e.g. debug break and preemption) here, so the "real stack
+ // limit" is checked.
+ Label okay;
+ __ LoadRoot(r2, Heap::kRealStackLimitRootIndex);
+ // Make r2 the space we have left. The stack might already be overflowed
+    // here, which will cause r2 to become negative.
+ __ sub(r2, sp, r2);
+ // Check if the arguments will overflow the stack.
+ __ cmp(r2, Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ b(gt, &okay); // Signed comparison.
+
+ // Out of stack space.
+ __ ldr(r1, MemOperand(fp, kFunctionOffset));
+ __ push(r1);
+ __ push(r0);
+ __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
+ // End of stack check.
- // Push current limit and index.
- __ bind(&okay);
- __ push(r0); // limit
- __ mov(r1, Operand(0, RelocInfo::NONE)); // initial index
- __ push(r1);
+ // Push current limit and index.
+ __ bind(&okay);
+ __ push(r0); // limit
+ __ mov(r1, Operand(0, RelocInfo::NONE)); // initial index
+ __ push(r1);
- // Get the receiver.
- __ ldr(r0, MemOperand(fp, kRecvOffset));
+ // Get the receiver.
+ __ ldr(r0, MemOperand(fp, kRecvOffset));
- // Check that the function is a JS function (otherwise it must be a proxy).
- Label push_receiver;
- __ ldr(r1, MemOperand(fp, kFunctionOffset));
- __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
- __ b(ne, &push_receiver);
+ // Check that the function is a JS function (otherwise it must be a proxy).
+ Label push_receiver;
+ __ ldr(r1, MemOperand(fp, kFunctionOffset));
+ __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
+ __ b(ne, &push_receiver);
- // Change context eagerly to get the right global object if necessary.
- __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
- // Load the shared function info while the function is still in r1.
- __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+ // Change context eagerly to get the right global object if necessary.
+ __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
+ // Load the shared function info while the function is still in r1.
+ __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
- // Compute the receiver.
- // Do not transform the receiver for strict mode functions.
- Label call_to_object, use_global_receiver;
- __ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kCompilerHintsOffset));
- __ tst(r2, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
- kSmiTagSize)));
- __ b(ne, &push_receiver);
-
- // Do not transform the receiver for strict mode functions.
- __ tst(r2, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
- __ b(ne, &push_receiver);
-
- // Compute the receiver in non-strict mode.
- __ JumpIfSmi(r0, &call_to_object);
- __ LoadRoot(r1, Heap::kNullValueRootIndex);
- __ cmp(r0, r1);
- __ b(eq, &use_global_receiver);
- __ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
- __ cmp(r0, r1);
- __ b(eq, &use_global_receiver);
-
- // Check if the receiver is already a JavaScript object.
- // r0: receiver
- STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
- __ CompareObjectType(r0, r1, r1, FIRST_SPEC_OBJECT_TYPE);
- __ b(ge, &push_receiver);
-
- // Convert the receiver to a regular object.
- // r0: receiver
- __ bind(&call_to_object);
- __ push(r0);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
- __ b(&push_receiver);
-
- // Use the current global receiver object as the receiver.
- __ bind(&use_global_receiver);
- const int kGlobalOffset =
- Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
- __ ldr(r0, FieldMemOperand(cp, kGlobalOffset));
- __ ldr(r0, FieldMemOperand(r0, GlobalObject::kGlobalContextOffset));
- __ ldr(r0, FieldMemOperand(r0, kGlobalOffset));
- __ ldr(r0, FieldMemOperand(r0, GlobalObject::kGlobalReceiverOffset));
-
- // Push the receiver.
- // r0: receiver
- __ bind(&push_receiver);
- __ push(r0);
+ // Compute the receiver.
+ // Do not transform the receiver for strict mode functions.
+ Label call_to_object, use_global_receiver;
+ __ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kCompilerHintsOffset));
+ __ tst(r2, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
+ kSmiTagSize)));
+ __ b(ne, &push_receiver);
- // Copy all arguments from the array to the stack.
- Label entry, loop;
- __ ldr(r0, MemOperand(fp, kIndexOffset));
- __ b(&entry);
+    // Do not transform the receiver for native functions.
+ __ tst(r2, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
+ __ b(ne, &push_receiver);
- // Load the current argument from the arguments array and push it to the
- // stack.
- // r0: current argument index
- __ bind(&loop);
- __ ldr(r1, MemOperand(fp, kArgsOffset));
- __ push(r1);
- __ push(r0);
+ // Compute the receiver in non-strict mode.
+ __ JumpIfSmi(r0, &call_to_object);
+ __ LoadRoot(r1, Heap::kNullValueRootIndex);
+ __ cmp(r0, r1);
+ __ b(eq, &use_global_receiver);
+ __ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
+ __ cmp(r0, r1);
+ __ b(eq, &use_global_receiver);
- // Call the runtime to access the property in the arguments array.
- __ CallRuntime(Runtime::kGetProperty, 2);
- __ push(r0);
+ // Check if the receiver is already a JavaScript object.
+ // r0: receiver
+ STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+ __ CompareObjectType(r0, r1, r1, FIRST_SPEC_OBJECT_TYPE);
+ __ b(ge, &push_receiver);
- // Use inline caching to access the arguments.
- __ ldr(r0, MemOperand(fp, kIndexOffset));
- __ add(r0, r0, Operand(1 << kSmiTagSize));
- __ str(r0, MemOperand(fp, kIndexOffset));
+ // Convert the receiver to a regular object.
+ // r0: receiver
+ __ bind(&call_to_object);
+ __ push(r0);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ b(&push_receiver);
- // Test if the copy loop has finished copying all the elements from the
- // arguments object.
- __ bind(&entry);
- __ ldr(r1, MemOperand(fp, kLimitOffset));
- __ cmp(r0, r1);
- __ b(ne, &loop);
-
- // Invoke the function.
- Label call_proxy;
- ParameterCount actual(r0);
- __ mov(r0, Operand(r0, ASR, kSmiTagSize));
- __ ldr(r1, MemOperand(fp, kFunctionOffset));
- __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
- __ b(ne, &call_proxy);
- __ InvokeFunction(r1, actual, CALL_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
+ // Use the current global receiver object as the receiver.
+ __ bind(&use_global_receiver);
+ const int kGlobalOffset =
+ Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
+ __ ldr(r0, FieldMemOperand(cp, kGlobalOffset));
+ __ ldr(r0, FieldMemOperand(r0, GlobalObject::kGlobalContextOffset));
+ __ ldr(r0, FieldMemOperand(r0, kGlobalOffset));
+ __ ldr(r0, FieldMemOperand(r0, GlobalObject::kGlobalReceiverOffset));
+
+ // Push the receiver.
+ // r0: receiver
+ __ bind(&push_receiver);
+ __ push(r0);
- // Tear down the internal frame and remove function, receiver and args.
- __ LeaveInternalFrame();
- __ add(sp, sp, Operand(3 * kPointerSize));
- __ Jump(lr);
+ // Copy all arguments from the array to the stack.
+ Label entry, loop;
+ __ ldr(r0, MemOperand(fp, kIndexOffset));
+ __ b(&entry);
- // Invoke the function proxy.
- __ bind(&call_proxy);
- __ push(r1); // add function proxy as last argument
- __ add(r0, r0, Operand(1));
- __ mov(r2, Operand(0, RelocInfo::NONE));
- __ SetCallKind(r5, CALL_AS_METHOD);
- __ GetBuiltinEntry(r3, Builtins::CALL_FUNCTION_PROXY);
- __ Call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
- RelocInfo::CODE_TARGET);
+ // Load the current argument from the arguments array and push it to the
+ // stack.
+ // r0: current argument index
+ __ bind(&loop);
+ __ ldr(r1, MemOperand(fp, kArgsOffset));
+ __ push(r1);
+ __ push(r0);
+
+ // Call the runtime to access the property in the arguments array.
+ __ CallRuntime(Runtime::kGetProperty, 2);
+ __ push(r0);
+
+    // Advance the (smi-tagged) index and store it back on the stack.
+ __ ldr(r0, MemOperand(fp, kIndexOffset));
+ __ add(r0, r0, Operand(1 << kSmiTagSize));
+ __ str(r0, MemOperand(fp, kIndexOffset));
+
+ // Test if the copy loop has finished copying all the elements from the
+ // arguments object.
+ __ bind(&entry);
+ __ ldr(r1, MemOperand(fp, kLimitOffset));
+ __ cmp(r0, r1);
+ __ b(ne, &loop);
+
+ // Invoke the function.
+ Label call_proxy;
+ ParameterCount actual(r0);
+ __ mov(r0, Operand(r0, ASR, kSmiTagSize));
+ __ ldr(r1, MemOperand(fp, kFunctionOffset));
+ __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
+ __ b(ne, &call_proxy);
+ __ InvokeFunction(r1, actual, CALL_FUNCTION,
+ NullCallWrapper(), CALL_AS_METHOD);
+
+ frame_scope.GenerateLeaveFrame();
+ __ add(sp, sp, Operand(3 * kPointerSize));
+ __ Jump(lr);
+
+ // Invoke the function proxy.
+ __ bind(&call_proxy);
+ __ push(r1); // add function proxy as last argument
+ __ add(r0, r0, Operand(1));
+ __ mov(r2, Operand(0, RelocInfo::NONE));
+ __ SetCallKind(r5, CALL_AS_METHOD);
+ __ GetBuiltinEntry(r3, Builtins::CALL_FUNCTION_PROXY);
+ __ Call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+ RelocInfo::CODE_TARGET);
- __ LeaveInternalFrame();
+ // Tear down the internal frame and remove function, receiver and args.
+ }
__ add(sp, sp, Operand(3 * kPointerSize));
__ Jump(lr);
}
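
The stack check in Function.prototype.apply subtracts the real stack limit from sp and compares the result, signed, against the byte size of the flattened argument list; the signed comparison matters because sp can already sit below the limit. As scalar arithmetic (ARM32 constants; the single shift untags the smi count and scales to bytes in one step):

    #include <cstdint>

    // kPointerSizeLog2 == 2 and kSmiTagSize == 1 on ARM32, so the shift
    // below is by one: untag and multiply by four at once.
    bool ArgumentsFitOnStack(intptr_t sp, intptr_t real_stack_limit,
                             intptr_t argc_smi) {
      intptr_t space_left = sp - real_stack_limit;  // may already be negative
      intptr_t bytes_needed = argc_smi << (2 - 1);  // kPointerSizeLog2 - kSmiTagSize
      return space_left > bytes_needed;             // the b(gt, &okay) branch
    }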
diff --git a/deps/v8/src/arm/code-stubs-arm.cc b/deps/v8/src/arm/code-stubs-arm.cc
index e65f6d9b6..8b1d0c4b3 100644
--- a/deps/v8/src/arm/code-stubs-arm.cc
+++ b/deps/v8/src/arm/code-stubs-arm.cc
@@ -98,9 +98,9 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) {
&gc,
TAG_OBJECT);
- int map_index = strict_mode_ == kStrictMode
- ? Context::STRICT_MODE_FUNCTION_MAP_INDEX
- : Context::FUNCTION_MAP_INDEX;
+ int map_index = (language_mode_ == CLASSIC_MODE)
+ ? Context::FUNCTION_MAP_INDEX
+ : Context::STRICT_MODE_FUNCTION_MAP_INDEX;
// Compute the function map in the current global context and set that
// as the map of the allocated object.
@@ -189,6 +189,121 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
}
+void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
+ // Stack layout on entry:
+ //
+ // [sp]: function.
+ // [sp + kPointerSize]: serialized scope info
+
+ // Try to allocate the context in new space.
+ Label gc;
+ int length = slots_ + Context::MIN_CONTEXT_SLOTS;
+ __ AllocateInNewSpace(FixedArray::SizeFor(length),
+ r0, r1, r2, &gc, TAG_OBJECT);
+
+ // Load the function from the stack.
+ __ ldr(r3, MemOperand(sp, 0));
+
+ // Load the serialized scope info from the stack.
+ __ ldr(r1, MemOperand(sp, 1 * kPointerSize));
+
+  // Set up the object header.
+ __ LoadRoot(r2, Heap::kBlockContextMapRootIndex);
+ __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
+ __ mov(r2, Operand(Smi::FromInt(length)));
+ __ str(r2, FieldMemOperand(r0, FixedArray::kLengthOffset));
+
+  // If this block context is nested in the global context, we get a smi
+  // sentinel instead of a function. The block context should get the
+  // canonical empty function of the global context as its closure, which
+  // we still have to look up.
+ Label after_sentinel;
+ __ JumpIfNotSmi(r3, &after_sentinel);
+ if (FLAG_debug_code) {
+ const char* message = "Expected 0 as a Smi sentinel";
+ __ cmp(r3, Operand::Zero());
+ __ Assert(eq, message);
+ }
+ __ ldr(r3, GlobalObjectOperand());
+ __ ldr(r3, FieldMemOperand(r3, GlobalObject::kGlobalContextOffset));
+ __ ldr(r3, ContextOperand(r3, Context::CLOSURE_INDEX));
+ __ bind(&after_sentinel);
+
+  // Set up the fixed slots.
+ __ str(r3, ContextOperand(r0, Context::CLOSURE_INDEX));
+ __ str(cp, ContextOperand(r0, Context::PREVIOUS_INDEX));
+ __ str(r1, ContextOperand(r0, Context::EXTENSION_INDEX));
+
+ // Copy the global object from the previous context.
+ __ ldr(r1, ContextOperand(cp, Context::GLOBAL_INDEX));
+ __ str(r1, ContextOperand(r0, Context::GLOBAL_INDEX));
+
+ // Initialize the rest of the slots to the hole value.
+ __ LoadRoot(r1, Heap::kTheHoleValueRootIndex);
+ for (int i = 0; i < slots_; i++) {
+ __ str(r1, ContextOperand(r0, i + Context::MIN_CONTEXT_SLOTS));
+ }
+
+  // Remove the on-stack arguments and return.
+ __ mov(cp, r0);
+ __ add(sp, sp, Operand(2 * kPointerSize));
+ __ Ret();
+
+ // Need to collect. Call into runtime system.
+ __ bind(&gc);
+ __ TailCallRuntime(Runtime::kPushBlockContext, 2, 1);
+}
+
+
+static void GenerateFastCloneShallowArrayCommon(
+ MacroAssembler* masm,
+ int length,
+ FastCloneShallowArrayStub::Mode mode,
+ Label* fail) {
+ // Registers on entry:
+ //
+ // r3: boilerplate literal array.
+ ASSERT(mode != FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS);
+
+ // All sizes here are multiples of kPointerSize.
+ int elements_size = 0;
+ if (length > 0) {
+ elements_size = mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
+ ? FixedDoubleArray::SizeFor(length)
+ : FixedArray::SizeFor(length);
+ }
+ int size = JSArray::kSize + elements_size;
+
+ // Allocate both the JS array and the elements array in one big
+ // allocation. This avoids multiple limit checks.
+ __ AllocateInNewSpace(size,
+ r0,
+ r1,
+ r2,
+ fail,
+ TAG_OBJECT);
+
+ // Copy the JS array part.
+ for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
+ if ((i != JSArray::kElementsOffset) || (length == 0)) {
+ __ ldr(r1, FieldMemOperand(r3, i));
+ __ str(r1, FieldMemOperand(r0, i));
+ }
+ }
+
+ if (length > 0) {
+    // Get hold of the elements array of the boilerplate and set up the
+ // elements pointer in the resulting object.
+ __ ldr(r3, FieldMemOperand(r3, JSArray::kElementsOffset));
+ __ add(r2, r0, Operand(JSArray::kSize));
+ __ str(r2, FieldMemOperand(r0, JSArray::kElementsOffset));
+
+ // Copy the elements array.
+ ASSERT((elements_size % kPointerSize) == 0);
+ __ CopyFields(r2, r3, r1.bit(), elements_size / kPointerSize);
+ }
+}
+
void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
// Stack layout on entry:
//
@@ -196,10 +311,6 @@ void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
// [sp + kPointerSize]: literal index.
// [sp + (2 * kPointerSize)]: literals array.
- // All sizes here are multiples of kPointerSize.
- int elements_size = (length_ > 0) ? FixedArray::SizeFor(length_) : 0;
- int size = JSArray::kSize + elements_size;
-
// Load boilerplate object into r3 and check if we need to create a
// boilerplate.
Label slow_case;
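
FastNewBlockContextStub above sizes its allocation as FixedArray::SizeFor(slots_ + Context::MIN_CONTEXT_SLOTS): a fixed-array header plus one pointer per slot, where the minimum slots cover the closure, previous-context, extension, and global entries it initializes. A sketch of the computation with illustrative ARM32 constants:

    // Illustrative constants; the real values come from V8's headers.
    const int kPointerSizeBytes = 4;                      // ARM32
    const int kFixedArrayHeader = 2 * kPointerSizeBytes;  // map + length
    const int kMinContextSlots = 4;                       // closure/prev/ext/global

    int BlockContextAllocationSize(int scope_slots) {
      int length = scope_slots + kMinContextSlots;  // slots_ + MIN_CONTEXT_SLOTS
      return kFixedArrayHeader + length * kPointerSizeBytes;  // FixedArray::SizeFor
    }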
@@ -207,64 +318,111 @@ void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
__ ldr(r0, MemOperand(sp, 1 * kPointerSize));
__ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ ldr(r3, MemOperand(r3, r0, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ cmp(r3, ip);
+ __ CompareRoot(r3, Heap::kUndefinedValueRootIndex);
__ b(eq, &slow_case);
+ FastCloneShallowArrayStub::Mode mode = mode_;
+ if (mode == CLONE_ANY_ELEMENTS) {
+ Label double_elements, check_fast_elements;
+ __ ldr(r0, FieldMemOperand(r3, JSArray::kElementsOffset));
+ __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
+ __ LoadRoot(ip, Heap::kFixedCOWArrayMapRootIndex);
+ __ cmp(r0, ip);
+ __ b(ne, &check_fast_elements);
+ GenerateFastCloneShallowArrayCommon(masm, 0,
+ COPY_ON_WRITE_ELEMENTS, &slow_case);
+ // Return and remove the on-stack parameters.
+ __ add(sp, sp, Operand(3 * kPointerSize));
+ __ Ret();
+
+ __ bind(&check_fast_elements);
+ __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
+ __ cmp(r0, ip);
+ __ b(ne, &double_elements);
+ GenerateFastCloneShallowArrayCommon(masm, length_,
+ CLONE_ELEMENTS, &slow_case);
+ // Return and remove the on-stack parameters.
+ __ add(sp, sp, Operand(3 * kPointerSize));
+ __ Ret();
+
+ __ bind(&double_elements);
+ mode = CLONE_DOUBLE_ELEMENTS;
+ // Fall through to generate the code to handle double elements.
+ }
+
if (FLAG_debug_code) {
const char* message;
Heap::RootListIndex expected_map_index;
- if (mode_ == CLONE_ELEMENTS) {
+ if (mode == CLONE_ELEMENTS) {
message = "Expected (writable) fixed array";
expected_map_index = Heap::kFixedArrayMapRootIndex;
+ } else if (mode == CLONE_DOUBLE_ELEMENTS) {
+ message = "Expected (writable) fixed double array";
+ expected_map_index = Heap::kFixedDoubleArrayMapRootIndex;
} else {
- ASSERT(mode_ == COPY_ON_WRITE_ELEMENTS);
+ ASSERT(mode == COPY_ON_WRITE_ELEMENTS);
message = "Expected copy-on-write fixed array";
expected_map_index = Heap::kFixedCOWArrayMapRootIndex;
}
__ push(r3);
__ ldr(r3, FieldMemOperand(r3, JSArray::kElementsOffset));
__ ldr(r3, FieldMemOperand(r3, HeapObject::kMapOffset));
- __ LoadRoot(ip, expected_map_index);
- __ cmp(r3, ip);
+ __ CompareRoot(r3, expected_map_index);
__ Assert(eq, message);
__ pop(r3);
}
- // Allocate both the JS array and the elements array in one big
- // allocation. This avoids multiple limit checks.
- __ AllocateInNewSpace(size,
- r0,
- r1,
- r2,
- &slow_case,
- TAG_OBJECT);
+ GenerateFastCloneShallowArrayCommon(masm, length_, mode, &slow_case);
- // Copy the JS array part.
- for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
- if ((i != JSArray::kElementsOffset) || (length_ == 0)) {
- __ ldr(r1, FieldMemOperand(r3, i));
- __ str(r1, FieldMemOperand(r0, i));
- }
- }
+ // Return and remove the on-stack parameters.
+ __ add(sp, sp, Operand(3 * kPointerSize));
+ __ Ret();
- if (length_ > 0) {
- // Get hold of the elements array of the boilerplate and setup the
- // elements pointer in the resulting object.
- __ ldr(r3, FieldMemOperand(r3, JSArray::kElementsOffset));
- __ add(r2, r0, Operand(JSArray::kSize));
- __ str(r2, FieldMemOperand(r0, JSArray::kElementsOffset));
+ __ bind(&slow_case);
+ __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1);
+}
- // Copy the elements array.
- __ CopyFields(r2, r3, r1.bit(), elements_size / kPointerSize);
+
+void FastCloneShallowObjectStub::Generate(MacroAssembler* masm) {
+ // Stack layout on entry:
+ //
+ // [sp]: object literal flags.
+ // [sp + kPointerSize]: constant properties.
+ // [sp + (2 * kPointerSize)]: literal index.
+ // [sp + (3 * kPointerSize)]: literals array.
+
+ // Load boilerplate object into r3 and check if we need to create a
+ // boilerplate.
+ Label slow_case;
+ __ ldr(r3, MemOperand(sp, 3 * kPointerSize));
+ __ ldr(r0, MemOperand(sp, 2 * kPointerSize));
+ __ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ ldr(r3, MemOperand(r3, r0, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ CompareRoot(r3, Heap::kUndefinedValueRootIndex);
+ __ b(eq, &slow_case);
+
+ // Check that the boilerplate contains only fast properties and we can
+ // statically determine the instance size.
+ int size = JSObject::kHeaderSize + length_ * kPointerSize;
+ __ ldr(r0, FieldMemOperand(r3, HeapObject::kMapOffset));
+ __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceSizeOffset));
+ __ cmp(r0, Operand(size >> kPointerSizeLog2));
+ __ b(ne, &slow_case);
+
+ // Allocate the JS object and copy header together with all in-object
+ // properties from the boilerplate.
+ __ AllocateInNewSpace(size, r0, r1, r2, &slow_case, TAG_OBJECT);
+ for (int i = 0; i < size; i += kPointerSize) {
+ __ ldr(r1, FieldMemOperand(r3, i));
+ __ str(r1, FieldMemOperand(r0, i));
}
// Return and remove the on-stack parameters.
- __ add(sp, sp, Operand(3 * kPointerSize));
+ __ add(sp, sp, Operand(4 * kPointerSize));
__ Ret();
__ bind(&slow_case);
- __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1);
+ __ TailCallRuntime(Runtime::kCreateObjectLiteralShallow, 4, 1);
}
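
The fast object-literal clone above only proceeds when the boilerplate's map reports exactly the instance size the literal was compiled for. The map stores that size in words in a single byte, which is why the expected byte count is shifted right by kPointerSizeLog2 before the compare. The guard, sketched with stand-in constants:

    // Stand-in sketch (ARM32: kPointerSizeLog2 == 2; the header size is
    // illustrative, not the exact JSObject::kHeaderSize).
    bool CanFastCloneObjectLiteral(int map_size_in_words, int literal_length) {
      const int kHeaderBytes = 12;  // hypothetical header size
      int expected_bytes = kHeaderBytes + literal_length * 4;
      return map_size_in_words == (expected_bytes >> 2);
    }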
@@ -838,9 +996,11 @@ void FloatingPointHelper::CallCCodeForDoubleOperation(
__ vmov(d0, r0, r1);
__ vmov(d1, r2, r3);
}
- // Call C routine that may not cause GC or other trouble.
- __ CallCFunction(ExternalReference::double_fp_operation(op, masm->isolate()),
- 0, 2);
+ {
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ CallCFunction(
+ ExternalReference::double_fp_operation(op, masm->isolate()), 0, 2);
+ }
// Store answer in the overwritable heap number. Double returned in
// registers r0 and r1 or in d0.
if (masm->use_eabi_hardfloat()) {
@@ -857,6 +1017,29 @@ void FloatingPointHelper::CallCCodeForDoubleOperation(
}
+bool WriteInt32ToHeapNumberStub::IsPregenerated() {
+ // These variants are compiled ahead of time. See next method.
+ if (the_int_.is(r1) && the_heap_number_.is(r0) && scratch_.is(r2)) {
+ return true;
+ }
+ if (the_int_.is(r2) && the_heap_number_.is(r0) && scratch_.is(r3)) {
+ return true;
+ }
+ // Other register combinations are generated as and when they are needed,
+ // so it is unsafe to call them from stubs (we can't generate a stub while
+ // we are generating a stub).
+ return false;
+}
+
+
+void WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime() {
+ WriteInt32ToHeapNumberStub stub1(r1, r0, r2);
+ WriteInt32ToHeapNumberStub stub2(r2, r0, r3);
+ stub1.GetCode()->set_is_pregenerated(true);
+ stub2.GetCode()->set_is_pregenerated(true);
+}
+
+
// See comment for class.
void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
Label max_negative_int;
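
The IsPregenerated()/GenerateFixedRegStubsAheadOfTime() pair above deals with the stub-inside-stub restriction: code that is itself being generated as a stub may only call WriteInt32ToHeapNumberStub variants that were compiled ahead of time, since V8 cannot start generating a second stub mid-generation. The whitelist is exactly the two register triples baked ahead of time; modeled with registers reduced to integers:

    // Hypothetical model of the whitelist, not V8 code (r0 == 0, r1 == 1, ...).
    struct RegTriple { int the_int, the_heap_number, scratch; };

    bool IsPregeneratedTriple(const RegTriple& t) {
      // The (r1, r0, r2) and (r2, r0, r3) stubs built in
      // GenerateFixedRegStubsAheadOfTime.
      return (t.the_int == 1 && t.the_heap_number == 0 && t.scratch == 2) ||
             (t.the_int == 2 && t.the_heap_number == 0 && t.scratch == 3);
    }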
@@ -1197,6 +1380,8 @@ static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm,
__ vmov(d0, r0, r1);
__ vmov(d1, r2, r3);
}
+
+ AllowExternalCallThatCantCauseGC scope(masm);
__ CallCFunction(ExternalReference::compare_doubles(masm->isolate()),
0, 2);
__ pop(pc); // Return.
@@ -1214,7 +1399,7 @@ static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
// If either operand is a JS object or an oddball value, then they are
// not equal since their pointers are different.
// There is no test for undetectability in strict equality.
- STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
+ STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
Label first_non_object;
// Get the type of the first operand into r2 and compare it with
// FIRST_SPEC_OBJECT_TYPE.
@@ -1606,6 +1791,8 @@ void CompareStub::Generate(MacroAssembler* masm) {
// The stub expects its argument in the tos_ register and returns its result in
// it, too: zero for false, and a non-zero value for true.
void ToBooleanStub::Generate(MacroAssembler* masm) {
+ // This stub overrides SometimesSetsUpAFrame() to return false. That means
+ // we cannot call anything that could cause a GC from this stub.
// This stub uses VFP3 instructions.
CpuFeatures::Scope scope(VFP3);
@@ -1713,6 +1900,41 @@ void ToBooleanStub::GenerateTypeTransition(MacroAssembler* masm) {
}
+void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
+  // We don't allow a GC during a store buffer overflow, so there is no need to
+ // store the registers in any particular way, but we do have to store and
+ // restore them.
+ __ stm(db_w, sp, kCallerSaved | lr.bit());
+ if (save_doubles_ == kSaveFPRegs) {
+ CpuFeatures::Scope scope(VFP3);
+ __ sub(sp, sp, Operand(kDoubleSize * DwVfpRegister::kNumRegisters));
+ for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) {
+ DwVfpRegister reg = DwVfpRegister::from_code(i);
+ __ vstr(reg, MemOperand(sp, i * kDoubleSize));
+ }
+ }
+ const int argument_count = 1;
+ const int fp_argument_count = 0;
+ const Register scratch = r1;
+
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ PrepareCallCFunction(argument_count, fp_argument_count, scratch);
+ __ mov(r0, Operand(ExternalReference::isolate_address()));
+ __ CallCFunction(
+ ExternalReference::store_buffer_overflow_function(masm->isolate()),
+ argument_count);
+ if (save_doubles_ == kSaveFPRegs) {
+ CpuFeatures::Scope scope(VFP3);
+ for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) {
+ DwVfpRegister reg = DwVfpRegister::from_code(i);
+ __ vldr(reg, MemOperand(sp, i * kDoubleSize));
+ }
+ __ add(sp, sp, Operand(kDoubleSize * DwVfpRegister::kNumRegisters));
+ }
+ __ ldm(ia_w, sp, kCallerSaved | pc.bit()); // Also pop pc to get Ret(0).
+}
+
+
void UnaryOpStub::PrintName(StringStream* stream) {
const char* op_name = Token::Name(op_);
const char* overwrite_name = NULL; // Make g++ happy.
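
StoreBufferOverflowStub above spills all caller-saved core registers and, when save_doubles_ is set, all sixteen VFP doubles around the C call, then reloads them, so the overflow handler stays invisible to the interrupted code. The double save area is plain fixed-offset indexing:

    // Sketch of the VFP save-area layout used by the vstr/vldr loops.
    const int kDoubleBytes = 8;
    const int kNumDoubleRegs = 16;  // DwVfpRegister::kNumRegisters on ARM

    int SaveAreaBytes() { return kDoubleBytes * kNumDoubleRegs; }   // 128 bytes
    int SlotOffset(int reg_code) { return reg_code * kDoubleBytes; }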
@@ -1866,12 +2088,13 @@ void UnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm,
__ jmp(&heapnumber_allocated);
__ bind(&slow_allocate_heapnumber);
- __ EnterInternalFrame();
- __ push(r0);
- __ CallRuntime(Runtime::kNumberAlloc, 0);
- __ mov(r1, Operand(r0));
- __ pop(r0);
- __ LeaveInternalFrame();
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ push(r0);
+ __ CallRuntime(Runtime::kNumberAlloc, 0);
+ __ mov(r1, Operand(r0));
+ __ pop(r0);
+ }
__ bind(&heapnumber_allocated);
__ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
@@ -1912,13 +2135,14 @@ void UnaryOpStub::GenerateHeapNumberCodeBitNot(
__ jmp(&heapnumber_allocated);
__ bind(&slow_allocate_heapnumber);
- __ EnterInternalFrame();
- __ push(r0); // Push the heap number, not the untagged int32.
- __ CallRuntime(Runtime::kNumberAlloc, 0);
- __ mov(r2, r0); // Move the new heap number into r2.
- // Get the heap number into r0, now that the new heap number is in r2.
- __ pop(r0);
- __ LeaveInternalFrame();
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ push(r0); // Push the heap number, not the untagged int32.
+ __ CallRuntime(Runtime::kNumberAlloc, 0);
+ __ mov(r2, r0); // Move the new heap number into r2.
+ // Get the heap number into r0, now that the new heap number is in r2.
+ __ pop(r0);
+ }
// Convert the heap number in r0 to an untagged integer in r1.
// This can't go slow-case because it's the same number we already
@@ -2028,6 +2252,10 @@ void BinaryOpStub::GenerateTypeTransitionWithSavedArgs(
void BinaryOpStub::Generate(MacroAssembler* masm) {
+ // Explicitly allow generation of nested stubs. It is safe here because
+ // generation code does not use any raw pointers.
+ AllowStubCallsScope allow_stub_calls(masm, true);
+
switch (operands_type_) {
case BinaryOpIC::UNINITIALIZED:
GenerateTypeTransition(masm);
@@ -3086,6 +3314,9 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
__ cmp(r3, r5);
__ b(ne, &calculate);
// Cache hit. Load result, cleanup and return.
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(
+ counters->transcendental_cache_hit(), 1, scratch0, scratch1);
if (tagged) {
// Pop input value from stack and load result into r0.
__ pop();
@@ -3098,6 +3329,9 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
} // if (CpuFeatures::IsSupported(VFP3))
__ bind(&calculate);
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(
+ counters->transcendental_cache_miss(), 1, scratch0, scratch1);
if (tagged) {
__ bind(&invalid_cache);
ExternalReference runtime_function =
@@ -3133,10 +3367,11 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
__ LoadRoot(r5, Heap::kHeapNumberMapRootIndex);
__ AllocateHeapNumber(r0, scratch0, scratch1, r5, &skip_cache);
__ vstr(d2, FieldMemOperand(r0, HeapNumber::kValueOffset));
- __ EnterInternalFrame();
- __ push(r0);
- __ CallRuntime(RuntimeFunction(), 1);
- __ LeaveInternalFrame();
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ push(r0);
+ __ CallRuntime(RuntimeFunction(), 1);
+ }
__ vldr(d2, FieldMemOperand(r0, HeapNumber::kValueOffset));
__ Ret();
@@ -3149,14 +3384,15 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
// We return the value in d2 without adding it to the cache, but
// we cause a scavenging GC so that future allocations will succeed.
- __ EnterInternalFrame();
-
- // Allocate an aligned object larger than a HeapNumber.
- ASSERT(4 * kPointerSize >= HeapNumber::kSize);
- __ mov(scratch0, Operand(4 * kPointerSize));
- __ push(scratch0);
- __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
- __ LeaveInternalFrame();
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Allocate an aligned object larger than a HeapNumber.
+ ASSERT(4 * kPointerSize >= HeapNumber::kSize);
+ __ mov(scratch0, Operand(4 * kPointerSize));
+ __ push(scratch0);
+ __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
+ }
__ Ret();
}
}
@@ -3173,6 +3409,7 @@ void TranscendentalCacheStub::GenerateCallCFunction(MacroAssembler* masm,
} else {
__ vmov(r0, r1, d2);
}
+ AllowExternalCallThatCantCauseGC scope(masm);
switch (type_) {
case TranscendentalCache::SIN:
__ CallCFunction(ExternalReference::math_sin_double_function(isolate),
@@ -3182,6 +3419,10 @@ void TranscendentalCacheStub::GenerateCallCFunction(MacroAssembler* masm,
__ CallCFunction(ExternalReference::math_cos_double_function(isolate),
0, 1);
break;
+ case TranscendentalCache::TAN:
+ __ CallCFunction(ExternalReference::math_tan_double_function(isolate),
+ 0, 1);
+ break;
case TranscendentalCache::LOG:
__ CallCFunction(ExternalReference::math_log_double_function(isolate),
0, 1);
@@ -3199,6 +3440,7 @@ Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
// Add more cases when necessary.
case TranscendentalCache::SIN: return Runtime::kMath_sin;
case TranscendentalCache::COS: return Runtime::kMath_cos;
+ case TranscendentalCache::TAN: return Runtime::kMath_tan;
case TranscendentalCache::LOG: return Runtime::kMath_log;
default:
UNIMPLEMENTED();
@@ -3268,11 +3510,14 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ push(lr);
__ PrepareCallCFunction(1, 1, scratch);
__ SetCallCDoubleArguments(double_base, exponent);
- __ CallCFunction(
- ExternalReference::power_double_int_function(masm->isolate()),
- 1, 1);
- __ pop(lr);
- __ GetCFunctionDoubleResult(double_result);
+ {
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ CallCFunction(
+ ExternalReference::power_double_int_function(masm->isolate()),
+ 1, 1);
+ __ pop(lr);
+ __ GetCFunctionDoubleResult(double_result);
+ }
__ vstr(double_result,
FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
__ mov(r0, heapnumber);
@@ -3298,11 +3543,14 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ push(lr);
__ PrepareCallCFunction(0, 2, scratch);
__ SetCallCDoubleArguments(double_base, double_exponent);
- __ CallCFunction(
- ExternalReference::power_double_double_function(masm->isolate()),
- 0, 2);
- __ pop(lr);
- __ GetCFunctionDoubleResult(double_result);
+ {
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ CallCFunction(
+ ExternalReference::power_double_double_function(masm->isolate()),
+ 0, 2);
+ __ pop(lr);
+ __ GetCFunctionDoubleResult(double_result);
+ }
__ vstr(double_result,
FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
__ mov(r0, heapnumber);
@@ -3319,6 +3567,37 @@ bool CEntryStub::NeedsImmovableCode() {
}
+bool CEntryStub::IsPregenerated() {
+ return (!save_doubles_ || ISOLATE->fp_stubs_generated()) &&
+ result_size_ == 1;
+}
+
+
+void CodeStub::GenerateStubsAheadOfTime() {
+ CEntryStub::GenerateAheadOfTime();
+ WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime();
+ StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime();
+ RecordWriteStub::GenerateFixedRegStubsAheadOfTime();
+}
+
+
+void CodeStub::GenerateFPStubs() {
+ CEntryStub save_doubles(1, kSaveFPRegs);
+ Handle<Code> code = save_doubles.GetCode();
+ code->set_is_pregenerated(true);
+ StoreBufferOverflowStub stub(kSaveFPRegs);
+ stub.GetCode()->set_is_pregenerated(true);
+ code->GetIsolate()->set_fp_stubs_generated(true);
+}
+
+
+void CEntryStub::GenerateAheadOfTime() {
+ CEntryStub stub(1, kDontSaveFPRegs);
+ Handle<Code> code = stub.GetCode();
+ code->set_is_pregenerated(true);
+}
+
+
void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
__ Throw(r0);
}
@@ -3430,8 +3709,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
__ b(eq, throw_out_of_memory_exception);
// Retrieve the pending exception and clear the variable.
- __ mov(ip, Operand(ExternalReference::the_hole_value_location(isolate)));
- __ ldr(r3, MemOperand(ip));
+ __ mov(r3, Operand(isolate->factory()->the_hole_value()));
__ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
isolate)));
__ ldr(r0, MemOperand(ip));
@@ -3469,6 +3747,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
__ sub(r6, r6, Operand(kPointerSize));
// Enter the exit frame that transitions from JavaScript to C++.
+ FrameScope scope(masm, StackFrame::MANUAL);
__ EnterExitFrame(save_doubles_);
  // Set up argc and the builtin function in callee-saved registers.
@@ -3527,7 +3806,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// r3: argc
// [sp+0]: argv
- Label invoke, exit;
+ Label invoke, handler_entry, exit;
// Called from C, so do not pop argc and args on exit (preserve sp)
// No need to save register-passed args
@@ -3590,31 +3869,33 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
__ bind(&cont);
__ push(ip);
- // Call a faked try-block that does the invoke.
- __ bl(&invoke);
-
- // Caught exception: Store result (exception) in the pending
- // exception field in the JSEnv and return a failure sentinel.
- // Coming in here the fp will be invalid because the PushTryHandler below
- // sets it to 0 to signal the existence of the JSEntry frame.
+ // Jump to a faked try block that does the invoke, with a faked catch
+ // block that sets the pending exception.
+ __ jmp(&invoke);
+ __ bind(&handler_entry);
+ handler_offset_ = handler_entry.pos();
+ // Caught exception: Store result (exception) in the pending exception
+ // field in the JSEnv and return a failure sentinel. Coming in here the
+ // fp will be invalid because the PushTryHandler below sets it to 0 to
+ // signal the existence of the JSEntry frame.
__ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
isolate)));
__ str(r0, MemOperand(ip));
__ mov(r0, Operand(reinterpret_cast<int32_t>(Failure::Exception())));
__ b(&exit);
- // Invoke: Link this frame into the handler chain.
+ // Invoke: Link this frame into the handler chain. There's only one
+ // handler block in this code object, so its index is 0.
__ bind(&invoke);
// Must preserve r0-r4, r5-r7 are available.
- __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER);
+ __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER, 0);
  // If an exception not caught by another handler occurs, this handler
  // transfers control to handler_entry above, which stores the pending
  // exception and branches to the exit code that restores all kCalleeSaved
  // registers (including cp and fp) to their saved values before returning
  // a failure to C.
// Clear any pending exceptions.
- __ mov(ip, Operand(ExternalReference::the_hole_value_location(isolate)));
- __ ldr(r5, MemOperand(ip));
+ __ mov(r5, Operand(isolate->factory()->the_hole_value()));
__ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
isolate)));
__ str(r5, MemOperand(ip));
@@ -3738,7 +4019,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
}
// Get the prototype of the function.
- __ TryGetFunctionPrototype(function, prototype, scratch, &slow);
+ __ TryGetFunctionPrototype(function, prototype, scratch, &slow, true);
// Check that the function prototype is a JS object.
__ JumpIfSmi(prototype, &slow);
@@ -3851,10 +4132,11 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
}
__ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
} else {
- __ EnterInternalFrame();
- __ Push(r0, r1);
- __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
- __ LeaveInternalFrame();
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(r0, r1);
+ __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
+ }
__ cmp(r0, Operand::Zero());
__ LoadRoot(r0, Heap::kTrueValueRootIndex, eq);
__ LoadRoot(r0, Heap::kFalseValueRootIndex, ne);
@@ -4250,10 +4532,6 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
#ifdef V8_INTERPRETED_REGEXP
__ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
#else // V8_INTERPRETED_REGEXP
- if (!FLAG_regexp_entry_native) {
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
- return;
- }
// Stack frame on entry.
// sp[0]: last_match_info (expected JSArray)
@@ -4375,25 +4653,39 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
Label seq_string;
__ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
__ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
- // First check for flat string.
- __ and_(r1, r0, Operand(kIsNotStringMask | kStringRepresentationMask), SetCC);
+  // First check for a flat string. None of the following string type tests
+  // will succeed if subject is not a string or a short external string.
+ __ and_(r1,
+ r0,
+ Operand(kIsNotStringMask |
+ kStringRepresentationMask |
+ kShortExternalStringMask),
+ SetCC);
STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
__ b(eq, &seq_string);
// subject: Subject string
// regexp_data: RegExp data (FixedArray)
+ // r1: whether subject is a string and if yes, its string representation
// Check for flat cons string or sliced string.
// A flat cons string is a cons string where the second part is the empty
// string. In that case the subject string is just the first part of the cons
// string. Also in this case the first part of the cons string is known to be
// a sequential string or an external string.
// In the case of a sliced string its offset has to be taken into account.
- Label cons_string, check_encoding;
+ Label cons_string, external_string, check_encoding;
STATIC_ASSERT(kConsStringTag < kExternalStringTag);
STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
+ STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
+ STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
__ cmp(r1, Operand(kExternalStringTag));
__ b(lt, &cons_string);
- __ b(eq, &runtime);
+ __ b(eq, &external_string);
+
+ // Catch non-string subject or short external string.
+  STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag != 0);
+ __ tst(r1, Operand(kIsNotStringMask | kShortExternalStringMask));
+ __ b(ne, &runtime);
// String is sliced.
__ ldr(r9, FieldMemOperand(subject, SlicedString::kOffsetOffset));
@@ -4404,8 +4696,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// String is a cons string, check whether it is flat.
__ bind(&cons_string);
__ ldr(r0, FieldMemOperand(subject, ConsString::kSecondOffset));
- __ LoadRoot(r1, Heap::kEmptyStringRootIndex);
- __ cmp(r0, r1);
+ __ CompareRoot(r0, Heap::kEmptyStringRootIndex);
__ b(ne, &runtime);
__ ldr(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
// Is first part of cons or parent of slice a flat string?
@@ -4414,7 +4705,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
STATIC_ASSERT(kSeqStringTag == 0);
__ tst(r0, Operand(kStringRepresentationMask));
- __ b(ne, &runtime);
+ __ b(ne, &external_string);
+
__ bind(&seq_string);
// subject: Subject string
// regexp_data: RegExp data (FixedArray)
@@ -4480,8 +4772,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// For arguments 4 and 3 get string length, calculate start of string data and
// calculate the shift of the index (0 for ASCII and 1 for two byte).
- STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
- __ add(r8, subject, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ add(r8, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag));
__ eor(r3, r3, Operand(1));
// Load the length from the original subject string from the previous stack
// frame. Therefore we have to use fp, which points exactly to two pointer
@@ -4532,8 +4823,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// stack overflow (on the backtrack stack) was detected in RegExp code but
// haven't created the exception yet. Handle that in the runtime system.
// TODO(592): Rerunning the RegExp to get the stack overflow exception.
- __ mov(r1, Operand(ExternalReference::the_hole_value_location(isolate)));
- __ ldr(r1, MemOperand(r1, 0));
+ __ mov(r1, Operand(isolate->factory()->the_hole_value()));
__ mov(r2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
isolate)));
__ ldr(r0, MemOperand(r2, 0));
@@ -4575,16 +4865,25 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ str(r2, FieldMemOperand(last_match_info_elements,
RegExpImpl::kLastCaptureCountOffset));
// Store last subject and last input.
- __ mov(r3, last_match_info_elements); // Moved up to reduce latency.
__ str(subject,
FieldMemOperand(last_match_info_elements,
RegExpImpl::kLastSubjectOffset));
- __ RecordWrite(r3, Operand(RegExpImpl::kLastSubjectOffset), r2, r7);
+ __ mov(r2, subject);
+ __ RecordWriteField(last_match_info_elements,
+ RegExpImpl::kLastSubjectOffset,
+ r2,
+ r7,
+ kLRHasNotBeenSaved,
+ kDontSaveFPRegs);
__ str(subject,
FieldMemOperand(last_match_info_elements,
RegExpImpl::kLastInputOffset));
- __ mov(r3, last_match_info_elements);
- __ RecordWrite(r3, Operand(RegExpImpl::kLastInputOffset), r2, r7);
+ __ RecordWriteField(last_match_info_elements,
+ RegExpImpl::kLastInputOffset,
+ subject,
+ r7,
+ kLRHasNotBeenSaved,
+ kDontSaveFPRegs);
// Get the static offsets vector filled by the native regexp code.
ExternalReference address_of_static_offsets_vector =
@@ -4615,6 +4914,26 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ add(sp, sp, Operand(4 * kPointerSize));
__ Ret();
+ // External string. Short external strings have already been ruled out.
+ // r0: scratch
+ __ bind(&external_string);
+ __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
+ __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
+ if (FLAG_debug_code) {
+ // Assert that we do not have a cons or slice (indirect strings) here.
+ // Sequential strings have already been ruled out.
+ __ tst(r0, Operand(kIsIndirectStringMask));
+ __ Assert(eq, "external string expected, but not found");
+ }
+ __ ldr(subject,
+ FieldMemOperand(subject, ExternalString::kResourceDataOffset));
+ // Move the pointer so that offset-wise, it looks like a sequential string.
+ STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
+ __ sub(subject,
+ subject,
+ Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+ __ jmp(&seq_string);
+
// Do the runtime call to execute the regexp.
__ bind(&runtime);
__ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
@@ -4712,7 +5031,24 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
}
+void CallFunctionStub::FinishCode(Handle<Code> code) {
+ code->set_has_function_cache(false);
+}
+
+
+void CallFunctionStub::Clear(Heap* heap, Address address) {
+ UNREACHABLE();
+}
+
+
+Object* CallFunctionStub::GetCachedValue(Address address) {
+ UNREACHABLE();
+ return NULL;
+}
+
+
void CallFunctionStub::Generate(MacroAssembler* masm) {
+ // r1 : the function to call
Label slow, non_function;
// The receiver might implicitly be the global object. This is
@@ -4727,16 +5063,12 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
__ CompareRoot(r4, Heap::kTheHoleValueRootIndex);
__ b(ne, &call);
// Patch the receiver on the stack with the global receiver object.
- __ ldr(r1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ ldr(r1, FieldMemOperand(r1, GlobalObject::kGlobalReceiverOffset));
- __ str(r1, MemOperand(sp, argc_ * kPointerSize));
+ __ ldr(r2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalReceiverOffset));
+ __ str(r2, MemOperand(sp, argc_ * kPointerSize));
__ bind(&call);
}
- // Get the function to call from the stack.
- // function, receiver [, arguments]
- __ ldr(r1, MemOperand(sp, (argc_ + 1) * kPointerSize));
-
// Check that the function is really a JavaScript function.
// r1: pushed function (to be verified)
__ JumpIfSmi(r1, &non_function);
@@ -4774,7 +5106,7 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
__ mov(r0, Operand(argc_ + 1, RelocInfo::NONE));
__ mov(r2, Operand(0, RelocInfo::NONE));
__ GetBuiltinEntry(r3, Builtins::CALL_FUNCTION_PROXY);
- __ SetCallKind(r5, CALL_AS_FUNCTION);
+ __ SetCallKind(r5, CALL_AS_METHOD);
{
Handle<Code> adaptor =
masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
@@ -4855,100 +5187,41 @@ void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
// If the index is non-smi trigger the non-smi case.
__ JumpIfNotSmi(index_, &index_not_smi_);
-
- // Put smi-tagged index into scratch register.
- __ mov(scratch_, index_);
__ bind(&got_smi_index_);
// Check for index out of range.
__ ldr(ip, FieldMemOperand(object_, String::kLengthOffset));
- __ cmp(ip, Operand(scratch_));
+ __ cmp(ip, Operand(index_));
__ b(ls, index_out_of_range_);
- // We need special handling for non-flat strings.
- STATIC_ASSERT(kSeqStringTag == 0);
- __ tst(result_, Operand(kStringRepresentationMask));
- __ b(eq, &flat_string);
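+  // Untag the smi index before handing it to the shared load generator.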
+ __ mov(index_, Operand(index_, ASR, kSmiTagSize));
+
+ StringCharLoadGenerator::Generate(masm,
+ object_,
+ index_,
+ result_,
+ &call_runtime_);
- // Handle non-flat strings.
- __ and_(result_, result_, Operand(kStringRepresentationMask));
- STATIC_ASSERT(kConsStringTag < kExternalStringTag);
- STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
- __ cmp(result_, Operand(kExternalStringTag));
- __ b(gt, &sliced_string);
- __ b(eq, &call_runtime_);
-
- // ConsString.
- // Check whether the right hand side is the empty string (i.e. if
- // this is really a flat string in a cons string). If that is not
- // the case we would rather go to the runtime system now to flatten
- // the string.
- Label assure_seq_string;
- __ ldr(result_, FieldMemOperand(object_, ConsString::kSecondOffset));
- __ LoadRoot(ip, Heap::kEmptyStringRootIndex);
- __ cmp(result_, Operand(ip));
- __ b(ne, &call_runtime_);
- // Get the first of the two strings and load its instance type.
- __ ldr(object_, FieldMemOperand(object_, ConsString::kFirstOffset));
- __ jmp(&assure_seq_string);
-
- // SlicedString, unpack and add offset.
- __ bind(&sliced_string);
- __ ldr(result_, FieldMemOperand(object_, SlicedString::kOffsetOffset));
- __ add(scratch_, scratch_, result_);
- __ ldr(object_, FieldMemOperand(object_, SlicedString::kParentOffset));
-
- // Assure that we are dealing with a sequential string. Go to runtime if not.
- __ bind(&assure_seq_string);
- __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
- __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
- // Check that parent is not an external string. Go to runtime otherwise.
- STATIC_ASSERT(kSeqStringTag == 0);
- __ tst(result_, Operand(kStringRepresentationMask));
- __ b(ne, &call_runtime_);
-
- // Check for 1-byte or 2-byte string.
- __ bind(&flat_string);
- STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
- STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
- __ tst(result_, Operand(kStringEncodingMask));
- __ b(ne, &ascii_string);
-
- // 2-byte string.
- // Load the 2-byte character code into the result register. We can
- // add without shifting since the smi tag size is the log2 of the
- // number of bytes in a two-byte character.
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1 && kSmiShiftSize == 0);
- __ add(scratch_, object_, Operand(scratch_));
- __ ldrh(result_, FieldMemOperand(scratch_, SeqTwoByteString::kHeaderSize));
- __ jmp(&got_char_code);
-
- // ASCII string.
- // Load the byte into the result register.
- __ bind(&ascii_string);
- __ add(scratch_, object_, Operand(scratch_, LSR, kSmiTagSize));
- __ ldrb(result_, FieldMemOperand(scratch_, SeqAsciiString::kHeaderSize));
-
- __ bind(&got_char_code);
__ mov(result_, Operand(result_, LSL, kSmiTagSize));
__ bind(&exit_);
}
void StringCharCodeAtGenerator::GenerateSlow(
- MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+ MacroAssembler* masm,
+ const RuntimeCallHelper& call_helper) {
__ Abort("Unexpected fallthrough to CharCodeAt slow case");
// Index is not a smi.
__ bind(&index_not_smi_);
// If index is a heap number, try converting it to an integer.
__ CheckMap(index_,
- scratch_,
+ result_,
Heap::kHeapNumberMapRootIndex,
index_not_number_,
DONT_DO_SMI_CHECK);
call_helper.BeforeCall(masm);
- __ Push(object_, index_);
+ __ push(object_);
__ push(index_); // Consumed by runtime conversion function.
if (index_flags_ == STRING_INDEX_IS_NUMBER) {
__ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
@@ -4959,15 +5232,14 @@ void StringCharCodeAtGenerator::GenerateSlow(
}
// Save the conversion result before the pop instructions below
// have a chance to overwrite it.
- __ Move(scratch_, r0);
- __ pop(index_);
+ __ Move(index_, r0);
__ pop(object_);
// Reload the instance type.
__ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
__ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
call_helper.AfterCall(masm);
// If index is still not a smi, it must be out of range.
- __ JumpIfNotSmi(scratch_, index_out_of_range_);
+ __ JumpIfNotSmi(index_, index_out_of_range_);
// Otherwise, return to the fast path.
__ jmp(&got_smi_index_);
@@ -4976,6 +5248,7 @@ void StringCharCodeAtGenerator::GenerateSlow(
// is too complex (e.g., when the string needs to be flattened).
__ bind(&call_runtime_);
call_helper.BeforeCall(masm);
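+  // index_ was untagged on the fast path; re-tag it as a smi for the
+  // runtime call.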
+ __ mov(index_, Operand(index_, LSL, kSmiTagSize));
__ Push(object_, index_);
__ CallRuntime(Runtime::kStringCharCodeAt, 2);
__ Move(result_, r0);
@@ -5012,7 +5285,8 @@ void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
void StringCharFromCodeGenerator::GenerateSlow(
- MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+ MacroAssembler* masm,
+ const RuntimeCallHelper& call_helper) {
__ Abort("Unexpected fallthrough to CharFromCode slow case");
__ bind(&slow_case_);
@@ -5037,76 +5311,13 @@ void StringCharAtGenerator::GenerateFast(MacroAssembler* masm) {
void StringCharAtGenerator::GenerateSlow(
- MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+ MacroAssembler* masm,
+ const RuntimeCallHelper& call_helper) {
char_code_at_generator_.GenerateSlow(masm, call_helper);
char_from_code_generator_.GenerateSlow(masm, call_helper);
}
-class StringHelper : public AllStatic {
- public:
- // Generate code for copying characters using a simple loop. This should only
- // be used in places where the number of characters is small and the
- // additional setup and checking in GenerateCopyCharactersLong adds too much
- // overhead. Copying of overlapping regions is not supported.
- // Dest register ends at the position after the last character written.
- static void GenerateCopyCharacters(MacroAssembler* masm,
- Register dest,
- Register src,
- Register count,
- Register scratch,
- bool ascii);
-
- // Generate code for copying a large number of characters. This function
- // is allowed to spend extra time setting up conditions to make copying
- // faster. Copying of overlapping regions is not supported.
- // Dest register ends at the position after the last character written.
- static void GenerateCopyCharactersLong(MacroAssembler* masm,
- Register dest,
- Register src,
- Register count,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Register scratch5,
- int flags);
-
-
- // Probe the symbol table for a two character string. If the string is
- // not found by probing a jump to the label not_found is performed. This jump
- // does not guarantee that the string is not in the symbol table. If the
- // string is found the code falls through with the string in register r0.
- // Contents of both c1 and c2 registers are modified. At the exit c1 is
- // guaranteed to contain halfword with low and high bytes equal to
- // initial contents of c1 and c2 respectively.
- static void GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
- Register c1,
- Register c2,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Register scratch5,
- Label* not_found);
-
- // Generate string hash.
- static void GenerateHashInit(MacroAssembler* masm,
- Register hash,
- Register character);
-
- static void GenerateHashAddCharacter(MacroAssembler* masm,
- Register hash,
- Register character);
-
- static void GenerateHashGetHash(MacroAssembler* masm,
- Register hash);
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
-};
-
-
void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
Register dest,
Register src,
@@ -5359,9 +5570,8 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
static const int kProbes = 4;
Label found_in_symbol_table;
Label next_probe[kProbes];
+ Register candidate = scratch5; // Scratch register contains candidate.
for (int i = 0; i < kProbes; i++) {
- Register candidate = scratch5; // Scratch register contains candidate.
-
// Calculate entry in symbol table.
if (i > 0) {
__ add(candidate, hash, Operand(SymbolTable::GetProbeOffset(i)));
@@ -5386,11 +5596,11 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
__ cmp(undefined, candidate);
__ b(eq, not_found);
- // Must be null (deleted entry).
+ // Must be the hole (deleted entry).
if (FLAG_debug_code) {
- __ LoadRoot(ip, Heap::kNullValueRootIndex);
+ __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(ip, candidate);
- __ Assert(eq, "oddball in symbol table is not undefined or null");
+ __ Assert(eq, "oddball in symbol table is not undefined or the hole");
}
__ jmp(&next_probe[i]);
@@ -5418,7 +5628,7 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
__ jmp(not_found);
// Scratch register contains result when we fall through to here.
- Register result = scratch;
+ Register result = candidate;
__ bind(&found_in_symbol_table);
__ Move(r0, result);
}
@@ -5430,7 +5640,7 @@ void StringHelper::GenerateHashInit(MacroAssembler* masm,
// hash = character + (character << 10);
__ add(hash, character, Operand(character, LSL, 10));
// hash ^= hash >> 6;
- __ eor(hash, hash, Operand(hash, ASR, 6));
+ __ eor(hash, hash, Operand(hash, LSR, 6));
}
@@ -5442,7 +5652,7 @@ void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
// hash += hash << 10;
__ add(hash, hash, Operand(hash, LSL, 10));
// hash ^= hash >> 6;
- __ eor(hash, hash, Operand(hash, ASR, 6));
+ __ eor(hash, hash, Operand(hash, LSR, 6));
}
@@ -5451,12 +5661,15 @@ void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
// hash += hash << 3;
__ add(hash, hash, Operand(hash, LSL, 3));
// hash ^= hash >> 11;
- __ eor(hash, hash, Operand(hash, ASR, 11));
+ __ eor(hash, hash, Operand(hash, LSR, 11));
// hash += hash << 15;
__ add(hash, hash, Operand(hash, LSL, 15), SetCC);
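+  // Only (32 - String::kHashShift) bits of the hash survive once it is
+  // shifted into the hash field, so mask the rest off here.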
+ uint32_t kHashShiftCutOffMask = (1 << (32 - String::kHashShift)) - 1;
+ __ and_(hash, hash, Operand(kHashShiftCutOffMask));
+
// if (hash == 0) hash = 27;
- __ mov(hash, Operand(27), LeaveCC, ne);
+ __ mov(hash, Operand(27), LeaveCC, eq);
}
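

// Taken together, the three hash helpers above compute the Jenkins
// one-at-a-time hash, truncated to the bits that fit in the string hash
// field. A scalar C++ sketch of the intended computation (illustrative
// only; the function name and signature are hypothetical):
//
//   uint32_t OneAtATimeHash(const uint8_t* chars, int length) {
//     uint32_t hash = 0;
//     for (int i = 0; i < length; i++) {
//       hash += chars[i];
//       hash += hash << 10;
//       hash ^= hash >> 6;  // Logical shift, hence LSR in the code above.
//     }
//     hash += hash << 3;
//     hash ^= hash >> 11;
//     hash += hash << 15;
//     hash &= (1u << (32 - String::kHashShift)) - 1;
//     if (hash == 0) hash = 27;
//     return hash;
//   }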
@@ -5671,15 +5884,12 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// r3: from index (untagged smi)
// r6 (a.k.a. to): to (smi)
// r7 (a.k.a. from): from offset (smi)
- Label allocate_slice, sliced_string, seq_string;
- STATIC_ASSERT(kSeqStringTag == 0);
- __ tst(r1, Operand(kStringRepresentationMask));
- __ b(eq, &seq_string);
+ Label allocate_slice, sliced_string, seq_or_external_string;
+ // If the string is not indirect, it can only be sequential or external.
STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
STATIC_ASSERT(kIsIndirectStringMask != 0);
__ tst(r1, Operand(kIsIndirectStringMask));
- // External string. Jump to runtime.
- __ b(eq, &runtime);
+ __ b(eq, &seq_or_external_string);
__ tst(r1, Operand(kSlicedNotConsMask));
__ b(ne, &sliced_string);
@@ -5698,8 +5908,8 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ ldr(r5, FieldMemOperand(r0, SlicedString::kParentOffset));
__ jmp(&allocate_slice);
- __ bind(&seq_string);
- // Sequential string. Just move string to the right register.
+ __ bind(&seq_or_external_string);
+  // Sequential or external string. Move the string to the correct register.
__ mov(r5, r0);
__ bind(&allocate_slice);
@@ -6425,12 +6635,13 @@ void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
// Call the runtime system in a fresh internal frame.
ExternalReference miss =
ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate());
- __ EnterInternalFrame();
- __ Push(r1, r0);
- __ mov(ip, Operand(Smi::FromInt(op_)));
- __ push(ip);
- __ CallExternalReference(miss, 3);
- __ LeaveInternalFrame();
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(r1, r0);
+ __ mov(ip, Operand(Smi::FromInt(op_)));
+ __ push(ip);
+ __ CallExternalReference(miss, 3);
+ }
// Compute the entry point of the rewritten stub.
__ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
// Restore registers.
@@ -6469,14 +6680,13 @@ void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
}
-MaybeObject* StringDictionaryLookupStub::GenerateNegativeLookup(
- MacroAssembler* masm,
- Label* miss,
- Label* done,
- Register receiver,
- Register properties,
- String* name,
- Register scratch0) {
+void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
+ Label* miss,
+ Label* done,
+ Register receiver,
+ Register properties,
+ Handle<String> name,
+ Register scratch0) {
// If names of slots in range from 1 to kProbes - 1 for the hash value are
// not equal to the name and kProbes-th slot is not used (its name is the
// undefined value), it guarantees the hash table doesn't contain the
@@ -6534,14 +6744,12 @@ MaybeObject* StringDictionaryLookupStub::GenerateNegativeLookup(
__ ldr(r0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
__ mov(r1, Operand(Handle<String>(name)));
StringDictionaryLookupStub stub(NEGATIVE_LOOKUP);
- MaybeObject* result = masm->TryCallStub(&stub);
- if (result->IsFailure()) return result;
+ __ CallStub(&stub);
__ tst(r0, Operand(r0));
__ ldm(ia_w, sp, spill_mask);
__ b(eq, done);
__ b(ne, miss);
- return result;
}
@@ -6556,6 +6764,11 @@ void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
Register name,
Register scratch1,
Register scratch2) {
+ ASSERT(!elements.is(scratch1));
+ ASSERT(!elements.is(scratch2));
+ ASSERT(!name.is(scratch1));
+ ASSERT(!name.is(scratch2));
+
// Assert that name contains a string.
if (FLAG_debug_code) __ AbortIfNotString(name);
@@ -6599,8 +6812,14 @@ void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
~(scratch1.bit() | scratch2.bit());
__ stm(db_w, sp, spill_mask);
- __ Move(r0, elements);
- __ Move(r1, name);
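+  // Load the two call arguments without clobbering name when it already
+  // sits in r0.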
+ if (name.is(r0)) {
+ ASSERT(!elements.is(r1));
+ __ Move(r1, name);
+ __ Move(r0, elements);
+ } else {
+ __ Move(r0, elements);
+ __ Move(r1, name);
+ }
StringDictionaryLookupStub stub(POSITIVE_LOOKUP);
__ CallStub(&stub);
__ tst(r0, Operand(r0));
@@ -6613,6 +6832,8 @@ void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
+ // This stub overrides SometimesSetsUpAFrame() to return false. That means
+ // we cannot call anything that could cause a GC from this stub.
// Registers:
// result: StringDictionary to probe
// r1: key
@@ -6702,6 +6923,333 @@ void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
}
+struct AheadOfTimeWriteBarrierStubList {
+ Register object, value, address;
+ RememberedSetAction action;
+};
+
+
+struct AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
+ // Used in RegExpExecStub.
+ { r6, r4, r7, EMIT_REMEMBERED_SET },
+ { r6, r2, r7, EMIT_REMEMBERED_SET },
+ // Used in CompileArrayPushCall.
+ // Also used in StoreIC::GenerateNormal via GenerateDictionaryStore.
+ // Also used in KeyedStoreIC::GenerateGeneric.
+ { r3, r4, r5, EMIT_REMEMBERED_SET },
+ // Used in CompileStoreGlobal.
+ { r4, r1, r2, OMIT_REMEMBERED_SET },
+ // Used in StoreStubCompiler::CompileStoreField via GenerateStoreField.
+ { r1, r2, r3, EMIT_REMEMBERED_SET },
+ { r3, r2, r1, EMIT_REMEMBERED_SET },
+ // Used in KeyedStoreStubCompiler::CompileStoreField via GenerateStoreField.
+ { r2, r1, r3, EMIT_REMEMBERED_SET },
+ { r3, r1, r2, EMIT_REMEMBERED_SET },
+ // KeyedStoreStubCompiler::GenerateStoreFastElement.
+ { r4, r2, r3, EMIT_REMEMBERED_SET },
+ // ElementsTransitionGenerator::GenerateSmiOnlyToObject
+ // and ElementsTransitionGenerator::GenerateSmiOnlyToDouble
+ // and ElementsTransitionGenerator::GenerateDoubleToObject
+ { r2, r3, r9, EMIT_REMEMBERED_SET },
+ // ElementsTransitionGenerator::GenerateDoubleToObject
+ { r6, r2, r0, EMIT_REMEMBERED_SET },
+ { r2, r6, r9, EMIT_REMEMBERED_SET },
+ // StoreArrayLiteralElementStub::Generate
+ { r5, r0, r6, EMIT_REMEMBERED_SET },
+ // Null termination.
+ { no_reg, no_reg, no_reg, EMIT_REMEMBERED_SET}
+};
+
+
+bool RecordWriteStub::IsPregenerated() {
+ for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
+ !entry->object.is(no_reg);
+ entry++) {
+ if (object_.is(entry->object) &&
+ value_.is(entry->value) &&
+ address_.is(entry->address) &&
+ remembered_set_action_ == entry->action &&
+ save_fp_regs_mode_ == kDontSaveFPRegs) {
+ return true;
+ }
+ }
+ return false;
+}
+
+
+bool StoreBufferOverflowStub::IsPregenerated() {
+ return save_doubles_ == kDontSaveFPRegs || ISOLATE->fp_stubs_generated();
+}
+
+
+void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime() {
+ StoreBufferOverflowStub stub1(kDontSaveFPRegs);
+ stub1.GetCode()->set_is_pregenerated(true);
+}
+
+
+void RecordWriteStub::GenerateFixedRegStubsAheadOfTime() {
+ for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
+ !entry->object.is(no_reg);
+ entry++) {
+ RecordWriteStub stub(entry->object,
+ entry->value,
+ entry->address,
+ entry->action,
+ kDontSaveFPRegs);
+ stub.GetCode()->set_is_pregenerated(true);
+ }
+}
+
+
+// Takes the input in 3 registers: address_, value_ and object_. A pointer to
+// the value has just been written into the object, now this stub makes sure
+// we keep the GC informed. The word in the object where the value has been
+// written is in the address register.
+void RecordWriteStub::Generate(MacroAssembler* masm) {
+ Label skip_to_incremental_noncompacting;
+ Label skip_to_incremental_compacting;
+
+ // The first two instructions are generated with labels so as to get the
+  // offset fixed up correctly by the bind(Label*) call. We patch them back
+  // and forth between a compare instruction (a nop in this position) and the
+  // real branch when we start and stop incremental heap marking.
+ // See RecordWriteStub::Patch for details.
+ __ b(&skip_to_incremental_noncompacting);
+ __ b(&skip_to_incremental_compacting);
+
+ if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
+ __ RememberedSetHelper(object_,
+ address_,
+ value_,
+ save_fp_regs_mode_,
+ MacroAssembler::kReturnAtEnd);
+ }
+ __ Ret();
+
+ __ bind(&skip_to_incremental_noncompacting);
+ GenerateIncremental(masm, INCREMENTAL);
+
+ __ bind(&skip_to_incremental_compacting);
+ GenerateIncremental(masm, INCREMENTAL_COMPACTION);
+
+ // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
+ // Will be checked in IncrementalMarking::ActivateGeneratedStub.
+ ASSERT(Assembler::GetBranchOffset(masm->instr_at(0)) < (1 << 12));
+ ASSERT(Assembler::GetBranchOffset(masm->instr_at(4)) < (1 << 12));
+ PatchBranchIntoNop(masm, 0);
+ PatchBranchIntoNop(masm, Assembler::kInstrSize);
+}
+
+
+void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
+ regs_.Save(masm);
+
+ if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
+ Label dont_need_remembered_set;
+
+ __ ldr(regs_.scratch0(), MemOperand(regs_.address(), 0));
+ __ JumpIfNotInNewSpace(regs_.scratch0(), // Value.
+ regs_.scratch0(),
+ &dont_need_remembered_set);
+
+ __ CheckPageFlag(regs_.object(),
+ regs_.scratch0(),
+ 1 << MemoryChunk::SCAN_ON_SCAVENGE,
+ ne,
+ &dont_need_remembered_set);
+
+ // First notify the incremental marker if necessary, then update the
+ // remembered set.
+ CheckNeedsToInformIncrementalMarker(
+ masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
+ InformIncrementalMarker(masm, mode);
+ regs_.Restore(masm);
+ __ RememberedSetHelper(object_,
+ address_,
+ value_,
+ save_fp_regs_mode_,
+ MacroAssembler::kReturnAtEnd);
+
+ __ bind(&dont_need_remembered_set);
+ }
+
+ CheckNeedsToInformIncrementalMarker(
+ masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
+ InformIncrementalMarker(masm, mode);
+ regs_.Restore(masm);
+ __ Ret();
+}
+
+
+void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
+ regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
+ int argument_count = 3;
+ __ PrepareCallCFunction(argument_count, regs_.scratch0());
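+  // r0 is about to be loaded with the object (the first C argument), so if
+  // the slot address currently lives in r0, move it out of the way first.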
+ Register address =
+ r0.is(regs_.address()) ? regs_.scratch0() : regs_.address();
+ ASSERT(!address.is(regs_.object()));
+ ASSERT(!address.is(r0));
+ __ Move(address, regs_.address());
+ __ Move(r0, regs_.object());
+ if (mode == INCREMENTAL_COMPACTION) {
+ __ Move(r1, address);
+ } else {
+ ASSERT(mode == INCREMENTAL);
+ __ ldr(r1, MemOperand(address, 0));
+ }
+ __ mov(r2, Operand(ExternalReference::isolate_address()));
+
+ AllowExternalCallThatCantCauseGC scope(masm);
+ if (mode == INCREMENTAL_COMPACTION) {
+ __ CallCFunction(
+ ExternalReference::incremental_evacuation_record_write_function(
+ masm->isolate()),
+ argument_count);
+ } else {
+ ASSERT(mode == INCREMENTAL);
+ __ CallCFunction(
+ ExternalReference::incremental_marking_record_write_function(
+ masm->isolate()),
+ argument_count);
+ }
+ regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
+}
+
+
+void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
+ MacroAssembler* masm,
+ OnNoNeedToInformIncrementalMarker on_no_need,
+ Mode mode) {
+ Label on_black;
+ Label need_incremental;
+ Label need_incremental_pop_scratch;
+
+  // Let's look at the color of the object: if it is not black, we don't
+  // have to inform the incremental marker.
+ __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
+
+ regs_.Restore(masm);
+ if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
+ __ RememberedSetHelper(object_,
+ address_,
+ value_,
+ save_fp_regs_mode_,
+ MacroAssembler::kReturnAtEnd);
+ } else {
+ __ Ret();
+ }
+
+ __ bind(&on_black);
+
+ // Get the value from the slot.
+ __ ldr(regs_.scratch0(), MemOperand(regs_.address(), 0));
+
+ if (mode == INCREMENTAL_COMPACTION) {
+ Label ensure_not_white;
+
+ __ CheckPageFlag(regs_.scratch0(), // Contains value.
+ regs_.scratch1(), // Scratch.
+ MemoryChunk::kEvacuationCandidateMask,
+ eq,
+ &ensure_not_white);
+
+ __ CheckPageFlag(regs_.object(),
+ regs_.scratch1(), // Scratch.
+ MemoryChunk::kSkipEvacuationSlotsRecordingMask,
+ eq,
+ &need_incremental);
+
+ __ bind(&ensure_not_white);
+ }
+
+ // We need extra registers for this, so we push the object and the address
+ // register temporarily.
+ __ Push(regs_.object(), regs_.address());
+ __ EnsureNotWhite(regs_.scratch0(), // The value.
+ regs_.scratch1(), // Scratch.
+ regs_.object(), // Scratch.
+ regs_.address(), // Scratch.
+ &need_incremental_pop_scratch);
+ __ Pop(regs_.object(), regs_.address());
+
+ regs_.Restore(masm);
+ if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
+ __ RememberedSetHelper(object_,
+ address_,
+ value_,
+ save_fp_regs_mode_,
+ MacroAssembler::kReturnAtEnd);
+ } else {
+ __ Ret();
+ }
+
+ __ bind(&need_incremental_pop_scratch);
+ __ Pop(regs_.object(), regs_.address());
+
+ __ bind(&need_incremental);
+
+ // Fall through when we need to inform the incremental marker.
+}
+
+
+void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r0 : element value to store
+ // -- r1 : array literal
+ // -- r2 : map of array literal
+ // -- r3 : element index as smi
+ // -- r4 : array literal index in function as smi
+ // -----------------------------------
+
+ Label element_done;
+ Label double_elements;
+ Label smi_element;
+ Label slow_elements;
+ Label fast_elements;
+
+ __ CheckFastElements(r2, r5, &double_elements);
+ // FAST_SMI_ONLY_ELEMENTS or FAST_ELEMENTS
+ __ JumpIfSmi(r0, &smi_element);
+ __ CheckFastSmiOnlyElements(r2, r5, &fast_elements);
+
+  // Storing into the array literal requires an elements transition. Call
+  // into the runtime.
+ __ bind(&slow_elements);
+  // Push the five arguments and tail-call the runtime.
+ __ Push(r1, r3, r0);
+ __ ldr(r5, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ ldr(r5, FieldMemOperand(r5, JSFunction::kLiteralsOffset));
+ __ Push(r5, r4);
+ __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
+
+ // Array literal has ElementsKind of FAST_ELEMENTS and value is an object.
+ __ bind(&fast_elements);
+ __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset));
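+  // r3 is the element index as a smi, so shifting it left by
+  // kPointerSizeLog2 - kSmiTagSize converts it into a byte offset.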
+ __ add(r6, r5, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ add(r6, r6, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ str(r0, MemOperand(r6, 0));
+ // Update the write barrier for the array store.
+ __ RecordWrite(r5, r6, r0, kLRHasNotBeenSaved, kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+ __ Ret();
+
+ // Array literal has ElementsKind of FAST_SMI_ONLY_ELEMENTS or
+ // FAST_ELEMENTS, and value is Smi.
+ __ bind(&smi_element);
+ __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset));
+ __ add(r6, r5, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ str(r0, FieldMemOperand(r6, FixedArray::kHeaderSize));
+ __ Ret();
+
+ // Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS.
+ __ bind(&double_elements);
+ __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset));
+ __ StoreNumberToDoubleElements(r0, r3, r1, r5, r6, r7, r9, r10,
+ &slow_elements);
+ __ Ret();
+}
+
#undef __
} } // namespace v8::internal
diff --git a/deps/v8/src/arm/code-stubs-arm.h b/deps/v8/src/arm/code-stubs-arm.h
index 557f7e6d4..38ed476cc 100644
--- a/deps/v8/src/arm/code-stubs-arm.h
+++ b/deps/v8/src/arm/code-stubs-arm.h
@@ -58,6 +58,25 @@ class TranscendentalCacheStub: public CodeStub {
};
+class StoreBufferOverflowStub: public CodeStub {
+ public:
+ explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp)
+ : save_doubles_(save_fp) { }
+
+ void Generate(MacroAssembler* masm);
+
+ virtual bool IsPregenerated();
+ static void GenerateFixedRegStubsAheadOfTime();
+ virtual bool SometimesSetsUpAFrame() { return false; }
+
+ private:
+ SaveFPRegsMode save_doubles_;
+
+ Major MajorKey() { return StoreBufferOverflow; }
+ int MinorKey() { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; }
+};
+
+
class UnaryOpStub: public CodeStub {
public:
UnaryOpStub(Token::Value op,
@@ -117,7 +136,7 @@ class UnaryOpStub: public CodeStub {
return UnaryOpIC::ToState(operand_type_);
}
- virtual void FinishCode(Code* code) {
+ virtual void FinishCode(Handle<Code> code) {
code->set_unary_op_type(operand_type_);
}
};
@@ -216,7 +235,7 @@ class BinaryOpStub: public CodeStub {
return BinaryOpIC::ToState(operands_type_);
}
- virtual void FinishCode(Code* code) {
+ virtual void FinishCode(Handle<Code> code) {
code->set_binary_op_type(operands_type_);
code->set_binary_op_result_type(result_type_);
}
@@ -225,6 +244,70 @@ class BinaryOpStub: public CodeStub {
};
+class StringHelper : public AllStatic {
+ public:
+ // Generate code for copying characters using a simple loop. This should only
+ // be used in places where the number of characters is small and the
+ // additional setup and checking in GenerateCopyCharactersLong adds too much
+ // overhead. Copying of overlapping regions is not supported.
+ // Dest register ends at the position after the last character written.
+ static void GenerateCopyCharacters(MacroAssembler* masm,
+ Register dest,
+ Register src,
+ Register count,
+ Register scratch,
+ bool ascii);
+
+ // Generate code for copying a large number of characters. This function
+ // is allowed to spend extra time setting up conditions to make copying
+ // faster. Copying of overlapping regions is not supported.
+ // Dest register ends at the position after the last character written.
+ static void GenerateCopyCharactersLong(MacroAssembler* masm,
+ Register dest,
+ Register src,
+ Register count,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4,
+ Register scratch5,
+ int flags);
+
+
+  // Probe the symbol table for a two character string. If the string is
+  // not found by probing, a jump to the label not_found is performed. This
+  // jump does not guarantee that the string is not in the symbol table. If
+  // the string is found, the code falls through with the string in register
+  // r0. Contents of both c1 and c2 registers are modified. At the exit c1
+  // is guaranteed to contain a halfword with low and high bytes equal to
+  // the initial contents of c1 and c2 respectively.
+ static void GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
+ Register c1,
+ Register c2,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4,
+ Register scratch5,
+ Label* not_found);
+
+ // Generate string hash.
+ static void GenerateHashInit(MacroAssembler* masm,
+ Register hash,
+ Register character);
+
+ static void GenerateHashAddCharacter(MacroAssembler* masm,
+ Register hash,
+ Register character);
+
+ static void GenerateHashGetHash(MacroAssembler* masm,
+ Register hash);
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
+};
+
+
// Flag that indicates how to generate code for the stub StringAddStub.
enum StringAddFlags {
NO_STRING_ADD_FLAGS = 0,
@@ -323,6 +406,9 @@ class WriteInt32ToHeapNumberStub : public CodeStub {
the_heap_number_(the_heap_number),
scratch_(scratch) { }
+ bool IsPregenerated();
+ static void GenerateFixedRegStubsAheadOfTime();
+
private:
Register the_int_;
Register the_heap_number_;
@@ -371,6 +457,218 @@ class NumberToStringStub: public CodeStub {
};
+class RecordWriteStub: public CodeStub {
+ public:
+ RecordWriteStub(Register object,
+ Register value,
+ Register address,
+ RememberedSetAction remembered_set_action,
+ SaveFPRegsMode fp_mode)
+ : object_(object),
+ value_(value),
+ address_(address),
+ remembered_set_action_(remembered_set_action),
+ save_fp_regs_mode_(fp_mode),
+ regs_(object, // An input reg.
+ address, // An input reg.
+ value) { // One scratch reg.
+ }
+
+ enum Mode {
+ STORE_BUFFER_ONLY,
+ INCREMENTAL,
+ INCREMENTAL_COMPACTION
+ };
+
+ virtual bool IsPregenerated();
+ static void GenerateFixedRegStubsAheadOfTime();
+ virtual bool SometimesSetsUpAFrame() { return false; }
+
+ static void PatchBranchIntoNop(MacroAssembler* masm, int pos) {
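+    // Clearing B27 turns the branch into a data-processing instruction;
+    // setting B24 | B20 selects tst with an immediate operand (the small
+    // branch offset), which only updates the condition flags and therefore
+    // acts as a nop here.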
+ masm->instr_at_put(pos, (masm->instr_at(pos) & ~B27) | (B24 | B20));
+ ASSERT(Assembler::IsTstImmediate(masm->instr_at(pos)));
+ }
+
+ static void PatchNopIntoBranch(MacroAssembler* masm, int pos) {
+ masm->instr_at_put(pos, (masm->instr_at(pos) & ~(B24 | B20)) | B27);
+ ASSERT(Assembler::IsBranch(masm->instr_at(pos)));
+ }
+
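+  // The first two instructions of the stub encode its current mode:
+  //   first        second       mode
+  //   tst (nop)    tst (nop)    STORE_BUFFER_ONLY
+  //   branch       tst (nop)    INCREMENTAL
+  //   tst (nop)    branch       INCREMENTAL_COMPACTION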
+ static Mode GetMode(Code* stub) {
+ Instr first_instruction = Assembler::instr_at(stub->instruction_start());
+ Instr second_instruction = Assembler::instr_at(stub->instruction_start() +
+ Assembler::kInstrSize);
+
+ if (Assembler::IsBranch(first_instruction)) {
+ return INCREMENTAL;
+ }
+
+ ASSERT(Assembler::IsTstImmediate(first_instruction));
+
+ if (Assembler::IsBranch(second_instruction)) {
+ return INCREMENTAL_COMPACTION;
+ }
+
+ ASSERT(Assembler::IsTstImmediate(second_instruction));
+
+ return STORE_BUFFER_ONLY;
+ }
+
+ static void Patch(Code* stub, Mode mode) {
+ MacroAssembler masm(NULL,
+ stub->instruction_start(),
+ stub->instruction_size());
+ switch (mode) {
+ case STORE_BUFFER_ONLY:
+ ASSERT(GetMode(stub) == INCREMENTAL ||
+ GetMode(stub) == INCREMENTAL_COMPACTION);
+ PatchBranchIntoNop(&masm, 0);
+ PatchBranchIntoNop(&masm, Assembler::kInstrSize);
+ break;
+ case INCREMENTAL:
+ ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
+ PatchNopIntoBranch(&masm, 0);
+ break;
+ case INCREMENTAL_COMPACTION:
+ ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
+ PatchNopIntoBranch(&masm, Assembler::kInstrSize);
+ break;
+ }
+ ASSERT(GetMode(stub) == mode);
+ CPU::FlushICache(stub->instruction_start(), 2 * Assembler::kInstrSize);
+ }
+
+ private:
+ // This is a helper class for freeing up 3 scratch registers. The input is
+ // two registers that must be preserved and one scratch register provided by
+ // the caller.
+ class RegisterAllocation {
+ public:
+ RegisterAllocation(Register object,
+ Register address,
+ Register scratch0)
+ : object_(object),
+ address_(address),
+ scratch0_(scratch0) {
+ ASSERT(!AreAliased(scratch0, object, address, no_reg));
+ scratch1_ = GetRegThatIsNotOneOf(object_, address_, scratch0_);
+ }
+
+ void Save(MacroAssembler* masm) {
+ ASSERT(!AreAliased(object_, address_, scratch1_, scratch0_));
+ // We don't have to save scratch0_ because it was given to us as
+ // a scratch register.
+ masm->push(scratch1_);
+ }
+
+ void Restore(MacroAssembler* masm) {
+ masm->pop(scratch1_);
+ }
+
+ // If we have to call into C then we need to save and restore all caller-
+ // saved registers that were not already preserved. The scratch registers
+ // will be restored by other means so we don't bother pushing them here.
+ void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
+ masm->stm(db_w, sp, (kCallerSaved | lr.bit()) & ~scratch1_.bit());
+ if (mode == kSaveFPRegs) {
+ CpuFeatures::Scope scope(VFP3);
+ masm->sub(sp,
+ sp,
+ Operand(kDoubleSize * (DwVfpRegister::kNumRegisters - 1)));
+ // Save all VFP registers except d0.
+ for (int i = DwVfpRegister::kNumRegisters - 1; i > 0; i--) {
+ DwVfpRegister reg = DwVfpRegister::from_code(i);
+ masm->vstr(reg, MemOperand(sp, (i - 1) * kDoubleSize));
+ }
+ }
+ }
+
+  inline void RestoreCallerSaveRegisters(MacroAssembler* masm,
+ SaveFPRegsMode mode) {
+ if (mode == kSaveFPRegs) {
+ CpuFeatures::Scope scope(VFP3);
+ // Restore all VFP registers except d0.
+ for (int i = DwVfpRegister::kNumRegisters - 1; i > 0; i--) {
+ DwVfpRegister reg = DwVfpRegister::from_code(i);
+ masm->vldr(reg, MemOperand(sp, (i - 1) * kDoubleSize));
+ }
+ masm->add(sp,
+ sp,
+ Operand(kDoubleSize * (DwVfpRegister::kNumRegisters - 1)));
+ }
+ masm->ldm(ia_w, sp, (kCallerSaved | lr.bit()) & ~scratch1_.bit());
+ }
+
+ inline Register object() { return object_; }
+ inline Register address() { return address_; }
+ inline Register scratch0() { return scratch0_; }
+ inline Register scratch1() { return scratch1_; }
+
+ private:
+ Register object_;
+ Register address_;
+ Register scratch0_;
+ Register scratch1_;
+
+ Register GetRegThatIsNotOneOf(Register r1,
+ Register r2,
+ Register r3) {
+ for (int i = 0; i < Register::kNumAllocatableRegisters; i++) {
+ Register candidate = Register::FromAllocationIndex(i);
+ if (candidate.is(r1)) continue;
+ if (candidate.is(r2)) continue;
+ if (candidate.is(r3)) continue;
+ return candidate;
+ }
+ UNREACHABLE();
+ return no_reg;
+ }
+ friend class RecordWriteStub;
+ };
+
+ enum OnNoNeedToInformIncrementalMarker {
+ kReturnOnNoNeedToInformIncrementalMarker,
+ kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
+ };
+
+ void Generate(MacroAssembler* masm);
+ void GenerateIncremental(MacroAssembler* masm, Mode mode);
+ void CheckNeedsToInformIncrementalMarker(
+ MacroAssembler* masm,
+ OnNoNeedToInformIncrementalMarker on_no_need,
+ Mode mode);
+ void InformIncrementalMarker(MacroAssembler* masm, Mode mode);
+
+ Major MajorKey() { return RecordWrite; }
+
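+  // The minor key packs the three register codes (4 bits each) and the two
+  // mode flags into 14 bits, so each register/mode combination yields a
+  // distinct stub.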
+ int MinorKey() {
+ return ObjectBits::encode(object_.code()) |
+ ValueBits::encode(value_.code()) |
+ AddressBits::encode(address_.code()) |
+ RememberedSetActionBits::encode(remembered_set_action_) |
+ SaveFPRegsModeBits::encode(save_fp_regs_mode_);
+ }
+
+ void Activate(Code* code) {
+ code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
+ }
+
+ class ObjectBits: public BitField<int, 0, 4> {};
+ class ValueBits: public BitField<int, 4, 4> {};
+ class AddressBits: public BitField<int, 8, 4> {};
+ class RememberedSetActionBits: public BitField<RememberedSetAction, 12, 1> {};
+ class SaveFPRegsModeBits: public BitField<SaveFPRegsMode, 13, 1> {};
+
+ Register object_;
+ Register value_;
+ Register address_;
+ RememberedSetAction remembered_set_action_;
+ SaveFPRegsMode save_fp_regs_mode_;
+ Label slow_;
+ RegisterAllocation regs_;
+};
+
+
// Enter C code from generated RegExp code in a way that allows
// the C code to fix the return address in case of a GC.
// Currently only needed on ARM.
@@ -558,14 +856,13 @@ class StringDictionaryLookupStub: public CodeStub {
void Generate(MacroAssembler* masm);
- MUST_USE_RESULT static MaybeObject* GenerateNegativeLookup(
- MacroAssembler* masm,
- Label* miss,
- Label* done,
- Register receiver,
- Register properties,
- String* name,
- Register scratch0);
+ static void GenerateNegativeLookup(MacroAssembler* masm,
+ Label* miss,
+ Label* done,
+ Register receiver,
+ Register properties,
+ Handle<String> name,
+ Register scratch0);
static void GeneratePositiveLookup(MacroAssembler* masm,
Label* miss,
@@ -575,6 +872,8 @@ class StringDictionaryLookupStub: public CodeStub {
Register r0,
Register r1);
+ virtual bool SometimesSetsUpAFrame() { return false; }
+
private:
static const int kInlinedProbes = 4;
static const int kTotalProbes = 20;
@@ -587,7 +886,7 @@ class StringDictionaryLookupStub: public CodeStub {
StringDictionary::kHeaderSize +
StringDictionary::kElementsStartIndex * kPointerSize;
- Major MajorKey() { return StringDictionaryNegativeLookup; }
+ Major MajorKey() { return StringDictionaryLookup; }
int MinorKey() {
return LookupModeBits::encode(mode_);
diff --git a/deps/v8/src/arm/codegen-arm.cc b/deps/v8/src/arm/codegen-arm.cc
index bf748a9b6..3371e8a6b 100644
--- a/deps/v8/src/arm/codegen-arm.cc
+++ b/deps/v8/src/arm/codegen-arm.cc
@@ -30,22 +30,367 @@
#if defined(V8_TARGET_ARCH_ARM)
#include "codegen.h"
+#include "macro-assembler.h"
namespace v8 {
namespace internal {
+#define __ ACCESS_MASM(masm)
+
// -------------------------------------------------------------------------
// Platform-specific RuntimeCallHelper functions.
void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
- masm->EnterInternalFrame();
+ masm->EnterFrame(StackFrame::INTERNAL);
+ ASSERT(!masm->has_frame());
+ masm->set_has_frame(true);
}
void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
- masm->LeaveInternalFrame();
+ masm->LeaveFrame(StackFrame::INTERNAL);
+ ASSERT(masm->has_frame());
+ masm->set_has_frame(false);
+}
+
+
+// -------------------------------------------------------------------------
+// Code generators
+
+void ElementsTransitionGenerator::GenerateSmiOnlyToObject(
+ MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r0 : value
+ // -- r1 : key
+ // -- r2 : receiver
+ // -- lr : return address
+ // -- r3 : target map, scratch for subsequent call
+ // -- r4 : scratch (elements)
+ // -----------------------------------
+ // Set transitioned map.
+ __ str(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
+ __ RecordWriteField(r2,
+ HeapObject::kMapOffset,
+ r3,
+ r9,
+ kLRHasNotBeenSaved,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+}
+
+
+void ElementsTransitionGenerator::GenerateSmiOnlyToDouble(
+ MacroAssembler* masm, Label* fail) {
+ // ----------- S t a t e -------------
+ // -- r0 : value
+ // -- r1 : key
+ // -- r2 : receiver
+ // -- lr : return address
+ // -- r3 : target map, scratch for subsequent call
+ // -- r4 : scratch (elements)
+ // -----------------------------------
+ Label loop, entry, convert_hole, gc_required;
+ bool vfp3_supported = CpuFeatures::IsSupported(VFP3);
+ __ push(lr);
+
+ __ ldr(r4, FieldMemOperand(r2, JSObject::kElementsOffset));
+ __ ldr(r5, FieldMemOperand(r4, FixedArray::kLengthOffset));
+ // r4: source FixedArray
+ // r5: number of elements (smi-tagged)
+
+ // Allocate new FixedDoubleArray.
+ __ mov(lr, Operand(FixedDoubleArray::kHeaderSize));
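+  // r5 holds the smi-tagged length (length << 1), so LSL 2 scales it by
+  // kDoubleSize (8 bytes per element).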
+ __ add(lr, lr, Operand(r5, LSL, 2));
+ __ AllocateInNewSpace(lr, r6, r7, r9, &gc_required, NO_ALLOCATION_FLAGS);
+ // r6: destination FixedDoubleArray, not tagged as heap object
+ __ LoadRoot(r9, Heap::kFixedDoubleArrayMapRootIndex);
+ __ str(r9, MemOperand(r6, HeapObject::kMapOffset));
+ // Set destination FixedDoubleArray's length.
+ __ str(r5, MemOperand(r6, FixedDoubleArray::kLengthOffset));
+ // Update receiver's map.
+
+ __ str(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
+ __ RecordWriteField(r2,
+ HeapObject::kMapOffset,
+ r3,
+ r9,
+ kLRHasBeenSaved,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ // Replace receiver's backing store with newly created FixedDoubleArray.
+ __ add(r3, r6, Operand(kHeapObjectTag));
+ __ str(r3, FieldMemOperand(r2, JSObject::kElementsOffset));
+ __ RecordWriteField(r2,
+ JSObject::kElementsOffset,
+ r3,
+ r9,
+ kLRHasBeenSaved,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+
+ // Prepare for conversion loop.
+ __ add(r3, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ add(r7, r6, Operand(FixedDoubleArray::kHeaderSize));
+ __ add(r6, r7, Operand(r5, LSL, 2));
+ __ mov(r4, Operand(kHoleNanLower32));
+ __ mov(r5, Operand(kHoleNanUpper32));
+ // r3: begin of source FixedArray element fields, not tagged
+ // r4: kHoleNanLower32
+ // r5: kHoleNanUpper32
+ // r6: end of destination FixedDoubleArray, not tagged
+ // r7: begin of FixedDoubleArray element fields, not tagged
+ if (!vfp3_supported) __ Push(r1, r0);
+
+ __ b(&entry);
+
+ // Call into runtime if GC is required.
+ __ bind(&gc_required);
+ __ pop(lr);
+ __ b(fail);
+
+ // Convert and copy elements.
+ __ bind(&loop);
+ __ ldr(r9, MemOperand(r3, 4, PostIndex));
+ // r9: current element
+ __ JumpIfNotSmi(r9, &convert_hole);
+
+ // Normal smi, convert to double and store.
+ __ SmiUntag(r9);
+ if (vfp3_supported) {
+ CpuFeatures::Scope scope(VFP3);
+ __ vmov(s0, r9);
+ __ vcvt_f64_s32(d0, s0);
+ __ vstr(d0, r7, 0);
+ __ add(r7, r7, Operand(8));
+ } else {
+ FloatingPointHelper::ConvertIntToDouble(masm,
+ r9,
+ FloatingPointHelper::kCoreRegisters,
+ d0,
+ r0,
+ r1,
+ lr,
+ s0);
+ __ Strd(r0, r1, MemOperand(r7, 8, PostIndex));
+ }
+ __ b(&entry);
+
+ // Hole found, store the-hole NaN.
+ __ bind(&convert_hole);
+ if (FLAG_debug_code) {
+ __ CompareRoot(r9, Heap::kTheHoleValueRootIndex);
+ __ Assert(eq, "object found in smi-only array");
+ }
+ __ Strd(r4, r5, MemOperand(r7, 8, PostIndex));
+
+ __ bind(&entry);
+ __ cmp(r7, r6);
+ __ b(lt, &loop);
+
+ if (!vfp3_supported) __ Pop(r1, r0);
+ __ pop(lr);
+}
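
[Editor's note] The loop above writes the hole as a raw (kHoleNanLower32, kHoleNanUpper32) word pair and converts smis with a single arithmetic shift before the int-to-double move. A host-side sketch of both encodings; the hole bit pattern below is a placeholder, the real constants are defined in V8's headers:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    // Placeholder hole pattern; V8's actual kHoleNanUpper32/kHoleNanLower32
    // values differ, but the technique is the same.
    const std::uint32_t kHoleUpper = 0x7ff7ffffu;
    const std::uint32_t kHoleLower = 0xffffffffu;

    // Mirrors the Strd of (r4, r5): lower word first on little-endian ARM.
    double MakeHole() {
      std::uint64_t bits =
          (static_cast<std::uint64_t>(kHoleUpper) << 32) | kHoleLower;
      double d;
      std::memcpy(&d, &bits, sizeof d);
      return d;
    }

    // Only the upper 32 bits need inspecting to recognize the hole.
    bool IsHole(double d) {
      std::uint64_t bits;
      std::memcpy(&bits, &d, sizeof bits);
      return static_cast<std::uint32_t>(bits >> 32) == kHoleUpper;
    }

    // Mirrors SmiUntag + vcvt_f64_s32: drop the tag bit, then widen.
    double SmiToDouble(std::int32_t tagged) {
      return static_cast<double>(tagged >> 1);
    }

    int main() {
      assert(IsHole(MakeHole()));
      assert(!IsHole(1.5));
      assert(SmiToDouble(7 << 1) == 7.0);
      return 0;
    }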
+
+
+void ElementsTransitionGenerator::GenerateDoubleToObject(
+ MacroAssembler* masm, Label* fail) {
+ // ----------- S t a t e -------------
+ // -- r0 : value
+ // -- r1 : key
+ // -- r2 : receiver
+ // -- lr : return address
+ // -- r3 : target map, scratch for subsequent call
+ // -- r4 : scratch (elements)
+ // -----------------------------------
+ Label entry, loop, convert_hole, gc_required;
+
+ __ push(lr);
+ __ Push(r3, r2, r1, r0);
+
+ __ ldr(r4, FieldMemOperand(r2, JSObject::kElementsOffset));
+ __ ldr(r5, FieldMemOperand(r4, FixedArray::kLengthOffset));
+ // r4: source FixedDoubleArray
+ // r5: number of elements (smi-tagged)
+
+ // Allocate new FixedArray.
+ __ mov(r0, Operand(FixedDoubleArray::kHeaderSize));
+ __ add(r0, r0, Operand(r5, LSL, 1));
+ __ AllocateInNewSpace(r0, r6, r7, r9, &gc_required, NO_ALLOCATION_FLAGS);
+ // r6: destination FixedArray, not tagged as heap object
+ __ LoadRoot(r9, Heap::kFixedArrayMapRootIndex);
+ __ str(r9, MemOperand(r6, HeapObject::kMapOffset));
+ // Set destination FixedArray's length.
+ __ str(r5, MemOperand(r6, FixedDoubleArray::kLengthOffset));
+
+ // Prepare for conversion loop.
+ __ add(r4, r4, Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag + 4));
+ __ add(r3, r6, Operand(FixedArray::kHeaderSize));
+ __ add(r6, r6, Operand(kHeapObjectTag));
+ __ add(r5, r3, Operand(r5, LSL, 1));
+ __ LoadRoot(r7, Heap::kTheHoleValueRootIndex);
+ __ LoadRoot(r9, Heap::kHeapNumberMapRootIndex);
+ // Using offset addresses in r4 to take full advantage of post-indexing.
+ // r3: begin of destination FixedArray element fields, not tagged
+ // r4: begin of source FixedDoubleArray element fields, not tagged, +4
+ // r5: end of destination FixedArray, not tagged
+ // r6: destination FixedArray
+ // r7: the-hole pointer
+ // r9: heap number map
+ __ b(&entry);
+
+ // Call into runtime if GC is required.
+ __ bind(&gc_required);
+ __ Pop(r3, r2, r1, r0);
+ __ pop(lr);
+ __ b(fail);
+
+ __ bind(&loop);
+ __ ldr(r1, MemOperand(r4, 8, PostIndex));
+ // r1: current element's upper 32 bit
+ // r4: address of next element's upper 32 bit
+ __ cmp(r1, Operand(kHoleNanUpper32));
+ __ b(eq, &convert_hole);
+
+ // Non-hole double, copy value into a heap number.
+ __ AllocateHeapNumber(r2, r0, lr, r9, &gc_required);
+ // r2: new heap number
+ __ ldr(r0, MemOperand(r4, 12, NegOffset));
+ __ Strd(r0, r1, FieldMemOperand(r2, HeapNumber::kValueOffset));
+ __ mov(r0, r3);
+ __ str(r2, MemOperand(r3, 4, PostIndex));
+ __ RecordWrite(r6,
+ r0,
+ r2,
+ kLRHasBeenSaved,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ __ b(&entry);
+
+ // Replace the-hole NaN with the-hole pointer.
+ __ bind(&convert_hole);
+ __ str(r7, MemOperand(r3, 4, PostIndex));
+
+ __ bind(&entry);
+ __ cmp(r3, r5);
+ __ b(lt, &loop);
+
+ __ Pop(r3, r2, r1, r0);
+ // Update receiver's map.
+ __ str(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
+ __ RecordWriteField(r2,
+ HeapObject::kMapOffset,
+ r3,
+ r9,
+ kLRHasBeenSaved,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ // Replace receiver's backing store with newly created and filled FixedArray.
+ __ str(r6, FieldMemOperand(r2, JSObject::kElementsOffset));
+ __ RecordWriteField(r2,
+ JSObject::kElementsOffset,
+ r6,
+ r9,
+ kLRHasBeenSaved,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ __ pop(lr);
+}
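
[Editor's note] GenerateDoubleToObject keeps its source cursor r4 on the *upper* half of each double (hence the "+ 4" in the setup) so one post-indexed load both fetches the hole-detection word and advances the cursor; the matching lower half then sits at a fixed negative offset. A scalar sketch of that traversal, using the same illustrative hole constant as above:

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    const std::uint32_t kHoleUpper = 0x7ff7ffffu;  // placeholder, see above

    // Walk 64-bit slots via a pointer to each slot's upper 32-bit word,
    // stepping two words per element, like the post-indexed ldr in the loop.
    void Classify(const std::uint32_t* words, std::size_t count) {
      const std::uint32_t* upper = words + 1;  // +4: first element's upper half
      for (std::size_t i = 0; i < count; i++) {
        std::uint32_t hi = *upper;
        upper += 2;  // post-index by 8 bytes
        if (hi == kHoleUpper) {
          std::printf("slot %zu: the-hole\n", i);
        } else {
          std::printf("slot %zu: heap number (upper=%08x)\n", i, hi);
        }
      }
    }

    int main() {
      // Two doubles, lower word then upper word: 1.0, then a hole.
      std::vector<std::uint32_t> words = {0, 0x3ff00000u, 0xffffffffu,
                                          kHoleUpper};
      Classify(words.data(), 2);
      return 0;
    }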
+
+
+void StringCharLoadGenerator::Generate(MacroAssembler* masm,
+ Register string,
+ Register index,
+ Register result,
+ Label* call_runtime) {
+ // Fetch the instance type of the receiver into result register.
+ __ ldr(result, FieldMemOperand(string, HeapObject::kMapOffset));
+ __ ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
+
+ // We need special handling for indirect strings.
+ Label check_sequential;
+ __ tst(result, Operand(kIsIndirectStringMask));
+ __ b(eq, &check_sequential);
+
+ // Dispatch on the indirect string shape: slice or cons.
+ Label cons_string;
+ __ tst(result, Operand(kSlicedNotConsMask));
+ __ b(eq, &cons_string);
+
+ // Handle slices.
+ Label indirect_string_loaded;
+ __ ldr(result, FieldMemOperand(string, SlicedString::kOffsetOffset));
+ __ add(index, index, Operand(result, ASR, kSmiTagSize));
+ __ ldr(string, FieldMemOperand(string, SlicedString::kParentOffset));
+ __ jmp(&indirect_string_loaded);
+
+ // Handle cons strings.
+ // Check whether the right hand side is the empty string (i.e. if
+ // this is really a flat string in a cons string). If that is not
+ // the case we would rather go to the runtime system now to flatten
+ // the string.
+ __ bind(&cons_string);
+ __ ldr(result, FieldMemOperand(string, ConsString::kSecondOffset));
+ __ LoadRoot(ip, Heap::kEmptyStringRootIndex);
+ __ cmp(result, ip);
+ __ b(ne, call_runtime);
+ // Get the first of the two strings and load its instance type.
+ __ ldr(string, FieldMemOperand(string, ConsString::kFirstOffset));
+
+ __ bind(&indirect_string_loaded);
+ __ ldr(result, FieldMemOperand(string, HeapObject::kMapOffset));
+ __ ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
+
+ // Distinguish sequential and external strings. Only these two string
+ // representations can reach here (slices and flat cons strings have been
+ // reduced to the underlying sequential or external string).
+ Label external_string, check_encoding;
+ __ bind(&check_sequential);
+ STATIC_ASSERT(kSeqStringTag == 0);
+ __ tst(result, Operand(kStringRepresentationMask));
+ __ b(ne, &external_string);
+
+ // Prepare sequential strings.
+ STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
+ __ add(string,
+ string,
+ Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+ __ jmp(&check_encoding);
+
+ // Handle external strings.
+ __ bind(&external_string);
+ if (FLAG_debug_code) {
+ // Assert that we do not have a cons or slice (indirect strings) here.
+ // Sequential strings have already been ruled out.
+ __ tst(result, Operand(kIsIndirectStringMask));
+ __ Assert(eq, "external string expected, but not found");
+ }
+ // Rule out short external strings.
+ STATIC_CHECK(kShortExternalStringTag != 0);
+ __ tst(result, Operand(kShortExternalStringMask));
+ __ b(ne, call_runtime);
+ __ ldr(string, FieldMemOperand(string, ExternalString::kResourceDataOffset));
+
+ Label ascii, done;
+ __ bind(&check_encoding);
+ STATIC_ASSERT(kTwoByteStringTag == 0);
+ __ tst(result, Operand(kStringEncodingMask));
+ __ b(ne, &ascii);
+ // Two-byte string.
+ __ ldrh(result, MemOperand(string, index, LSL, 1));
+ __ jmp(&done);
+ __ bind(&ascii);
+ // Ascii string.
+ __ ldrb(result, MemOperand(string, index));
+ __ bind(&done);
}
+#undef __
} } // namespace v8::internal
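
[Editor's note] StringCharLoadGenerator::Generate compresses a four-way shape dispatch (sliced, cons, sequential, external) into a few mask tests on the instance type. The control flow is easier to see in scalar form; the shape enum and struct below are hypothetical simplifications of V8's instance-type bits, with nullptr standing in for the empty second child of a flat cons string:

    #include <cassert>
    #include <cstdint>

    enum Shape { kSeqOneByte, kSeqTwoByte, kExternalOneByte, kCons, kSliced };

    struct Str {
      Shape shape;
      const void* data;    // sequential/external payload
      const Str* first;    // cons first child / slice parent
      const Str* second;   // cons second child; nullptr means "flat"
      int offset;          // slice start offset
    };

    // Returns the character, or -1 when the generated code would bail out
    // to the runtime to flatten the string first.
    int CharAt(const Str* s, int index) {
      if (s->shape == kSliced) {        // adjust index, load parent
        index += s->offset;
        s = s->first;
      } else if (s->shape == kCons) {   // only flat cons strings are handled
        if (s->second != nullptr) return -1;
        s = s->first;
      }
      if (s->shape == kSeqTwoByte)
        return static_cast<const std::uint16_t*>(s->data)[index];
      return static_cast<const std::uint8_t*>(s->data)[index];
    }

    int main() {
      const char payload[] = "hello";
      Str seq = {kSeqOneByte, payload, nullptr, nullptr, 0};
      Str slice = {kSliced, nullptr, &seq, nullptr, 1};
      assert(CharAt(&slice, 0) == 'e');
      return 0;
    }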
diff --git a/deps/v8/src/arm/codegen-arm.h b/deps/v8/src/arm/codegen-arm.h
index d27982aba..c340e6b10 100644
--- a/deps/v8/src/arm/codegen-arm.h
+++ b/deps/v8/src/arm/codegen-arm.h
@@ -29,7 +29,6 @@
#define V8_ARM_CODEGEN_ARM_H_
#include "ast.h"
-#include "code-stubs-arm.h"
#include "ic-inl.h"
namespace v8 {
@@ -69,21 +68,26 @@ class CodeGenerator: public AstVisitor {
int pos,
bool right_here = false);
- // Constants related to patching of inlined load/store.
- static int GetInlinedKeyedLoadInstructionsAfterPatch() {
- return FLAG_debug_code ? 32 : 13;
- }
- static const int kInlinedKeyedStoreInstructionsAfterPatch = 8;
- static int GetInlinedNamedStoreInstructionsAfterPatch() {
- ASSERT(Isolate::Current()->inlined_write_barrier_size() != -1);
- return Isolate::Current()->inlined_write_barrier_size() + 4;
- }
-
private:
DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
};
+class StringCharLoadGenerator : public AllStatic {
+ public:
+ // Generates the code for handling different string types and loading the
+ // indexed character into |result|. We expect |index| as untagged input and
+ // |result| as untagged output.
+ static void Generate(MacroAssembler* masm,
+ Register string,
+ Register index,
+ Register result,
+ Label* call_runtime);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(StringCharLoadGenerator);
+};
+
} } // namespace v8::internal
#endif // V8_ARM_CODEGEN_ARM_H_
diff --git a/deps/v8/src/arm/constants-arm.h b/deps/v8/src/arm/constants-arm.h
index 823c6ff7e..49b8db79f 100644
--- a/deps/v8/src/arm/constants-arm.h
+++ b/deps/v8/src/arm/constants-arm.h
@@ -87,22 +87,21 @@ namespace v8 {
namespace internal {
// Constant pool marker.
-static const int kConstantPoolMarkerMask = 0xffe00000;
-static const int kConstantPoolMarker = 0x0c000000;
-static const int kConstantPoolLengthMask = 0x001ffff;
+const int kConstantPoolMarkerMask = 0xffe00000;
+const int kConstantPoolMarker = 0x0c000000;
+const int kConstantPoolLengthMask = 0x001ffff;
// Number of registers in normal ARM mode.
-static const int kNumRegisters = 16;
+const int kNumRegisters = 16;
// VFP support.
-static const int kNumVFPSingleRegisters = 32;
-static const int kNumVFPDoubleRegisters = 16;
-static const int kNumVFPRegisters =
- kNumVFPSingleRegisters + kNumVFPDoubleRegisters;
+const int kNumVFPSingleRegisters = 32;
+const int kNumVFPDoubleRegisters = 16;
+const int kNumVFPRegisters = kNumVFPSingleRegisters + kNumVFPDoubleRegisters;
// PC is register 15.
-static const int kPCRegister = 15;
-static const int kNoRegister = -1;
+const int kPCRegister = 15;
+const int kNoRegister = -1;
// -----------------------------------------------------------------------------
// Conditions.
@@ -371,9 +370,9 @@ enum SoftwareInterruptCodes {
// stop
kStopCode = 1 << 23
};
-static const uint32_t kStopCodeMask = kStopCode - 1;
-static const uint32_t kMaxStopCode = kStopCode - 1;
-static const int32_t kDefaultStopCode = -1;
+const uint32_t kStopCodeMask = kStopCode - 1;
+const uint32_t kMaxStopCode = kStopCode - 1;
+const int32_t kDefaultStopCode = -1;
// Type of VFP register. Determines register encoding.
@@ -391,17 +390,17 @@ enum VFPConversionMode {
// This mask does not include the "inexact" or "input denormal" cumulative
// exceptions flags, because we usually don't want to check for it.
-static const uint32_t kVFPExceptionMask = 0xf;
-static const uint32_t kVFPInvalidOpExceptionBit = 1 << 0;
-static const uint32_t kVFPOverflowExceptionBit = 1 << 2;
-static const uint32_t kVFPUnderflowExceptionBit = 1 << 3;
-static const uint32_t kVFPInexactExceptionBit = 1 << 4;
-static const uint32_t kVFPFlushToZeroMask = 1 << 24;
+const uint32_t kVFPExceptionMask = 0xf;
+const uint32_t kVFPInvalidOpExceptionBit = 1 << 0;
+const uint32_t kVFPOverflowExceptionBit = 1 << 2;
+const uint32_t kVFPUnderflowExceptionBit = 1 << 3;
+const uint32_t kVFPInexactExceptionBit = 1 << 4;
+const uint32_t kVFPFlushToZeroMask = 1 << 24;
-static const uint32_t kVFPNConditionFlagBit = 1 << 31;
-static const uint32_t kVFPZConditionFlagBit = 1 << 30;
-static const uint32_t kVFPCConditionFlagBit = 1 << 29;
-static const uint32_t kVFPVConditionFlagBit = 1 << 28;
+const uint32_t kVFPNConditionFlagBit = 1 << 31;
+const uint32_t kVFPZConditionFlagBit = 1 << 30;
+const uint32_t kVFPCConditionFlagBit = 1 << 29;
+const uint32_t kVFPVConditionFlagBit = 1 << 28;
// VFP rounding modes. See ARM DDI 0406B Page A2-29.
@@ -418,7 +417,7 @@ enum VFPRoundingMode {
kRoundToZero = RZ
};
-static const uint32_t kVFPRoundingModeMask = 3 << 22;
+const uint32_t kVFPRoundingModeMask = 3 << 22;
enum CheckForInexactConversion {
kCheckForInexactConversion,
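
[Editor's note] The constants-arm.h hunks are purely mechanical: at namespace scope a C++ const object already has internal linkage, so the static keyword was redundant and the diff drops it. Both spellings below declare the same thing:

    // Equivalent declarations at namespace scope in C++ (unlike in C,
    // where a plain const would have external linkage):
    static const int kNumRegistersOld = 16;  // pre-change spelling
    const int kNumRegistersNew = 16;         // post-change spelling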
diff --git a/deps/v8/src/arm/debug-arm.cc b/deps/v8/src/arm/debug-arm.cc
index 07a22722c..837410302 100644
--- a/deps/v8/src/arm/debug-arm.cc
+++ b/deps/v8/src/arm/debug-arm.cc
@@ -132,55 +132,57 @@ void BreakLocationIterator::ClearDebugBreakAtSlot() {
static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
RegList object_regs,
RegList non_object_regs) {
- __ EnterInternalFrame();
-
- // Store the registers containing live values on the expression stack to
- // make sure that these are correctly updated during GC. Non object values
- // are stored as a smi causing it to be untouched by GC.
- ASSERT((object_regs & ~kJSCallerSaved) == 0);
- ASSERT((non_object_regs & ~kJSCallerSaved) == 0);
- ASSERT((object_regs & non_object_regs) == 0);
- if ((object_regs | non_object_regs) != 0) {
- for (int i = 0; i < kNumJSCallerSaved; i++) {
- int r = JSCallerSavedCode(i);
- Register reg = { r };
- if ((non_object_regs & (1 << r)) != 0) {
- if (FLAG_debug_code) {
- __ tst(reg, Operand(0xc0000000));
- __ Assert(eq, "Unable to encode value as smi");
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Store the registers containing live values on the expression stack to
+ // make sure that these are correctly updated during GC. Non-object
+ // values are stored as smis, causing them to be untouched by GC.
+ ASSERT((object_regs & ~kJSCallerSaved) == 0);
+ ASSERT((non_object_regs & ~kJSCallerSaved) == 0);
+ ASSERT((object_regs & non_object_regs) == 0);
+ if ((object_regs | non_object_regs) != 0) {
+ for (int i = 0; i < kNumJSCallerSaved; i++) {
+ int r = JSCallerSavedCode(i);
+ Register reg = { r };
+ if ((non_object_regs & (1 << r)) != 0) {
+ if (FLAG_debug_code) {
+ __ tst(reg, Operand(0xc0000000));
+ __ Assert(eq, "Unable to encode value as smi");
+ }
+ __ mov(reg, Operand(reg, LSL, kSmiTagSize));
}
- __ mov(reg, Operand(reg, LSL, kSmiTagSize));
}
+ __ stm(db_w, sp, object_regs | non_object_regs);
}
- __ stm(db_w, sp, object_regs | non_object_regs);
- }
#ifdef DEBUG
- __ RecordComment("// Calling from debug break to runtime - come in - over");
+ __ RecordComment("// Calling from debug break to runtime - come in - over");
#endif
- __ mov(r0, Operand(0, RelocInfo::NONE)); // no arguments
- __ mov(r1, Operand(ExternalReference::debug_break(masm->isolate())));
-
- CEntryStub ceb(1);
- __ CallStub(&ceb);
-
- // Restore the register values from the expression stack.
- if ((object_regs | non_object_regs) != 0) {
- __ ldm(ia_w, sp, object_regs | non_object_regs);
- for (int i = 0; i < kNumJSCallerSaved; i++) {
- int r = JSCallerSavedCode(i);
- Register reg = { r };
- if ((non_object_regs & (1 << r)) != 0) {
- __ mov(reg, Operand(reg, LSR, kSmiTagSize));
- }
- if (FLAG_debug_code &&
- (((object_regs |non_object_regs) & (1 << r)) == 0)) {
- __ mov(reg, Operand(kDebugZapValue));
+ __ mov(r0, Operand(0, RelocInfo::NONE)); // no arguments
+ __ mov(r1, Operand(ExternalReference::debug_break(masm->isolate())));
+
+ CEntryStub ceb(1);
+ __ CallStub(&ceb);
+
+ // Restore the register values from the expression stack.
+ if ((object_regs | non_object_regs) != 0) {
+ __ ldm(ia_w, sp, object_regs | non_object_regs);
+ for (int i = 0; i < kNumJSCallerSaved; i++) {
+ int r = JSCallerSavedCode(i);
+ Register reg = { r };
+ if ((non_object_regs & (1 << r)) != 0) {
+ __ mov(reg, Operand(reg, LSR, kSmiTagSize));
+ }
+ if (FLAG_debug_code &&
+ (((object_regs | non_object_regs) & (1 << r)) == 0)) {
+ __ mov(reg, Operand(kDebugZapValue));
+ }
}
}
- }
- __ LeaveInternalFrame();
+ // Leave the internal frame.
+ }
// Now that the break point has been handled, resume normal execution by
// jumping to the target address intended by the caller and that was
@@ -265,11 +267,11 @@ void Debug::GenerateReturnDebugBreak(MacroAssembler* masm) {
}
-void Debug::GenerateStubNoRegistersDebugBreak(MacroAssembler* masm) {
+void Debug::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // No registers used on entry.
+ // -- r1 : function
// -----------------------------------
- Generate_DebugBreakCallHelper(masm, 0, 0);
+ Generate_DebugBreakCallHelper(masm, r1.bit(), 0);
}
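
[Editor's note] The debug check in Generate_DebugBreakCallHelper above (tst reg, #0xc0000000 followed by Assert(eq, ...)) is what makes the smi-encoding trick safe: a register value survives the one-bit left shift exactly when its top two bits are clear. A quick sketch of that invariant:

    #include <cassert>
    #include <cstdint>

    bool FitsSmi(std::uint32_t v) { return (v & 0xc0000000u) == 0; }

    std::uint32_t SmiTag(std::uint32_t v) {
      assert(FitsSmi(v));  // mirrors: tst reg, #0xc0000000; Assert(eq, ...)
      return v << 1;
    }

    std::uint32_t SmiUntag(std::uint32_t v) { return v >> 1; }

    int main() {
      std::uint32_t raw = 0x12345678u;
      assert(SmiUntag(SmiTag(raw)) == raw);  // round-trips losslessly
      return 0;
    }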
diff --git a/deps/v8/src/arm/deoptimizer-arm.cc b/deps/v8/src/arm/deoptimizer-arm.cc
index 00357f76d..4b54b6dbc 100644
--- a/deps/v8/src/arm/deoptimizer-arm.cc
+++ b/deps/v8/src/arm/deoptimizer-arm.cc
@@ -44,12 +44,6 @@ int Deoptimizer::patch_size() {
}
-void Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code) {
- // Nothing to do. No new relocation information is written for lazy
- // deoptimization on ARM.
-}
-
-
void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
HandleScope scope;
AssertNoAllocation no_allocation;
@@ -58,66 +52,51 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
// Get the optimized code.
Code* code = function->code();
+ Address code_start_address = code->instruction_start();
// Invalidate the relocation information, as it will become invalid by the
// code patching below, and is not needed any more.
code->InvalidateRelocation();
- // For each return after a safepoint insert an absolute call to the
- // corresponding deoptimization entry.
- unsigned last_pc_offset = 0;
- SafepointTable table(function->code());
- for (unsigned i = 0; i < table.length(); i++) {
- unsigned pc_offset = table.GetPcOffset(i);
- SafepointEntry safepoint_entry = table.GetEntry(i);
- int deoptimization_index = safepoint_entry.deoptimization_index();
- int gap_code_size = safepoint_entry.gap_code_size();
- // Check that we did not shoot past next safepoint.
- CHECK(pc_offset >= last_pc_offset);
+ // For each LLazyBailout instruction insert a call to the corresponding
+ // deoptimization entry.
+ DeoptimizationInputData* deopt_data =
+ DeoptimizationInputData::cast(code->deoptimization_data());
#ifdef DEBUG
- // Destroy the code which is not supposed to be run again.
- int instructions = (pc_offset - last_pc_offset) / Assembler::kInstrSize;
- CodePatcher destroyer(code->instruction_start() + last_pc_offset,
- instructions);
- for (int x = 0; x < instructions; x++) {
- destroyer.masm()->bkpt(0);
- }
+ Address prev_call_address = NULL;
#endif
- last_pc_offset = pc_offset;
- if (deoptimization_index != Safepoint::kNoDeoptimizationIndex) {
- Address deoptimization_entry = Deoptimizer::GetDeoptimizationEntry(
- deoptimization_index, Deoptimizer::LAZY);
- last_pc_offset += gap_code_size;
- int call_size_in_bytes = MacroAssembler::CallSize(deoptimization_entry,
- RelocInfo::NONE);
- int call_size_in_words = call_size_in_bytes / Assembler::kInstrSize;
- ASSERT(call_size_in_bytes % Assembler::kInstrSize == 0);
- ASSERT(call_size_in_bytes <= patch_size());
- CodePatcher patcher(code->instruction_start() + last_pc_offset,
- call_size_in_words);
- patcher.masm()->Call(deoptimization_entry, RelocInfo::NONE);
- last_pc_offset += call_size_in_bytes;
- }
- }
-
-
+ for (int i = 0; i < deopt_data->DeoptCount(); i++) {
+ if (deopt_data->Pc(i)->value() == -1) continue;
+ Address call_address = code_start_address + deopt_data->Pc(i)->value();
+ Address deopt_entry = GetDeoptimizationEntry(i, LAZY);
+ int call_size_in_bytes = MacroAssembler::CallSize(deopt_entry,
+ RelocInfo::NONE);
+ int call_size_in_words = call_size_in_bytes / Assembler::kInstrSize;
+ ASSERT(call_size_in_bytes % Assembler::kInstrSize == 0);
+ ASSERT(call_size_in_bytes <= patch_size());
+ CodePatcher patcher(call_address, call_size_in_words);
+ patcher.masm()->Call(deopt_entry, RelocInfo::NONE);
+ ASSERT(prev_call_address == NULL ||
+ call_address >= prev_call_address + patch_size());
+ ASSERT(call_address + patch_size() <= code->instruction_end());
#ifdef DEBUG
- // Destroy the code which is not supposed to be run again.
- int instructions =
- (code->safepoint_table_offset() - last_pc_offset) / Assembler::kInstrSize;
- CodePatcher destroyer(code->instruction_start() + last_pc_offset,
- instructions);
- for (int x = 0; x < instructions; x++) {
- destroyer.masm()->bkpt(0);
- }
+ prev_call_address = call_address;
#endif
+ }
+
+ Isolate* isolate = code->GetIsolate();
// Add the deoptimizing code to the list.
DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code);
- DeoptimizerData* data = code->GetIsolate()->deoptimizer_data();
+ DeoptimizerData* data = isolate->deoptimizer_data();
node->set_next(data->deoptimizing_code_list_);
data->deoptimizing_code_list_ = node;
+ // We might be in the middle of incremental marking with compaction.
+ // Tell collector to treat this code object in a special way and
+ // ignore all slots that might have been recorded on it.
+ isolate->heap()->mark_compact_collector()->InvalidateCode(code);
+
// Set the code for the function to non-optimized version.
function->ReplaceCode(function->shared()->code());
@@ -125,16 +104,12 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
PrintF("[forced deoptimization: ");
function->PrintName();
PrintF(" / %x]\n", reinterpret_cast<uint32_t>(function));
-#ifdef DEBUG
- if (FLAG_print_code) {
- code->PrintLn();
- }
-#endif
}
}
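
[Editor's note] The rewritten DeoptimizeFunction no longer scans safepoints; it walks the deoptimization input data and patches a call at each recorded LLazyBailout pc, asserting that the patches stay in bounds and never overlap. A structural sketch of that loop, with the actual call emission elided and all types illustrative:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // -1 in the pc table means "no lazy-deopt call site for this entry".
    struct DeoptEntry { int pc_offset; };

    void PatchLazyDeopts(std::uint8_t* code_start, std::size_t code_size,
                         const std::vector<DeoptEntry>& table,
                         int patch_size) {
      std::uint8_t* prev_call_address = nullptr;
      for (std::size_t i = 0; i < table.size(); ++i) {
        if (table[i].pc_offset == -1) continue;
        std::uint8_t* call_address = code_start + table[i].pc_offset;
        // The same two invariants the diff asserts: no overlap, in bounds.
        assert(prev_call_address == nullptr ||
               call_address >= prev_call_address + patch_size);
        assert(call_address + patch_size <= code_start + code_size);
        // ... emit the call to deopt entry i at call_address (elided) ...
        prev_call_address = call_address;
      }
    }

    int main() {
      std::vector<std::uint8_t> code(64);
      PatchLazyDeopts(code.data(), code.size(), {{-1}, {0}, {16}}, 8);
      return 0;
    }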
-void Deoptimizer::PatchStackCheckCodeAt(Address pc_after,
+void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code,
+ Address pc_after,
Code* check_code,
Code* replacement_code) {
const int kInstrSize = Assembler::kInstrSize;
@@ -169,10 +144,14 @@ void Deoptimizer::PatchStackCheckCodeAt(Address pc_after,
reinterpret_cast<uint32_t>(check_code->entry()));
Memory::uint32_at(stack_check_address_pointer) =
reinterpret_cast<uint32_t>(replacement_code->entry());
+
+ unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
+ unoptimized_code, pc_after - 2 * kInstrSize, replacement_code);
}
-void Deoptimizer::RevertStackCheckCodeAt(Address pc_after,
+void Deoptimizer::RevertStackCheckCodeAt(Code* unoptimized_code,
+ Address pc_after,
Code* check_code,
Code* replacement_code) {
const int kInstrSize = Assembler::kInstrSize;
@@ -193,6 +172,9 @@ void Deoptimizer::RevertStackCheckCodeAt(Address pc_after,
reinterpret_cast<uint32_t>(replacement_code->entry()));
Memory::uint32_at(stack_check_address_pointer) =
reinterpret_cast<uint32_t>(check_code->entry());
+
+ check_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
+ unoptimized_code, pc_after - 2 * kInstrSize, check_code);
}
@@ -632,7 +614,10 @@ void Deoptimizer::EntryGenerator::Generate() {
__ mov(r5, Operand(ExternalReference::isolate_address()));
__ str(r5, MemOperand(sp, 1 * kPointerSize)); // Isolate.
// Call Deoptimizer::New().
- __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 6);
+ {
+ AllowExternalCallThatCantCauseGC scope(masm());
+ __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 6);
+ }
// Preserve "deoptimizer" object in register r0 and get the input
// frame descriptor pointer to r1 (deoptimizer->input_);
@@ -686,8 +671,11 @@ void Deoptimizer::EntryGenerator::Generate() {
// r0: deoptimizer object; r1: scratch.
__ PrepareCallCFunction(1, r1);
// Call Deoptimizer::ComputeOutputFrames().
- __ CallCFunction(
- ExternalReference::compute_output_frames_function(isolate), 1);
+ {
+ AllowExternalCallThatCantCauseGC scope(masm());
+ __ CallCFunction(
+ ExternalReference::compute_output_frames_function(isolate), 1);
+ }
__ pop(r0); // Restore deoptimizer object (class Deoptimizer).
// Replace the current (input) frame with the output frames.
@@ -703,7 +691,6 @@ void Deoptimizer::EntryGenerator::Generate() {
__ ldr(r3, MemOperand(r2, FrameDescription::frame_size_offset()));
__ bind(&inner_push_loop);
__ sub(r3, r3, Operand(sizeof(uint32_t)));
- // __ add(r6, r2, Operand(r3, LSL, 1));
__ add(r6, r2, Operand(r3));
__ ldr(r7, MemOperand(r6, FrameDescription::frame_content_offset()));
__ push(r7);
@@ -737,8 +724,9 @@ void Deoptimizer::EntryGenerator::Generate() {
__ pop(ip); // remove lr
// Set up the roots register.
- ExternalReference roots_address = ExternalReference::roots_address(isolate);
- __ mov(r10, Operand(roots_address));
+ ExternalReference roots_array_start =
+ ExternalReference::roots_array_start(isolate);
+ __ mov(r10, Operand(roots_array_start));
__ pop(ip); // remove pc
__ pop(r7); // get continuation, leave pc on stack
diff --git a/deps/v8/src/arm/frames-arm.h b/deps/v8/src/arm/frames-arm.h
index 26bbd82d0..184414986 100644
--- a/deps/v8/src/arm/frames-arm.h
+++ b/deps/v8/src/arm/frames-arm.h
@@ -35,22 +35,22 @@ namespace internal {
// The ARM ABI does not specify the usage of register r9, which may be reserved
// as the static base or thread register on some platforms, in which case we
// leave it alone. Adjust the value of kR9Available accordingly:
-static const int kR9Available = 1; // 1 if available to us, 0 if reserved
+const int kR9Available = 1; // 1 if available to us, 0 if reserved
// Register list in load/store instructions
// Note that the bit values must match those used in actual instruction encoding
-static const int kNumRegs = 16;
+const int kNumRegs = 16;
// Caller-saved/arguments registers
-static const RegList kJSCallerSaved =
+const RegList kJSCallerSaved =
1 << 0 | // r0 a1
1 << 1 | // r1 a2
1 << 2 | // r2 a3
1 << 3; // r3 a4
-static const int kNumJSCallerSaved = 4;
+const int kNumJSCallerSaved = 4;
typedef Object* JSCallerSavedBuffer[kNumJSCallerSaved];
@@ -60,7 +60,7 @@ int JSCallerSavedCode(int n);
// Callee-saved registers preserved when switching from C to JavaScript
-static const RegList kCalleeSaved =
+const RegList kCalleeSaved =
1 << 4 | // r4 v1
1 << 5 | // r5 v2
1 << 6 | // r6 v3
@@ -70,36 +70,45 @@ static const RegList kCalleeSaved =
1 << 10 | // r10 v7
1 << 11; // r11 v8 (fp in JavaScript code)
-static const int kNumCalleeSaved = 7 + kR9Available;
+// When calling into C++ (only for C++ calls that can't cause a GC).
+// The call code will take care of lr, fp, etc.
+const RegList kCallerSaved =
+ 1 << 0 | // r0
+ 1 << 1 | // r1
+ 1 << 2 | // r2
+ 1 << 3 | // r3
+ 1 << 9; // r9
+
+
+const int kNumCalleeSaved = 7 + kR9Available;
// Double registers d8 to d15 are callee-saved.
-static const int kNumDoubleCalleeSaved = 8;
+const int kNumDoubleCalleeSaved = 8;
// Number of registers for which space is reserved in safepoints. Must be a
// multiple of 8.
// TODO(regis): Only 8 registers may actually be sufficient. Revisit.
-static const int kNumSafepointRegisters = 16;
+const int kNumSafepointRegisters = 16;
// Define the list of registers actually saved at safepoints.
// Note that the number of saved registers may be smaller than the reserved
// space, i.e. kNumSafepointSavedRegisters <= kNumSafepointRegisters.
-static const RegList kSafepointSavedRegisters = kJSCallerSaved | kCalleeSaved;
-static const int kNumSafepointSavedRegisters =
- kNumJSCallerSaved + kNumCalleeSaved;
+const RegList kSafepointSavedRegisters = kJSCallerSaved | kCalleeSaved;
+const int kNumSafepointSavedRegisters = kNumJSCallerSaved + kNumCalleeSaved;
// ----------------------------------------------------
class StackHandlerConstants : public AllStatic {
public:
- static const int kNextOffset = 0 * kPointerSize;
- static const int kStateOffset = 1 * kPointerSize;
- static const int kContextOffset = 2 * kPointerSize;
- static const int kFPOffset = 3 * kPointerSize;
- static const int kPCOffset = 4 * kPointerSize;
+ static const int kNextOffset = 0 * kPointerSize;
+ static const int kCodeOffset = 1 * kPointerSize;
+ static const int kStateOffset = 2 * kPointerSize;
+ static const int kContextOffset = 3 * kPointerSize;
+ static const int kFPOffset = 4 * kPointerSize;
- static const int kSize = kPCOffset + kPointerSize;
+ static const int kSize = kFPOffset + kPointerSize;
};
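
[Editor's note] Note that the new handler layout is still five slots: the pc slot is gone and a code slot is inserted ahead of state, so kSize is unchanged even though every offset after kNextOffset shifted by one word. Spelled out with a compile-time check, with kPointerSize fixed at 4 as on ARM:

    const int kPointerSize = 4;  // ARM

    const int kNextOffset    = 0 * kPointerSize;
    const int kCodeOffset    = 1 * kPointerSize;  // newly inserted slot
    const int kStateOffset   = 2 * kPointerSize;
    const int kContextOffset = 3 * kPointerSize;
    const int kFPOffset      = 4 * kPointerSize;
    const int kSize          = kFPOffset + kPointerSize;

    static_assert(kSize == 5 * kPointerSize,
                  "still five words: pc slot dropped, code slot added");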
diff --git a/deps/v8/src/arm/full-codegen-arm.cc b/deps/v8/src/arm/full-codegen-arm.cc
index 50ed8b1da..fdd326618 100644
--- a/deps/v8/src/arm/full-codegen-arm.cc
+++ b/deps/v8/src/arm/full-codegen-arm.cc
@@ -39,6 +39,7 @@
#include "stub-cache.h"
#include "arm/code-stubs-arm.h"
+#include "arm/macro-assembler-arm.h"
namespace v8 {
namespace internal {
@@ -46,11 +47,6 @@ namespace internal {
#define __ ACCESS_MASM(masm_)
-static unsigned GetPropertyId(Property* property) {
- return property->id();
-}
-
-
// A patch site is a location in the code which it is possible to patch. This
// class has a number of methods to emit the code which is patchable and the
// method EmitPatchInfo to record a marker back to the patchable code. This
@@ -131,6 +127,8 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
ASSERT(info_ == NULL);
info_ = info;
scope_ = info->scope();
+ handler_table_ =
+ isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED);
SetFunctionPosition(function());
Comment cmnt(masm_, "[ function compiled by full code generator");
@@ -145,7 +143,7 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
// with undefined when called as functions (without an explicit
// receiver object). r5 is zero for method calls and non-zero for
// function calls.
- if (info->is_strict_mode() || info->is_native()) {
+ if (!info->is_classic_mode() || info->is_native()) {
Label ok;
__ cmp(r5, Operand(0));
__ b(eq, &ok);
@@ -155,6 +153,11 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
__ bind(&ok);
}
+ // Open a frame scope to indicate that there is a frame on the stack. The
+ // MANUAL indicates that the scope shouldn't actually generate code to set up
+ // the frame (that is done below).
+ FrameScope frame_scope(masm_, StackFrame::MANUAL);
+
int locals_count = info->scope()->num_stack_slots();
__ Push(lr, fp, cp, r1);
@@ -200,13 +203,12 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
// Load parameter from stack.
__ ldr(r0, MemOperand(fp, parameter_offset));
// Store it in the context.
- __ mov(r1, Operand(Context::SlotOffset(var->index())));
- __ str(r0, MemOperand(cp, r1));
- // Update the write barrier. This clobbers all involved
- // registers, so we have to use two more registers to avoid
- // clobbering cp.
- __ mov(r2, Operand(cp));
- __ RecordWrite(r2, Operand(r1), r3, r0);
+ MemOperand target = ContextOperand(cp, var->index());
+ __ str(r0, target);
+
+ // Update the write barrier.
+ __ RecordWriteContextSlot(
+ cp, target.offset(), r0, r3, kLRHasBeenSaved, kDontSaveFPRegs);
}
}
}
@@ -234,7 +236,7 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
// The stub will rewrite receiver and parameter count if the previous
// stack frame was an arguments adapter frame.
ArgumentsAccessStub::Type type;
- if (is_strict_mode()) {
+ if (!is_classic_mode()) {
type = ArgumentsAccessStub::NEW_STRICT;
} else if (function()->has_duplicate_parameters()) {
type = ArgumentsAccessStub::NEW_NON_STRICT_SLOW;
@@ -264,7 +266,10 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
// constant.
if (scope()->is_function_scope() && scope()->function() != NULL) {
int ignored = 0;
- EmitDeclaration(scope()->function(), Variable::CONST, NULL, &ignored);
+ VariableProxy* proxy = scope()->function();
+ ASSERT(proxy->var()->mode() == CONST ||
+ proxy->var()->mode() == CONST_HARMONY);
+ EmitDeclaration(proxy, proxy->var()->mode(), NULL, &ignored);
}
VisitDeclarations(scope()->declarations());
}
@@ -391,7 +396,7 @@ void FullCodeGenerator::TestContext::Plug(Variable* var) const {
ASSERT(var->IsStackAllocated() || var->IsContextSlot());
// For simplicity we always test the accumulator register.
codegen()->GetVar(result_register(), var);
- codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
+ codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
codegen()->DoTest(this);
}
@@ -414,7 +419,7 @@ void FullCodeGenerator::StackValueContext::Plug(
void FullCodeGenerator::TestContext::Plug(Heap::RootListIndex index) const {
- codegen()->PrepareForBailoutBeforeSplit(TOS_REG,
+ codegen()->PrepareForBailoutBeforeSplit(condition(),
true,
true_label_,
false_label_);
@@ -449,7 +454,7 @@ void FullCodeGenerator::StackValueContext::Plug(Handle<Object> lit) const {
void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
- codegen()->PrepareForBailoutBeforeSplit(TOS_REG,
+ codegen()->PrepareForBailoutBeforeSplit(condition(),
true,
true_label_,
false_label_);
@@ -508,7 +513,7 @@ void FullCodeGenerator::TestContext::DropAndPlug(int count,
// For simplicity we always test the accumulator register.
__ Drop(count);
__ Move(result_register(), reg);
- codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
+ codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
codegen()->DoTest(this);
}
@@ -575,7 +580,7 @@ void FullCodeGenerator::StackValueContext::Plug(bool flag) const {
void FullCodeGenerator::TestContext::Plug(bool flag) const {
- codegen()->PrepareForBailoutBeforeSplit(TOS_REG,
+ codegen()->PrepareForBailoutBeforeSplit(condition(),
true,
true_label_,
false_label_);
@@ -665,17 +670,20 @@ void FullCodeGenerator::SetVar(Variable* var,
ASSERT(!scratch1.is(src));
MemOperand location = VarOperand(var, scratch0);
__ str(src, location);
+
// Emit the write barrier code if the location is in the heap.
if (var->IsContextSlot()) {
- __ RecordWrite(scratch0,
- Operand(Context::SlotOffset(var->index())),
- scratch1,
- src);
+ __ RecordWriteContextSlot(scratch0,
+ location.offset(),
+ src,
+ scratch1,
+ kLRHasBeenSaved,
+ kDontSaveFPRegs);
}
}
-void FullCodeGenerator::PrepareForBailoutBeforeSplit(State state,
+void FullCodeGenerator::PrepareForBailoutBeforeSplit(Expression* expr,
bool should_normalize,
Label* if_true,
Label* if_false) {
@@ -686,13 +694,7 @@ void FullCodeGenerator::PrepareForBailoutBeforeSplit(State state,
Label skip;
if (should_normalize) __ b(&skip);
-
- ForwardBailoutStack* current = forward_bailout_stack_;
- while (current != NULL) {
- PrepareForBailout(current->expr(), state);
- current = current->parent();
- }
-
+ PrepareForBailout(expr, TOS_REG);
if (should_normalize) {
__ LoadRoot(ip, Heap::kTrueValueRootIndex);
__ cmp(r0, ip);
@@ -703,13 +705,15 @@ void FullCodeGenerator::PrepareForBailoutBeforeSplit(State state,
void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
- Variable::Mode mode,
+ VariableMode mode,
FunctionLiteral* function,
int* global_count) {
// If it was not possible to allocate the variable at compile time, we
// need to "declare" it at runtime to make sure it actually exists in the
// local context.
Variable* variable = proxy->var();
+ bool binding_needs_init = (function == NULL) &&
+ (mode == CONST || mode == CONST_HARMONY || mode == LET);
switch (variable->location()) {
case Variable::UNALLOCATED:
++(*global_count);
@@ -721,7 +725,7 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
Comment cmnt(masm_, "[ Declaration");
VisitForAccumulatorValue(function);
__ str(result_register(), StackOperand(variable));
- } else if (mode == Variable::CONST || mode == Variable::LET) {
+ } else if (binding_needs_init) {
Comment cmnt(masm_, "[ Declaration");
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ str(ip, StackOperand(variable));
@@ -746,10 +750,16 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
__ str(result_register(), ContextOperand(cp, variable->index()));
int offset = Context::SlotOffset(variable->index());
// We know that we have written a function, which is not a smi.
- __ mov(r1, Operand(cp));
- __ RecordWrite(r1, Operand(offset), r2, result_register());
+ __ RecordWriteContextSlot(cp,
+ offset,
+ result_register(),
+ r2,
+ kLRHasBeenSaved,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
- } else if (mode == Variable::CONST || mode == Variable::LET) {
+ } else if (binding_needs_init) {
Comment cmnt(masm_, "[ Declaration");
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ str(ip, ContextOperand(cp, variable->index()));
@@ -761,11 +771,13 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
case Variable::LOOKUP: {
Comment cmnt(masm_, "[ Declaration");
__ mov(r2, Operand(variable->name()));
- // Declaration nodes are always introduced in one of three modes.
- ASSERT(mode == Variable::VAR ||
- mode == Variable::CONST ||
- mode == Variable::LET);
- PropertyAttributes attr = (mode == Variable::CONST) ? READ_ONLY : NONE;
+ // Declaration nodes are always introduced in one of four modes.
+ ASSERT(mode == VAR ||
+ mode == CONST ||
+ mode == CONST_HARMONY ||
+ mode == LET);
+ PropertyAttributes attr = (mode == CONST || mode == CONST_HARMONY)
+ ? READ_ONLY : NONE;
__ mov(r1, Operand(Smi::FromInt(attr)));
// Push initial value, if any.
// Note: For variables we must not push an initial value (such as
@@ -775,7 +787,7 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
__ Push(cp, r2, r1);
// Push initial value for function declaration.
VisitForStackValue(function);
- } else if (mode == Variable::CONST || mode == Variable::LET) {
+ } else if (binding_needs_init) {
__ LoadRoot(r0, Heap::kTheHoleValueRootIndex);
__ Push(cp, r2, r1, r0);
} else {
@@ -917,11 +929,17 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ bind(&done_convert);
__ push(r0);
+ // Check for proxies.
+ Label call_runtime;
+ STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
+ __ CompareObjectType(r0, r1, r1, LAST_JS_PROXY_TYPE);
+ __ b(le, &call_runtime);
+
// Check cache validity in generated code. This is a fast case for
// the JSObject::IsSimpleEnum cache validity checks. If we cannot
// guarantee cache validity, call the runtime system to check cache
// validity or get the property names in a fixed array.
- Label next, call_runtime;
+ Label next;
// Preload a couple of values used in the loop.
Register empty_fixed_array_value = r6;
__ LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
@@ -1000,9 +1018,16 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ jmp(&loop);
// We got a fixed array in register r0. Iterate through that.
+ Label non_proxy;
__ bind(&fixed_array);
- __ mov(r1, Operand(Smi::FromInt(0))); // Map (0) - force slow check.
- __ Push(r1, r0);
+ __ mov(r1, Operand(Smi::FromInt(1))); // Smi indicates slow check
+ __ ldr(r2, MemOperand(sp, 0 * kPointerSize)); // Get enumerated object
+ STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
+ __ CompareObjectType(r2, r3, r3, LAST_JS_PROXY_TYPE);
+ __ b(gt, &non_proxy);
+ __ mov(r1, Operand(Smi::FromInt(0))); // Zero indicates proxy
+ __ bind(&non_proxy);
+ __ Push(r1, r0); // Smi and array
__ ldr(r1, FieldMemOperand(r0, FixedArray::kLengthOffset));
__ mov(r0, Operand(Smi::FromInt(0)));
__ Push(r1, r0); // Fixed array length (as smi) and initial index.
@@ -1019,18 +1044,23 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ add(r2, r2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ ldr(r3, MemOperand(r2, r0, LSL, kPointerSizeLog2 - kSmiTagSize));
- // Get the expected map from the stack or a zero map in the
+ // Get the expected map from the stack or a smi in the
// permanent slow case into register r2.
__ ldr(r2, MemOperand(sp, 3 * kPointerSize));
// Check if the expected map still matches that of the enumerable.
- // If not, we have to filter the key.
+ // If not, we may have to filter the key.
Label update_each;
__ ldr(r1, MemOperand(sp, 4 * kPointerSize));
__ ldr(r4, FieldMemOperand(r1, HeapObject::kMapOffset));
__ cmp(r4, Operand(r2));
__ b(eq, &update_each);
+ // For proxies, no filtering is done.
+ // TODO(rossberg): What if only a prototype is a proxy? Not specified yet.
+ __ cmp(r2, Operand(Smi::FromInt(0)));
+ __ b(eq, &update_each);
+
// Convert the entry to a string or (smi) 0 if it isn't a property
// any more. If the property has been removed while iterating, we
// just skip it.
@@ -1085,7 +1115,7 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
!pretenure &&
scope()->is_function_scope() &&
info->num_literals() == 0) {
- FastNewClosureStub stub(info->strict_mode() ? kStrictMode : kNonStrictMode);
+ FastNewClosureStub stub(info->language_mode());
__ mov(r0, Operand(info));
__ push(r0);
__ CallStub(&stub);
@@ -1116,7 +1146,7 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
Scope* s = scope();
while (s != NULL) {
if (s->num_heap_slots() > 0) {
- if (s->calls_eval()) {
+ if (s->calls_non_strict_eval()) {
// Check that extension is NULL.
__ ldr(temp, ContextOperand(current, Context::EXTENSION_INDEX));
__ tst(temp, temp);
@@ -1129,7 +1159,7 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
}
// If no outer scope calls eval, we do not need to check more
// context extensions.
- if (!s->outer_scope_calls_eval() || s->is_eval_scope()) break;
+ if (!s->outer_scope_calls_non_strict_eval() || s->is_eval_scope()) break;
s = s->outer_scope();
}
@@ -1173,7 +1203,7 @@ MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) {
if (s->num_heap_slots() > 0) {
- if (s->calls_eval()) {
+ if (s->calls_non_strict_eval()) {
// Check that extension is NULL.
__ ldr(temp, ContextOperand(context, Context::EXTENSION_INDEX));
__ tst(temp, temp);
@@ -1205,15 +1235,24 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(Variable* var,
// introducing variables. In those cases, we do not want to
// perform a runtime call for all variables in the scope
// containing the eval.
- if (var->mode() == Variable::DYNAMIC_GLOBAL) {
+ if (var->mode() == DYNAMIC_GLOBAL) {
EmitLoadGlobalCheckExtensions(var, typeof_state, slow);
__ jmp(done);
- } else if (var->mode() == Variable::DYNAMIC_LOCAL) {
+ } else if (var->mode() == DYNAMIC_LOCAL) {
Variable* local = var->local_if_not_shadowed();
__ ldr(r0, ContextSlotOperandCheckExtensions(local, slow));
- if (local->mode() == Variable::CONST) {
+ if (local->mode() == CONST ||
+ local->mode() == CONST_HARMONY ||
+ local->mode() == LET) {
__ CompareRoot(r0, Heap::kTheHoleValueRootIndex);
- __ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
+ if (local->mode() == CONST) {
+ __ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
+ } else { // LET || CONST_HARMONY
+ __ b(ne, done);
+ __ mov(r0, Operand(var->name()));
+ __ push(r0);
+ __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ }
}
__ jmp(done);
}
@@ -1246,24 +1285,64 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
Comment cmnt(masm_, var->IsContextSlot()
? "Context variable"
: "Stack variable");
- if (var->mode() != Variable::LET && var->mode() != Variable::CONST) {
- context()->Plug(var);
- } else {
- // Let and const need a read barrier.
- GetVar(r0, var);
- __ CompareRoot(r0, Heap::kTheHoleValueRootIndex);
- if (var->mode() == Variable::LET) {
- Label done;
- __ b(ne, &done);
- __ mov(r0, Operand(var->name()));
- __ push(r0);
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
- __ bind(&done);
+ if (var->binding_needs_init()) {
+ // var->scope() may be NULL when the proxy is located in eval code and
+ // refers to a potential outside binding. Currently those bindings are
+ // always looked up dynamically, i.e. in that case
+ //     var->location() == LOOKUP
+ // always holds.
+ ASSERT(var->scope() != NULL);
+
+ // Check if the binding really needs an initialization check. The check
+ // can be skipped in the following situation: we have a LET or CONST
+ // binding in harmony mode, both the Variable and the VariableProxy have
+ // the same declaration scope (i.e. they are both in global code, in the
+ // same function or in the same eval code) and the VariableProxy is in
+ // the source physically located after the initializer of the variable.
+ //
+ // We cannot skip any initialization checks for CONST in non-harmony
+ // mode because const variables may be declared but never initialized:
+ // if (false) { const x; }; var y = x;
+ //
+ // The condition on the declaration scopes is a conservative check for
+ // nested functions that access a binding and are called before the
+ // binding is initialized:
+ // function() { f(); let x = 1; function f() { x = 2; } }
+ //
+ bool skip_init_check;
+ if (var->scope()->DeclarationScope() != scope()->DeclarationScope()) {
+ skip_init_check = false;
} else {
- __ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
+ // Check that we always have valid source position.
+ ASSERT(var->initializer_position() != RelocInfo::kNoPosition);
+ ASSERT(proxy->position() != RelocInfo::kNoPosition);
+ skip_init_check = var->mode() != CONST &&
+ var->initializer_position() < proxy->position();
+ }
+
+ if (!skip_init_check) {
+ // Let and const need a read barrier.
+ GetVar(r0, var);
+ __ CompareRoot(r0, Heap::kTheHoleValueRootIndex);
+ if (var->mode() == LET || var->mode() == CONST_HARMONY) {
+ // Throw a reference error when using an uninitialized let/const
+ // binding in harmony mode.
+ Label done;
+ __ b(ne, &done);
+ __ mov(r0, Operand(var->name()));
+ __ push(r0);
+ __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ bind(&done);
+ } else {
+ // Uninitialized const bindings outside of harmony mode are unholed.
+ ASSERT(var->mode() == CONST);
+ __ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
+ }
+ context()->Plug(r0);
+ break;
}
- context()->Plug(r0);
}
+ context()->Plug(var);
break;
}
@@ -1337,10 +1416,11 @@ void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Comment cmnt(masm_, "[ ObjectLiteral");
+ Handle<FixedArray> constant_properties = expr->constant_properties();
__ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ ldr(r3, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
__ mov(r2, Operand(Smi::FromInt(expr->literal_index())));
- __ mov(r1, Operand(expr->constant_properties()));
+ __ mov(r1, Operand(constant_properties));
int flags = expr->fast_elements()
? ObjectLiteral::kFastElements
: ObjectLiteral::kNoFlags;
@@ -1349,10 +1429,15 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
: ObjectLiteral::kNoFlags;
__ mov(r0, Operand(Smi::FromInt(flags)));
__ Push(r3, r2, r1, r0);
+ int properties_count = constant_properties->length() / 2;
if (expr->depth() > 1) {
__ CallRuntime(Runtime::kCreateObjectLiteral, 4);
- } else {
+ } else if (flags != ObjectLiteral::kFastElements ||
+ properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
__ CallRuntime(Runtime::kCreateObjectLiteralShallow, 4);
+ } else {
+ FastCloneShallowObjectStub stub(properties_count);
+ __ CallStub(&stub);
}
// If result_saved is true the result is on top of the stack. If
@@ -1386,9 +1471,9 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
VisitForAccumulatorValue(value);
__ mov(r2, Operand(key->handle()));
__ ldr(r1, MemOperand(sp));
- Handle<Code> ic = is_strict_mode()
- ? isolate()->builtins()->StoreIC_Initialize_Strict()
- : isolate()->builtins()->StoreIC_Initialize();
+ Handle<Code> ic = is_classic_mode()
+ ? isolate()->builtins()->StoreIC_Initialize()
+ : isolate()->builtins()->StoreIC_Initialize_Strict();
__ Call(ic, RelocInfo::CODE_TARGET, key->id());
PrepareForBailoutForId(key->id(), NO_REGISTERS);
} else {
@@ -1447,13 +1532,20 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
ZoneList<Expression*>* subexprs = expr->values();
int length = subexprs->length();
+ Handle<FixedArray> constant_elements = expr->constant_elements();
+ ASSERT_EQ(2, constant_elements->length());
+ ElementsKind constant_elements_kind =
+ static_cast<ElementsKind>(Smi::cast(constant_elements->get(0))->value());
+ bool has_fast_elements = constant_elements_kind == FAST_ELEMENTS;
+ Handle<FixedArrayBase> constant_elements_values(
+ FixedArrayBase::cast(constant_elements->get(1)));
__ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ ldr(r3, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
__ mov(r2, Operand(Smi::FromInt(expr->literal_index())));
- __ mov(r1, Operand(expr->constant_elements()));
+ __ mov(r1, Operand(constant_elements));
__ Push(r3, r2, r1);
- if (expr->constant_elements()->map() ==
+ if (has_fast_elements && constant_elements_values->map() ==
isolate()->heap()->fixed_cow_array_map()) {
FastCloneShallowArrayStub stub(
FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS, length);
@@ -1465,8 +1557,13 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
} else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
__ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
} else {
- FastCloneShallowArrayStub stub(
- FastCloneShallowArrayStub::CLONE_ELEMENTS, length);
+ ASSERT(constant_elements_kind == FAST_ELEMENTS ||
+ constant_elements_kind == FAST_SMI_ONLY_ELEMENTS ||
+ FLAG_smi_only_arrays);
+ FastCloneShallowArrayStub::Mode mode = has_fast_elements
+ ? FastCloneShallowArrayStub::CLONE_ELEMENTS
+ : FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS;
+ FastCloneShallowArrayStub stub(mode, length);
__ CallStub(&stub);
}
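
[Editor's note] The array-literal path now decodes constant_elements as a two-element pair, the elements kind and the backing store, and picks a clone strategy from both. A simplified decision table; it folds together the branches visible in this hunk and ignores the literal-depth check the surrounding code also performs:

    enum ElementsKind { FAST_SMI_ONLY_ELEMENTS, FAST_ELEMENTS, OTHER_ELEMENTS };

    enum CloneStrategy {
      kCopyOnWriteStub,    // share the COW backing store
      kCloneElementsStub,  // shallow-copy fast elements
      kCloneAnyStub,       // let the stub dispatch on the kind at runtime
      kRuntimeCall         // too long for a stub
    };

    CloneStrategy Choose(ElementsKind kind, bool cow_backing, int length,
                         int max_cloned_length) {
      if (kind == FAST_ELEMENTS && cow_backing) return kCopyOnWriteStub;
      if (length > max_cloned_length) return kRuntimeCall;
      return kind == FAST_ELEMENTS ? kCloneElementsStub : kCloneAnyStub;
    }
    // e.g. Choose(FAST_ELEMENTS, true, 3, 8) == kCopyOnWriteStub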
@@ -1489,15 +1586,23 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
}
VisitForAccumulatorValue(subexpr);
- // Store the subexpression value in the array's elements.
- __ ldr(r1, MemOperand(sp)); // Copy of array literal.
- __ ldr(r1, FieldMemOperand(r1, JSObject::kElementsOffset));
- int offset = FixedArray::kHeaderSize + (i * kPointerSize);
- __ str(result_register(), FieldMemOperand(r1, offset));
-
- // Update the write barrier for the array store with r0 as the scratch
- // register.
- __ RecordWrite(r1, Operand(offset), r2, result_register());
+ if (constant_elements_kind == FAST_ELEMENTS) {
+ int offset = FixedArray::kHeaderSize + (i * kPointerSize);
+ __ ldr(r6, MemOperand(sp)); // Copy of array literal.
+ __ ldr(r1, FieldMemOperand(r6, JSObject::kElementsOffset));
+ __ str(result_register(), FieldMemOperand(r1, offset));
+ // Update the write barrier for the array store.
+ __ RecordWriteField(r1, offset, result_register(), r2,
+ kLRHasBeenSaved, kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET, INLINE_SMI_CHECK);
+ } else {
+ __ ldr(r1, MemOperand(sp)); // Copy of array literal.
+ __ ldr(r2, FieldMemOperand(r1, JSObject::kMapOffset));
+ __ mov(r3, Operand(Smi::FromInt(i)));
+ __ mov(r4, Operand(Smi::FromInt(expr->literal_index())));
+ StoreArrayLiteralElementStub stub;
+ __ CallStub(&stub);
+ }
PrepareForBailoutForId(expr->GetIdForElement(i), NO_REGISTERS);
}
@@ -1629,7 +1734,7 @@ void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
__ mov(r2, Operand(key->handle()));
// Call load IC. It has arguments receiver and property name r0 and r2.
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- __ Call(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
+ __ Call(ic, RelocInfo::CODE_TARGET, prop->id());
}
@@ -1637,7 +1742,7 @@ void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
// Call keyed load IC. It has arguments key and receiver in r0 and r1.
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- __ Call(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
+ __ Call(ic, RelocInfo::CODE_TARGET, prop->id());
}
@@ -1785,9 +1890,9 @@ void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) {
__ mov(r1, r0);
__ pop(r0); // Restore value.
__ mov(r2, Operand(prop->key()->AsLiteral()->handle()));
- Handle<Code> ic = is_strict_mode()
- ? isolate()->builtins()->StoreIC_Initialize_Strict()
- : isolate()->builtins()->StoreIC_Initialize();
+ Handle<Code> ic = is_classic_mode()
+ ? isolate()->builtins()->StoreIC_Initialize()
+ : isolate()->builtins()->StoreIC_Initialize_Strict();
__ Call(ic);
break;
}
@@ -1798,9 +1903,9 @@ void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) {
__ mov(r1, r0);
__ pop(r2);
__ pop(r0); // Restore value.
- Handle<Code> ic = is_strict_mode()
- ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
- : isolate()->builtins()->KeyedStoreIC_Initialize();
+ Handle<Code> ic = is_classic_mode()
+ ? isolate()->builtins()->KeyedStoreIC_Initialize()
+ : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
__ Call(ic);
break;
}
@@ -1816,9 +1921,9 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
// Global var, const, or let.
__ mov(r2, Operand(var->name()));
__ ldr(r1, GlobalObjectOperand());
- Handle<Code> ic = is_strict_mode()
- ? isolate()->builtins()->StoreIC_Initialize_Strict()
- : isolate()->builtins()->StoreIC_Initialize();
+ Handle<Code> ic = is_classic_mode()
+ ? isolate()->builtins()->StoreIC_Initialize()
+ : isolate()->builtins()->StoreIC_Initialize_Strict();
__ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
} else if (op == Token::INIT_CONST) {
@@ -1844,12 +1949,12 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
__ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
}
- } else if (var->mode() == Variable::LET && op != Token::INIT_LET) {
+ } else if (var->mode() == LET && op != Token::INIT_LET) {
// Non-initializing assignment to let variable needs a write barrier.
if (var->IsLookupSlot()) {
__ push(r0); // Value.
__ mov(r1, Operand(var->name()));
- __ mov(r0, Operand(Smi::FromInt(strict_mode_flag())));
+ __ mov(r0, Operand(Smi::FromInt(language_mode())));
__ Push(cp, r1, r0); // Context, name, language mode.
__ CallRuntime(Runtime::kStoreContextSlot, 4);
} else {
@@ -1869,12 +1974,14 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
// RecordWrite may destroy all its register arguments.
__ mov(r3, result_register());
int offset = Context::SlotOffset(var->index());
- __ RecordWrite(r1, Operand(offset), r2, r3);
+ __ RecordWriteContextSlot(
+ r1, offset, r3, r2, kLRHasBeenSaved, kDontSaveFPRegs);
}
}
- } else if (var->mode() != Variable::CONST) {
- // Assignment to var or initializing assignment to let.
+ } else if (!var->is_const_mode() || op == Token::INIT_CONST_HARMONY) {
+ // Assignment to var or initializing assignment to let/const
+ // in harmony mode.
if (var->IsStackAllocated() || var->IsContextSlot()) {
MemOperand location = VarOperand(var, r1);
if (FLAG_debug_code && op == Token::INIT_LET) {
@@ -1887,13 +1994,15 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
__ str(r0, location);
if (var->IsContextSlot()) {
__ mov(r3, r0);
- __ RecordWrite(r1, Operand(Context::SlotOffset(var->index())), r2, r3);
+ int offset = Context::SlotOffset(var->index());
+ __ RecordWriteContextSlot(
+ r1, offset, r3, r2, kLRHasBeenSaved, kDontSaveFPRegs);
}
} else {
ASSERT(var->IsLookupSlot());
__ push(r0); // Value.
__ mov(r1, Operand(var->name()));
- __ mov(r0, Operand(Smi::FromInt(strict_mode_flag())));
+ __ mov(r0, Operand(Smi::FromInt(language_mode())));
__ Push(cp, r1, r0); // Context, name, language mode.
__ CallRuntime(Runtime::kStoreContextSlot, 4);
}
@@ -1930,9 +2039,9 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
__ pop(r1);
}
- Handle<Code> ic = is_strict_mode()
- ? isolate()->builtins()->StoreIC_Initialize_Strict()
- : isolate()->builtins()->StoreIC_Initialize();
+ Handle<Code> ic = is_classic_mode()
+ ? isolate()->builtins()->StoreIC_Initialize()
+ : isolate()->builtins()->StoreIC_Initialize_Strict();
__ Call(ic, RelocInfo::CODE_TARGET, expr->id());
// If the assignment ends an initialization block, revert to fast case.
@@ -1976,9 +2085,9 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
__ pop(r2);
}
- Handle<Code> ic = is_strict_mode()
- ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
- : isolate()->builtins()->KeyedStoreIC_Initialize();
+ Handle<Code> ic = is_classic_mode()
+ ? isolate()->builtins()->KeyedStoreIC_Initialize()
+ : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
__ Call(ic, RelocInfo::CODE_TARGET, expr->id());
// If the assignment ends an initialization block, revert to fast case.
@@ -2083,6 +2192,7 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr, CallFunctionFlags flags) {
// Record source position for debugger.
SetSourcePosition(expr->position());
CallFunctionStub stub(arg_count, flags);
+ __ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
__ CallStub(&stub);
RecordJSReturnSite(expr);
// Restore context register.
@@ -2091,8 +2201,7 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr, CallFunctionFlags flags) {
}
-void FullCodeGenerator::EmitResolvePossiblyDirectEval(ResolveEvalFlag flag,
- int arg_count) {
+void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
// Push copy of the first argument or undefined if it doesn't exist.
if (arg_count > 0) {
__ ldr(r1, MemOperand(sp, arg_count * kPointerSize));
@@ -2101,22 +2210,20 @@ void FullCodeGenerator::EmitResolvePossiblyDirectEval(ResolveEvalFlag flag,
}
__ push(r1);
- // Push the receiver of the enclosing function and do runtime call.
+ // Push the receiver of the enclosing function.
int receiver_offset = 2 + info_->scope()->num_parameters();
__ ldr(r1, MemOperand(fp, receiver_offset * kPointerSize));
__ push(r1);
- // Push the strict mode flag. In harmony mode every eval call
- // is a strict mode eval call.
- StrictModeFlag strict_mode = strict_mode_flag();
- if (FLAG_harmony_block_scoping) {
- strict_mode = kStrictMode;
- }
- __ mov(r1, Operand(Smi::FromInt(strict_mode)));
+ // Push the language mode.
+ __ mov(r1, Operand(Smi::FromInt(language_mode())));
+ __ push(r1);
+
+ // Push the start position of the scope the call resides in.
+ __ mov(r1, Operand(Smi::FromInt(scope()->start_position())));
__ push(r1);
- __ CallRuntime(flag == SKIP_CONTEXT_LOOKUP
- ? Runtime::kResolvePossiblyDirectEvalNoLookup
- : Runtime::kResolvePossiblyDirectEval, 4);
+ // Do the runtime call.
+ __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 5);
}
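The resolver now takes five values instead of four: the language mode replaces the old strict-mode flag, and the scope's start position is added, apparently so the runtime can associate the eval code with the scope containing the call. A sketch of the stack layout the snippet builds, deepest slot first, assuming the pushes shown above plus the function copy pushed by the caller in VisitCall:

    // Arguments to Runtime::kResolvePossiblyDirectEval (5 total):
    //   [sp + 4 * kPointerSize]  function being called (pushed by VisitCall)
    //   [sp + 3 * kPointerSize]  first argument to eval, or undefined
    //   [sp + 2 * kPointerSize]  receiver of the enclosing function
    //   [sp + 1 * kPointerSize]  language mode, as a smi
    //   [sp + 0 * kPointerSize]  scope start position, as a smi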
@@ -2150,28 +2257,11 @@ void FullCodeGenerator::VisitCall(Call* expr) {
VisitForStackValue(args->at(i));
}
- // If we know that eval can only be shadowed by eval-introduced
- // variables we attempt to load the global eval function directly
- // in generated code. If we succeed, there is no need to perform a
- // context lookup in the runtime system.
- Label done;
- Variable* var = proxy->var();
- if (!var->IsUnallocated() && var->mode() == Variable::DYNAMIC_GLOBAL) {
- Label slow;
- EmitLoadGlobalCheckExtensions(var, NOT_INSIDE_TYPEOF, &slow);
- // Push the function and resolve eval.
- __ push(r0);
- EmitResolvePossiblyDirectEval(SKIP_CONTEXT_LOOKUP, arg_count);
- __ jmp(&done);
- __ bind(&slow);
- }
-
// Push a copy of the function (found below the arguments) and
// resolve eval.
__ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
__ push(r1);
- EmitResolvePossiblyDirectEval(PERFORM_CONTEXT_LOOKUP, arg_count);
- __ bind(&done);
+ EmitResolvePossiblyDirectEval(arg_count);
// The runtime call returns a pair of values in r0 (function) and
// r1 (receiver). Touch up the stack with the right values.
@@ -2182,6 +2272,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
// Record source position for debugger.
SetSourcePosition(expr->position());
CallFunctionStub stub(arg_count, RECEIVER_MIGHT_BE_IMPLICIT);
+ __ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
__ CallStub(&stub);
RecordJSReturnSite(expr);
// Restore context register.
@@ -2295,7 +2386,8 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
}
-void FullCodeGenerator::EmitIsSmi(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -2307,7 +2399,7 @@ void FullCodeGenerator::EmitIsSmi(ZoneList<Expression*>* args) {
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
__ tst(r0, Operand(kSmiTagMask));
Split(eq, if_true, if_false, fall_through);
@@ -2315,7 +2407,8 @@ void FullCodeGenerator::EmitIsSmi(ZoneList<Expression*>* args) {
}
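This same substitution repeats through every Emit* predicate below: the bailout helper is now keyed by the expression itself rather than a raw top-of-stack register flag, so the bailout can be registered against the expression's own AST id (the ForwardBailoutToChild machinery this replaces is deleted in the unary-NOT hunk further down). Signature change only, as a sketch:

    // before: PrepareForBailoutBeforeSplit(State tos_state, bool normalize,
    //                                      Label* if_true, Label* if_false);
    // after:  PrepareForBailoutBeforeSplit(Expression* expr, bool normalize,
    //                                      Label* if_true, Label* if_false);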
-void FullCodeGenerator::EmitIsNonNegativeSmi(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsNonNegativeSmi(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -2327,7 +2420,7 @@ void FullCodeGenerator::EmitIsNonNegativeSmi(ZoneList<Expression*>* args) {
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
__ tst(r0, Operand(kSmiTagMask | 0x80000000));
Split(eq, if_true, if_false, fall_through);
@@ -2335,7 +2428,8 @@ void FullCodeGenerator::EmitIsNonNegativeSmi(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitIsObject(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsObject(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -2360,14 +2454,15 @@ void FullCodeGenerator::EmitIsObject(ZoneList<Expression*>* args) {
__ cmp(r1, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
__ b(lt, if_false);
__ cmp(r1, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(le, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
}
-void FullCodeGenerator::EmitIsSpecObject(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -2381,14 +2476,15 @@ void FullCodeGenerator::EmitIsSpecObject(ZoneList<Expression*>* args) {
__ JumpIfSmi(r0, if_false);
__ CompareObjectType(r0, r1, r1, FIRST_SPEC_OBJECT_TYPE);
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(ge, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
}
-void FullCodeGenerator::EmitIsUndetectableObject(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsUndetectableObject(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -2404,7 +2500,7 @@ void FullCodeGenerator::EmitIsUndetectableObject(ZoneList<Expression*>* args) {
__ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
__ ldrb(r1, FieldMemOperand(r1, Map::kBitFieldOffset));
__ tst(r1, Operand(1 << Map::kIsUndetectable));
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(ne, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
@@ -2412,8 +2508,8 @@ void FullCodeGenerator::EmitIsUndetectableObject(ZoneList<Expression*>* args) {
void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
- ZoneList<Expression*>* args) {
-
+ CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -2492,12 +2588,13 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
__ strb(r2, FieldMemOperand(r1, Map::kBitField2Offset));
__ jmp(if_true);
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
context()->Plug(if_true, if_false);
}
-void FullCodeGenerator::EmitIsFunction(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -2511,14 +2608,15 @@ void FullCodeGenerator::EmitIsFunction(ZoneList<Expression*>* args) {
__ JumpIfSmi(r0, if_false);
__ CompareObjectType(r0, r1, r2, JS_FUNCTION_TYPE);
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(eq, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
}
-void FullCodeGenerator::EmitIsArray(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsArray(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -2532,14 +2630,15 @@ void FullCodeGenerator::EmitIsArray(ZoneList<Expression*>* args) {
__ JumpIfSmi(r0, if_false);
__ CompareObjectType(r0, r1, r1, JS_ARRAY_TYPE);
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(eq, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
}
-void FullCodeGenerator::EmitIsRegExp(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsRegExp(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -2553,7 +2652,7 @@ void FullCodeGenerator::EmitIsRegExp(ZoneList<Expression*>* args) {
__ JumpIfSmi(r0, if_false);
__ CompareObjectType(r0, r1, r1, JS_REGEXP_TYPE);
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(eq, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
@@ -2561,8 +2660,8 @@ void FullCodeGenerator::EmitIsRegExp(ZoneList<Expression*>* args) {
-void FullCodeGenerator::EmitIsConstructCall(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 0);
+void FullCodeGenerator::EmitIsConstructCall(CallRuntime* expr) {
+ ASSERT(expr->arguments()->length() == 0);
Label materialize_true, materialize_false;
Label* if_true = NULL;
@@ -2585,14 +2684,15 @@ void FullCodeGenerator::EmitIsConstructCall(ZoneList<Expression*>* args) {
__ bind(&check_frame_marker);
__ ldr(r1, MemOperand(r2, StandardFrameConstants::kMarkerOffset));
__ cmp(r1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(eq, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
}
-void FullCodeGenerator::EmitObjectEquals(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitObjectEquals(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 2);
// Load the two objects into registers and perform the comparison.
@@ -2608,14 +2708,15 @@ void FullCodeGenerator::EmitObjectEquals(ZoneList<Expression*>* args) {
__ pop(r1);
__ cmp(r0, r1);
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(eq, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
}
-void FullCodeGenerator::EmitArguments(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitArguments(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
// ArgumentsAccessStub expects the key in r1 and the formal
@@ -2629,9 +2730,8 @@ void FullCodeGenerator::EmitArguments(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitArgumentsLength(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 0);
-
+void FullCodeGenerator::EmitArgumentsLength(CallRuntime* expr) {
+ ASSERT(expr->arguments()->length() == 0);
Label exit;
// Get the number of formal parameters.
__ mov(r0, Operand(Smi::FromInt(info_->scope()->num_parameters())));
@@ -2651,7 +2751,8 @@ void FullCodeGenerator::EmitArgumentsLength(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitClassOf(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
Label done, null, function, non_function_constructor;
@@ -2662,20 +2763,24 @@ void FullCodeGenerator::EmitClassOf(ZoneList<Expression*>* args) {
// Check that the object is a JS object but take special care of JS
// functions to make sure they have 'Function' as their class.
+ // Assume that there are only two callable types, and one of them is at
+ // either end of the type range for JS object types. Saves extra comparisons.
+ STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
__ CompareObjectType(r0, r0, r1, FIRST_SPEC_OBJECT_TYPE);
// Map is now in r0.
__ b(lt, &null);
-
- // As long as LAST_CALLABLE_SPEC_OBJECT_TYPE is the last instance type, and
- // FIRST_CALLABLE_SPEC_OBJECT_TYPE comes right after
- // LAST_NONCALLABLE_SPEC_OBJECT_TYPE, we can avoid checking for the latter.
- STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
- STATIC_ASSERT(FIRST_CALLABLE_SPEC_OBJECT_TYPE ==
- LAST_NONCALLABLE_SPEC_OBJECT_TYPE + 1);
- __ cmp(r1, Operand(FIRST_CALLABLE_SPEC_OBJECT_TYPE));
- __ b(ge, &function);
-
- // Check if the constructor in the map is a function.
+ STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
+ FIRST_SPEC_OBJECT_TYPE + 1);
+ __ b(eq, &function);
+
+ __ cmp(r1, Operand(LAST_SPEC_OBJECT_TYPE));
+ STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
+ LAST_SPEC_OBJECT_TYPE - 1);
+ __ b(eq, &function);
+ // Assume that there is no larger type.
+ STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == LAST_TYPE - 1);
+
+ // Check if the constructor in the map is a JS function.
__ ldr(r0, FieldMemOperand(r0, Map::kConstructorOffset));
__ CompareObjectType(r0, r1, r1, JS_FUNCTION_TYPE);
__ b(ne, &non_function_constructor);
@@ -2707,7 +2812,7 @@ void FullCodeGenerator::EmitClassOf(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitLog(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitLog(CallRuntime* expr) {
// Conditionally generate a log call.
// Args:
// 0 (literal string): The type of logging (corresponds to the flags).
@@ -2715,6 +2820,7 @@ void FullCodeGenerator::EmitLog(ZoneList<Expression*>* args) {
// 1 (string): Format string. Access the string at argument index 2
// with '%2s' (see Logger::LogRuntime for all the formats).
// 2 (array): Arguments to the format string.
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(args->length(), 3);
if (CodeGenerator::ShouldGenerateLog(args->at(0))) {
VisitForStackValue(args->at(1));
@@ -2728,9 +2834,8 @@ void FullCodeGenerator::EmitLog(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitRandomHeapNumber(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 0);
-
+void FullCodeGenerator::EmitRandomHeapNumber(CallRuntime* expr) {
+ ASSERT(expr->arguments()->length() == 0);
Label slow_allocate_heapnumber;
Label heapnumber_allocated;
@@ -2750,7 +2855,8 @@ void FullCodeGenerator::EmitRandomHeapNumber(ZoneList<Expression*>* args) {
// (1.(20 0s)(32 random bits) x 2^20) - (1.0 x 2^20).
if (CpuFeatures::IsSupported(VFP3)) {
__ PrepareCallCFunction(1, r0);
- __ mov(r0, Operand(ExternalReference::isolate_address()));
+ __ ldr(r0, ContextOperand(context_register(), Context::GLOBAL_INDEX));
+ __ ldr(r0, FieldMemOperand(r0, GlobalObject::kGlobalContextOffset));
__ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
CpuFeatures::Scope scope(VFP3);
@@ -2770,8 +2876,9 @@ void FullCodeGenerator::EmitRandomHeapNumber(ZoneList<Expression*>* args) {
__ mov(r0, r4);
} else {
__ PrepareCallCFunction(2, r0);
+ __ ldr(r1, ContextOperand(context_register(), Context::GLOBAL_INDEX));
__ mov(r0, Operand(r4));
- __ mov(r1, Operand(ExternalReference::isolate_address()));
+ __ ldr(r1, FieldMemOperand(r1, GlobalObject::kGlobalContextOffset));
__ CallCFunction(
ExternalReference::fill_heap_number_with_random_function(isolate()), 2);
}
@@ -2780,9 +2887,10 @@ void FullCodeGenerator::EmitRandomHeapNumber(ZoneList<Expression*>* args) {
}
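The VFP3 path converts 32 random bits to a double in [0, 1) using the exponent trick the comment above describes. A self-contained sketch of that arithmetic, where 1043 is the IEEE-754 biased exponent for 2^20:

    #include <cstdint>
    #include <cstring>

    double RandomDoubleFromBits(uint32_t random_bits) {
      // 1.(20 zero bits)(32 random bits) x 2^20 == 2^20 + bits * 2^-32.
      uint64_t word = (uint64_t{1043} << 52) | random_bits;
      double d;
      std::memcpy(&d, &word, sizeof d);   // bit-cast, no numeric conversion
      return d - 1048576.0;               // subtract 1.0 x 2^20 -> [0, 1)
    }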
-void FullCodeGenerator::EmitSubString(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitSubString(CallRuntime* expr) {
// Load the arguments on the stack and call the stub.
SubStringStub stub;
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 3);
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
@@ -2792,9 +2900,10 @@ void FullCodeGenerator::EmitSubString(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitRegExpExec(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitRegExpExec(CallRuntime* expr) {
// Load the arguments on the stack and call the stub.
RegExpExecStub stub;
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 4);
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
@@ -2805,9 +2914,9 @@ void FullCodeGenerator::EmitRegExpExec(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitValueOf(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitValueOf(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
-
VisitForAccumulatorValue(args->at(0)); // Load the object.
Label done;
@@ -2823,8 +2932,9 @@ void FullCodeGenerator::EmitValueOf(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitMathPow(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
// Load the arguments on the stack and call the runtime function.
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 2);
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
@@ -2834,9 +2944,9 @@ void FullCodeGenerator::EmitMathPow(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitSetValueOf(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitSetValueOf(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 2);
-
VisitForStackValue(args->at(0)); // Load the object.
VisitForAccumulatorValue(args->at(1)); // Load the value.
__ pop(r1); // r0 = value. r1 = object.
@@ -2853,16 +2963,18 @@ void FullCodeGenerator::EmitSetValueOf(ZoneList<Expression*>* args) {
__ str(r0, FieldMemOperand(r1, JSValue::kValueOffset));
// Update the write barrier. Save the value as it will be
// overwritten by the write barrier code and is needed afterward.
- __ RecordWrite(r1, Operand(JSValue::kValueOffset - kHeapObjectTag), r2, r3);
+ __ mov(r2, r0);
+ __ RecordWriteField(
+ r1, JSValue::kValueOffset, r2, r3, kLRHasBeenSaved, kDontSaveFPRegs);
__ bind(&done);
context()->Plug(r0);
}
-void FullCodeGenerator::EmitNumberToString(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(args->length(), 1);
-
// Load the argument on the stack and call the stub.
VisitForStackValue(args->at(0));
@@ -2872,9 +2984,9 @@ void FullCodeGenerator::EmitNumberToString(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitStringCharFromCode(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
-
VisitForAccumulatorValue(args->at(0));
Label done;
@@ -2890,15 +3002,14 @@ void FullCodeGenerator::EmitStringCharFromCode(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitStringCharCodeAt(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 2);
-
VisitForStackValue(args->at(0));
VisitForAccumulatorValue(args->at(1));
Register object = r1;
Register index = r0;
- Register scratch = r2;
Register result = r3;
__ pop(object);
@@ -2908,7 +3019,6 @@ void FullCodeGenerator::EmitStringCharCodeAt(ZoneList<Expression*>* args) {
Label done;
StringCharCodeAtGenerator generator(object,
index,
- scratch,
result,
&need_conversion,
&need_conversion,
@@ -2937,16 +3047,15 @@ void FullCodeGenerator::EmitStringCharCodeAt(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitStringCharAt(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 2);
-
VisitForStackValue(args->at(0));
VisitForAccumulatorValue(args->at(1));
Register object = r1;
Register index = r0;
- Register scratch1 = r2;
- Register scratch2 = r3;
+ Register scratch = r3;
Register result = r0;
__ pop(object);
@@ -2956,8 +3065,7 @@ void FullCodeGenerator::EmitStringCharAt(ZoneList<Expression*>* args) {
Label done;
StringCharAtGenerator generator(object,
index,
- scratch1,
- scratch2,
+ scratch,
result,
&need_conversion,
&need_conversion,
@@ -2986,9 +3094,9 @@ void FullCodeGenerator::EmitStringCharAt(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitStringAdd(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(2, args->length());
-
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
@@ -2998,9 +3106,9 @@ void FullCodeGenerator::EmitStringAdd(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitStringCompare(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitStringCompare(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(2, args->length());
-
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
@@ -3010,10 +3118,11 @@ void FullCodeGenerator::EmitStringCompare(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitMathSin(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitMathSin(CallRuntime* expr) {
// Load the argument on the stack and call the stub.
TranscendentalCacheStub stub(TranscendentalCache::SIN,
TranscendentalCacheStub::TAGGED);
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForStackValue(args->at(0));
__ CallStub(&stub);
@@ -3021,10 +3130,23 @@ void FullCodeGenerator::EmitMathSin(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitMathCos(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitMathCos(CallRuntime* expr) {
// Load the argument on the stack and call the stub.
TranscendentalCacheStub stub(TranscendentalCache::COS,
TranscendentalCacheStub::TAGGED);
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+ VisitForStackValue(args->at(0));
+ __ CallStub(&stub);
+ context()->Plug(r0);
+}
+
+
+void FullCodeGenerator::EmitMathTan(CallRuntime* expr) {
+ // Load the argument on the stack and call the stub.
+ TranscendentalCacheStub stub(TranscendentalCache::TAN,
+ TranscendentalCacheStub::TAGGED);
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForStackValue(args->at(0));
__ CallStub(&stub);
@@ -3032,10 +3154,11 @@ void FullCodeGenerator::EmitMathCos(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitMathLog(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitMathLog(CallRuntime* expr) {
// Load the argument on the stack and call the stub.
TranscendentalCacheStub stub(TranscendentalCache::LOG,
TranscendentalCacheStub::TAGGED);
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForStackValue(args->at(0));
__ CallStub(&stub);
@@ -3043,8 +3166,9 @@ void FullCodeGenerator::EmitMathLog(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitMathSqrt(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitMathSqrt(CallRuntime* expr) {
// Load the argument on the stack and call the runtime function.
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForStackValue(args->at(0));
__ CallRuntime(Runtime::kMath_sqrt, 1);
@@ -3052,7 +3176,8 @@ void FullCodeGenerator::EmitMathSqrt(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitCallFunction(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() >= 2);
int arg_count = args->length() - 2; // 2 ~ receiver and function.
@@ -3061,18 +3186,31 @@ void FullCodeGenerator::EmitCallFunction(ZoneList<Expression*>* args) {
}
VisitForAccumulatorValue(args->last()); // Function.
+ // Check for a function proxy.
+ Label proxy, done;
+ __ CompareObjectType(r0, r1, r1, JS_FUNCTION_PROXY_TYPE);
+ __ b(eq, &proxy);
+
// InvokeFunction requires the function in r1. Move it in there.
__ mov(r1, result_register());
ParameterCount count(arg_count);
__ InvokeFunction(r1, count, CALL_FUNCTION,
NullCallWrapper(), CALL_AS_METHOD);
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ jmp(&done);
+
+ __ bind(&proxy);
+ __ push(r0);
+ __ CallRuntime(Runtime::kCall, args->length());
+ __ bind(&done);
+
context()->Plug(r0);
}
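The new branch lets the %_CallFunction intrinsic cope with harmony function proxies: InvokeFunction only understands real JSFunctions, so a proxy detours through Runtime::kCall, which knows how to unwrap the proxy's call trap. A toy model of the dispatch (enum values illustrative, not V8's):

    enum InstanceType { JS_FUNCTION_TYPE, JS_FUNCTION_PROXY_TYPE };

    const char* CallPath(InstanceType type) {
      // Mirrors the CompareObjectType / b(eq, &proxy) pair above.
      return type == JS_FUNCTION_PROXY_TYPE ? "Runtime::kCall"
                                            : "InvokeFunction";
    }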
-void FullCodeGenerator::EmitRegExpConstructResult(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
RegExpConstructResultStub stub;
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 3);
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
@@ -3082,7 +3220,8 @@ void FullCodeGenerator::EmitRegExpConstructResult(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitSwapElements(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitSwapElements(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 3);
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
@@ -3141,16 +3280,31 @@ void FullCodeGenerator::EmitSwapElements(ZoneList<Expression*>* args) {
__ str(scratch1, MemOperand(index2, 0));
__ str(scratch2, MemOperand(index1, 0));
- Label new_space;
- __ InNewSpace(elements, scratch1, eq, &new_space);
+ Label no_remembered_set;
+ __ CheckPageFlag(elements,
+ scratch1,
+ 1 << MemoryChunk::SCAN_ON_SCAVENGE,
+ ne,
+ &no_remembered_set);
// Possible optimization: do a check that both values are Smis
// (OR them together and test the result against the Smi mask.)
- __ mov(scratch1, elements);
- __ RecordWriteHelper(elements, index1, scratch2);
- __ RecordWriteHelper(scratch1, index2, scratch2); // scratch1 holds elements.
+ // We are swapping two objects in an array and the incremental marker never
+ // pauses in the middle of scanning a single object. Therefore the
+ // incremental marker is not disturbed, so we don't need to call the
+ // RecordWrite stub that notifies the incremental marker.
+ __ RememberedSetHelper(elements,
+ index1,
+ scratch2,
+ kDontSaveFPRegs,
+ MacroAssembler::kFallThroughAtEnd);
+ __ RememberedSetHelper(elements,
+ index2,
+ scratch2,
+ kDontSaveFPRegs,
+ MacroAssembler::kFallThroughAtEnd);
- __ bind(&new_space);
+ __ bind(&no_remembered_set);
// We are done. Drop elements from the stack, and return undefined.
__ Drop(3);
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
@@ -3164,9 +3318,9 @@ void FullCodeGenerator::EmitSwapElements(ZoneList<Expression*>* args) {
}
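This rewrite reflects the incremental-marking collector that arrives with V8 3.7: the old RecordWriteHelper splits into a remembered-set half (old-to-new pointers for the scavenger) and an incremental-marking half. Swapping two elements within one array only ever needs the first half, since the marker never pauses mid-object, as the new comment explains. A stubbed sketch of that two-part barrier, with every predicate a stand-in for V8 internals:

    struct Heap {
      bool InNewSpace(const void*) const { return false; }   // stub
      bool IncrementalMarkingOn() const { return false; }    // stub
      void RememberedSetInsert(void**) {}                    // stub
      void MarkGrey(const void*) {}                          // stub
    };

    void FullWriteBarrier(Heap* heap, void* host, void** slot, void* value) {
      // Half 1: scavenger bookkeeping -- all EmitSwapElements needs.
      if (heap->InNewSpace(value) && !heap->InNewSpace(host)) {
        heap->RememberedSetInsert(slot);
      }
      // Half 2: marking invariants -- skippable here, since the swap cannot
      // interleave with the marker's scan of a single object.
      if (heap->IncrementalMarkingOn()) heap->MarkGrey(value);
    }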
-void FullCodeGenerator::EmitGetFromCache(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(2, args->length());
-
ASSERT_NE(NULL, args->at(0)->AsLiteral());
int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->handle()))->value();
@@ -3215,7 +3369,8 @@ void FullCodeGenerator::EmitGetFromCache(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitIsRegExpEquivalent(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsRegExpEquivalent(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(2, args->length());
Register right = r0;
@@ -3255,7 +3410,8 @@ void FullCodeGenerator::EmitIsRegExpEquivalent(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitHasCachedArrayIndex(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
VisitForAccumulatorValue(args->at(0));
Label materialize_true, materialize_false;
@@ -3267,14 +3423,15 @@ void FullCodeGenerator::EmitHasCachedArrayIndex(ZoneList<Expression*>* args) {
__ ldr(r0, FieldMemOperand(r0, String::kHashFieldOffset));
__ tst(r0, Operand(String::kContainsCachedArrayIndexMask));
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(eq, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
}
-void FullCodeGenerator::EmitGetCachedArrayIndex(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -3289,12 +3446,12 @@ void FullCodeGenerator::EmitGetCachedArrayIndex(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
Label bailout, done, one_char_separator, long_separator,
non_trivial_array, not_size_one_array, loop,
empty_separator_loop, one_char_separator_loop,
one_char_separator_loop_entry, long_separator_loop;
-
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 2);
VisitForStackValue(args->at(1));
VisitForAccumulatorValue(args->at(0));
@@ -3571,7 +3728,9 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
if (property != NULL) {
VisitForStackValue(property->obj());
VisitForStackValue(property->key());
- __ mov(r1, Operand(Smi::FromInt(strict_mode_flag())));
+ StrictModeFlag strict_mode_flag = (language_mode() == CLASSIC_MODE)
+ ? kNonStrictMode : kStrictMode;
+ __ mov(r1, Operand(Smi::FromInt(strict_mode_flag)));
__ push(r1);
__ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
context()->Plug(r0);
@@ -3579,7 +3738,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
Variable* var = proxy->var();
// Delete of an unqualified identifier is disallowed in strict mode
// but "delete this" is allowed.
- ASSERT(strict_mode_flag() == kNonStrictMode || var->is_this());
+ ASSERT(language_mode() == CLASSIC_MODE || var->is_this());
if (var->IsUnallocated()) {
__ ldr(r2, GlobalObjectOperand());
__ mov(r1, Operand(var->name()));
@@ -3622,18 +3781,35 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
// Unary NOT has no side effects so it's only necessary to visit the
// subexpression. Match the optimizing compiler by not branching.
VisitForEffect(expr->expression());
+ } else if (context()->IsTest()) {
+ const TestContext* test = TestContext::cast(context());
+ // The labels are swapped for the recursive call.
+ VisitForControl(expr->expression(),
+ test->false_label(),
+ test->true_label(),
+ test->fall_through());
+ context()->Plug(test->true_label(), test->false_label());
} else {
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
-
- // Notice that the labels are swapped.
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_false, &if_true, &fall_through);
- if (context()->IsTest()) ForwardBailoutToChild(expr);
- VisitForControl(expr->expression(), if_true, if_false, fall_through);
- context()->Plug(if_false, if_true); // Labels swapped.
+ // We handle value contexts explicitly rather than simply visiting
+ // for control and plugging the control flow into the context,
+ // because we need to prepare a pair of extra administrative AST ids
+ // for the optimizing compiler.
+ ASSERT(context()->IsAccumulatorValue() || context()->IsStackValue());
+ Label materialize_true, materialize_false, done;
+ VisitForControl(expr->expression(),
+ &materialize_false,
+ &materialize_true,
+ &materialize_true);
+ __ bind(&materialize_true);
+ PrepareForBailoutForId(expr->MaterializeTrueId(), NO_REGISTERS);
+ __ LoadRoot(r0, Heap::kTrueValueRootIndex);
+ if (context()->IsStackValue()) __ push(r0);
+ __ jmp(&done);
+ __ bind(&materialize_false);
+ PrepareForBailoutForId(expr->MaterializeFalseId(), NO_REGISTERS);
+ __ LoadRoot(r0, Heap::kFalseValueRootIndex);
+ if (context()->IsStackValue()) __ push(r0);
+ __ bind(&done);
}
break;
}
@@ -3826,9 +4002,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
case NAMED_PROPERTY: {
__ mov(r2, Operand(prop->key()->AsLiteral()->handle()));
__ pop(r1);
- Handle<Code> ic = is_strict_mode()
- ? isolate()->builtins()->StoreIC_Initialize_Strict()
- : isolate()->builtins()->StoreIC_Initialize();
+ Handle<Code> ic = is_classic_mode()
+ ? isolate()->builtins()->StoreIC_Initialize()
+ : isolate()->builtins()->StoreIC_Initialize_Strict();
__ Call(ic, RelocInfo::CODE_TARGET, expr->id());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
@@ -3843,9 +4019,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
case KEYED_PROPERTY: {
__ pop(r1); // Key.
__ pop(r2); // Receiver.
- Handle<Code> ic = is_strict_mode()
- ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
- : isolate()->builtins()->KeyedStoreIC_Initialize();
+ Handle<Code> ic = is_classic_mode()
+ ? isolate()->builtins()->KeyedStoreIC_Initialize()
+ : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
__ Call(ic, RelocInfo::CODE_TARGET, expr->id());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
@@ -3892,20 +4068,25 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
context()->Plug(r0);
} else {
// This expression cannot throw a reference error at the top level.
- VisitInCurrentContext(expr);
+ VisitInDuplicateContext(expr);
}
}
void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
- Handle<String> check,
- Label* if_true,
- Label* if_false,
- Label* fall_through) {
+ Expression* sub_expr,
+ Handle<String> check) {
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
{ AccumulatorValueContext context(this);
- VisitForTypeofValue(expr);
+ VisitForTypeofValue(sub_expr);
}
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
if (check->Equals(isolate()->heap()->number_symbol())) {
__ JumpIfSmi(r0, if_true);
@@ -3942,9 +4123,11 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
} else if (check->Equals(isolate()->heap()->function_symbol())) {
__ JumpIfSmi(r0, if_false);
- __ CompareObjectType(r0, r1, r0, FIRST_CALLABLE_SPEC_OBJECT_TYPE);
- Split(ge, if_true, if_false, fall_through);
-
+ STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
+ __ CompareObjectType(r0, r0, r1, JS_FUNCTION_TYPE);
+ __ b(eq, if_true);
+ __ cmp(r1, Operand(JS_FUNCTION_PROXY_TYPE));
+ Split(eq, if_true, if_false, fall_through);
} else if (check->Equals(isolate()->heap()->object_symbol())) {
__ JumpIfSmi(r0, if_false);
if (!FLAG_harmony_typeof) {
@@ -3963,18 +4146,7 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
} else {
if (if_false != fall_through) __ jmp(if_false);
}
-}
-
-
-void FullCodeGenerator::EmitLiteralCompareUndefined(Expression* expr,
- Label* if_true,
- Label* if_false,
- Label* fall_through) {
- VisitForAccumulatorValue(expr);
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
-
- __ CompareRoot(r0, Heap::kUndefinedValueRootIndex);
- Split(eq, if_true, if_false, fall_through);
+ context()->Plug(if_true, if_false);
}
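typeof now answers "function" for exactly two instance types, and the STATIC_ASSERT pins that assumption so the pair of equality checks stays honest if a third callable type is ever added. A toy version of the predicate (values illustrative):

    enum InstanceType { JS_FUNCTION_TYPE = 0xA0, JS_FUNCTION_PROXY_TYPE = 0xA1 };

    bool TypeofSaysFunction(InstanceType t) {
      // Replaces the old range test t >= FIRST_CALLABLE_SPEC_OBJECT_TYPE.
      return t == JS_FUNCTION_TYPE || t == JS_FUNCTION_PROXY_TYPE;
    }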
@@ -3982,9 +4154,12 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
Comment cmnt(masm_, "[ CompareOperation");
SetSourcePosition(expr->position());
+ // First we try a fast inlined version of the compare when one of
+ // the operands is a literal.
+ if (TryLiteralCompare(expr)) return;
+
// Always perform the comparison for its control flow. Pack the result
// into the expression's context after the comparison is performed.
-
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
@@ -3992,20 +4167,13 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
- // First we try a fast inlined version of the compare when one of
- // the operands is a literal.
- if (TryLiteralCompare(expr, if_true, if_false, fall_through)) {
- context()->Plug(if_true, if_false);
- return;
- }
-
Token::Value op = expr->op();
VisitForStackValue(expr->left());
switch (op) {
case Token::IN:
VisitForStackValue(expr->right());
__ InvokeBuiltin(Builtins::IN, CALL_FUNCTION);
- PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
+ PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
__ LoadRoot(ip, Heap::kTrueValueRootIndex);
__ cmp(r0, ip);
Split(eq, if_true, if_false, fall_through);
@@ -4015,7 +4183,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
VisitForStackValue(expr->right());
InstanceofStub stub(InstanceofStub::kNoFlags);
__ CallStub(&stub);
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
// The stub returns 0 for true.
__ tst(r0, r0);
Split(eq, if_true, if_false, fall_through);
@@ -4029,33 +4197,25 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
case Token::EQ_STRICT:
case Token::EQ:
cond = eq;
- __ pop(r1);
break;
case Token::LT:
cond = lt;
- __ pop(r1);
break;
case Token::GT:
- // Reverse left and right sides to obtain ECMA-262 conversion order.
- cond = lt;
- __ mov(r1, result_register());
- __ pop(r0);
+ cond = gt;
break;
case Token::LTE:
- // Reverse left and right sides to obtain ECMA-262 conversion order.
- cond = ge;
- __ mov(r1, result_register());
- __ pop(r0);
+ cond = le;
break;
case Token::GTE:
cond = ge;
- __ pop(r1);
break;
case Token::IN:
case Token::INSTANCEOF:
default:
UNREACHABLE();
}
+ __ pop(r1);
bool inline_smi_code = ShouldInlineSmiCase(op);
JumpPatchSite patch_site(masm_);
@@ -4073,7 +4233,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
Handle<Code> ic = CompareIC::GetUninitialized(op);
__ Call(ic, RelocInfo::CODE_TARGET, expr->id());
patch_site.EmitPatchInfo();
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
__ cmp(r0, Operand(0));
Split(cond, if_true, if_false, fall_through);
}
@@ -4085,8 +4245,9 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
}
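The switch simplifies because the compare IC can now produce gt/le conditions directly (see the matching ComputeCondition change in ic-arm.cc below), so GT and LTE no longer reverse the operands to reuse lt/ge, and a single pop(r1) after the switch replaces the per-case pops. The resulting mapping, sketched with illustrative enums:

    enum Cond { eq, lt, gt, le, ge };
    enum Tok { EQ, EQ_STRICT, LT, GT, LTE, GTE };

    Cond TokenToCondition(Tok op) {
      switch (op) {
        case EQ: case EQ_STRICT: return eq;
        case LT:  return lt;
        case GT:  return gt;   // was lt with r0/r1 swapped
        case LTE: return le;   // was ge with r0/r1 swapped
        case GTE: return ge;
      }
      return eq;  // unreachable
    }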
-void FullCodeGenerator::VisitCompareToNull(CompareToNull* expr) {
- Comment cmnt(masm_, "[ CompareToNull");
+void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
+ Expression* sub_expr,
+ NilValue nil) {
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
@@ -4094,15 +4255,21 @@ void FullCodeGenerator::VisitCompareToNull(CompareToNull* expr) {
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
- VisitForAccumulatorValue(expr->expression());
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
- __ LoadRoot(r1, Heap::kNullValueRootIndex);
+ VisitForAccumulatorValue(sub_expr);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Heap::RootListIndex nil_value = nil == kNullValue ?
+ Heap::kNullValueRootIndex :
+ Heap::kUndefinedValueRootIndex;
+ __ LoadRoot(r1, nil_value);
__ cmp(r0, r1);
- if (expr->is_strict()) {
+ if (expr->op() == Token::EQ_STRICT) {
Split(eq, if_true, if_false, fall_through);
} else {
+ Heap::RootListIndex other_nil_value = nil == kNullValue ?
+ Heap::kUndefinedValueRootIndex :
+ Heap::kNullValueRootIndex;
__ b(eq, if_true);
- __ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(r1, other_nil_value);
__ cmp(r0, r1);
__ b(eq, if_true);
__ JumpIfSmi(r0, if_false);
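EmitLiteralCompareNil replaces the dedicated CompareToNull AST node: one emitter now covers == and === against either null or undefined. In the non-strict form, null, undefined, and undetectable objects (tested by the map-bit check in the truncated tail of this hunk) all compare equal to each other. A toy model of the decision:

    enum NilValue { kNullValue, kUndefinedValue };

    bool CompareNil(bool is_null, bool is_undefined, bool is_undetectable,
                    NilValue nil, bool strict_equality) {
      if (strict_equality) {
        return nil == kNullValue ? is_null : is_undefined;
      }
      // == folds null, undefined, and undetectable objects together.
      return is_null || is_undefined || is_undetectable;
    }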
diff --git a/deps/v8/src/arm/ic-arm.cc b/deps/v8/src/arm/ic-arm.cc
index 2e49cae92..f8e4bbb6b 100644
--- a/deps/v8/src/arm/ic-arm.cc
+++ b/deps/v8/src/arm/ic-arm.cc
@@ -208,7 +208,8 @@ static void GenerateDictionaryStore(MacroAssembler* masm,
// Update the write barrier. Make sure not to clobber the value.
__ mov(scratch1, value);
- __ RecordWrite(elements, scratch2, scratch1);
+ __ RecordWrite(
+ elements, scratch2, scratch1, kLRHasNotBeenSaved, kDontSaveFPRegs);
}
@@ -381,10 +382,10 @@ Object* CallIC_Miss(Arguments args);
// The generated code does not accept smi keys.
// The generated code falls through if both probes miss.
-static void GenerateMonomorphicCacheProbe(MacroAssembler* masm,
- int argc,
- Code::Kind kind,
- Code::ExtraICState extra_ic_state) {
+void CallICBase::GenerateMonomorphicCacheProbe(MacroAssembler* masm,
+ int argc,
+ Code::Kind kind,
+ Code::ExtraICState extra_state) {
// ----------- S t a t e -------------
// -- r1 : receiver
// -- r2 : name
@@ -394,7 +395,7 @@ static void GenerateMonomorphicCacheProbe(MacroAssembler* masm,
// Probe the stub cache.
Code::Flags flags = Code::ComputeFlags(kind,
MONOMORPHIC,
- extra_ic_state,
+ extra_state,
NORMAL,
argc);
Isolate::Current()->stub_cache()->GenerateProbe(
@@ -463,7 +464,7 @@ static void GenerateFunctionTailCall(MacroAssembler* masm,
}
-static void GenerateCallNormal(MacroAssembler* masm, int argc) {
+void CallICBase::GenerateNormal(MacroAssembler* masm, int argc) {
// ----------- S t a t e -------------
// -- r2 : name
// -- lr : return address
@@ -485,10 +486,10 @@ static void GenerateCallNormal(MacroAssembler* masm, int argc) {
}
-static void GenerateCallMiss(MacroAssembler* masm,
- int argc,
- IC::UtilityId id,
- Code::ExtraICState extra_ic_state) {
+void CallICBase::GenerateMiss(MacroAssembler* masm,
+ int argc,
+ IC::UtilityId id,
+ Code::ExtraICState extra_state) {
// ----------- S t a t e -------------
// -- r2 : name
// -- lr : return address
@@ -504,21 +505,22 @@ static void GenerateCallMiss(MacroAssembler* masm,
// Get the receiver of the function from the stack.
__ ldr(r3, MemOperand(sp, argc * kPointerSize));
- __ EnterInternalFrame();
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
- // Push the receiver and the name of the function.
- __ Push(r3, r2);
+ // Push the receiver and the name of the function.
+ __ Push(r3, r2);
- // Call the entry.
- __ mov(r0, Operand(2));
- __ mov(r1, Operand(ExternalReference(IC_Utility(id), isolate)));
+ // Call the entry.
+ __ mov(r0, Operand(2));
+ __ mov(r1, Operand(ExternalReference(IC_Utility(id), isolate)));
- CEntryStub stub(1);
- __ CallStub(&stub);
+ CEntryStub stub(1);
+ __ CallStub(&stub);
- // Move result to r1 and leave the internal frame.
- __ mov(r1, Operand(r0));
- __ LeaveInternalFrame();
+ // Move result to r1 and leave the internal frame.
+ __ mov(r1, Operand(r0));
+ }
// Check if the receiver is a global object of some sort.
// This can happen only for regular CallIC but not KeyedCallIC.
@@ -539,7 +541,7 @@ static void GenerateCallMiss(MacroAssembler* masm,
}
// Invoke the function.
- CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state)
+ CallKind call_kind = CallICBase::Contextual::decode(extra_state)
? CALL_AS_FUNCTION
: CALL_AS_METHOD;
ParameterCount actual(argc);
@@ -551,18 +553,6 @@ static void GenerateCallMiss(MacroAssembler* masm,
}
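The EnterInternalFrame/LeaveInternalFrame pairs give way to a block-scoped FrameScope, so the frame teardown is emitted by a destructor and cannot be skipped or mismatched. The shape of the idiom, as a sketch with a stand-in assembler (the real class also records the frame type passed in, e.g. StackFrame::INTERNAL):

    struct Masm {
      void EnterFrame() { /* emit frame setup */ }
      void LeaveFrame() { /* emit frame teardown */ }
    };

    class FrameScope {
     public:
      explicit FrameScope(Masm* masm) : masm_(masm) { masm_->EnterFrame(); }
      ~FrameScope() { masm_->LeaveFrame(); }  // runs at the closing brace
     private:
      Masm* masm_;
    };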
-void CallIC::GenerateMiss(MacroAssembler* masm,
- int argc,
- Code::ExtraICState extra_ic_state) {
- // ----------- S t a t e -------------
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
-
- GenerateCallMiss(masm, argc, IC::kCallIC_Miss, extra_ic_state);
-}
-
-
void CallIC::GenerateMegamorphic(MacroAssembler* masm,
int argc,
Code::ExtraICState extra_ic_state) {
@@ -578,27 +568,6 @@ void CallIC::GenerateMegamorphic(MacroAssembler* masm,
}
-void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
- // ----------- S t a t e -------------
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
-
- GenerateCallNormal(masm, argc);
- GenerateMiss(masm, argc, Code::kNoExtraICState);
-}
-
-
-void KeyedCallIC::GenerateMiss(MacroAssembler* masm, int argc) {
- // ----------- S t a t e -------------
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
-
- GenerateCallMiss(masm, argc, IC::kKeyedCallIC_Miss, Code::kNoExtraICState);
-}
-
-
void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
// ----------- S t a t e -------------
// -- r2 : name
@@ -650,12 +619,13 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
// This branch is taken when calling KeyedCallIC_Miss is neither required
// nor beneficial.
__ IncrementCounter(counters->keyed_call_generic_slow_load(), 1, r0, r3);
- __ EnterInternalFrame();
- __ push(r2); // save the key
- __ Push(r1, r2); // pass the receiver and the key
- __ CallRuntime(Runtime::kKeyedGetProperty, 2);
- __ pop(r2); // restore the key
- __ LeaveInternalFrame();
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ push(r2); // save the key
+ __ Push(r1, r2); // pass the receiver and the key
+ __ CallRuntime(Runtime::kKeyedGetProperty, 2);
+ __ pop(r2); // restore the key
+ }
__ mov(r1, r0);
__ jmp(&do_call);
@@ -715,7 +685,7 @@ void KeyedCallIC::GenerateNormal(MacroAssembler* masm, int argc) {
__ JumpIfSmi(r2, &miss);
__ IsObjectJSStringType(r2, r0, &miss);
- GenerateCallNormal(masm, argc);
+ CallICBase::GenerateNormal(masm, argc);
__ bind(&miss);
GenerateMiss(masm, argc);
}
@@ -908,7 +878,8 @@ void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
GenerateMappedArgumentsLookup(masm, r2, r1, r3, r4, r5, &notin, &slow);
__ str(r0, mapped_location);
__ add(r6, r3, r5);
- __ RecordWrite(r3, r6, r9);
+ __ mov(r9, r0);
+ __ RecordWrite(r3, r6, r9, kLRHasNotBeenSaved, kDontSaveFPRegs);
__ Ret();
__ bind(&notin);
// The unmapped lookup expects that the parameter map is in r3.
@@ -916,7 +887,8 @@ void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
GenerateUnmappedArgumentsLookup(masm, r1, r3, r4, &slow);
__ str(r0, unmapped_location);
__ add(r6, r3, r4);
- __ RecordWrite(r3, r6, r9);
+ __ mov(r9, r0);
+ __ RecordWrite(r3, r6, r9, kLRHasNotBeenSaved, kDontSaveFPRegs);
__ Ret();
__ bind(&slow);
GenerateMiss(masm, false);
@@ -1137,14 +1109,12 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
Register receiver = r1;
Register index = r0;
- Register scratch1 = r2;
- Register scratch2 = r3;
+ Register scratch = r3;
Register result = r0;
StringCharAtGenerator char_at_generator(receiver,
index,
- scratch1,
- scratch2,
+ scratch,
result,
&miss, // When not a string.
&miss, // When not a number.
@@ -1239,6 +1209,47 @@ void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
}
+void KeyedStoreIC::GenerateTransitionElementsSmiToDouble(MacroAssembler* masm) {
+ // ---------- S t a t e --------------
+ // -- r2 : receiver
+ // -- r3 : target map
+ // -- lr : return address
+ // -----------------------------------
+ // Must return the modified receiver in r0.
+ if (!FLAG_trace_elements_transitions) {
+ Label fail;
+ ElementsTransitionGenerator::GenerateSmiOnlyToDouble(masm, &fail);
+ __ mov(r0, r2);
+ __ Ret();
+ __ bind(&fail);
+ }
+
+ __ push(r2);
+ __ TailCallRuntime(Runtime::kTransitionElementsSmiToDouble, 1, 1);
+}
+
+
+void KeyedStoreIC::GenerateTransitionElementsDoubleToObject(
+ MacroAssembler* masm) {
+ // ---------- S t a t e --------------
+ // -- r2 : receiver
+ // -- r3 : target map
+ // -- lr : return address
+ // -----------------------------------
+ // Must return the modified receiver in r0.
+ if (!FLAG_trace_elements_transitions) {
+ Label fail;
+ ElementsTransitionGenerator::GenerateDoubleToObject(masm, &fail);
+ __ mov(r0, r2);
+ __ Ret();
+ __ bind(&fail);
+ }
+
+ __ push(r2);
+ __ TailCallRuntime(Runtime::kTransitionElementsDoubleToObject, 1, 1);
+}
+
+
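These two stubs back the new smi-only elements optimization: fast JSArray backing stores now track whether they have only ever held smis, unboxed doubles, or arbitrary tagged values, and may only transition toward the more general kind. A sketch of the policy (enum names from this V8 line, ordering illustrative):

    enum ElementsKind {          // illustrative ordering
      FAST_SMI_ONLY_ELEMENTS,    // every store so far was a smi
      FAST_DOUBLE_ELEMENTS,      // unboxed doubles (smi -> double stub above)
      FAST_ELEMENTS              // tagged pointers (double -> object stub)
    };

    ElementsKind KindAfterStore(ElementsKind kind, bool is_smi, bool is_number) {
      if (is_smi) return kind;   // a smi fits every fast kind
      if (is_number && kind != FAST_ELEMENTS) return FAST_DOUBLE_ELEMENTS;
      return FAST_ELEMENTS;      // a heap object forces the general kind
    }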
void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
StrictModeFlag strict_mode) {
// ---------- S t a t e --------------
@@ -1267,13 +1278,17 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
// -- r2 : receiver
// -- lr : return address
// -----------------------------------
- Label slow, fast, array, extra;
+ Label slow, array, extra, check_if_double_array;
+ Label fast_object_with_map_check, fast_object_without_map_check;
+ Label fast_double_with_map_check, fast_double_without_map_check;
// Register usage.
Register value = r0;
Register key = r1;
Register receiver = r2;
Register elements = r3; // Elements array of the receiver.
+ Register elements_map = r6;
+ Register receiver_map = r7;
// r4 and r5 are used as general scratch registers.
// Check that the key is a smi.
@@ -1281,35 +1296,26 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
// Check that the object isn't a smi.
__ JumpIfSmi(receiver, &slow);
// Get the map of the object.
- __ ldr(r4, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
// Check that the receiver does not require access checks. We need
// to do this because this generic stub does not perform map checks.
- __ ldrb(ip, FieldMemOperand(r4, Map::kBitFieldOffset));
+ __ ldrb(ip, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
__ tst(ip, Operand(1 << Map::kIsAccessCheckNeeded));
__ b(ne, &slow);
// Check if the object is a JS array or not.
- __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
+ __ ldrb(r4, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset));
__ cmp(r4, Operand(JS_ARRAY_TYPE));
__ b(eq, &array);
// Check that the object is some kind of JSObject.
- __ cmp(r4, Operand(FIRST_JS_RECEIVER_TYPE));
+ __ cmp(r4, Operand(FIRST_JS_OBJECT_TYPE));
__ b(lt, &slow);
- __ cmp(r4, Operand(JS_PROXY_TYPE));
- __ b(eq, &slow);
- __ cmp(r4, Operand(JS_FUNCTION_PROXY_TYPE));
- __ b(eq, &slow);
// Object case: Check key against length in the elements array.
__ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- // Check that the object is in fast mode and writable.
- __ ldr(r4, FieldMemOperand(elements, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
- __ cmp(r4, ip);
- __ b(ne, &slow);
// Check array bounds. Both the key and the length of FixedArray are smis.
__ ldr(ip, FieldMemOperand(elements, FixedArray::kLengthOffset));
__ cmp(key, Operand(ip));
- __ b(lo, &fast);
+ __ b(lo, &fast_object_with_map_check);
// Slow case, handle jump to runtime.
__ bind(&slow);
@@ -1330,21 +1336,31 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
__ ldr(ip, FieldMemOperand(elements, FixedArray::kLengthOffset));
__ cmp(key, Operand(ip));
__ b(hs, &slow);
+ __ ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
+ __ cmp(elements_map,
+ Operand(masm->isolate()->factory()->fixed_array_map()));
+ __ b(ne, &check_if_double_array);
// Calculate key + 1 as smi.
STATIC_ASSERT(kSmiTag == 0);
__ add(r4, key, Operand(Smi::FromInt(1)));
__ str(r4, FieldMemOperand(receiver, JSArray::kLengthOffset));
- __ b(&fast);
+ __ b(&fast_object_without_map_check);
+
+ __ bind(&check_if_double_array);
+ __ cmp(elements_map,
+ Operand(masm->isolate()->factory()->fixed_double_array_map()));
+ __ b(ne, &slow);
+ // Add 1 to key, and go to common element store code for doubles.
+ STATIC_ASSERT(kSmiTag == 0);
+ __ add(r4, key, Operand(Smi::FromInt(1)));
+ __ str(r4, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ __ jmp(&fast_double_without_map_check);
// Array case: Get the length and the elements array from the JS
// array. Check that the array is in fast mode (and writable); if it
// is the length is always a smi.
__ bind(&array);
__ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ ldr(r4, FieldMemOperand(elements, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
- __ cmp(r4, ip);
- __ b(ne, &slow);
// Check the key against the length in the array.
__ ldr(ip, FieldMemOperand(receiver, JSArray::kLengthOffset));
@@ -1352,18 +1368,57 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
__ b(hs, &extra);
// Fall through to fast case.
- __ bind(&fast);
- // Fast case, store the value to the elements backing store.
- __ add(r5, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ add(r5, r5, Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ str(value, MemOperand(r5));
- // Skip write barrier if the written value is a smi.
- __ tst(value, Operand(kSmiTagMask));
- __ Ret(eq);
+ __ bind(&fast_object_with_map_check);
+ Register scratch_value = r4;
+ Register address = r5;
+ __ ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
+ __ cmp(elements_map,
+ Operand(masm->isolate()->factory()->fixed_array_map()));
+ __ b(ne, &fast_double_with_map_check);
+ __ bind(&fast_object_without_map_check);
+ // Smi stores don't require further checks.
+ Label non_smi_value;
+ __ JumpIfNotSmi(value, &non_smi_value);
+ // It's irrelevant whether the array is smi-only when writing a smi.
+ __ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ add(address, address, Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ str(value, MemOperand(address));
+ __ Ret();
+
+ __ bind(&non_smi_value);
+ // Escape to slow case when writing non-smi into smi-only array.
+ __ CheckFastObjectElements(receiver_map, scratch_value, &slow);
+ // Fast elements array, store the value to the elements backing store.
+ __ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ add(address, address, Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ str(value, MemOperand(address));
// Update write barrier for the elements array address.
- __ sub(r4, r5, Operand(elements));
- __ RecordWrite(elements, Operand(r4), r5, r6);
+ __ mov(scratch_value, value); // Preserve the value which is returned.
+ __ RecordWrite(elements,
+ address,
+ scratch_value,
+ kLRHasNotBeenSaved,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ __ Ret();
+ __ bind(&fast_double_with_map_check);
+ // Check for fast double array case. If this fails, call through to the
+ // runtime.
+ __ cmp(elements_map,
+ Operand(masm->isolate()->factory()->fixed_double_array_map()));
+ __ b(ne, &slow);
+ __ bind(&fast_double_without_map_check);
+ __ StoreNumberToDoubleElements(value,
+ key,
+ receiver,
+ elements,
+ r4,
+ r5,
+ r6,
+ r7,
+ &slow);
__ Ret();
}
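The rewritten generic keyed store no longer bails to the runtime for anything but a plain FixedArray: it dispatches on the elements map, keeps smi stores barrier-free, write-barriers object stores with the smi check elided (the value is already known not to be a smi), and unboxes numbers into double arrays; a non-smi written into a smi-only array still escapes to the slow path via CheckFastObjectElements. A toy classifier for the outcomes:

    enum class StorePath { kSmi, kObjectWithBarrier, kDoubleUnbox, kSlow };

    StorePath Classify(bool fixed_array_map, bool fixed_double_array_map,
                       bool value_is_smi, bool value_is_number) {
      if (fixed_array_map) {
        return value_is_smi ? StorePath::kSmi : StorePath::kObjectWithBarrier;
      }
      if (fixed_double_array_map && value_is_number) {
        return StorePath::kDoubleUnbox;   // StoreNumberToDoubleElements
      }
      return StorePath::kSlow;            // runtime SetProperty
    }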
@@ -1510,11 +1565,9 @@ Condition CompareIC::ComputeCondition(Token::Value op) {
case Token::LT:
return lt;
case Token::GT:
- // Reverse left and right operands to obtain ECMA-262 conversion order.
- return lt;
+ return gt;
case Token::LTE:
- // Reverse left and right operands to obtain ECMA-262 conversion order.
- return ge;
+ return le;
case Token::GTE:
return ge;
default:
diff --git a/deps/v8/src/arm/lithium-arm.cc b/deps/v8/src/arm/lithium-arm.cc
index 30ccd05be..234177476 100644
--- a/deps/v8/src/arm/lithium-arm.cc
+++ b/deps/v8/src/arm/lithium-arm.cc
@@ -212,10 +212,11 @@ void LCmpIDAndBranch::PrintDataTo(StringStream* stream) {
}
-void LIsNullAndBranch::PrintDataTo(StringStream* stream) {
+void LIsNilAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if ");
InputAt(0)->PrintTo(stream);
- stream->Add(is_strict() ? " === null" : " == null");
+ stream->Add(kind() == kStrictEquality ? " === " : " == ");
+ stream->Add(nil() == kNullValue ? "null" : "undefined");
stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
}
@@ -227,6 +228,13 @@ void LIsObjectAndBranch::PrintDataTo(StringStream* stream) {
}
+void LIsStringAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if is_string(");
+ InputAt(0)->PrintTo(stream);
+ stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
void LIsSmiAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if is_smi(");
InputAt(0)->PrintTo(stream);
@@ -241,6 +249,14 @@ void LIsUndetectableAndBranch::PrintDataTo(StringStream* stream) {
}
+void LStringCompareAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if string_compare(");
+ InputAt(0)->PrintTo(stream);
+ InputAt(1)->PrintTo(stream);
+ stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if has_instance_type(");
InputAt(0)->PrintTo(stream);
@@ -390,6 +406,12 @@ void LStoreKeyedGeneric::PrintDataTo(StringStream* stream) {
}
+void LTransitionElementsKind::PrintDataTo(StringStream* stream) {
+ object()->PrintTo(stream);
+ stream->Add(" %p -> %p", *original_map(), *transitioned_map());
+}
+
+
LChunk::LChunk(CompilationInfo* info, HGraph* graph)
: spill_slot_count_(0),
info_(info),
@@ -711,7 +733,9 @@ LInstruction* LChunkBuilder::DefineFixedDouble(
LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
HEnvironment* hydrogen_env = current_block_->last_environment();
- instr->set_environment(CreateEnvironment(hydrogen_env));
+ int argument_index_accumulator = 0;
+ instr->set_environment(CreateEnvironment(hydrogen_env,
+ &argument_index_accumulator));
return instr;
}
@@ -741,7 +765,7 @@ LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
instr->MarkAsCall();
instr = AssignPointerMap(instr);
- if (hinstr->HasSideEffects()) {
+ if (hinstr->HasObservableSideEffects()) {
ASSERT(hinstr->next()->IsSimulate());
HSimulate* sim = HSimulate::cast(hinstr->next());
instr = SetInstructionPendingDeoptimizationEnvironment(
@@ -753,7 +777,8 @@ LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
// Thus we still need to attach environment to this call even if
  // the call sequence cannot deoptimize eagerly.
bool needs_environment =
- (can_deoptimize == CAN_DEOPTIMIZE_EAGERLY) || !hinstr->HasSideEffects();
+ (can_deoptimize == CAN_DEOPTIMIZE_EAGERLY) ||
+ !hinstr->HasObservableSideEffects();
if (needs_environment && !instr->HasEnvironment()) {
instr = AssignEnvironment(instr);
}
@@ -811,28 +836,6 @@ LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
}
-LInstruction* LChunkBuilder::DoBit(Token::Value op,
- HBitwiseBinaryOperation* instr) {
- if (instr->representation().IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
-
- LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
- LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
- return DefineAsRegister(new LBitI(op, left, right));
- } else {
- ASSERT(instr->representation().IsTagged());
- ASSERT(instr->left()->representation().IsTagged());
- ASSERT(instr->right()->representation().IsTagged());
-
- LOperand* left = UseFixed(instr->left(), r1);
- LOperand* right = UseFixed(instr->right(), r0);
- LArithmeticT* result = new LArithmeticT(op, left, right);
- return MarkAsCall(DefineFixed(result, r0), instr);
- }
-}
-
-
LInstruction* LChunkBuilder::DoShift(Token::Value op,
HBitwiseBinaryOperation* instr) {
if (instr->representation().IsTagged()) {
@@ -994,10 +997,13 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) {
}
-LEnvironment* LChunkBuilder::CreateEnvironment(HEnvironment* hydrogen_env) {
+LEnvironment* LChunkBuilder::CreateEnvironment(
+ HEnvironment* hydrogen_env,
+ int* argument_index_accumulator) {
if (hydrogen_env == NULL) return NULL;
- LEnvironment* outer = CreateEnvironment(hydrogen_env->outer());
+ LEnvironment* outer =
+ CreateEnvironment(hydrogen_env->outer(), argument_index_accumulator);
int ast_id = hydrogen_env->ast_id();
ASSERT(ast_id != AstNode::kNoNumber);
int value_count = hydrogen_env->length();
@@ -1007,7 +1013,6 @@ LEnvironment* LChunkBuilder::CreateEnvironment(HEnvironment* hydrogen_env) {
argument_count_,
value_count,
outer);
- int argument_index = 0;
for (int i = 0; i < value_count; ++i) {
if (hydrogen_env->is_special_index(i)) continue;
@@ -1016,7 +1021,7 @@ LEnvironment* LChunkBuilder::CreateEnvironment(HEnvironment* hydrogen_env) {
if (value->IsArgumentsObject()) {
op = NULL;
} else if (value->IsPushArgument()) {
- op = new LArgument(argument_index++);
+ op = new LArgument((*argument_index_accumulator)++);
} else {
op = UseAny(value);
}
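
Threading argument_index_accumulator through the recursion gives the argument slots of an inlined caller and its callee one consecutive numbering, instead of each frame restarting at zero as the removed local argument_index did. A minimal sketch of that numbering scheme, with a simplified Env type standing in for HEnvironment:

    #include <iostream>

    struct Env {
      Env* outer;
      int pushed_args;  // PushArgument values owned by this frame
    };

    // Outer (inlined-caller) frames are numbered first; the accumulator then
    // carries the running index into the inner frame instead of resetting it.
    void NumberArguments(Env* env, int* acc) {
      if (env == nullptr) return;
      NumberArguments(env->outer, acc);
      for (int i = 0; i < env->pushed_args; ++i)
        std::cout << "LArgument(" << (*acc)++ << ")\n";
    }

    int main() {
      Env caller{nullptr, 2};
      Env callee{&caller, 3};
      int acc = 0;
      NumberArguments(&callee, &acc);  // prints indices 0..4, no repeats
      return 0;
    }
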
@@ -1206,8 +1211,9 @@ LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
+ LOperand* function = UseFixed(instr->function(), r1);
argument_count_ -= instr->argument_count();
- return MarkAsCall(DefineFixed(new LCallFunction, r0), instr);
+ return MarkAsCall(DefineFixed(new LCallFunction(function), r0), instr);
}
@@ -1232,8 +1238,24 @@ LInstruction* LChunkBuilder::DoShl(HShl* instr) {
}
-LInstruction* LChunkBuilder::DoBitAnd(HBitAnd* instr) {
- return DoBit(Token::BIT_AND, instr);
+LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
+ if (instr->representation().IsInteger32()) {
+ ASSERT(instr->left()->representation().IsInteger32());
+ ASSERT(instr->right()->representation().IsInteger32());
+
+ LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
+ LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
+ return DefineAsRegister(new LBitI(left, right));
+ } else {
+ ASSERT(instr->representation().IsTagged());
+ ASSERT(instr->left()->representation().IsTagged());
+ ASSERT(instr->right()->representation().IsTagged());
+
+ LOperand* left = UseFixed(instr->left(), r1);
+ LOperand* right = UseFixed(instr->right(), r0);
+ LArithmeticT* result = new LArithmeticT(instr->op(), left, right);
+ return MarkAsCall(DefineFixed(result, r0), instr);
+ }
}
@@ -1244,16 +1266,6 @@ LInstruction* LChunkBuilder::DoBitNot(HBitNot* instr) {
}
-LInstruction* LChunkBuilder::DoBitOr(HBitOr* instr) {
- return DoBit(Token::BIT_OR, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoBitXor(HBitXor* instr) {
- return DoBit(Token::BIT_XOR, instr);
-}
-
-
LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::DIV, instr);
@@ -1399,12 +1411,10 @@ LInstruction* LChunkBuilder::DoPower(HPower* instr) {
LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
- Token::Value op = instr->token();
ASSERT(instr->left()->representation().IsTagged());
ASSERT(instr->right()->representation().IsTagged());
- bool reversed = (op == Token::GT || op == Token::LTE);
- LOperand* left = UseFixed(instr->left(), reversed ? r0 : r1);
- LOperand* right = UseFixed(instr->right(), reversed ? r1 : r0);
+ LOperand* left = UseFixed(instr->left(), r1);
+ LOperand* right = UseFixed(instr->right(), r0);
LCmpT* result = new LCmpT(left, right);
return MarkAsCall(DefineFixed(result, r0), instr);
}
@@ -1416,8 +1426,8 @@ LInstruction* LChunkBuilder::DoCompareIDAndBranch(
if (r.IsInteger32()) {
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
- LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseRegisterAtStart(instr->right());
+ LOperand* left = UseRegisterOrConstantAtStart(instr->left());
+ LOperand* right = UseRegisterOrConstantAtStart(instr->right());
return new LCmpIDAndBranch(left, right);
} else {
ASSERT(r.IsDouble());
@@ -1444,9 +1454,9 @@ LInstruction* LChunkBuilder::DoCompareConstantEqAndBranch(
}
-LInstruction* LChunkBuilder::DoIsNullAndBranch(HIsNullAndBranch* instr) {
+LInstruction* LChunkBuilder::DoIsNilAndBranch(HIsNilAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
- return new LIsNullAndBranch(UseRegisterAtStart(instr->value()));
+ return new LIsNilAndBranch(UseRegisterAtStart(instr->value()));
}
@@ -1457,6 +1467,13 @@ LInstruction* LChunkBuilder::DoIsObjectAndBranch(HIsObjectAndBranch* instr) {
}
+LInstruction* LChunkBuilder::DoIsStringAndBranch(HIsStringAndBranch* instr) {
+ ASSERT(instr->value()->representation().IsTagged());
+ LOperand* temp = TempRegister();
+ return new LIsStringAndBranch(UseRegisterAtStart(instr->value()), temp);
+}
+
+
LInstruction* LChunkBuilder::DoIsSmiAndBranch(HIsSmiAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
return new LIsSmiAndBranch(Use(instr->value()));
@@ -1471,6 +1488,17 @@ LInstruction* LChunkBuilder::DoIsUndetectableAndBranch(
}
+LInstruction* LChunkBuilder::DoStringCompareAndBranch(
+ HStringCompareAndBranch* instr) {
+ ASSERT(instr->left()->representation().IsTagged());
+ ASSERT(instr->right()->representation().IsTagged());
+ LOperand* left = UseFixed(instr->left(), r1);
+ LOperand* right = UseFixed(instr->right(), r0);
+ LStringCompareAndBranch* result = new LStringCompareAndBranch(left, right);
+ return MarkAsCall(result, instr);
+}
+
+
LInstruction* LChunkBuilder::DoHasInstanceTypeAndBranch(
HHasInstanceTypeAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
@@ -1734,7 +1762,7 @@ LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) {
LLoadGlobalCell* result = new LLoadGlobalCell;
- return instr->check_hole_value()
+ return instr->RequiresHoleCheck()
? AssignEnvironment(DefineAsRegister(result))
: DefineAsRegister(result);
}
@@ -1748,14 +1776,11 @@ LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) {
- if (instr->check_hole_value()) {
- LOperand* temp = TempRegister();
- LOperand* value = UseRegister(instr->value());
- return AssignEnvironment(new LStoreGlobalCell(value, temp));
- } else {
- LOperand* value = UseRegisterAtStart(instr->value());
- return new LStoreGlobalCell(value, NULL);
- }
+ LOperand* temp = TempRegister();
+ LOperand* value = UseTempRegister(instr->value());
+ LInstruction* result = new LStoreGlobalCell(value, temp);
+ if (instr->RequiresHoleCheck()) result = AssignEnvironment(result);
+ return result;
}
@@ -1968,6 +1993,26 @@ LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
}
+LInstruction* LChunkBuilder::DoTransitionElementsKind(
+ HTransitionElementsKind* instr) {
+ if (instr->original_map()->elements_kind() == FAST_SMI_ONLY_ELEMENTS &&
+ instr->transitioned_map()->elements_kind() == FAST_ELEMENTS) {
+ LOperand* object = UseRegister(instr->object());
+ LOperand* new_map_reg = TempRegister();
+ LTransitionElementsKind* result =
+ new LTransitionElementsKind(object, new_map_reg, NULL);
+ return DefineSameAsFirst(result);
+ } else {
+ LOperand* object = UseFixed(instr->object(), r0);
+ LOperand* fixed_object_reg = FixedTemp(r2);
+ LOperand* new_map_reg = FixedTemp(r3);
+ LTransitionElementsKind* result =
+ new LTransitionElementsKind(object, new_map_reg, fixed_object_reg);
+ return MarkAsCall(DefineFixed(result, r0), instr);
+ }
+}
+
+
LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
bool needs_write_barrier = instr->NeedsWriteBarrier();
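
The DoTransitionElementsKind builder above reflects that only the smi-only to fast-object transition leaves the backing store's representation untouched, so only it can be compiled as an in-place map swap; transitions involving doubles must call out through a builtin with fixed temps (r2, r3). A small sketch of the dispatch rule, using an abbreviated stand-in for the real ElementsKind set:

    #include <cassert>

    enum Kind { FAST_SMI_ONLY_ELEMENTS, FAST_ELEMENTS, FAST_DOUBLE_ELEMENTS };

    // Only smi-only -> fast-object keeps every element's representation the
    // same, so only it qualifies for the cheap map-swap path.
    bool IsSimpleMapTransition(Kind from, Kind to) {
      return from == FAST_SMI_ONLY_ELEMENTS && to == FAST_ELEMENTS;
    }

    int main() {
      assert(IsSimpleMapTransition(FAST_SMI_ONLY_ELEMENTS, FAST_ELEMENTS));
      assert(!IsSimpleMapTransition(FAST_SMI_ONLY_ELEMENTS,
                                    FAST_DOUBLE_ELEMENTS));
      assert(!IsSimpleMapTransition(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS));
      return 0;
    }
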
@@ -2025,8 +2070,14 @@ LInstruction* LChunkBuilder::DoArrayLiteral(HArrayLiteral* instr) {
}
-LInstruction* LChunkBuilder::DoObjectLiteral(HObjectLiteral* instr) {
- return MarkAsCall(DefineFixed(new LObjectLiteral, r0), instr);
+LInstruction* LChunkBuilder::DoObjectLiteralFast(HObjectLiteralFast* instr) {
+ return MarkAsCall(DefineFixed(new LObjectLiteralFast, r0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoObjectLiteralGeneric(
+ HObjectLiteralGeneric* instr) {
+ return MarkAsCall(DefineFixed(new LObjectLiteralGeneric, r0), instr);
}
diff --git a/deps/v8/src/arm/lithium-arm.h b/deps/v8/src/arm/lithium-arm.h
index 8c18760fd..6051ad973 100644
--- a/deps/v8/src/arm/lithium-arm.h
+++ b/deps/v8/src/arm/lithium-arm.h
@@ -107,10 +107,12 @@ class LCodeGen;
V(Integer32ToDouble) \
V(InvokeFunction) \
V(IsConstructCallAndBranch) \
- V(IsNullAndBranch) \
+ V(IsNilAndBranch) \
V(IsObjectAndBranch) \
+ V(IsStringAndBranch) \
V(IsSmiAndBranch) \
V(IsUndetectableAndBranch) \
+ V(StringCompareAndBranch) \
V(JSArrayLength) \
V(Label) \
V(LazyBailout) \
@@ -132,7 +134,8 @@ class LCodeGen;
V(NumberTagD) \
V(NumberTagI) \
V(NumberUntagD) \
- V(ObjectLiteral) \
+ V(ObjectLiteralFast) \
+ V(ObjectLiteralGeneric) \
V(OsrEntry) \
V(OuterContext) \
V(Parameter) \
@@ -162,6 +165,7 @@ class LCodeGen;
V(ThisFunction) \
V(Throw) \
V(ToFastProperties) \
+ V(TransitionElementsKind) \
V(Typeof) \
V(TypeofIsAndBranch) \
V(UnaryMathOperation) \
@@ -627,16 +631,17 @@ class LCmpConstantEqAndBranch: public LControlInstruction<1, 0> {
};
-class LIsNullAndBranch: public LControlInstruction<1, 0> {
+class LIsNilAndBranch: public LControlInstruction<1, 0> {
public:
- explicit LIsNullAndBranch(LOperand* value) {
+ explicit LIsNilAndBranch(LOperand* value) {
inputs_[0] = value;
}
- DECLARE_CONCRETE_INSTRUCTION(IsNullAndBranch, "is-null-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(IsNullAndBranch)
+ DECLARE_CONCRETE_INSTRUCTION(IsNilAndBranch, "is-nil-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(IsNilAndBranch)
- bool is_strict() const { return hydrogen()->is_strict(); }
+ EqualityKind kind() const { return hydrogen()->kind(); }
+ NilValue nil() const { return hydrogen()->nil(); }
virtual void PrintDataTo(StringStream* stream);
};
@@ -656,6 +661,20 @@ class LIsObjectAndBranch: public LControlInstruction<1, 1> {
};
+class LIsStringAndBranch: public LControlInstruction<1, 1> {
+ public:
+ LIsStringAndBranch(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch, "is-string-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(IsStringAndBranch)
+
+ virtual void PrintDataTo(StringStream* stream);
+};
+
+
class LIsSmiAndBranch: public LControlInstruction<1, 0> {
public:
explicit LIsSmiAndBranch(LOperand* value) {
@@ -684,6 +703,23 @@ class LIsUndetectableAndBranch: public LControlInstruction<1, 1> {
};
+class LStringCompareAndBranch: public LControlInstruction<2, 0> {
+ public:
+ LStringCompareAndBranch(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(StringCompareAndBranch,
+ "string-compare-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(StringCompareAndBranch)
+
+ Token::Value op() const { return hydrogen()->token(); }
+
+ virtual void PrintDataTo(StringStream* stream);
+};
+
+
class LHasInstanceTypeAndBranch: public LControlInstruction<1, 0> {
public:
explicit LHasInstanceTypeAndBranch(LOperand* value) {
@@ -794,18 +830,15 @@ class LBoundsCheck: public LTemplateInstruction<0, 2, 0> {
class LBitI: public LTemplateInstruction<1, 2, 0> {
public:
- LBitI(Token::Value op, LOperand* left, LOperand* right)
- : op_(op) {
+ LBitI(LOperand* left, LOperand* right) {
inputs_[0] = left;
inputs_[1] = right;
}
- Token::Value op() const { return op_; }
+ Token::Value op() const { return hydrogen()->op(); }
DECLARE_CONCRETE_INSTRUCTION(BitI, "bit-i")
-
- private:
- Token::Value op_;
+ DECLARE_HYDROGEN_ACCESSOR(Bitwise)
};
@@ -1226,7 +1259,7 @@ class LStoreGlobalGeneric: public LTemplateInstruction<0, 2, 0> {
LOperand* global_object() { return InputAt(0); }
Handle<Object> name() const { return hydrogen()->name(); }
LOperand* value() { return InputAt(1); }
- bool strict_mode() { return hydrogen()->strict_mode(); }
+ StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
};
@@ -1259,7 +1292,6 @@ class LStoreContextSlot: public LTemplateInstruction<0, 2, 0> {
LOperand* context() { return InputAt(0); }
LOperand* value() { return InputAt(1); }
int slot_index() { return hydrogen()->slot_index(); }
- int needs_write_barrier() { return hydrogen()->NeedsWriteBarrier(); }
virtual void PrintDataTo(StringStream* stream);
};
@@ -1276,7 +1308,9 @@ class LPushArgument: public LTemplateInstruction<0, 1, 0> {
class LThisFunction: public LTemplateInstruction<1, 0, 0> {
+ public:
DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function")
+ DECLARE_HYDROGEN_ACCESSOR(ThisFunction)
};
@@ -1379,12 +1413,17 @@ class LCallNamed: public LTemplateInstruction<1, 0, 0> {
};
-class LCallFunction: public LTemplateInstruction<1, 0, 0> {
+class LCallFunction: public LTemplateInstruction<1, 1, 0> {
public:
+ explicit LCallFunction(LOperand* function) {
+ inputs_[0] = function;
+ }
+
DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function")
DECLARE_HYDROGEN_ACCESSOR(CallFunction)
- int arity() const { return hydrogen()->argument_count() - 2; }
+ LOperand* function() { return inputs_[0]; }
+ int arity() const { return hydrogen()->argument_count() - 1; }
};
@@ -1560,7 +1599,6 @@ class LStoreNamedField: public LTemplateInstruction<0, 2, 0> {
Handle<Object> name() const { return hydrogen()->name(); }
bool is_in_object() { return hydrogen()->is_in_object(); }
int offset() { return hydrogen()->offset(); }
- bool needs_write_barrier() { return hydrogen()->NeedsWriteBarrier(); }
Handle<Map> transition() const { return hydrogen()->transition(); }
};
@@ -1580,7 +1618,7 @@ class LStoreNamedGeneric: public LTemplateInstruction<0, 2, 0> {
LOperand* object() { return inputs_[0]; }
LOperand* value() { return inputs_[1]; }
Handle<Object> name() const { return hydrogen()->name(); }
- bool strict_mode() { return hydrogen()->strict_mode(); }
+ StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
};
@@ -1642,7 +1680,7 @@ class LStoreKeyedGeneric: public LTemplateInstruction<0, 3, 0> {
LOperand* object() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
- bool strict_mode() { return hydrogen()->strict_mode(); }
+ StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
};
class LStoreKeyedSpecializedArrayElement: public LTemplateInstruction<0, 3, 0> {
@@ -1668,6 +1706,30 @@ class LStoreKeyedSpecializedArrayElement: public LTemplateInstruction<0, 3, 0> {
};
+class LTransitionElementsKind: public LTemplateInstruction<1, 1, 2> {
+ public:
+ LTransitionElementsKind(LOperand* object,
+ LOperand* new_map_temp,
+ LOperand* temp_reg) {
+ inputs_[0] = object;
+ temps_[0] = new_map_temp;
+ temps_[1] = temp_reg;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(TransitionElementsKind,
+ "transition-elements-kind")
+ DECLARE_HYDROGEN_ACCESSOR(TransitionElementsKind)
+
+ virtual void PrintDataTo(StringStream* stream);
+
+ LOperand* object() { return inputs_[0]; }
+ LOperand* new_map_reg() { return temps_[0]; }
+ LOperand* temp_reg() { return temps_[1]; }
+ Handle<Map> original_map() { return hydrogen()->original_map(); }
+ Handle<Map> transitioned_map() { return hydrogen()->transitioned_map(); }
+};
+
+
class LStringAdd: public LTemplateInstruction<1, 2, 0> {
public:
LStringAdd(LOperand* left, LOperand* right) {
@@ -1838,10 +1900,17 @@ class LArrayLiteral: public LTemplateInstruction<1, 0, 0> {
};
-class LObjectLiteral: public LTemplateInstruction<1, 0, 0> {
+class LObjectLiteralFast: public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(ObjectLiteralFast, "object-literal-fast")
+ DECLARE_HYDROGEN_ACCESSOR(ObjectLiteralFast)
+};
+
+
+class LObjectLiteralGeneric: public LTemplateInstruction<1, 0, 0> {
public:
- DECLARE_CONCRETE_INSTRUCTION(ObjectLiteral, "object-literal")
- DECLARE_HYDROGEN_ACCESSOR(ObjectLiteral)
+ DECLARE_CONCRETE_INSTRUCTION(ObjectLiteralGeneric, "object-literal-generic")
+ DECLARE_HYDROGEN_ACCESSOR(ObjectLiteralGeneric)
};
@@ -2159,12 +2228,12 @@ class LChunkBuilder BASE_EMBEDDED {
LInstruction* instr, int ast_id);
void ClearInstructionPendingDeoptimizationEnvironment();
- LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env);
+ LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env,
+ int* argument_index_accumulator);
void VisitInstruction(HInstruction* current);
void DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block);
- LInstruction* DoBit(Token::Value op, HBitwiseBinaryOperation* instr);
LInstruction* DoShift(Token::Value op, HBitwiseBinaryOperation* instr);
LInstruction* DoArithmeticD(Token::Value op,
HArithmeticBinaryOperation* instr);
diff --git a/deps/v8/src/arm/lithium-codegen-arm.cc b/deps/v8/src/arm/lithium-codegen-arm.cc
index f5d744914..22a504fc6 100644
--- a/deps/v8/src/arm/lithium-codegen-arm.cc
+++ b/deps/v8/src/arm/lithium-codegen-arm.cc
@@ -40,37 +40,22 @@ class SafepointGenerator : public CallWrapper {
public:
SafepointGenerator(LCodeGen* codegen,
LPointerMap* pointers,
- int deoptimization_index)
+ Safepoint::DeoptMode mode)
: codegen_(codegen),
pointers_(pointers),
- deoptimization_index_(deoptimization_index) { }
+ deopt_mode_(mode) { }
virtual ~SafepointGenerator() { }
- virtual void BeforeCall(int call_size) const {
- ASSERT(call_size >= 0);
- // Ensure that we have enough space after the previous safepoint position
- // for the generated code there.
- int call_end = codegen_->masm()->pc_offset() + call_size;
- int prev_jump_end =
- codegen_->LastSafepointEnd() + Deoptimizer::patch_size();
- if (call_end < prev_jump_end) {
- int padding_size = prev_jump_end - call_end;
- ASSERT_EQ(0, padding_size % Assembler::kInstrSize);
- while (padding_size > 0) {
- codegen_->masm()->nop();
- padding_size -= Assembler::kInstrSize;
- }
- }
- }
+ virtual void BeforeCall(int call_size) const { }
virtual void AfterCall() const {
- codegen_->RecordSafepoint(pointers_, deoptimization_index_);
+ codegen_->RecordSafepoint(pointers_, deopt_mode_);
}
private:
LCodeGen* codegen_;
LPointerMap* pointers_;
- int deoptimization_index_;
+ Safepoint::DeoptMode deopt_mode_;
};
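
With lazy deoptimization now keyed off recorded pc offsets, SafepointGenerator no longer pads every call site with nops; the EnsureSpaceForLazyDeopt call added to GenerateBody later in this diff reserves the patch area once instead. For reference, a standalone sketch of the padding computation the removed BeforeCall performed, assuming 4-byte ARM instructions:

    #include <cassert>

    const int kInstrSize = 4;

    // Old scheme (removed above): emit nops so the patcher always had
    // patch_size bytes after the previous safepoint. Returns the nop count.
    int PaddingNops(int pc_offset, int call_size,
                    int last_safepoint_end, int patch_size) {
      int call_end = pc_offset + call_size;
      int prev_jump_end = last_safepoint_end + patch_size;
      int gap = prev_jump_end - call_end;
      return gap > 0 ? gap / kInstrSize : 0;
    }

    int main() {
      assert(PaddingNops(100, 8, 104, 12) == 2);   // 8 bytes short -> 2 nops
      assert(PaddingNops(100, 20, 104, 12) == 0);  // already past patch area
      return 0;
    }
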
@@ -82,6 +67,14 @@ bool LCodeGen::GenerateCode() {
status_ = GENERATING;
CpuFeatures::Scope scope1(VFP3);
CpuFeatures::Scope scope2(ARMv7);
+
+ CodeStub::GenerateFPStubs();
+
+ // Open a frame scope to indicate that there is a frame on the stack. The
+ // NONE indicates that the scope shouldn't actually generate code to set up
+ // the frame (that is done in GeneratePrologue).
+ FrameScope frame_scope(masm_, StackFrame::NONE);
+
return GeneratePrologue() &&
GenerateBody() &&
GenerateDeferredCode() &&
@@ -95,7 +88,6 @@ void LCodeGen::FinishCode(Handle<Code> code) {
code->set_stack_slots(GetStackSlotCount());
code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
PopulateDeoptimizationData(code);
- Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(code);
}
@@ -151,7 +143,7 @@ bool LCodeGen::GeneratePrologue() {
// with undefined when called as functions (without an explicit
// receiver object). r5 is zero for method calls and non-zero for
// function calls.
- if (info_->is_strict_mode() || info_->is_native()) {
+ if (!info_->is_classic_mode() || info_->is_native()) {
Label ok;
__ cmp(r5, Operand(0));
__ b(eq, &ok);
@@ -192,7 +184,7 @@ bool LCodeGen::GeneratePrologue() {
} else {
__ CallRuntime(Runtime::kNewFunctionContext, 1);
}
- RecordSafepoint(Safepoint::kNoDeoptimizationIndex);
+ RecordSafepoint(Safepoint::kNoLazyDeopt);
// Context is returned in both r0 and cp. It replaces the context
// passed to us. It's saved in the stack and kept live in cp.
__ str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -206,13 +198,11 @@ bool LCodeGen::GeneratePrologue() {
// Load parameter from stack.
__ ldr(r0, MemOperand(fp, parameter_offset));
// Store it in the context.
- __ mov(r1, Operand(Context::SlotOffset(var->index())));
- __ str(r0, MemOperand(cp, r1));
- // Update the write barrier. This clobbers all involved
- // registers, so we have to use two more registers to avoid
- // clobbering cp.
- __ mov(r2, Operand(cp));
- __ RecordWrite(r2, Operand(r1), r3, r0);
+ MemOperand target = ContextOperand(cp, var->index());
+ __ str(r0, target);
+ // Update the write barrier. This clobbers r3 and r0.
+ __ RecordWriteContextSlot(
+ cp, target.offset(), r0, r3, kLRHasBeenSaved, kSaveFPRegs);
}
}
Comment(";;; End allocate local context");
@@ -243,35 +233,23 @@ bool LCodeGen::GenerateBody() {
instr->CompileToNative(this);
}
}
+ EnsureSpaceForLazyDeopt();
return !is_aborted();
}
-LInstruction* LCodeGen::GetNextInstruction() {
- if (current_instruction_ < instructions_->length() - 1) {
- return instructions_->at(current_instruction_ + 1);
- } else {
- return NULL;
- }
-}
-
-
bool LCodeGen::GenerateDeferredCode() {
ASSERT(is_generating());
if (deferred_.length() > 0) {
for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
LDeferredCode* code = deferred_[i];
__ bind(code->entry());
+ Comment(";;; Deferred code @%d: %s.",
+ code->instruction_index(),
+ code->instr()->Mnemonic());
code->Generate();
__ jmp(code->exit());
}
-
- // Pad code to ensure that the last piece of deferred code have
- // room for lazy bailout.
- while ((masm()->pc_offset() - LastSafepointEnd())
- < Deoptimizer::patch_size()) {
- __ nop();
- }
}
// Force constant pool emission at the end of the deferred code to make
@@ -401,6 +379,12 @@ int LCodeGen::ToInteger32(LConstantOperand* op) const {
}
+double LCodeGen::ToDouble(LConstantOperand* op) const {
+ Handle<Object> value = chunk_->LookupLiteral(op);
+ return value->Number();
+}
+
+
Operand LCodeGen::ToOperand(LOperand* op) {
if (op->IsConstantOperand()) {
LConstantOperand* const_op = LConstantOperand::cast(op);
@@ -551,7 +535,7 @@ void LCodeGen::CallCodeGeneric(Handle<Code> code,
LPointerMap* pointers = instr->pointer_map();
RecordPosition(pointers->position());
__ Call(code, mode);
- RegisterLazyDeoptimization(instr, safepoint_mode);
+ RecordSafepointWithLazyDeopt(instr, safepoint_mode);
// Signal that we don't inline smi code before these stubs in the
// optimizing code generator.
@@ -571,7 +555,7 @@ void LCodeGen::CallRuntime(const Runtime::Function* function,
RecordPosition(pointers->position());
__ CallRuntime(function, num_arguments);
- RegisterLazyDeoptimization(instr, RECORD_SIMPLE_SAFEPOINT);
+ RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
}
@@ -580,37 +564,12 @@ void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
LInstruction* instr) {
__ CallRuntimeSaveDoubles(id);
RecordSafepointWithRegisters(
- instr->pointer_map(), argc, Safepoint::kNoDeoptimizationIndex);
+ instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
}
-void LCodeGen::RegisterLazyDeoptimization(LInstruction* instr,
- SafepointMode safepoint_mode) {
- // Create the environment to bailout to. If the call has side effects
- // execution has to continue after the call otherwise execution can continue
- // from a previous bailout point repeating the call.
- LEnvironment* deoptimization_environment;
- if (instr->HasDeoptimizationEnvironment()) {
- deoptimization_environment = instr->deoptimization_environment();
- } else {
- deoptimization_environment = instr->environment();
- }
-
- RegisterEnvironmentForDeoptimization(deoptimization_environment);
- if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
- RecordSafepoint(instr->pointer_map(),
- deoptimization_environment->deoptimization_index());
- } else {
- ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
- RecordSafepointWithRegisters(
- instr->pointer_map(),
- 0,
- deoptimization_environment->deoptimization_index());
- }
-}
-
-
-void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment) {
+void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
+ Safepoint::DeoptMode mode) {
if (!environment->HasBeenRegistered()) {
// Physical stack frame layout:
// -x ............. -4 0 ..................................... y
@@ -632,14 +591,17 @@ void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment) {
Translation translation(&translations_, frame_count);
WriteTranslation(environment, &translation);
int deoptimization_index = deoptimizations_.length();
- environment->Register(deoptimization_index, translation.index());
+ int pc_offset = masm()->pc_offset();
+ environment->Register(deoptimization_index,
+ translation.index(),
+ (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
deoptimizations_.Add(environment);
}
}
void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
- RegisterEnvironmentForDeoptimization(environment);
+ RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
ASSERT(environment->HasBeenRegistered());
int id = environment->deoptimization_index();
Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER);
@@ -701,6 +663,7 @@ void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
data->SetArgumentsStackHeight(i,
Smi::FromInt(env->arguments_stack_height()));
+ data->SetPc(i, Smi::FromInt(env->pc_offset()));
}
code->set_deoptimization_data(*data);
}
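
Each registered environment now also captures the code offset at registration time, emitted into the deoptimization data via SetPc; eager deopts record -1 because they jump straight to the deopt entry rather than returning through a patched call. A minimal model of that bookkeeping; the struct and field names are assumptions, not the actual V8 layout:

    #include <cassert>

    struct DeoptEntry {
      int translation_index;
      int pc_offset;  // -1 for eager deopt, return-address offset for lazy
    };

    DeoptEntry RegisterEnvironment(int translation_index,
                                   bool lazy_deopt, int current_pc) {
      return DeoptEntry{translation_index, lazy_deopt ? current_pc : -1};
    }

    int main() {
      assert(RegisterEnvironment(3, true, 0x40).pc_offset == 0x40);
      assert(RegisterEnvironment(3, false, 0x40).pc_offset == -1);
      return 0;
    }
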
@@ -732,16 +695,28 @@ void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
}
+void LCodeGen::RecordSafepointWithLazyDeopt(
+ LInstruction* instr, SafepointMode safepoint_mode) {
+ if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
+ RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
+ } else {
+ ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 0, Safepoint::kLazyDeopt);
+ }
+}
+
+
void LCodeGen::RecordSafepoint(
LPointerMap* pointers,
Safepoint::Kind kind,
int arguments,
- int deoptimization_index) {
+ Safepoint::DeoptMode deopt_mode) {
ASSERT(expected_safepoint_kind_ == kind);
- const ZoneList<LOperand*>* operands = pointers->operands();
+ const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
- kind, arguments, deoptimization_index);
+ kind, arguments, deopt_mode);
for (int i = 0; i < operands->length(); i++) {
LOperand* pointer = operands->at(i);
if (pointer->IsStackSlot()) {
@@ -758,31 +733,31 @@ void LCodeGen::RecordSafepoint(
void LCodeGen::RecordSafepoint(LPointerMap* pointers,
- int deoptimization_index) {
- RecordSafepoint(pointers, Safepoint::kSimple, 0, deoptimization_index);
+ Safepoint::DeoptMode deopt_mode) {
+ RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
}
-void LCodeGen::RecordSafepoint(int deoptimization_index) {
+void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
LPointerMap empty_pointers(RelocInfo::kNoPosition);
- RecordSafepoint(&empty_pointers, deoptimization_index);
+ RecordSafepoint(&empty_pointers, deopt_mode);
}
void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
int arguments,
- int deoptimization_index) {
- RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments,
- deoptimization_index);
+ Safepoint::DeoptMode deopt_mode) {
+ RecordSafepoint(
+ pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
}
void LCodeGen::RecordSafepointWithRegistersAndDoubles(
LPointerMap* pointers,
int arguments,
- int deoptimization_index) {
- RecordSafepoint(pointers, Safepoint::kWithRegistersAndDoubles, arguments,
- deoptimization_index);
+ Safepoint::DeoptMode deopt_mode) {
+ RecordSafepoint(
+ pointers, Safepoint::kWithRegistersAndDoubles, arguments, deopt_mode);
}
@@ -817,12 +792,6 @@ void LCodeGen::DoGap(LGap* gap) {
LParallelMove* move = gap->GetParallelMove(inner_pos);
if (move != NULL) DoParallelMove(move);
}
-
- LInstruction* next = GetNextInstruction();
- if (next != NULL && next->IsLazyBailout()) {
- int pc = masm()->pc_offset();
- safepoints_.SetPcAfterGap(pc);
- }
}
@@ -1032,6 +1001,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
virtual void Generate() {
codegen()->DoDeferredBinaryOpStub(instr_, Token::DIV);
}
+ virtual LInstruction* instr() { return instr_; }
private:
LDivI* instr_;
};
@@ -1129,7 +1099,7 @@ void LCodeGen::DoDeferredBinaryOpStub(LTemplateInstruction<1, 2, T>* instr,
__ CallStub(&stub);
RecordSafepointWithRegistersAndDoubles(instr->pointer_map(),
0,
- Safepoint::kNoDeoptimizationIndex);
+ Safepoint::kNoLazyDeopt);
// Overwrite the stored value of r0 with the result of the stub.
__ StoreToSafepointRegistersAndDoublesSlot(r0, r0);
}
@@ -1695,30 +1665,44 @@ Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
}
-void LCodeGen::EmitCmpI(LOperand* left, LOperand* right) {
- __ cmp(ToRegister(left), ToRegister(right));
-}
-
-
void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
LOperand* left = instr->InputAt(0);
LOperand* right = instr->InputAt(1);
int false_block = chunk_->LookupDestination(instr->false_block_id());
int true_block = chunk_->LookupDestination(instr->true_block_id());
-
- if (instr->is_double()) {
- // Compare left and right as doubles and load the
- // resulting flags into the normal status register.
- __ VFPCompareAndSetFlags(ToDoubleRegister(left), ToDoubleRegister(right));
- // If a NaN is involved, i.e. the result is unordered (V set),
- // jump to false block label.
- __ b(vs, chunk_->GetAssemblyLabel(false_block));
+ Condition cond = TokenToCondition(instr->op(), false);
+
+ if (left->IsConstantOperand() && right->IsConstantOperand()) {
+ // We can statically evaluate the comparison.
+ double left_val = ToDouble(LConstantOperand::cast(left));
+ double right_val = ToDouble(LConstantOperand::cast(right));
+ int next_block =
+ EvalComparison(instr->op(), left_val, right_val) ? true_block
+ : false_block;
+ EmitGoto(next_block);
} else {
- EmitCmpI(left, right);
+ if (instr->is_double()) {
+ // Compare left and right operands as doubles and load the
+ // resulting flags into the normal status register.
+ __ VFPCompareAndSetFlags(ToDoubleRegister(left), ToDoubleRegister(right));
+ // If a NaN is involved, i.e. the result is unordered (V set),
+ // jump to false block label.
+ __ b(vs, chunk_->GetAssemblyLabel(false_block));
+ } else {
+ if (right->IsConstantOperand()) {
+ __ cmp(ToRegister(left),
+ Operand(ToInteger32(LConstantOperand::cast(right))));
+ } else if (left->IsConstantOperand()) {
+ __ cmp(ToRegister(right),
+ Operand(ToInteger32(LConstantOperand::cast(left))));
+ // We transposed the operands. Reverse the condition.
+ cond = ReverseCondition(cond);
+ } else {
+ __ cmp(ToRegister(left), ToRegister(right));
+ }
+ }
+ EmitBranch(true_block, false_block, cond);
}
-
- Condition cc = TokenToCondition(instr->op(), instr->is_double());
- EmitBranch(true_block, false_block, cc);
}
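
DoCmpIDAndBranch now folds a compare whose operands are both constants into an unconditional goto of the statically chosen block, and when only the left operand is constant it emits the cmp transposed and reverses the condition, as the code notes. A self-contained sketch of the static evaluation, assuming double semantics for the folded operands:

    #include <cassert>

    enum Op { LT, GT, LE, GE, EQ };

    // Static evaluation used when both compare operands are constants: the
    // branch target is resolved at compile time, so no cmp/branch is emitted.
    bool EvalComparison(Op op, double left, double right) {
      switch (op) {
        case LT: return left < right;
        case GT: return left > right;
        case LE: return left <= right;
        case GE: return left >= right;
        case EQ: return left == right;
      }
      return false;
    }

    int main() {
      assert(EvalComparison(LT, 1.0, 2.0));   // jump straight to true block
      assert(!EvalComparison(GE, 1.0, 2.0));  // jump straight to false block
      return 0;
    }
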
@@ -1743,25 +1727,35 @@ void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) {
}
-void LCodeGen::DoIsNullAndBranch(LIsNullAndBranch* instr) {
+void LCodeGen::DoIsNilAndBranch(LIsNilAndBranch* instr) {
Register scratch = scratch0();
Register reg = ToRegister(instr->InputAt(0));
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
- // TODO(fsc): If the expression is known to be a smi, then it's
- // definitely not null. Jump to the false block.
+ // If the expression is known to be untagged or a smi, then it's definitely
+ // not null, and it can't be an undetectable object.
+ if (instr->hydrogen()->representation().IsSpecialization() ||
+ instr->hydrogen()->type().IsSmi()) {
+ EmitGoto(false_block);
+ return;
+ }
int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- __ LoadRoot(ip, Heap::kNullValueRootIndex);
+ Heap::RootListIndex nil_value = instr->nil() == kNullValue ?
+ Heap::kNullValueRootIndex :
+ Heap::kUndefinedValueRootIndex;
+ __ LoadRoot(ip, nil_value);
__ cmp(reg, ip);
- if (instr->is_strict()) {
+ if (instr->kind() == kStrictEquality) {
EmitBranch(true_block, false_block, eq);
} else {
+ Heap::RootListIndex other_nil_value = instr->nil() == kNullValue ?
+ Heap::kUndefinedValueRootIndex :
+ Heap::kNullValueRootIndex;
Label* true_label = chunk_->GetAssemblyLabel(true_block);
Label* false_label = chunk_->GetAssemblyLabel(false_block);
__ b(eq, true_label);
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(ip, other_nil_value);
__ cmp(reg, ip);
__ b(eq, true_label);
__ JumpIfSmi(reg, false_label);
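
The branch above implements both nil flavors in one place: strict equality accepts only the named nil value, while sloppy == also accepts the other nil and undetectable objects. A compact sketch of those semantics as a plain predicate; the enum values are illustrative stand-ins for tagged values:

    #include <cassert>

    enum Value { NIL_NULL, NIL_UNDEFINED, SMI, OBJECT, UNDETECTABLE_OBJECT };
    enum Nil { kNullValue, kUndefinedValue };

    // x === nil matches only the named nil; x == nil also matches the other
    // nil and undetectable objects, mirroring the emitted branch sequence.
    bool IsNil(Value v, Nil nil, bool strict) {
      Value primary = (nil == kNullValue) ? NIL_NULL : NIL_UNDEFINED;
      if (v == primary) return true;
      if (strict) return false;
      Value other = (nil == kNullValue) ? NIL_UNDEFINED : NIL_NULL;
      return v == other || v == UNDETECTABLE_OBJECT;
    }

    int main() {
      assert(IsNil(NIL_UNDEFINED, kNullValue, false));  // undefined == null
      assert(!IsNil(NIL_UNDEFINED, kNullValue, true));  // undefined !== null
      assert(IsNil(UNDETECTABLE_OBJECT, kNullValue, false));
      assert(!IsNil(SMI, kNullValue, false));
      return 0;
    }
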
@@ -1818,6 +1812,31 @@ void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
}
+Condition LCodeGen::EmitIsString(Register input,
+ Register temp1,
+ Label* is_not_string) {
+ __ JumpIfSmi(input, is_not_string);
+ __ CompareObjectType(input, temp1, temp1, FIRST_NONSTRING_TYPE);
+
+ return lt;
+}
+
+
+void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
+ Register reg = ToRegister(instr->InputAt(0));
+ Register temp1 = ToRegister(instr->TempAt(0));
+
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
+ Label* false_label = chunk_->GetAssemblyLabel(false_block);
+
+ Condition true_cond =
+ EmitIsString(reg, temp1, false_label);
+
+ EmitBranch(true_block, false_block, true_cond);
+}
+
+
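
EmitIsString returns lt because string instance types sort below every non-string type, so after the smi bailout a single compare against FIRST_NONSTRING_TYPE decides. A sketch of the predicate; the boundary constant's value here is an assumption for illustration, not the real type-ordering constant:

    #include <cassert>

    const int FIRST_NONSTRING_TYPE = 64;  // assumed: string types come first

    // Mirrors EmitIsString: smis bail out, and a heap object is a string
    // exactly when its instance type is below the first non-string type.
    bool IsString(bool is_smi, int instance_type) {
      if (is_smi) return false;
      return instance_type < FIRST_NONSTRING_TYPE;  // the "lt" condition
    }

    int main() {
      assert(IsString(false, 5));
      assert(!IsString(false, 100));
      assert(!IsString(true, 0));
      return 0;
    }
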
void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
@@ -1843,6 +1862,41 @@ void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
}
+static Condition ComputeCompareCondition(Token::Value op) {
+ switch (op) {
+ case Token::EQ_STRICT:
+ case Token::EQ:
+ return eq;
+ case Token::LT:
+ return lt;
+ case Token::GT:
+ return gt;
+ case Token::LTE:
+ return le;
+ case Token::GTE:
+ return ge;
+ default:
+ UNREACHABLE();
+ return kNoCondition;
+ }
+}
+
+
+void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
+ Token::Value op = instr->op();
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+ Handle<Code> ic = CompareIC::GetUninitialized(op);
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+ __ cmp(r0, Operand(0)); // This instruction also signals no smi code inlined.
+
+ Condition condition = ComputeCompareCondition(op);
+
+ EmitBranch(true_block, false_block, condition);
+}
+
+
static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
InstanceType from = instr->from();
InstanceType to = instr->to();
@@ -1918,28 +1972,36 @@ void LCodeGen::EmitClassOfTest(Label* is_true,
ASSERT(!input.is(temp));
ASSERT(!temp.is(temp2)); // But input and temp2 may be the same register.
__ JumpIfSmi(input, is_false);
- __ CompareObjectType(input, temp, temp2, FIRST_SPEC_OBJECT_TYPE);
- __ b(lt, is_false);
- // Map is now in temp.
- // Functions have class 'Function'.
- __ CompareInstanceType(temp, temp2, FIRST_CALLABLE_SPEC_OBJECT_TYPE);
if (class_name->IsEqualTo(CStrVector("Function"))) {
- __ b(ge, is_true);
+ // Assuming the following assertions, we can use the same compares to test
+ // for both being a function type and being in the object type range.
+ STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
+ STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
+ FIRST_SPEC_OBJECT_TYPE + 1);
+ STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
+ LAST_SPEC_OBJECT_TYPE - 1);
+ STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+ __ CompareObjectType(input, temp, temp2, FIRST_SPEC_OBJECT_TYPE);
+ __ b(lt, is_false);
+ __ b(eq, is_true);
+ __ cmp(temp2, Operand(LAST_SPEC_OBJECT_TYPE));
+ __ b(eq, is_true);
} else {
- __ b(ge, is_false);
+ // Faster code path to avoid two compares: subtract lower bound from the
+ // actual type and do a signed compare with the width of the type range.
+ __ ldr(temp, FieldMemOperand(input, HeapObject::kMapOffset));
+ __ ldrb(temp2, FieldMemOperand(temp, Map::kInstanceTypeOffset));
+ __ sub(temp2, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+ __ cmp(temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
+ FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+ __ b(gt, is_false);
}
+ // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
// Check if the constructor in the map is a function.
__ ldr(temp, FieldMemOperand(temp, Map::kConstructorOffset));
- // As long as LAST_CALLABLE_SPEC_OBJECT_TYPE is the last instance type and
- // FIRST_CALLABLE_SPEC_OBJECT_TYPE comes right after
- // LAST_NONCALLABLE_SPEC_OBJECT_TYPE, we can avoid checking for the latter.
- STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
- STATIC_ASSERT(FIRST_CALLABLE_SPEC_OBJECT_TYPE ==
- LAST_NONCALLABLE_SPEC_OBJECT_TYPE + 1);
-
// Objects with a non-function constructor have class 'Object'.
__ CompareObjectType(temp, temp2, temp2, JS_FUNCTION_TYPE);
if (class_name->IsEqualTo(CStrVector("Object"))) {
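
The faster path in the hunk above replaces two bounds checks with the classic subtract-and-compare range test. The unsigned formulation of the same idea, as a runnable sketch:

    #include <cassert>

    // "first <= t && t <= last" collapses into one compare after subtracting
    // the lower bound; values below first wrap around to large unsigned
    // numbers and fail the same test.
    bool InRange(unsigned t, unsigned first, unsigned last) {
      return t - first <= last - first;
    }

    int main() {
      assert(InRange(5, 3, 7));
      assert(!InRange(2, 3, 7));  // 2 - 3 wraps to a huge unsigned value
      assert(!InRange(9, 3, 7));
      return 0;
    }
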
@@ -2014,11 +2076,10 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
LInstanceOfKnownGlobal* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() {
- codegen()->DoDeferredLInstanceOfKnownGlobal(instr_, &map_check_);
+ codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
}
-
+ virtual LInstruction* instr() { return instr_; }
Label* map_check() { return &map_check_; }
-
private:
LInstanceOfKnownGlobal* instr_;
Label map_check_;
@@ -2082,8 +2143,8 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
}
-void LCodeGen::DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
- Label* map_check) {
+void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
+ Label* map_check) {
Register result = ToRegister(instr->result());
ASSERT(result.is(r0));
@@ -2115,32 +2176,15 @@ void LCodeGen::DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
RelocInfo::CODE_TARGET,
instr,
RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+ ASSERT(instr->HasDeoptimizationEnvironment());
+ LEnvironment* env = instr->deoptimization_environment();
+ safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
// Put the result value into the result register slot and
// restore all registers.
__ StoreToSafepointRegisterSlot(result, result);
}
-static Condition ComputeCompareCondition(Token::Value op) {
- switch (op) {
- case Token::EQ_STRICT:
- case Token::EQ:
- return eq;
- case Token::LT:
- return lt;
- case Token::GT:
- return gt;
- case Token::LTE:
- return le;
- case Token::GTE:
- return ge;
- default:
- UNREACHABLE();
- return kNoCondition;
- }
-}
-
-
void LCodeGen::DoCmpT(LCmpT* instr) {
Token::Value op = instr->op();
@@ -2149,9 +2193,6 @@ void LCodeGen::DoCmpT(LCmpT* instr) {
__ cmp(r0, Operand(0)); // This instruction also signals no smi code inlined.
Condition condition = ComputeCompareCondition(op);
- if (op == Token::GT || op == Token::LTE) {
- condition = ReverseCondition(condition);
- }
__ LoadRoot(ToRegister(instr->result()),
Heap::kTrueValueRootIndex,
condition);
@@ -2180,7 +2221,7 @@ void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
Register result = ToRegister(instr->result());
__ mov(ip, Operand(Handle<Object>(instr->hydrogen()->cell())));
__ ldr(result, FieldMemOperand(ip, JSGlobalPropertyCell::kValueOffset));
- if (instr->hydrogen()->check_hole_value()) {
+ if (instr->hydrogen()->RequiresHoleCheck()) {
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(result, ip);
DeoptimizeIf(eq, instr->environment());
@@ -2203,6 +2244,7 @@ void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
Register value = ToRegister(instr->InputAt(0));
Register scratch = scratch0();
+ Register scratch2 = ToRegister(instr->TempAt(0));
// Load the cell.
__ mov(scratch, Operand(Handle<Object>(instr->hydrogen()->cell())));
@@ -2211,8 +2253,7 @@ void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
// been deleted from the property dictionary. In that case, we need
// to update the property details in the property dictionary to mark
// it as no longer deleted.
- if (instr->hydrogen()->check_hole_value()) {
- Register scratch2 = ToRegister(instr->TempAt(0));
+ if (instr->hydrogen()->RequiresHoleCheck()) {
__ ldr(scratch2,
FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
@@ -2222,6 +2263,21 @@ void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
// Store the value.
__ str(value, FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
+
+ // Cells are always in the remembered set.
+ if (instr->hydrogen()->NeedsWriteBarrier()) {
+ HType type = instr->hydrogen()->value()->type();
+ SmiCheck check_needed =
+ type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+ __ RecordWriteField(scratch,
+ JSGlobalPropertyCell::kValueOffset,
+ value,
+ scratch2,
+ kLRHasBeenSaved,
+ kSaveFPRegs,
+ OMIT_REMEMBERED_SET,
+ check_needed);
+ }
}
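
The new barrier call above omits the remembered-set update, since global property cells always live in the remembered set, and skips the dynamic smi test when the value's static type already proves a heap object. A sketch of that policy as a plain decision function; the enums mirror the flag names in the diff, but the helper itself is invented:

    #include <cassert>

    enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
    enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };

    struct BarrierConfig {
      SmiCheck smi_check;
      RememberedSetAction rs_action;
    };

    // Cells never need remembered-set updates, and a statically known heap
    // object lets the barrier drop its smi check entirely.
    BarrierConfig CellStoreBarrier(bool value_is_known_heap_object) {
      BarrierConfig c;
      c.smi_check =
          value_is_known_heap_object ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
      c.rs_action = OMIT_REMEMBERED_SET;  // cells are always in the set
      return c;
    }

    int main() {
      assert(CellStoreBarrier(true).smi_check == OMIT_SMI_CHECK);
      assert(CellStoreBarrier(false).smi_check == INLINE_SMI_CHECK);
      return 0;
    }
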
@@ -2230,7 +2286,7 @@ void LCodeGen::DoStoreGlobalGeneric(LStoreGlobalGeneric* instr) {
ASSERT(ToRegister(instr->value()).is(r0));
__ mov(r2, Operand(instr->name()));
- Handle<Code> ic = instr->strict_mode()
+ Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
? isolate()->builtins()->StoreIC_Initialize_Strict()
: isolate()->builtins()->StoreIC_Initialize();
CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
@@ -2247,10 +2303,20 @@ void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
Register context = ToRegister(instr->context());
Register value = ToRegister(instr->value());
- __ str(value, ContextOperand(context, instr->slot_index()));
- if (instr->needs_write_barrier()) {
- int offset = Context::SlotOffset(instr->slot_index());
- __ RecordWrite(context, Operand(offset), value, scratch0());
+ MemOperand target = ContextOperand(context, instr->slot_index());
+ __ str(value, target);
+ if (instr->hydrogen()->NeedsWriteBarrier()) {
+ HType type = instr->hydrogen()->value()->type();
+ SmiCheck check_needed =
+ type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+ __ RecordWriteContextSlot(context,
+ target.offset(),
+ value,
+ scratch0(),
+ kLRHasBeenSaved,
+ kSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ check_needed);
}
}
@@ -2271,7 +2337,7 @@ void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
Register object,
Handle<Map> type,
Handle<String> name) {
- LookupResult lookup;
+ LookupResult lookup(isolate());
type->LookupInDescriptors(NULL, *name, &lookup);
ASSERT(lookup.IsProperty() &&
(lookup.type() == FIELD || lookup.type() == CONSTANT_FUNCTION));
@@ -2500,13 +2566,9 @@ void LCodeGen::DoLoadKeyedFastDoubleElement(
Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
}
- if (instr->hydrogen()->RequiresHoleCheck()) {
- // TODO(danno): If no hole check is required, there is no need to allocate
- // elements into a temporary register, instead scratch can be used.
- __ ldr(scratch, MemOperand(elements, sizeof(kHoleNanLower32)));
- __ cmp(scratch, Operand(kHoleNanUpper32));
- DeoptimizeIf(eq, instr->environment());
- }
+ __ ldr(scratch, MemOperand(elements, sizeof(kHoleNanLower32)));
+ __ cmp(scratch, Operand(kHoleNanUpper32));
+ DeoptimizeIf(eq, instr->environment());
__ vldr(result, elements, 0);
}
@@ -2577,6 +2639,7 @@ void LCodeGen::DoLoadKeyedSpecializedArrayElement(
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
+ case FAST_SMI_ONLY_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
@@ -2712,12 +2775,9 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
__ bind(&invoke);
ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
LPointerMap* pointers = instr->pointer_map();
- LEnvironment* env = instr->deoptimization_environment();
RecordPosition(pointers->position());
- RegisterEnvironmentForDeoptimization(env);
- SafepointGenerator safepoint_generator(this,
- pointers,
- env->deoptimization_index());
+ SafepointGenerator safepoint_generator(
+ this, pointers, Safepoint::kLazyDeopt);
// The number of arguments is stored in receiver which is r0, as expected
// by InvokeFunction.
v8::internal::ParameterCount actual(receiver);
@@ -2740,7 +2800,7 @@ void LCodeGen::DoPushArgument(LPushArgument* instr) {
void LCodeGen::DoThisFunction(LThisFunction* instr) {
Register result = ToRegister(instr->result());
- __ ldr(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ LoadHeapObject(result, instr->hydrogen()->closure());
}
@@ -2799,7 +2859,7 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
__ Call(ip);
// Setup deoptimization.
- RegisterLazyDeoptimization(instr, RECORD_SIMPLE_SAFEPOINT);
+ RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
// Restore context.
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -2906,6 +2966,7 @@ void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
virtual void Generate() {
codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
}
+ virtual LInstruction* instr() { return instr_; }
private:
LUnaryMathOperation* instr_;
};
@@ -3109,6 +3170,14 @@ void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
}
+void LCodeGen::DoMathTan(LUnaryMathOperation* instr) {
+ ASSERT(ToDoubleRegister(instr->result()).is(d2));
+ TranscendentalCacheStub stub(TranscendentalCache::TAN,
+ TranscendentalCacheStub::UNTAGGED);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+}
+
+
void LCodeGen::DoMathCos(LUnaryMathOperation* instr) {
ASSERT(ToDoubleRegister(instr->result()).is(d2));
TranscendentalCacheStub stub(TranscendentalCache::COS,
@@ -3148,6 +3217,9 @@ void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) {
case kMathSin:
DoMathSin(instr);
break;
+ case kMathTan:
+ DoMathTan(instr);
+ break;
case kMathLog:
DoMathLog(instr);
break;
@@ -3163,10 +3235,8 @@ void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
ASSERT(instr->HasPointerMap());
ASSERT(instr->HasDeoptimizationEnvironment());
LPointerMap* pointers = instr->pointer_map();
- LEnvironment* env = instr->deoptimization_environment();
RecordPosition(pointers->position());
- RegisterEnvironmentForDeoptimization(env);
- SafepointGenerator generator(this, pointers, env->deoptimization_index());
+ SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
ParameterCount count(instr->arity());
__ InvokeFunction(r1, count, CALL_FUNCTION, generator, CALL_AS_METHOD);
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -3199,12 +3269,12 @@ void LCodeGen::DoCallNamed(LCallNamed* instr) {
void LCodeGen::DoCallFunction(LCallFunction* instr) {
+ ASSERT(ToRegister(instr->function()).is(r1));
ASSERT(ToRegister(instr->result()).is(r0));
int arity = instr->arity();
- CallFunctionStub stub(arity, RECEIVER_MIGHT_BE_IMPLICIT);
+ CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- __ Drop(1);
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
}
@@ -3258,19 +3328,36 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
}
// Do the store.
+ HType type = instr->hydrogen()->value()->type();
+ SmiCheck check_needed =
+ type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
if (instr->is_in_object()) {
__ str(value, FieldMemOperand(object, offset));
- if (instr->needs_write_barrier()) {
+ if (instr->hydrogen()->NeedsWriteBarrier()) {
// Update the write barrier for the object for in-object properties.
- __ RecordWrite(object, Operand(offset), value, scratch);
+ __ RecordWriteField(object,
+ offset,
+ value,
+ scratch,
+ kLRHasBeenSaved,
+ kSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ check_needed);
}
} else {
__ ldr(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset));
__ str(value, FieldMemOperand(scratch, offset));
- if (instr->needs_write_barrier()) {
+ if (instr->hydrogen()->NeedsWriteBarrier()) {
// Update the write barrier for the properties array.
// object is used as a scratch register.
- __ RecordWrite(scratch, Operand(offset), value, object);
+ __ RecordWriteField(scratch,
+ offset,
+ value,
+ object,
+ kLRHasBeenSaved,
+ kSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ check_needed);
}
}
}
@@ -3282,7 +3369,7 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
// Name is always in r2.
__ mov(r2, Operand(instr->name()));
- Handle<Code> ic = instr->strict_mode()
+ Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
? isolate()->builtins()->StoreIC_Initialize_Strict()
: isolate()->builtins()->StoreIC_Initialize();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
@@ -3301,6 +3388,13 @@ void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
Register scratch = scratch0();
+ // This instruction cannot handle the FAST_SMI_ONLY_ELEMENTS -> FAST_ELEMENTS
+ // conversion, so it deopts in that case.
+ if (instr->hydrogen()->ValueNeedsSmiCheck()) {
+ __ tst(value, Operand(kSmiTagMask));
+ DeoptimizeIf(ne, instr->environment());
+ }
+
// Do the store.
if (instr->key()->IsConstantOperand()) {
ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
@@ -3314,9 +3408,18 @@ void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
}
if (instr->hydrogen()->NeedsWriteBarrier()) {
+ HType type = instr->hydrogen()->value()->type();
+ SmiCheck check_needed =
+ type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
// Compute address of modified element and store it into key register.
- __ add(key, scratch, Operand(FixedArray::kHeaderSize));
- __ RecordWrite(elements, key, value);
+ __ add(key, scratch, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ RecordWrite(elements,
+ key,
+ value,
+ kLRHasBeenSaved,
+ kSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ check_needed);
}
}
@@ -3417,6 +3520,7 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement(
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
+ case FAST_SMI_ONLY_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
@@ -3431,13 +3535,55 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
ASSERT(ToRegister(instr->key()).is(r1));
ASSERT(ToRegister(instr->value()).is(r0));
- Handle<Code> ic = instr->strict_mode()
+ Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
: isolate()->builtins()->KeyedStoreIC_Initialize();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
+void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
+ Register object_reg = ToRegister(instr->object());
+ Register new_map_reg = ToRegister(instr->new_map_reg());
+ Register scratch = scratch0();
+
+ Handle<Map> from_map = instr->original_map();
+ Handle<Map> to_map = instr->transitioned_map();
+ ElementsKind from_kind = from_map->elements_kind();
+ ElementsKind to_kind = to_map->elements_kind();
+
+ Label not_applicable;
+ __ ldr(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset));
+ __ cmp(scratch, Operand(from_map));
+ __ b(ne, &not_applicable);
+ __ mov(new_map_reg, Operand(to_map));
+ if (from_kind == FAST_SMI_ONLY_ELEMENTS && to_kind == FAST_ELEMENTS) {
+ __ str(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
+ // Write barrier.
+ __ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
+ scratch, kLRHasBeenSaved, kDontSaveFPRegs);
+ } else if (from_kind == FAST_SMI_ONLY_ELEMENTS &&
+ to_kind == FAST_DOUBLE_ELEMENTS) {
+ Register fixed_object_reg = ToRegister(instr->temp_reg());
+ ASSERT(fixed_object_reg.is(r2));
+ ASSERT(new_map_reg.is(r3));
+ __ mov(fixed_object_reg, object_reg);
+ CallCode(isolate()->builtins()->TransitionElementsSmiToDouble(),
+ RelocInfo::CODE_TARGET, instr);
+ } else if (from_kind == FAST_DOUBLE_ELEMENTS && to_kind == FAST_ELEMENTS) {
+ Register fixed_object_reg = ToRegister(instr->temp_reg());
+ ASSERT(fixed_object_reg.is(r2));
+ ASSERT(new_map_reg.is(r3));
+ __ mov(fixed_object_reg, object_reg);
+ CallCode(isolate()->builtins()->TransitionElementsDoubleToObject(),
+ RelocInfo::CODE_TARGET, instr);
+ } else {
+ UNREACHABLE();
+ }
+ __ bind(&not_applicable);
+}
+
+
void LCodeGen::DoStringAdd(LStringAdd* instr) {
__ push(ToRegister(instr->left()));
__ push(ToRegister(instr->right()));
@@ -3452,87 +3598,19 @@ void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
+ virtual LInstruction* instr() { return instr_; }
private:
LStringCharCodeAt* instr_;
};
- Register string = ToRegister(instr->string());
- Register index = ToRegister(instr->index());
- Register result = ToRegister(instr->result());
-
DeferredStringCharCodeAt* deferred =
new DeferredStringCharCodeAt(this, instr);
- // Fetch the instance type of the receiver into result register.
- __ ldr(result, FieldMemOperand(string, HeapObject::kMapOffset));
- __ ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
-
- // We need special handling for indirect strings.
- Label check_sequential;
- __ tst(result, Operand(kIsIndirectStringMask));
- __ b(eq, &check_sequential);
-
- // Dispatch on the indirect string shape: slice or cons.
- Label cons_string;
- __ tst(result, Operand(kSlicedNotConsMask));
- __ b(eq, &cons_string);
-
- // Handle slices.
- Label indirect_string_loaded;
- __ ldr(result, FieldMemOperand(string, SlicedString::kOffsetOffset));
- __ add(index, index, Operand(result, ASR, kSmiTagSize));
- __ ldr(string, FieldMemOperand(string, SlicedString::kParentOffset));
- __ jmp(&indirect_string_loaded);
-
- // Handle conses.
- // Check whether the right hand side is the empty string (i.e. if
- // this is really a flat string in a cons string). If that is not
- // the case we would rather go to the runtime system now to flatten
- // the string.
- __ bind(&cons_string);
- __ ldr(result, FieldMemOperand(string, ConsString::kSecondOffset));
- __ LoadRoot(ip, Heap::kEmptyStringRootIndex);
- __ cmp(result, ip);
- __ b(ne, deferred->entry());
- // Get the first of the two strings and load its instance type.
- __ ldr(string, FieldMemOperand(string, ConsString::kFirstOffset));
-
- __ bind(&indirect_string_loaded);
- __ ldr(result, FieldMemOperand(string, HeapObject::kMapOffset));
- __ ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
-
- // Check whether the string is sequential. The only non-sequential
- // shapes we support have just been unwrapped above.
- __ bind(&check_sequential);
- STATIC_ASSERT(kSeqStringTag == 0);
- __ tst(result, Operand(kStringRepresentationMask));
- __ b(ne, deferred->entry());
-
- // Dispatch on the encoding: ASCII or two-byte.
- Label ascii_string;
- STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
- STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
- __ tst(result, Operand(kStringEncodingMask));
- __ b(ne, &ascii_string);
-
- // Two-byte string.
- // Load the two-byte character code into the result register.
- Label done;
- __ add(result,
- string,
- Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
- __ ldrh(result, MemOperand(result, index, LSL, 1));
- __ jmp(&done);
-
- // ASCII string.
- // Load the byte into the result register.
- __ bind(&ascii_string);
- __ add(result,
- string,
- Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
- __ ldrb(result, MemOperand(result, index));
-
- __ bind(&done);
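+ // The string unwrapping and character load formerly inlined above is now
+ // shared through StringCharLoadGenerator; strings it cannot handle inline
+ // bail out to the deferred runtime path.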
+ StringCharLoadGenerator::Generate(masm(),
+ ToRegister(instr->string()),
+ ToRegister(instr->index()),
+ ToRegister(instr->result()),
+ deferred->entry());
__ bind(deferred->exit());
}
@@ -3575,6 +3653,7 @@ void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); }
+ virtual LInstruction* instr() { return instr_; }
private:
LStringCharFromCode* instr_;
};
@@ -3646,6 +3725,7 @@ void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() { codegen()->DoDeferredNumberTagI(instr_); }
+ virtual LInstruction* instr() { return instr_; }
private:
LNumberTagI* instr_;
};
@@ -3711,6 +3791,7 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
+ virtual LInstruction* instr() { return instr_; }
private:
LNumberTagD* instr_;
};
@@ -3819,16 +3900,6 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
}
-class DeferredTaggedToI: public LDeferredCode {
- public:
- DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
- private:
- LTaggedToI* instr_;
-};
-
-
void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
Register input_reg = ToRegister(instr->InputAt(0));
Register scratch1 = scratch0();
@@ -3911,6 +3982,16 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
+ class DeferredTaggedToI: public LDeferredCode {
+ public:
+ DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
+ virtual LInstruction* instr() { return instr_; }
+ private:
+ LTaggedToI* instr_;
+ };
+
LOperand* input = instr->InputAt(0);
ASSERT(input->IsRegister());
ASSERT(input->Equals(instr->result()));
@@ -4150,10 +4231,15 @@ void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
+ Handle<FixedArray> constant_elements = instr->hydrogen()->constant_elements();
+ ASSERT_EQ(2, constant_elements->length());
+ ElementsKind constant_elements_kind =
+ static_cast<ElementsKind>(Smi::cast(constant_elements->get(0))->value());
+
__ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ ldr(r3, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
__ mov(r2, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
- __ mov(r1, Operand(instr->hydrogen()->constant_elements()));
+ __ mov(r1, Operand(constant_elements));
__ Push(r3, r2, r1);
// Pick the right runtime function or stub to call.
@@ -4170,26 +4256,106 @@ void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr);
} else {
FastCloneShallowArrayStub::Mode mode =
- FastCloneShallowArrayStub::CLONE_ELEMENTS;
+ constant_elements_kind == FAST_DOUBLE_ELEMENTS
+ ? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
+ : FastCloneShallowArrayStub::CLONE_ELEMENTS;
FastCloneShallowArrayStub stub(mode, length);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
}
-void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
+void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
+ Register result,
+ Register source,
+ int* offset) {
+ ASSERT(!source.is(r2));
+ ASSERT(!result.is(r2));
+
+ // Increase the offset so that subsequent objects end up right after
+ // this one.
+ int current_offset = *offset;
+ int size = object->map()->instance_size();
+ *offset += size;
+
+ // Copy object header.
+ ASSERT(object->properties()->length() == 0);
+ ASSERT(object->elements()->length() == 0 ||
+ object->elements()->map() == isolate()->heap()->fixed_cow_array_map());
+ int inobject_properties = object->map()->inobject_properties();
+ int header_size = size - inobject_properties * kPointerSize;
+ for (int i = 0; i < header_size; i += kPointerSize) {
+ __ ldr(r2, FieldMemOperand(source, i));
+ __ str(r2, FieldMemOperand(result, current_offset + i));
+ }
+
+ // Copy in-object properties.
+ for (int i = 0; i < inobject_properties; i++) {
+ int total_offset = current_offset + object->GetInObjectPropertyOffset(i);
+ Handle<Object> value = Handle<Object>(object->InObjectPropertyAt(i));
+ if (value->IsJSObject()) {
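+ // The nested object is materialized at *offset inside the same
+ // allocation, so its final address can be stored before recursing.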
+ Handle<JSObject> value_object = Handle<JSObject>::cast(value);
+ __ add(r2, result, Operand(*offset));
+ __ str(r2, FieldMemOperand(result, total_offset));
+ LoadHeapObject(source, value_object);
+ EmitDeepCopy(value_object, result, source, offset);
+ } else if (value->IsHeapObject()) {
+ LoadHeapObject(r2, Handle<HeapObject>::cast(value));
+ __ str(r2, FieldMemOperand(result, total_offset));
+ } else {
+ __ mov(r2, Operand(value));
+ __ str(r2, FieldMemOperand(result, total_offset));
+ }
+ }
+}
+
+
+void LCodeGen::DoObjectLiteralFast(LObjectLiteralFast* instr) {
+ int size = instr->hydrogen()->total_size();
+
+ // Allocate all objects that are part of the literal in one big
+ // allocation. This avoids multiple limit checks.
+ Label allocated, runtime_allocate;
+ __ AllocateInNewSpace(size, r0, r2, r3, &runtime_allocate, TAG_OBJECT);
+ __ jmp(&allocated);
+
+ __ bind(&runtime_allocate);
+ __ mov(r0, Operand(Smi::FromInt(size)));
+ __ push(r0);
+ CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
+
+ __ bind(&allocated);
+ int offset = 0;
+ LoadHeapObject(r1, instr->hydrogen()->boilerplate());
+ EmitDeepCopy(instr->hydrogen()->boilerplate(), r0, r1, &offset);
+ ASSERT_EQ(size, offset);
+}
+
+
+void LCodeGen::DoObjectLiteralGeneric(LObjectLiteralGeneric* instr) {
+ Handle<FixedArray> constant_properties =
+ instr->hydrogen()->constant_properties();
+
__ ldr(r4, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ ldr(r4, FieldMemOperand(r4, JSFunction::kLiteralsOffset));
__ mov(r3, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
- __ mov(r2, Operand(instr->hydrogen()->constant_properties()));
- __ mov(r1, Operand(Smi::FromInt(instr->hydrogen()->fast_elements() ? 1 : 0)));
+ __ mov(r2, Operand(constant_properties));
+ int flags = instr->hydrogen()->fast_elements()
+ ? ObjectLiteral::kFastElements
+ : ObjectLiteral::kNoFlags;
+ __ mov(r1, Operand(Smi::FromInt(flags)));
__ Push(r4, r3, r2, r1);
// Pick the right runtime function to call.
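+ // constant_properties holds alternating keys and values, so half its
+ // length is the number of properties.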
+ int properties_count = constant_properties->length() / 2;
if (instr->hydrogen()->depth() > 1) {
CallRuntime(Runtime::kCreateObjectLiteral, 4, instr);
- } else {
+ } else if (flags != ObjectLiteral::kFastElements ||
+ properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
CallRuntime(Runtime::kCreateObjectLiteralShallow, 4, instr);
+ } else {
+ FastCloneShallowObjectStub stub(properties_count);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
}
@@ -4262,8 +4428,7 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
Handle<SharedFunctionInfo> shared_info = instr->shared_info();
bool pretenure = instr->hydrogen()->pretenure();
if (!pretenure && shared_info->num_literals() == 0) {
- FastNewClosureStub stub(
- shared_info->strict_mode() ? kStrictMode : kNonStrictMode);
+ FastNewClosureStub stub(shared_info->language_mode());
__ mov(r1, Operand(shared_info));
__ push(r1);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
@@ -4296,8 +4461,9 @@ void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
false_label,
input,
instr->type_literal());
-
- EmitBranch(true_block, false_block, final_branch_condition);
+ if (final_branch_condition != kNoCondition) {
+ EmitBranch(true_block, false_block, final_branch_condition);
+ }
}
@@ -4343,10 +4509,12 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
final_branch_condition = ne;
} else if (type_name->Equals(heap()->function_symbol())) {
+ STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
__ JumpIfSmi(input, false_label);
- __ CompareObjectType(input, input, scratch,
- FIRST_CALLABLE_SPEC_OBJECT_TYPE);
- final_branch_condition = ge;
+ __ CompareObjectType(input, scratch, input, JS_FUNCTION_TYPE);
+ __ b(eq, true_label);
+ __ cmp(input, Operand(JS_FUNCTION_PROXY_TYPE));
+ final_branch_condition = eq;
} else if (type_name->Equals(heap()->object_symbol())) {
__ JumpIfSmi(input, false_label);
@@ -4365,9 +4533,7 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
final_branch_condition = eq;
} else {
- final_branch_condition = ne;
__ b(false_label);
- // A dead branch instruction will be generated after this point.
}
return final_branch_condition;
@@ -4403,9 +4569,29 @@ void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) {
}
+void LCodeGen::EnsureSpaceForLazyDeopt() {
+ // Ensure that we have enough space after the previous lazy-bailout
+ // instruction for patching the code here.
+ int current_pc = masm()->pc_offset();
+ int patch_size = Deoptimizer::patch_size();
+ if (current_pc < last_lazy_deopt_pc_ + patch_size) {
+ int padding_size = last_lazy_deopt_pc_ + patch_size - current_pc;
+ ASSERT_EQ(0, padding_size % Assembler::kInstrSize);
+ while (padding_size > 0) {
+ __ nop();
+ padding_size -= Assembler::kInstrSize;
+ }
+ }
+ last_lazy_deopt_pc_ = masm()->pc_offset();
+}
+
+
void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
- // No code for lazy bailout instruction. Used to capture environment after a
- // call for populating the safepoint data with deoptimization data.
+ EnsureSpaceForLazyDeopt();
+ ASSERT(instr->HasEnvironment());
+ LEnvironment* env = instr->environment();
+ RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
+ safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}
@@ -4422,12 +4608,9 @@ void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) {
__ Push(object, key, strict);
ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
LPointerMap* pointers = instr->pointer_map();
- LEnvironment* env = instr->deoptimization_environment();
RecordPosition(pointers->position());
- RegisterEnvironmentForDeoptimization(env);
- SafepointGenerator safepoint_generator(this,
- pointers,
- env->deoptimization_index());
+ SafepointGenerator safepoint_generator(
+ this, pointers, Safepoint::kLazyDeopt);
__ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, safepoint_generator);
}
@@ -4438,27 +4621,20 @@ void LCodeGen::DoIn(LIn* instr) {
__ Push(key, obj);
ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
LPointerMap* pointers = instr->pointer_map();
- LEnvironment* env = instr->deoptimization_environment();
RecordPosition(pointers->position());
- RegisterEnvironmentForDeoptimization(env);
- SafepointGenerator safepoint_generator(this,
- pointers,
- env->deoptimization_index());
+ SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
__ InvokeBuiltin(Builtins::IN, CALL_FUNCTION, safepoint_generator);
}
void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
- {
- PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
- __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
- RegisterLazyDeoptimization(
- instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
- }
-
- // The gap code includes the restoring of the safepoint registers.
- int pc = masm()->pc_offset();
- safepoints_.SetPcAfterGap(pc);
+ PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
+ RecordSafepointWithLazyDeopt(
+ instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+ ASSERT(instr->HasEnvironment());
+ LEnvironment* env = instr->environment();
+ safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}
@@ -4468,10 +4644,15 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
+ virtual LInstruction* instr() { return instr_; }
private:
LStackCheck* instr_;
};
+ ASSERT(instr->HasEnvironment());
+ LEnvironment* env = instr->environment();
+ // There is no LLazyBailout instruction for stack-checks. We have to
+ // prepare for lazy deoptimization explicitly here.
if (instr->hydrogen()->is_function_entry()) {
// Perform stack overflow check.
Label done;
@@ -4480,7 +4661,10 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
__ b(hs, &done);
StackCheckStub stub;
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ EnsureSpaceForLazyDeopt();
__ bind(&done);
+ RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
+ safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
} else {
ASSERT(instr->hydrogen()->is_backwards_branch());
// Perform stack overflow check if this goto needs it before jumping.
@@ -4489,8 +4673,13 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
__ LoadRoot(ip, Heap::kStackLimitRootIndex);
__ cmp(sp, Operand(ip));
__ b(lo, deferred_stack_check->entry());
+ EnsureSpaceForLazyDeopt();
__ bind(instr->done_label());
deferred_stack_check->SetExit(instr->done_label());
+ RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
+ // Don't record a deoptimization index for the safepoint here.
+ // This will be done explicitly when emitting the call and the safepoint in
+ // the deferred code.
}
}
@@ -4506,7 +4695,7 @@ void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
// If the environment were already registered, we would have no way of
// backpatching it with the spill slot operands.
ASSERT(!environment->HasBeenRegistered());
- RegisterEnvironmentForDeoptimization(environment);
+ RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
ASSERT(osr_pc_offset_ == -1);
osr_pc_offset_ = masm()->pc_offset();
}
diff --git a/deps/v8/src/arm/lithium-codegen-arm.h b/deps/v8/src/arm/lithium-codegen-arm.h
index ead848903..363449a50 100644
--- a/deps/v8/src/arm/lithium-codegen-arm.h
+++ b/deps/v8/src/arm/lithium-codegen-arm.h
@@ -58,6 +58,7 @@ class LCodeGen BASE_EMBEDDED {
status_(UNUSED),
deferred_(8),
osr_pc_offset_(-1),
+ last_lazy_deopt_pc_(0),
resolver_(this),
expected_safepoint_kind_(Safepoint::kSimple) {
PopulateDeoptimizationLiteralsWithInlinedFunctions();
@@ -86,6 +87,7 @@ class LCodeGen BASE_EMBEDDED {
SwVfpRegister flt_scratch,
DoubleRegister dbl_scratch);
int ToInteger32(LConstantOperand* op) const;
+ double ToDouble(LConstantOperand* op) const;
Operand ToOperand(LOperand* op);
MemOperand ToMemOperand(LOperand* op) const;
// Returns a MemOperand pointing to the high word of a DoubleStackSlot.
@@ -111,8 +113,8 @@ class LCodeGen BASE_EMBEDDED {
void DoDeferredStackCheck(LStackCheck* instr);
void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
- void DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
- Label* map_check);
+ void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
+ Label* map_check);
// Parallel move support.
void DoParallelMove(LParallelMove* move);
@@ -139,8 +141,8 @@ class LCodeGen BASE_EMBEDDED {
bool is_done() const { return status_ == DONE; }
bool is_aborted() const { return status_ == ABORTED; }
- int strict_mode_flag() const {
- return info()->is_strict_mode() ? kStrictMode : kNonStrictMode;
+ StrictModeFlag strict_mode_flag() const {
+ return info()->is_classic_mode() ? kNonStrictMode : kStrictMode;
}
LChunk* chunk() const { return chunk_; }
@@ -206,7 +208,7 @@ class LCodeGen BASE_EMBEDDED {
LInstruction* instr);
// Generate a direct call to a known function. Expects the function
- // to be in edi.
+ // to be in r1.
void CallKnownFunction(Handle<JSFunction> function,
int arity,
LInstruction* instr,
@@ -214,10 +216,11 @@ class LCodeGen BASE_EMBEDDED {
void LoadHeapObject(Register result, Handle<HeapObject> object);
- void RegisterLazyDeoptimization(LInstruction* instr,
- SafepointMode safepoint_mode);
+ void RecordSafepointWithLazyDeopt(LInstruction* instr,
+ SafepointMode safepoint_mode);
- void RegisterEnvironmentForDeoptimization(LEnvironment* environment);
+ void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
+ Safepoint::DeoptMode mode);
void DeoptimizeIf(Condition cc, LEnvironment* environment);
void AddToTranslation(Translation* translation,
@@ -239,6 +242,7 @@ class LCodeGen BASE_EMBEDDED {
void DoMathSqrt(LUnaryMathOperation* instr);
void DoMathPowHalf(LUnaryMathOperation* instr);
void DoMathLog(LUnaryMathOperation* instr);
+ void DoMathTan(LUnaryMathOperation* instr);
void DoMathCos(LUnaryMathOperation* instr);
void DoMathSin(LUnaryMathOperation* instr);
@@ -246,24 +250,20 @@ class LCodeGen BASE_EMBEDDED {
void RecordSafepoint(LPointerMap* pointers,
Safepoint::Kind kind,
int arguments,
- int deoptimization_index);
- void RecordSafepoint(LPointerMap* pointers, int deoptimization_index);
- void RecordSafepoint(int deoptimization_index);
+ Safepoint::DeoptMode mode);
+ void RecordSafepoint(LPointerMap* pointers, Safepoint::DeoptMode mode);
+ void RecordSafepoint(Safepoint::DeoptMode mode);
void RecordSafepointWithRegisters(LPointerMap* pointers,
int arguments,
- int deoptimization_index);
+ Safepoint::DeoptMode mode);
void RecordSafepointWithRegistersAndDoubles(LPointerMap* pointers,
int arguments,
- int deoptimization_index);
+ Safepoint::DeoptMode mode);
void RecordPosition(int position);
- int LastSafepointEnd() {
- return static_cast<int>(safepoints_.GetPcAfterGap());
- }
static Condition TokenToCondition(Token::Value op, bool is_unsigned);
void EmitGoto(int block);
void EmitBranch(int left_block, int right_block, Condition cc);
- void EmitCmpI(LOperand* left, LOperand* right);
void EmitNumberUntagD(Register input,
DoubleRegister result,
bool deoptimize_on_undefined,
@@ -272,8 +272,10 @@ class LCodeGen BASE_EMBEDDED {
// Emits optimized code for typeof x == "y". Modifies input register.
// Returns the condition on which a final split to
// true and false label should be made, to optimize fallthrough.
- Condition EmitTypeofIs(Label* true_label, Label* false_label,
- Register input, Handle<String> type_name);
+ Condition EmitTypeofIs(Label* true_label,
+ Label* false_label,
+ Register input,
+ Handle<String> type_name);
// Emits optimized code for %_IsObject(x). Preserves input register.
// Returns the condition on which a final split to
@@ -283,6 +285,13 @@ class LCodeGen BASE_EMBEDDED {
Label* is_not_object,
Label* is_object);
+ // Emits optimized code for %_IsString(x). Preserves input register.
+ // Returns the condition on which a final split to
+ // true and false label should be made, to optimize fallthrough.
+ Condition EmitIsString(Register input,
+ Register temp1,
+ Label* is_not_string);
+
// Emits optimized code for %_IsConstructCall().
// Caller should branch on equal condition.
void EmitIsConstructCall(Register temp1, Register temp2);
@@ -292,6 +301,13 @@ class LCodeGen BASE_EMBEDDED {
Handle<Map> type,
Handle<String> name);
+ // Emits optimized code to deep-copy the contents of statically known
+ // object graphs (e.g. object literal boilerplate).
+ void EmitDeepCopy(Handle<JSObject> object,
+ Register result,
+ Register source,
+ int* offset);
+
struct JumpTableEntry {
explicit inline JumpTableEntry(Address entry)
: label(),
@@ -300,6 +316,8 @@ class LCodeGen BASE_EMBEDDED {
Address address;
};
+ void EnsureSpaceForLazyDeopt();
+
LChunk* const chunk_;
MacroAssembler* const masm_;
CompilationInfo* const info_;
@@ -316,6 +334,7 @@ class LCodeGen BASE_EMBEDDED {
TranslationBuffer translations_;
ZoneList<LDeferredCode*> deferred_;
int osr_pc_offset_;
+ int last_lazy_deopt_pc_;
// Builder that keeps track of safepoints in the code. The table
// itself is emitted at the end of the generated code.
@@ -376,16 +395,20 @@ class LCodeGen BASE_EMBEDDED {
class LDeferredCode: public ZoneObject {
public:
explicit LDeferredCode(LCodeGen* codegen)
- : codegen_(codegen), external_exit_(NULL) {
+ : codegen_(codegen),
+ external_exit_(NULL),
+ instruction_index_(codegen->current_instruction_) {
codegen->AddDeferredCode(this);
}
virtual ~LDeferredCode() { }
virtual void Generate() = 0;
+ virtual LInstruction* instr() = 0;
void SetExit(Label *exit) { external_exit_ = exit; }
Label* entry() { return &entry_; }
Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }
+ int instruction_index() const { return instruction_index_; }
protected:
LCodeGen* codegen() const { return codegen_; }
@@ -396,6 +419,7 @@ class LDeferredCode: public ZoneObject {
Label entry_;
Label exit_;
Label* external_exit_;
+ int instruction_index_;
};
} } // namespace v8::internal
diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc
index f37f31021..4fc3b03ab 100644
--- a/deps/v8/src/arm/macro-assembler-arm.cc
+++ b/deps/v8/src/arm/macro-assembler-arm.cc
@@ -42,7 +42,8 @@ namespace internal {
MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
: Assembler(arg_isolate, buffer, size),
generating_stub_(false),
- allow_stub_calls_(true) {
+ allow_stub_calls_(true),
+ has_frame_(false) {
if (isolate() != NULL) {
code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
isolate());
@@ -406,32 +407,6 @@ void MacroAssembler::StoreRoot(Register source,
}
-void MacroAssembler::RecordWriteHelper(Register object,
- Register address,
- Register scratch) {
- if (emit_debug_code()) {
- // Check that the object is not in new space.
- Label not_in_new_space;
- InNewSpace(object, scratch, ne, &not_in_new_space);
- Abort("new-space object passed to RecordWriteHelper");
- bind(&not_in_new_space);
- }
-
- // Calculate page address.
- Bfc(object, 0, kPageSizeBits);
-
- // Calculate region number.
- Ubfx(address, address, Page::kRegionSizeLog2,
- kPageSizeBits - Page::kRegionSizeLog2);
-
- // Mark region dirty.
- ldr(scratch, MemOperand(object, Page::kDirtyFlagOffset));
- mov(ip, Operand(1));
- orr(scratch, scratch, Operand(ip, LSL, address));
- str(scratch, MemOperand(object, Page::kDirtyFlagOffset));
-}
-
-
void MacroAssembler::InNewSpace(Register object,
Register scratch,
Condition cond,
@@ -443,38 +418,52 @@ void MacroAssembler::InNewSpace(Register object,
}
-// Will clobber 4 registers: object, offset, scratch, ip. The
-// register 'object' contains a heap object pointer. The heap object
-// tag is shifted away.
-void MacroAssembler::RecordWrite(Register object,
- Operand offset,
- Register scratch0,
- Register scratch1) {
- // The compiled code assumes that record write doesn't change the
- // context register, so we check that none of the clobbered
- // registers are cp.
- ASSERT(!object.is(cp) && !scratch0.is(cp) && !scratch1.is(cp));
-
+void MacroAssembler::RecordWriteField(
+ Register object,
+ int offset,
+ Register value,
+ Register dst,
+ LinkRegisterStatus lr_status,
+ SaveFPRegsMode save_fp,
+ RememberedSetAction remembered_set_action,
+ SmiCheck smi_check) {
+ // First, check if a write barrier is even needed. The tests below
+ // catch stores of Smis.
Label done;
- // First, test that the object is not in the new space. We cannot set
- // region marks for new space pages.
- InNewSpace(object, scratch0, eq, &done);
+ // Skip barrier if writing a smi.
+ if (smi_check == INLINE_SMI_CHECK) {
+ JumpIfSmi(value, &done);
+ }
- // Add offset into the object.
- add(scratch0, object, offset);
+ // Although the object register is tagged, the offset is relative to the
+ // start of the object, so the offset must be a multiple of kPointerSize.
+ ASSERT(IsAligned(offset, kPointerSize));
- // Record the actual write.
- RecordWriteHelper(object, scratch0, scratch1);
+ add(dst, object, Operand(offset - kHeapObjectTag));
+ if (emit_debug_code()) {
+ Label ok;
+ tst(dst, Operand((1 << kPointerSizeLog2) - 1));
+ b(eq, &ok);
+ stop("Unaligned cell in write barrier");
+ bind(&ok);
+ }
+
+ RecordWrite(object,
+ dst,
+ value,
+ lr_status,
+ save_fp,
+ remembered_set_action,
+ OMIT_SMI_CHECK);
bind(&done);
- // Clobber all input registers when running with the debug-code flag
+ // Clobber the clobbered input registers when running with the debug-code flag
// turned on to provoke errors.
if (emit_debug_code()) {
- mov(object, Operand(BitCast<int32_t>(kZapValue)));
- mov(scratch0, Operand(BitCast<int32_t>(kZapValue)));
- mov(scratch1, Operand(BitCast<int32_t>(kZapValue)));
+ mov(value, Operand(BitCast<int32_t>(kZapValue + 4)));
+ mov(dst, Operand(BitCast<int32_t>(kZapValue + 8)));
}
}
@@ -484,29 +473,103 @@ void MacroAssembler::RecordWrite(Register object,
// tag is shifted away.
void MacroAssembler::RecordWrite(Register object,
Register address,
- Register scratch) {
+ Register value,
+ LinkRegisterStatus lr_status,
+ SaveFPRegsMode fp_mode,
+ RememberedSetAction remembered_set_action,
+ SmiCheck smi_check) {
// The compiled code assumes that record write doesn't change the
// context register, so we check that none of the clobbered
// registers are cp.
- ASSERT(!object.is(cp) && !address.is(cp) && !scratch.is(cp));
+ ASSERT(!address.is(cp) && !value.is(cp));
+
+ if (FLAG_debug_code) {
+ Label ok;
+ ldr(ip, MemOperand(address));
+ cmp(ip, value);
+ b(eq, &ok);
+ stop("Wrong address or value passed to RecordWrite");
+ bind(&ok);
+ }
Label done;
- // First, test that the object is not in the new space. We cannot set
- // region marks for new space pages.
- InNewSpace(object, scratch, eq, &done);
+ if (smi_check == INLINE_SMI_CHECK) {
+ ASSERT_EQ(0, kSmiTag);
+ tst(value, Operand(kSmiTagMask));
+ b(eq, &done);
+ }
+
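+ // The barrier can be skipped when either the value's page does not track
+ // incoming pointers or the object's page does not track outgoing ones.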
+ CheckPageFlag(value,
+ value, // Used as scratch.
+ MemoryChunk::kPointersToHereAreInterestingMask,
+ eq,
+ &done);
+ CheckPageFlag(object,
+ value, // Used as scratch.
+ MemoryChunk::kPointersFromHereAreInterestingMask,
+ eq,
+ &done);
// Record the actual write.
- RecordWriteHelper(object, address, scratch);
+ if (lr_status == kLRHasNotBeenSaved) {
+ push(lr);
+ }
+ RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode);
+ CallStub(&stub);
+ if (lr_status == kLRHasNotBeenSaved) {
+ pop(lr);
+ }
bind(&done);
- // Clobber all input registers when running with the debug-code flag
+ // Clobber the clobbered registers when running with the debug-code flag
// turned on to provoke errors.
if (emit_debug_code()) {
- mov(object, Operand(BitCast<int32_t>(kZapValue)));
- mov(address, Operand(BitCast<int32_t>(kZapValue)));
- mov(scratch, Operand(BitCast<int32_t>(kZapValue)));
+ mov(address, Operand(BitCast<int32_t>(kZapValue + 12)));
+ mov(value, Operand(BitCast<int32_t>(kZapValue + 16)));
+ }
+}
+
+
+void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
+ Register address,
+ Register scratch,
+ SaveFPRegsMode fp_mode,
+ RememberedSetFinalAction and_then) {
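+ // Record 'address' in the store buffer; when the buffer fills up, flush
+ // it through the StoreBufferOverflowStub.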
+ Label done;
+ if (FLAG_debug_code) {
+ Label ok;
+ JumpIfNotInNewSpace(object, scratch, &ok);
+ stop("Remembered set pointer is in new space");
+ bind(&ok);
+ }
+ // Load store buffer top.
+ ExternalReference store_buffer =
+ ExternalReference::store_buffer_top(isolate());
+ mov(ip, Operand(store_buffer));
+ ldr(scratch, MemOperand(ip));
+ // Store pointer to buffer and increment buffer top.
+ str(address, MemOperand(scratch, kPointerSize, PostIndex));
+ // Write back new top of buffer.
+ str(scratch, MemOperand(ip));
+ // Check whether the buffer is full; if so, flush it via the overflow stub.
+ tst(scratch, Operand(StoreBuffer::kStoreBufferOverflowBit));
+ if (and_then == kFallThroughAtEnd) {
+ b(eq, &done);
+ } else {
+ ASSERT(and_then == kReturnAtEnd);
+ Ret(eq);
+ }
+ push(lr);
+ StoreBufferOverflowStub store_buffer_overflow =
+ StoreBufferOverflowStub(fp_mode);
+ CallStub(&store_buffer_overflow);
+ pop(lr);
+ bind(&done);
+ if (and_then == kReturnAtEnd) {
+ Ret();
}
}
@@ -961,6 +1024,9 @@ void MacroAssembler::InvokeCode(Register code,
InvokeFlag flag,
const CallWrapper& call_wrapper,
CallKind call_kind) {
+ // You can't call a function without a valid frame.
+ ASSERT(flag == JUMP_FUNCTION || has_frame());
+
Label done;
InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag,
@@ -988,6 +1054,9 @@ void MacroAssembler::InvokeCode(Handle<Code> code,
RelocInfo::Mode rmode,
InvokeFlag flag,
CallKind call_kind) {
+ // You can't call a function without a valid frame.
+ ASSERT(flag == JUMP_FUNCTION || has_frame());
+
Label done;
InvokePrologue(expected, actual, code, no_reg, &done, flag,
@@ -1011,6 +1080,9 @@ void MacroAssembler::InvokeFunction(Register fun,
InvokeFlag flag,
const CallWrapper& call_wrapper,
CallKind call_kind) {
+ // You can't call a function without a valid frame.
+ ASSERT(flag == JUMP_FUNCTION || has_frame());
+
// Contract with called JS functions requires that function is passed in r1.
ASSERT(fun.is(r1));
@@ -1031,28 +1103,23 @@ void MacroAssembler::InvokeFunction(Register fun,
}
-void MacroAssembler::InvokeFunction(JSFunction* function,
+void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
const ParameterCount& actual,
InvokeFlag flag,
CallKind call_kind) {
- ASSERT(function->is_compiled());
+ // You can't call a function without a valid frame.
+ ASSERT(flag == JUMP_FUNCTION || has_frame());
// Get the function and setup the context.
- mov(r1, Operand(Handle<JSFunction>(function)));
+ mov(r1, Operand(function));
ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
- // Invoke the cached code.
- Handle<Code> code(function->code());
ParameterCount expected(function->shared()->formal_parameter_count());
- if (V8::UseCrankshaft()) {
- // TODO(kasperl): For now, we always call indirectly through the
- // code field in the function to allow recompilation to take effect
- // without changing any of the call sites.
- ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
- InvokeCode(r3, expected, actual, flag, NullCallWrapper(), call_kind);
- } else {
- InvokeCode(code, expected, actual, RelocInfo::CODE_TARGET, flag, call_kind);
- }
+ // We call indirectly through the code field in the function to
+ // allow recompilation to take effect without changing any of the
+ // call sites.
+ ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
+ InvokeCode(r3, expected, actual, flag, NullCallWrapper(), call_kind);
}
@@ -1090,56 +1157,58 @@ void MacroAssembler::IsObjectJSStringType(Register object,
#ifdef ENABLE_DEBUGGER_SUPPORT
void MacroAssembler::DebugBreak() {
- ASSERT(allow_stub_calls());
mov(r0, Operand(0, RelocInfo::NONE));
mov(r1, Operand(ExternalReference(Runtime::kDebugBreak, isolate())));
CEntryStub ces(1);
+ ASSERT(AllowThisStubCall(&ces));
Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
}
#endif
void MacroAssembler::PushTryHandler(CodeLocation try_location,
- HandlerType type) {
+ HandlerType type,
+ int handler_index) {
// Adjust this code if not the case.
STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kStateOffset == 1 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kContextOffset == 2 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kFPOffset == 3 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kPCOffset == 4 * kPointerSize);
-
- // The pc (return address) is passed in register lr.
+ STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
+
+ // For the JSEntry handler, we must preserve r0-r4; r5-r7 are available.
+ // We will build up the handler from the bottom by pushing on the stack.
+ // First compute the state.
+ unsigned state = StackHandler::OffsetField::encode(handler_index);
if (try_location == IN_JAVASCRIPT) {
- if (type == TRY_CATCH_HANDLER) {
- mov(r3, Operand(StackHandler::TRY_CATCH));
- } else {
- mov(r3, Operand(StackHandler::TRY_FINALLY));
- }
- stm(db_w, sp, r3.bit() | cp.bit() | fp.bit() | lr.bit());
- // Save the current handler as the next handler.
- mov(r3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
- ldr(r1, MemOperand(r3));
- push(r1);
- // Link this handler as the new current one.
- str(sp, MemOperand(r3));
+ state |= (type == TRY_CATCH_HANDLER)
+ ? StackHandler::KindField::encode(StackHandler::TRY_CATCH)
+ : StackHandler::KindField::encode(StackHandler::TRY_FINALLY);
} else {
- // Must preserve r0-r4, r5-r7 are available.
ASSERT(try_location == IN_JS_ENTRY);
- // The frame pointer does not point to a JS frame so we save NULL
- // for fp. We expect the code throwing an exception to check fp
- // before dereferencing it to restore the context.
- mov(r5, Operand(StackHandler::ENTRY)); // State.
- mov(r6, Operand(Smi::FromInt(0))); // Indicates no context.
- mov(r7, Operand(0, RelocInfo::NONE)); // NULL frame pointer.
- stm(db_w, sp, r5.bit() | r6.bit() | r7.bit() | lr.bit());
- // Save the current handler as the next handler.
- mov(r7, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
- ldr(r6, MemOperand(r7));
- push(r6);
- // Link this handler as the new current one.
- str(sp, MemOperand(r7));
+ state |= StackHandler::KindField::encode(StackHandler::ENTRY);
}
+
+ // Set up the code object (r5) and the state (r6) for pushing.
+ mov(r5, Operand(CodeObject()));
+ mov(r6, Operand(state));
+
+ // Push the frame pointer, context, state, and code object.
+ if (try_location == IN_JAVASCRIPT) {
+ stm(db_w, sp, r5.bit() | r6.bit() | cp.bit() | fp.bit());
+ } else {
+ mov(r7, Operand(Smi::FromInt(0))); // Indicates no context.
+ mov(ip, Operand(0, RelocInfo::NONE)); // NULL frame pointer.
+ stm(db_w, sp, r5.bit() | r6.bit() | r7.bit() | ip.bit());
+ }
+
+ // Link the current handler as the next handler.
+ mov(r6, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
+ ldr(r5, MemOperand(r6));
+ push(r5);
+ // Set this new handler as the current one.
+ str(sp, MemOperand(r6));
}
@@ -1152,42 +1221,50 @@ void MacroAssembler::PopTryHandler() {
}
+void MacroAssembler::JumpToHandlerEntry() {
+ // Compute the handler entry address and jump to it. The handler table is
+ // a fixed array of (smi-tagged) code offsets.
+ // r0 = exception, r1 = code object, r2 = state.
+ ldr(r3, FieldMemOperand(r1, Code::kHandlerTableOffset)); // Handler table.
+ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ mov(r2, Operand(r2, LSR, StackHandler::kKindWidth)); // Handler index.
+ ldr(r2, MemOperand(r3, r2, LSL, kPointerSizeLog2)); // Smi-tagged offset.
+ add(r1, r1, Operand(Code::kHeaderSize - kHeapObjectTag)); // Code start.
+ add(pc, r1, Operand(r2, ASR, kSmiTagSize)); // Jump.
+}
+
+
void MacroAssembler::Throw(Register value) {
// Adjust this code if not the case.
STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kStateOffset == 1 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kContextOffset == 2 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kFPOffset == 3 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kPCOffset == 4 * kPointerSize);
- // r0 is expected to hold the exception.
+ STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
+ STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
+
+ // The exception is expected in r0.
if (!value.is(r0)) {
mov(r0, value);
}
-
- // Drop the sp to the top of the handler.
+ // Drop the stack pointer to the top of the top handler.
mov(r3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
ldr(sp, MemOperand(r3));
-
// Restore the next handler.
pop(r2);
str(r2, MemOperand(r3));
- // Restore context and frame pointer, discard state (r3).
- ldm(ia_w, sp, r3.bit() | cp.bit() | fp.bit());
+ // Get the code object (r1) and state (r2). Restore the context and frame
+ // pointer.
+ ldm(ia_w, sp, r1.bit() | r2.bit() | cp.bit() | fp.bit());
// If the handler is a JS frame, restore the context to the frame.
- // (r3 == ENTRY) == (fp == 0) == (cp == 0), so we could test any
- // of them.
- cmp(r3, Operand(StackHandler::ENTRY));
+ // (kind == ENTRY) == (fp == 0) == (cp == 0), so we could test either fp
+ // or cp.
+ tst(cp, cp);
str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);
-#ifdef DEBUG
- if (emit_debug_code()) {
- mov(lr, Operand(pc));
- }
-#endif
- pop(pc);
+ JumpToHandlerEntry();
}
@@ -1196,41 +1273,16 @@ void MacroAssembler::ThrowUncatchable(UncatchableExceptionType type,
// Adjust this code if not the case.
STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kStateOffset == 1 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kContextOffset == 2 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kFPOffset == 3 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kPCOffset == 4 * kPointerSize);
- // r0 is expected to hold the exception.
- if (!value.is(r0)) {
- mov(r0, value);
- }
-
- // Drop sp to the top stack handler.
- mov(r3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
- ldr(sp, MemOperand(r3));
-
- // Unwind the handlers until the ENTRY handler is found.
- Label loop, done;
- bind(&loop);
- // Load the type of the current stack handler.
- const int kStateOffset = StackHandlerConstants::kStateOffset;
- ldr(r2, MemOperand(sp, kStateOffset));
- cmp(r2, Operand(StackHandler::ENTRY));
- b(eq, &done);
- // Fetch the next handler in the list.
- const int kNextOffset = StackHandlerConstants::kNextOffset;
- ldr(sp, MemOperand(sp, kNextOffset));
- jmp(&loop);
- bind(&done);
-
- // Set the top handler address to next handler past the current ENTRY handler.
- pop(r2);
- str(r2, MemOperand(r3));
+ STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
+ // The exception is expected in r0.
if (type == OUT_OF_MEMORY) {
// Set external caught exception to false.
- ExternalReference external_caught(
- Isolate::kExternalCaughtExceptionAddress, isolate());
+ ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress,
+ isolate());
mov(r0, Operand(false, RelocInfo::NONE));
mov(r2, Operand(external_caught));
str(r0, MemOperand(r2));
@@ -1241,22 +1293,34 @@ void MacroAssembler::ThrowUncatchable(UncatchableExceptionType type,
mov(r2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
isolate())));
str(r0, MemOperand(r2));
+ } else if (!value.is(r0)) {
+ mov(r0, value);
}
- // Stack layout at this point. See also StackHandlerConstants.
- // sp -> state (ENTRY)
- // cp
- // fp
- // lr
+ // Drop the stack pointer to the top of the top stack handler.
+ mov(r3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
+ ldr(sp, MemOperand(r3));
+
+ // Unwind the handlers until the ENTRY handler is found.
+ Label fetch_next, check_kind;
+ jmp(&check_kind);
+ bind(&fetch_next);
+ ldr(sp, MemOperand(sp, StackHandlerConstants::kNextOffset));
+
+ bind(&check_kind);
+ STATIC_ASSERT(StackHandler::ENTRY == 0);
+ ldr(r2, MemOperand(sp, StackHandlerConstants::kStateOffset));
+ tst(r2, Operand(StackHandler::KindField::kMask));
+ b(ne, &fetch_next);
+
+ // Set the top handler address to next handler past the top ENTRY handler.
+ pop(r2);
+ str(r2, MemOperand(r3));
+ // Get the code object (r1) and state (r2). Clear the context and frame
+ // pointer (0 was saved in the handler).
+ ldm(ia_w, sp, r1.bit() | r2.bit() | cp.bit() | fp.bit());
- // Restore context and frame pointer, discard state (r2).
- ldm(ia_w, sp, r2.bit() | cp.bit() | fp.bit());
-#ifdef DEBUG
- if (emit_debug_code()) {
- mov(lr, Operand(pc));
- }
-#endif
- pop(pc);
+ JumpToHandlerEntry();
}
@@ -1536,6 +1600,7 @@ void MacroAssembler::AllocateInNewSpace(Register object_size,
ASSERT(!result.is(scratch1));
ASSERT(!result.is(scratch2));
ASSERT(!scratch1.is(scratch2));
+ ASSERT(!object_size.is(ip));
ASSERT(!result.is(ip));
ASSERT(!scratch1.is(ip));
ASSERT(!scratch2.is(ip));
@@ -1793,13 +1858,127 @@ void MacroAssembler::CompareRoot(Register obj,
void MacroAssembler::CheckFastElements(Register map,
Register scratch,
Label* fail) {
- STATIC_ASSERT(FAST_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_ELEMENTS == 1);
ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
cmp(scratch, Operand(Map::kMaximumBitField2FastElementValue));
b(hi, fail);
}
+void MacroAssembler::CheckFastObjectElements(Register map,
+ Register scratch,
+ Label* fail) {
+ STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_ELEMENTS == 1);
+ ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
+ cmp(scratch, Operand(Map::kMaximumBitField2FastSmiOnlyElementValue));
+ b(ls, fail);
+ cmp(scratch, Operand(Map::kMaximumBitField2FastElementValue));
+ b(hi, fail);
+}
+
+
+void MacroAssembler::CheckFastSmiOnlyElements(Register map,
+ Register scratch,
+ Label* fail) {
+ STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
+ ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
+ cmp(scratch, Operand(Map::kMaximumBitField2FastSmiOnlyElementValue));
+ b(hi, fail);
+}
+
+
+void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
+ Register key_reg,
+ Register receiver_reg,
+ Register elements_reg,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4,
+ Label* fail) {
+ Label smi_value, maybe_nan, have_double_value, is_nan, done;
+ Register mantissa_reg = scratch2;
+ Register exponent_reg = scratch3;
+
+ // Handle smi values specially.
+ JumpIfSmi(value_reg, &smi_value);
+
+ // Ensure that the object is a heap number.
+ CheckMap(value_reg,
+ scratch1,
+ isolate()->factory()->heap_number_map(),
+ fail,
+ DONT_DO_SMI_CHECK);
+
+ // Check for NaN or Infinity: both have an exponent word greater than or
+ // equal (signed) to 0x7ff00000; the two cases are distinguished below.
+ mov(scratch1, Operand(kNaNOrInfinityLowerBoundUpper32));
+ ldr(exponent_reg, FieldMemOperand(value_reg, HeapNumber::kExponentOffset));
+ cmp(exponent_reg, scratch1);
+ b(ge, &maybe_nan);
+
+ ldr(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
+
+ bind(&have_double_value);
+ add(scratch1, elements_reg,
+ Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize));
+ str(mantissa_reg, FieldMemOperand(scratch1, FixedDoubleArray::kHeaderSize));
+ uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
+ str(exponent_reg, FieldMemOperand(scratch1, offset));
+ jmp(&done);
+
+ bind(&maybe_nan);
+ // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
+ // it's an Infinity, and the non-NaN code path applies.
+ b(gt, &is_nan);
+ ldr(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
+ cmp(mantissa_reg, Operand(0));
+ b(eq, &have_double_value);
+ bind(&is_nan);
+ // Load canonical NaN for storing into the double array.
+ uint64_t nan_int64 = BitCast<uint64_t>(
+ FixedDoubleArray::canonical_not_the_hole_nan_as_double());
+ mov(mantissa_reg, Operand(static_cast<uint32_t>(nan_int64)));
+ mov(exponent_reg, Operand(static_cast<uint32_t>(nan_int64 >> 32)));
+ jmp(&have_double_value);
+
+ bind(&smi_value);
+ add(scratch1, elements_reg,
+ Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
+ add(scratch1, scratch1,
+ Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize));
+ // scratch1 is now the effective address of the double element.
+
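+ // Convert the untagged smi to a double, using VFP registers when
+ // available and core registers otherwise.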
+ FloatingPointHelper::Destination destination;
+ if (CpuFeatures::IsSupported(VFP3)) {
+ destination = FloatingPointHelper::kVFPRegisters;
+ } else {
+ destination = FloatingPointHelper::kCoreRegisters;
+ }
+
+ Register untagged_value = receiver_reg;
+ SmiUntag(untagged_value, value_reg);
+ FloatingPointHelper::ConvertIntToDouble(this,
+ untagged_value,
+ destination,
+ d0,
+ mantissa_reg,
+ exponent_reg,
+ scratch4,
+ s2);
+ if (destination == FloatingPointHelper::kVFPRegisters) {
+ CpuFeatures::Scope scope(VFP3);
+ vstr(d0, scratch1, 0);
+ } else {
+ str(mantissa_reg, MemOperand(scratch1, 0));
+ str(exponent_reg, MemOperand(scratch1, Register::kSizeInBytes));
+ }
+ bind(&done);
+}
+
+
void MacroAssembler::CheckMap(Register obj,
Register scratch,
Handle<Map> map,
@@ -1850,7 +2029,8 @@ void MacroAssembler::DispatchMap(Register obj,
void MacroAssembler::TryGetFunctionPrototype(Register function,
Register result,
Register scratch,
- Label* miss) {
+ Label* miss,
+ bool miss_on_bound_function) {
// Check that the receiver isn't a smi.
JumpIfSmi(function, miss);
@@ -1858,6 +2038,16 @@ void MacroAssembler::TryGetFunctionPrototype(Register function,
CompareObjectType(function, result, scratch, JS_FUNCTION_TYPE);
b(ne, miss);
+ if (miss_on_bound_function) {
+ ldr(scratch,
+ FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
+ ldr(scratch,
+ FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
+ tst(scratch,
+ Operand(Smi::FromInt(1 << SharedFunctionInfo::kBoundFunction)));
+ b(ne, miss);
+ }
+
// Make sure that the function has an instance prototype.
Label non_instance;
ldrb(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
@@ -1895,47 +2085,24 @@ void MacroAssembler::TryGetFunctionPrototype(Register function,
void MacroAssembler::CallStub(CodeStub* stub, Condition cond) {
- ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs.
+ ASSERT(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
Call(stub->GetCode(), RelocInfo::CODE_TARGET, kNoASTId, cond);
}
-MaybeObject* MacroAssembler::TryCallStub(CodeStub* stub, Condition cond) {
- ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs.
- Object* result;
- { MaybeObject* maybe_result = stub->TryGetCode();
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- Handle<Code> code(Code::cast(result));
- Call(code, RelocInfo::CODE_TARGET, kNoASTId, cond);
- return result;
-}
-
-
void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) {
- ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs.
+ ASSERT(allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe());
Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
}
-MaybeObject* MacroAssembler::TryTailCallStub(CodeStub* stub, Condition cond) {
- ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs.
- Object* result;
- { MaybeObject* maybe_result = stub->TryGetCode();
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- Jump(Handle<Code>(Code::cast(result)), RelocInfo::CODE_TARGET, cond);
- return result;
-}
-
-
static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
return ref0.address() - ref1.address();
}
-MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn(
- ExternalReference function, int stack_space) {
+void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function,
+ int stack_space) {
ExternalReference next_address =
ExternalReference::handle_scope_next_address();
const int kNextOffset = 0;
@@ -1998,14 +2165,10 @@ MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn(
mov(pc, lr);
bind(&promote_scheduled_exception);
- MaybeObject* result
- = TryTailCallExternalReference(
- ExternalReference(Runtime::kPromoteScheduledException, isolate()),
- 0,
- 1);
- if (result->IsFailure()) {
- return result;
- }
+ TailCallExternalReference(
+ ExternalReference(Runtime::kPromoteScheduledException, isolate()),
+ 0,
+ 1);
// HandleScope limit has changed. Delete allocated extensions.
bind(&delete_allocated_handles);
@@ -2017,8 +2180,12 @@ MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn(
ExternalReference::delete_handle_scope_extensions(isolate()), 1);
mov(r0, r4);
jmp(&leave_exit_frame);
+}
+
- return result;
+bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
+ if (!has_frame_ && stub->SometimesSetsUpAFrame()) return false;
+ return allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe();
}
@@ -2417,8 +2584,7 @@ void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
const Runtime::Function* function = Runtime::FunctionForId(id);
mov(r0, Operand(function->nargs));
mov(r1, Operand(ExternalReference(function, isolate())));
- CEntryStub stub(1);
- stub.SaveDoubles();
+ CEntryStub stub(1, kSaveFPRegs);
CallStub(&stub);
}
@@ -2445,17 +2611,6 @@ void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
}
-MaybeObject* MacroAssembler::TryTailCallExternalReference(
- const ExternalReference& ext, int num_arguments, int result_size) {
- // TODO(1236192): Most runtime routines don't need the number of
- // arguments passed in because it is constant. At some point we
- // should remove this need and make the runtime routine entry code
- // smarter.
- mov(r0, Operand(num_arguments));
- return TryJumpToExternalReference(ext);
-}
-
-
void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
int num_arguments,
int result_size) {
@@ -2476,21 +2631,12 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) {
}
-MaybeObject* MacroAssembler::TryJumpToExternalReference(
- const ExternalReference& builtin) {
-#if defined(__thumb__)
- // Thumb mode builtin.
- ASSERT((reinterpret_cast<intptr_t>(builtin.address()) & 1) == 1);
-#endif
- mov(r1, Operand(builtin));
- CEntryStub stub(1);
- return TryTailCallStub(&stub);
-}
-
-
void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
InvokeFlag flag,
const CallWrapper& call_wrapper) {
+ // You can't call a builtin without a valid frame.
+ ASSERT(flag == JUMP_FUNCTION || has_frame());
+
GetBuiltinEntry(r2, id);
if (flag == CALL_FUNCTION) {
call_wrapper.BeforeCall(CallSize(r2));
@@ -2622,14 +2768,20 @@ void MacroAssembler::Abort(const char* msg) {
RecordComment(msg);
}
#endif
- // Disable stub call restrictions to always allow calls to abort.
- AllowStubCallsScope allow_scope(this, true);
mov(r0, Operand(p0));
push(r0);
mov(r0, Operand(Smi::FromInt(p1 - p0)));
push(r0);
- CallRuntime(Runtime::kAbort, 2);
+ // Disable stub call restrictions to always allow calls to abort.
+ if (!has_frame_) {
+ // We don't actually want to generate a pile of code for this, so just
+ // claim there is a stack frame, without generating one.
+ FrameScope scope(this, StackFrame::NONE);
+ CallRuntime(Runtime::kAbort, 2);
+ } else {
+ CallRuntime(Runtime::kAbort, 2);
+ }
// will not return here
if (is_const_pool_blocked()) {
// If the calling code cares about the exact number of
@@ -2930,6 +3082,19 @@ void MacroAssembler::CopyBytes(Register src,
}
+void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
+ Register end_offset,
+ Register filler) {
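+ // Store 'filler' into each pointer-sized slot in [start_offset,
+ // end_offset), advancing start_offset as we go.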
+ Label loop, entry;
+ b(&entry);
+ bind(&loop);
+ str(filler, MemOperand(start_offset, kPointerSize, PostIndex));
+ bind(&entry);
+ cmp(start_offset, end_offset);
+ b(lt, &loop);
+}
+
+
void MacroAssembler::CountLeadingZeros(Register zeros, // Answer.
Register source, // Input.
Register scratch) {
@@ -2941,8 +3106,10 @@ void MacroAssembler::CountLeadingZeros(Register zeros, // Answer.
#ifdef CAN_USE_ARMV5_INSTRUCTIONS
clz(zeros, source); // This instruction is only supported after ARM5.
#else
- mov(zeros, Operand(0, RelocInfo::NONE));
+ // Order of the next two lines is important: zeros register
+ // can be the same as source register.
Move(scratch, source);
+ mov(zeros, Operand(0, RelocInfo::NONE));
// Top 16.
tst(scratch, Operand(0xffff0000));
add(zeros, zeros, Operand(16), LeaveCC, eq);
@@ -3089,23 +3256,15 @@ void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg,
void MacroAssembler::CallCFunction(ExternalReference function,
int num_reg_arguments,
int num_double_arguments) {
- CallCFunctionHelper(no_reg,
- function,
- ip,
- num_reg_arguments,
- num_double_arguments);
+ mov(ip, Operand(function));
+ CallCFunctionHelper(ip, num_reg_arguments, num_double_arguments);
}
void MacroAssembler::CallCFunction(Register function,
- Register scratch,
- int num_reg_arguments,
- int num_double_arguments) {
- CallCFunctionHelper(function,
- ExternalReference::the_hole_value_location(isolate()),
- scratch,
- num_reg_arguments,
- num_double_arguments);
+ int num_reg_arguments,
+ int num_double_arguments) {
+ CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
}
@@ -3116,17 +3275,15 @@ void MacroAssembler::CallCFunction(ExternalReference function,
void MacroAssembler::CallCFunction(Register function,
- Register scratch,
int num_arguments) {
- CallCFunction(function, scratch, num_arguments, 0);
+ CallCFunction(function, num_arguments, 0);
}
void MacroAssembler::CallCFunctionHelper(Register function,
- ExternalReference function_reference,
- Register scratch,
int num_reg_arguments,
int num_double_arguments) {
+ ASSERT(has_frame());
// Make sure that the stack is aligned before calling a C function unless
// running in the simulator. The simulator has its own alignment check which
// provides more information.
@@ -3150,10 +3307,6 @@ void MacroAssembler::CallCFunctionHelper(Register function,
// Just call directly. The function called cannot cause a GC, or
// allow preemption, so the return address in the link register
// stays correct.
- if (function.is(no_reg)) {
- mov(scratch, Operand(function_reference));
- function = scratch;
- }
Call(function);
int stack_passed_arguments = CalculateStackPassedWords(
num_reg_arguments, num_double_arguments);
@@ -3185,6 +3338,185 @@ void MacroAssembler::GetRelocatedValueLocation(Register ldr_location,
}
+void MacroAssembler::CheckPageFlag(
+ Register object,
+ Register scratch,
+ int mask,
+ Condition cc,
+ Label* condition_met) {
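+ // The MemoryChunk header sits at the page-aligned base of the object's
+ // address, so its flags word is reachable by masking off the low bits.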
+ and_(scratch, object, Operand(~Page::kPageAlignmentMask));
+ ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
+ tst(scratch, Operand(mask));
+ b(cc, condition_met);
+}
+
+
+void MacroAssembler::JumpIfBlack(Register object,
+ Register scratch0,
+ Register scratch1,
+ Label* on_black) {
+ HasColor(object, scratch0, scratch1, on_black, 1, 0); // kBlackBitPattern.
+ ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
+}
+
+
+void MacroAssembler::HasColor(Register object,
+ Register bitmap_scratch,
+ Register mask_scratch,
+ Label* has_color,
+ int first_bit,
+ int second_bit) {
+ ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, no_reg));
+
+ GetMarkBits(object, bitmap_scratch, mask_scratch);
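+ // Test the two consecutive mark bits; the second bit may spill into the
+ // next bitmap cell, which the word_boundary path below handles.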
+
+ Label other_color, word_boundary;
+ ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
+ tst(ip, Operand(mask_scratch));
+ b(first_bit == 1 ? eq : ne, &other_color);
+ // Shift left 1 by adding.
+ add(mask_scratch, mask_scratch, Operand(mask_scratch), SetCC);
+ b(eq, &word_boundary);
+ tst(ip, Operand(mask_scratch));
+ b(second_bit == 1 ? ne : eq, has_color);
+ jmp(&other_color);
+
+ bind(&word_boundary);
+ ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize));
+ tst(ip, Operand(1));
+ b(second_bit == 1 ? ne : eq, has_color);
+ bind(&other_color);
+}
+
+
+// Detect some, but not all, common pointer-free objects. This is used by the
+// incremental write barrier which doesn't care about oddballs (they are always
+// marked black immediately so this code is not hit).
+void MacroAssembler::JumpIfDataObject(Register value,
+ Register scratch,
+ Label* not_data_object) {
+ Label is_data_object;
+ ldr(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
+ CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
+ b(eq, &is_data_object);
+ ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
+ ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
+ // If it's a string and it's not a cons string then it's an object containing
+ // no GC pointers.
+ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
+ tst(scratch, Operand(kIsIndirectStringMask | kIsNotStringMask));
+ b(ne, not_data_object);
+ bind(&is_data_object);
+}
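
The single tst works because, under the ASSERTed layout (kIsIndirectStringMask == 1, kIsNotStringMask == 0x80), both "not a string" and "cons or sliced string" contribute nonzero bits. As a predicate:

    // Illustrative only: the constants mirror the ASSERTed values above.
    const int kAssumedIsIndirectStringMask = 1;
    const int kAssumedIsNotStringMask = 0x80;

    // True for sequential and external strings, whose payloads hold no GC
    // pointers.
    bool IsDirectString(int instance_type) {
      return (instance_type &
              (kAssumedIsIndirectStringMask | kAssumedIsNotStringMask)) == 0;
    }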
+
+
+void MacroAssembler::GetMarkBits(Register addr_reg,
+ Register bitmap_reg,
+ Register mask_reg) {
+ ASSERT(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg));
+ and_(bitmap_reg, addr_reg, Operand(~Page::kPageAlignmentMask));
+ Ubfx(mask_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2);
+ const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
+ Ubfx(ip, addr_reg, kLowBits, kPageSizeBits - kLowBits);
+ add(bitmap_reg, bitmap_reg, Operand(ip, LSL, kPointerSizeLog2));
+ mov(ip, Operand(1));
+ mov(mask_reg, Operand(ip, LSL, mask_reg));
+}
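
The same arithmetic in scalar form, under the constants the shifts assume (kPointerSizeLog2 == 2, Bitmap::kBitsPerCellLog2 == 5, kPageSizeBits == 20); the MemoryChunk::kHeaderSize displacement is applied later, at the ldr sites:

    #include <stdint.h>

    // Illustrative only: map a word address to its bitmap cell and bit mask.
    void GetMarkBits(uintptr_t addr, uintptr_t* cell_addr, uint32_t* mask) {
      uintptr_t in_page = addr & ((uintptr_t(1) << 20) - 1);
      uint32_t word_index = uint32_t(in_page >> 2);     // Word number in page.
      *cell_addr = (addr - in_page) + ((word_index >> 5) << 2);  // 4-byte cell.
      *mask = uint32_t(1) << (word_index & 31);         // Bit within the cell.
    }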
+
+
+void MacroAssembler::EnsureNotWhite(
+ Register value,
+ Register bitmap_scratch,
+ Register mask_scratch,
+ Register load_scratch,
+ Label* value_is_white_and_not_data) {
+ ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, ip));
+ GetMarkBits(value, bitmap_scratch, mask_scratch);
+
+ // If the value is black or grey we don't need to do anything.
+ ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
+ ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
+ ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
+ ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
+
+ Label done;
+
+ // Since both black and grey have a 1 in the first position and white does
+ // not have a 1 there we only need to check one bit.
+ ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
+ tst(mask_scratch, load_scratch);
+ b(ne, &done);
+
+ if (FLAG_debug_code) {
+ // Check for impossible bit pattern.
+ Label ok;
+ // LSL may overflow, making the check conservative.
+ tst(load_scratch, Operand(mask_scratch, LSL, 1));
+ b(eq, &ok);
+ stop("Impossible marking bit pattern");
+ bind(&ok);
+ }
+
+ // Value is white. We check whether it is data that doesn't need scanning.
+ // Currently only checks for HeapNumber and non-cons strings.
+ Register map = load_scratch; // Holds map while checking type.
+ Register length = load_scratch; // Holds length of object after testing type.
+ Label is_data_object;
+
+ // Check for heap-number
+ ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
+ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
+ mov(length, Operand(HeapNumber::kSize), LeaveCC, eq);
+ b(eq, &is_data_object);
+
+ // Check for strings.
+ ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
+ ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
+ // If it's a string and it's not a cons string then it's an object containing
+ // no GC pointers.
+ Register instance_type = load_scratch;
+ ldrb(instance_type, FieldMemOperand(map, Map::kInstanceTypeOffset));
+ tst(instance_type, Operand(kIsIndirectStringMask | kIsNotStringMask));
+ b(ne, value_is_white_and_not_data);
+ // It's a non-indirect (non-cons and non-slice) string.
+ // If it's external, the length is just ExternalString::kSize.
+ // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
+ // External strings are the only ones with the kExternalStringTag bit
+ // set.
+ ASSERT_EQ(0, kSeqStringTag & kExternalStringTag);
+ ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
+ tst(instance_type, Operand(kExternalStringTag));
+ mov(length, Operand(ExternalString::kSize), LeaveCC, ne);
+ b(ne, &is_data_object);
+
+ // Sequential string, either ASCII or UC16.
+ // For ASCII (char-size of 1) we shift the smi tag away to get the length.
+ // For UC16 (char-size of 2) we just leave the smi tag in place, thereby
+ // getting the length multiplied by 2.
+ ASSERT(kAsciiStringTag == 4 && kStringEncodingMask == 4);
+ ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
+ ldr(ip, FieldMemOperand(value, String::kLengthOffset));
+ tst(instance_type, Operand(kStringEncodingMask));
+ mov(ip, Operand(ip, LSR, 1), LeaveCC, ne);
+ add(length, ip, Operand(SeqString::kHeaderSize + kObjectAlignmentMask));
+ and_(length, length, Operand(~kObjectAlignmentMask));
+
+ bind(&is_data_object);
+ // Value is a data object, and it is white. Mark it black. Since we know
+ // that the object is white we can make it black by flipping one bit.
+ ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
+ orr(ip, ip, Operand(mask_scratch));
+ str(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
+
+ and_(bitmap_scratch, bitmap_scratch, Operand(~Page::kPageAlignmentMask));
+ ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
+ add(ip, ip, Operand(length));
+ str(ip, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
+
+ bind(&done);
+}
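
Since white is 00 and black is 10, blackening a known-white object is a single OR of its first mark bit plus live-bytes bookkeeping, which is what the final ldr/orr/str and ldr/add/str pairs above do; in scalar form:

    #include <stdint.h>

    // Illustrative only: flip white (00) to black (10) and credit the size.
    void WhiteToBlack(uint32_t* cell, uint32_t mask,
                      int* live_bytes, int object_size) {
      *cell |= mask;               // Set the first mark bit.
      *live_bytes += object_size;  // Per-page count of black (live) bytes.
    }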
+
+
void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
Usat(output_reg, 8, Operand(input_reg));
}
@@ -3234,6 +3566,17 @@ void MacroAssembler::LoadInstanceDescriptors(Register map,
}
+bool AreAliased(Register r1, Register r2, Register r3, Register r4) {
+ if (r1.is(r2)) return true;
+ if (r1.is(r3)) return true;
+ if (r1.is(r4)) return true;
+ if (r2.is(r3)) return true;
+ if (r2.is(r4)) return true;
+ if (r3.is(r4)) return true;
+ return false;
+}
+
+
CodePatcher::CodePatcher(byte* address, int instructions)
: address_(address),
instructions_(instructions),
diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h
index 6084fde2d..2725883ee 100644
--- a/deps/v8/src/arm/macro-assembler-arm.h
+++ b/deps/v8/src/arm/macro-assembler-arm.h
@@ -29,6 +29,7 @@
#define V8_ARM_MACRO_ASSEMBLER_ARM_H_
#include "assembler.h"
+#include "frames.h"
#include "v8globals.h"
namespace v8 {
@@ -38,12 +39,12 @@ namespace internal {
// Static helper functions
// Generate a MemOperand for loading a field from an object.
-static inline MemOperand FieldMemOperand(Register object, int offset) {
+inline MemOperand FieldMemOperand(Register object, int offset) {
return MemOperand(object, offset - kHeapObjectTag);
}
-static inline Operand SmiUntagOperand(Register object) {
+inline Operand SmiUntagOperand(Register object) {
return Operand(object, ASR, kSmiTagSize);
}
@@ -79,6 +80,14 @@ enum ObjectToDoubleFlags {
};
+enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
+enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
+enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved };
+
+
+bool AreAliased(Register r1, Register r2, Register r3, Register r4);
+
+
// MacroAssembler implements a collection of frequently used macros.
class MacroAssembler: public Assembler {
public:
@@ -157,40 +166,126 @@ class MacroAssembler: public Assembler {
Heap::RootListIndex index,
Condition cond = al);
+ // ---------------------------------------------------------------------------
+ // GC Support
+
+ void IncrementalMarkingRecordWriteHelper(Register object,
+ Register value,
+ Register address);
+
+ enum RememberedSetFinalAction {
+ kReturnAtEnd,
+ kFallThroughAtEnd
+ };
+
+ // Record in the remembered set the fact that we have a pointer to new space
+ // at the address pointed to by the addr register. Only works if addr is not
+ // in new space.
+ void RememberedSetHelper(Register object, // Used for debug code.
+ Register addr,
+ Register scratch,
+ SaveFPRegsMode save_fp,
+ RememberedSetFinalAction and_then);
+
+ void CheckPageFlag(Register object,
+ Register scratch,
+ int mask,
+ Condition cc,
+ Label* condition_met);
+
+ // Check if object is in new space. Jumps if the object is not in new space.
+ // The register scratch can be object itself, but scratch will be clobbered.
+ void JumpIfNotInNewSpace(Register object,
+ Register scratch,
+ Label* branch) {
+ InNewSpace(object, scratch, ne, branch);
+ }
- // Check if object is in new space.
- // scratch can be object itself, but it will be clobbered.
- void InNewSpace(Register object,
- Register scratch,
- Condition cond, // eq for new space, ne otherwise
- Label* branch);
-
+ // Check if object is in new space. Jumps if the object is in new space.
+ // The register scratch can be object itself, but it will be clobbered.
+ void JumpIfInNewSpace(Register object,
+ Register scratch,
+ Label* branch) {
+ InNewSpace(object, scratch, eq, branch);
+ }
- // For the page containing |object| mark the region covering [address]
- // dirty. The object address must be in the first 8K of an allocated page.
- void RecordWriteHelper(Register object,
- Register address,
- Register scratch);
+ // Check if an object has a given incremental marking color.
+ void HasColor(Register object,
+ Register scratch0,
+ Register scratch1,
+ Label* has_color,
+ int first_bit,
+ int second_bit);
- // For the page containing |object| mark the region covering
- // [object+offset] dirty. The object address must be in the first 8K
- // of an allocated page. The 'scratch' registers are used in the
- // implementation and all 3 registers are clobbered by the
- // operation, as well as the ip register. RecordWrite updates the
- // write barrier even when storing smis.
- void RecordWrite(Register object,
- Operand offset,
+ void JumpIfBlack(Register object,
Register scratch0,
- Register scratch1);
+ Register scratch1,
+ Label* on_black);
+
+ // Checks the color of an object. If the object is already grey or black
+ // then we just fall through, since it is already live. If it is white and
+ // we can determine that it doesn't need to be scanned, then we just mark it
+ // black and fall through. For the rest we jump to the label so the
+ // incremental marker can fix its assumptions.
+ void EnsureNotWhite(Register object,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* object_is_white_and_not_data);
- // For the page containing |object| mark the region covering
- // [address] dirty. The object address must be in the first 8K of an
- // allocated page. All 3 registers are clobbered by the operation,
- // as well as the ip register. RecordWrite updates the write barrier
- // even when storing smis.
- void RecordWrite(Register object,
- Register address,
- Register scratch);
+  // Detects conservatively whether an object is data-only, i.e. it does not
+  // need to be scanned by the garbage collector.
+ void JumpIfDataObject(Register value,
+ Register scratch,
+ Label* not_data_object);
+
+ // Notify the garbage collector that we wrote a pointer into an object.
+ // |object| is the object being stored into, |value| is the object being
+  // stored. The value and scratch registers are clobbered by the operation.
+  // The offset is the offset from the start of the object, not the offset
+  // from the tagged HeapObject pointer. For use with FieldMemOperand(reg, off).
+ void RecordWriteField(
+ Register object,
+ int offset,
+ Register value,
+ Register scratch,
+ LinkRegisterStatus lr_status,
+ SaveFPRegsMode save_fp,
+ RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
+ SmiCheck smi_check = INLINE_SMI_CHECK);
+
+ // As above, but the offset has the tag presubtracted. For use with
+ // MemOperand(reg, off).
+ inline void RecordWriteContextSlot(
+ Register context,
+ int offset,
+ Register value,
+ Register scratch,
+ LinkRegisterStatus lr_status,
+ SaveFPRegsMode save_fp,
+ RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
+ SmiCheck smi_check = INLINE_SMI_CHECK) {
+ RecordWriteField(context,
+ offset + kHeapObjectTag,
+ value,
+ scratch,
+ lr_status,
+ save_fp,
+ remembered_set_action,
+ smi_check);
+ }
+
+ // For a given |object| notify the garbage collector that the slot |address|
+ // has been written. |value| is the object being stored. The value and
+ // address registers are clobbered by the operation.
+ void RecordWrite(
+ Register object,
+ Register address,
+ Register value,
+ LinkRegisterStatus lr_status,
+ SaveFPRegsMode save_fp,
+ RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
+ SmiCheck smi_check = INLINE_SMI_CHECK);
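
A typical call site pairs the store with the barrier, as the stub-cache changes later in this diff do; a sketch with made-up registers and a hypothetical kFooOffset:

    // Illustrative only: store a pointer field, then inform the GC.
    __ str(r0, FieldMemOperand(r1, kFooOffset));
    __ mov(r2, r0);  // The barrier clobbers the value register.
    __ RecordWriteField(r1, kFooOffset, r2, r3,
                        kLRHasNotBeenSaved, kDontSaveFPRegs);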
// Push a handle.
void Push(Handle<Object> handle);
@@ -225,8 +320,11 @@ class MacroAssembler: public Assembler {
}
// Push four registers. Pushes leftmost register first (to highest address).
- void Push(Register src1, Register src2,
- Register src3, Register src4, Condition cond = al) {
+ void Push(Register src1,
+ Register src2,
+ Register src3,
+ Register src4,
+ Condition cond = al) {
ASSERT(!src1.is(src2));
ASSERT(!src2.is(src3));
ASSERT(!src1.is(src3));
@@ -265,6 +363,57 @@ class MacroAssembler: public Assembler {
}
}
+ // Pop three registers. Pops rightmost register first (from lower address).
+ void Pop(Register src1, Register src2, Register src3, Condition cond = al) {
+ ASSERT(!src1.is(src2));
+ ASSERT(!src2.is(src3));
+ ASSERT(!src1.is(src3));
+ if (src1.code() > src2.code()) {
+ if (src2.code() > src3.code()) {
+ ldm(ia_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
+ } else {
+ ldr(src3, MemOperand(sp, 4, PostIndex), cond);
+ ldm(ia_w, sp, src1.bit() | src2.bit(), cond);
+ }
+ } else {
+ Pop(src2, src3, cond);
+ str(src1, MemOperand(sp, 4, PostIndex), cond);
+ }
+ }
+
+ // Pop four registers. Pops rightmost register first (from lower address).
+ void Pop(Register src1,
+ Register src2,
+ Register src3,
+ Register src4,
+ Condition cond = al) {
+ ASSERT(!src1.is(src2));
+ ASSERT(!src2.is(src3));
+ ASSERT(!src1.is(src3));
+ ASSERT(!src1.is(src4));
+ ASSERT(!src2.is(src4));
+ ASSERT(!src3.is(src4));
+ if (src1.code() > src2.code()) {
+ if (src2.code() > src3.code()) {
+ if (src3.code() > src4.code()) {
+ ldm(ia_w,
+ sp,
+ src1.bit() | src2.bit() | src3.bit() | src4.bit(),
+ cond);
+ } else {
+ ldr(src4, MemOperand(sp, 4, PostIndex), cond);
+ ldm(ia_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
+ }
+ } else {
+ Pop(src3, src4, cond);
+ ldm(ia_w, sp, src1.bit() | src2.bit(), cond);
+ }
+ } else {
+ Pop(src2, src3, src4, cond);
+ ldr(src1, MemOperand(sp, 4, PostIndex), cond);
+ }
+ }
+
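
Because Push stores the leftmost register at the highest address and Pop loads the rightmost first from the lowest address, a Pop with the same argument order as the matching Push restores every register; an illustrative pairing:

    masm->Push(r1, r2, r3, r4);  // r1 ends up at the highest stack address.
    masm->Pop(r1, r2, r3, r4);   // r1 is restored last, from that address.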
// Push and pop the registers that can hold pointers, as defined by the
// RegList constant kSafepointSavedRegisters.
void PushSafepointRegisters();
@@ -318,16 +467,6 @@ class MacroAssembler: public Assembler {
const double imm,
const Condition cond = al);
-
- // ---------------------------------------------------------------------------
- // Activation frames
-
- void EnterInternalFrame() { EnterFrame(StackFrame::INTERNAL); }
- void LeaveInternalFrame() { LeaveFrame(StackFrame::INTERNAL); }
-
- void EnterConstructFrame() { EnterFrame(StackFrame::CONSTRUCT); }
- void LeaveConstructFrame() { LeaveFrame(StackFrame::CONSTRUCT); }
-
// Enter exit frame.
// stack_space - extra stack space, used for alignment before call to C.
void EnterExitFrame(bool save_doubles, int stack_space = 0);
@@ -381,7 +520,7 @@ class MacroAssembler: public Assembler {
const CallWrapper& call_wrapper,
CallKind call_kind);
- void InvokeFunction(JSFunction* function,
+ void InvokeFunction(Handle<JSFunction> function,
const ParameterCount& actual,
InvokeFlag flag,
CallKind call_kind);
@@ -410,9 +549,9 @@ class MacroAssembler: public Assembler {
// Exception handling
// Push a new try handler and link into try handler chain.
- // The return address must be passed in register lr.
- // On exit, r0 contains TOS (code slot).
- void PushTryHandler(CodeLocation try_location, HandlerType type);
+ void PushTryHandler(CodeLocation try_location,
+ HandlerType type,
+ int handler_index);
// Unlink the stack handler on top of the stack from the try handler chain.
// Must preserve the result register.
@@ -569,6 +708,13 @@ class MacroAssembler: public Assembler {
Register length,
Register scratch);
+  // Initialize fields with filler values. Fields starting at |start_offset|
+  // up to but not including |end_offset| are overwritten with the value in
+  // |filler|. At the end of the loop, |start_offset| takes the value of
+  // |end_offset|.
+ void InitializeFieldsWithFiller(Register start_offset,
+ Register end_offset,
+ Register filler);
+
// ---------------------------------------------------------------------------
// Support functions.
@@ -580,7 +726,8 @@ class MacroAssembler: public Assembler {
void TryGetFunctionPrototype(Register function,
Register result,
Register scratch,
- Label* miss);
+ Label* miss,
+ bool miss_on_bound_function = false);
// Compare object type for heap object. heap_object contains a non-Smi
// whose object type should be compared with the given type. This both
@@ -608,6 +755,31 @@ class MacroAssembler: public Assembler {
Register scratch,
Label* fail);
+ // Check if a map for a JSObject indicates that the object can have both smi
+ // and HeapObject elements. Jump to the specified label if it does not.
+ void CheckFastObjectElements(Register map,
+ Register scratch,
+ Label* fail);
+
+ // Check if a map for a JSObject indicates that the object has fast smi only
+ // elements. Jump to the specified label if it does not.
+ void CheckFastSmiOnlyElements(Register map,
+ Register scratch,
+ Label* fail);
+
+  // Check to see if value_reg can be stored as a double in
+  // FastDoubleElements. If it can, store it at the index specified by key_reg
+  // in the FastDoubleElements array elements; otherwise jump to fail.
+ void StoreNumberToDoubleElements(Register value_reg,
+ Register key_reg,
+ Register receiver_reg,
+ Register elements_reg,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4,
+ Label* fail);
+
// Check if the map of an object is equal to a specified map (either
// given directly or as an index into the root list) and branch to
// label if not. Skip the smi check if not required (object is known
@@ -754,20 +926,9 @@ class MacroAssembler: public Assembler {
// Call a code stub.
void CallStub(CodeStub* stub, Condition cond = al);
- // Call a code stub and return the code object called. Try to generate
- // the code if necessary. Do not perform a GC but instead return a retry
- // after GC failure.
- MUST_USE_RESULT MaybeObject* TryCallStub(CodeStub* stub, Condition cond = al);
-
// Call a code stub.
void TailCallStub(CodeStub* stub, Condition cond = al);
- // Tail call a code stub (jump) and return the code object called. Try to
- // generate the code if necessary. Do not perform a GC but instead return
- // a retry after GC failure.
- MUST_USE_RESULT MaybeObject* TryTailCallStub(CodeStub* stub,
- Condition cond = al);
-
// Call a runtime routine.
void CallRuntime(const Runtime::Function* f, int num_arguments);
void CallRuntimeSaveDoubles(Runtime::FunctionId id);
@@ -786,12 +947,6 @@ class MacroAssembler: public Assembler {
int num_arguments,
int result_size);
- // Tail call of a runtime routine (jump). Try to generate the code if
- // necessary. Do not perform a GC but instead return a retry after GC
- // failure.
- MUST_USE_RESULT MaybeObject* TryTailCallExternalReference(
- const ExternalReference& ext, int num_arguments, int result_size);
-
// Convenience function: tail call a runtime routine (jump).
void TailCallRuntime(Runtime::FunctionId fid,
int num_arguments,
@@ -830,28 +985,25 @@ class MacroAssembler: public Assembler {
// return address (unless this is somehow accounted for by the called
// function).
void CallCFunction(ExternalReference function, int num_arguments);
- void CallCFunction(Register function, Register scratch, int num_arguments);
+ void CallCFunction(Register function, int num_arguments);
void CallCFunction(ExternalReference function,
int num_reg_arguments,
int num_double_arguments);
- void CallCFunction(Register function, Register scratch,
+ void CallCFunction(Register function,
int num_reg_arguments,
int num_double_arguments);
void GetCFunctionDoubleResult(const DoubleRegister dst);
- // Calls an API function. Allocates HandleScope, extracts returned value
- // from handle and propagates exceptions. Restores context.
- // stack_space - space to be unwound on exit (includes the call js
- // arguments space and the additional space allocated for the fast call).
- MaybeObject* TryCallApiFunctionAndReturn(ExternalReference function,
- int stack_space);
+ // Calls an API function. Allocates HandleScope, extracts returned value
+ // from handle and propagates exceptions. Restores context. stack_space
+ // - space to be unwound on exit (includes the call js arguments space and
+ // the additional space allocated for the fast call).
+ void CallApiFunctionAndReturn(ExternalReference function, int stack_space);
// Jump to a runtime routine.
void JumpToExternalReference(const ExternalReference& builtin);
- MaybeObject* TryJumpToExternalReference(const ExternalReference& ext);
-
// Invoke specified builtin JavaScript function. Adds an entry to
// the unresolved list if the name does not resolve.
void InvokeBuiltin(Builtins::JavaScript id,
@@ -902,6 +1054,9 @@ class MacroAssembler: public Assembler {
bool generating_stub() { return generating_stub_; }
void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
bool allow_stub_calls() { return allow_stub_calls_; }
+ void set_has_frame(bool value) { has_frame_ = value; }
+ bool has_frame() { return has_frame_; }
+ inline bool AllowThisStubCall(CodeStub* stub);
// EABI variant for double arguments in use.
bool use_eabi_hardfloat() {
@@ -1048,10 +1203,12 @@ class MacroAssembler: public Assembler {
void LoadInstanceDescriptors(Register map, Register descriptors);
+ // Activation support.
+ void EnterFrame(StackFrame::Type type);
+ void LeaveFrame(StackFrame::Type type);
+
private:
void CallCFunctionHelper(Register function,
- ExternalReference function_reference,
- Register scratch,
int num_reg_arguments,
int num_double_arguments);
@@ -1067,16 +1224,29 @@ class MacroAssembler: public Assembler {
const CallWrapper& call_wrapper,
CallKind call_kind);
- // Activation support.
- void EnterFrame(StackFrame::Type type);
- void LeaveFrame(StackFrame::Type type);
-
void InitializeNewString(Register string,
Register length,
Heap::RootListIndex map_index,
Register scratch1,
Register scratch2);
+ // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
+ void InNewSpace(Register object,
+ Register scratch,
+ Condition cond, // eq for new space, ne otherwise.
+ Label* branch);
+
+ // Helper for finding the mark bits for an address. Afterwards, the
+ // bitmap register points at the word with the mark bits and the mask
+  // register selects the first mark bit. Leaves addr_reg unchanged.
+ inline void GetMarkBits(Register addr_reg,
+ Register bitmap_reg,
+ Register mask_reg);
+
+ // Helper for throwing exceptions. Compute a handler address and jump to
+ // it. See the implementation for register usage.
+ void JumpToHandlerEntry();
+
// Compute memory operands for safepoint stack slots.
static int SafepointRegisterStackIndex(int reg_code);
MemOperand SafepointRegisterSlot(Register reg);
@@ -1084,6 +1254,7 @@ class MacroAssembler: public Assembler {
bool generating_stub_;
bool allow_stub_calls_;
+ bool has_frame_;
// This handle will be patched with the code object on installation.
Handle<Object> code_object_;
@@ -1129,12 +1300,12 @@ class CodePatcher {
// -----------------------------------------------------------------------------
// Static helper functions.
-static MemOperand ContextOperand(Register context, int index) {
+inline MemOperand ContextOperand(Register context, int index) {
return MemOperand(context, Context::SlotOffset(index));
}
-static inline MemOperand GlobalObjectOperand() {
+inline MemOperand GlobalObjectOperand() {
return ContextOperand(cp, Context::GLOBAL_INDEX);
}
diff --git a/deps/v8/src/arm/regexp-macro-assembler-arm.cc b/deps/v8/src/arm/regexp-macro-assembler-arm.cc
index cd76edbf1..b212f9f6e 100644
--- a/deps/v8/src/arm/regexp-macro-assembler-arm.cc
+++ b/deps/v8/src/arm/regexp-macro-assembler-arm.cc
@@ -371,9 +371,12 @@ void RegExpMacroAssemblerARM::CheckNotBackReferenceIgnoreCase(
// Isolate.
__ mov(r3, Operand(ExternalReference::isolate_address()));
- ExternalReference function =
- ExternalReference::re_case_insensitive_compare_uc16(masm_->isolate());
- __ CallCFunction(function, argument_count);
+ {
+ AllowExternalCallThatCantCauseGC scope(masm_);
+ ExternalReference function =
+ ExternalReference::re_case_insensitive_compare_uc16(masm_->isolate());
+ __ CallCFunction(function, argument_count);
+ }
// Check if function returned non-zero for success or zero for failure.
__ cmp(r0, Operand(0, RelocInfo::NONE));
@@ -611,6 +614,12 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
// Entry code:
__ bind(&entry_label_);
+
+  // Tell the system that we have a stack frame. Because the type is MANUAL,
+  // no code is generated.
+ FrameScope scope(masm_, StackFrame::MANUAL);
+
+ // Actually emit code to start a new stack frame.
// Push arguments
// Save callee-save registers.
// Start new stack frame.
@@ -1102,6 +1111,11 @@ int RegExpMacroAssemblerARM::CheckStackGuardState(Address* return_address,
frame_entry<const String*>(re_frame, kInputString) = *subject;
frame_entry<const byte*>(re_frame, kInputStart) = new_address;
frame_entry<const byte*>(re_frame, kInputEnd) = new_address + byte_length;
+ } else if (frame_entry<const String*>(re_frame, kInputString) != *subject) {
+ // Subject string might have been a ConsString that underwent
+ // short-circuiting during GC. That will not change start_address but
+    // will change the pointer inside the subject handle.
+ frame_entry<const String*>(re_frame, kInputString) = *subject;
}
return 0;
diff --git a/deps/v8/src/arm/simulator-arm.cc b/deps/v8/src/arm/simulator-arm.cc
index 6af535553..0525529fd 100644
--- a/deps/v8/src/arm/simulator-arm.cc
+++ b/deps/v8/src/arm/simulator-arm.cc
@@ -53,7 +53,7 @@ namespace internal {
// code.
class ArmDebugger {
public:
- explicit ArmDebugger(Simulator* sim);
+ explicit ArmDebugger(Simulator* sim) : sim_(sim) { }
~ArmDebugger();
void Stop(Instruction* instr);
@@ -84,11 +84,6 @@ class ArmDebugger {
};
-ArmDebugger::ArmDebugger(Simulator* sim) {
- sim_ = sim;
-}
-
-
ArmDebugger::~ArmDebugger() {
}
@@ -296,6 +291,13 @@ void ArmDebugger::Debug() {
if (line == NULL) {
break;
} else {
+ char* last_input = sim_->last_debugger_input();
+ if (strcmp(line, "\n") == 0 && last_input != NULL) {
+ line = last_input;
+ } else {
+        // Ownership is transferred to sim_.
+ sim_->set_last_debugger_input(line);
+ }
// Use sscanf to parse the individual parts of the command line. At the
// moment no command expects more than two parameters.
int argc = SScanF(line,
@@ -611,7 +613,6 @@ void ArmDebugger::Debug() {
PrintF("Unknown command: %s\n", cmd);
}
}
- DeleteArray(line);
}
// Add all the breakpoints back to stop execution and enter the debugger
@@ -645,6 +646,12 @@ static bool AllOnOnePage(uintptr_t start, int size) {
}
+void Simulator::set_last_debugger_input(char* input) {
+ DeleteArray(last_debugger_input_);
+ last_debugger_input_ = input;
+}
+
+
void Simulator::FlushICache(v8::internal::HashMap* i_cache,
void* start_addr,
size_t size) {
@@ -781,6 +788,8 @@ Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
registers_[pc] = bad_lr;
registers_[lr] = bad_lr;
InitializeCoverage();
+
+ last_debugger_input_ = NULL;
}
@@ -1268,9 +1277,9 @@ void Simulator::WriteDW(int32_t addr, int32_t value1, int32_t value2) {
// Returns the limit of the stack area to enable checking for stack overflows.
uintptr_t Simulator::StackLimit() const {
- // Leave a safety margin of 256 bytes to prevent overrunning the stack when
+ // Leave a safety margin of 512 bytes to prevent overrunning the stack when
// pushing values.
- return reinterpret_cast<uintptr_t>(stack_) + 256;
+ return reinterpret_cast<uintptr_t>(stack_) + 512;
}
@@ -1618,6 +1627,8 @@ void Simulator::HandleRList(Instruction* instr, bool load) {
ProcessPUW(instr, num_regs, kPointerSize, &start_address, &end_address);
intptr_t* address = reinterpret_cast<intptr_t*>(start_address);
+ // Catch null pointers a little earlier.
+ ASSERT(start_address > 8191 || start_address < 0);
int reg = 0;
while (rlist != 0) {
if ((rlist & 1) != 0) {
diff --git a/deps/v8/src/arm/simulator-arm.h b/deps/v8/src/arm/simulator-arm.h
index 391ef69f5..585f1e017 100644
--- a/deps/v8/src/arm/simulator-arm.h
+++ b/deps/v8/src/arm/simulator-arm.h
@@ -194,6 +194,10 @@ class Simulator {
// Pop an address from the JS stack.
uintptr_t PopAddress();
+ // Debugger input.
+ void set_last_debugger_input(char* input);
+ char* last_debugger_input() { return last_debugger_input_; }
+
// ICache checking.
static void FlushICache(v8::internal::HashMap* i_cache, void* start,
size_t size);
@@ -360,6 +364,9 @@ class Simulator {
bool pc_modified_;
int icount_;
+ // Debugger input.
+ char* last_debugger_input_;
+
// Icache simulation
v8::internal::HashMap* i_cache_;
diff --git a/deps/v8/src/arm/stub-cache-arm.cc b/deps/v8/src/arm/stub-cache-arm.cc
index f8565924b..47778f580 100644
--- a/deps/v8/src/arm/stub-cache-arm.cc
+++ b/deps/v8/src/arm/stub-cache-arm.cc
@@ -95,13 +95,12 @@ static void ProbeTable(Isolate* isolate,
// must always call a backup property check that is complete.
// This function is safe to call if the receiver has fast properties.
// Name must be a symbol and receiver must be a heap object.
-MUST_USE_RESULT static MaybeObject* GenerateDictionaryNegativeLookup(
- MacroAssembler* masm,
- Label* miss_label,
- Register receiver,
- String* name,
- Register scratch0,
- Register scratch1) {
+static void GenerateDictionaryNegativeLookup(MacroAssembler* masm,
+ Label* miss_label,
+ Register receiver,
+ Handle<String> name,
+ Register scratch0,
+ Register scratch1) {
ASSERT(name->IsSymbol());
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->negative_lookups(), 1, scratch0, scratch1);
@@ -138,20 +137,15 @@ MUST_USE_RESULT static MaybeObject* GenerateDictionaryNegativeLookup(
__ ldr(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
- MaybeObject* result = StringDictionaryLookupStub::GenerateNegativeLookup(
- masm,
- miss_label,
- &done,
- receiver,
- properties,
- name,
- scratch1);
- if (result->IsFailure()) return result;
-
+ StringDictionaryLookupStub::GenerateNegativeLookup(masm,
+ miss_label,
+ &done,
+ receiver,
+ properties,
+ name,
+ scratch1);
__ bind(&done);
__ DecrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
-
- return result;
}
@@ -238,7 +232,10 @@ void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
- MacroAssembler* masm, int index, Register prototype, Label* miss) {
+ MacroAssembler* masm,
+ int index,
+ Register prototype,
+ Label* miss) {
Isolate* isolate = masm->isolate();
// Check we're still in the same context.
__ ldr(prototype, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
@@ -246,8 +243,8 @@ void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
__ cmp(prototype, ip);
__ b(ne, miss);
// Get the global function with the given index.
- JSFunction* function =
- JSFunction::cast(isolate->global_context()->get(index));
+ Handle<JSFunction> function(
+ JSFunction::cast(isolate->global_context()->get(index)));
// Load its initial map. The global functions all have initial maps.
__ Move(prototype, Handle<Map>(function->initial_map()));
// Load the prototype from the initial map.
@@ -259,8 +256,10 @@ void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
// are loaded directly otherwise the property is loaded from the properties
// fixed array.
void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
- Register dst, Register src,
- JSObject* holder, int index) {
+ Register dst,
+ Register src,
+ Handle<JSObject> holder,
+ int index) {
// Adjust for the number of properties stored in the holder.
index -= holder->map()->inobject_properties();
if (index < 0) {
@@ -367,9 +366,9 @@ void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm,
// may be clobbered. Upon branch to miss_label, the receiver and name
// registers have their original values.
void StubCompiler::GenerateStoreField(MacroAssembler* masm,
- JSObject* object,
+ Handle<JSObject> object,
int index,
- Map* transition,
+ Handle<Map> transition,
Register receiver_reg,
Register name_reg,
Register scratch,
@@ -395,11 +394,11 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
// Perform map transition for the receiver if necessary.
- if ((transition != NULL) && (object->map()->unused_property_fields() == 0)) {
+ if (!transition.is_null() && (object->map()->unused_property_fields() == 0)) {
// The properties must be extended before we can store the value.
// We jump to a runtime call that extends the properties array.
__ push(receiver_reg);
- __ mov(r2, Operand(Handle<Map>(transition)));
+ __ mov(r2, Operand(transition));
__ Push(r2, r0);
__ TailCallExternalReference(
ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage),
@@ -409,10 +408,10 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
return;
}
- if (transition != NULL) {
+ if (!transition.is_null()) {
// Update the map of the object; no write barrier updating is
// needed because the map is never in new space.
- __ mov(ip, Operand(Handle<Map>(transition)));
+ __ mov(ip, Operand(transition));
__ str(ip, FieldMemOperand(receiver_reg, HeapObject::kMapOffset));
}
@@ -431,7 +430,13 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
// Update the write barrier for the array address.
// Pass the now unused name_reg as a scratch register.
- __ RecordWrite(receiver_reg, Operand(offset), name_reg, scratch);
+ __ mov(name_reg, r0);
+ __ RecordWriteField(receiver_reg,
+ offset,
+ name_reg,
+ scratch,
+ kLRHasNotBeenSaved,
+ kDontSaveFPRegs);
} else {
// Write to the properties array.
int offset = index * kPointerSize + FixedArray::kHeaderSize;
@@ -444,7 +449,13 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
// Update the write barrier for the array address.
// Ok to clobber receiver_reg and name_reg, since we return.
- __ RecordWrite(scratch, Operand(offset), name_reg, receiver_reg);
+ __ mov(name_reg, r0);
+ __ RecordWriteField(scratch,
+ offset,
+ name_reg,
+ receiver_reg,
+ kLRHasNotBeenSaved,
+ kDontSaveFPRegs);
}
// Return the value (register r0).
@@ -455,20 +466,15 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
void StubCompiler::GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind) {
ASSERT(kind == Code::LOAD_IC || kind == Code::KEYED_LOAD_IC);
- Code* code = NULL;
- if (kind == Code::LOAD_IC) {
- code = masm->isolate()->builtins()->builtin(Builtins::kLoadIC_Miss);
- } else {
- code = masm->isolate()->builtins()->builtin(Builtins::kKeyedLoadIC_Miss);
- }
-
- Handle<Code> ic(code);
- __ Jump(ic, RelocInfo::CODE_TARGET);
+ Handle<Code> code = (kind == Code::LOAD_IC)
+ ? masm->isolate()->builtins()->LoadIC_Miss()
+ : masm->isolate()->builtins()->KeyedLoadIC_Miss();
+ __ Jump(code, RelocInfo::CODE_TARGET);
}
static void GenerateCallFunction(MacroAssembler* masm,
- Object* object,
+ Handle<Object> object,
const ParameterCount& arguments,
Label* miss,
Code::ExtraICState extra_ic_state) {
@@ -501,12 +507,12 @@ static void PushInterceptorArguments(MacroAssembler* masm,
Register receiver,
Register holder,
Register name,
- JSObject* holder_obj) {
+ Handle<JSObject> holder_obj) {
__ push(name);
- InterceptorInfo* interceptor = holder_obj->GetNamedInterceptor();
- ASSERT(!masm->isolate()->heap()->InNewSpace(interceptor));
+ Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor());
+ ASSERT(!masm->isolate()->heap()->InNewSpace(*interceptor));
Register scratch = name;
- __ mov(scratch, Operand(Handle<Object>(interceptor)));
+ __ mov(scratch, Operand(interceptor));
__ push(scratch);
__ push(receiver);
__ push(holder);
@@ -515,11 +521,12 @@ static void PushInterceptorArguments(MacroAssembler* masm,
}
-static void CompileCallLoadPropertyWithInterceptor(MacroAssembler* masm,
- Register receiver,
- Register holder,
- Register name,
- JSObject* holder_obj) {
+static void CompileCallLoadPropertyWithInterceptor(
+ MacroAssembler* masm,
+ Register receiver,
+ Register holder,
+ Register name,
+ Handle<JSObject> holder_obj) {
PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
ExternalReference ref =
@@ -532,6 +539,7 @@ static void CompileCallLoadPropertyWithInterceptor(MacroAssembler* masm,
__ CallStub(&stub);
}
+
static const int kFastApiCallArguments = 3;
// Reserves space for the extra arguments to FastHandleApiCall in the
@@ -553,7 +561,7 @@ static void FreeSpaceForFastApiCall(MacroAssembler* masm) {
}
-static MaybeObject* GenerateFastApiDirectCall(MacroAssembler* masm,
+static void GenerateFastApiDirectCall(MacroAssembler* masm,
const CallOptimization& optimization,
int argc) {
// ----------- S t a t e -------------
@@ -566,18 +574,18 @@ static MaybeObject* GenerateFastApiDirectCall(MacroAssembler* masm,
// -- sp[(argc + 4) * 4] : receiver
// -----------------------------------
// Get the function and setup the context.
- JSFunction* function = optimization.constant_function();
- __ mov(r5, Operand(Handle<JSFunction>(function)));
+ Handle<JSFunction> function = optimization.constant_function();
+ __ mov(r5, Operand(function));
__ ldr(cp, FieldMemOperand(r5, JSFunction::kContextOffset));
// Pass the additional arguments FastHandleApiCall expects.
- Object* call_data = optimization.api_call_info()->data();
- Handle<CallHandlerInfo> api_call_info_handle(optimization.api_call_info());
- if (masm->isolate()->heap()->InNewSpace(call_data)) {
- __ Move(r0, api_call_info_handle);
+ Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
+ Handle<Object> call_data(api_call_info->data());
+ if (masm->isolate()->heap()->InNewSpace(*call_data)) {
+ __ Move(r0, api_call_info);
__ ldr(r6, FieldMemOperand(r0, CallHandlerInfo::kDataOffset));
} else {
- __ Move(r6, Handle<Object>(call_data));
+ __ Move(r6, call_data);
}
// Store js function and call data.
__ stm(ib, sp, r5.bit() | r6.bit());
@@ -586,11 +594,9 @@ static MaybeObject* GenerateFastApiDirectCall(MacroAssembler* masm,
// (refer to layout above).
__ add(r2, sp, Operand(2 * kPointerSize));
- Object* callback = optimization.api_call_info()->callback();
- Address api_function_address = v8::ToCData<Address>(callback);
- ApiFunction fun(api_function_address);
-
const int kApiStackSpace = 4;
+
+ FrameScope frame_scope(masm, StackFrame::MANUAL);
__ EnterExitFrame(false, kApiStackSpace);
// r0 = v8::Arguments&
@@ -608,17 +614,18 @@ static MaybeObject* GenerateFastApiDirectCall(MacroAssembler* masm,
__ mov(ip, Operand(0));
__ str(ip, MemOperand(r0, 3 * kPointerSize));
- // Emitting a stub call may try to allocate (if the code is not
- // already generated). Do not allow the assembler to perform a
- // garbage collection but instead return the allocation failure
- // object.
const int kStackUnwindSpace = argc + kFastApiCallArguments + 1;
+ Address function_address = v8::ToCData<Address>(api_call_info->callback());
+ ApiFunction fun(function_address);
ExternalReference ref = ExternalReference(&fun,
ExternalReference::DIRECT_API_CALL,
masm->isolate());
- return masm->TryCallApiFunctionAndReturn(ref, kStackUnwindSpace);
+ AllowExternalCallThatCantCauseGC scope(masm);
+
+ __ CallApiFunctionAndReturn(ref, kStackUnwindSpace);
}
+
class CallInterceptorCompiler BASE_EMBEDDED {
public:
CallInterceptorCompiler(StubCompiler* stub_compiler,
@@ -630,86 +637,63 @@ class CallInterceptorCompiler BASE_EMBEDDED {
name_(name),
extra_ic_state_(extra_ic_state) {}
- MaybeObject* Compile(MacroAssembler* masm,
- JSObject* object,
- JSObject* holder,
- String* name,
- LookupResult* lookup,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* miss) {
+ void Compile(MacroAssembler* masm,
+ Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<String> name,
+ LookupResult* lookup,
+ Register receiver,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* miss) {
ASSERT(holder->HasNamedInterceptor());
ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
// Check that the receiver isn't a smi.
__ JumpIfSmi(receiver, miss);
-
CallOptimization optimization(lookup);
-
if (optimization.is_constant_call()) {
- return CompileCacheable(masm,
- object,
- receiver,
- scratch1,
- scratch2,
- scratch3,
- holder,
- lookup,
- name,
- optimization,
- miss);
+ CompileCacheable(masm, object, receiver, scratch1, scratch2, scratch3,
+ holder, lookup, name, optimization, miss);
} else {
- CompileRegular(masm,
- object,
- receiver,
- scratch1,
- scratch2,
- scratch3,
- name,
- holder,
- miss);
- return masm->isolate()->heap()->undefined_value();
+ CompileRegular(masm, object, receiver, scratch1, scratch2, scratch3,
+ name, holder, miss);
}
}
private:
- MaybeObject* CompileCacheable(MacroAssembler* masm,
- JSObject* object,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- JSObject* interceptor_holder,
- LookupResult* lookup,
- String* name,
- const CallOptimization& optimization,
- Label* miss_label) {
+ void CompileCacheable(MacroAssembler* masm,
+ Handle<JSObject> object,
+ Register receiver,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Handle<JSObject> interceptor_holder,
+ LookupResult* lookup,
+ Handle<String> name,
+ const CallOptimization& optimization,
+ Label* miss_label) {
ASSERT(optimization.is_constant_call());
ASSERT(!lookup->holder()->IsGlobalObject());
-
Counters* counters = masm->isolate()->counters();
-
int depth1 = kInvalidProtoDepth;
int depth2 = kInvalidProtoDepth;
bool can_do_fast_api_call = false;
if (optimization.is_simple_api_call() &&
- !lookup->holder()->IsGlobalObject()) {
- depth1 =
- optimization.GetPrototypeDepthOfExpectedType(object,
- interceptor_holder);
- if (depth1 == kInvalidProtoDepth) {
- depth2 =
- optimization.GetPrototypeDepthOfExpectedType(interceptor_holder,
- lookup->holder());
- }
- can_do_fast_api_call = (depth1 != kInvalidProtoDepth) ||
- (depth2 != kInvalidProtoDepth);
+ !lookup->holder()->IsGlobalObject()) {
+ depth1 = optimization.GetPrototypeDepthOfExpectedType(
+ object, interceptor_holder);
+ if (depth1 == kInvalidProtoDepth) {
+ depth2 = optimization.GetPrototypeDepthOfExpectedType(
+ interceptor_holder, Handle<JSObject>(lookup->holder()));
+ }
+ can_do_fast_api_call =
+ depth1 != kInvalidProtoDepth || depth2 != kInvalidProtoDepth;
}
__ IncrementCounter(counters->call_const_interceptor(), 1,
- scratch1, scratch2);
+ scratch1, scratch2);
if (can_do_fast_api_call) {
__ IncrementCounter(counters->call_const_interceptor_fast_api(), 1,
@@ -722,9 +706,9 @@ class CallInterceptorCompiler BASE_EMBEDDED {
Label miss_cleanup;
Label* miss = can_do_fast_api_call ? &miss_cleanup : miss_label;
Register holder =
- stub_compiler_->CheckPrototypes(object, receiver,
- interceptor_holder, scratch1,
- scratch2, scratch3, name, depth1, miss);
+ stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
+ scratch1, scratch2, scratch3,
+ name, depth1, miss);
// Invoke an interceptor and if it provides a value,
// branch to |regular_invoke|.
@@ -737,10 +721,11 @@ class CallInterceptorCompiler BASE_EMBEDDED {
// Check that the maps from interceptor's holder to constant function's
// holder haven't changed and thus we can use cached constant function.
- if (interceptor_holder != lookup->holder()) {
+ if (*interceptor_holder != lookup->holder()) {
stub_compiler_->CheckPrototypes(interceptor_holder, receiver,
- lookup->holder(), scratch1,
- scratch2, scratch3, name, depth2, miss);
+ Handle<JSObject>(lookup->holder()),
+ scratch1, scratch2, scratch3,
+ name, depth2, miss);
} else {
// CheckPrototypes has a side effect of fetching a 'holder'
// for API (object which is instanceof for the signature). It's
@@ -751,10 +736,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
// Invoke function.
if (can_do_fast_api_call) {
- MaybeObject* result = GenerateFastApiDirectCall(masm,
- optimization,
- arguments_.immediate());
- if (result->IsFailure()) return result;
+ GenerateFastApiDirectCall(masm, optimization, arguments_.immediate());
} else {
CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
? CALL_AS_FUNCTION
@@ -775,64 +757,53 @@ class CallInterceptorCompiler BASE_EMBEDDED {
if (can_do_fast_api_call) {
FreeSpaceForFastApiCall(masm);
}
-
- return masm->isolate()->heap()->undefined_value();
}
void CompileRegular(MacroAssembler* masm,
- JSObject* object,
+ Handle<JSObject> object,
Register receiver,
Register scratch1,
Register scratch2,
Register scratch3,
- String* name,
- JSObject* interceptor_holder,
+ Handle<String> name,
+ Handle<JSObject> interceptor_holder,
Label* miss_label) {
Register holder =
stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
- scratch1, scratch2, scratch3, name,
- miss_label);
+ scratch1, scratch2, scratch3,
+ name, miss_label);
// Call a runtime function to load the interceptor property.
- __ EnterInternalFrame();
+ FrameScope scope(masm, StackFrame::INTERNAL);
// Save the name_ register across the call.
__ push(name_);
-
- PushInterceptorArguments(masm,
- receiver,
- holder,
- name_,
- interceptor_holder);
-
+ PushInterceptorArguments(masm, receiver, holder, name_, interceptor_holder);
__ CallExternalReference(
ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForCall),
masm->isolate()),
5);
-
// Restore the name_ register.
__ pop(name_);
- __ LeaveInternalFrame();
+ // Leave the internal frame.
}
void LoadWithInterceptor(MacroAssembler* masm,
Register receiver,
Register holder,
- JSObject* holder_obj,
+ Handle<JSObject> holder_obj,
Register scratch,
Label* interceptor_succeeded) {
- __ EnterInternalFrame();
- __ Push(holder, name_);
-
- CompileCallLoadPropertyWithInterceptor(masm,
- receiver,
- holder,
- name_,
- holder_obj);
-
- __ pop(name_); // Restore the name.
- __ pop(receiver); // Restore the holder.
- __ LeaveInternalFrame();
-
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(holder, name_);
+ CompileCallLoadPropertyWithInterceptor(masm,
+ receiver,
+ holder,
+ name_,
+ holder_obj);
+ __ pop(name_); // Restore the name.
+ __ pop(receiver); // Restore the holder.
+ }
// If interceptor returns no-result sentinel, call the constant function.
__ LoadRoot(scratch, Heap::kNoInterceptorResultSentinelRootIndex);
__ cmp(r0, scratch);
@@ -849,52 +820,42 @@ class CallInterceptorCompiler BASE_EMBEDDED {
// Generate code to check that a global property cell is empty. Create
// the property cell at compilation time if no cell exists for the
// property.
-MUST_USE_RESULT static MaybeObject* GenerateCheckPropertyCell(
- MacroAssembler* masm,
- GlobalObject* global,
- String* name,
- Register scratch,
- Label* miss) {
- Object* probe;
- { MaybeObject* maybe_probe = global->EnsurePropertyCell(name);
- if (!maybe_probe->ToObject(&probe)) return maybe_probe;
- }
- JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(probe);
+static void GenerateCheckPropertyCell(MacroAssembler* masm,
+ Handle<GlobalObject> global,
+ Handle<String> name,
+ Register scratch,
+ Label* miss) {
+ Handle<JSGlobalPropertyCell> cell =
+ GlobalObject::EnsurePropertyCell(global, name);
ASSERT(cell->value()->IsTheHole());
- __ mov(scratch, Operand(Handle<Object>(cell)));
+ __ mov(scratch, Operand(cell));
__ ldr(scratch,
FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(scratch, ip);
__ b(ne, miss);
- return cell;
}
+
// Calls GenerateCheckPropertyCell for each global object in the prototype chain
// from object to (but not including) holder.
-MUST_USE_RESULT static MaybeObject* GenerateCheckPropertyCells(
- MacroAssembler* masm,
- JSObject* object,
- JSObject* holder,
- String* name,
- Register scratch,
- Label* miss) {
- JSObject* current = object;
- while (current != holder) {
+static void GenerateCheckPropertyCells(MacroAssembler* masm,
+ Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<String> name,
+ Register scratch,
+ Label* miss) {
+ Handle<JSObject> current = object;
+ while (!current.is_identical_to(holder)) {
if (current->IsGlobalObject()) {
- // Returns a cell or a failure.
- MaybeObject* result = GenerateCheckPropertyCell(
- masm,
- GlobalObject::cast(current),
- name,
- scratch,
- miss);
- if (result->IsFailure()) return result;
+ GenerateCheckPropertyCell(masm,
+ Handle<GlobalObject>::cast(current),
+ name,
+ scratch,
+ miss);
}
- ASSERT(current->IsJSObject());
- current = JSObject::cast(current->GetPrototype());
+ current = Handle<JSObject>(JSObject::cast(current->GetPrototype()));
}
- return NULL;
}
@@ -1008,13 +969,13 @@ static void GenerateUInt2Double(MacroAssembler* masm,
#define __ ACCESS_MASM(masm())
-Register StubCompiler::CheckPrototypes(JSObject* object,
+Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
Register object_reg,
- JSObject* holder,
+ Handle<JSObject> holder,
Register holder_reg,
Register scratch1,
Register scratch2,
- String* name,
+ Handle<String> name,
int save_at_depth,
Label* miss) {
// Make sure there's no overlap between holder and object registers.
@@ -1032,83 +993,52 @@ Register StubCompiler::CheckPrototypes(JSObject* object,
// Check the maps in the prototype chain.
// Traverse the prototype chain from the object and do map checks.
- JSObject* current = object;
- while (current != holder) {
- depth++;
+ Handle<JSObject> current = object;
+ while (!current.is_identical_to(holder)) {
+ ++depth;
// Only global objects and objects that do not require access
// checks are allowed in stubs.
ASSERT(current->IsJSGlobalProxy() || !current->IsAccessCheckNeeded());
- ASSERT(current->GetPrototype()->IsJSObject());
- JSObject* prototype = JSObject::cast(current->GetPrototype());
+ Handle<JSObject> prototype(JSObject::cast(current->GetPrototype()));
if (!current->HasFastProperties() &&
!current->IsJSGlobalObject() &&
!current->IsJSGlobalProxy()) {
if (!name->IsSymbol()) {
- MaybeObject* maybe_lookup_result = heap()->LookupSymbol(name);
- Object* lookup_result = NULL; // Initialization to please compiler.
- if (!maybe_lookup_result->ToObject(&lookup_result)) {
- set_failure(Failure::cast(maybe_lookup_result));
- return reg;
- }
- name = String::cast(lookup_result);
+ name = factory()->LookupSymbol(name);
}
- ASSERT(current->property_dictionary()->FindEntry(name) ==
+ ASSERT(current->property_dictionary()->FindEntry(*name) ==
StringDictionary::kNotFound);
- MaybeObject* negative_lookup = GenerateDictionaryNegativeLookup(masm(),
- miss,
- reg,
- name,
- scratch1,
- scratch2);
- if (negative_lookup->IsFailure()) {
- set_failure(Failure::cast(negative_lookup));
- return reg;
- }
+ GenerateDictionaryNegativeLookup(masm(), miss, reg, name,
+ scratch1, scratch2);
__ ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
- reg = holder_reg; // from now the object is in holder_reg
+ reg = holder_reg; // From now on the object will be in holder_reg.
__ ldr(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
- } else if (heap()->InNewSpace(prototype)) {
- // Get the map of the current object.
+ } else {
+ Handle<Map> current_map(current->map());
__ ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
- __ cmp(scratch1, Operand(Handle<Map>(current->map())));
-
+ __ cmp(scratch1, Operand(current_map));
// Branch on the result of the map check.
__ b(ne, miss);
-
- // Check access rights to the global object. This has to happen
- // after the map check so that we know that the object is
- // actually a global object.
+ // Check access rights to the global object. This has to happen after
+ // the map check so that we know that the object is actually a global
+ // object.
if (current->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(reg, scratch1, miss);
- // Restore scratch register to be the map of the object. In the
- // new space case below, we load the prototype from the map in
- // the scratch register.
- __ ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
+ __ CheckAccessGlobalProxy(reg, scratch2, miss);
}
+ reg = holder_reg; // From now on the object will be in holder_reg.
- reg = holder_reg; // from now the object is in holder_reg
- // The prototype is in new space; we cannot store a reference
- // to it in the code. Load it from the map.
- __ ldr(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
- } else {
- // Check the map of the current object.
- __ ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
- __ cmp(scratch1, Operand(Handle<Map>(current->map())));
- // Branch on the result of the map check.
- __ b(ne, miss);
- // Check access rights to the global object. This has to happen
- // after the map check so that we know that the object is
- // actually a global object.
- if (current->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(reg, scratch1, miss);
+ if (heap()->InNewSpace(*prototype)) {
+ // The prototype is in new space; we cannot store a reference to it
+ // in the code. Load it from the map.
+ __ ldr(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
+ } else {
+ // The prototype is in old space; load it directly.
+ __ mov(reg, Operand(prototype));
}
- // The prototype is in old space; load it directly.
- reg = holder_reg; // from now the object is in holder_reg
- __ mov(reg, Operand(Handle<JSObject>(prototype)));
}
if (save_at_depth == depth) {
@@ -1119,143 +1049,131 @@ Register StubCompiler::CheckPrototypes(JSObject* object,
current = prototype;
}
+ // Log the check depth.
+ LOG(masm()->isolate(), IntEvent("check-maps-depth", depth + 1));
+
// Check the holder map.
__ ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
__ cmp(scratch1, Operand(Handle<Map>(current->map())));
__ b(ne, miss);
- // Log the check depth.
- LOG(masm()->isolate(), IntEvent("check-maps-depth", depth + 1));
-
// Perform security check for access to the global object.
ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
if (holder->IsJSGlobalProxy()) {
__ CheckAccessGlobalProxy(reg, scratch1, miss);
- };
-
- // If we've skipped any global objects, it's not enough to verify
- // that their maps haven't changed. We also need to check that the
- // property cell for the property is still empty.
- MaybeObject* result = GenerateCheckPropertyCells(masm(),
- object,
- holder,
- name,
- scratch1,
- miss);
- if (result->IsFailure()) set_failure(Failure::cast(result));
+ }
+
+ // If we've skipped any global objects, it's not enough to verify that
+ // their maps haven't changed. We also need to check that the property
+ // cell for the property is still empty.
+ GenerateCheckPropertyCells(masm(), object, holder, name, scratch1, miss);
// Return the register containing the holder.
return reg;
}
-void StubCompiler::GenerateLoadField(JSObject* object,
- JSObject* holder,
+void StubCompiler::GenerateLoadField(Handle<JSObject> object,
+ Handle<JSObject> holder,
Register receiver,
Register scratch1,
Register scratch2,
Register scratch3,
int index,
- String* name,
+ Handle<String> name,
Label* miss) {
// Check that the receiver isn't a smi.
__ JumpIfSmi(receiver, miss);
// Check that the maps haven't changed.
- Register reg =
- CheckPrototypes(object, receiver, holder, scratch1, scratch2, scratch3,
- name, miss);
+ Register reg = CheckPrototypes(
+ object, receiver, holder, scratch1, scratch2, scratch3, name, miss);
GenerateFastPropertyLoad(masm(), r0, reg, holder, index);
__ Ret();
}
-void StubCompiler::GenerateLoadConstant(JSObject* object,
- JSObject* holder,
+void StubCompiler::GenerateLoadConstant(Handle<JSObject> object,
+ Handle<JSObject> holder,
Register receiver,
Register scratch1,
Register scratch2,
Register scratch3,
- Object* value,
- String* name,
+ Handle<Object> value,
+ Handle<String> name,
Label* miss) {
// Check that the receiver isn't a smi.
__ JumpIfSmi(receiver, miss);
// Check that the maps haven't changed.
- CheckPrototypes(object, receiver, holder, scratch1, scratch2, scratch3, name,
- miss);
+ CheckPrototypes(
+ object, receiver, holder, scratch1, scratch2, scratch3, name, miss);
// Return the constant value.
- __ mov(r0, Operand(Handle<Object>(value)));
+ __ mov(r0, Operand(value));
__ Ret();
}
-MaybeObject* StubCompiler::GenerateLoadCallback(JSObject* object,
- JSObject* holder,
- Register receiver,
- Register name_reg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- AccessorInfo* callback,
- String* name,
- Label* miss) {
+void StubCompiler::GenerateLoadCallback(Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Register receiver,
+ Register name_reg,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Handle<AccessorInfo> callback,
+ Handle<String> name,
+ Label* miss) {
// Check that the receiver isn't a smi.
__ JumpIfSmi(receiver, miss);
// Check that the maps haven't changed.
- Register reg =
- CheckPrototypes(object, receiver, holder, scratch1, scratch2, scratch3,
- name, miss);
+ Register reg = CheckPrototypes(object, receiver, holder, scratch1,
+ scratch2, scratch3, name, miss);
// Build the AccessorInfo::args_ list on the stack and push the property
// name below the exit frame so the GC is aware of them, and store
// pointers to them.
__ push(receiver);
__ mov(scratch2, sp); // scratch2 = AccessorInfo::args_
- Handle<AccessorInfo> callback_handle(callback);
- if (heap()->InNewSpace(callback_handle->data())) {
- __ Move(scratch3, callback_handle);
+ if (heap()->InNewSpace(callback->data())) {
+ __ Move(scratch3, callback);
__ ldr(scratch3, FieldMemOperand(scratch3, AccessorInfo::kDataOffset));
} else {
- __ Move(scratch3, Handle<Object>(callback_handle->data()));
+ __ Move(scratch3, Handle<Object>(callback->data()));
}
__ Push(reg, scratch3, name_reg);
__ mov(r0, sp); // r0 = Handle<String>
- Address getter_address = v8::ToCData<Address>(callback->getter());
- ApiFunction fun(getter_address);
-
const int kApiStackSpace = 1;
+ FrameScope frame_scope(masm(), StackFrame::MANUAL);
__ EnterExitFrame(false, kApiStackSpace);
+
// Create AccessorInfo instance on the stack above the exit frame with
// scratch2 (internal::Object **args_) as the data.
__ str(scratch2, MemOperand(sp, 1 * kPointerSize));
__ add(r1, sp, Operand(1 * kPointerSize)); // r1 = AccessorInfo&
- // Emitting a stub call may try to allocate (if the code is not
- // already generated). Do not allow the assembler to perform a
- // garbage collection but instead return the allocation failure
- // object.
const int kStackUnwindSpace = 4;
+ Address getter_address = v8::ToCData<Address>(callback->getter());
+ ApiFunction fun(getter_address);
ExternalReference ref =
ExternalReference(&fun,
ExternalReference::DIRECT_GETTER_CALL,
masm()->isolate());
- return masm()->TryCallApiFunctionAndReturn(ref, kStackUnwindSpace);
+ __ CallApiFunctionAndReturn(ref, kStackUnwindSpace);
}
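
Two coupled changes here: the hand-built exit frame is now bracketed by a FrameScope(StackFrame::MANUAL), and TryCallApiFunctionAndReturn becomes CallApiFunctionAndReturn. The Try variant existed because emitting the stub call could hit an allocation failure that had to be returned without collecting, as the deleted comment explained; with handle-based compilation the emitter is allowed to GC, so the failure path disappears. The surviving shape (identifiers as in the hunk above; V8-internal API, not compilable standalone):

// Old: return masm()->TryCallApiFunctionAndReturn(ref, kStackUnwindSpace);
//      -- the Try variant could hand back an allocation failure object.
// New:
FrameScope frame_scope(masm(), StackFrame::MANUAL);  // frame set up by hand
__ EnterExitFrame(false, kApiStackSpace);
// ... store AccessorInfo::args_ above the exit frame, r1 = AccessorInfo& ...
ExternalReference ref =
    ExternalReference(&fun, ExternalReference::DIRECT_GETTER_CALL,
                      masm()->isolate());
__ CallApiFunctionAndReturn(ref, kStackUnwindSpace);  // allowed to GC now
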
-void StubCompiler::GenerateLoadInterceptor(JSObject* object,
- JSObject* interceptor_holder,
+void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
+ Handle<JSObject> interceptor_holder,
LookupResult* lookup,
Register receiver,
Register name_reg,
Register scratch1,
Register scratch2,
Register scratch3,
- String* name,
+ Handle<String> name,
Label* miss) {
ASSERT(interceptor_holder->HasNamedInterceptor());
ASSERT(!interceptor_holder->GetNamedInterceptor()->getter()->IsUndefined());
@@ -1271,9 +1189,9 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object,
if (lookup->type() == FIELD) {
compile_followup_inline = true;
} else if (lookup->type() == CALLBACKS &&
- lookup->GetCallbackObject()->IsAccessorInfo() &&
- AccessorInfo::cast(lookup->GetCallbackObject())->getter() != NULL) {
- compile_followup_inline = true;
+ lookup->GetCallbackObject()->IsAccessorInfo()) {
+ compile_followup_inline =
+ AccessorInfo::cast(lookup->GetCallbackObject())->getter() != NULL;
}
}
@@ -1288,48 +1206,45 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object,
// Save necessary data before invoking an interceptor.
// Requires a frame to make GC aware of pushed pointers.
- __ EnterInternalFrame();
-
- if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
- // CALLBACKS case needs a receiver to be passed into C++ callback.
- __ Push(receiver, holder_reg, name_reg);
- } else {
- __ Push(holder_reg, name_reg);
- }
-
- // Invoke an interceptor. Note: map checks from receiver to
- // interceptor's holder has been compiled before (see a caller
- // of this method.)
- CompileCallLoadPropertyWithInterceptor(masm(),
- receiver,
- holder_reg,
- name_reg,
- interceptor_holder);
-
- // Check if interceptor provided a value for property. If it's
- // the case, return immediately.
- Label interceptor_failed;
- __ LoadRoot(scratch1, Heap::kNoInterceptorResultSentinelRootIndex);
- __ cmp(r0, scratch1);
- __ b(eq, &interceptor_failed);
- __ LeaveInternalFrame();
- __ Ret();
+ {
+ FrameScope frame_scope(masm(), StackFrame::INTERNAL);
+ if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
+ // CALLBACKS case needs a receiver to be passed into C++ callback.
+ __ Push(receiver, holder_reg, name_reg);
+ } else {
+ __ Push(holder_reg, name_reg);
+ }
+ // Invoke an interceptor. Note: the map checks from the receiver to the
+ // interceptor's holder have already been compiled (see the caller of
+ // this method).
+ CompileCallLoadPropertyWithInterceptor(masm(),
+ receiver,
+ holder_reg,
+ name_reg,
+ interceptor_holder);
+ // Check if the interceptor provided a value for the property.
+ // If so, return immediately.
+ Label interceptor_failed;
+ __ LoadRoot(scratch1, Heap::kNoInterceptorResultSentinelRootIndex);
+ __ cmp(r0, scratch1);
+ __ b(eq, &interceptor_failed);
+ frame_scope.GenerateLeaveFrame();
+ __ Ret();
- __ bind(&interceptor_failed);
- __ pop(name_reg);
- __ pop(holder_reg);
- if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
- __ pop(receiver);
+ __ bind(&interceptor_failed);
+ __ pop(name_reg);
+ __ pop(holder_reg);
+ if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
+ __ pop(receiver);
+ }
+ // Leave the internal frame.
}
-
- __ LeaveInternalFrame();
-
// Check that the maps from interceptor's holder to lookup's holder
// haven't changed. And load lookup's holder into |holder| register.
- if (interceptor_holder != lookup->holder()) {
+ if (*interceptor_holder != lookup->holder()) {
holder_reg = CheckPrototypes(interceptor_holder,
holder_reg,
- lookup->holder(),
+ Handle<JSObject>(lookup->holder()),
scratch1,
scratch2,
scratch3,
@@ -1341,21 +1256,21 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object,
// We found FIELD property in prototype chain of interceptor's holder.
// Retrieve a field from field's holder.
GenerateFastPropertyLoad(masm(), r0, holder_reg,
- lookup->holder(), lookup->GetFieldIndex());
+ Handle<JSObject>(lookup->holder()),
+ lookup->GetFieldIndex());
__ Ret();
} else {
// We found CALLBACKS property in prototype chain of interceptor's
// holder.
ASSERT(lookup->type() == CALLBACKS);
- ASSERT(lookup->GetCallbackObject()->IsAccessorInfo());
- AccessorInfo* callback = AccessorInfo::cast(lookup->GetCallbackObject());
- ASSERT(callback != NULL);
+ Handle<AccessorInfo> callback(
+ AccessorInfo::cast(lookup->GetCallbackObject()));
ASSERT(callback->getter() != NULL);
// Tail call to runtime.
// Important invariant in CALLBACKS case: the code above must be
// structured to never clobber |receiver| register.
- __ Move(scratch2, Handle<AccessorInfo>(callback));
+ __ Move(scratch2, callback);
// holder_reg is either receiver or scratch1.
if (!receiver.is(holder_reg)) {
ASSERT(scratch1.is(holder_reg));
@@ -1392,17 +1307,17 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object,
}
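
The EnterInternalFrame/LeaveInternalFrame pairs give way to a block-scoped FrameScope: the destructor emits the teardown for the fall-through path, and GenerateLeaveFrame() emits it for paths that return from inside the scope, like the Ret() above. The RAII idea in self-contained C++ (toy class and names, not V8's):

#include <cstdio>

struct MacroAssemblerToy {
  void EnterFrame() { std::puts("emit: enter internal frame"); }
  void LeaveFrame() { std::puts("emit: leave internal frame"); }
};

class FrameScopeToy {
 public:
  explicit FrameScopeToy(MacroAssemblerToy* masm) : masm_(masm) {
    masm_->EnterFrame();
  }
  // For paths that return while still inside the scope (the Ret() above);
  // this emits teardown code for that diverging path.
  void GenerateLeaveFrame() { masm_->LeaveFrame(); }
  ~FrameScopeToy() { masm_->LeaveFrame(); }  // fall-through path teardown
 private:
  MacroAssemblerToy* masm_;
};

int main() {
  MacroAssemblerToy masm;
  {
    FrameScopeToy scope(&masm);
    // ... emit code that needs the frame; an early-return path would call
    // scope.GenerateLeaveFrame() before its Ret() ...
  }  // leave-frame code emitted here automatically
  return 0;
}
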
-void CallStubCompiler::GenerateNameCheck(String* name, Label* miss) {
+void CallStubCompiler::GenerateNameCheck(Handle<String> name, Label* miss) {
if (kind_ == Code::KEYED_CALL_IC) {
- __ cmp(r2, Operand(Handle<String>(name)));
+ __ cmp(r2, Operand(name));
__ b(ne, miss);
}
}
-void CallStubCompiler::GenerateGlobalReceiverCheck(JSObject* object,
- JSObject* holder,
- String* name,
+void CallStubCompiler::GenerateGlobalReceiverCheck(Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<String> name,
Label* miss) {
ASSERT(holder->IsGlobalObject());
@@ -1415,7 +1330,7 @@ void CallStubCompiler::GenerateGlobalReceiverCheck(JSObject* object,
// If the object is the holder then we know that it's a global
// object which can only happen for contextual calls. In this case,
// the receiver cannot be a smi.
- if (object != holder) {
+ if (!object.is_identical_to(holder)) {
__ JumpIfSmi(r0, miss);
}
@@ -1424,15 +1339,16 @@ void CallStubCompiler::GenerateGlobalReceiverCheck(JSObject* object,
}
-void CallStubCompiler::GenerateLoadFunctionFromCell(JSGlobalPropertyCell* cell,
- JSFunction* function,
- Label* miss) {
+void CallStubCompiler::GenerateLoadFunctionFromCell(
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<JSFunction> function,
+ Label* miss) {
// Get the value from the cell.
- __ mov(r3, Operand(Handle<JSGlobalPropertyCell>(cell)));
+ __ mov(r3, Operand(cell));
__ ldr(r1, FieldMemOperand(r3, JSGlobalPropertyCell::kValueOffset));
// Check that the cell contains the same function.
- if (heap()->InNewSpace(function)) {
+ if (heap()->InNewSpace(*function)) {
// We can't embed a pointer to a function in new space so we have
// to verify that the shared function info is unchanged. This has
// the nice side effect that multiple closures based on the same
@@ -1446,30 +1362,26 @@ void CallStubCompiler::GenerateLoadFunctionFromCell(JSGlobalPropertyCell* cell,
__ Move(r3, Handle<SharedFunctionInfo>(function->shared()));
__ ldr(r4, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
__ cmp(r4, r3);
- __ b(ne, miss);
} else {
- __ cmp(r1, Operand(Handle<JSFunction>(function)));
- __ b(ne, miss);
+ __ cmp(r1, Operand(function));
}
+ __ b(ne, miss);
}
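
Note the small restructuring above: each arm of the if/else now only sets the condition flags and the single b(ne, miss) is shared. The two arms exist because a JSFunction in new space can move, so the stub compares the stable SharedFunctionInfo instead of embedding the function pointer, as the surrounding comment explains. The hoisted-branch shape as a plain C++ function (toy parameters):

#include <cstdio>

// Each arm computes the same predicate; the miss branch is taken once.
bool CellStillHoldsFunction(bool in_new_space, int cell_shared,
                            int expected_shared, int cell_fn, int expected_fn) {
  bool equal;
  if (in_new_space) {
    equal = (cell_shared == expected_shared);  // compare shared function info
  } else {
    equal = (cell_fn == expected_fn);          // compare the function itself
  }
  return equal;  // caller: if (!equal) goto miss;  -- one b(ne, miss)
}

int main() {
  std::printf("%d\n", CellStillHoldsFunction(true, 7, 7, 0, 1));
  return 0;
}
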
-MaybeObject* CallStubCompiler::GenerateMissBranch() {
- MaybeObject* maybe_obj =
+void CallStubCompiler::GenerateMissBranch() {
+ Handle<Code> code =
isolate()->stub_cache()->ComputeCallMiss(arguments().immediate(),
kind_,
- extra_ic_state_);
- Object* obj;
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- __ Jump(Handle<Code>(Code::cast(obj)), RelocInfo::CODE_TARGET);
- return obj;
+ extra_state_);
+ __ Jump(code, RelocInfo::CODE_TARGET);
}
-MaybeObject* CallStubCompiler::CompileCallField(JSObject* object,
- JSObject* holder,
+Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object,
+ Handle<JSObject> holder,
int index,
- String* name) {
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- r2 : name
// -- lr : return address
@@ -1489,23 +1401,23 @@ MaybeObject* CallStubCompiler::CompileCallField(JSObject* object,
Register reg = CheckPrototypes(object, r0, holder, r1, r3, r4, name, &miss);
GenerateFastPropertyLoad(masm(), r1, reg, holder, index);
- GenerateCallFunction(masm(), object, arguments(), &miss, extra_ic_state_);
+ GenerateCallFunction(masm(), object, arguments(), &miss, extra_state_);
// Handle call cache miss.
__ bind(&miss);
- MaybeObject* maybe_result = GenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
+ GenerateMissBranch();
// Return the generated code.
return GetCode(FIELD, name);
}
-MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
- JSObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
+Handle<Code> CallStubCompiler::CompileArrayPushCall(
+ Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<JSFunction> function,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- r2 : name
// -- lr : return address
@@ -1515,14 +1427,12 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
// -----------------------------------
// If object is not an array, bail out to regular call.
- if (!object->IsJSArray() || cell != NULL) return heap()->undefined_value();
+ if (!object->IsJSArray() || !cell.is_null()) return Handle<Code>::null();
Label miss;
-
GenerateNameCheck(name, &miss);
Register receiver = r1;
-
// Get the receiver from the stack
const int argc = arguments().immediate();
__ ldr(receiver, MemOperand(sp, argc * kPointerSize));
@@ -1531,8 +1441,8 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
__ JumpIfSmi(receiver, &miss);
// Check that the maps haven't changed.
- CheckPrototypes(JSObject::cast(object), receiver,
- holder, r3, r0, r4, name, &miss);
+ CheckPrototypes(Handle<JSObject>::cast(object), receiver, holder, r3, r0, r4,
+ name, &miss);
if (argc == 0) {
// Nothing to do, just return the length.
@@ -1541,10 +1451,8 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
__ Ret();
} else {
Label call_builtin;
-
Register elements = r3;
Register end_elements = r5;
-
// Get the elements array of the object.
__ ldr(elements, FieldMemOperand(receiver, JSArray::kElementsOffset));
@@ -1556,7 +1464,7 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
DONT_DO_SMI_CHECK);
if (argc == 1) { // Otherwise fall through to call the builtin.
- Label exit, with_write_barrier, attempt_to_grow_elements;
+ Label attempt_to_grow_elements;
// Get the array's length into r0 and calculate new length.
__ ldr(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
@@ -1571,11 +1479,15 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
__ cmp(r0, r4);
__ b(gt, &attempt_to_grow_elements);
+ // Check if value is a smi.
+ Label with_write_barrier;
+ __ ldr(r4, MemOperand(sp, (argc - 1) * kPointerSize));
+ __ JumpIfNotSmi(r4, &with_write_barrier);
+
// Save new length.
__ str(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
// Push the element.
- __ ldr(r4, MemOperand(sp, (argc - 1) * kPointerSize));
// We may need a register containing the address end_elements below,
// so write back the value in end_elements.
__ add(end_elements, elements,
@@ -1585,14 +1497,31 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
__ str(r4, MemOperand(end_elements, kEndElementsOffset, PreIndex));
// Check for a smi.
- __ JumpIfNotSmi(r4, &with_write_barrier);
- __ bind(&exit);
__ Drop(argc + 1);
__ Ret();
__ bind(&with_write_barrier);
- __ InNewSpace(elements, r4, eq, &exit);
- __ RecordWriteHelper(elements, end_elements, r4);
+
+ __ ldr(r6, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ CheckFastObjectElements(r6, r6, &call_builtin);
+
+ // Save new length.
+ __ str(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
+
+ // Push the element.
+ // We may need a register containing the address end_elements below,
+ // so write back the value in end_elements.
+ __ add(end_elements, elements,
+ Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ str(r4, MemOperand(end_elements, kEndElementsOffset, PreIndex));
+
+ __ RecordWrite(elements,
+ end_elements,
+ r4,
+ kLRHasNotBeenSaved,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
__ Drop(argc + 1);
__ Ret();
@@ -1604,6 +1533,15 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
__ b(&call_builtin);
}
+ __ ldr(r2, MemOperand(sp, (argc - 1) * kPointerSize));
+ // Growing elements that are SMI-only requires special handling in case
+ // the new element is non-Smi. For now, delegate to the builtin.
+ Label no_fast_elements_check;
+ __ JumpIfSmi(r2, &no_fast_elements_check);
+ __ ldr(r7, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ CheckFastObjectElements(r7, r7, &call_builtin);
+ __ bind(&no_fast_elements_check);
+
Isolate* isolate = masm()->isolate();
ExternalReference new_space_allocation_top =
ExternalReference::new_space_allocation_top_address(isolate);
@@ -1630,8 +1568,7 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
// Update new_space_allocation_top.
__ str(r6, MemOperand(r7));
// Push the argument.
- __ ldr(r6, MemOperand(sp, (argc - 1) * kPointerSize));
- __ str(r6, MemOperand(end_elements));
+ __ str(r2, MemOperand(end_elements));
// Fill the rest with holes.
__ LoadRoot(r6, Heap::kTheHoleValueRootIndex);
for (int i = 1; i < kAllocationDelta; i++) {
@@ -1656,19 +1593,19 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
// Handle call cache miss.
__ bind(&miss);
- MaybeObject* maybe_result = GenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
+ GenerateMissBranch();
// Return the generated code.
return GetCode(function);
}
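
The push fast path is rebuilt around the incremental-marking write barrier: the value is now tested for smi-ness before the new length is committed, smi stores need no barrier at all, and a non-smi store first verifies via CheckFastObjectElements that the elements are not smi-only before emitting the flagged RecordWrite. A self-contained model of that control flow (plain C++; even values stand in for smis, a toy tagging scheme):

#include <cstdio>
#include <vector>

enum ElementsKind { kSmiOnlyElements, kFastObjectElements };

struct ArrayToy {
  std::vector<long> elements;
  ElementsKind kind;
};

bool IsSmi(long value) { return (value & 1) == 0; }  // toy tag: even == smi

const char* Push(ArrayToy* a, long value) {
  if (a->elements.size() == a->elements.capacity()) return "grow path";
  if (!IsSmi(value)) {
    // with_write_barrier: smi-only element arrays go to the builtin, which
    // also takes care of the elements-kind transition.
    if (a->kind != kFastObjectElements) return "call builtin";
    a->elements.push_back(value);  // save new length, store the element
    /* RecordWrite(elements, slot, value) -- remembered-set update */
    return "fast path with barrier";
  }
  a->elements.push_back(value);    // smi store: no barrier needed
  return "fast path, no barrier";
}

int main() {
  ArrayToy a{{}, kSmiOnlyElements};
  a.elements.reserve(4);
  std::printf("%s\n", Push(&a, 2));  // smi
  std::printf("%s\n", Push(&a, 3));  // non-smi into smi-only elements
  return 0;
}
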
-MaybeObject* CallStubCompiler::CompileArrayPopCall(Object* object,
- JSObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
+Handle<Code> CallStubCompiler::CompileArrayPopCall(
+ Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<JSFunction> function,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- r2 : name
// -- lr : return address
@@ -1678,25 +1615,22 @@ MaybeObject* CallStubCompiler::CompileArrayPopCall(Object* object,
// -----------------------------------
// If object is not an array, bail out to regular call.
- if (!object->IsJSArray() || cell != NULL) return heap()->undefined_value();
+ if (!object->IsJSArray() || !cell.is_null()) return Handle<Code>::null();
Label miss, return_undefined, call_builtin;
-
Register receiver = r1;
Register elements = r3;
-
GenerateNameCheck(name, &miss);
// Get the receiver from the stack
const int argc = arguments().immediate();
__ ldr(receiver, MemOperand(sp, argc * kPointerSize));
-
// Check that the receiver isn't a smi.
__ JumpIfSmi(receiver, &miss);
// Check that the maps haven't changed.
- CheckPrototypes(JSObject::cast(object),
- receiver, holder, elements, r4, r0, name, &miss);
+ CheckPrototypes(Handle<JSObject>::cast(object), receiver, holder, elements,
+ r4, r0, name, &miss);
// Get the elements array of the object.
__ ldr(elements, FieldMemOperand(receiver, JSArray::kElementsOffset));
@@ -1745,20 +1679,19 @@ MaybeObject* CallStubCompiler::CompileArrayPopCall(Object* object,
// Handle call cache miss.
__ bind(&miss);
- MaybeObject* maybe_result = GenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
+ GenerateMissBranch();
// Return the generated code.
return GetCode(function);
}
-MaybeObject* CallStubCompiler::CompileStringCharCodeAtCall(
- Object* object,
- JSObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
+Handle<Code> CallStubCompiler::CompileStringCharCodeAtCall(
+ Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<JSFunction> function,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- r2 : function name
// -- lr : return address
@@ -1768,21 +1701,19 @@ MaybeObject* CallStubCompiler::CompileStringCharCodeAtCall(
// -----------------------------------
// If object is not a string, bail out to regular call.
- if (!object->IsString() || cell != NULL) return heap()->undefined_value();
+ if (!object->IsString() || !cell.is_null()) return Handle<Code>::null();
const int argc = arguments().immediate();
-
Label miss;
Label name_miss;
Label index_out_of_range;
Label* index_out_of_range_label = &index_out_of_range;
if (kind_ == Code::CALL_IC &&
- (CallICBase::StringStubState::decode(extra_ic_state_) ==
+ (CallICBase::StringStubState::decode(extra_state_) ==
DEFAULT_STRING_STUB)) {
index_out_of_range_label = &miss;
}
-
GenerateNameCheck(name, &name_miss);
// Check that the maps starting from the prototype haven't changed.
@@ -1790,13 +1721,12 @@ MaybeObject* CallStubCompiler::CompileStringCharCodeAtCall(
Context::STRING_FUNCTION_INDEX,
r0,
&miss);
- ASSERT(object != holder);
- CheckPrototypes(JSObject::cast(object->GetPrototype()), r0, holder,
- r1, r3, r4, name, &miss);
+ ASSERT(!object.is_identical_to(holder));
+ CheckPrototypes(Handle<JSObject>(JSObject::cast(object->GetPrototype())),
+ r0, holder, r1, r3, r4, name, &miss);
Register receiver = r1;
Register index = r4;
- Register scratch = r3;
Register result = r0;
__ ldr(receiver, MemOperand(sp, argc * kPointerSize));
if (argc > 0) {
@@ -1805,20 +1735,19 @@ MaybeObject* CallStubCompiler::CompileStringCharCodeAtCall(
__ LoadRoot(index, Heap::kUndefinedValueRootIndex);
}
- StringCharCodeAtGenerator char_code_at_generator(receiver,
- index,
- scratch,
- result,
- &miss, // When not a string.
- &miss, // When not a number.
- index_out_of_range_label,
- STRING_INDEX_IS_NUMBER);
- char_code_at_generator.GenerateFast(masm());
+ StringCharCodeAtGenerator generator(receiver,
+ index,
+ result,
+ &miss, // When not a string.
+ &miss, // When not a number.
+ index_out_of_range_label,
+ STRING_INDEX_IS_NUMBER);
+ generator.GenerateFast(masm());
__ Drop(argc + 1);
__ Ret();
StubRuntimeCallHelper call_helper;
- char_code_at_generator.GenerateSlow(masm(), call_helper);
+ generator.GenerateSlow(masm(), call_helper);
if (index_out_of_range.is_linked()) {
__ bind(&index_out_of_range);
@@ -1829,22 +1758,21 @@ MaybeObject* CallStubCompiler::CompileStringCharCodeAtCall(
__ bind(&miss);
// Restore function name in r2.
- __ Move(r2, Handle<String>(name));
+ __ Move(r2, name);
__ bind(&name_miss);
- MaybeObject* maybe_result = GenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
+ GenerateMissBranch();
// Return the generated code.
return GetCode(function);
}
-MaybeObject* CallStubCompiler::CompileStringCharAtCall(
- Object* object,
- JSObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
+Handle<Code> CallStubCompiler::CompileStringCharAtCall(
+ Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<JSFunction> function,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- r2 : function name
// -- lr : return address
@@ -1854,21 +1782,18 @@ MaybeObject* CallStubCompiler::CompileStringCharAtCall(
// -----------------------------------
// If object is not a string, bail out to regular call.
- if (!object->IsString() || cell != NULL) return heap()->undefined_value();
+ if (!object->IsString() || !cell.is_null()) return Handle<Code>::null();
const int argc = arguments().immediate();
-
Label miss;
Label name_miss;
Label index_out_of_range;
Label* index_out_of_range_label = &index_out_of_range;
-
if (kind_ == Code::CALL_IC &&
- (CallICBase::StringStubState::decode(extra_ic_state_) ==
+ (CallICBase::StringStubState::decode(extra_state_) ==
DEFAULT_STRING_STUB)) {
index_out_of_range_label = &miss;
}
-
GenerateNameCheck(name, &name_miss);
// Check that the maps starting from the prototype haven't changed.
@@ -1876,14 +1801,13 @@ MaybeObject* CallStubCompiler::CompileStringCharAtCall(
Context::STRING_FUNCTION_INDEX,
r0,
&miss);
- ASSERT(object != holder);
- CheckPrototypes(JSObject::cast(object->GetPrototype()), r0, holder,
- r1, r3, r4, name, &miss);
+ ASSERT(!object.is_identical_to(holder));
+ CheckPrototypes(Handle<JSObject>(JSObject::cast(object->GetPrototype())),
+ r0, holder, r1, r3, r4, name, &miss);
Register receiver = r0;
Register index = r4;
- Register scratch1 = r1;
- Register scratch2 = r3;
+ Register scratch = r3;
Register result = r0;
__ ldr(receiver, MemOperand(sp, argc * kPointerSize));
if (argc > 0) {
@@ -1892,21 +1816,20 @@ MaybeObject* CallStubCompiler::CompileStringCharAtCall(
__ LoadRoot(index, Heap::kUndefinedValueRootIndex);
}
- StringCharAtGenerator char_at_generator(receiver,
- index,
- scratch1,
- scratch2,
- result,
- &miss, // When not a string.
- &miss, // When not a number.
- index_out_of_range_label,
- STRING_INDEX_IS_NUMBER);
- char_at_generator.GenerateFast(masm());
+ StringCharAtGenerator generator(receiver,
+ index,
+ scratch,
+ result,
+ &miss, // When not a string.
+ &miss, // When not a number.
+ index_out_of_range_label,
+ STRING_INDEX_IS_NUMBER);
+ generator.GenerateFast(masm());
__ Drop(argc + 1);
__ Ret();
StubRuntimeCallHelper call_helper;
- char_at_generator.GenerateSlow(masm(), call_helper);
+ generator.GenerateSlow(masm(), call_helper);
if (index_out_of_range.is_linked()) {
__ bind(&index_out_of_range);
@@ -1917,22 +1840,21 @@ MaybeObject* CallStubCompiler::CompileStringCharAtCall(
__ bind(&miss);
// Restore function name in r2.
- __ Move(r2, Handle<String>(name));
+ __ Move(r2, name);
__ bind(&name_miss);
- MaybeObject* maybe_result = GenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
+ GenerateMissBranch();
// Return the generated code.
return GetCode(function);
}
-MaybeObject* CallStubCompiler::CompileStringFromCharCodeCall(
- Object* object,
- JSObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
+Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
+ Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<JSFunction> function,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- r2 : function name
// -- lr : return address
@@ -1945,22 +1867,23 @@ MaybeObject* CallStubCompiler::CompileStringFromCharCodeCall(
// If the object is not a JSObject or we got an unexpected number of
// arguments, bail out to the regular call.
- if (!object->IsJSObject() || argc != 1) return heap()->undefined_value();
+ if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
Label miss;
GenerateNameCheck(name, &miss);
- if (cell == NULL) {
+ if (cell.is_null()) {
__ ldr(r1, MemOperand(sp, 1 * kPointerSize));
STATIC_ASSERT(kSmiTag == 0);
__ JumpIfSmi(r1, &miss);
- CheckPrototypes(JSObject::cast(object), r1, holder, r0, r3, r4, name,
- &miss);
+ CheckPrototypes(Handle<JSObject>::cast(object), r1, holder, r0, r3, r4,
+ name, &miss);
} else {
- ASSERT(cell->value() == function);
- GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss);
+ ASSERT(cell->value() == *function);
+ GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
+ &miss);
GenerateLoadFunctionFromCell(cell, function, &miss);
}
@@ -1976,13 +1899,13 @@ MaybeObject* CallStubCompiler::CompileStringFromCharCodeCall(
// Convert the smi code to uint16.
__ and_(code, code, Operand(Smi::FromInt(0xffff)));
- StringCharFromCodeGenerator char_from_code_generator(code, r0);
- char_from_code_generator.GenerateFast(masm());
+ StringCharFromCodeGenerator generator(code, r0);
+ generator.GenerateFast(masm());
__ Drop(argc + 1);
__ Ret();
StubRuntimeCallHelper call_helper;
- char_from_code_generator.GenerateSlow(masm(), call_helper);
+ generator.GenerateSlow(masm(), call_helper);
// Tail call the full function. We do not have to patch the receiver
// because the function makes no use of it.
@@ -1991,19 +1914,19 @@ MaybeObject* CallStubCompiler::CompileStringFromCharCodeCall(
__ bind(&miss);
// r2: function name.
- MaybeObject* maybe_result = GenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
+ GenerateMissBranch();
// Return the generated code.
- return (cell == NULL) ? GetCode(function) : GetCode(NORMAL, name);
+ return cell.is_null() ? GetCode(function) : GetCode(NORMAL, name);
}
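
This return statement shows another conversion repeated throughout the file: a null Handle<Code> replaces heap()->undefined_value() as the "no specialized stub, fall back to the regular compiler" signal, tested with is_null(). The convention in miniature (toy handle type, not V8's):

#include <cstdio>

template <typename T>
struct HandleToy {               // toy model of v8::internal::Handle<T>
  T* location;
  bool is_null() const { return location == nullptr; }
};

struct CodeToy {};

// Custom call generators return a null handle to say "no specialized stub
// here, let the regular compiler take over" (the old code returned the
// undefined value for the same purpose).
HandleToy<CodeToy> CompileCustomCallToy(bool applicable, CodeToy* code) {
  if (!applicable) return HandleToy<CodeToy>{nullptr};  // bail out
  return HandleToy<CodeToy>{code};
}

int main() {
  CodeToy stub;
  HandleToy<CodeToy> code = CompileCustomCallToy(false, &stub);
  std::puts(code.is_null() ? "fall through to regular compiler"
                           : "install custom stub");
  return 0;
}
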
-MaybeObject* CallStubCompiler::CompileMathFloorCall(Object* object,
- JSObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
+Handle<Code> CallStubCompiler::CompileMathFloorCall(
+ Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<JSFunction> function,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- r2 : function name
// -- lr : return address
@@ -2013,31 +1936,28 @@ MaybeObject* CallStubCompiler::CompileMathFloorCall(Object* object,
// -----------------------------------
if (!CpuFeatures::IsSupported(VFP3)) {
- return heap()->undefined_value();
+ return Handle<Code>::null();
}
CpuFeatures::Scope scope_vfp3(VFP3);
-
const int argc = arguments().immediate();
-
// If the object is not a JSObject or we got an unexpected number of
// arguments, bail out to the regular call.
- if (!object->IsJSObject() || argc != 1) return heap()->undefined_value();
+ if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
Label miss, slow;
GenerateNameCheck(name, &miss);
- if (cell == NULL) {
+ if (cell.is_null()) {
__ ldr(r1, MemOperand(sp, 1 * kPointerSize));
-
STATIC_ASSERT(kSmiTag == 0);
__ JumpIfSmi(r1, &miss);
-
- CheckPrototypes(JSObject::cast(object), r1, holder, r0, r3, r4, name,
- &miss);
+ CheckPrototypes(Handle<JSObject>::cast(object), r1, holder, r0, r3, r4,
+ name, &miss);
} else {
- ASSERT(cell->value() == function);
- GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss);
+ ASSERT(cell->value() == *function);
+ GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
+ &miss);
GenerateLoadFunctionFromCell(cell, function, &miss);
}
@@ -2139,19 +2059,19 @@ MaybeObject* CallStubCompiler::CompileMathFloorCall(Object* object,
__ bind(&miss);
// r2: function name.
- MaybeObject* maybe_result = GenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
+ GenerateMissBranch();
// Return the generated code.
- return (cell == NULL) ? GetCode(function) : GetCode(NORMAL, name);
+ return cell.is_null() ? GetCode(function) : GetCode(NORMAL, name);
}
-MaybeObject* CallStubCompiler::CompileMathAbsCall(Object* object,
- JSObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
+Handle<Code> CallStubCompiler::CompileMathAbsCall(
+ Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<JSFunction> function,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- r2 : function name
// -- lr : return address
@@ -2161,25 +2081,22 @@ MaybeObject* CallStubCompiler::CompileMathAbsCall(Object* object,
// -----------------------------------
const int argc = arguments().immediate();
-
// If the object is not a JSObject or we got an unexpected number of
// arguments, bail out to the regular call.
- if (!object->IsJSObject() || argc != 1) return heap()->undefined_value();
+ if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
Label miss;
GenerateNameCheck(name, &miss);
-
- if (cell == NULL) {
+ if (cell.is_null()) {
__ ldr(r1, MemOperand(sp, 1 * kPointerSize));
-
STATIC_ASSERT(kSmiTag == 0);
__ JumpIfSmi(r1, &miss);
-
- CheckPrototypes(JSObject::cast(object), r1, holder, r0, r3, r4, name,
- &miss);
+ CheckPrototypes(Handle<JSObject>::cast(object), r1, holder, r0, r3, r4,
+ name, &miss);
} else {
- ASSERT(cell->value() == function);
- GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss);
+ ASSERT(cell->value() == *function);
+ GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
+ &miss);
GenerateLoadFunctionFromCell(cell, function, &miss);
}
@@ -2240,35 +2157,33 @@ MaybeObject* CallStubCompiler::CompileMathAbsCall(Object* object,
__ bind(&miss);
// r2: function name.
- MaybeObject* maybe_result = GenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
+ GenerateMissBranch();
// Return the generated code.
- return (cell == NULL) ? GetCode(function) : GetCode(NORMAL, name);
+ return cell.is_null() ? GetCode(function) : GetCode(NORMAL, name);
}
-MaybeObject* CallStubCompiler::CompileFastApiCall(
+Handle<Code> CallStubCompiler::CompileFastApiCall(
const CallOptimization& optimization,
- Object* object,
- JSObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
+ Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<JSFunction> function,
+ Handle<String> name) {
Counters* counters = isolate()->counters();
ASSERT(optimization.is_simple_api_call());
// Bail out if the object is a global object, as we don't want to
// repatch it to the global receiver.
- if (object->IsGlobalObject()) return heap()->undefined_value();
- if (cell != NULL) return heap()->undefined_value();
- if (!object->IsJSObject()) return heap()->undefined_value();
+ if (object->IsGlobalObject()) return Handle<Code>::null();
+ if (!cell.is_null()) return Handle<Code>::null();
+ if (!object->IsJSObject()) return Handle<Code>::null();
int depth = optimization.GetPrototypeDepthOfExpectedType(
- JSObject::cast(object), holder);
- if (depth == kInvalidProtoDepth) return heap()->undefined_value();
+ Handle<JSObject>::cast(object), holder);
+ if (depth == kInvalidProtoDepth) return Handle<Code>::null();
Label miss, miss_before_stack_reserved;
-
GenerateNameCheck(name, &miss_before_stack_reserved);
// Get the receiver from the stack.
@@ -2284,44 +2199,40 @@ MaybeObject* CallStubCompiler::CompileFastApiCall(
ReserveSpaceForFastApiCall(masm(), r0);
// Check that the maps haven't changed and find a Holder as a side effect.
- CheckPrototypes(JSObject::cast(object), r1, holder, r0, r3, r4, name,
+ CheckPrototypes(Handle<JSObject>::cast(object), r1, holder, r0, r3, r4, name,
depth, &miss);
- MaybeObject* result = GenerateFastApiDirectCall(masm(), optimization, argc);
- if (result->IsFailure()) return result;
+ GenerateFastApiDirectCall(masm(), optimization, argc);
__ bind(&miss);
FreeSpaceForFastApiCall(masm());
__ bind(&miss_before_stack_reserved);
- MaybeObject* maybe_result = GenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
+ GenerateMissBranch();
// Return the generated code.
return GetCode(function);
}
-MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
- JSObject* holder,
- JSFunction* function,
- String* name,
+Handle<Code> CallStubCompiler::CompileCallConstant(Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<JSFunction> function,
+ Handle<String> name,
CheckType check) {
// ----------- S t a t e -------------
// -- r2 : name
// -- lr : return address
// -----------------------------------
if (HasCustomCallGenerator(function)) {
- MaybeObject* maybe_result = CompileCustomCall(
- object, holder, NULL, function, name);
- Object* result;
- if (!maybe_result->ToObject(&result)) return maybe_result;
- // undefined means bail out to regular compiler.
- if (!result->IsUndefined()) return result;
+ Handle<Code> code = CompileCustomCall(object, holder,
+ Handle<JSGlobalPropertyCell>::null(),
+ function, name);
+ // A null handle means bail out to the regular compiler code below.
+ if (!code.is_null()) return code;
}
Label miss;
-
GenerateNameCheck(name, &miss);
// Get the receiver from the stack
@@ -2336,16 +2247,14 @@ MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
// Make sure that it's okay not to patch the on-stack receiver
// unless we're doing a receiver map check.
ASSERT(!object->IsGlobalObject() || check == RECEIVER_MAP_CHECK);
-
- SharedFunctionInfo* function_info = function->shared();
switch (check) {
case RECEIVER_MAP_CHECK:
__ IncrementCounter(masm()->isolate()->counters()->call_const(),
1, r0, r3);
// Check that the maps haven't changed.
- CheckPrototypes(JSObject::cast(object), r1, holder, r0, r3, r4, name,
- &miss);
+ CheckPrototypes(Handle<JSObject>::cast(object), r1, holder, r0, r3, r4,
+ name, &miss);
// Patch the receiver on the stack with the global proxy if
// necessary.
@@ -2356,28 +2265,25 @@ MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
break;
case STRING_CHECK:
- if (!function->IsBuiltin() && !function_info->strict_mode()) {
- // Calling non-strict non-builtins with a value as the receiver
- // requires boxing.
- __ jmp(&miss);
- } else {
+ if (function->IsBuiltin() || !function->shared()->is_classic_mode()) {
// Check that the object is a two-byte string or a symbol.
__ CompareObjectType(r1, r3, r3, FIRST_NONSTRING_TYPE);
__ b(ge, &miss);
// Check that the maps starting from the prototype haven't changed.
GenerateDirectLoadGlobalFunctionPrototype(
masm(), Context::STRING_FUNCTION_INDEX, r0, &miss);
- CheckPrototypes(JSObject::cast(object->GetPrototype()), r0, holder, r3,
- r1, r4, name, &miss);
- }
- break;
-
- case NUMBER_CHECK: {
- if (!function->IsBuiltin() && !function_info->strict_mode()) {
+ CheckPrototypes(
+ Handle<JSObject>(JSObject::cast(object->GetPrototype())),
+ r0, holder, r3, r1, r4, name, &miss);
+ } else {
// Calling non-strict non-builtins with a value as the receiver
// requires boxing.
__ jmp(&miss);
- } else {
+ }
+ break;
+
+ case NUMBER_CHECK:
+ if (function->IsBuiltin() || !function->shared()->is_classic_mode()) {
Label fast;
// Check that the object is a smi or a heap number.
__ JumpIfSmi(r1, &fast);
@@ -2387,18 +2293,18 @@ MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
// Check that the maps starting from the prototype haven't changed.
GenerateDirectLoadGlobalFunctionPrototype(
masm(), Context::NUMBER_FUNCTION_INDEX, r0, &miss);
- CheckPrototypes(JSObject::cast(object->GetPrototype()), r0, holder, r3,
- r1, r4, name, &miss);
- }
- break;
- }
-
- case BOOLEAN_CHECK: {
- if (!function->IsBuiltin() && !function_info->strict_mode()) {
+ CheckPrototypes(
+ Handle<JSObject>(JSObject::cast(object->GetPrototype())),
+ r0, holder, r3, r1, r4, name, &miss);
+ } else {
// Calling non-strict non-builtins with a value as the receiver
// requires boxing.
__ jmp(&miss);
- } else {
+ }
+ break;
+
+ case BOOLEAN_CHECK:
+ if (function->IsBuiltin() || !function->shared()->is_classic_mode()) {
Label fast;
// Check that the object is a boolean.
__ LoadRoot(ip, Heap::kTrueValueRootIndex);
@@ -2411,112 +2317,91 @@ MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
// Check that the maps starting from the prototype haven't changed.
GenerateDirectLoadGlobalFunctionPrototype(
masm(), Context::BOOLEAN_FUNCTION_INDEX, r0, &miss);
- CheckPrototypes(JSObject::cast(object->GetPrototype()), r0, holder, r3,
- r1, r4, name, &miss);
+ CheckPrototypes(
+ Handle<JSObject>(JSObject::cast(object->GetPrototype())),
+ r0, holder, r3, r1, r4, name, &miss);
+ } else {
+ // Calling non-strict non-builtins with a value as the receiver
+ // requires boxing.
+ __ jmp(&miss);
}
break;
- }
-
- default:
- UNREACHABLE();
}
- CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+ CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
? CALL_AS_FUNCTION
: CALL_AS_METHOD;
__ InvokeFunction(function, arguments(), JUMP_FUNCTION, call_kind);
// Handle call cache miss.
__ bind(&miss);
- MaybeObject* maybe_result = GenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
+ GenerateMissBranch();
// Return the generated code.
return GetCode(function);
}
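
The three primitive-receiver arms are inverted from "jump to miss unless allowed" into a positive test, and strict_mode() becomes the new is_classic_mode() predicate, classic mode being this V8's name for non-strict code. Only builtins and non-classic functions may keep a primitive receiver unboxed; classic-mode non-builtins need the receiver boxed and so take the miss path. The decision, restated as a plain function:

#include <cstdio>

// Primitive receivers (string, number, boolean) may stay unboxed only for
// builtins and for strict ("non-classic") functions.
bool KeepsUnboxedReceiver(bool is_builtin, bool is_classic_mode) {
  return is_builtin || !is_classic_mode;
}

int main() {
  std::printf("builtin, classic:     %d\n", KeepsUnboxedReceiver(true, true));
  std::printf("non-builtin, strict:  %d\n", KeepsUnboxedReceiver(false, false));
  std::printf("non-builtin, classic: %d\n", KeepsUnboxedReceiver(false, true));
  return 0;
}
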
-MaybeObject* CallStubCompiler::CompileCallInterceptor(JSObject* object,
- JSObject* holder,
- String* name) {
+Handle<Code> CallStubCompiler::CompileCallInterceptor(Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- r2 : name
// -- lr : return address
// -----------------------------------
-
Label miss;
-
GenerateNameCheck(name, &miss);
// Get the number of arguments.
const int argc = arguments().immediate();
-
- LookupResult lookup;
+ LookupResult lookup(isolate());
LookupPostInterceptor(holder, name, &lookup);
// Get the receiver from the stack.
__ ldr(r1, MemOperand(sp, argc * kPointerSize));
- CallInterceptorCompiler compiler(this, arguments(), r2, extra_ic_state_);
- MaybeObject* result = compiler.Compile(masm(),
- object,
- holder,
- name,
- &lookup,
- r1,
- r3,
- r4,
- r0,
- &miss);
- if (result->IsFailure()) {
- return result;
- }
+ CallInterceptorCompiler compiler(this, arguments(), r2, extra_state_);
+ compiler.Compile(masm(), object, holder, name, &lookup, r1, r3, r4, r0,
+ &miss);
// Move returned value, the function to call, to r1.
__ mov(r1, r0);
// Restore receiver.
__ ldr(r0, MemOperand(sp, argc * kPointerSize));
- GenerateCallFunction(masm(), object, arguments(), &miss, extra_ic_state_);
+ GenerateCallFunction(masm(), object, arguments(), &miss, extra_state_);
// Handle call cache miss.
__ bind(&miss);
- MaybeObject* maybe_result = GenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
+ GenerateMissBranch();
// Return the generated code.
return GetCode(INTERCEPTOR, name);
}
-MaybeObject* CallStubCompiler::CompileCallGlobal(JSObject* object,
- GlobalObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
+Handle<Code> CallStubCompiler::CompileCallGlobal(
+ Handle<JSObject> object,
+ Handle<GlobalObject> holder,
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<JSFunction> function,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- r2 : name
// -- lr : return address
// -----------------------------------
-
if (HasCustomCallGenerator(function)) {
- MaybeObject* maybe_result = CompileCustomCall(
- object, holder, cell, function, name);
- Object* result;
- if (!maybe_result->ToObject(&result)) return maybe_result;
- // undefined means bail out to regular compiler.
- if (!result->IsUndefined()) return result;
+ Handle<Code> code = CompileCustomCall(object, holder, cell, function, name);
+ // A null handle means bail out to the regular compiler code below.
+ if (!code.is_null()) return code;
}
Label miss;
-
GenerateNameCheck(name, &miss);
// Get the number of arguments.
const int argc = arguments().immediate();
-
GenerateGlobalReceiverCheck(object, holder, name, &miss);
-
GenerateLoadFunctionFromCell(cell, function, &miss);
// Patch the receiver on the stack with the global proxy if
@@ -2532,39 +2417,31 @@ MaybeObject* CallStubCompiler::CompileCallGlobal(JSObject* object,
// Jump to the cached code (tail call).
Counters* counters = masm()->isolate()->counters();
__ IncrementCounter(counters->call_global_inline(), 1, r3, r4);
- ASSERT(function->is_compiled());
- Handle<Code> code(function->code());
ParameterCount expected(function->shared()->formal_parameter_count());
- CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+ CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
? CALL_AS_FUNCTION
: CALL_AS_METHOD;
- if (V8::UseCrankshaft()) {
- // TODO(kasperl): For now, we always call indirectly through the
- // code field in the function to allow recompilation to take effect
- // without changing any of the call sites.
- __ ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
- __ InvokeCode(r3, expected, arguments(), JUMP_FUNCTION,
- NullCallWrapper(), call_kind);
- } else {
- __ InvokeCode(code, expected, arguments(), RelocInfo::CODE_TARGET,
- JUMP_FUNCTION, call_kind);
- }
+ // We call indirectly through the code field in the function to
+ // allow recompilation to take effect without changing any of the
+ // call sites.
+ __ ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
+ __ InvokeCode(r3, expected, arguments(), JUMP_FUNCTION,
+ NullCallWrapper(), call_kind);
// Handle call cache miss.
__ bind(&miss);
__ IncrementCounter(counters->call_global_inline_miss(), 1, r1, r3);
- MaybeObject* maybe_result = GenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
+ GenerateMissBranch();
// Return the generated code.
return GetCode(NORMAL, name);
}
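
The UseCrankshaft() branch disappears: calls now always go indirectly through the function's code-entry field, so recompilation can swap in new code without patching existing call sites, which the removed TODO had already anticipated. A self-contained illustration of calling through a mutable entry field (toy types):

#include <cstdio>

struct JSFunctionToy {
  void (*code_entry)();  // mutable: recompilation just rewrites this field
};

void unoptimized_code() { std::puts("running unoptimized code"); }
void optimized_code()   { std::puts("running optimized code"); }

int main() {
  JSFunctionToy f{unoptimized_code};
  f.code_entry();                 // call site loads the entry every time
  f.code_entry = optimized_code;  // "recompilation" updates one field
  f.code_entry();                 // same call site, new code, no patching
  return 0;
}
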
-MaybeObject* StoreStubCompiler::CompileStoreField(JSObject* object,
+Handle<Code> StoreStubCompiler::CompileStoreField(Handle<JSObject> object,
int index,
- Map* transition,
- String* name) {
+ Handle<Map> transition,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- r0 : value
// -- r1 : receiver
@@ -2573,24 +2450,20 @@ MaybeObject* StoreStubCompiler::CompileStoreField(JSObject* object,
// -----------------------------------
Label miss;
- GenerateStoreField(masm(),
- object,
- index,
- transition,
- r1, r2, r3,
- &miss);
+ GenerateStoreField(masm(), object, index, transition, r1, r2, r3, &miss);
__ bind(&miss);
Handle<Code> ic = masm()->isolate()->builtins()->StoreIC_Miss();
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
- return GetCode(transition == NULL ? FIELD : MAP_TRANSITION, name);
+ return GetCode(transition.is_null() ? FIELD : MAP_TRANSITION, name);
}
-MaybeObject* StoreStubCompiler::CompileStoreCallback(JSObject* object,
- AccessorInfo* callback,
- String* name) {
+Handle<Code> StoreStubCompiler::CompileStoreCallback(
+ Handle<JSObject> object,
+ Handle<AccessorInfo> callback,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- r0 : value
// -- r1 : receiver
@@ -2617,7 +2490,7 @@ MaybeObject* StoreStubCompiler::CompileStoreCallback(JSObject* object,
ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
__ push(r1); // receiver
- __ mov(ip, Operand(Handle<AccessorInfo>(callback))); // callback info
+ __ mov(ip, Operand(callback)); // callback info
__ Push(ip, r2, r0);
// Do tail-call to the runtime system.
@@ -2636,8 +2509,9 @@ MaybeObject* StoreStubCompiler::CompileStoreCallback(JSObject* object,
}
-MaybeObject* StoreStubCompiler::CompileStoreInterceptor(JSObject* receiver,
- String* name) {
+Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
+ Handle<JSObject> receiver,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- r0 : value
// -- r1 : receiver
@@ -2684,9 +2558,10 @@ MaybeObject* StoreStubCompiler::CompileStoreInterceptor(JSObject* receiver,
}
-MaybeObject* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object,
- JSGlobalPropertyCell* cell,
- String* name) {
+Handle<Code> StoreStubCompiler::CompileStoreGlobal(
+ Handle<GlobalObject> object,
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- r0 : value
// -- r1 : receiver
@@ -2704,7 +2579,7 @@ MaybeObject* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object,
// cell could have been deleted and reintroducing the global needs
// to update the property details in the property dictionary of the
// global object. We bail out to the runtime system to do that.
- __ mov(r4, Operand(Handle<JSGlobalPropertyCell>(cell)));
+ __ mov(r4, Operand(cell));
__ LoadRoot(r5, Heap::kTheHoleValueRootIndex);
__ ldr(r6, FieldMemOperand(r4, JSGlobalPropertyCell::kValueOffset));
__ cmp(r5, r6);
@@ -2713,6 +2588,15 @@ MaybeObject* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object,
// Store the value in the cell.
__ str(r0, FieldMemOperand(r4, JSGlobalPropertyCell::kValueOffset));
+ __ mov(r1, r0);
+ __ RecordWriteField(r4,
+ JSGlobalPropertyCell::kValueOffset,
+ r1,
+ r2,
+ kLRHasNotBeenSaved,
+ kDontSaveFPRegs,
+ OMIT_REMEMBERED_SET);
+
Counters* counters = masm()->isolate()->counters();
__ IncrementCounter(counters->named_store_global_inline(), 1, r4, r3);
__ Ret();
@@ -2728,9 +2612,9 @@ MaybeObject* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object,
}
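
The store into the global property cell now emits RecordWriteField, required once marking can run incrementally; the value is copied into r1 first, presumably so the barrier may clobber its value register while r0 survives as the stub's result. What such a marking barrier protects against, sketched with toy objects (hypothetical names, not V8's):

#include <cstdio>

struct Obj {
  Obj* field = nullptr;
  bool black = false;  // "black" = already fully scanned by the marker
};

// After a field store, tell the collector about the new edge so a marker
// running incrementally does not miss it.
void WriteBarrier(Obj* host, Obj* value) {
  if (host->black && value != nullptr && !value->black) {
    std::puts("barrier: re-grey host / mark value");
  }
}

void StoreField(Obj* host, Obj* value) {
  host->field = value;        // the str into the cell in the hunk above
  WriteBarrier(host, value);  // RecordWriteField's job
}

int main() {
  Obj cell, v;
  cell.black = true;      // cell was already visited by the marker
  StoreField(&cell, &v);  // barrier fires
  return 0;
}
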
-MaybeObject* LoadStubCompiler::CompileLoadNonexistent(String* name,
- JSObject* object,
- JSObject* last) {
+Handle<Code> LoadStubCompiler::CompileLoadNonexistent(Handle<String> name,
+ Handle<JSObject> object,
+ Handle<JSObject> last) {
// ----------- S t a t e -------------
// -- r0 : receiver
// -- lr : return address
@@ -2746,15 +2630,8 @@ MaybeObject* LoadStubCompiler::CompileLoadNonexistent(String* name,
// If the last object in the prototype chain is a global object,
// check that the global property cell is empty.
if (last->IsGlobalObject()) {
- MaybeObject* cell = GenerateCheckPropertyCell(masm(),
- GlobalObject::cast(last),
- name,
- r1,
- &miss);
- if (cell->IsFailure()) {
- miss.Unuse();
- return cell;
- }
+ GenerateCheckPropertyCell(
+ masm(), Handle<GlobalObject>::cast(last), name, r1, &miss);
}
// Return undefined if maps of the full prototype chain are still the
@@ -2766,14 +2643,14 @@ MaybeObject* LoadStubCompiler::CompileLoadNonexistent(String* name,
GenerateLoadMiss(masm(), Code::LOAD_IC);
// Return the generated code.
- return GetCode(NONEXISTENT, heap()->empty_string());
+ return GetCode(NONEXISTENT, factory()->empty_string());
}
-MaybeObject* LoadStubCompiler::CompileLoadField(JSObject* object,
- JSObject* holder,
+Handle<Code> LoadStubCompiler::CompileLoadField(Handle<JSObject> object,
+ Handle<JSObject> holder,
int index,
- String* name) {
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- r0 : receiver
// -- r2 : name
@@ -2790,24 +2667,19 @@ MaybeObject* LoadStubCompiler::CompileLoadField(JSObject* object,
}
-MaybeObject* LoadStubCompiler::CompileLoadCallback(String* name,
- JSObject* object,
- JSObject* holder,
- AccessorInfo* callback) {
+Handle<Code> LoadStubCompiler::CompileLoadCallback(
+ Handle<String> name,
+ Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<AccessorInfo> callback) {
// ----------- S t a t e -------------
// -- r0 : receiver
// -- r2 : name
// -- lr : return address
// -----------------------------------
Label miss;
-
- MaybeObject* result = GenerateLoadCallback(object, holder, r0, r2, r3, r1, r4,
- callback, name, &miss);
- if (result->IsFailure()) {
- miss.Unuse();
- return result;
- }
-
+ GenerateLoadCallback(object, holder, r0, r2, r3, r1, r4, callback, name,
+ &miss);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::LOAD_IC);
@@ -2816,10 +2688,10 @@ MaybeObject* LoadStubCompiler::CompileLoadCallback(String* name,
}
-MaybeObject* LoadStubCompiler::CompileLoadConstant(JSObject* object,
- JSObject* holder,
- Object* value,
- String* name) {
+Handle<Code> LoadStubCompiler::CompileLoadConstant(Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<Object> value,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- r0 : receiver
// -- r2 : name
@@ -2836,9 +2708,9 @@ MaybeObject* LoadStubCompiler::CompileLoadConstant(JSObject* object,
}
-MaybeObject* LoadStubCompiler::CompileLoadInterceptor(JSObject* object,
- JSObject* holder,
- String* name) {
+Handle<Code> LoadStubCompiler::CompileLoadInterceptor(Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- r0 : receiver
// -- r2 : name
@@ -2846,17 +2718,9 @@ MaybeObject* LoadStubCompiler::CompileLoadInterceptor(JSObject* object,
// -----------------------------------
Label miss;
- LookupResult lookup;
+ LookupResult lookup(isolate());
LookupPostInterceptor(holder, name, &lookup);
- GenerateLoadInterceptor(object,
- holder,
- &lookup,
- r0,
- r2,
- r3,
- r1,
- r4,
- name,
+ GenerateLoadInterceptor(object, holder, &lookup, r0, r2, r3, r1, r4, name,
&miss);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::LOAD_IC);
@@ -2866,11 +2730,12 @@ MaybeObject* LoadStubCompiler::CompileLoadInterceptor(JSObject* object,
}
-MaybeObject* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
- GlobalObject* holder,
- JSGlobalPropertyCell* cell,
- String* name,
- bool is_dont_delete) {
+Handle<Code> LoadStubCompiler::CompileLoadGlobal(
+ Handle<JSObject> object,
+ Handle<GlobalObject> holder,
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<String> name,
+ bool is_dont_delete) {
// ----------- S t a t e -------------
// -- r0 : receiver
// -- r2 : name
@@ -2881,7 +2746,7 @@ MaybeObject* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
// If the object is the holder then we know that it's a global
// object which can only happen for contextual calls. In this case,
// the receiver cannot be a smi.
- if (object != holder) {
+ if (!object.is_identical_to(holder)) {
__ JumpIfSmi(r0, &miss);
}
@@ -2889,7 +2754,7 @@ MaybeObject* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
CheckPrototypes(object, r0, holder, r3, r4, r1, name, &miss);
// Get the value from the cell.
- __ mov(r3, Operand(Handle<JSGlobalPropertyCell>(cell)));
+ __ mov(r3, Operand(cell));
__ ldr(r4, FieldMemOperand(r3, JSGlobalPropertyCell::kValueOffset));
// Check for deleted property if property can actually be deleted.
@@ -2913,9 +2778,9 @@ MaybeObject* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
}
-MaybeObject* KeyedLoadStubCompiler::CompileLoadField(String* name,
- JSObject* receiver,
- JSObject* holder,
+Handle<Code> KeyedLoadStubCompiler::CompileLoadField(Handle<String> name,
+ Handle<JSObject> receiver,
+ Handle<JSObject> holder,
int index) {
// ----------- S t a t e -------------
// -- lr : return address
@@ -2925,7 +2790,7 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadField(String* name,
Label miss;
// Check the key is the cached one.
- __ cmp(r0, Operand(Handle<String>(name)));
+ __ cmp(r0, Operand(name));
__ b(ne, &miss);
GenerateLoadField(receiver, holder, r1, r2, r3, r4, index, name, &miss);
@@ -2936,11 +2801,11 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadField(String* name,
}
-MaybeObject* KeyedLoadStubCompiler::CompileLoadCallback(
- String* name,
- JSObject* receiver,
- JSObject* holder,
- AccessorInfo* callback) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadCallback(
+ Handle<String> name,
+ Handle<JSObject> receiver,
+ Handle<JSObject> holder,
+ Handle<AccessorInfo> callback) {
// ----------- S t a t e -------------
// -- lr : return address
// -- r0 : key
@@ -2949,16 +2814,11 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadCallback(
Label miss;
// Check the key is the cached one.
- __ cmp(r0, Operand(Handle<String>(name)));
+ __ cmp(r0, Operand(name));
__ b(ne, &miss);
- MaybeObject* result = GenerateLoadCallback(receiver, holder, r1, r0, r2, r3,
- r4, callback, name, &miss);
- if (result->IsFailure()) {
- miss.Unuse();
- return result;
- }
-
+ GenerateLoadCallback(receiver, holder, r1, r0, r2, r3, r4, callback, name,
+ &miss);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
@@ -2966,10 +2826,11 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadCallback(
}
-MaybeObject* KeyedLoadStubCompiler::CompileLoadConstant(String* name,
- JSObject* receiver,
- JSObject* holder,
- Object* value) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadConstant(
+ Handle<String> name,
+ Handle<JSObject> receiver,
+ Handle<JSObject> holder,
+ Handle<Object> value) {
// ----------- S t a t e -------------
// -- lr : return address
// -- r0 : key
@@ -2978,7 +2839,7 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadConstant(String* name,
Label miss;
// Check the key is the cached one.
- __ cmp(r0, Operand(Handle<String>(name)));
+ __ cmp(r0, Operand(name));
__ b(ne, &miss);
GenerateLoadConstant(receiver, holder, r1, r2, r3, r4, value, name, &miss);
@@ -2990,9 +2851,10 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadConstant(String* name,
}
-MaybeObject* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
- JSObject* holder,
- String* name) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadInterceptor(
+ Handle<JSObject> receiver,
+ Handle<JSObject> holder,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- lr : return address
// -- r0 : key
@@ -3001,20 +2863,12 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
Label miss;
// Check the key is the cached one.
- __ cmp(r0, Operand(Handle<String>(name)));
+ __ cmp(r0, Operand(name));
__ b(ne, &miss);
- LookupResult lookup;
+ LookupResult lookup(isolate());
LookupPostInterceptor(holder, name, &lookup);
- GenerateLoadInterceptor(receiver,
- holder,
- &lookup,
- r1,
- r0,
- r2,
- r3,
- r4,
- name,
+ GenerateLoadInterceptor(receiver, holder, &lookup, r1, r0, r2, r3, r4, name,
&miss);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
@@ -3023,7 +2877,8 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
}
-MaybeObject* KeyedLoadStubCompiler::CompileLoadArrayLength(String* name) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadArrayLength(
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- lr : return address
// -- r0 : key
@@ -3032,7 +2887,7 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadArrayLength(String* name) {
Label miss;
// Check the key is the cached one.
- __ cmp(r0, Operand(Handle<String>(name)));
+ __ cmp(r0, Operand(name));
__ b(ne, &miss);
GenerateLoadArrayLength(masm(), r1, r2, &miss);
@@ -3043,7 +2898,8 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadArrayLength(String* name) {
}
-MaybeObject* KeyedLoadStubCompiler::CompileLoadStringLength(String* name) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadStringLength(
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- lr : return address
// -- r0 : key
@@ -3055,7 +2911,7 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadStringLength(String* name) {
__ IncrementCounter(counters->keyed_load_string_length(), 1, r2, r3);
// Check the key is the cached one.
- __ cmp(r0, Operand(Handle<String>(name)));
+ __ cmp(r0, Operand(name));
__ b(ne, &miss);
GenerateLoadStringLength(masm(), r1, r2, r3, &miss, true);
@@ -3068,7 +2924,8 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadStringLength(String* name) {
}
-MaybeObject* KeyedLoadStubCompiler::CompileLoadFunctionPrototype(String* name) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadFunctionPrototype(
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- lr : return address
// -- r0 : key
@@ -3080,7 +2937,7 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadFunctionPrototype(String* name) {
__ IncrementCounter(counters->keyed_load_function_prototype(), 1, r2, r3);
// Check the name hasn't changed.
- __ cmp(r0, Operand(Handle<String>(name)));
+ __ cmp(r0, Operand(name));
__ b(ne, &miss);
GenerateLoadFunctionPrototype(masm(), r1, r2, r3, &miss);
@@ -3092,33 +2949,29 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadFunctionPrototype(String* name) {
}
-MaybeObject* KeyedLoadStubCompiler::CompileLoadElement(Map* receiver_map) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadElement(
+ Handle<Map> receiver_map) {
// ----------- S t a t e -------------
// -- lr : return address
// -- r0 : key
// -- r1 : receiver
// -----------------------------------
- Code* stub;
ElementsKind elements_kind = receiver_map->elements_kind();
- MaybeObject* maybe_stub = KeyedLoadElementStub(elements_kind).TryGetCode();
- if (!maybe_stub->To(&stub)) return maybe_stub;
- __ DispatchMap(r1,
- r2,
- Handle<Map>(receiver_map),
- Handle<Code>(stub),
- DO_SMI_CHECK);
+ Handle<Code> stub = KeyedLoadElementStub(elements_kind).GetCode();
+
+ __ DispatchMap(r1, r2, receiver_map, stub, DO_SMI_CHECK);
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Miss();
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
- return GetCode(NORMAL, NULL);
+ return GetCode(NORMAL, factory()->empty_string());
}
-MaybeObject* KeyedLoadStubCompiler::CompileLoadMegamorphic(
- MapList* receiver_maps,
- CodeList* handler_ics) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadPolymorphic(
+ MapHandleList* receiver_maps,
+ CodeHandleList* handler_ics) {
// ----------- S t a t e -------------
// -- lr : return address
// -- r0 : key
@@ -3130,11 +2983,9 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadMegamorphic(
int receiver_count = receiver_maps->length();
__ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
for (int current = 0; current < receiver_count; ++current) {
- Handle<Map> map(receiver_maps->at(current));
- Handle<Code> code(handler_ics->at(current));
- __ mov(ip, Operand(map));
+ __ mov(ip, Operand(receiver_maps->at(current)));
__ cmp(r2, ip);
- __ Jump(code, RelocInfo::CODE_TARGET, eq);
+ __ Jump(handler_ics->at(current), RelocInfo::CODE_TARGET, eq);
}
__ bind(&miss);
@@ -3142,14 +2993,14 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadMegamorphic(
__ Jump(miss_ic, RelocInfo::CODE_TARGET, al);
// Return the generated code.
- return GetCode(NORMAL, NULL, MEGAMORPHIC);
+ return GetCode(NORMAL, factory()->empty_string(), MEGAMORPHIC);
}
-MaybeObject* KeyedStoreStubCompiler::CompileStoreField(JSObject* object,
+Handle<Code> KeyedStoreStubCompiler::CompileStoreField(Handle<JSObject> object,
int index,
- Map* transition,
- String* name) {
+ Handle<Map> transition,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- r0 : value
// -- r1 : name
@@ -3162,17 +3013,12 @@ MaybeObject* KeyedStoreStubCompiler::CompileStoreField(JSObject* object,
__ IncrementCounter(counters->keyed_store_field(), 1, r3, r4);
// Check that the name has not changed.
- __ cmp(r1, Operand(Handle<String>(name)));
+ __ cmp(r1, Operand(name));
__ b(ne, &miss);
// r3 is used as a scratch register. r1 and r2 keep their values if a jump
// to the miss label is generated.
- GenerateStoreField(masm(),
- object,
- index,
- transition,
- r2, r1, r3,
- &miss);
+ GenerateStoreField(masm(), object, index, transition, r2, r1, r3, &miss);
__ bind(&miss);
__ DecrementCounter(counters->keyed_store_field(), 1, r3, r4);
@@ -3180,11 +3026,12 @@ MaybeObject* KeyedStoreStubCompiler::CompileStoreField(JSObject* object,
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
- return GetCode(transition == NULL ? FIELD : MAP_TRANSITION, name);
+ return GetCode(transition.is_null() ? FIELD : MAP_TRANSITION, name);
}
-MaybeObject* KeyedStoreStubCompiler::CompileStoreElement(Map* receiver_map) {
+Handle<Code> KeyedStoreStubCompiler::CompileStoreElement(
+ Handle<Map> receiver_map) {
// ----------- S t a t e -------------
// -- r0 : value
// -- r1 : key
@@ -3192,29 +3039,25 @@ MaybeObject* KeyedStoreStubCompiler::CompileStoreElement(Map* receiver_map) {
// -- lr : return address
// -- r3 : scratch
// -----------------------------------
- Code* stub;
ElementsKind elements_kind = receiver_map->elements_kind();
bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
- MaybeObject* maybe_stub =
- KeyedStoreElementStub(is_js_array, elements_kind).TryGetCode();
- if (!maybe_stub->To(&stub)) return maybe_stub;
- __ DispatchMap(r2,
- r3,
- Handle<Map>(receiver_map),
- Handle<Code>(stub),
- DO_SMI_CHECK);
+ Handle<Code> stub =
+ KeyedStoreElementStub(is_js_array, elements_kind).GetCode();
+
+ __ DispatchMap(r2, r3, receiver_map, stub, DO_SMI_CHECK);
Handle<Code> ic = isolate()->builtins()->KeyedStoreIC_Miss();
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
- return GetCode(NORMAL, NULL);
+ return GetCode(NORMAL, factory()->empty_string());
}
-MaybeObject* KeyedStoreStubCompiler::CompileStoreMegamorphic(
- MapList* receiver_maps,
- CodeList* handler_ics) {
+Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic(
+ MapHandleList* receiver_maps,
+ CodeHandleList* handler_stubs,
+ MapHandleList* transitioned_maps) {
// ----------- S t a t e -------------
// -- r0 : value
// -- r1 : key
@@ -3227,12 +3070,18 @@ MaybeObject* KeyedStoreStubCompiler::CompileStoreMegamorphic(
int receiver_count = receiver_maps->length();
__ ldr(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
- for (int current = 0; current < receiver_count; ++current) {
- Handle<Map> map(receiver_maps->at(current));
- Handle<Code> code(handler_ics->at(current));
- __ mov(ip, Operand(map));
+ for (int i = 0; i < receiver_count; ++i) {
+ __ mov(ip, Operand(receiver_maps->at(i)));
__ cmp(r3, ip);
- __ Jump(code, RelocInfo::CODE_TARGET, eq);
+ if (transitioned_maps->at(i).is_null()) {
+ __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET, eq);
+ } else {
+ Label next_map;
+ __ b(ne, &next_map);
+ __ mov(r3, Operand(transitioned_maps->at(i)));
+ __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET, al);
+ __ bind(&next_map);
+ }
}
__ bind(&miss);
@@ -3240,11 +3089,12 @@ MaybeObject* KeyedStoreStubCompiler::CompileStoreMegamorphic(
__ Jump(miss_ic, RelocInfo::CODE_TARGET, al);
// Return the generated code.
- return GetCode(NORMAL, NULL, MEGAMORPHIC);
+ return GetCode(NORMAL, factory()->empty_string(), MEGAMORPHIC);
}
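
CompileStorePolymorphic now takes a parallel transitioned_maps list: a null
entry means a plain jump to the matching handler stub, while a non-null entry
first loads the target map into r3 so the handler can change the receiver's
elements kind as part of the store. A rough JS sketch of a store site that
would grow such a stub; which handler serves which store is an assumption
here, not something plain JS can observe:

    // Hypothetical illustration: one keyed-store site that sees several
    // receiver maps, so the IC compiles a polymorphic store stub.
    function put(a, i, v) { a[i] = v; }

    var smis    = [1, 2, 3];      // smi-only elements
    var doubles = [1.5, 2.5];     // double elements
    var objects = [{}, {}];       // generic fast elements

    put(smis, 0, 7);              // smi-only store handler
    put(doubles, 0, 3.25);        // double store handler
    put(objects, 0, "x");         // generic fast handler
    put(smis, 1, 2.5);            // smi-only to double transition, served by
                                  // an entry with a non-null transitioned map
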
-MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) {
+Handle<Code> ConstructStubCompiler::CompileConstructStub(
+ Handle<JSFunction> function) {
// ----------- S t a t e -------------
// -- r0 : argc
// -- r1 : constructor
@@ -3290,12 +3140,7 @@ MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) {
// r2: initial map
// r7: undefined
__ ldrb(r3, FieldMemOperand(r2, Map::kInstanceSizeOffset));
- __ AllocateInNewSpace(r3,
- r4,
- r5,
- r6,
- &generic_stub_call,
- SIZE_IN_WORDS);
+ __ AllocateInNewSpace(r3, r4, r5, r6, &generic_stub_call, SIZE_IN_WORDS);
// Allocated the JSObject; now initialize the fields. The map is set to the
// initial map, and properties and elements are set to the empty fixed array.
@@ -3327,7 +3172,7 @@ MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) {
// r7: undefined
// Fill the initialized properties with a constant value or a passed argument
// depending on the this.x = ...; assignment in the function.
- SharedFunctionInfo* shared = function->shared();
+ Handle<SharedFunctionInfo> shared(function->shared());
for (int i = 0; i < shared->this_property_assignments_count(); i++) {
if (shared->IsThisPropertyAssignmentArgument(i)) {
Label not_passed, next;
@@ -3454,6 +3299,7 @@ static bool IsElementTypeSigned(ElementsKind elements_kind) {
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
+ case FAST_SMI_ONLY_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
@@ -3540,6 +3386,7 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray(
}
break;
case FAST_ELEMENTS:
+ case FAST_SMI_ONLY_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
@@ -3784,9 +3631,9 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray(
__ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
__ bind(&miss_force_generic);
- Code* stub = masm->isolate()->builtins()->builtin(
- Builtins::kKeyedLoadIC_MissForceGeneric);
- __ Jump(Handle<Code>(stub), RelocInfo::CODE_TARGET);
+ Handle<Code> stub =
+ masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
+ __ Jump(stub, RelocInfo::CODE_TARGET);
}
@@ -3880,6 +3727,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
}
break;
case FAST_ELEMENTS:
+ case FAST_SMI_ONLY_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
@@ -3943,6 +3791,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
+ case FAST_SMI_ONLY_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
@@ -4082,6 +3931,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
+ case FAST_SMI_ONLY_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
@@ -4157,9 +4007,9 @@ void KeyedLoadStubCompiler::GenerateLoadFastElement(MacroAssembler* masm) {
__ Ret();
__ bind(&miss_force_generic);
- Code* stub = masm->isolate()->builtins()->builtin(
- Builtins::kKeyedLoadIC_MissForceGeneric);
- __ Jump(Handle<Code>(stub), RelocInfo::CODE_TARGET);
+ Handle<Code> stub =
+ masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
+ __ Jump(stub, RelocInfo::CODE_TARGET);
}
@@ -4234,8 +4084,10 @@ void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(
}
-void KeyedStoreStubCompiler::GenerateStoreFastElement(MacroAssembler* masm,
- bool is_js_array) {
+void KeyedStoreStubCompiler::GenerateStoreFastElement(
+ MacroAssembler* masm,
+ bool is_js_array,
+ ElementsKind elements_kind) {
// ----------- S t a t e -------------
// -- r0 : value
// -- r1 : key
@@ -4244,7 +4096,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(MacroAssembler* masm,
// -- r3 : scratch
// -- r4 : scratch (elements)
// -----------------------------------
- Label miss_force_generic;
+ Label miss_force_generic, transition_elements_kind;
Register value_reg = r0;
Register key_reg = r1;
@@ -4277,15 +4129,33 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(MacroAssembler* masm,
__ cmp(key_reg, scratch);
__ b(hs, &miss_force_generic);
- __ add(scratch,
- elements_reg, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
- __ str(value_reg,
- MemOperand(scratch, key_reg, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ RecordWrite(scratch,
- Operand(key_reg, LSL, kPointerSizeLog2 - kSmiTagSize),
- receiver_reg , elements_reg);
-
+ if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
+ __ JumpIfNotSmi(value_reg, &transition_elements_kind);
+ __ add(scratch,
+ elements_reg,
+ Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
+ __ add(scratch,
+ scratch,
+ Operand(key_reg, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ str(value_reg, MemOperand(scratch));
+ } else {
+ ASSERT(elements_kind == FAST_ELEMENTS);
+ __ add(scratch,
+ elements_reg,
+ Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
+ __ add(scratch,
+ scratch,
+ Operand(key_reg, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ str(value_reg, MemOperand(scratch));
+ __ mov(receiver_reg, value_reg);
+ __ RecordWrite(elements_reg, // Object.
+ scratch, // Address.
+ receiver_reg, // Value.
+ kLRHasNotBeenSaved,
+ kDontSaveFPRegs);
+ }
// value_reg (r0) is preserved.
// Done.
__ Ret();
@@ -4294,6 +4164,10 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(MacroAssembler* masm,
Handle<Code> ic =
masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
__ Jump(ic, RelocInfo::CODE_TARGET);
+
+ __ bind(&transition_elements_kind);
+ Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss();
+ __ Jump(ic_miss, RelocInfo::CODE_TARGET);
}
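
GenerateStoreFastElement is now parameterized by elements kind: for
FAST_SMI_ONLY_ELEMENTS a non-smi value bails out through the new
transition_elements_kind label to KeyedStoreIC_Miss, and only the
FAST_ELEMENTS path pays for the RecordWrite barrier. A hypothetical JS
sketch of the two paths:

    var a = [1, 2, 3];   // starts with smi-only elements

    a[0] = 42;           // smi into a smi-only array: plain store, no barrier
    a[1] = {};           // non-smi: the smi-only stub jumps to
                         // transition_elements_kind (KeyedStoreIC_Miss), the
                         // array transitions to FAST_ELEMENTS, and later
                         // object stores go through the RecordWrite barrier
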
@@ -4309,15 +4183,15 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
// -- r4 : scratch
// -- r5 : scratch
// -----------------------------------
- Label miss_force_generic, smi_value, is_nan, maybe_nan, have_double_value;
+ Label miss_force_generic, transition_elements_kind;
Register value_reg = r0;
Register key_reg = r1;
Register receiver_reg = r2;
- Register scratch = r3;
- Register elements_reg = r4;
- Register mantissa_reg = r5;
- Register exponent_reg = r6;
+ Register elements_reg = r3;
+ Register scratch1 = r4;
+ Register scratch2 = r5;
+ Register scratch3 = r6;
Register scratch4 = r7;
// This stub is meant to be tail-jumped to, the receiver must already
@@ -4329,90 +4203,25 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
// Check that the key is within bounds.
if (is_js_array) {
- __ ldr(scratch, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
+ __ ldr(scratch1, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
} else {
- __ ldr(scratch,
+ __ ldr(scratch1,
FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
}
// Compare smis; an unsigned compare catches both negative and out-of-bound
// indexes.
- __ cmp(key_reg, scratch);
+ __ cmp(key_reg, scratch1);
__ b(hs, &miss_force_generic);
- // Handle smi values specially.
- __ JumpIfSmi(value_reg, &smi_value);
-
- // Ensure that the object is a heap number
- __ CheckMap(value_reg,
- scratch,
- masm->isolate()->factory()->heap_number_map(),
- &miss_force_generic,
- DONT_DO_SMI_CHECK);
-
- // Check for nan: all NaN values have a value greater (signed) than 0x7ff00000
- // in the exponent.
- __ mov(scratch, Operand(kNaNOrInfinityLowerBoundUpper32));
- __ ldr(exponent_reg, FieldMemOperand(value_reg, HeapNumber::kExponentOffset));
- __ cmp(exponent_reg, scratch);
- __ b(ge, &maybe_nan);
-
- __ ldr(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
-
- __ bind(&have_double_value);
- __ add(scratch, elements_reg,
- Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize));
- __ str(mantissa_reg, FieldMemOperand(scratch, FixedDoubleArray::kHeaderSize));
- uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
- __ str(exponent_reg, FieldMemOperand(scratch, offset));
- __ Ret();
-
- __ bind(&maybe_nan);
- // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
- // it's an Infinity, and the non-NaN code path applies.
- __ b(gt, &is_nan);
- __ ldr(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
- __ cmp(mantissa_reg, Operand(0));
- __ b(eq, &have_double_value);
- __ bind(&is_nan);
- // Load canonical NaN for storing into the double array.
- uint64_t nan_int64 = BitCast<uint64_t>(
- FixedDoubleArray::canonical_not_the_hole_nan_as_double());
- __ mov(mantissa_reg, Operand(static_cast<uint32_t>(nan_int64)));
- __ mov(exponent_reg, Operand(static_cast<uint32_t>(nan_int64 >> 32)));
- __ jmp(&have_double_value);
-
- __ bind(&smi_value);
- __ add(scratch, elements_reg,
- Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
- __ add(scratch, scratch,
- Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize));
- // scratch is now effective address of the double element
-
- FloatingPointHelper::Destination destination;
- if (CpuFeatures::IsSupported(VFP3)) {
- destination = FloatingPointHelper::kVFPRegisters;
- } else {
- destination = FloatingPointHelper::kCoreRegisters;
- }
-
- Register untagged_value = receiver_reg;
- __ SmiUntag(untagged_value, value_reg);
- FloatingPointHelper::ConvertIntToDouble(
- masm,
- untagged_value,
- destination,
- d0,
- mantissa_reg,
- exponent_reg,
- scratch4,
- s2);
- if (destination == FloatingPointHelper::kVFPRegisters) {
- CpuFeatures::Scope scope(VFP3);
- __ vstr(d0, scratch, 0);
- } else {
- __ str(mantissa_reg, MemOperand(scratch, 0));
- __ str(exponent_reg, MemOperand(scratch, Register::kSizeInBytes));
- }
+ __ StoreNumberToDoubleElements(value_reg,
+ key_reg,
+ receiver_reg,
+ elements_reg,
+ scratch1,
+ scratch2,
+ scratch3,
+ scratch4,
+ &transition_elements_kind);
__ Ret();
// Handle store cache miss, replacing the ic with the generic stub.
@@ -4420,6 +4229,10 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
Handle<Code> ic =
masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
__ Jump(ic, RelocInfo::CODE_TARGET);
+
+ __ bind(&transition_elements_kind);
+ Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss();
+ __ Jump(ic_miss, RelocInfo::CODE_TARGET);
}
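
The open-coded NaN canonicalization and smi-to-double conversion above is
folded into the StoreNumberToDoubleElements macro, which also sends
non-number values to transition_elements_kind instead of the force-generic
miss. In rough JS terms (a sketch of the intended paths, not behavior plain
JS can verify):

    var d = [1.5, 2.5];   // double elements

    d[0] = 3;             // smi: converted to a double in place
    d[1] = 0 / 0;         // NaN: stored as the canonical non-hole NaN pattern
    d[0] = "x";           // non-number: jumps to transition_elements_kind and
                          // the backing store leaves the double representation
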
diff --git a/deps/v8/src/array.js b/deps/v8/src/array.js
index 98fe3ac7b..3d8e278b9 100644
--- a/deps/v8/src/array.js
+++ b/deps/v8/src/array.js
@@ -201,17 +201,14 @@ function ConvertToString(x) {
function ConvertToLocaleString(e) {
- if (e == null) {
+ if (IS_NULL_OR_UNDEFINED(e)) {
return '';
} else {
- // e_obj's toLocaleString might be overwritten, check if it is a function.
- // Call ToString if toLocaleString is not a function.
- // See issue 877615.
+ // According to ES5, section 15.4.4.3, the toLocaleString conversion
+ // must throw a TypeError if ToObject(e).toLocaleString isn't
+ // callable.
var e_obj = ToObject(e);
- if (IS_SPEC_FUNCTION(e_obj.toLocaleString))
- return ToString(e_obj.toLocaleString());
- else
- return ToString(e);
+ return %ToString(e_obj.toLocaleString());
}
}
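
The rewritten ConvertToLocaleString follows ES5 section 15.4.4.3: instead of
silently falling back to ToString, the %ToString runtime call is allowed to
throw when ToObject(e).toLocaleString is not callable. A minimal sketch of
the observable difference:

    var arr = [1, { toLocaleString: 42 }];
    try {
      arr.toLocaleString();                  // now throws TypeError
    } catch (e) {
      console.log(e instanceof TypeError);   // true
    }
    console.log([null, undefined].toLocaleString());  // "," since null and
                                                      // undefined still map
                                                      // to the empty string
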
@@ -331,8 +328,9 @@ function SimpleSlice(array, start_i, del_count, len, deleted_elements) {
// would be the appropriate test. We follow KJS in consulting the
// prototype.
var current = array[index];
- if (!IS_UNDEFINED(current) || index in array)
+ if (!IS_UNDEFINED(current) || index in array) {
deleted_elements[i] = current;
+ }
}
}
@@ -381,18 +379,31 @@ function SimpleMove(array, start_i, del_count, len, num_additional_args) {
function ArrayToString() {
- if (!IS_ARRAY(this)) {
- throw new $TypeError('Array.prototype.toString is not generic');
+ var array;
+ var func;
+ if (IS_ARRAY(this)) {
+ func = this.join;
+ if (func === ArrayJoin) {
+ return Join(this, this.length, ',', ConvertToString);
+ }
+ array = this;
+ } else {
+ array = ToObject(this);
+ func = array.join;
}
- return Join(this, this.length, ',', ConvertToString);
+ if (!IS_SPEC_FUNCTION(func)) {
+ return %_CallFunction(array, ObjectToString);
+ }
+ return %_CallFunction(array, func);
}
function ArrayToLocaleString() {
- if (!IS_ARRAY(this)) {
- throw new $TypeError('Array.prototype.toString is not generic');
- }
- return Join(this, this.length, ',', ConvertToLocaleString);
+ var array = ToObject(this);
+ var arrayLen = array.length;
+ var len = TO_UINT32(arrayLen);
+ if (len === 0) return "";
+ return Join(array, len, ',', ConvertToLocaleString);
}
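
ArrayToString likewise becomes generic in the spirit of ES5 section 15.4.4.2:
a real array with the original join keeps the fast Join path, any other
receiver has its own join called when callable, and ObjectToString is the
fallback. For example:

    var fake = { length: 2, 0: "a", 1: "b",
                 join: function () { return "custom"; } };
    console.log(Array.prototype.toString.call(fake));  // "custom" (own join)

    delete fake.join;
    console.log(Array.prototype.toString.call(fake));  // "[object Object]"

    console.log([1, 2].toString());                    // "1,2" (fast path)
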
@@ -485,12 +496,12 @@ function SparseReverse(array, len) {
if (j_complement <= i) {
high = j;
- while (keys[--high_counter] == j);
+ while (keys[--high_counter] == j) { }
low = j_complement;
}
if (j_complement >= i) {
low = i;
- while (keys[++low_counter] == i);
+ while (keys[++low_counter] == i) { }
high = len - i - 1;
}
@@ -566,10 +577,11 @@ function ArrayShift() {
var first = this[0];
- if (IS_ARRAY(this))
+ if (IS_ARRAY(this)) {
SmartMove(this, 0, 1, len, 0);
- else
+ } else {
SimpleMove(this, 0, 1, len, 0);
+ }
this.length = len - 1;
@@ -586,10 +598,11 @@ function ArrayUnshift(arg1) { // length == 1
var len = TO_UINT32(this.length);
var num_arguments = %_ArgumentsLength();
- if (IS_ARRAY(this))
+ if (IS_ARRAY(this)) {
SmartMove(this, 0, 0, len, num_arguments);
- else
+ } else {
SimpleMove(this, 0, 0, len, num_arguments);
+ }
for (var i = 0; i < num_arguments; i++) {
this[i] = %_Arguments(i);
@@ -993,25 +1006,32 @@ function ArrayFilter(f, receiver) {
["Array.prototype.filter"]);
}
+ // Pull out the length so that modifications to the length in the
+ // loop will not affect the looping and side effects are visible.
+ var array = ToObject(this);
+ var length = ToUint32(array.length);
+
if (!IS_SPEC_FUNCTION(f)) {
throw MakeTypeError('called_non_callable', [ f ]);
}
if (IS_NULL_OR_UNDEFINED(receiver)) {
receiver = %GetDefaultReceiver(f) || receiver;
+ } else if (!IS_SPEC_OBJECT(receiver)) {
+ receiver = ToObject(receiver);
}
- // Pull out the length so that modifications to the length in the
- // loop will not affect the looping.
- var length = ToUint32(this.length);
- var result = [];
- var result_length = 0;
+
+ var result = new $Array();
+ var accumulator = new InternalArray();
+ var accumulator_length = 0;
for (var i = 0; i < length; i++) {
- var current = this[i];
- if (!IS_UNDEFINED(current) || i in this) {
- if (%_CallFunction(receiver, current, i, this, f)) {
- result[result_length++] = current;
+ var current = array[i];
+ if (!IS_UNDEFINED(current) || i in array) {
+ if (%_CallFunction(receiver, current, i, array, f)) {
+ accumulator[accumulator_length++] = current;
}
}
}
+ %MoveArrayContents(accumulator, result);
return result;
}
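
ArrayFilter, and below it forEach, some, every, and map with the same
treatment, now applies ToObject to the receiver and reads length before
validating the callback, and boxes a primitive receiver argument. Two
observable consequences, sketched:

    // 1. The length getter runs before the callback check, so its side
    //    effect is visible even when the call throws.
    var touched = false;
    var obj = { get length() { touched = true; return 0; } };
    try {
      Array.prototype.filter.call(obj, "not a function");
    } catch (e) { /* TypeError: called_non_callable */ }
    console.log(touched);  // true

    // 2. A primitive receiver argument is boxed with ToObject.
    [1].filter(function () {
      console.log(typeof this);  // "object", a String wrapper, not "string"
      return true;
    }, "str");
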
@@ -1022,19 +1042,24 @@ function ArrayForEach(f, receiver) {
["Array.prototype.forEach"]);
}
+ // Pull out the length so that modifications to the length in the
+ // loop will not affect the looping and side effects are visible.
+ var array = ToObject(this);
+ var length = TO_UINT32(array.length);
+
if (!IS_SPEC_FUNCTION(f)) {
throw MakeTypeError('called_non_callable', [ f ]);
}
if (IS_NULL_OR_UNDEFINED(receiver)) {
receiver = %GetDefaultReceiver(f) || receiver;
+ } else if (!IS_SPEC_OBJECT(receiver)) {
+ receiver = ToObject(receiver);
}
- // Pull out the length so that modifications to the length in the
- // loop will not affect the looping.
- var length = TO_UINT32(this.length);
+
for (var i = 0; i < length; i++) {
- var current = this[i];
- if (!IS_UNDEFINED(current) || i in this) {
- %_CallFunction(receiver, current, i, this, f);
+ var current = array[i];
+ if (!IS_UNDEFINED(current) || i in array) {
+ %_CallFunction(receiver, current, i, array, f);
}
}
}
@@ -1048,19 +1073,24 @@ function ArraySome(f, receiver) {
["Array.prototype.some"]);
}
+ // Pull out the length so that modifications to the length in the
+ // loop will not affect the looping and side effects are visible.
+ var array = ToObject(this);
+ var length = TO_UINT32(array.length);
+
if (!IS_SPEC_FUNCTION(f)) {
throw MakeTypeError('called_non_callable', [ f ]);
}
if (IS_NULL_OR_UNDEFINED(receiver)) {
receiver = %GetDefaultReceiver(f) || receiver;
+ } else if (!IS_SPEC_OBJECT(receiver)) {
+ receiver = ToObject(receiver);
}
- // Pull out the length so that modifications to the length in the
- // loop will not affect the looping.
- var length = TO_UINT32(this.length);
+
for (var i = 0; i < length; i++) {
- var current = this[i];
- if (!IS_UNDEFINED(current) || i in this) {
- if (%_CallFunction(receiver, current, i, this, f)) return true;
+ var current = array[i];
+ if (!IS_UNDEFINED(current) || i in array) {
+ if (%_CallFunction(receiver, current, i, array, f)) return true;
}
}
return false;
@@ -1073,19 +1103,24 @@ function ArrayEvery(f, receiver) {
["Array.prototype.every"]);
}
+ // Pull out the length so that modifications to the length in the
+ // loop will not affect the looping and side effects are visible.
+ var array = ToObject(this);
+ var length = TO_UINT32(array.length);
+
if (!IS_SPEC_FUNCTION(f)) {
throw MakeTypeError('called_non_callable', [ f ]);
}
if (IS_NULL_OR_UNDEFINED(receiver)) {
receiver = %GetDefaultReceiver(f) || receiver;
+ } else if (!IS_SPEC_OBJECT(receiver)) {
+ receiver = ToObject(receiver);
}
- // Pull out the length so that modifications to the length in the
- // loop will not affect the looping.
- var length = TO_UINT32(this.length);
+
for (var i = 0; i < length; i++) {
- var current = this[i];
- if (!IS_UNDEFINED(current) || i in this) {
- if (!%_CallFunction(receiver, current, i, this, f)) return false;
+ var current = array[i];
+ if (!IS_UNDEFINED(current) || i in array) {
+ if (!%_CallFunction(receiver, current, i, array, f)) return false;
}
}
return true;
@@ -1097,21 +1132,26 @@ function ArrayMap(f, receiver) {
["Array.prototype.map"]);
}
+ // Pull out the length so that modifications to the length in the
+ // loop will not affect the looping and side effects are visible.
+ var array = ToObject(this);
+ var length = TO_UINT32(array.length);
+
if (!IS_SPEC_FUNCTION(f)) {
throw MakeTypeError('called_non_callable', [ f ]);
}
if (IS_NULL_OR_UNDEFINED(receiver)) {
receiver = %GetDefaultReceiver(f) || receiver;
+ } else if (!IS_SPEC_OBJECT(receiver)) {
+ receiver = ToObject(receiver);
}
- // Pull out the length so that modifications to the length in the
- // loop will not affect the looping.
- var length = TO_UINT32(this.length);
+
var result = new $Array();
var accumulator = new InternalArray(length);
for (var i = 0; i < length; i++) {
- var current = this[i];
- if (!IS_UNDEFINED(current) || i in this) {
- accumulator[i] = %_CallFunction(receiver, current, i, this, f);
+ var current = array[i];
+ if (!IS_UNDEFINED(current) || i in array) {
+ accumulator[i] = %_CallFunction(receiver, current, i, array, f);
}
}
%MoveArrayContents(accumulator, result);
@@ -1245,19 +1285,20 @@ function ArrayReduce(callback, current) {
["Array.prototype.reduce"]);
}
+ // Pull out the length so that modifications to the length in the
+ // loop will not affect the looping and side effects are visible.
+ var array = ToObject(this);
+ var length = ToUint32(array.length);
+
if (!IS_SPEC_FUNCTION(callback)) {
throw MakeTypeError('called_non_callable', [callback]);
}
- // Pull out the length so that modifications to the length in the
- // loop will not affect the looping.
- var length = ToUint32(this.length);
var i = 0;
-
find_initial: if (%_ArgumentsLength() < 2) {
for (; i < length; i++) {
- current = this[i];
- if (!IS_UNDEFINED(current) || i in this) {
+ current = array[i];
+ if (!IS_UNDEFINED(current) || i in array) {
i++;
break find_initial;
}
@@ -1267,9 +1308,9 @@ function ArrayReduce(callback, current) {
var receiver = %GetDefaultReceiver(callback);
for (; i < length; i++) {
- var element = this[i];
- if (!IS_UNDEFINED(element) || i in this) {
- current = %_CallFunction(receiver, current, element, i, this, callback);
+ var element = array[i];
+ if (!IS_UNDEFINED(element) || i in array) {
+ current = %_CallFunction(receiver, current, element, i, array, callback);
}
}
return current;
@@ -1281,15 +1322,20 @@ function ArrayReduceRight(callback, current) {
["Array.prototype.reduceRight"]);
}
+ // Pull out the length so that side effects are visible before the
+ // callback function is checked.
+ var array = ToObject(this);
+ var length = ToUint32(array.length);
+
if (!IS_SPEC_FUNCTION(callback)) {
throw MakeTypeError('called_non_callable', [callback]);
}
- var i = ToUint32(this.length) - 1;
+ var i = length - 1;
find_initial: if (%_ArgumentsLength() < 2) {
for (; i >= 0; i--) {
- current = this[i];
- if (!IS_UNDEFINED(current) || i in this) {
+ current = array[i];
+ if (!IS_UNDEFINED(current) || i in array) {
i--;
break find_initial;
}
@@ -1299,9 +1345,9 @@ function ArrayReduceRight(callback, current) {
var receiver = %GetDefaultReceiver(callback);
for (; i >= 0; i--) {
- var element = this[i];
- if (!IS_UNDEFINED(element) || i in this) {
- current = %_CallFunction(receiver, current, element, i, this, callback);
+ var element = array[i];
+ if (!IS_UNDEFINED(element) || i in array) {
+ current = %_CallFunction(receiver, current, element, i, array, callback);
}
}
return current;
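
reduce and reduceRight get the same reordering: the receiver is converted
with ToObject and its length is read before the callback is checked, so
getter side effects stay visible even on the error path. For instance:

    var read = false;
    var obj = { get length() { read = true; return 0; } };
    try {
      Array.prototype.reduceRight.call(obj, null, 0);
    } catch (e) { /* TypeError: called_non_callable */ }
    console.log(read);  // true, length was read before the callback check
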
@@ -1342,7 +1388,7 @@ function SetUpArray() {
// set their names.
// Manipulate the length of some of the functions to meet
// expectations set by ECMA-262 or Mozilla.
- InstallFunctionsOnHiddenPrototype($Array.prototype, DONT_ENUM, $Array(
+ InstallFunctions($Array.prototype, DONT_ENUM, $Array(
"toString", getFunction("toString", ArrayToString),
"toLocaleString", getFunction("toLocaleString", ArrayToLocaleString),
"join", getFunction("join", ArrayJoin),
diff --git a/deps/v8/src/assembler.cc b/deps/v8/src/assembler.cc
index ad5f35081..bc05c0180 100644
--- a/deps/v8/src/assembler.cc
+++ b/deps/v8/src/assembler.cc
@@ -38,6 +38,7 @@
#include "deoptimizer.h"
#include "execution.h"
#include "ic-inl.h"
+#include "incremental-marking.h"
#include "factory.h"
#include "runtime.h"
#include "runtime-profiler.h"
@@ -47,6 +48,7 @@
#include "ast.h"
#include "regexp-macro-assembler.h"
#include "platform.h"
+#include "store-buffer.h"
// Include native regexp-macro-assembler.
#ifndef V8_INTERPRETED_REGEXP
#if V8_TARGET_ARCH_IA32
@@ -516,6 +518,7 @@ void RelocIterator::next() {
RelocIterator::RelocIterator(Code* code, int mode_mask) {
+ rinfo_.host_ = code;
rinfo_.pc_ = code->instruction_start();
rinfo_.data_ = 0;
// Relocation info is read backwards.
@@ -736,9 +739,38 @@ ExternalReference::ExternalReference(const SCTableReference& table_ref)
: address_(table_ref.address()) {}
+ExternalReference ExternalReference::
+ incremental_marking_record_write_function(Isolate* isolate) {
+ return ExternalReference(Redirect(
+ isolate,
+ FUNCTION_ADDR(IncrementalMarking::RecordWriteFromCode)));
+}
+
+
+ExternalReference ExternalReference::
+ incremental_evacuation_record_write_function(Isolate* isolate) {
+ return ExternalReference(Redirect(
+ isolate,
+ FUNCTION_ADDR(IncrementalMarking::RecordWriteForEvacuationFromCode)));
+}
+
+
+ExternalReference ExternalReference::
+ store_buffer_overflow_function(Isolate* isolate) {
+ return ExternalReference(Redirect(
+ isolate,
+ FUNCTION_ADDR(StoreBuffer::StoreBufferOverflow)));
+}
+
+
+ExternalReference ExternalReference::flush_icache_function(Isolate* isolate) {
+ return ExternalReference(Redirect(isolate, FUNCTION_ADDR(CPU::FlushICache)));
+}
+
+
ExternalReference ExternalReference::perform_gc_function(Isolate* isolate) {
- return ExternalReference(Redirect(isolate,
- FUNCTION_ADDR(Runtime::PerformGC)));
+ return
+ ExternalReference(Redirect(isolate, FUNCTION_ADDR(Runtime::PerformGC)));
}
@@ -802,19 +834,8 @@ ExternalReference ExternalReference::keyed_lookup_cache_field_offsets(
}
-ExternalReference ExternalReference::the_hole_value_location(Isolate* isolate) {
- return ExternalReference(isolate->factory()->the_hole_value().location());
-}
-
-
-ExternalReference ExternalReference::arguments_marker_location(
- Isolate* isolate) {
- return ExternalReference(isolate->factory()->arguments_marker().location());
-}
-
-
-ExternalReference ExternalReference::roots_address(Isolate* isolate) {
- return ExternalReference(isolate->heap()->roots_address());
+ExternalReference ExternalReference::roots_array_start(Isolate* isolate) {
+ return ExternalReference(isolate->heap()->roots_array_start());
}
@@ -840,9 +861,14 @@ ExternalReference ExternalReference::new_space_start(Isolate* isolate) {
}
+ExternalReference ExternalReference::store_buffer_top(Isolate* isolate) {
+ return ExternalReference(isolate->heap()->store_buffer()->TopAddress());
+}
+
+
ExternalReference ExternalReference::new_space_mask(Isolate* isolate) {
- Address mask = reinterpret_cast<Address>(isolate->heap()->NewSpaceMask());
- return ExternalReference(mask);
+ return ExternalReference(reinterpret_cast<Address>(
+ isolate->heap()->NewSpaceMask()));
}
@@ -1025,6 +1051,11 @@ static double math_cos_double(double x) {
}
+static double math_tan_double(double x) {
+ return tan(x);
+}
+
+
static double math_log_double(double x) {
return log(x);
}
@@ -1046,6 +1077,14 @@ ExternalReference ExternalReference::math_cos_double_function(
}
+ExternalReference ExternalReference::math_tan_double_function(
+ Isolate* isolate) {
+ return ExternalReference(Redirect(isolate,
+ FUNCTION_ADDR(math_tan_double),
+ BUILTIN_FP_CALL));
+}
+
+
ExternalReference ExternalReference::math_log_double_function(
Isolate* isolate) {
return ExternalReference(Redirect(isolate,
@@ -1111,6 +1150,23 @@ static int native_compare_doubles(double y, double x) {
}
+bool EvalComparison(Token::Value op, double op1, double op2) {
+ ASSERT(Token::IsCompareOp(op));
+ switch (op) {
+ case Token::EQ:
+ case Token::EQ_STRICT: return (op1 == op2);
+ case Token::NE: return (op1 != op2);
+ case Token::LT: return (op1 < op2);
+ case Token::GT: return (op1 > op2);
+ case Token::LTE: return (op1 <= op2);
+ case Token::GTE: return (op1 >= op2);
+ default:
+ UNREACHABLE();
+ return false;
+ }
+}
+
+
ExternalReference ExternalReference::double_fp_operation(
Token::Value operation, Isolate* isolate) {
typedef double BinaryFPOperation(double x, double y);
diff --git a/deps/v8/src/assembler.h b/deps/v8/src/assembler.h
index d58034df0..5c25768e6 100644
--- a/deps/v8/src/assembler.h
+++ b/deps/v8/src/assembler.h
@@ -143,6 +143,9 @@ class Label BASE_EMBEDDED {
};
+enum SaveFPRegsMode { kDontSaveFPRegs, kSaveFPRegs };
+
+
// -----------------------------------------------------------------------------
// Relocation information
@@ -216,8 +219,9 @@ class RelocInfo BASE_EMBEDDED {
RelocInfo() {}
- RelocInfo(byte* pc, Mode rmode, intptr_t data)
- : pc_(pc), rmode_(rmode), data_(data) {
+
+ RelocInfo(byte* pc, Mode rmode, intptr_t data, Code* host)
+ : pc_(pc), rmode_(rmode), data_(data), host_(host) {
}
static inline bool IsConstructCall(Mode mode) {
@@ -226,6 +230,9 @@ class RelocInfo BASE_EMBEDDED {
static inline bool IsCodeTarget(Mode mode) {
return mode <= LAST_CODE_ENUM;
}
+ static inline bool IsEmbeddedObject(Mode mode) {
+ return mode == EMBEDDED_OBJECT;
+ }
// Is the relocation mode affected by GC?
static inline bool IsGCRelocMode(Mode mode) {
return mode <= LAST_GCED_ENUM;
@@ -258,6 +265,7 @@ class RelocInfo BASE_EMBEDDED {
void set_pc(byte* pc) { pc_ = pc; }
Mode rmode() const { return rmode_; }
intptr_t data() const { return data_; }
+ Code* host() const { return host_; }
// Apply a relocation by delta bytes
INLINE(void apply(intptr_t delta));
@@ -271,14 +279,17 @@ class RelocInfo BASE_EMBEDDED {
// this relocation applies to;
// can only be called if IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY
INLINE(Address target_address());
- INLINE(void set_target_address(Address target));
+ INLINE(void set_target_address(Address target,
+ WriteBarrierMode mode = UPDATE_WRITE_BARRIER));
INLINE(Object* target_object());
INLINE(Handle<Object> target_object_handle(Assembler* origin));
INLINE(Object** target_object_address());
- INLINE(void set_target_object(Object* target));
+ INLINE(void set_target_object(Object* target,
+ WriteBarrierMode mode = UPDATE_WRITE_BARRIER));
INLINE(JSGlobalPropertyCell* target_cell());
INLINE(Handle<JSGlobalPropertyCell> target_cell_handle());
- INLINE(void set_target_cell(JSGlobalPropertyCell* cell));
+ INLINE(void set_target_cell(JSGlobalPropertyCell* cell,
+ WriteBarrierMode mode = UPDATE_WRITE_BARRIER));
// Read the address of the word containing the target_address in an
@@ -353,6 +364,7 @@ class RelocInfo BASE_EMBEDDED {
byte* pc_;
Mode rmode_;
intptr_t data_;
+ Code* host_;
#ifdef V8_TARGET_ARCH_MIPS
// Code and Embedded Object pointers in mips are stored split
// across two consecutive 32-bit instructions. Heap management
@@ -561,6 +573,13 @@ class ExternalReference BASE_EMBEDDED {
// pattern. This means that they have to be added to the
// ExternalReferenceTable in serialize.cc manually.
+ static ExternalReference incremental_marking_record_write_function(
+ Isolate* isolate);
+ static ExternalReference incremental_evacuation_record_write_function(
+ Isolate* isolate);
+ static ExternalReference store_buffer_overflow_function(
+ Isolate* isolate);
+ static ExternalReference flush_icache_function(Isolate* isolate);
static ExternalReference perform_gc_function(Isolate* isolate);
static ExternalReference fill_heap_number_with_random_function(
Isolate* isolate);
@@ -577,14 +596,8 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference keyed_lookup_cache_keys(Isolate* isolate);
static ExternalReference keyed_lookup_cache_field_offsets(Isolate* isolate);
- // Static variable Factory::the_hole_value.location()
- static ExternalReference the_hole_value_location(Isolate* isolate);
-
- // Static variable Factory::arguments_marker.location()
- static ExternalReference arguments_marker_location(Isolate* isolate);
-
- // Static variable Heap::roots_address()
- static ExternalReference roots_address(Isolate* isolate);
+ // Static variable Heap::roots_array_start()
+ static ExternalReference roots_array_start(Isolate* isolate);
// Static variable StackGuard::address_of_jslimit()
static ExternalReference address_of_stack_limit(Isolate* isolate);
@@ -606,6 +619,10 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference new_space_start(Isolate* isolate);
static ExternalReference new_space_mask(Isolate* isolate);
static ExternalReference heap_always_allocate_scope_depth(Isolate* isolate);
+ static ExternalReference new_space_mark_bits(Isolate* isolate);
+
+ // Write barrier.
+ static ExternalReference store_buffer_top(Isolate* isolate);
// Used for fast allocation in generated code.
static ExternalReference new_space_allocation_top_address(Isolate* isolate);
@@ -635,6 +652,7 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference math_sin_double_function(Isolate* isolate);
static ExternalReference math_cos_double_function(Isolate* isolate);
+ static ExternalReference math_tan_double_function(Isolate* isolate);
static ExternalReference math_log_double_function(Isolate* isolate);
Address address() const {return reinterpret_cast<Address>(address_);}
@@ -799,33 +817,33 @@ class PreservePositionScope BASE_EMBEDDED {
// -----------------------------------------------------------------------------
// Utility functions
-static inline bool is_intn(int x, int n) {
+inline bool is_intn(int x, int n) {
return -(1 << (n-1)) <= x && x < (1 << (n-1));
}
-static inline bool is_int8(int x) { return is_intn(x, 8); }
-static inline bool is_int16(int x) { return is_intn(x, 16); }
-static inline bool is_int18(int x) { return is_intn(x, 18); }
-static inline bool is_int24(int x) { return is_intn(x, 24); }
+inline bool is_int8(int x) { return is_intn(x, 8); }
+inline bool is_int16(int x) { return is_intn(x, 16); }
+inline bool is_int18(int x) { return is_intn(x, 18); }
+inline bool is_int24(int x) { return is_intn(x, 24); }
-static inline bool is_uintn(int x, int n) {
+inline bool is_uintn(int x, int n) {
return (x & -(1 << n)) == 0;
}
-static inline bool is_uint2(int x) { return is_uintn(x, 2); }
-static inline bool is_uint3(int x) { return is_uintn(x, 3); }
-static inline bool is_uint4(int x) { return is_uintn(x, 4); }
-static inline bool is_uint5(int x) { return is_uintn(x, 5); }
-static inline bool is_uint6(int x) { return is_uintn(x, 6); }
-static inline bool is_uint8(int x) { return is_uintn(x, 8); }
-static inline bool is_uint10(int x) { return is_uintn(x, 10); }
-static inline bool is_uint12(int x) { return is_uintn(x, 12); }
-static inline bool is_uint16(int x) { return is_uintn(x, 16); }
-static inline bool is_uint24(int x) { return is_uintn(x, 24); }
-static inline bool is_uint26(int x) { return is_uintn(x, 26); }
-static inline bool is_uint28(int x) { return is_uintn(x, 28); }
-
-static inline int NumberOfBitsSet(uint32_t x) {
+inline bool is_uint2(int x) { return is_uintn(x, 2); }
+inline bool is_uint3(int x) { return is_uintn(x, 3); }
+inline bool is_uint4(int x) { return is_uintn(x, 4); }
+inline bool is_uint5(int x) { return is_uintn(x, 5); }
+inline bool is_uint6(int x) { return is_uintn(x, 6); }
+inline bool is_uint8(int x) { return is_uintn(x, 8); }
+inline bool is_uint10(int x) { return is_uintn(x, 10); }
+inline bool is_uint12(int x) { return is_uintn(x, 12); }
+inline bool is_uint16(int x) { return is_uintn(x, 16); }
+inline bool is_uint24(int x) { return is_uintn(x, 24); }
+inline bool is_uint26(int x) { return is_uintn(x, 26); }
+inline bool is_uint28(int x) { return is_uintn(x, 28); }
+
+inline int NumberOfBitsSet(uint32_t x) {
unsigned int num_bits_set;
for (num_bits_set = 0; x; x >>= 1) {
num_bits_set += x & 1;
@@ -833,6 +851,8 @@ static inline int NumberOfBitsSet(uint32_t x) {
return num_bits_set;
}
+bool EvalComparison(Token::Value op, double op1, double op2);
+
// Computes pow(x, y) with the special cases in the spec for Math.pow.
double power_double_int(double x, int y);
double power_double_double(double x, double y);
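
The is_intn/is_uintn family drops static, presumably so the header provides
one ordinary inline definition instead of a per-translation-unit copy; the
predicates themselves still test whether a value fits in an n-bit signed or
unsigned field. A JS transcription of the bit tricks with worked values
(faithful for n up to 31, matching 32-bit int arithmetic):

    function isIntN(x, n)  { return -(1 << (n - 1)) <= x && x < (1 << (n - 1)); }
    function isUintN(x, n) { return (x & -(1 << n)) === 0; }

    console.log(isIntN(127, 8), isIntN(128, 8));    // true false: [-128, 127]
    console.log(isUintN(255, 8), isUintN(256, 8));  // true false: [0, 255]
    console.log(isUintN(-1, 8));                    // false: sign bits set
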
diff --git a/deps/v8/src/ast-inl.h b/deps/v8/src/ast-inl.h
deleted file mode 100644
index 731ad2ff3..000000000
--- a/deps/v8/src/ast-inl.h
+++ /dev/null
@@ -1,121 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_AST_INL_H_
-#define V8_AST_INL_H_
-
-#include "v8.h"
-
-#include "ast.h"
-#include "scopes.h"
-
-namespace v8 {
-namespace internal {
-
-
-SwitchStatement::SwitchStatement(Isolate* isolate,
- ZoneStringList* labels)
- : BreakableStatement(isolate, labels, TARGET_FOR_ANONYMOUS),
- tag_(NULL), cases_(NULL) {
-}
-
-
-Block::Block(Isolate* isolate,
- ZoneStringList* labels,
- int capacity,
- bool is_initializer_block)
- : BreakableStatement(isolate, labels, TARGET_FOR_NAMED_ONLY),
- statements_(capacity),
- is_initializer_block_(is_initializer_block),
- block_scope_(NULL) {
-}
-
-
-BreakableStatement::BreakableStatement(Isolate* isolate,
- ZoneStringList* labels,
- Type type)
- : labels_(labels),
- type_(type),
- entry_id_(GetNextId(isolate)),
- exit_id_(GetNextId(isolate)) {
- ASSERT(labels == NULL || labels->length() > 0);
-}
-
-
-IterationStatement::IterationStatement(Isolate* isolate, ZoneStringList* labels)
- : BreakableStatement(isolate, labels, TARGET_FOR_ANONYMOUS),
- body_(NULL),
- continue_target_(),
- osr_entry_id_(GetNextId(isolate)) {
-}
-
-
-DoWhileStatement::DoWhileStatement(Isolate* isolate, ZoneStringList* labels)
- : IterationStatement(isolate, labels),
- cond_(NULL),
- condition_position_(-1),
- continue_id_(GetNextId(isolate)),
- back_edge_id_(GetNextId(isolate)) {
-}
-
-
-WhileStatement::WhileStatement(Isolate* isolate, ZoneStringList* labels)
- : IterationStatement(isolate, labels),
- cond_(NULL),
- may_have_function_literal_(true),
- body_id_(GetNextId(isolate)) {
-}
-
-
-ForStatement::ForStatement(Isolate* isolate, ZoneStringList* labels)
- : IterationStatement(isolate, labels),
- init_(NULL),
- cond_(NULL),
- next_(NULL),
- may_have_function_literal_(true),
- loop_variable_(NULL),
- continue_id_(GetNextId(isolate)),
- body_id_(GetNextId(isolate)) {
-}
-
-
-ForInStatement::ForInStatement(Isolate* isolate, ZoneStringList* labels)
- : IterationStatement(isolate, labels),
- each_(NULL),
- enumerable_(NULL),
- assignment_id_(GetNextId(isolate)) {
-}
-
-
-bool FunctionLiteral::strict_mode() const {
- return scope()->is_strict_mode();
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_AST_INL_H_
diff --git a/deps/v8/src/ast.cc b/deps/v8/src/ast.cc
index 418cc432b..13e55894d 100644
--- a/deps/v8/src/ast.cc
+++ b/deps/v8/src/ast.cc
@@ -48,16 +48,19 @@ AST_NODE_LIST(DECL_ACCEPT)
// ----------------------------------------------------------------------------
// Implementation of other node functionality.
-Assignment* ExpressionStatement::StatementAsSimpleAssignment() {
- return (expression()->AsAssignment() != NULL &&
- !expression()->AsAssignment()->is_compound())
- ? expression()->AsAssignment()
- : NULL;
+
+bool Expression::IsSmiLiteral() {
+ return AsLiteral() != NULL && AsLiteral()->handle()->IsSmi();
+}
+
+
+bool Expression::IsStringLiteral() {
+ return AsLiteral() != NULL && AsLiteral()->handle()->IsString();
}
-CountOperation* ExpressionStatement::StatementAsCountOperation() {
- return expression()->AsCountOperation();
+bool Expression::IsNullLiteral() {
+ return AsLiteral() != NULL && AsLiteral()->handle()->IsNull();
}
@@ -66,7 +69,6 @@ VariableProxy::VariableProxy(Isolate* isolate, Variable* var)
name_(var->name()),
var_(NULL), // Will be set by the call to BindTo.
is_this_(var->is_this()),
- inside_with_(false),
is_trivial_(false),
position_(RelocInfo::kNoPosition) {
BindTo(var);
@@ -76,13 +78,11 @@ VariableProxy::VariableProxy(Isolate* isolate, Variable* var)
VariableProxy::VariableProxy(Isolate* isolate,
Handle<String> name,
bool is_this,
- bool inside_with,
int position)
: Expression(isolate),
name_(name),
var_(NULL),
is_this_(is_this),
- inside_with_(inside_with),
is_trivial_(false),
position_(position) {
// Names must be canonicalized for fast equality checks.
@@ -157,6 +157,21 @@ bool FunctionLiteral::AllowsLazyCompilation() {
}
+int FunctionLiteral::start_position() const {
+ return scope()->start_position();
+}
+
+
+int FunctionLiteral::end_position() const {
+ return scope()->end_position();
+}
+
+
+LanguageMode FunctionLiteral::language_mode() const {
+ return scope()->language_mode();
+}
+
+
ObjectLiteral::Property::Property(Literal* key, Expression* value) {
emit_store_ = true;
key_ = key;
@@ -327,56 +342,77 @@ bool BinaryOperation::ResultOverwriteAllowed() {
}
-bool CompareOperation::IsLiteralCompareTypeof(Expression** expr,
- Handle<String>* check) {
- if (op_ != Token::EQ && op_ != Token::EQ_STRICT) return false;
-
- UnaryOperation* left_unary = left_->AsUnaryOperation();
- UnaryOperation* right_unary = right_->AsUnaryOperation();
- Literal* left_literal = left_->AsLiteral();
- Literal* right_literal = right_->AsLiteral();
-
- // Check for the pattern: typeof <expression> == <string literal>.
- if (left_unary != NULL && left_unary->op() == Token::TYPEOF &&
- right_literal != NULL && right_literal->handle()->IsString()) {
- *expr = left_unary->expression();
- *check = Handle<String>::cast(right_literal->handle());
+static bool IsTypeof(Expression* expr) {
+ UnaryOperation* maybe_unary = expr->AsUnaryOperation();
+ return maybe_unary != NULL && maybe_unary->op() == Token::TYPEOF;
+}
+
+
+// Check for the pattern: typeof <expression> equals <string literal>.
+static bool MatchLiteralCompareTypeof(Expression* left,
+ Token::Value op,
+ Expression* right,
+ Expression** expr,
+ Handle<String>* check) {
+ if (IsTypeof(left) && right->IsStringLiteral() && Token::IsEqualityOp(op)) {
+ *expr = left->AsUnaryOperation()->expression();
+ *check = Handle<String>::cast(right->AsLiteral()->handle());
return true;
}
+ return false;
+}
+
- // Check for the pattern: <string literal> == typeof <expression>.
- if (right_unary != NULL && right_unary->op() == Token::TYPEOF &&
- left_literal != NULL && left_literal->handle()->IsString()) {
- *expr = right_unary->expression();
- *check = Handle<String>::cast(left_literal->handle());
+bool CompareOperation::IsLiteralCompareTypeof(Expression** expr,
+ Handle<String>* check) {
+ return MatchLiteralCompareTypeof(left_, op_, right_, expr, check) ||
+ MatchLiteralCompareTypeof(right_, op_, left_, expr, check);
+}
+
+
+static bool IsVoidOfLiteral(Expression* expr) {
+ UnaryOperation* maybe_unary = expr->AsUnaryOperation();
+ return maybe_unary != NULL &&
+ maybe_unary->op() == Token::VOID &&
+ maybe_unary->expression()->AsLiteral() != NULL;
+}
+
+
+// Check for the pattern: void <literal> equals <expression>
+static bool MatchLiteralCompareUndefined(Expression* left,
+ Token::Value op,
+ Expression* right,
+ Expression** expr) {
+ if (IsVoidOfLiteral(left) && Token::IsEqualityOp(op)) {
+ *expr = right;
return true;
}
-
return false;
}
bool CompareOperation::IsLiteralCompareUndefined(Expression** expr) {
- if (op_ != Token::EQ_STRICT) return false;
+ return MatchLiteralCompareUndefined(left_, op_, right_, expr) ||
+ MatchLiteralCompareUndefined(right_, op_, left_, expr);
+}
- UnaryOperation* left_unary = left_->AsUnaryOperation();
- UnaryOperation* right_unary = right_->AsUnaryOperation();
- // Check for the pattern: <expression> === void <literal>.
- if (right_unary != NULL && right_unary->op() == Token::VOID &&
- right_unary->expression()->AsLiteral() != NULL) {
- *expr = left_;
+// Check for the pattern: null equals <expression>
+static bool MatchLiteralCompareNull(Expression* left,
+ Token::Value op,
+ Expression* right,
+ Expression** expr) {
+ if (left->IsNullLiteral() && Token::IsEqualityOp(op)) {
+ *expr = right;
return true;
}
+ return false;
+}
- // Check for the pattern: void <literal> === <expression>.
- if (left_unary != NULL && left_unary->op() == Token::VOID &&
- left_unary->expression()->AsLiteral() != NULL) {
- *expr = right_;
- return true;
- }
- return false;
+bool CompareOperation::IsLiteralCompareNull(Expression** expr) {
+ return MatchLiteralCompareNull(left_, op_, right_, expr) ||
+ MatchLiteralCompareNull(right_, op_, left_, expr);
}
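
The three matchers (typeof, void-literal, and null comparisons) are now
order-independent helpers keyed on Token::IsEqualityOp, each tried once per
operand order. The source shapes they canonicalize look like:

    var x = 1;
    typeof x == "function";   // MatchLiteralCompareTypeof
    "function" === typeof x;  //   (expr = x, check = "function")
    x === void 0;             // MatchLiteralCompareUndefined
    void 0 == x;
    x == null;                // MatchLiteralCompareNull
    null === x;
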
@@ -447,7 +483,7 @@ bool FunctionLiteral::IsInlineable() const {
bool ThisFunction::IsInlineable() const {
- return false;
+ return true;
}
@@ -529,7 +565,9 @@ bool Conditional::IsInlineable() const {
bool VariableProxy::IsInlineable() const {
- return var()->IsUnallocated() || var()->IsStackAllocated();
+ return var()->IsUnallocated()
+ || var()->IsStackAllocated()
+ || var()->IsContextSlot();
}
@@ -598,11 +636,6 @@ bool CompareOperation::IsInlineable() const {
}
-bool CompareToNull::IsInlineable() const {
- return expression()->IsInlineable();
-}
-
-
bool CountOperation::IsInlineable() const {
return expression()->IsInlineable();
}
@@ -677,6 +710,10 @@ void CaseClause::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
TypeInfo info = oracle->SwitchType(this);
if (info.IsSmi()) {
compare_type_ = SMI_ONLY;
+ } else if (info.IsSymbol()) {
+ compare_type_ = SYMBOL_ONLY;
+ } else if (info.IsNonSymbol()) {
+ compare_type_ = STRING_ONLY;
} else if (info.IsNonPrimitive()) {
compare_type_ = OBJECT_ONLY;
} else {
@@ -705,7 +742,7 @@ bool Call::ComputeTarget(Handle<Map> type, Handle<String> name) {
holder_ = Handle<JSObject>::null();
}
while (true) {
- LookupResult lookup;
+ LookupResult lookup(type->GetIsolate());
type->LookupInDescriptors(NULL, *name, &lookup);
// If the function wasn't found directly in the map, we start
// looking upwards through the prototype chain.
@@ -746,37 +783,41 @@ bool Call::ComputeGlobalTarget(Handle<GlobalObject> global,
void Call::RecordTypeFeedback(TypeFeedbackOracle* oracle,
CallKind call_kind) {
+ is_monomorphic_ = oracle->CallIsMonomorphic(this);
Property* property = expression()->AsProperty();
- ASSERT(property != NULL);
- // Specialize for the receiver types seen at runtime.
- Literal* key = property->key()->AsLiteral();
- ASSERT(key != NULL && key->handle()->IsString());
- Handle<String> name = Handle<String>::cast(key->handle());
- receiver_types_.Clear();
- oracle->CallReceiverTypes(this, name, call_kind, &receiver_types_);
+ if (property == NULL) {
+ // Function call. Specialize for monomorphic calls.
+ if (is_monomorphic_) target_ = oracle->GetCallTarget(this);
+ } else {
+ // Method call. Specialize for the receiver types seen at runtime.
+ Literal* key = property->key()->AsLiteral();
+ ASSERT(key != NULL && key->handle()->IsString());
+ Handle<String> name = Handle<String>::cast(key->handle());
+ receiver_types_.Clear();
+ oracle->CallReceiverTypes(this, name, call_kind, &receiver_types_);
#ifdef DEBUG
- if (FLAG_enable_slow_asserts) {
- int length = receiver_types_.length();
- for (int i = 0; i < length; i++) {
- Handle<Map> map = receiver_types_.at(i);
- ASSERT(!map.is_null() && *map != NULL);
+ if (FLAG_enable_slow_asserts) {
+ int length = receiver_types_.length();
+ for (int i = 0; i < length; i++) {
+ Handle<Map> map = receiver_types_.at(i);
+ ASSERT(!map.is_null() && *map != NULL);
+ }
}
- }
#endif
- is_monomorphic_ = oracle->CallIsMonomorphic(this);
- check_type_ = oracle->GetCallCheckType(this);
- if (is_monomorphic_) {
- Handle<Map> map;
- if (receiver_types_.length() > 0) {
- ASSERT(check_type_ == RECEIVER_MAP_CHECK);
- map = receiver_types_.at(0);
- } else {
- ASSERT(check_type_ != RECEIVER_MAP_CHECK);
- holder_ = Handle<JSObject>(
- oracle->GetPrototypeForPrimitiveCheck(check_type_));
- map = Handle<Map>(holder_->map());
+ check_type_ = oracle->GetCallCheckType(this);
+ if (is_monomorphic_) {
+ Handle<Map> map;
+ if (receiver_types_.length() > 0) {
+ ASSERT(check_type_ == RECEIVER_MAP_CHECK);
+ map = receiver_types_.at(0);
+ } else {
+ ASSERT(check_type_ != RECEIVER_MAP_CHECK);
+ holder_ = Handle<JSObject>(
+ oracle->GetPrototypeForPrimitiveCheck(check_type_));
+ map = Handle<Map>(holder_->map());
+ }
+ is_monomorphic_ = ComputeTarget(map, name);
}
- is_monomorphic_ = ComputeTarget(map, name);
}
}
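
Call::RecordTypeFeedback no longer assumes the callee is a property load: a
plain function call records a monomorphic target directly via GetCallTarget,
while the old receiver-type logic is kept for method calls. Schematically:

    function f() {}
    var o = { m: function () {} };

    f();     // function call: monomorphic target recorded directly
    o.m();   // method call: receiver maps collected for the property lookup
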
@@ -856,8 +897,6 @@ FOR_EACH_REG_EXP_TREE_TYPE(MAKE_TYPE_CASE)
FOR_EACH_REG_EXP_TREE_TYPE(MAKE_TYPE_CASE)
#undef MAKE_TYPE_CASE
-RegExpEmpty RegExpEmpty::kInstance;
-
static Interval ListCaptureRegisters(ZoneList<RegExpTree*>* children) {
Interval result = Interval::Empty();
diff --git a/deps/v8/src/ast.h b/deps/v8/src/ast.h
index b56205f9a..805526af5 100644
--- a/deps/v8/src/ast.h
+++ b/deps/v8/src/ast.h
@@ -90,7 +90,6 @@ namespace internal {
V(CountOperation) \
V(BinaryOperation) \
V(CompareOperation) \
- V(CompareToNull) \
V(ThisFunction)
#define AST_NODE_LIST(V) \
@@ -119,7 +118,6 @@ typedef ZoneList<Handle<Object> > ZoneObjectList;
#define DECLARE_NODE_TYPE(type) \
virtual void Accept(AstVisitor* v); \
virtual AstNode::Type node_type() const { return AstNode::k##type; } \
- virtual type* As##type() { return this; }
class AstNode: public ZoneObject {
@@ -154,7 +152,8 @@ class AstNode: public ZoneObject {
// Type testing & conversion functions overridden by concrete subclasses.
#define DECLARE_NODE_FUNCTIONS(type) \
- virtual type* As##type() { return NULL; }
+ bool Is##type() { return node_type() == AstNode::k##type; } \
+ type* As##type() { return Is##type() ? reinterpret_cast<type*>(this) : NULL; }
AST_NODE_LIST(DECLARE_NODE_FUNCTIONS)
#undef DECLARE_NODE_FUNCTIONS
@@ -197,9 +196,6 @@ class Statement: public AstNode {
virtual Statement* AsStatement() { return this; }
- virtual Assignment* StatementAsSimpleAssignment() { return NULL; }
- virtual CountOperation* StatementAsCountOperation() { return NULL; }
-
bool IsEmpty() { return AsEmptyStatement() != NULL; }
void set_statement_pos(int statement_pos) { statement_pos_ = statement_pos; }
@@ -265,7 +261,6 @@ class Expression: public AstNode {
virtual Expression* AsExpression() { return this; }
- virtual bool IsTrivial() { return false; }
virtual bool IsValidLeftHandSide() { return false; }
// Helpers for ToBoolean conversion.
@@ -277,27 +272,24 @@ class Expression: public AstNode {
// names because [] for string objects is handled only by keyed ICs.
virtual bool IsPropertyName() { return false; }
- // Mark the expression as being compiled as an expression
- // statement. This is used to transform postfix increments to
- // (faster) prefix increments.
- virtual void MarkAsStatement() { /* do nothing */ }
-
// True iff the result can be safely overwritten (to avoid allocation).
// False for operations that can return one of their operands.
virtual bool ResultOverwriteAllowed() { return false; }
// True iff the expression is a literal represented as a smi.
- virtual bool IsSmiLiteral() { return false; }
+ bool IsSmiLiteral();
+
+ // True iff the expression is a string literal.
+ bool IsStringLiteral();
+
+ // True iff the expression is the null literal.
+ bool IsNullLiteral();
// Type feedback information for assignments and properties.
virtual bool IsMonomorphic() {
UNREACHABLE();
return false;
}
- virtual bool IsArrayLength() {
- UNREACHABLE();
- return false;
- }
virtual SmallMapList* GetReceiverTypes() {
UNREACHABLE();
return NULL;
@@ -343,7 +335,14 @@ class BreakableStatement: public Statement {
int ExitId() const { return exit_id_; }
protected:
- BreakableStatement(Isolate* isolate, ZoneStringList* labels, Type type);
+ BreakableStatement(Isolate* isolate, ZoneStringList* labels, Type type)
+ : labels_(labels),
+ type_(type),
+ entry_id_(GetNextId(isolate)),
+ exit_id_(GetNextId(isolate)) {
+ ASSERT(labels == NULL || labels->length() > 0);
+ }
+
private:
ZoneStringList* labels_;
@@ -356,22 +355,18 @@ class BreakableStatement: public Statement {
class Block: public BreakableStatement {
public:
- inline Block(Isolate* isolate,
- ZoneStringList* labels,
- int capacity,
- bool is_initializer_block);
-
- DECLARE_NODE_TYPE(Block)
-
- virtual Assignment* StatementAsSimpleAssignment() {
- if (statements_.length() != 1) return NULL;
- return statements_[0]->StatementAsSimpleAssignment();
+ Block(Isolate* isolate,
+ ZoneStringList* labels,
+ int capacity,
+ bool is_initializer_block)
+ : BreakableStatement(isolate, labels, TARGET_FOR_NAMED_ONLY),
+ statements_(capacity),
+ is_initializer_block_(is_initializer_block),
+ block_scope_(NULL) {
}
- virtual CountOperation* StatementAsCountOperation() {
- if (statements_.length() != 1) return NULL;
- return statements_[0]->StatementAsCountOperation();
- }
+
+ DECLARE_NODE_TYPE(Block)
virtual bool IsInlineable() const;
@@ -393,31 +388,32 @@ class Block: public BreakableStatement {
class Declaration: public AstNode {
public:
Declaration(VariableProxy* proxy,
- Variable::Mode mode,
+ VariableMode mode,
FunctionLiteral* fun,
Scope* scope)
: proxy_(proxy),
mode_(mode),
fun_(fun),
scope_(scope) {
- ASSERT(mode == Variable::VAR ||
- mode == Variable::CONST ||
- mode == Variable::LET);
+ ASSERT(mode == VAR ||
+ mode == CONST ||
+ mode == CONST_HARMONY ||
+ mode == LET);
// At the moment there are no "const functions" in JavaScript...
- ASSERT(fun == NULL || mode == Variable::VAR || mode == Variable::LET);
+ ASSERT(fun == NULL || mode == VAR || mode == LET);
}
DECLARE_NODE_TYPE(Declaration)
VariableProxy* proxy() const { return proxy_; }
- Variable::Mode mode() const { return mode_; }
+ VariableMode mode() const { return mode_; }
FunctionLiteral* fun() const { return fun_; } // may be NULL
virtual bool IsInlineable() const;
Scope* scope() const { return scope_; }
private:
VariableProxy* proxy_;
- Variable::Mode mode_;
+ VariableMode mode_;
FunctionLiteral* fun_;
// Nested scope from which the declaration originated.
@@ -441,7 +437,11 @@ class IterationStatement: public BreakableStatement {
Label* continue_target() { return &continue_target_; }
protected:
- inline IterationStatement(Isolate* isolate, ZoneStringList* labels);
+ IterationStatement(Isolate* isolate, ZoneStringList* labels)
+ : BreakableStatement(isolate, labels, TARGET_FOR_ANONYMOUS),
+ body_(NULL),
+ osr_entry_id_(GetNextId(isolate)) {
+ }
void Initialize(Statement* body) {
body_ = body;
@@ -456,7 +456,13 @@ class IterationStatement: public BreakableStatement {
class DoWhileStatement: public IterationStatement {
public:
- inline DoWhileStatement(Isolate* isolate, ZoneStringList* labels);
+ DoWhileStatement(Isolate* isolate, ZoneStringList* labels)
+ : IterationStatement(isolate, labels),
+ cond_(NULL),
+ condition_position_(-1),
+ continue_id_(GetNextId(isolate)),
+ back_edge_id_(GetNextId(isolate)) {
+ }
DECLARE_NODE_TYPE(DoWhileStatement)
@@ -489,7 +495,12 @@ class DoWhileStatement: public IterationStatement {
class WhileStatement: public IterationStatement {
public:
- inline WhileStatement(Isolate* isolate, ZoneStringList* labels);
+ WhileStatement(Isolate* isolate, ZoneStringList* labels)
+ : IterationStatement(isolate, labels),
+ cond_(NULL),
+ may_have_function_literal_(true),
+ body_id_(GetNextId(isolate)) {
+ }
DECLARE_NODE_TYPE(WhileStatement)
@@ -522,7 +533,16 @@ class WhileStatement: public IterationStatement {
class ForStatement: public IterationStatement {
public:
- inline ForStatement(Isolate* isolate, ZoneStringList* labels);
+ ForStatement(Isolate* isolate, ZoneStringList* labels)
+ : IterationStatement(isolate, labels),
+ init_(NULL),
+ cond_(NULL),
+ next_(NULL),
+ may_have_function_literal_(true),
+ loop_variable_(NULL),
+ continue_id_(GetNextId(isolate)),
+ body_id_(GetNextId(isolate)) {
+ }
DECLARE_NODE_TYPE(ForStatement)
@@ -571,7 +591,12 @@ class ForStatement: public IterationStatement {
class ForInStatement: public IterationStatement {
public:
- inline ForInStatement(Isolate* isolate, ZoneStringList* labels);
+ ForInStatement(Isolate* isolate, ZoneStringList* labels)
+ : IterationStatement(isolate, labels),
+ each_(NULL),
+ enumerable_(NULL),
+ assignment_id_(GetNextId(isolate)) {
+ }
DECLARE_NODE_TYPE(ForInStatement)
@@ -606,9 +631,6 @@ class ExpressionStatement: public Statement {
virtual bool IsInlineable() const;
- virtual Assignment* StatementAsSimpleAssignment();
- virtual CountOperation* StatementAsCountOperation();
-
void set_expression(Expression* e) { expression_ = e; }
Expression* expression() const { return expression_; }
@@ -704,6 +726,8 @@ class CaseClause: public ZoneObject {
// Type feedback information.
void RecordTypeFeedback(TypeFeedbackOracle* oracle);
bool IsSmiCompare() { return compare_type_ == SMI_ONLY; }
+ bool IsSymbolCompare() { return compare_type_ == SYMBOL_ONLY; }
+ bool IsStringCompare() { return compare_type_ == STRING_ONLY; }
bool IsObjectCompare() { return compare_type_ == OBJECT_ONLY; }
private:
@@ -711,7 +735,13 @@ class CaseClause: public ZoneObject {
Label body_target_;
ZoneList<Statement*>* statements_;
int position_;
- enum CompareTypeFeedback { NONE, SMI_ONLY, OBJECT_ONLY };
+ enum CompareTypeFeedback {
+ NONE,
+ SMI_ONLY,
+ SYMBOL_ONLY,
+ STRING_ONLY,
+ OBJECT_ONLY
+ };
CompareTypeFeedback compare_type_;
int compare_id_;
int entry_id_;
@@ -720,7 +750,12 @@ class CaseClause: public ZoneObject {
class SwitchStatement: public BreakableStatement {
public:
- inline SwitchStatement(Isolate* isolate, ZoneStringList* labels);
+ SwitchStatement(Isolate* isolate, ZoneStringList* labels)
+ : BreakableStatement(isolate, labels, TARGET_FOR_ANONYMOUS),
+ tag_(NULL),
+ cases_(NULL) {
+ }
+
DECLARE_NODE_TYPE(SwitchStatement)
@@ -808,18 +843,25 @@ class TargetCollector: public AstNode {
class TryStatement: public Statement {
public:
- explicit TryStatement(Block* try_block)
- : try_block_(try_block), escaping_targets_(NULL) { }
+ explicit TryStatement(int index, Block* try_block)
+ : index_(index),
+ try_block_(try_block),
+ escaping_targets_(NULL) {
+ }
void set_escaping_targets(ZoneList<Label*>* targets) {
escaping_targets_ = targets;
}
+ int index() const { return index_; }
Block* try_block() const { return try_block_; }
ZoneList<Label*>* escaping_targets() const { return escaping_targets_; }
virtual bool IsInlineable() const;
private:
+ // Unique (per-function) index of this handler. This is not an AST ID.
+ int index_;
+
Block* try_block_;
ZoneList<Label*>* escaping_targets_;
};
@@ -827,11 +869,12 @@ class TryStatement: public Statement {
class TryCatchStatement: public TryStatement {
public:
- TryCatchStatement(Block* try_block,
+ TryCatchStatement(int index,
+ Block* try_block,
Scope* scope,
Variable* variable,
Block* catch_block)
- : TryStatement(try_block),
+ : TryStatement(index, try_block),
scope_(scope),
variable_(variable),
catch_block_(catch_block) {
@@ -853,8 +896,8 @@ class TryCatchStatement: public TryStatement {
class TryFinallyStatement: public TryStatement {
public:
- TryFinallyStatement(Block* try_block, Block* finally_block)
- : TryStatement(try_block),
+ TryFinallyStatement(int index, Block* try_block, Block* finally_block)
+ : TryStatement(index, try_block),
finally_block_(finally_block) { }
DECLARE_NODE_TYPE(TryFinallyStatement)
@@ -889,9 +932,6 @@ class Literal: public Expression {
DECLARE_NODE_TYPE(Literal)
- virtual bool IsTrivial() { return true; }
- virtual bool IsSmiLiteral() { return handle_->IsSmi(); }
-
// Check if this literal is identical to the other literal.
bool IsIdenticalTo(const Literal* other) const {
return handle_.is_identical_to(other->handle_);
@@ -1100,18 +1140,17 @@ class VariableProxy: public Expression {
public:
VariableProxy(Isolate* isolate, Variable* var);
+ VariableProxy(Isolate* isolate,
+ Handle<String> name,
+ bool is_this,
+ int position = RelocInfo::kNoPosition);
+
DECLARE_NODE_TYPE(VariableProxy)
virtual bool IsValidLeftHandSide() {
return var_ == NULL ? true : var_->IsValidLeftHandSide();
}
- virtual bool IsTrivial() {
- // Reading from a mutable variable is a side effect, but the
- // variable for 'this' is immutable.
- return is_this_ || is_trivial_;
- }
-
virtual bool IsInlineable() const;
bool IsVariable(Handle<String> n) {
@@ -1123,7 +1162,6 @@ class VariableProxy: public Expression {
Handle<String> name() const { return name_; }
Variable* var() const { return var_; }
bool is_this() const { return is_this_; }
- bool inside_with() const { return inside_with_; }
int position() const { return position_; }
void MarkAsTrivial() { is_trivial_ = true; }
@@ -1135,17 +1173,8 @@ class VariableProxy: public Expression {
Handle<String> name_;
Variable* var_; // resolved variable, or NULL
bool is_this_;
- bool inside_with_;
bool is_trivial_;
int position_;
-
- VariableProxy(Isolate* isolate,
- Handle<String> name,
- bool is_this,
- bool inside_with,
- int position = RelocInfo::kNoPosition);
-
- friend class Scope;
};
@@ -1182,7 +1211,7 @@ class Property: public Expression {
void RecordTypeFeedback(TypeFeedbackOracle* oracle);
virtual bool IsMonomorphic() { return is_monomorphic_; }
virtual SmallMapList* GetReceiverTypes() { return &receiver_types_; }
- virtual bool IsArrayLength() { return is_array_length_; }
+ bool IsArrayLength() { return is_array_length_; }
private:
Expression* obj_;
@@ -1320,8 +1349,17 @@ class UnaryOperation: public Expression {
Token::Value op,
Expression* expression,
int pos)
- : Expression(isolate), op_(op), expression_(expression), pos_(pos) {
+ : Expression(isolate),
+ op_(op),
+ expression_(expression),
+ pos_(pos),
+ materialize_true_id_(AstNode::kNoNumber),
+ materialize_false_id_(AstNode::kNoNumber) {
ASSERT(Token::IsUnaryOp(op));
+ if (op == Token::NOT) {
+ materialize_true_id_ = GetNextId(isolate);
+ materialize_false_id_ = GetNextId(isolate);
+ }
}
DECLARE_NODE_TYPE(UnaryOperation)
@@ -1334,10 +1372,18 @@ class UnaryOperation: public Expression {
Expression* expression() const { return expression_; }
virtual int position() const { return pos_; }
+ int MaterializeTrueId() { return materialize_true_id_; }
+ int MaterializeFalseId() { return materialize_false_id_; }
+
private:
Token::Value op_;
Expression* expression_;
int pos_;
+
+ // For unary not (Token::NOT), the AST ids where true and false will
+ // actually be materialized, respectively.
+ int materialize_true_id_;
+ int materialize_false_id_;
};
@@ -1465,6 +1511,7 @@ class CompareOperation: public Expression {
// Match special cases.
bool IsLiteralCompareTypeof(Expression** expr, Handle<String>* check);
bool IsLiteralCompareUndefined(Expression** expr);
+ bool IsLiteralCompareNull(Expression** expr);
private:
Token::Value op_;
@@ -1477,25 +1524,6 @@ class CompareOperation: public Expression {
};
-class CompareToNull: public Expression {
- public:
- CompareToNull(Isolate* isolate, bool is_strict, Expression* expression)
- : Expression(isolate), is_strict_(is_strict), expression_(expression) { }
-
- DECLARE_NODE_TYPE(CompareToNull)
-
- virtual bool IsInlineable() const;
-
- bool is_strict() const { return is_strict_; }
- Token::Value op() const { return is_strict_ ? Token::EQ_STRICT : Token::EQ; }
- Expression* expression() const { return expression_; }
-
- private:
- bool is_strict_;
- Expression* expression_;
-};
-
-
class Conditional: public Expression {
public:
Conditional(Isolate* isolate,
@@ -1630,31 +1658,30 @@ class FunctionLiteral: public Expression {
ZoneList<Statement*>* body,
int materialized_literal_count,
int expected_property_count,
+ int handler_count,
bool has_only_simple_this_property_assignments,
Handle<FixedArray> this_property_assignments,
- int num_parameters,
- int start_position,
- int end_position,
+ int parameter_count,
Type type,
bool has_duplicate_parameters)
: Expression(isolate),
name_(name),
scope_(scope),
body_(body),
+ this_property_assignments_(this_property_assignments),
+ inferred_name_(isolate->factory()->empty_string()),
materialized_literal_count_(materialized_literal_count),
expected_property_count_(expected_property_count),
- has_only_simple_this_property_assignments_(
- has_only_simple_this_property_assignments),
- this_property_assignments_(this_property_assignments),
- num_parameters_(num_parameters),
- start_position_(start_position),
- end_position_(end_position),
- function_token_position_(RelocInfo::kNoPosition),
- inferred_name_(HEAP->empty_string()),
- is_expression_(type != DECLARATION),
- is_anonymous_(type == ANONYMOUS_EXPRESSION),
- pretenure_(false),
- has_duplicate_parameters_(has_duplicate_parameters) {
+ handler_count_(handler_count),
+ parameter_count_(parameter_count),
+ function_token_position_(RelocInfo::kNoPosition) {
+ bitfield_ =
+ HasOnlySimpleThisPropertyAssignments::encode(
+ has_only_simple_this_property_assignments) |
+ IsExpression::encode(type != DECLARATION) |
+ IsAnonymous::encode(type == ANONYMOUS_EXPRESSION) |
+ Pretenure::encode(false) |
+ HasDuplicateParameters::encode(has_duplicate_parameters);
}
DECLARE_NODE_TYPE(FunctionLiteral)
@@ -1664,21 +1691,23 @@ class FunctionLiteral: public Expression {
ZoneList<Statement*>* body() const { return body_; }
void set_function_token_position(int pos) { function_token_position_ = pos; }
int function_token_position() const { return function_token_position_; }
- int start_position() const { return start_position_; }
- int end_position() const { return end_position_; }
- bool is_expression() const { return is_expression_; }
- bool is_anonymous() const { return is_anonymous_; }
- bool strict_mode() const;
+ int start_position() const;
+ int end_position() const;
+ bool is_expression() const { return IsExpression::decode(bitfield_); }
+ bool is_anonymous() const { return IsAnonymous::decode(bitfield_); }
+ bool is_classic_mode() const { return language_mode() == CLASSIC_MODE; }
+ LanguageMode language_mode() const;
int materialized_literal_count() { return materialized_literal_count_; }
int expected_property_count() { return expected_property_count_; }
+ int handler_count() { return handler_count_; }
bool has_only_simple_this_property_assignments() {
- return has_only_simple_this_property_assignments_;
+ return HasOnlySimpleThisPropertyAssignments::decode(bitfield_);
}
Handle<FixedArray> this_property_assignments() {
return this_property_assignments_;
}
- int num_parameters() { return num_parameters_; }
+ int parameter_count() { return parameter_count_; }
bool AllowsLazyCompilation();
@@ -1692,29 +1721,33 @@ class FunctionLiteral: public Expression {
inferred_name_ = inferred_name;
}
- bool pretenure() { return pretenure_; }
- void set_pretenure(bool value) { pretenure_ = value; }
+ bool pretenure() { return Pretenure::decode(bitfield_); }
+ void set_pretenure() { bitfield_ |= Pretenure::encode(true); }
virtual bool IsInlineable() const;
- bool has_duplicate_parameters() { return has_duplicate_parameters_; }
+ bool has_duplicate_parameters() {
+ return HasDuplicateParameters::decode(bitfield_);
+ }
private:
Handle<String> name_;
Scope* scope_;
ZoneList<Statement*>* body_;
+ Handle<FixedArray> this_property_assignments_;
+ Handle<String> inferred_name_;
+
int materialized_literal_count_;
int expected_property_count_;
- bool has_only_simple_this_property_assignments_;
- Handle<FixedArray> this_property_assignments_;
- int num_parameters_;
- int start_position_;
- int end_position_;
+ int handler_count_;
+ int parameter_count_;
int function_token_position_;
- Handle<String> inferred_name_;
- bool is_expression_;
- bool is_anonymous_;
- bool pretenure_;
- bool has_duplicate_parameters_;
+
+ unsigned bitfield_;
+ class HasOnlySimpleThisPropertyAssignments: public BitField<bool, 0, 1> {};
+ class IsExpression: public BitField<bool, 1, 1> {};
+ class IsAnonymous: public BitField<bool, 2, 1> {};
+ class Pretenure: public BitField<bool, 3, 1> {};
+ class HasDuplicateParameters: public BitField<bool, 4, 1> {};
};
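FunctionLiteral's five booleans are folded into a single unsigned word here via V8's BitField helper. A hypothetical re-implementation of that helper, just enough to show how encode and decode carve flags out of the word (the real template also supports multi-bit fields for enums):

    #include <cassert>

    template <class T, int shift, int size>
    class BitField {
     public:
      static const unsigned kMask = ((1U << size) - 1) << shift;
      static unsigned encode(T value) {
        return static_cast<unsigned>(value) << shift;
      }
      static T decode(unsigned word) {
        return static_cast<T>((word & kMask) >> shift);
      }
    };

    class IsExpression : public BitField<bool, 1, 1> {};
    class Pretenure : public BitField<bool, 3, 1> {};

    int main() {
      unsigned bitfield = IsExpression::encode(true) | Pretenure::encode(false);
      assert(IsExpression::decode(bitfield));
      assert(!Pretenure::decode(bitfield));
      bitfield |= Pretenure::encode(true);  // what set_pretenure() now does
      assert(Pretenure::decode(bitfield));
      return 0;
    }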
@@ -2096,9 +2129,10 @@ class RegExpEmpty: public RegExpTree {
virtual bool IsEmpty();
virtual int min_match() { return 0; }
virtual int max_match() { return 0; }
- static RegExpEmpty* GetInstance() { return &kInstance; }
- private:
- static RegExpEmpty kInstance;
+ static RegExpEmpty* GetInstance() {
+ static RegExpEmpty* instance = ::new RegExpEmpty();
+ return instance;
+ }
};
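The singleton change swaps a static data member (constructed during static initialization and destroyed at exit) for a function-local pointer allocated on first use. Note the ::new, which bypasses any class-specific operator new (RegExpTree nodes are normally zone-allocated), and the object is deliberately never freed; presumably this avoids static initialization- and destruction-order hazards. Reduced to its skeleton, with a hypothetical class name:

    class EmptySingletonSketch {
     public:
      static EmptySingletonSketch* GetInstance() {
        // Constructed on first call; intentionally leaked, so it stays
        // valid even after other static objects have been destroyed.
        static EmptySingletonSketch* instance = ::new EmptySingletonSketch();
        return instance;
      }
    };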
diff --git a/deps/v8/src/atomicops_internals_mips_gcc.h b/deps/v8/src/atomicops_internals_mips_gcc.h
index 5113de289..9498fd76e 100644
--- a/deps/v8/src/atomicops_internals_mips_gcc.h
+++ b/deps/v8/src/atomicops_internals_mips_gcc.h
@@ -30,7 +30,7 @@
#ifndef V8_ATOMICOPS_INTERNALS_MIPS_GCC_H_
#define V8_ATOMICOPS_INTERNALS_MIPS_GCC_H_
-#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("sync" : : : "memory")
+#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory")
namespace v8 {
namespace internal {
@@ -48,16 +48,19 @@ namespace internal {
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
- Atomic32 prev;
- __asm__ __volatile__("1:\n"
- "ll %0, %1\n" // prev = *ptr
+ Atomic32 prev, tmp;
+ __asm__ __volatile__(".set push\n"
+ ".set noreorder\n"
+ "1:\n"
+ "ll %0, %5\n" // prev = *ptr
"bne %0, %3, 2f\n" // if (prev != old_value) goto 2
- "nop\n" // delay slot nop
- "sc %2, %1\n" // *ptr = new_value (with atomic check)
+ "move %2, %4\n" // tmp = new_value
+ "sc %2, %1\n" // *ptr = tmp (with atomic check)
"beqz %2, 1b\n" // start again on atomic error
"nop\n" // delay slot nop
"2:\n"
- : "=&r" (prev), "=m" (*ptr), "+&r" (new_value)
+ ".set pop\n"
+ : "=&r" (prev), "=m" (*ptr), "=&r" (tmp)
: "Ir" (old_value), "r" (new_value), "m" (*ptr)
: "memory");
return prev;
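The rewritten loop fixes a real bug: the MIPS sc instruction overwrites its source register with a success flag, so the old code destroyed new_value on the first failed attempt and retried with garbage; the new code copies new_value into a scratch register on every iteration (the added .set noreorder/.set pop directives additionally stop the assembler from rescheduling around the branch delay slots). A runnable C++ restatement, where load_linked and store_conditional are hypothetical stand-ins for ll/sc, emulated with a plain CAS so the sketch compiles anywhere:

    #include <atomic>
    #include <cassert>

    typedef int Atomic32;

    static Atomic32 g_linked;  // value observed by the last load_linked

    static Atomic32 load_linked(std::atomic<Atomic32>* ptr) {
      g_linked = ptr->load();
      return g_linked;
    }

    static bool store_conditional(std::atomic<Atomic32>* ptr, Atomic32 value) {
      Atomic32 expected = g_linked;
      return ptr->compare_exchange_strong(expected, value);
    }

    static Atomic32 CompareAndSwapSketch(std::atomic<Atomic32>* ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value) {
      Atomic32 prev, tmp;
      do {
        prev = load_linked(ptr);               // ll   %0, %5
        if (prev != old_value) break;          // bne  %0, %3, 2f
        tmp = new_value;                       // move %2, %4  (fresh copy)
      } while (!store_conditional(ptr, tmp));  // sc %2, %1 / beqz %2, 1b
      return prev;
    }

    int main() {
      std::atomic<Atomic32> cell(1);
      assert(CompareAndSwapSketch(&cell, 1, 2) == 1 && cell.load() == 2);
      assert(CompareAndSwapSketch(&cell, 1, 3) == 2 && cell.load() == 2);
      return 0;
    }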
@@ -68,12 +71,15 @@ inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
Atomic32 temp, old;
- __asm__ __volatile__("1:\n"
+ __asm__ __volatile__(".set push\n"
+ ".set noreorder\n"
+ "1:\n"
"ll %1, %2\n" // old = *ptr
"move %0, %3\n" // temp = new_value
"sc %0, %2\n" // *ptr = temp (with atomic check)
"beqz %0, 1b\n" // start again on atomic error
"nop\n" // delay slot nop
+ ".set pop\n"
: "=&r" (temp), "=&r" (old), "=m" (*ptr)
: "r" (new_value), "m" (*ptr)
: "memory");
@@ -87,13 +93,15 @@ inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
Atomic32 temp, temp2;
- __asm__ __volatile__("1:\n"
+ __asm__ __volatile__(".set push\n"
+ ".set noreorder\n"
+ "1:\n"
"ll %0, %2\n" // temp = *ptr
- "addu %0, %3\n" // temp = temp + increment
- "move %1, %0\n" // temp2 = temp
- "sc %0, %2\n" // *ptr = temp (with atomic check)
- "beqz %0, 1b\n" // start again on atomic error
- "nop\n" // delay slot nop
+ "addu %1, %0, %3\n" // temp2 = temp + increment
+ "sc %1, %2\n" // *ptr = temp2 (with atomic check)
+ "beqz %1, 1b\n" // start again on atomic error
+ "addu %1, %0, %3\n" // temp2 = temp + increment
+ ".set pop\n"
: "=&r" (temp), "=&r" (temp2), "=m" (*ptr)
: "Ir" (increment), "m" (*ptr)
: "memory");
@@ -103,6 +111,7 @@ inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
+ ATOMICOPS_COMPILER_BARRIER();
Atomic32 res = NoBarrier_AtomicIncrement(ptr, increment);
ATOMICOPS_COMPILER_BARRIER();
return res;
@@ -117,16 +126,19 @@ inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
- Atomic32 x = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
ATOMICOPS_COMPILER_BARRIER();
- return x;
+ Atomic32 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+ ATOMICOPS_COMPILER_BARRIER();
+ return res;
}
inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
ATOMICOPS_COMPILER_BARRIER();
- return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+ Atomic32 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+ ATOMICOPS_COMPILER_BARRIER();
+ return res;
}
inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
@@ -134,7 +146,7 @@ inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
}
inline void MemoryBarrier() {
- ATOMICOPS_COMPILER_BARRIER();
+ __asm__ __volatile__("sync" : : : "memory");
}
inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
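Two kinds of barrier are untangled in this file: ATOMICOPS_COMPILER_BARRIER previously emitted a hardware sync even though its name promises only a compiler fence, so MemoryBarrier now issues sync itself while the macro becomes a true compiler-only fence, and Acquire_CompareAndSwap gains a barrier before the operation as well as after. The two fences side by side (GCC inline assembly; the sync mnemonic is MIPS-specific, so this only compiles for that target):

    // Stops only the compiler from moving memory accesses across this
    // point; it emits no instruction.
    inline void CompilerOnlyBarrier() {
      __asm__ __volatile__("" : : : "memory");
    }

    // Also orders the accesses in hardware (MIPS full barrier).
    inline void HardwareBarrier() {
      __asm__ __volatile__("sync" : : : "memory");
    }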
diff --git a/deps/v8/src/bootstrapper.cc b/deps/v8/src/bootstrapper.cc
index f07e625ec..29c16ee93 100644
--- a/deps/v8/src/bootstrapper.cc
+++ b/deps/v8/src/bootstrapper.cc
@@ -34,9 +34,11 @@
#include "debug.h"
#include "execution.h"
#include "global-handles.h"
+#include "isolate-inl.h"
#include "macro-assembler.h"
#include "natives.h"
#include "objects-visiting.h"
+#include "platform.h"
#include "snapshot.h"
#include "extensions/externalize-string-extension.h"
#include "extensions/gc-extension.h"
@@ -209,12 +211,31 @@ class Genesis BASE_EMBEDDED {
void InstallBuiltinFunctionIds();
void InstallJSFunctionResultCaches();
void InitializeNormalizedMapCaches();
+
+ enum ExtensionTraversalState {
+ UNVISITED, VISITED, INSTALLED
+ };
+
+ class ExtensionStates {
+ public:
+ ExtensionStates();
+ ExtensionTraversalState get_state(RegisteredExtension* extension);
+ void set_state(RegisteredExtension* extension,
+ ExtensionTraversalState state);
+ private:
+ Allocator allocator_;
+ HashMap map_;
+ DISALLOW_COPY_AND_ASSIGN(ExtensionStates);
+ };
+
// Used both for deserialized and from-scratch contexts to add the extensions
// provided.
static bool InstallExtensions(Handle<Context> global_context,
v8::ExtensionConfiguration* extensions);
- static bool InstallExtension(const char* name);
- static bool InstallExtension(v8::RegisteredExtension* current);
+ static bool InstallExtension(const char* name,
+ ExtensionStates* extension_states);
+ static bool InstallExtension(v8::RegisteredExtension* current,
+ ExtensionStates* extension_states);
static void InstallSpecialObjects(Handle<Context> global_context);
bool InstallJSBuiltins(Handle<JSBuiltinsObject> builtins);
bool ConfigureApiObject(Handle<JSObject> object,
@@ -361,6 +382,7 @@ static Handle<JSFunction> InstallFunction(Handle<JSObject> target,
if (is_ecma_native) {
function->shared()->set_instance_class_name(*symbol);
}
+ function->shared()->set_native(true);
return function;
}
@@ -374,26 +396,28 @@ Handle<DescriptorArray> Genesis::ComputeFunctionInstanceDescriptor(
PropertyAttributes attributes =
static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
+ DescriptorArray::WhitenessWitness witness(*descriptors);
+
{ // Add length.
Handle<Foreign> foreign = factory()->NewForeign(&Accessors::FunctionLength);
CallbacksDescriptor d(*factory()->length_symbol(), *foreign, attributes);
- descriptors->Set(0, &d);
+ descriptors->Set(0, &d, witness);
}
{ // Add name.
Handle<Foreign> foreign = factory()->NewForeign(&Accessors::FunctionName);
CallbacksDescriptor d(*factory()->name_symbol(), *foreign, attributes);
- descriptors->Set(1, &d);
+ descriptors->Set(1, &d, witness);
}
{ // Add arguments.
Handle<Foreign> foreign =
factory()->NewForeign(&Accessors::FunctionArguments);
CallbacksDescriptor d(*factory()->arguments_symbol(), *foreign, attributes);
- descriptors->Set(2, &d);
+ descriptors->Set(2, &d, witness);
}
{ // Add caller.
Handle<Foreign> foreign = factory()->NewForeign(&Accessors::FunctionCaller);
CallbacksDescriptor d(*factory()->caller_symbol(), *foreign, attributes);
- descriptors->Set(3, &d);
+ descriptors->Set(3, &d, witness);
}
if (prototypeMode != DONT_ADD_PROTOTYPE) {
// Add prototype.
@@ -403,9 +427,9 @@ Handle<DescriptorArray> Genesis::ComputeFunctionInstanceDescriptor(
Handle<Foreign> foreign =
factory()->NewForeign(&Accessors::FunctionPrototype);
CallbacksDescriptor d(*factory()->prototype_symbol(), *foreign, attributes);
- descriptors->Set(4, &d);
+ descriptors->Set(4, &d, witness);
}
- descriptors->Sort();
+ descriptors->Sort(witness);
return descriptors;
}
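Every Set and Sort in this function now takes a DescriptorArray::WhitenessWitness. The idiom: constructing the witness checks once that the array is still unmarked ("white" to the incremental marker), and passing it by value to each mutation makes that precondition part of the signature, so the barrier-free stores are visibly justified at every call site. A reduced sketch with hypothetical types:

    class DescriptorArraySketch;

    class WhitenessWitness {
     public:
      explicit WhitenessWitness(DescriptorArraySketch* array) {
        (void)array;  // real code asserts the GC has not yet marked `array`
      }
    };

    class DescriptorArraySketch {
     public:
      // Requiring the witness means you cannot call Set without having
      // established the precondition that justifies skipping the barrier.
      void Set(int index, int value, WhitenessWitness) {
        slots_[index] = value;  // barrier-free store
      }
     private:
      int slots_[8];
    };

    int main() {
      DescriptorArraySketch descriptors;
      WhitenessWitness witness(&descriptors);
      descriptors.Set(0, 42, witness);
      return 0;
    }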
@@ -478,7 +502,7 @@ Handle<JSFunction> Genesis::CreateEmptyFunction(Isolate* isolate) {
// 262 15.3.4.
Handle<String> symbol = factory->LookupAsciiSymbol("Empty");
Handle<JSFunction> empty_function =
- factory->NewFunctionWithoutPrototype(symbol, kNonStrictMode);
+ factory->NewFunctionWithoutPrototype(symbol, CLASSIC_MODE);
// --- E m p t y ---
Handle<Code> code =
@@ -521,41 +545,43 @@ Handle<DescriptorArray> Genesis::ComputeStrictFunctionInstanceDescriptor(
? 4
: 5);
PropertyAttributes attributes = static_cast<PropertyAttributes>(
- DONT_ENUM | DONT_DELETE | READ_ONLY);
+ DONT_ENUM | DONT_DELETE);
+
+ DescriptorArray::WhitenessWitness witness(*descriptors);
{ // length
Handle<Foreign> foreign = factory()->NewForeign(&Accessors::FunctionLength);
CallbacksDescriptor d(*factory()->length_symbol(), *foreign, attributes);
- descriptors->Set(0, &d);
+ descriptors->Set(0, &d, witness);
}
{ // name
Handle<Foreign> foreign = factory()->NewForeign(&Accessors::FunctionName);
CallbacksDescriptor d(*factory()->name_symbol(), *foreign, attributes);
- descriptors->Set(1, &d);
+ descriptors->Set(1, &d, witness);
}
{ // arguments
CallbacksDescriptor d(*factory()->arguments_symbol(),
*arguments,
attributes);
- descriptors->Set(2, &d);
+ descriptors->Set(2, &d, witness);
}
{ // caller
CallbacksDescriptor d(*factory()->caller_symbol(), *caller, attributes);
- descriptors->Set(3, &d);
+ descriptors->Set(3, &d, witness);
}
// prototype
if (prototypeMode != DONT_ADD_PROTOTYPE) {
- if (prototypeMode == ADD_WRITEABLE_PROTOTYPE) {
- attributes = static_cast<PropertyAttributes>(attributes & ~READ_ONLY);
+ if (prototypeMode != ADD_WRITEABLE_PROTOTYPE) {
+ attributes = static_cast<PropertyAttributes>(attributes | READ_ONLY);
}
Handle<Foreign> foreign =
factory()->NewForeign(&Accessors::FunctionPrototype);
CallbacksDescriptor d(*factory()->prototype_symbol(), *foreign, attributes);
- descriptors->Set(4, &d);
+ descriptors->Set(4, &d, witness);
}
- descriptors->Sort();
+ descriptors->Sort(witness);
return descriptors;
}
@@ -565,7 +591,7 @@ Handle<JSFunction> Genesis::GetThrowTypeErrorFunction() {
if (throw_type_error_function.is_null()) {
Handle<String> name = factory()->LookupAsciiSymbol("ThrowTypeError");
throw_type_error_function =
- factory()->NewFunctionWithoutPrototype(name, kNonStrictMode);
+ factory()->NewFunctionWithoutPrototype(name, CLASSIC_MODE);
Handle<Code> code(isolate()->builtins()->builtin(
Builtins::kStrictModePoisonPill));
throw_type_error_function->set_map(
@@ -940,6 +966,7 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
ASSERT_EQ(0, initial_map->inobject_properties());
Handle<DescriptorArray> descriptors = factory->NewDescriptorArray(5);
+ DescriptorArray::WhitenessWitness witness(*descriptors);
PropertyAttributes final =
static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
int enum_index = 0;
@@ -949,7 +976,7 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
JSRegExp::kSourceFieldIndex,
final,
enum_index++);
- descriptors->Set(0, &field);
+ descriptors->Set(0, &field, witness);
}
{
// ECMA-262, section 15.10.7.2.
@@ -957,7 +984,7 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
JSRegExp::kGlobalFieldIndex,
final,
enum_index++);
- descriptors->Set(1, &field);
+ descriptors->Set(1, &field, witness);
}
{
// ECMA-262, section 15.10.7.3.
@@ -965,7 +992,7 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
JSRegExp::kIgnoreCaseFieldIndex,
final,
enum_index++);
- descriptors->Set(2, &field);
+ descriptors->Set(2, &field, witness);
}
{
// ECMA-262, section 15.10.7.4.
@@ -973,7 +1000,7 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
JSRegExp::kMultilineFieldIndex,
final,
enum_index++);
- descriptors->Set(3, &field);
+ descriptors->Set(3, &field, witness);
}
{
// ECMA-262, section 15.10.7.5.
@@ -983,10 +1010,10 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
JSRegExp::kLastIndexFieldIndex,
writable,
enum_index++);
- descriptors->Set(4, &field);
+ descriptors->Set(4, &field, witness);
}
descriptors->SetNextEnumerationIndex(enum_index);
- descriptors->Sort();
+ descriptors->Sort(witness);
initial_map->set_inobject_properties(5);
initial_map->set_pre_allocated_property_fields(5);
@@ -995,6 +1022,26 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
initial_map->instance_size() + 5 * kPointerSize);
initial_map->set_instance_descriptors(*descriptors);
initial_map->set_visitor_id(StaticVisitorBase::GetVisitorId(*initial_map));
+
+ // RegExp prototype object is itself a RegExp.
+ Handle<Map> proto_map = factory->CopyMapDropTransitions(initial_map);
+ proto_map->set_prototype(global_context()->initial_object_prototype());
+ Handle<JSObject> proto = factory->NewJSObjectFromMap(proto_map);
+ proto->InObjectPropertyAtPut(JSRegExp::kSourceFieldIndex,
+ heap->empty_string());
+ proto->InObjectPropertyAtPut(JSRegExp::kGlobalFieldIndex,
+ heap->false_value());
+ proto->InObjectPropertyAtPut(JSRegExp::kIgnoreCaseFieldIndex,
+ heap->false_value());
+ proto->InObjectPropertyAtPut(JSRegExp::kMultilineFieldIndex,
+ heap->false_value());
+ proto->InObjectPropertyAtPut(JSRegExp::kLastIndexFieldIndex,
+ Smi::FromInt(0),
+ SKIP_WRITE_BARRIER); // It's a Smi.
+ initial_map->set_prototype(*proto);
+ factory->SetRegExpIrregexpData(Handle<JSRegExp>::cast(proto),
+ JSRegExp::IRREGEXP, factory->empty_string(),
+ JSRegExp::Flags(0), 0);
}
{ // -- J S O N
@@ -1044,7 +1091,7 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
DONT_ENUM);
#ifdef DEBUG
- LookupResult lookup;
+ LookupResult lookup(isolate);
result->LocalLookup(heap->callee_symbol(), &lookup);
ASSERT(lookup.IsProperty() && (lookup.type() == FIELD));
ASSERT(lookup.GetFieldIndex() == Heap::kArgumentsCalleeIndex);
@@ -1063,11 +1110,6 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
}
{ // --- aliased_arguments_boilerplate_
- Handle<Map> old_map(global_context()->arguments_boilerplate()->map());
- Handle<Map> new_map = factory->CopyMapDropTransitions(old_map);
- new_map->set_pre_allocated_property_fields(2);
- Handle<JSObject> result = factory->NewJSObjectFromMap(new_map);
- new_map->set_elements_kind(NON_STRICT_ARGUMENTS_ELEMENTS);
// Set up a well-formed parameter map to make assertions happy.
Handle<FixedArray> elements = factory->NewFixedArray(2);
elements->set_map(heap->non_strict_arguments_elements_map());
@@ -1076,7 +1118,16 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
elements->set(0, *array);
array = factory->NewFixedArray(0);
elements->set(1, *array);
+
+ Handle<Map> old_map(global_context()->arguments_boilerplate()->map());
+ Handle<Map> new_map = factory->CopyMapDropTransitions(old_map);
+ new_map->set_pre_allocated_property_fields(2);
+ Handle<JSObject> result = factory->NewJSObjectFromMap(new_map);
+ // Set elements kind after allocating the object because
+ // NewJSObjectFromMap assumes a fast elements map.
+ new_map->set_elements_kind(NON_STRICT_ARGUMENTS_ELEMENTS);
result->set_elements(*elements);
+ ASSERT(result->HasNonStrictArgumentsElements());
global_context()->set_aliased_arguments_boilerplate(*result);
}
@@ -1099,19 +1150,20 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
// Create the descriptor array for the arguments object.
Handle<DescriptorArray> descriptors = factory->NewDescriptorArray(3);
+ DescriptorArray::WhitenessWitness witness(*descriptors);
{ // length
FieldDescriptor d(*factory->length_symbol(), 0, DONT_ENUM);
- descriptors->Set(0, &d);
+ descriptors->Set(0, &d, witness);
}
{ // callee
CallbacksDescriptor d(*factory->callee_symbol(), *callee, attributes);
- descriptors->Set(1, &d);
+ descriptors->Set(1, &d, witness);
}
{ // caller
CallbacksDescriptor d(*factory->caller_symbol(), *caller, attributes);
- descriptors->Set(2, &d);
+ descriptors->Set(2, &d, witness);
}
- descriptors->Sort();
+ descriptors->Sort(witness);
// Create the map. Allocate one in-object field for length.
Handle<Map> map = factory->NewMap(JS_OBJECT_TYPE,
@@ -1136,7 +1188,7 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
DONT_ENUM);
#ifdef DEBUG
- LookupResult lookup;
+ LookupResult lookup(isolate);
result->LocalLookup(heap->length_symbol(), &lookup);
ASSERT(lookup.IsProperty() && (lookup.type() == FIELD));
ASSERT(lookup.GetFieldIndex() == Heap::kArgumentsLengthIndex);
@@ -1195,6 +1247,14 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
// Initialize the data slot.
global_context()->set_data(heap->undefined_value());
+
+ {
+ // Initialize the random seed slot.
+ Handle<ByteArray> zeroed_byte_array(
+ factory->NewByteArray(kRandomStateSize));
+ global_context()->set_random_seed(*zeroed_byte_array);
+ memset(zeroed_byte_array->GetDataStartAddress(), 0, kRandomStateSize);
+ }
}
@@ -1202,12 +1262,26 @@ void Genesis::InitializeExperimentalGlobal() {
Handle<JSObject> global = Handle<JSObject>(global_context()->global());
// TODO(mstarzinger): Move this into Genesis::InitializeGlobal once we no
- // longer need to live behind a flag, so WeakMap gets added to the snapshot.
- if (FLAG_harmony_weakmaps) { // -- W e a k M a p
- Handle<JSObject> prototype =
- factory()->NewJSObject(isolate()->object_function(), TENURED);
- InstallFunction(global, "WeakMap", JS_WEAK_MAP_TYPE, JSWeakMap::kSize,
- prototype, Builtins::kIllegal, true);
+ // longer need to live behind a flag, so functions get added to the snapshot.
+ if (FLAG_harmony_collections) {
+ { // -- S e t
+ Handle<JSObject> prototype =
+ factory()->NewJSObject(isolate()->object_function(), TENURED);
+ InstallFunction(global, "Set", JS_SET_TYPE, JSSet::kSize,
+ prototype, Builtins::kIllegal, true);
+ }
+ { // -- M a p
+ Handle<JSObject> prototype =
+ factory()->NewJSObject(isolate()->object_function(), TENURED);
+ InstallFunction(global, "Map", JS_MAP_TYPE, JSMap::kSize,
+ prototype, Builtins::kIllegal, true);
+ }
+ { // -- W e a k M a p
+ Handle<JSObject> prototype =
+ factory()->NewJSObject(isolate()->object_function(), TENURED);
+ InstallFunction(global, "WeakMap", JS_WEAK_MAP_TYPE, JSWeakMap::kSize,
+ prototype, Builtins::kIllegal, true);
+ }
}
}
@@ -1327,6 +1401,8 @@ void Genesis::InstallNativeFunctions() {
configure_instance_fun);
INSTALL_NATIVE(JSFunction, "GetStackTraceLine", get_stack_trace_line_fun);
INSTALL_NATIVE(JSObject, "functionCache", function_cache);
+ INSTALL_NATIVE(JSFunction, "ToCompletePropertyDescriptor",
+ to_complete_property_descriptor);
}
void Genesis::InstallExperimentalNativeFunctions() {
@@ -1334,6 +1410,7 @@ void Genesis::InstallExperimentalNativeFunctions() {
INSTALL_NATIVE(JSFunction, "DerivedHasTrap", derived_has_trap);
INSTALL_NATIVE(JSFunction, "DerivedGetTrap", derived_get_trap);
INSTALL_NATIVE(JSFunction, "DerivedSetTrap", derived_set_trap);
+ INSTALL_NATIVE(JSFunction, "ProxyEnumerate", proxy_enumerate);
}
}
@@ -1555,6 +1632,18 @@ bool Genesis::InstallNatives() {
isolate()->builtins()->builtin(Builtins::kArrayConstructCode));
array_function->shared()->DontAdaptArguments();
+ // InternalArrays should not use Smi-Only array optimizations. There are too
+ // many places in the C++ runtime code (e.g. RegEx) that assume that
+ // elements in InternalArrays can be set to non-Smi values without going
+ // through a common bottleneck that would make the SMI_ONLY -> FAST_ELEMENT
+ // transition easy to trap. Moreover, they are rarely smi-only.
+ MaybeObject* maybe_map =
+ array_function->initial_map()->CopyDropTransitions();
+ Map* new_map;
+ if (!maybe_map->To<Map>(&new_map)) return maybe_map;
+ new_map->set_elements_kind(FAST_ELEMENTS);
+ array_function->set_initial_map(new_map);
+
// Make "length" magic on instances.
Handle<DescriptorArray> array_descriptors =
factory()->CopyAppendForeignDescriptor(
@@ -1656,7 +1745,9 @@ bool Genesis::InstallNatives() {
Handle<DescriptorArray> reresult_descriptors =
factory()->NewDescriptorArray(3);
- reresult_descriptors->CopyFrom(0, *array_descriptors, 0);
+ DescriptorArray::WhitenessWitness witness(*reresult_descriptors);
+
+ reresult_descriptors->CopyFrom(0, *array_descriptors, 0, witness);
int enum_index = 0;
{
@@ -1664,7 +1755,7 @@ bool Genesis::InstallNatives() {
JSRegExpResult::kIndexIndex,
NONE,
enum_index++);
- reresult_descriptors->Set(1, &index_field);
+ reresult_descriptors->Set(1, &index_field, witness);
}
{
@@ -1672,9 +1763,9 @@ bool Genesis::InstallNatives() {
JSRegExpResult::kInputIndex,
NONE,
enum_index++);
- reresult_descriptors->Set(2, &input_field);
+ reresult_descriptors->Set(2, &input_field, witness);
}
- reresult_descriptors->Sort();
+ reresult_descriptors->Sort(witness);
initial_map->set_inobject_properties(2);
initial_map->set_pre_allocated_property_fields(2);
@@ -1701,9 +1792,9 @@ bool Genesis::InstallExperimentalNatives() {
"native proxy.js") == 0) {
if (!CompileExperimentalBuiltin(isolate(), i)) return false;
}
- if (FLAG_harmony_weakmaps &&
+ if (FLAG_harmony_collections &&
strcmp(ExperimentalNatives::GetScriptName(i).start(),
- "native weakmap.js") == 0) {
+ "native collection.js") == 0) {
if (!CompileExperimentalBuiltin(isolate(), i)) return false;
}
}
@@ -1863,6 +1954,34 @@ void Genesis::InstallSpecialObjects(Handle<Context> global_context) {
#endif
}
+static uint32_t Hash(RegisteredExtension* extension) {
+ return v8::internal::ComputePointerHash(extension);
+}
+
+static bool MatchRegisteredExtensions(void* key1, void* key2) {
+ return key1 == key2;
+}
+
+Genesis::ExtensionStates::ExtensionStates()
+ : allocator_(),
+ map_(MatchRegisteredExtensions, &allocator_, 8)
+ {}
+
+Genesis::ExtensionTraversalState Genesis::ExtensionStates::get_state(
+ RegisteredExtension* extension) {
+ i::HashMap::Entry* entry = map_.Lookup(extension, Hash(extension), false);
+ if (entry == NULL) {
+ return UNVISITED;
+ }
+ return static_cast<ExtensionTraversalState>(
+ reinterpret_cast<intptr_t>(entry->value));
+}
+
+void Genesis::ExtensionStates::set_state(RegisteredExtension* extension,
+ ExtensionTraversalState state) {
+ map_.Lookup(extension, Hash(extension), true)->value =
+ reinterpret_cast<void*>(static_cast<intptr_t>(state));
+}
bool Genesis::InstallExtensions(Handle<Context> global_context,
v8::ExtensionConfiguration* extensions) {
@@ -1870,29 +1989,27 @@ bool Genesis::InstallExtensions(Handle<Context> global_context,
// effort. (The external API reads 'ignore'-- does that mean
// we can break the interface?)
- // Clear coloring of extension list
- v8::RegisteredExtension* current = v8::RegisteredExtension::first_extension();
- while (current != NULL) {
- current->set_state(v8::UNVISITED);
- current = current->next();
- }
+
+ ExtensionStates extension_states; // All extensions have state UNVISITED.
// Install auto extensions.
- current = v8::RegisteredExtension::first_extension();
+ v8::RegisteredExtension* current = v8::RegisteredExtension::first_extension();
while (current != NULL) {
if (current->extension()->auto_enable())
- InstallExtension(current);
+ InstallExtension(current, &extension_states);
current = current->next();
}
- if (FLAG_expose_gc) InstallExtension("v8/gc");
- if (FLAG_expose_externalize_string) InstallExtension("v8/externalize");
+ if (FLAG_expose_gc) InstallExtension("v8/gc", &extension_states);
+ if (FLAG_expose_externalize_string) {
+ InstallExtension("v8/externalize", &extension_states);
+ }
if (extensions == NULL) return true;
// Install required extensions
int count = v8::ImplementationUtilities::GetNameCount(extensions);
const char** names = v8::ImplementationUtilities::GetNames(extensions);
for (int i = 0; i < count; i++) {
- if (!InstallExtension(names[i]))
+ if (!InstallExtension(names[i], &extension_states))
return false;
}
@@ -1902,7 +2019,8 @@ bool Genesis::InstallExtensions(Handle<Context> global_context,
// Installs a named extension. This method is unoptimized and does
// not scale well if we want to support a large number of extensions.
-bool Genesis::InstallExtension(const char* name) {
+bool Genesis::InstallExtension(const char* name,
+ ExtensionStates* extension_states) {
v8::RegisteredExtension* current = v8::RegisteredExtension::first_extension();
// Loop until we find the relevant extension
while (current != NULL) {
@@ -1915,42 +2033,52 @@ bool Genesis::InstallExtension(const char* name) {
"v8::Context::New()", "Cannot find required extension");
return false;
}
- return InstallExtension(current);
+ return InstallExtension(current, extension_states);
}
-bool Genesis::InstallExtension(v8::RegisteredExtension* current) {
+bool Genesis::InstallExtension(v8::RegisteredExtension* current,
+ ExtensionStates* extension_states) {
HandleScope scope;
- if (current->state() == v8::INSTALLED) return true;
+ if (extension_states->get_state(current) == INSTALLED) return true;
// The current node has already been visited so there must be a
// cycle in the dependency graph; fail.
- if (current->state() == v8::VISITED) {
+ if (extension_states->get_state(current) == VISITED) {
v8::Utils::ReportApiFailure(
"v8::Context::New()", "Circular extension dependency");
return false;
}
- ASSERT(current->state() == v8::UNVISITED);
- current->set_state(v8::VISITED);
+ ASSERT(extension_states->get_state(current) == UNVISITED);
+ extension_states->set_state(current, VISITED);
v8::Extension* extension = current->extension();
// Install the extension's dependencies
for (int i = 0; i < extension->dependency_count(); i++) {
- if (!InstallExtension(extension->dependencies()[i])) return false;
+ if (!InstallExtension(extension->dependencies()[i], extension_states))
+ return false;
}
Isolate* isolate = Isolate::Current();
- Vector<const char> source = CStrVector(extension->source());
- Handle<String> source_code = isolate->factory()->NewStringFromAscii(source);
- bool result = CompileScriptCached(CStrVector(extension->name()),
- source_code,
- isolate->bootstrapper()->extensions_cache(),
- extension,
- Handle<Context>(isolate->context()),
- false);
+ Handle<String> source_code =
+ isolate->factory()->NewExternalStringFromAscii(extension->source());
+ bool result = CompileScriptCached(
+ CStrVector(extension->name()),
+ source_code,
+ isolate->bootstrapper()->extensions_cache(),
+ extension,
+ Handle<Context>(isolate->context()),
+ false);
ASSERT(isolate->has_pending_exception() != result);
if (!result) {
+ // Print the name of the extension that failed to install. When an error
+ // is thrown during bootstrapping, the isolate's error-reporting machinery
+ // has already printed the offending line number to the console.
+ OS::PrintError("Error installing extension '%s'.\n",
+ current->extension()->name());
isolate->clear_pending_exception();
}
- current->set_state(v8::INSTALLED);
+ extension_states->set_state(current, INSTALLED);
+ isolate->NotifyExtensionInstalled();
return result;
}
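The UNVISITED/VISITED/INSTALLED states move off the RegisteredExtension nodes into a side HashMap keyed by pointer, but the algorithm is unchanged: a depth-first walk that installs dependencies first and reports a cycle when it re-enters a node that is VISITED but not yet INSTALLED. The same logic in isolation, with a hypothetical Extension type and std::map standing in for v8's HashMap (simplified: the real code still marks a node INSTALLED even when its script fails to compile):

    #include <map>
    #include <vector>

    struct Extension {
      std::vector<Extension*> dependencies;
    };

    enum State { UNVISITED, VISITED, INSTALLED };  // UNVISITED == 0

    static bool Install(Extension* e, std::map<Extension*, State>* states) {
      State s = (*states)[e];           // absent entries read as UNVISITED
      if (s == INSTALLED) return true;  // already done via another path
      if (s == VISITED) return false;   // back-edge: circular dependency
      (*states)[e] = VISITED;
      for (size_t i = 0; i < e->dependencies.size(); ++i) {
        if (!Install(e->dependencies[i], states)) return false;
      }
      (*states)[e] = INSTALLED;         // post-order: dependencies first
      return true;
    }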
@@ -1967,7 +2095,9 @@ bool Genesis::InstallJSBuiltins(Handle<JSBuiltinsObject> builtins) {
builtins->set_javascript_builtin(id, *function);
Handle<SharedFunctionInfo> shared
= Handle<SharedFunctionInfo>(function->shared());
- if (!EnsureCompiled(shared, CLEAR_EXCEPTION)) return false;
+ if (!SharedFunctionInfo::EnsureCompiled(shared, CLEAR_EXCEPTION)) {
+ return false;
+ }
// Set the code object on the function object.
function->ReplaceCode(function->shared()->code());
builtins->set_javascript_builtin_code(id, shared->code());
@@ -2047,7 +2177,7 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from,
break;
}
case CALLBACKS: {
- LookupResult result;
+ LookupResult result(isolate());
to->LocalLookup(descs->GetKey(i), &result);
// If the property is already there we skip it
if (result.IsProperty()) continue;
@@ -2085,7 +2215,7 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from,
if (properties->IsKey(raw_key)) {
ASSERT(raw_key->IsString());
// If the property is already there we skip it.
- LookupResult result;
+ LookupResult result(isolate());
to->LocalLookup(String::cast(raw_key), &result);
if (result.IsProperty()) continue;
// Set the property.
diff --git a/deps/v8/src/builtins.cc b/deps/v8/src/builtins.cc
index e6a0699f0..43cf358d4 100644
--- a/deps/v8/src/builtins.cc
+++ b/deps/v8/src/builtins.cc
@@ -33,6 +33,7 @@
#include "builtins.h"
#include "gdb-jit.h"
#include "ic-inl.h"
+#include "mark-compact.h"
#include "vm-state-inl.h"
namespace v8 {
@@ -202,7 +203,7 @@ BUILTIN(ArrayCodeGeneric) {
}
// 'array' now contains the JSArray we should initialize.
- ASSERT(array->HasFastElements());
+ ASSERT(array->HasFastTypeElements());
// Optimize the case where there is one argument and the argument is a
// small smi.
@@ -215,7 +216,8 @@ BUILTIN(ArrayCodeGeneric) {
{ MaybeObject* maybe_obj = heap->AllocateFixedArrayWithHoles(len);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
- array->SetContent(FixedArray::cast(obj));
+ MaybeObject* maybe_obj = array->SetContent(FixedArray::cast(obj));
+ if (maybe_obj->IsFailure()) return maybe_obj;
return array;
}
}
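SetContent can now fail (it may need to allocate while transitioning the elements kind), so its result has to be checked like every other allocation in these builtins. The MaybeObject convention in miniature, with hypothetical simplified types: an allocating call returns either a value or a failure sentinel, and callers must test and propagate the failure rather than drop it:

    #include <cstdio>

    struct MaybeObjectSketch {
      bool failure;  // e.g. "allocation failed, retry after GC"
      int value;
      bool IsFailure() const { return failure; }
    };

    static MaybeObjectSketch Allocate(bool out_of_memory) {
      MaybeObjectSketch m = { out_of_memory, 42 };
      return m;
    }

    static MaybeObjectSketch UseAllocation() {
      MaybeObjectSketch maybe_obj = Allocate(false);
      if (maybe_obj.IsFailure()) return maybe_obj;  // propagate, never ignore
      std::printf("got %d\n", maybe_obj.value);
      return maybe_obj;
    }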
@@ -239,6 +241,11 @@ BUILTIN(ArrayCodeGeneric) {
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
+ // Set length and elements on the array.
+ MaybeObject* maybe_object =
+ array->EnsureCanContainElements(FixedArray::cast(obj));
+ if (maybe_object->IsFailure()) return maybe_object;
+
AssertNoAllocation no_gc;
FixedArray* elms = FixedArray::cast(obj);
WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
@@ -247,7 +254,6 @@ BUILTIN(ArrayCodeGeneric) {
elms->set(index, args[index+1], mode);
}
- // Set length and elements on the array.
array->set_elements(FixedArray::cast(obj));
array->set_length(len);
@@ -295,6 +301,7 @@ static void CopyElements(Heap* heap,
if (mode == UPDATE_WRITE_BARRIER) {
heap->RecordWrites(dst->address(), dst->OffsetOfElementAt(dst_index), len);
}
+ heap->incremental_marking()->RecordWrites(dst);
}
@@ -313,6 +320,7 @@ static void MoveElements(Heap* heap,
if (mode == UPDATE_WRITE_BARRIER) {
heap->RecordWrites(dst->address(), dst->OffsetOfElementAt(dst_index), len);
}
+ heap->incremental_marking()->RecordWrites(dst);
}
@@ -358,6 +366,14 @@ static FixedArray* LeftTrimFixedArray(Heap* heap,
former_start[to_trim] = heap->fixed_array_map();
former_start[to_trim + 1] = Smi::FromInt(len - to_trim);
+ // Maintain marking consistency for HeapObjectIterator and
+ // IncrementalMarking.
+ int size_delta = to_trim * kPointerSize;
+ if (heap->marking()->TransferMark(elms->address(),
+ elms->address() + size_delta)) {
+ MemoryChunk::IncrementLiveBytes(elms->address(), -size_delta);
+ }
+
return FixedArray::cast(HeapObject::FromAddress(
elms->address() + to_trim * kPointerSize));
}
@@ -369,9 +385,6 @@ static bool ArrayPrototypeHasNoElements(Heap* heap,
// This method depends on non writability of Object and Array prototype
// fields.
if (array_proto->elements() != heap->empty_fixed_array()) return false;
- // Hidden prototype
- array_proto = JSObject::cast(array_proto->GetPrototype());
- ASSERT(array_proto->elements() == heap->empty_fixed_array());
// Object.prototype
Object* proto = array_proto->GetPrototype();
if (proto == heap->null_value()) return false;
@@ -384,20 +397,42 @@ static bool ArrayPrototypeHasNoElements(Heap* heap,
MUST_USE_RESULT
static inline MaybeObject* EnsureJSArrayWithWritableFastElements(
- Heap* heap, Object* receiver) {
+ Heap* heap, Object* receiver, Arguments* args, int first_added_arg) {
if (!receiver->IsJSArray()) return NULL;
JSArray* array = JSArray::cast(receiver);
HeapObject* elms = array->elements();
- if (elms->map() == heap->fixed_array_map()) return elms;
- if (elms->map() == heap->fixed_cow_array_map()) {
- return array->EnsureWritableFastElements();
+ Map* map = elms->map();
+ if (map == heap->fixed_array_map()) {
+ if (args == NULL || !array->HasFastSmiOnlyElements()) {
+ return elms;
+ }
+ } else if (map == heap->fixed_cow_array_map()) {
+ MaybeObject* maybe_writable_result = array->EnsureWritableFastElements();
+ if (args == NULL || !array->HasFastSmiOnlyElements() ||
+ maybe_writable_result->IsFailure()) {
+ return maybe_writable_result;
+ }
+ } else {
+ return NULL;
}
- return NULL;
+
+ // Need to ensure that the arguments passed in args can be contained in
+ // the array.
+ int args_length = args->length();
+ if (first_added_arg >= args_length) return array->elements();
+
+ MaybeObject* maybe_array = array->EnsureCanContainElements(
+ args,
+ first_added_arg,
+ args_length - first_added_arg);
+ if (maybe_array->IsFailure()) return maybe_array;
+ return array->elements();
}
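The new args/first_added_arg parameters exist because arrays can now sit in a smi-only elements state: before a builtin stores the caller's arguments directly into the backing store, the array must transition to a more general kind if any argument is not a small integer. The idea in miniature, with hypothetical types (the real transition also rewrites the map and may reallocate the backing store):

    #include <vector>

    enum ElementsKind { FAST_SMI_ONLY_ELEMENTS, FAST_ELEMENTS };

    struct Value { bool is_smi; };

    struct ArraySketch {
      ElementsKind kind;
      std::vector<Value> elements;

      void EnsureCanContain(const Value& v) {
        if (kind == FAST_SMI_ONLY_ELEMENTS && !v.is_smi) {
          kind = FAST_ELEMENTS;  // one-way transition to a general kind
        }
      }

      void Push(const Value& v) {
        EnsureCanContain(v);    // what EnsureJSArrayWithWritableFastElements
        elements.push_back(v);  // now guarantees for the builtin's arguments
      }
    };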
static inline bool IsJSArrayFastElementMovingAllowed(Heap* heap,
JSArray* receiver) {
+ if (!FLAG_clever_optimizations) return false;
Context* global_context = heap->isolate()->context()->global_context();
JSObject* array_proto =
JSObject::cast(global_context->array_function()->prototype());
@@ -413,20 +448,18 @@ MUST_USE_RESULT static MaybeObject* CallJsBuiltin(
HandleScope handleScope(isolate);
Handle<Object> js_builtin =
- GetProperty(Handle<JSObject>(
- isolate->global_context()->builtins()),
- name);
- ASSERT(js_builtin->IsJSFunction());
- Handle<JSFunction> function(Handle<JSFunction>::cast(js_builtin));
- ScopedVector<Object**> argv(args.length() - 1);
- int n_args = args.length() - 1;
- for (int i = 0; i < n_args; i++) {
- argv[i] = args.at<Object>(i + 1).location();
- }
- bool pending_exception = false;
+ GetProperty(Handle<JSObject>(isolate->global_context()->builtins()),
+ name);
+ Handle<JSFunction> function = Handle<JSFunction>::cast(js_builtin);
+ int argc = args.length() - 1;
+ ScopedVector<Handle<Object> > argv(argc);
+ for (int i = 0; i < argc; ++i) {
+ argv[i] = args.at<Object>(i + 1);
+ }
+ bool pending_exception;
Handle<Object> result = Execution::Call(function,
args.receiver(),
- n_args,
+ argc,
argv.start(),
&pending_exception);
if (pending_exception) return Failure::Exception();
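CallJsBuiltin's argument vector changes from raw Object** locations to Handle<Object>, which matters because Execution::Call can allocate and thereby trigger a moving GC; a handle adds one level of indirection through a slot the collector rewrites when objects move. A toy model of that indirection, where the "GC" is just a manual move:

    #include <cassert>

    struct Object { int tag; };

    struct HandleSketch {
      Object** location;                // slot tracked by the collector
      Object* operator*() const { return *location; }
    };

    int main() {
      Object a = { 1 };
      Object* slot = &a;                // GC-visible slot
      HandleSketch h = { &slot };
      Object b = a;                     // "GC" moves the object...
      slot = &b;                        // ...and rewrites the tracked slot
      assert((*h)->tag == 1);           // the handle is still valid
      return 0;
    }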
@@ -439,7 +472,7 @@ BUILTIN(ArrayPush) {
Object* receiver = *args.receiver();
Object* elms_obj;
{ MaybeObject* maybe_elms_obj =
- EnsureJSArrayWithWritableFastElements(heap, receiver);
+ EnsureJSArrayWithWritableFastElements(heap, receiver, &args, 1);
if (maybe_elms_obj == NULL) {
return CallJsBuiltin(isolate, "ArrayPush", args);
}
@@ -475,7 +508,6 @@ BUILTIN(ArrayPush) {
FillWithHoles(heap, new_elms, new_length, capacity);
elms = new_elms;
- array->set_elements(elms);
}
// Add the provided values.
@@ -485,6 +517,10 @@ BUILTIN(ArrayPush) {
elms->set(index + len, args[index + 1], mode);
}
+ if (elms != array->elements()) {
+ array->set_elements(elms);
+ }
+
// Set the length.
array->set_length(Smi::FromInt(new_length));
return Smi::FromInt(new_length);
@@ -496,7 +532,7 @@ BUILTIN(ArrayPop) {
Object* receiver = *args.receiver();
Object* elms_obj;
{ MaybeObject* maybe_elms_obj =
- EnsureJSArrayWithWritableFastElements(heap, receiver);
+ EnsureJSArrayWithWritableFastElements(heap, receiver, NULL, 0);
if (maybe_elms_obj == NULL) return CallJsBuiltin(isolate, "ArrayPop", args);
if (!maybe_elms_obj->ToObject(&elms_obj)) return maybe_elms_obj;
}
@@ -529,7 +565,7 @@ BUILTIN(ArrayShift) {
Object* receiver = *args.receiver();
Object* elms_obj;
{ MaybeObject* maybe_elms_obj =
- EnsureJSArrayWithWritableFastElements(heap, receiver);
+ EnsureJSArrayWithWritableFastElements(heap, receiver, NULL, 0);
if (maybe_elms_obj == NULL)
return CallJsBuiltin(isolate, "ArrayShift", args);
if (!maybe_elms_obj->ToObject(&elms_obj)) return maybe_elms_obj;
@@ -539,7 +575,7 @@ BUILTIN(ArrayShift) {
}
FixedArray* elms = FixedArray::cast(elms_obj);
JSArray* array = JSArray::cast(receiver);
- ASSERT(array->HasFastElements());
+ ASSERT(array->HasFastTypeElements());
int len = Smi::cast(array->length())->value();
if (len == 0) return heap->undefined_value();
@@ -551,9 +587,7 @@ BUILTIN(ArrayShift) {
}
if (!heap->lo_space()->Contains(elms)) {
- // As elms still in the same space they used to be,
- // there is no need to update region dirty mark.
- array->set_elements(LeftTrimFixedArray(heap, elms, 1), SKIP_WRITE_BARRIER);
+ array->set_elements(LeftTrimFixedArray(heap, elms, 1));
} else {
// Shift the elements.
AssertNoAllocation no_gc;
@@ -573,7 +607,7 @@ BUILTIN(ArrayUnshift) {
Object* receiver = *args.receiver();
Object* elms_obj;
{ MaybeObject* maybe_elms_obj =
- EnsureJSArrayWithWritableFastElements(heap, receiver);
+ EnsureJSArrayWithWritableFastElements(heap, receiver, NULL, 0);
if (maybe_elms_obj == NULL)
return CallJsBuiltin(isolate, "ArrayUnshift", args);
if (!maybe_elms_obj->ToObject(&elms_obj)) return maybe_elms_obj;
@@ -583,7 +617,7 @@ BUILTIN(ArrayUnshift) {
}
FixedArray* elms = FixedArray::cast(elms_obj);
JSArray* array = JSArray::cast(receiver);
- ASSERT(array->HasFastElements());
+ ASSERT(array->HasFastTypeElements());
int len = Smi::cast(array->length())->value();
int to_add = args.length() - 1;
@@ -592,6 +626,10 @@ BUILTIN(ArrayUnshift) {
// we should never hit this case.
ASSERT(to_add <= (Smi::kMaxValue - len));
+ MaybeObject* maybe_object =
+ array->EnsureCanContainElements(&args, 1, to_add);
+ if (maybe_object->IsFailure()) return maybe_object;
+
if (new_length > elms->length()) {
// New backing storage is needed.
int capacity = new_length + (new_length >> 1) + 16;
@@ -600,13 +638,11 @@ BUILTIN(ArrayUnshift) {
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
FixedArray* new_elms = FixedArray::cast(obj);
-
AssertNoAllocation no_gc;
if (len > 0) {
CopyElements(heap, &no_gc, new_elms, to_add, elms, 0, len);
}
FillWithHoles(heap, new_elms, new_length, capacity);
-
elms = new_elms;
array->set_elements(elms);
} else {
@@ -634,7 +670,7 @@ BUILTIN(ArraySlice) {
int len = -1;
if (receiver->IsJSArray()) {
JSArray* array = JSArray::cast(receiver);
- if (!array->HasFastElements() ||
+ if (!array->HasFastTypeElements() ||
!IsJSArrayFastElementMovingAllowed(heap, array)) {
return CallJsBuiltin(isolate, "ArraySlice", args);
}
@@ -650,7 +686,7 @@ BUILTIN(ArraySlice) {
bool is_arguments_object_with_fast_elements =
receiver->IsJSObject()
&& JSObject::cast(receiver)->map() == arguments_map
- && JSObject::cast(receiver)->HasFastElements();
+ && JSObject::cast(receiver)->HasFastTypeElements();
if (!is_arguments_object_with_fast_elements) {
return CallJsBuiltin(isolate, "ArraySlice", args);
}
@@ -721,6 +757,10 @@ BUILTIN(ArraySlice) {
}
FixedArray* result_elms = FixedArray::cast(result);
+ MaybeObject* maybe_object =
+ result_array->EnsureCanContainElements(result_elms);
+ if (maybe_object->IsFailure()) return maybe_object;
+
AssertNoAllocation no_gc;
CopyElements(heap, &no_gc, result_elms, 0, elms, k, result_len);
@@ -729,6 +769,14 @@ BUILTIN(ArraySlice) {
// Set the length.
result_array->set_length(Smi::FromInt(result_len));
+
+ // Set the ElementsKind.
+ ElementsKind elements_kind = JSObject::cast(receiver)->GetElementsKind();
+ if (IsMoreGeneralElementsKindTransition(result_array->GetElementsKind(),
+ elements_kind)) {
+ MaybeObject* maybe = result_array->TransitionElementsKind(elements_kind);
+ if (maybe->IsFailure()) return maybe;
+ }
return result_array;
}
@@ -738,7 +786,7 @@ BUILTIN(ArraySplice) {
Object* receiver = *args.receiver();
Object* elms_obj;
{ MaybeObject* maybe_elms_obj =
- EnsureJSArrayWithWritableFastElements(heap, receiver);
+ EnsureJSArrayWithWritableFastElements(heap, receiver, &args, 3);
if (maybe_elms_obj == NULL)
return CallJsBuiltin(isolate, "ArraySplice", args);
if (!maybe_elms_obj->ToObject(&elms_obj)) return maybe_elms_obj;
@@ -748,7 +796,7 @@ BUILTIN(ArraySplice) {
}
FixedArray* elms = FixedArray::cast(elms_obj);
JSArray* array = JSArray::cast(receiver);
- ASSERT(array->HasFastElements());
+ ASSERT(array->HasFastTypeElements());
int len = Smi::cast(array->length())->value();
@@ -822,12 +870,20 @@ BUILTIN(ArraySplice) {
// Set the length.
result_array->set_length(Smi::FromInt(actual_delete_count));
+
+ // Set the ElementsKind.
+ ElementsKind elements_kind = array->GetElementsKind();
+ if (IsMoreGeneralElementsKindTransition(result_array->GetElementsKind(),
+ elements_kind)) {
+ MaybeObject* maybe = result_array->TransitionElementsKind(elements_kind);
+ if (maybe->IsFailure()) return maybe;
+ }
}
int item_count = (n_arguments > 1) ? (n_arguments - 2) : 0;
-
int new_length = len - actual_delete_count + item_count;
+ bool elms_changed = false;
if (item_count < actual_delete_count) {
// Shrink the array.
const bool trim_array = !heap->lo_space()->Contains(elms) &&
@@ -842,7 +898,8 @@ BUILTIN(ArraySplice) {
}
elms = LeftTrimFixedArray(heap, elms, delta);
- array->set_elements(elms, SKIP_WRITE_BARRIER);
+
+ elms_changed = true;
} else {
AssertNoAllocation no_gc;
MoveElements(heap, &no_gc,
@@ -882,7 +939,7 @@ BUILTIN(ArraySplice) {
FillWithHoles(heap, new_elms, new_length, capacity);
elms = new_elms;
- array->set_elements(elms);
+ elms_changed = true;
} else {
AssertNoAllocation no_gc;
MoveElements(heap, &no_gc,
@@ -898,6 +955,10 @@ BUILTIN(ArraySplice) {
elms->set(k, args[3 + k - actual_start], mode);
}
+ if (elms_changed) {
+ array->set_elements(elms);
+ }
+
// Set the length.
array->set_length(Smi::FromInt(new_length));
@@ -920,7 +981,7 @@ BUILTIN(ArrayConcat) {
int result_len = 0;
for (int i = 0; i < n_arguments; i++) {
Object* arg = args[i];
- if (!arg->IsJSArray() || !JSArray::cast(arg)->HasFastElements()
+ if (!arg->IsJSArray() || !JSArray::cast(arg)->HasFastTypeElements()
|| JSArray::cast(arg)->GetPrototype() != array_proto) {
return CallJsBuiltin(isolate, "ArrayConcat", args);
}
@@ -956,6 +1017,17 @@ BUILTIN(ArrayConcat) {
}
FixedArray* result_elms = FixedArray::cast(result);
+ // Ensure element type transitions happen before copying elements in.
+ if (result_array->HasFastSmiOnlyElements()) {
+ for (int i = 0; i < n_arguments; i++) {
+ JSArray* array = JSArray::cast(args[i]);
+ if (!array->HasFastSmiOnlyElements()) {
+ result_array->EnsureCanContainNonSmiElements();
+ break;
+ }
+ }
+ }
+
// Copy data.
AssertNoAllocation no_gc;
int start_pos = 0;
@@ -1448,6 +1520,14 @@ static void Generate_KeyedStoreIC_NonStrictArguments(MacroAssembler* masm) {
KeyedStoreIC::GenerateNonStrictArguments(masm);
}
+static void Generate_TransitionElementsSmiToDouble(MacroAssembler* masm) {
+ KeyedStoreIC::GenerateTransitionElementsSmiToDouble(masm);
+}
+
+static void Generate_TransitionElementsDoubleToObject(MacroAssembler* masm) {
+ KeyedStoreIC::GenerateTransitionElementsDoubleToObject(masm);
+}
+
#ifdef ENABLE_DEBUGGER_SUPPORT
static void Generate_LoadIC_DebugBreak(MacroAssembler* masm) {
Debug::GenerateLoadICDebugBreak(masm);
@@ -1479,8 +1559,8 @@ static void Generate_Return_DebugBreak(MacroAssembler* masm) {
}
-static void Generate_StubNoRegisters_DebugBreak(MacroAssembler* masm) {
- Debug::GenerateStubNoRegistersDebugBreak(masm);
+static void Generate_CallFunctionStub_DebugBreak(MacroAssembler* masm) {
+ Debug::GenerateCallFunctionStubDebugBreak(masm);
}
@@ -1607,20 +1687,22 @@ void Builtins::Setup(bool create_heap_objects) {
const BuiltinDesc* functions = BuiltinFunctionTable::functions();
// For now we generate builtin adaptor code into a stack-allocated
- // buffer, before copying it into individual code objects.
- byte buffer[4*KB];
+ // buffer, before copying it into individual code objects. Be careful
+ // with alignment; some platforms don't like unaligned code.
+ union { int force_alignment; byte buffer[4*KB]; } u;
// Traverse the list of builtins and generate an adaptor in a
// separate code object for each one.
for (int i = 0; i < builtin_count; i++) {
if (create_heap_objects) {
- MacroAssembler masm(isolate, buffer, sizeof buffer);
+ MacroAssembler masm(isolate, u.buffer, sizeof u.buffer);
// Generate the code/adaptor.
typedef void (*Generator)(MacroAssembler*, int, BuiltinExtraArguments);
Generator g = FUNCTION_CAST<Generator>(functions[i].generator);
// We pass all arguments to the generator, but it may not use all of
// them. This works because the first arguments are on top of the
// stack.
+ ASSERT(!masm.has_frame());
g(&masm, functions[i].name, functions[i].extra_args);
// Move the code into the object heap.
CodeDesc desc;
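
The union above addresses a real portability hazard: a plain byte array on the stack is only guaranteed char alignment, and emitting machine code into an unaligned buffer can fault on strict-alignment targets. Pairing the buffer with an int in a union raises the whole object to int alignment. Stand-alone illustration (the names are mine, not V8's):

#include <cstddef>

static const size_t kBufferSize = 4 * 1024;

// force_alignment is never read; it exists only so the union, and hence
// the buffer inside it, is placed at an int-aligned address.
union AlignedCodeBuffer {
  int force_alignment;
  unsigned char buffer[kBufferSize];
};
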
diff --git a/deps/v8/src/builtins.h b/deps/v8/src/builtins.h
index 31090d3a0..3659f9912 100644
--- a/deps/v8/src/builtins.h
+++ b/deps/v8/src/builtins.h
@@ -167,6 +167,10 @@ enum BuiltinExtraArguments {
kStrictMode) \
V(KeyedStoreIC_NonStrictArguments, KEYED_STORE_IC, MEGAMORPHIC, \
Code::kNoExtraICState) \
+ V(TransitionElementsSmiToDouble, BUILTIN, UNINITIALIZED, \
+ Code::kNoExtraICState) \
+ V(TransitionElementsDoubleToObject, BUILTIN, UNINITIALIZED, \
+ Code::kNoExtraICState) \
\
/* Uses KeyedLoadIC_Initialize; must be after in list. */ \
V(FunctionCall, BUILTIN, UNINITIALIZED, \
@@ -188,27 +192,27 @@ enum BuiltinExtraArguments {
#ifdef ENABLE_DEBUGGER_SUPPORT
// Define list of builtins used by the debugger implemented in assembly.
-#define BUILTIN_LIST_DEBUG_A(V) \
- V(Return_DebugBreak, BUILTIN, DEBUG_BREAK, \
- Code::kNoExtraICState) \
- V(ConstructCall_DebugBreak, BUILTIN, DEBUG_BREAK, \
- Code::kNoExtraICState) \
- V(StubNoRegisters_DebugBreak, BUILTIN, DEBUG_BREAK, \
- Code::kNoExtraICState) \
- V(LoadIC_DebugBreak, LOAD_IC, DEBUG_BREAK, \
- Code::kNoExtraICState) \
- V(KeyedLoadIC_DebugBreak, KEYED_LOAD_IC, DEBUG_BREAK, \
- Code::kNoExtraICState) \
- V(StoreIC_DebugBreak, STORE_IC, DEBUG_BREAK, \
- Code::kNoExtraICState) \
- V(KeyedStoreIC_DebugBreak, KEYED_STORE_IC, DEBUG_BREAK, \
- Code::kNoExtraICState) \
- V(Slot_DebugBreak, BUILTIN, DEBUG_BREAK, \
- Code::kNoExtraICState) \
- V(PlainReturn_LiveEdit, BUILTIN, DEBUG_BREAK, \
- Code::kNoExtraICState) \
- V(FrameDropper_LiveEdit, BUILTIN, DEBUG_BREAK, \
- Code::kNoExtraICState)
+#define BUILTIN_LIST_DEBUG_A(V) \
+ V(Return_DebugBreak, BUILTIN, DEBUG_BREAK, \
+ Code::kNoExtraICState) \
+ V(ConstructCall_DebugBreak, BUILTIN, DEBUG_BREAK, \
+ Code::kNoExtraICState) \
+ V(CallFunctionStub_DebugBreak, BUILTIN, DEBUG_BREAK, \
+ Code::kNoExtraICState) \
+ V(LoadIC_DebugBreak, LOAD_IC, DEBUG_BREAK, \
+ Code::kNoExtraICState) \
+ V(KeyedLoadIC_DebugBreak, KEYED_LOAD_IC, DEBUG_BREAK, \
+ Code::kNoExtraICState) \
+ V(StoreIC_DebugBreak, STORE_IC, DEBUG_BREAK, \
+ Code::kNoExtraICState) \
+ V(KeyedStoreIC_DebugBreak, KEYED_STORE_IC, DEBUG_BREAK, \
+ Code::kNoExtraICState) \
+ V(Slot_DebugBreak, BUILTIN, DEBUG_BREAK, \
+ Code::kNoExtraICState) \
+ V(PlainReturn_LiveEdit, BUILTIN, DEBUG_BREAK, \
+ Code::kNoExtraICState) \
+ V(FrameDropper_LiveEdit, BUILTIN, DEBUG_BREAK, \
+ Code::kNoExtraICState)
#else
#define BUILTIN_LIST_DEBUG_A(V)
#endif
@@ -234,7 +238,6 @@ enum BuiltinExtraArguments {
V(DELETE, 2) \
V(IN, 1) \
V(INSTANCE_OF, 1) \
- V(GET_KEYS, 0) \
V(FILTER_KEY, 1) \
V(CALL_NON_FUNCTION, 0) \
V(CALL_NON_FUNCTION_AS_CONSTRUCTOR, 0) \
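
The V(...) tables being edited here are X-macros: each builtin is listed once and the list is expanded several ways (enum values, code-generation tables, debug names). A minimal stand-alone example of the idiom, deliberately unrelated to the real builtin list:

#define COLOR_LIST(V) \
  V(Red,   0xFF0000)  \
  V(Green, 0x00FF00)  \
  V(Blue,  0x0000FF)

// Expansion 1: an enum of names.
enum Color {
#define DECLARE_COLOR(name, rgb) k##name,
  COLOR_LIST(DECLARE_COLOR)
#undef DECLARE_COLOR
  kColorCount
};

// Expansion 2: a parallel table of values, guaranteed to stay in sync.
static const int kColorValues[] = {
#define COLOR_VALUE(name, rgb) rgb,
  COLOR_LIST(COLOR_VALUE)
#undef COLOR_VALUE
};
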
diff --git a/deps/v8/src/bytecodes-irregexp.h b/deps/v8/src/bytecodes-irregexp.h
index 93218ea9f..b13efb36f 100644
--- a/deps/v8/src/bytecodes-irregexp.h
+++ b/deps/v8/src/bytecodes-irregexp.h
@@ -1,4 +1,4 @@
-// Copyright 2008-2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -33,12 +33,12 @@ namespace v8 {
namespace internal {
-static const int BYTECODE_MASK = 0xff;
+const int BYTECODE_MASK = 0xff;
// The first argument is packed in with the byte code in one word, so it
// has 24 bits; but since it can be positive or negative, only 23 bits are
// usable for positive values.
-static const unsigned int MAX_FIRST_ARG = 0x7fffffu;
-static const int BYTECODE_SHIFT = 8;
+const unsigned int MAX_FIRST_ARG = 0x7fffffu;
+const int BYTECODE_SHIFT = 8;
#define BYTECODE_ITERATOR(V) \
V(BREAK, 0, 4) /* bc8 */ \
diff --git a/deps/v8/src/cached-powers.cc b/deps/v8/src/cached-powers.cc
index 30a67a661..9241d2658 100644
--- a/deps/v8/src/cached-powers.cc
+++ b/deps/v8/src/cached-powers.cc
@@ -134,14 +134,12 @@ static const CachedPower kCachedPowers[] = {
};
static const int kCachedPowersLength = ARRAY_SIZE(kCachedPowers);
-static const int kCachedPowersOffset = -kCachedPowers[0].decimal_exponent;
+static const int kCachedPowersOffset = 348; // -1 * the first decimal_exponent.
static const double kD_1_LOG2_10 = 0.30102999566398114; // 1 / lg(10)
-const int PowersOfTenCache::kDecimalExponentDistance =
- kCachedPowers[1].decimal_exponent - kCachedPowers[0].decimal_exponent;
-const int PowersOfTenCache::kMinDecimalExponent =
- kCachedPowers[0].decimal_exponent;
-const int PowersOfTenCache::kMaxDecimalExponent =
- kCachedPowers[kCachedPowersLength - 1].decimal_exponent;
+// Difference between the decimal exponents in the table above.
+const int PowersOfTenCache::kDecimalExponentDistance = 8;
+const int PowersOfTenCache::kMinDecimalExponent = -348;
+const int PowersOfTenCache::kMaxDecimalExponent = 340;
void PowersOfTenCache::GetCachedPowerForBinaryExponentRange(
int min_exponent,
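
Swapping the computed initializers for literals is about initialization order: a namespace-scope constant initialized from another object's contents is dynamically initialized in C++, and code in a different translation unit may read it before it has been set. Literals like 348 are constant expressions and exist from the start. A debug-build check can keep such literals honest against the table they summarize; a sketch under assumed, stubbed-out declarations:

#include <cassert>

struct CachedPower { int decimal_exponent; };
static const CachedPower kCachedPowers[] = { { -348 }, { -340 } };  // stub
static const int kCachedPowersOffset = 348;  // -1 * first decimal_exponent

static void VerifyCachedPowerConstants() {
  // Fires if the hard-coded literal ever drifts from the table.
  assert(kCachedPowersOffset == -kCachedPowers[0].decimal_exponent);
}
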
diff --git a/deps/v8/src/char-predicates-inl.h b/deps/v8/src/char-predicates-inl.h
index 0dfc80d0b..1a89ef3b1 100644
--- a/deps/v8/src/char-predicates-inl.h
+++ b/deps/v8/src/char-predicates-inl.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -52,7 +52,7 @@ inline bool IsLineFeed(uc32 c) {
}
-static inline bool IsInRange(int value, int lower_limit, int higher_limit) {
+inline bool IsInRange(int value, int lower_limit, int higher_limit) {
ASSERT(lower_limit <= higher_limit);
return static_cast<unsigned int>(value - lower_limit) <=
static_cast<unsigned int>(higher_limit - lower_limit);
diff --git a/deps/v8/src/checks.h b/deps/v8/src/checks.h
index 2f359f6cd..8608b0eba 100644
--- a/deps/v8/src/checks.h
+++ b/deps/v8/src/checks.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -52,10 +52,10 @@ extern "C" void V8_Fatal(const char* file, int line, const char* format, ...);
// Used by the CHECK macro -- should not be called directly.
-static inline void CheckHelper(const char* file,
- int line,
- const char* source,
- bool condition) {
+inline void CheckHelper(const char* file,
+ int line,
+ const char* source,
+ bool condition) {
if (!condition)
V8_Fatal(file, line, "CHECK(%s) failed", source);
}
@@ -63,14 +63,16 @@ static inline void CheckHelper(const char* file,
// The CHECK macro checks that the given condition is true; if not, it
// prints a message to stderr and aborts.
-#define CHECK(condition) CheckHelper(__FILE__, __LINE__, #condition, condition)
+#define CHECK(condition) do { \
+ if (!(condition)) CheckHelper(__FILE__, __LINE__, #condition, false); \
+ } while (0)
// Helper function used by the CHECK_EQ function when given int
// arguments. Should not be called directly.
-static inline void CheckEqualsHelper(const char* file, int line,
- const char* expected_source, int expected,
- const char* value_source, int value) {
+inline void CheckEqualsHelper(const char* file, int line,
+ const char* expected_source, int expected,
+ const char* value_source, int value) {
if (expected != value) {
V8_Fatal(file, line,
"CHECK_EQ(%s, %s) failed\n# Expected: %i\n# Found: %i",
@@ -81,11 +83,11 @@ static inline void CheckEqualsHelper(const char* file, int line,
// Helper function used by the CHECK_EQ function when given int64_t
// arguments. Should not be called directly.
-static inline void CheckEqualsHelper(const char* file, int line,
- const char* expected_source,
- int64_t expected,
- const char* value_source,
- int64_t value) {
+inline void CheckEqualsHelper(const char* file, int line,
+ const char* expected_source,
+ int64_t expected,
+ const char* value_source,
+ int64_t value) {
if (expected != value) {
// Print int64_t values in hex, as two int32s,
// to avoid platform-dependencies.
@@ -103,12 +105,12 @@ static inline void CheckEqualsHelper(const char* file, int line,
// Helper function used by the CHECK_NE function when given int
// arguments. Should not be called directly.
-static inline void CheckNonEqualsHelper(const char* file,
- int line,
- const char* unexpected_source,
- int unexpected,
- const char* value_source,
- int value) {
+inline void CheckNonEqualsHelper(const char* file,
+ int line,
+ const char* unexpected_source,
+ int unexpected,
+ const char* value_source,
+ int value) {
if (unexpected == value) {
V8_Fatal(file, line, "CHECK_NE(%s, %s) failed\n# Value: %i",
unexpected_source, value_source, value);
@@ -118,12 +120,12 @@ static inline void CheckNonEqualsHelper(const char* file,
// Helper function used by the CHECK function when given string
// arguments. Should not be called directly.
-static inline void CheckEqualsHelper(const char* file,
- int line,
- const char* expected_source,
- const char* expected,
- const char* value_source,
- const char* value) {
+inline void CheckEqualsHelper(const char* file,
+ int line,
+ const char* expected_source,
+ const char* expected,
+ const char* value_source,
+ const char* value) {
if ((expected == NULL && value != NULL) ||
(expected != NULL && value == NULL) ||
(expected != NULL && value != NULL && strcmp(expected, value) != 0)) {
@@ -134,12 +136,12 @@ static inline void CheckEqualsHelper(const char* file,
}
-static inline void CheckNonEqualsHelper(const char* file,
- int line,
- const char* expected_source,
- const char* expected,
- const char* value_source,
- const char* value) {
+inline void CheckNonEqualsHelper(const char* file,
+ int line,
+ const char* expected_source,
+ const char* expected,
+ const char* value_source,
+ const char* value) {
if (expected == value ||
(expected != NULL && value != NULL && strcmp(expected, value) == 0)) {
V8_Fatal(file, line, "CHECK_NE(%s, %s) failed\n# Value: %s",
@@ -150,12 +152,12 @@ static inline void CheckNonEqualsHelper(const char* file,
// Helper function used by the CHECK function when given pointer
// arguments. Should not be called directly.
-static inline void CheckEqualsHelper(const char* file,
- int line,
- const char* expected_source,
- const void* expected,
- const char* value_source,
- const void* value) {
+inline void CheckEqualsHelper(const char* file,
+ int line,
+ const char* expected_source,
+ const void* expected,
+ const char* value_source,
+ const void* value) {
if (expected != value) {
V8_Fatal(file, line,
"CHECK_EQ(%s, %s) failed\n# Expected: %p\n# Found: %p",
@@ -165,12 +167,12 @@ static inline void CheckEqualsHelper(const char* file,
}
-static inline void CheckNonEqualsHelper(const char* file,
- int line,
- const char* expected_source,
- const void* expected,
- const char* value_source,
- const void* value) {
+inline void CheckNonEqualsHelper(const char* file,
+ int line,
+ const char* expected_source,
+ const void* expected,
+ const char* value_source,
+ const void* value) {
if (expected == value) {
V8_Fatal(file, line, "CHECK_NE(%s, %s) failed\n# Value: %p",
expected_source, value_source, value);
@@ -180,12 +182,12 @@ static inline void CheckNonEqualsHelper(const char* file,
// Helper function used by the CHECK function when given floating
// point arguments. Should not be called directly.
-static inline void CheckEqualsHelper(const char* file,
- int line,
- const char* expected_source,
- double expected,
- const char* value_source,
- double value) {
+inline void CheckEqualsHelper(const char* file,
+ int line,
+ const char* expected_source,
+ double expected,
+ const char* value_source,
+ double value) {
// Force values to 64 bit memory to truncate 80 bit precision on IA32.
volatile double* exp = new double[1];
*exp = expected;
@@ -201,12 +203,12 @@ static inline void CheckEqualsHelper(const char* file,
}
-static inline void CheckNonEqualsHelper(const char* file,
- int line,
- const char* expected_source,
- double expected,
- const char* value_source,
- double value) {
+inline void CheckNonEqualsHelper(const char* file,
+ int line,
+ const char* expected_source,
+ double expected,
+ const char* value_source,
+ double value) {
// Force values to 64 bit memory to truncate 80 bit precision on IA32.
volatile double* exp = new double[1];
*exp = expected;
@@ -257,11 +259,8 @@ template <int> class StaticAssertionHelper { };
SEMI_STATIC_JOIN(__StaticAssertTypedef__, __LINE__)
-namespace v8 { namespace internal {
+extern bool FLAG_enable_slow_asserts;
-bool EnableSlowAsserts();
-
-} } // namespace v8::internal
// The ASSERT macro is equivalent to CHECK except that it only
// generates code in debug builds.
@@ -273,7 +272,7 @@ bool EnableSlowAsserts();
#define ASSERT_GE(v1, v2) CHECK_GE(v1, v2)
#define ASSERT_LT(v1, v2) CHECK_LT(v1, v2)
#define ASSERT_LE(v1, v2) CHECK_LE(v1, v2)
-#define SLOW_ASSERT(condition) if (EnableSlowAsserts()) CHECK(condition)
+#define SLOW_ASSERT(condition) if (FLAG_enable_slow_asserts) CHECK(condition)
#else
#define ASSERT_RESULT(expr) (expr)
#define ASSERT(condition) ((void) 0)
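
Two details of the rewritten CHECK are worth noting: the do { ... } while (0) wrapper makes the expansion behave as one statement under if/else, and testing the condition at the call site means CheckHelper is only entered on failure. Stand-alone illustration (Fail is a hypothetical reporter):

#include <cstdio>
#include <cstdlib>

static void Fail(const char* expr) {
  std::fprintf(stderr, "CHECK(%s) failed\n", expr);
  std::abort();
}

#define GOOD_CHECK(c) do { if (!(c)) Fail(#c); } while (0)

static void Example(bool x, bool y) {
  if (x)
    GOOD_CHECK(y);       // one statement, so the else below binds correctly
  else
    std::puts("skipped");
  // Had the macro expanded to a bare 'if (!(c)) Fail(#c)', the 'else'
  // above would have silently attached to the macro's internal if.
}
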
diff --git a/deps/v8/src/code-stubs.cc b/deps/v8/src/code-stubs.cc
index 00da4cba6..ba7df802f 100644
--- a/deps/v8/src/code-stubs.cc
+++ b/deps/v8/src/code-stubs.cc
@@ -52,11 +52,12 @@ void CodeStub::GenerateCode(MacroAssembler* masm) {
// Update the static counter each time a new code stub is generated.
masm->isolate()->counters()->code_stubs()->Increment();
- // Nested stubs are not allowed for leafs.
- AllowStubCallsScope allow_scope(masm, AllowsStubCalls());
+ // Nested stubs are not allowed for leaves.
+ AllowStubCallsScope allow_scope(masm, false);
// Generate the code for the stub.
masm->set_generating_stub(true);
+ NoCurrentFrameScope scope(masm);
Generate(masm);
}
@@ -118,7 +119,7 @@ Handle<Code> CodeStub::GetCode() {
Handle<Code> new_object = factory->NewCode(
desc, flags, masm.CodeObject(), NeedsImmovableCode());
RecordCodeGeneration(*new_object, &masm);
- FinishCode(*new_object);
+ FinishCode(new_object);
// Update the dictionary and the root in Heap.
Handle<NumberDictionary> dict =
@@ -127,8 +128,10 @@ Handle<Code> CodeStub::GetCode() {
GetKey(),
new_object);
heap->public_set_code_stubs(*dict);
-
code = *new_object;
+ Activate(code);
+ } else {
+ CHECK(IsPregenerated() == code->is_pregenerated());
}
ASSERT(!NeedsImmovableCode() || heap->lo_space()->Contains(code));
@@ -136,43 +139,6 @@ Handle<Code> CodeStub::GetCode() {
}
-MaybeObject* CodeStub::TryGetCode() {
- Code* code;
- if (!FindCodeInCache(&code)) {
- // Generate the new code.
- MacroAssembler masm(Isolate::Current(), NULL, 256);
- GenerateCode(&masm);
- Heap* heap = masm.isolate()->heap();
-
- // Create the code object.
- CodeDesc desc;
- masm.GetCode(&desc);
-
- // Try to copy the generated code into a heap object.
- Code::Flags flags = Code::ComputeFlags(
- static_cast<Code::Kind>(GetCodeKind()),
- GetICState());
- Object* new_object;
- { MaybeObject* maybe_new_object =
- heap->CreateCode(desc, flags, masm.CodeObject());
- if (!maybe_new_object->ToObject(&new_object)) return maybe_new_object;
- }
- code = Code::cast(new_object);
- RecordCodeGeneration(code, &masm);
- FinishCode(code);
-
- // Try to update the code cache but do not fail if unable.
- MaybeObject* maybe_new_object =
- heap->code_stubs()->AtNumberPut(GetKey(), code);
- if (maybe_new_object->ToObject(&new_object)) {
- heap->public_set_code_stubs(NumberDictionary::cast(new_object));
- }
- }
-
- return code;
-}
-
-
const char* CodeStub::MajorName(CodeStub::Major major_key,
bool allow_unknown_keys) {
switch (major_key) {
@@ -188,6 +154,11 @@ const char* CodeStub::MajorName(CodeStub::Major major_key,
}
+void CodeStub::PrintName(StringStream* stream) {
+ stream->Add("%s", MajorName(MajorKey(), false));
+}
+
+
int ICCompareStub::MinorKey() {
return OpField::encode(op_ - Token::EQ) | StateField::encode(state_);
}
@@ -242,9 +213,18 @@ void InstanceofStub::PrintName(StringStream* stream) {
}
+void JSEntryStub::FinishCode(Handle<Code> code) {
+ Handle<FixedArray> handler_table =
+ code->GetIsolate()->factory()->NewFixedArray(1, TENURED);
+ handler_table->set(0, Smi::FromInt(handler_offset_));
+ code->set_handler_table(*handler_table);
+}
+
+
void KeyedLoadElementStub::Generate(MacroAssembler* masm) {
switch (elements_kind_) {
case FAST_ELEMENTS:
+ case FAST_SMI_ONLY_ELEMENTS:
KeyedLoadStubCompiler::GenerateLoadFastElement(masm);
break;
case FAST_DOUBLE_ELEMENTS:
@@ -274,7 +254,11 @@ void KeyedLoadElementStub::Generate(MacroAssembler* masm) {
void KeyedStoreElementStub::Generate(MacroAssembler* masm) {
switch (elements_kind_) {
case FAST_ELEMENTS:
- KeyedStoreStubCompiler::GenerateStoreFastElement(masm, is_js_array_);
+ case FAST_SMI_ONLY_ELEMENTS: {
+ KeyedStoreStubCompiler::GenerateStoreFastElement(masm,
+ is_js_array_,
+ elements_kind_);
+ }
break;
case FAST_DOUBLE_ELEMENTS:
KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(masm,
@@ -302,24 +286,20 @@ void KeyedStoreElementStub::Generate(MacroAssembler* masm) {
void ArgumentsAccessStub::PrintName(StringStream* stream) {
- const char* type_name = NULL; // Make g++ happy.
+ stream->Add("ArgumentsAccessStub_");
switch (type_) {
- case READ_ELEMENT: type_name = "ReadElement"; break;
- case NEW_NON_STRICT_FAST: type_name = "NewNonStrictFast"; break;
- case NEW_NON_STRICT_SLOW: type_name = "NewNonStrictSlow"; break;
- case NEW_STRICT: type_name = "NewStrict"; break;
+ case READ_ELEMENT: stream->Add("ReadElement"); break;
+ case NEW_NON_STRICT_FAST: stream->Add("NewNonStrictFast"); break;
+ case NEW_NON_STRICT_SLOW: stream->Add("NewNonStrictSlow"); break;
+ case NEW_STRICT: stream->Add("NewStrict"); break;
}
- stream->Add("ArgumentsAccessStub_%s", type_name);
}
void CallFunctionStub::PrintName(StringStream* stream) {
- const char* flags_name = NULL; // Make g++ happy.
- switch (flags_) {
- case NO_CALL_FUNCTION_FLAGS: flags_name = ""; break;
- case RECEIVER_MIGHT_BE_IMPLICIT: flags_name = "_Implicit"; break;
- }
- stream->Add("CallFunctionStub_Args%d%s", argc_, flags_name);
+ stream->Add("CallFunctionStub_Args%d", argc_);
+ if (ReceiverMightBeImplicit()) stream->Add("_Implicit");
+ if (RecordCallTarget()) stream->Add("_Recording");
}
@@ -402,4 +382,29 @@ bool ToBooleanStub::Types::CanBeUndetectable() const {
}
+void ElementsTransitionAndStoreStub::Generate(MacroAssembler* masm) {
+ Label fail;
+ if (!FLAG_trace_elements_transitions) {
+ if (to_ == FAST_ELEMENTS) {
+ if (from_ == FAST_SMI_ONLY_ELEMENTS) {
+ ElementsTransitionGenerator::GenerateSmiOnlyToObject(masm);
+ } else if (from_ == FAST_DOUBLE_ELEMENTS) {
+ ElementsTransitionGenerator::GenerateDoubleToObject(masm, &fail);
+ } else {
+ UNREACHABLE();
+ }
+ KeyedStoreStubCompiler::GenerateStoreFastElement(masm,
+ is_jsarray_,
+ FAST_ELEMENTS);
+ } else if (from_ == FAST_SMI_ONLY_ELEMENTS && to_ == FAST_DOUBLE_ELEMENTS) {
+ ElementsTransitionGenerator::GenerateSmiOnlyToDouble(masm, &fail);
+ KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(masm, is_jsarray_);
+ } else {
+ UNREACHABLE();
+ }
+ }
+ masm->bind(&fail);
+ KeyedStoreIC::GenerateRuntimeSetProperty(masm, strict_mode_);
+}
+
} } // namespace v8::internal
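
The FinishCode signature change from Code* to Handle<Code> is what makes JSEntryStub::FinishCode above safe: NewFixedArray can allocate, allocation can trigger GC, and GC can move the code object; a handle is updated by the collector where a raw pointer would dangle. Restating the shape, as a sketch built only from calls already visible in this diff:

// 'code' survives the possible GC inside NewFixedArray because it is a
// Handle; the dereference afterwards sees the relocated object.
void FinishWithHandlerTable(Handle<Code> code, Factory* factory, int offset) {
  Handle<FixedArray> table = factory->NewFixedArray(1, TENURED);
  table->set(0, Smi::FromInt(offset));
  code->set_handler_table(*table);
}
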
diff --git a/deps/v8/src/code-stubs.h b/deps/v8/src/code-stubs.h
index 64c89b93d..6bda5da70 100644
--- a/deps/v8/src/code-stubs.h
+++ b/deps/v8/src/code-stubs.h
@@ -30,6 +30,7 @@
#include "allocation.h"
#include "globals.h"
+#include "codegen.h"
namespace v8 {
namespace internal {
@@ -45,27 +46,22 @@ namespace internal {
V(Compare) \
V(CompareIC) \
V(MathPow) \
+ V(RecordWrite) \
+ V(StoreBufferOverflow) \
+ V(RegExpExec) \
V(TranscendentalCache) \
V(Instanceof) \
- /* All stubs above this line only exist in a few versions, which are */ \
- /* generated ahead of time. Therefore compiling a call to one of */ \
- /* them can't cause a new stub to be compiled, so compiling a call to */ \
- /* them is GC safe. The ones below this line exist in many variants */ \
- /* so code compiling a call to one can cause a GC. This means they */ \
- /* can't be called from other stubs, since stub generation code is */ \
- /* not GC safe. */ \
V(ConvertToDouble) \
V(WriteInt32ToHeapNumber) \
V(StackCheck) \
V(FastNewClosure) \
V(FastNewContext) \
+ V(FastNewBlockContext) \
V(FastCloneShallowArray) \
- V(RevertToNumber) \
+ V(FastCloneShallowObject) \
V(ToBoolean) \
V(ToNumber) \
- V(CounterOp) \
V(ArgumentsAccess) \
- V(RegExpExec) \
V(RegExpConstructResult) \
V(NumberToString) \
V(CEntry) \
@@ -73,7 +69,9 @@ namespace internal {
V(KeyedLoadElement) \
V(KeyedStoreElement) \
V(DebuggerStatement) \
- V(StringDictionaryNegativeLookup)
+ V(StringDictionaryLookup) \
+ V(ElementsTransitionAndStore) \
+ V(StoreArrayLiteralElement)
// List of code stubs only used on ARM platforms.
#ifdef V8_TARGET_ARCH_ARM
@@ -121,11 +119,6 @@ class CodeStub BASE_EMBEDDED {
// Retrieve the code for the stub. Generate the code if needed.
Handle<Code> GetCode();
- // Retrieve the code for the stub if already generated. Do not
- // generate the code if not already generated and instead return a
- // retry after GC Failure object.
- MUST_USE_RESULT MaybeObject* TryGetCode();
-
static Major MajorKeyFromKey(uint32_t key) {
return static_cast<Major>(MajorKeyBits::decode(key));
}
@@ -142,14 +135,35 @@ class CodeStub BASE_EMBEDDED {
virtual ~CodeStub() {}
+ bool CompilingCallsToThisStubIsGCSafe() {
+ bool is_pregenerated = IsPregenerated();
+ Code* code = NULL;
+ CHECK(!is_pregenerated || FindCodeInCache(&code));
+ return is_pregenerated;
+ }
+
+ // See comment above, where Instanceof is defined.
+ virtual bool IsPregenerated() { return false; }
+
+ static void GenerateStubsAheadOfTime();
+ static void GenerateFPStubs();
+
+ // Some stubs put untagged junk on the stack that cannot be scanned by the
+ // GC. This means that we must be statically sure that no GC can occur while
+ // they are running. If that is the case they should override this to return
+ // false, which will cause an assertion if we try to call something that can
+ // GC or if we try to put a stack frame on top of the junk, which would not
+ // result in a traversable stack.
+ virtual bool SometimesSetsUpAFrame() { return true; }
+
+ // Lookup the code in the (possibly custom) cache.
+ bool FindCodeInCache(Code** code_out);
+
protected:
static const int kMajorBits = 6;
static const int kMinorBits = kBitsPerInt - kSmiTagSize - kMajorBits;
private:
- // Lookup the code in the (possibly custom) cache.
- bool FindCodeInCache(Code** code_out);
-
// Nonvirtual wrapper around the stub-specific Generate function. Call
// this function to set up the macro assembler and generate the code.
void GenerateCode(MacroAssembler* masm);
@@ -162,7 +176,11 @@ class CodeStub BASE_EMBEDDED {
void RecordCodeGeneration(Code* code, MacroAssembler* masm);
// Finish the code object after it has been generated.
- virtual void FinishCode(Code* code) { }
+ virtual void FinishCode(Handle<Code> code) { }
+
+ // Activate the newly generated stub. Called after the stub has been
+ // registered in the stub cache.
+ virtual void Activate(Code* code) { }
// Returns information for computing the number key.
virtual Major MajorKey() = 0;
@@ -178,9 +196,7 @@ class CodeStub BASE_EMBEDDED {
// Returns a name for logging/debugging purposes.
SmartArrayPointer<const char> GetName();
- virtual void PrintName(StringStream* stream) {
- stream->Add("%s", MajorName(MajorKey(), false));
- }
+ virtual void PrintName(StringStream* stream);
// Returns whether the code generated for this stub needs to be allocated as
// a fixed (non-moveable) code object.
@@ -193,9 +209,6 @@ class CodeStub BASE_EMBEDDED {
MajorKeyBits::encode(MajorKey());
}
- // See comment above, where Instanceof is defined.
- bool AllowsStubCalls() { return MajorKey() <= Instanceof; }
-
class MajorKeyBits: public BitField<uint32_t, 0, kMajorBits> {};
class MinorKeyBits: public BitField<uint32_t, kMajorBits, kMinorBits> {};
@@ -286,16 +299,17 @@ class ToNumberStub: public CodeStub {
class FastNewClosureStub : public CodeStub {
public:
- explicit FastNewClosureStub(StrictModeFlag strict_mode)
- : strict_mode_(strict_mode) { }
+ explicit FastNewClosureStub(LanguageMode language_mode)
+ : language_mode_(language_mode) { }
void Generate(MacroAssembler* masm);
private:
Major MajorKey() { return FastNewClosure; }
- int MinorKey() { return strict_mode_; }
+ int MinorKey() { return language_mode_ == CLASSIC_MODE
+ ? kNonStrictMode : kStrictMode; }
- StrictModeFlag strict_mode_;
+ LanguageMode language_mode_;
};
@@ -304,7 +318,7 @@ class FastNewContextStub : public CodeStub {
static const int kMaximumSlots = 64;
explicit FastNewContextStub(int slots) : slots_(slots) {
- ASSERT(slots_ > 0 && slots <= kMaximumSlots);
+ ASSERT(slots_ > 0 && slots_ <= kMaximumSlots);
}
void Generate(MacroAssembler* masm);
@@ -317,6 +331,24 @@ class FastNewContextStub : public CodeStub {
};
+class FastNewBlockContextStub : public CodeStub {
+ public:
+ static const int kMaximumSlots = 64;
+
+ explicit FastNewBlockContextStub(int slots) : slots_(slots) {
+ ASSERT(slots_ > 0 && slots_ <= kMaximumSlots);
+ }
+
+ void Generate(MacroAssembler* masm);
+
+ private:
+ int slots_;
+
+ Major MajorKey() { return FastNewBlockContext; }
+ int MinorKey() { return slots_; }
+};
+
+
class FastCloneShallowArrayStub : public CodeStub {
public:
// Maximum length of copied elements array.
@@ -324,14 +356,16 @@ class FastCloneShallowArrayStub : public CodeStub {
enum Mode {
CLONE_ELEMENTS,
- COPY_ON_WRITE_ELEMENTS
+ CLONE_DOUBLE_ELEMENTS,
+ COPY_ON_WRITE_ELEMENTS,
+ CLONE_ANY_ELEMENTS
};
FastCloneShallowArrayStub(Mode mode, int length)
: mode_(mode),
length_((mode == COPY_ON_WRITE_ELEMENTS) ? 0 : length) {
- ASSERT(length_ >= 0);
- ASSERT(length_ <= kMaximumClonedLength);
+ ASSERT_GE(length_, 0);
+ ASSERT_LE(length_, kMaximumClonedLength);
}
void Generate(MacroAssembler* masm);
@@ -342,12 +376,32 @@ class FastCloneShallowArrayStub : public CodeStub {
Major MajorKey() { return FastCloneShallowArray; }
int MinorKey() {
- ASSERT(mode_ == 0 || mode_ == 1);
- return (length_ << 1) | mode_;
+ ASSERT(mode_ == 0 || mode_ == 1 || mode_ == 2 || mode_ == 3);
+ return length_ * 4 + mode_;
}
};
+class FastCloneShallowObjectStub : public CodeStub {
+ public:
+ // Maximum number of properties in copied object.
+ static const int kMaximumClonedProperties = 6;
+
+ explicit FastCloneShallowObjectStub(int length) : length_(length) {
+ ASSERT_GE(length_, 0);
+ ASSERT_LE(length_, kMaximumClonedProperties);
+ }
+
+ void Generate(MacroAssembler* masm);
+
+ private:
+ int length_;
+
+ Major MajorKey() { return FastCloneShallowObject; }
+ int MinorKey() { return length_; }
+};
+
+
class InstanceofStub: public CodeStub {
public:
enum Flags {
@@ -410,7 +464,9 @@ class ICCompareStub: public CodeStub {
class OpField: public BitField<int, 0, 3> { };
class StateField: public BitField<int, 3, 5> { };
- virtual void FinishCode(Code* code) { code->set_compare_state(state_); }
+ virtual void FinishCode(Handle<Code> code) {
+ code->set_compare_state(state_);
+ }
virtual CodeStub::Major MajorKey() { return CompareIC; }
virtual int MinorKey();
@@ -513,7 +569,7 @@ class CompareStub: public CodeStub {
int MinorKey();
virtual int GetCodeKind() { return Code::COMPARE_IC; }
- virtual void FinishCode(Code* code) {
+ virtual void FinishCode(Handle<Code> code) {
code->set_compare_state(CompareIC::GENERIC);
}
@@ -531,11 +587,18 @@ class CompareStub: public CodeStub {
class CEntryStub : public CodeStub {
public:
- explicit CEntryStub(int result_size)
- : result_size_(result_size), save_doubles_(false) { }
+ explicit CEntryStub(int result_size,
+ SaveFPRegsMode save_doubles = kDontSaveFPRegs)
+ : result_size_(result_size), save_doubles_(save_doubles) { }
void Generate(MacroAssembler* masm);
- void SaveDoubles() { save_doubles_ = true; }
+
+ // The version of this stub that doesn't save doubles is generated ahead of
+ // time, so it's OK to call it from other stubs that can't cope with GC during
+ // their code generation. On machines that always have gp registers (x64) we
+ // can generate both variants ahead of time.
+ virtual bool IsPregenerated();
+ static void GenerateAheadOfTime();
private:
void GenerateCore(MacroAssembler* masm,
@@ -550,7 +613,7 @@ class CEntryStub : public CodeStub {
// Number of pointers/values returned.
const int result_size_;
- bool save_doubles_;
+ SaveFPRegsMode save_doubles_;
Major MajorKey() { return CEntry; }
int MinorKey();
@@ -571,6 +634,10 @@ class JSEntryStub : public CodeStub {
private:
Major MajorKey() { return JSEntry; }
int MinorKey() { return 0; }
+
+ virtual void FinishCode(Handle<Code> code);
+
+ int handler_offset_;
};
@@ -647,10 +714,32 @@ class CallFunctionStub: public CodeStub {
void Generate(MacroAssembler* masm);
+ virtual void FinishCode(Handle<Code> code);
+
+ static void Clear(Heap* heap, Address address);
+
+ static Object* GetCachedValue(Address address);
+
static int ExtractArgcFromMinorKey(int minor_key) {
return ArgcBits::decode(minor_key);
}
+ // The object that indicates an uninitialized cache.
+ static Handle<Object> UninitializedSentinel(Isolate* isolate) {
+ return isolate->factory()->the_hole_value();
+ }
+
+ // A raw version of the uninitialized sentinel that's safe to read during
+ // garbage collection (e.g., for patching the cache).
+ static Object* RawUninitializedSentinel(Heap* heap) {
+ return heap->raw_unchecked_the_hole_value();
+ }
+
+ // The object that indicates a megamorphic state.
+ static Handle<Object> MegamorphicSentinel(Isolate* isolate) {
+ return isolate->factory()->undefined_value();
+ }
+
private:
int argc_;
CallFunctionFlags flags_;
@@ -658,8 +747,8 @@ class CallFunctionStub: public CodeStub {
virtual void PrintName(StringStream* stream);
// Minor key encoding in 32 bits with Bitfield <Type, shift, size>.
- class FlagBits: public BitField<CallFunctionFlags, 0, 1> {};
- class ArgcBits: public BitField<unsigned, 1, 32 - 1> {};
+ class FlagBits: public BitField<CallFunctionFlags, 0, 2> {};
+ class ArgcBits: public BitField<unsigned, 2, 32 - 2> {};
Major MajorKey() { return CallFunction; }
int MinorKey() {
@@ -670,6 +759,10 @@ class CallFunctionStub: public CodeStub {
bool ReceiverMightBeImplicit() {
return (flags_ & RECEIVER_MIGHT_BE_IMPLICIT) != 0;
}
+
+ bool RecordCallTarget() {
+ return (flags_ & RECORD_CALL_TARGET) != 0;
+ }
};
@@ -698,7 +791,6 @@ class StringCharCodeAtGenerator {
public:
StringCharCodeAtGenerator(Register object,
Register index,
- Register scratch,
Register result,
Label* receiver_not_string,
Label* index_not_number,
@@ -706,15 +798,11 @@ class StringCharCodeAtGenerator {
StringIndexFlags index_flags)
: object_(object),
index_(index),
- scratch_(scratch),
result_(result),
receiver_not_string_(receiver_not_string),
index_not_number_(index_not_number),
index_out_of_range_(index_out_of_range),
index_flags_(index_flags) {
- ASSERT(!scratch_.is(object_));
- ASSERT(!scratch_.is(index_));
- ASSERT(!scratch_.is(result_));
ASSERT(!result_.is(object_));
ASSERT(!result_.is(index_));
}
@@ -732,7 +820,6 @@ class StringCharCodeAtGenerator {
private:
Register object_;
Register index_;
- Register scratch_;
Register result_;
Label* receiver_not_string_;
@@ -795,8 +882,7 @@ class StringCharAtGenerator {
public:
StringCharAtGenerator(Register object,
Register index,
- Register scratch1,
- Register scratch2,
+ Register scratch,
Register result,
Label* receiver_not_string,
Label* index_not_number,
@@ -804,13 +890,12 @@ class StringCharAtGenerator {
StringIndexFlags index_flags)
: char_code_at_generator_(object,
index,
- scratch1,
- scratch2,
+ scratch,
receiver_not_string,
index_not_number,
index_out_of_range,
index_flags),
- char_from_code_generator_(scratch2, result) {}
+ char_from_code_generator_(scratch, result) {}
// Generates the fast case code. On the fallthrough path |result|
// register contains the result.
@@ -934,11 +1019,13 @@ class ToBooleanStub: public CodeStub {
virtual int GetCodeKind() { return Code::TO_BOOLEAN_IC; }
virtual void PrintName(StringStream* stream);
+ virtual bool SometimesSetsUpAFrame() { return false; }
+
private:
Major MajorKey() { return ToBoolean; }
int MinorKey() { return (tos_.code() << NUMBER_OF_TYPES) | types_.ToByte(); }
- virtual void FinishCode(Code* code) {
+ virtual void FinishCode(Handle<Code> code) {
code->set_to_boolean_state(types_.ToByte());
}
@@ -952,6 +1039,56 @@ class ToBooleanStub: public CodeStub {
Types types_;
};
+
+class ElementsTransitionAndStoreStub : public CodeStub {
+ public:
+ ElementsTransitionAndStoreStub(ElementsKind from,
+ ElementsKind to,
+ bool is_jsarray,
+ StrictModeFlag strict_mode)
+ : from_(from),
+ to_(to),
+ is_jsarray_(is_jsarray),
+ strict_mode_(strict_mode) {}
+
+ private:
+ class FromBits: public BitField<ElementsKind, 0, 8> {};
+ class ToBits: public BitField<ElementsKind, 8, 8> {};
+ class IsJSArrayBits: public BitField<bool, 16, 8> {};
+ class StrictModeBits: public BitField<StrictModeFlag, 24, 8> {};
+
+ Major MajorKey() { return ElementsTransitionAndStore; }
+ int MinorKey() {
+ return FromBits::encode(from_) |
+ ToBits::encode(to_) |
+ IsJSArrayBits::encode(is_jsarray_) |
+ StrictModeBits::encode(strict_mode_);
+ }
+
+ void Generate(MacroAssembler* masm);
+
+ ElementsKind from_;
+ ElementsKind to_;
+ bool is_jsarray_;
+ StrictModeFlag strict_mode_;
+
+ DISALLOW_COPY_AND_ASSIGN(ElementsTransitionAndStoreStub);
+};
+
+
+class StoreArrayLiteralElementStub : public CodeStub {
+ public:
+ explicit StoreArrayLiteralElementStub() {}
+
+ private:
+ Major MajorKey() { return StoreArrayLiteralElement; }
+ int MinorKey() { return 0; }
+
+ void Generate(MacroAssembler* masm);
+
+ DISALLOW_COPY_AND_ASSIGN(StoreArrayLiteralElementStub);
+};
+
} } // namespace v8::internal
#endif // V8_CODE_STUBS_H_
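
Several stubs added in this header (the widened FlagBits/ArgcBits in CallFunctionStub, the four fields of ElementsTransitionAndStoreStub::MinorKey) pack values with BitField<Type, shift, size>. For readers without the tree at hand, a minimal stand-alone version of that helper; the real one is defined elsewhere in the tree (utils.h):

template <class T, int shift, int size>
class BitFieldSketch {
 public:
  static const unsigned kMask = ((1u << size) - 1u) << shift;
  static unsigned encode(T value) {
    return static_cast<unsigned>(value) << shift;
  }
  static T decode(unsigned field) {
    return static_cast<T>((field & kMask) >> shift);
  }
};

// MinorKey-style composition, as in ElementsTransitionAndStoreStub:
//   FromBits::encode(from_) | ToBits::encode(to_) | ...
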
diff --git a/deps/v8/src/codegen.cc b/deps/v8/src/codegen.cc
index cdc9ba155..ceea7b9fe 100644
--- a/deps/v8/src/codegen.cc
+++ b/deps/v8/src/codegen.cc
@@ -218,8 +218,8 @@ void ArgumentsAccessStub::Generate(MacroAssembler* masm) {
int CEntryStub::MinorKey() {
+ int result = (save_doubles_ == kSaveFPRegs) ? 1 : 0;
ASSERT(result_size_ == 1 || result_size_ == 2);
- int result = save_doubles_ ? 1 : 0;
#ifdef _WIN64
return result | ((result_size_ == 1) ? 0 : 2);
#else
diff --git a/deps/v8/src/codegen.h b/deps/v8/src/codegen.h
index e551abfb1..5360d3ef3 100644
--- a/deps/v8/src/codegen.h
+++ b/deps/v8/src/codegen.h
@@ -81,4 +81,19 @@ enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
#error Unsupported target architecture.
#endif
+namespace v8 {
+namespace internal {
+
+class ElementsTransitionGenerator : public AllStatic {
+ public:
+ static void GenerateSmiOnlyToObject(MacroAssembler* masm);
+ static void GenerateSmiOnlyToDouble(MacroAssembler* masm, Label* fail);
+ static void GenerateDoubleToObject(MacroAssembler* masm, Label* fail);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(ElementsTransitionGenerator);
+};
+
+} } // namespace v8::internal
+
#endif // V8_CODEGEN_H_
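
ElementsTransitionGenerator is declared AllStatic, a V8 convention for classes that are pure bundles of static functions with instantiation and copying disabled. Reduced to a stand-alone sketch (the private declarations are roughly what DISALLOW_COPY_AND_ASSIGN expands to):

class HelpersSketch {
 public:
  static int Twice(int x) { return 2 * x; }

 private:
  // Declared but never defined: accidental instantiation or copying
  // fails at compile or link time.
  HelpersSketch();
  HelpersSketch(const HelpersSketch&);
  void operator=(const HelpersSketch&);
};
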
diff --git a/deps/v8/src/weakmap.js b/deps/v8/src/collection.js
index 5fb515107..d11612681 100644
--- a/deps/v8/src/weakmap.js
+++ b/deps/v8/src/collection.js
@@ -26,12 +26,95 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// This file relies on the fact that the following declaration has been made
-// in runtime.js:
-// const $Object = global.Object;
+const $Set = global.Set;
+const $Map = global.Map;
const $WeakMap = global.WeakMap;
-// -------------------------------------------------------------------
+//-------------------------------------------------------------------
+
+// Global sentinel to be used instead of undefined keys, which are not
+// supported internally but required for Harmony sets and maps.
+var undefined_sentinel = {};
+
+
+function SetConstructor() {
+ if (%_IsConstructCall()) {
+ %SetInitialize(this);
+ } else {
+ return new $Set();
+ }
+}
+
+
+function SetAdd(key) {
+ if (IS_UNDEFINED(key)) {
+ key = undefined_sentinel;
+ }
+ return %SetAdd(this, key);
+}
+
+
+function SetHas(key) {
+ if (IS_UNDEFINED(key)) {
+ key = undefined_sentinel;
+ }
+ return %SetHas(this, key);
+}
+
+
+function SetDelete(key) {
+ if (IS_UNDEFINED(key)) {
+ key = undefined_sentinel;
+ }
+ return %SetDelete(this, key);
+}
+
+
+function MapConstructor() {
+ if (%_IsConstructCall()) {
+ %MapInitialize(this);
+ } else {
+ return new $Map();
+ }
+}
+
+
+function MapGet(key) {
+ if (IS_UNDEFINED(key)) {
+ key = undefined_sentinel;
+ }
+ return %MapGet(this, key);
+}
+
+
+function MapSet(key, value) {
+ if (IS_UNDEFINED(key)) {
+ key = undefined_sentinel;
+ }
+ return %MapSet(this, key, value);
+}
+
+
+function MapHas(key) {
+ if (IS_UNDEFINED(key)) {
+ key = undefined_sentinel;
+ }
+ return !IS_UNDEFINED(%MapGet(this, key));
+}
+
+
+function MapDelete(key) {
+ if (IS_UNDEFINED(key)) {
+ key = undefined_sentinel;
+ }
+ if (!IS_UNDEFINED(%MapGet(this, key))) {
+ %MapSet(this, key, void 0);
+ return true;
+ } else {
+ return false;
+ }
+}
+
function WeakMapConstructor() {
if (%_IsConstructCall()) {
@@ -82,6 +165,30 @@ function WeakMapDelete(key) {
(function () {
%CheckIsBootstrapping();
+
+ // Set up the Set and Map constructor function.
+ %SetCode($Set, SetConstructor);
+ %SetCode($Map, MapConstructor);
+
+ // Set up the constructor property on the Set and Map prototype object.
+ %SetProperty($Set.prototype, "constructor", $Set, DONT_ENUM);
+ %SetProperty($Map.prototype, "constructor", $Map, DONT_ENUM);
+
+ // Set up the non-enumerable functions on the Set prototype object.
+ InstallFunctions($Set.prototype, DONT_ENUM, $Array(
+ "add", SetAdd,
+ "has", SetHas,
+ "delete", SetDelete
+ ));
+
+ // Set up the non-enumerable functions on the Map prototype object.
+ InstallFunctions($Map.prototype, DONT_ENUM, $Array(
+ "get", MapGet,
+ "set", MapSet,
+ "has", MapHas,
+ "delete", MapDelete
+ ));
+
// Set up the WeakMap constructor function.
%SetCode($WeakMap, WeakMapConstructor);
@@ -89,7 +196,7 @@ function WeakMapDelete(key) {
%SetProperty($WeakMap.prototype, "constructor", $WeakMap, DONT_ENUM);
// Set up the non-enumerable functions on the WeakMap prototype object.
- InstallFunctionsOnHiddenPrototype($WeakMap.prototype, DONT_ENUM, $Array(
+ InstallFunctions($WeakMap.prototype, DONT_ENUM, $Array(
"get", WeakMapGet,
"set", WeakMapSet,
"has", WeakMapHas,
diff --git a/deps/v8/src/compilation-cache.cc b/deps/v8/src/compilation-cache.cc
index 28e833a49..82cc2231a 100644
--- a/deps/v8/src/compilation-cache.cc
+++ b/deps/v8/src/compilation-cache.cc
@@ -1,4 +1,4 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -27,6 +27,7 @@
#include "v8.h"
+#include "assembler.h"
#include "compilation-cache.h"
#include "serialize.h"
@@ -250,7 +251,8 @@ void CompilationCacheScript::Put(Handle<String> source,
Handle<SharedFunctionInfo> CompilationCacheEval::Lookup(
Handle<String> source,
Handle<Context> context,
- StrictModeFlag strict_mode) {
+ LanguageMode language_mode,
+ int scope_position) {
// Make sure not to leak the table into the surrounding handle
// scope. Otherwise, we risk keeping old tables around even after
// having cleared the cache.
@@ -259,7 +261,8 @@ Handle<SharedFunctionInfo> CompilationCacheEval::Lookup(
{ HandleScope scope(isolate());
for (generation = 0; generation < generations(); generation++) {
Handle<CompilationCacheTable> table = GetTable(generation);
- result = table->LookupEval(*source, *context, strict_mode);
+ result = table->LookupEval(
+ *source, *context, language_mode, scope_position);
if (result->IsSharedFunctionInfo()) {
break;
}
@@ -269,7 +272,7 @@ Handle<SharedFunctionInfo> CompilationCacheEval::Lookup(
Handle<SharedFunctionInfo>
function_info(SharedFunctionInfo::cast(result), isolate());
if (generation != 0) {
- Put(source, context, function_info);
+ Put(source, context, function_info, scope_position);
}
isolate()->counters()->compilation_cache_hits()->Increment();
return function_info;
@@ -283,27 +286,31 @@ Handle<SharedFunctionInfo> CompilationCacheEval::Lookup(
MaybeObject* CompilationCacheEval::TryTablePut(
Handle<String> source,
Handle<Context> context,
- Handle<SharedFunctionInfo> function_info) {
+ Handle<SharedFunctionInfo> function_info,
+ int scope_position) {
Handle<CompilationCacheTable> table = GetFirstTable();
- return table->PutEval(*source, *context, *function_info);
+ return table->PutEval(*source, *context, *function_info, scope_position);
}
Handle<CompilationCacheTable> CompilationCacheEval::TablePut(
Handle<String> source,
Handle<Context> context,
- Handle<SharedFunctionInfo> function_info) {
+ Handle<SharedFunctionInfo> function_info,
+ int scope_position) {
CALL_HEAP_FUNCTION(isolate(),
- TryTablePut(source, context, function_info),
+ TryTablePut(
+ source, context, function_info, scope_position),
CompilationCacheTable);
}
void CompilationCacheEval::Put(Handle<String> source,
Handle<Context> context,
- Handle<SharedFunctionInfo> function_info) {
+ Handle<SharedFunctionInfo> function_info,
+ int scope_position) {
HandleScope scope(isolate());
- SetFirstTable(TablePut(source, context, function_info));
+ SetFirstTable(TablePut(source, context, function_info, scope_position));
}
@@ -389,16 +396,20 @@ Handle<SharedFunctionInfo> CompilationCache::LookupEval(
Handle<String> source,
Handle<Context> context,
bool is_global,
- StrictModeFlag strict_mode) {
+ LanguageMode language_mode,
+ int scope_position) {
if (!IsEnabled()) {
return Handle<SharedFunctionInfo>::null();
}
Handle<SharedFunctionInfo> result;
if (is_global) {
- result = eval_global_.Lookup(source, context, strict_mode);
+ result = eval_global_.Lookup(
+ source, context, language_mode, scope_position);
} else {
- result = eval_contextual_.Lookup(source, context, strict_mode);
+ ASSERT(scope_position != RelocInfo::kNoPosition);
+ result = eval_contextual_.Lookup(
+ source, context, language_mode, scope_position);
}
return result;
}
@@ -427,16 +438,18 @@ void CompilationCache::PutScript(Handle<String> source,
void CompilationCache::PutEval(Handle<String> source,
Handle<Context> context,
bool is_global,
- Handle<SharedFunctionInfo> function_info) {
+ Handle<SharedFunctionInfo> function_info,
+ int scope_position) {
if (!IsEnabled()) {
return;
}
HandleScope scope(isolate());
if (is_global) {
- eval_global_.Put(source, context, function_info);
+ eval_global_.Put(source, context, function_info, scope_position);
} else {
- eval_contextual_.Put(source, context, function_info);
+ ASSERT(scope_position != RelocInfo::kNoPosition);
+ eval_contextual_.Put(source, context, function_info, scope_position);
}
}
diff --git a/deps/v8/src/compilation-cache.h b/deps/v8/src/compilation-cache.h
index 4339d2264..31f290968 100644
--- a/deps/v8/src/compilation-cache.h
+++ b/deps/v8/src/compilation-cache.h
@@ -1,4 +1,4 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -123,7 +123,19 @@ class CompilationCacheScript : public CompilationSubCache {
};
-// Sub-cache for eval scripts.
+// Sub-cache for eval scripts. Two caches for eval are used. One for eval calls
+// in global contexts and one for eval calls in other contexts. The cache
+// considers the following pieces of information when checking for matching
+// entries:
+// 1. The source string.
+// 2. The shared function info of the calling function.
+// 3. Whether the source should be compiled as strict code or as non-strict
+// code.
+// Note: Currently there are clients of CompileEval that always compile
+// non-strict code even if the calling function is a strict mode function.
+// More specifically these are the CompileString, DebugEvaluate and
+// DebugEvaluateGlobal runtime functions.
+// 4. The start position of the calling scope.
class CompilationCacheEval: public CompilationSubCache {
public:
CompilationCacheEval(Isolate* isolate, int generations)
@@ -131,23 +143,27 @@ class CompilationCacheEval: public CompilationSubCache {
Handle<SharedFunctionInfo> Lookup(Handle<String> source,
Handle<Context> context,
- StrictModeFlag strict_mode);
+ LanguageMode language_mode,
+ int scope_position);
void Put(Handle<String> source,
Handle<Context> context,
- Handle<SharedFunctionInfo> function_info);
+ Handle<SharedFunctionInfo> function_info,
+ int scope_position);
private:
MUST_USE_RESULT MaybeObject* TryTablePut(
Handle<String> source,
Handle<Context> context,
- Handle<SharedFunctionInfo> function_info);
+ Handle<SharedFunctionInfo> function_info,
+ int scope_position);
// Note: Returns a new hash table if operation results in expansion.
Handle<CompilationCacheTable> TablePut(
Handle<String> source,
Handle<Context> context,
- Handle<SharedFunctionInfo> function_info);
+ Handle<SharedFunctionInfo> function_info,
+ int scope_position);
DISALLOW_IMPLICIT_CONSTRUCTORS(CompilationCacheEval);
};
@@ -198,7 +214,8 @@ class CompilationCache {
Handle<SharedFunctionInfo> LookupEval(Handle<String> source,
Handle<Context> context,
bool is_global,
- StrictModeFlag strict_mode);
+ LanguageMode language_mode,
+ int scope_position);
// Returns the regexp data associated with the given regexp if it
// is in cache, otherwise an empty handle.
@@ -215,7 +232,8 @@ class CompilationCache {
void PutEval(Handle<String> source,
Handle<Context> context,
bool is_global,
- Handle<SharedFunctionInfo> function_info);
+ Handle<SharedFunctionInfo> function_info,
+ int scope_position);
// Associate the (source, flags) pair to the given regexp data.
// This may overwrite an existing mapping.
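
The common thread in both compilation-cache files is that eval cache entries are now matched on four components instead of three. Conceptually the key is the tuple below (a descriptive sketch, not the actual table layout; the real code hashes these through LookupEval/PutEval):

struct EvalCacheKeySketch {
  const void* source;      // 1. the eval'd source string
  const void* context;     // 2. stands in for the calling function's context
  int language_mode;       // 3. classic vs. strict vs. extended
  int scope_position;      // 4. start position of the calling scope
};

The new fourth component matters presumably because, with harmony scoping, the same source eval'd at two different positions in one function can see different bindings, so a hit keyed only on source and context could resurrect a closure over the wrong scope.
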
diff --git a/deps/v8/src/compiler-intrinsics.h b/deps/v8/src/compiler-intrinsics.h
new file mode 100644
index 000000000..3b9c59ea5
--- /dev/null
+++ b/deps/v8/src/compiler-intrinsics.h
@@ -0,0 +1,77 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_COMPILER_INTRINSICS_H_
+#define V8_COMPILER_INTRINSICS_H_
+
+namespace v8 {
+namespace internal {
+
+class CompilerIntrinsics {
+ public:
+ // Returns number of zero bits preceding least significant 1 bit.
+ // Undefined for zero value.
+ INLINE(static int CountTrailingZeros(uint32_t value));
+
+ // Returns number of zero bits following most significant 1 bit.
+ // Undefined for zero value.
+ INLINE(static int CountLeadingZeros(uint32_t value));
+};
+
+#ifdef __GNUC__
+int CompilerIntrinsics::CountTrailingZeros(uint32_t value) {
+ return __builtin_ctz(value);
+}
+
+int CompilerIntrinsics::CountLeadingZeros(uint32_t value) {
+ return __builtin_clz(value);
+}
+
+#elif defined(_MSC_VER)
+
+#pragma intrinsic(_BitScanForward)
+#pragma intrinsic(_BitScanReverse)
+
+int CompilerIntrinsics::CountTrailingZeros(uint32_t value) {
+ unsigned long result; //NOLINT
+ _BitScanForward(&result, static_cast<long>(value)); //NOLINT
+ return static_cast<int>(result);
+}
+
+int CompilerIntrinsics::CountLeadingZeros(uint32_t value) {
+ unsigned long result; //NOLINT
+ _BitScanReverse(&result, static_cast<long>(value)); //NOLINT
+ return 31 - static_cast<int>(result);
+}
+
+#else
+#error Unsupported compiler
+#endif
+
+} } // namespace v8::internal
+
+#endif // V8_COMPILER_INTRINSICS_H_
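
A quick usage sketch for the new header; the expected values are easy to verify by hand, since 0x8 has its lowest set bit at position 3, leaving 28 higher zero bits in a 32-bit word:

#include <cassert>

static void CompilerIntrinsicsExample() {
  assert(CompilerIntrinsics::CountTrailingZeros(0x8u) == 3);
  assert(CompilerIntrinsics::CountLeadingZeros(0x8u) == 28);
  // Both are undefined for zero, exactly like the underlying
  // __builtin_ctz/__builtin_clz and _BitScanForward/_BitScanReverse.
}
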
diff --git a/deps/v8/src/compiler.cc b/deps/v8/src/compiler.cc
index 5e1c4a978..16ccfa0cf 100644
--- a/deps/v8/src/compiler.cc
+++ b/deps/v8/src/compiler.cc
@@ -36,6 +36,7 @@
#include "full-codegen.h"
#include "gdb-jit.h"
#include "hydrogen.h"
+#include "isolate-inl.h"
#include "lithium.h"
#include "liveedit.h"
#include "parser.h"
@@ -52,13 +53,13 @@ namespace internal {
CompilationInfo::CompilationInfo(Handle<Script> script)
: isolate_(script->GetIsolate()),
- flags_(0),
+ flags_(LanguageModeField::encode(CLASSIC_MODE)),
function_(NULL),
scope_(NULL),
+ global_scope_(NULL),
script_(script),
extension_(NULL),
pre_parse_data_(NULL),
- supports_deoptimization_(false),
osr_ast_id_(AstNode::kNoNumber) {
Initialize(NONOPT);
}
@@ -66,14 +67,15 @@ CompilationInfo::CompilationInfo(Handle<Script> script)
CompilationInfo::CompilationInfo(Handle<SharedFunctionInfo> shared_info)
: isolate_(shared_info->GetIsolate()),
- flags_(IsLazy::encode(true)),
+ flags_(LanguageModeField::encode(CLASSIC_MODE) |
+ IsLazy::encode(true)),
function_(NULL),
scope_(NULL),
+ global_scope_(NULL),
shared_info_(shared_info),
script_(Handle<Script>(Script::cast(shared_info->script()))),
extension_(NULL),
pre_parse_data_(NULL),
- supports_deoptimization_(false),
osr_ast_id_(AstNode::kNoNumber) {
Initialize(BASE);
}
@@ -81,15 +83,16 @@ CompilationInfo::CompilationInfo(Handle<SharedFunctionInfo> shared_info)
CompilationInfo::CompilationInfo(Handle<JSFunction> closure)
: isolate_(closure->GetIsolate()),
- flags_(IsLazy::encode(true)),
+ flags_(LanguageModeField::encode(CLASSIC_MODE) |
+ IsLazy::encode(true)),
function_(NULL),
scope_(NULL),
+ global_scope_(NULL),
closure_(closure),
shared_info_(Handle<SharedFunctionInfo>(closure->shared())),
script_(Handle<Script>(Script::cast(shared_info_->script()))),
extension_(NULL),
pre_parse_data_(NULL),
- supports_deoptimization_(false),
osr_ast_id_(AstNode::kNoNumber) {
Initialize(BASE);
}
@@ -167,7 +170,11 @@ static void FinishOptimization(Handle<JSFunction> function, int64_t start) {
static bool MakeCrankshaftCode(CompilationInfo* info) {
// Test if we can optimize this function when asked to. We can only
// do this after the scopes are computed.
- if (!info->AllowOptimize()) info->DisableOptimization();
+ if (!info->AllowOptimize()) {
+ info->DisableOptimization();
+ } else if (info->IsOptimizable()) {
+ info->EnableDeoptimizationSupport();
+ }
// In case we are not optimizing simply return the code from
// the full code generator.
@@ -275,7 +282,7 @@ static bool MakeCrankshaftCode(CompilationInfo* info) {
}
Handle<Context> global_context(info->closure()->context()->global_context());
- TypeFeedbackOracle oracle(code, global_context);
+ TypeFeedbackOracle oracle(code, global_context, info->isolate());
HGraphBuilder builder(info, &oracle);
HPhase phase(HPhase::kTotal);
HGraph* graph = builder.CreateGraph();
@@ -308,9 +315,9 @@ static bool MakeCrankshaftCode(CompilationInfo* info) {
static bool GenerateCode(CompilationInfo* info) {
- return V8::UseCrankshaft() ?
- MakeCrankshaftCode(info) :
- FullCodeGenerator::MakeCode(info);
+ return info->IsCompilingForDebugging() || !V8::UseCrankshaft() ?
+ FullCodeGenerator::MakeCode(info) :
+ MakeCrankshaftCode(info);
}
@@ -328,8 +335,7 @@ bool Compiler::MakeCodeForLiveEdit(CompilationInfo* info) {
// the compilation info is set if compilation succeeded.
bool succeeded = MakeCode(info);
if (!info->shared_info().is_null()) {
- Handle<SerializedScopeInfo> scope_info =
- SerializedScopeInfo::Create(info->scope());
+ Handle<ScopeInfo> scope_info = ScopeInfo::Create(info->scope());
info->shared_info()->set_scope_info(*scope_info);
}
return succeeded;
@@ -371,8 +377,14 @@ static Handle<SharedFunctionInfo> MakeFunctionInfo(CompilationInfo* info) {
// Only allow non-global compiles for eval.
ASSERT(info->is_eval() || info->is_global());
-
- if (!ParserApi::Parse(info)) return Handle<SharedFunctionInfo>::null();
+ ParsingFlags flags = kNoParsingFlags;
+ if (info->pre_parse_data() != NULL ||
+ String::cast(script->source())->length() > FLAG_min_preparse_length) {
+ flags = kAllowLazy;
+ }
+ if (!ParserApi::Parse(info, flags)) {
+ return Handle<SharedFunctionInfo>::null();
+ }
// Measure how long it takes to do the compilation; only take the
// rest of the function into account to avoid overlap with the
@@ -397,7 +409,7 @@ static Handle<SharedFunctionInfo> MakeFunctionInfo(CompilationInfo* info) {
lit->name(),
lit->materialized_literal_count(),
info->code(),
- SerializedScopeInfo::Create(info->scope()));
+ ScopeInfo::Create(info->scope()));
ASSERT_EQ(RelocInfo::kNoPosition, lit->function_token_position());
Compiler::SetFunctionInfo(result, lit, true, script);
@@ -447,7 +459,7 @@ Handle<SharedFunctionInfo> Compiler::Compile(Handle<String> source,
int line_offset,
int column_offset,
v8::Extension* extension,
- ScriptDataImpl* input_pre_data,
+ ScriptDataImpl* pre_data,
Handle<Object> script_data,
NativesFlag natives) {
Isolate* isolate = source->GetIsolate();
@@ -478,23 +490,12 @@ Handle<SharedFunctionInfo> Compiler::Compile(Handle<String> source,
// for small sources, odds are that there aren't many functions
// that would be compiled lazily anyway, so we skip the preparse step
// in that case too.
- ScriptDataImpl* pre_data = input_pre_data;
- bool harmony_block_scoping = natives != NATIVES_CODE &&
- FLAG_harmony_block_scoping;
- if (pre_data == NULL
- && source_length >= FLAG_min_preparse_length) {
- if (source->IsExternalTwoByteString()) {
- ExternalTwoByteStringUC16CharacterStream stream(
- Handle<ExternalTwoByteString>::cast(source), 0, source->length());
- pre_data = ParserApi::PartialPreParse(&stream,
- extension,
- harmony_block_scoping);
- } else {
- GenericStringUC16CharacterStream stream(source, 0, source->length());
- pre_data = ParserApi::PartialPreParse(&stream,
- extension,
- harmony_block_scoping);
- }
+ int flags = kNoParsingFlags;
+ if ((natives == NATIVES_CODE) || FLAG_allow_natives_syntax) {
+ flags |= kAllowNativesSyntax;
+ }
+ if (natives != NATIVES_CODE && FLAG_harmony_scoping) {
+ flags |= EXTENDED_MODE;
}
// Create a script object describing the script to be compiled.
@@ -520,11 +521,6 @@ Handle<SharedFunctionInfo> Compiler::Compile(Handle<String> source,
if (extension == NULL && !result.is_null()) {
compilation_cache->PutScript(source, result);
}
-
- // Get rid of the pre-parsing data (if necessary).
- if (input_pre_data == NULL && pre_data != NULL) {
- delete pre_data;
- }
}
if (result.is_null()) isolate->ReportPendingMessages();
@@ -535,7 +531,8 @@ Handle<SharedFunctionInfo> Compiler::Compile(Handle<String> source,
Handle<SharedFunctionInfo> Compiler::CompileEval(Handle<String> source,
Handle<Context> context,
bool is_global,
- StrictModeFlag strict_mode) {
+ LanguageMode language_mode,
+ int scope_position) {
Isolate* isolate = source->GetIsolate();
int source_length = source->length();
isolate->counters()->total_eval_size()->Increment(source_length);
@@ -551,7 +548,8 @@ Handle<SharedFunctionInfo> Compiler::CompileEval(Handle<String> source,
result = compilation_cache->LookupEval(source,
context,
is_global,
- strict_mode);
+ language_mode,
+ scope_position);
if (result.is_null()) {
// Create a script object describing the script to be compiled.
@@ -559,16 +557,20 @@ Handle<SharedFunctionInfo> Compiler::CompileEval(Handle<String> source,
CompilationInfo info(script);
info.MarkAsEval();
if (is_global) info.MarkAsGlobal();
- if (strict_mode == kStrictMode) info.MarkAsStrictMode();
+ info.SetLanguageMode(language_mode);
info.SetCallingContext(context);
result = MakeFunctionInfo(&info);
if (!result.is_null()) {
- CompilationCache* compilation_cache = isolate->compilation_cache();
- // If caller is strict mode, the result must be strict as well,
- // but not the other way around. Consider:
+ // If the caller is in strict mode, the result must be in strict mode or
+ // extended mode as well, but not the other way around. Consider:
// eval("'use strict'; ...");
- ASSERT(strict_mode == kNonStrictMode || result->strict_mode());
- compilation_cache->PutEval(source, context, is_global, result);
+ ASSERT(language_mode != STRICT_MODE || !result->is_classic_mode());
+ // If the caller is in extended mode, the result must also be in
+ // extended mode.
+ ASSERT(language_mode != EXTENDED_MODE ||
+ result->is_extended_mode());
+ compilation_cache->PutEval(
+ source, context, is_global, result, scope_position);
}
}
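
The asserts above encode a one-way inheritance that is easy to observe from script; a minimal sketch (plain JavaScript, not part of this diff):

// Classic-mode caller: a direct eval can leak 'var' bindings into the
// caller's scope, and the eval'd code itself stays classic.
function classicCaller() {
  eval("var leaked = 1;");
  return typeof leaked;  // "number"
}

// Strict caller: the directly eval'd code inherits strict mode, so an
// assignment to an undeclared name throws instead of creating a global.
function strictCaller() {
  "use strict";
  try {
    eval("undeclared = 1;");
  } catch (e) {
    return e instanceof ReferenceError;  // true
  }
}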
@@ -591,17 +593,16 @@ bool Compiler::CompileLazy(CompilationInfo* info) {
isolate->counters()->total_compile_size()->Increment(compiled_size);
// Generate the AST for the lazily compiled function.
- if (ParserApi::Parse(info)) {
+ if (ParserApi::Parse(info, kNoParsingFlags)) {
// Measure how long it takes to do the lazy compilation; only take the
// rest of the function into account to avoid overlap with the lazy
// parsing statistics.
HistogramTimerScope timer(isolate->counters()->compile_lazy());
- // After parsing we know function's strict mode. Remember it.
- if (info->function()->strict_mode()) {
- shared->set_strict_mode(true);
- info->MarkAsStrictMode();
- }
+ // After parsing we know the function's language mode. Remember it.
+ LanguageMode language_mode = info->function()->language_mode();
+ info->SetLanguageMode(language_mode);
+ shared->set_language_mode(language_mode);
// Compile the code.
if (!MakeCode(info)) {
@@ -620,7 +621,7 @@ bool Compiler::CompileLazy(CompilationInfo* info) {
RecordFunctionCompilation(Logger::LAZY_COMPILE_TAG, info, shared);
if (info->IsOptimizing()) {
- ASSERT(shared->scope_info() != SerializedScopeInfo::Empty());
+ ASSERT(shared->scope_info() != ScopeInfo::Empty());
function->ReplaceCode(*code);
} else {
// Update the shared function info with the compiled code and the
@@ -628,8 +629,7 @@ bool Compiler::CompileLazy(CompilationInfo* info) {
// info initialization is important since set_scope_info might
// trigger a GC, causing the ASSERT below to be invalid if the code
// was flushed. By setting the code object last we avoid this.
- Handle<SerializedScopeInfo> scope_info =
- SerializedScopeInfo::Create(info->scope());
+ Handle<ScopeInfo> scope_info = ScopeInfo::Create(info->scope());
shared->set_scope_info(*scope_info);
shared->set_code(*code);
if (!function.is_null()) {
@@ -681,7 +681,7 @@ Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo(FunctionLiteral* literal,
CompilationInfo info(script);
info.SetFunction(literal);
info.SetScope(literal->scope());
- if (literal->scope()->is_strict_mode()) info.MarkAsStrictMode();
+ info.SetLanguageMode(literal->scope()->language_mode());
LiveEditFunctionTracker live_edit_tracker(info.isolate(), literal);
// Determine if the function can be lazily compiled. This is necessary to
@@ -692,7 +692,7 @@ Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo(FunctionLiteral* literal,
bool allow_lazy = literal->AllowsLazyCompilation() &&
!LiveEditFunctionTracker::IsActive(info.isolate());
- Handle<SerializedScopeInfo> scope_info(SerializedScopeInfo::Empty());
+ Handle<ScopeInfo> scope_info(ScopeInfo::Empty());
// Generate code
if (FLAG_lazy && allow_lazy) {
@@ -701,7 +701,7 @@ Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo(FunctionLiteral* literal,
} else if ((V8::UseCrankshaft() && MakeCrankshaftCode(&info)) ||
(!V8::UseCrankshaft() && FullCodeGenerator::MakeCode(&info))) {
ASSERT(!info.code().is_null());
- scope_info = SerializedScopeInfo::Create(info.scope());
+ scope_info = ScopeInfo::Create(info.scope());
} else {
return Handle<SharedFunctionInfo>::null();
}
@@ -733,8 +733,8 @@ void Compiler::SetFunctionInfo(Handle<SharedFunctionInfo> function_info,
FunctionLiteral* lit,
bool is_toplevel,
Handle<Script> script) {
- function_info->set_length(lit->num_parameters());
- function_info->set_formal_parameter_count(lit->num_parameters());
+ function_info->set_length(lit->parameter_count());
+ function_info->set_formal_parameter_count(lit->parameter_count());
function_info->set_script(*script);
function_info->set_function_token_position(lit->function_token_position());
function_info->set_start_position(lit->start_position());
@@ -747,7 +747,7 @@ void Compiler::SetFunctionInfo(Handle<SharedFunctionInfo> function_info,
lit->has_only_simple_this_property_assignments(),
*lit->this_property_assignments());
function_info->set_allows_lazy_compilation(lit->AllowsLazyCompilation());
- function_info->set_strict_mode(lit->strict_mode());
+ function_info->set_language_mode(lit->language_mode());
function_info->set_uses_arguments(lit->scope()->arguments() != NULL);
function_info->set_has_duplicate_parameters(lit->has_duplicate_parameters());
}
diff --git a/deps/v8/src/compiler.h b/deps/v8/src/compiler.h
index 69ab27d9c..47eaeea3b 100644
--- a/deps/v8/src/compiler.h
+++ b/deps/v8/src/compiler.h
@@ -52,10 +52,15 @@ class CompilationInfo BASE_EMBEDDED {
bool is_lazy() const { return IsLazy::decode(flags_); }
bool is_eval() const { return IsEval::decode(flags_); }
bool is_global() const { return IsGlobal::decode(flags_); }
- bool is_strict_mode() const { return IsStrictMode::decode(flags_); }
+ bool is_classic_mode() const { return language_mode() == CLASSIC_MODE; }
+ bool is_extended_mode() const { return language_mode() == EXTENDED_MODE; }
+ LanguageMode language_mode() const {
+ return LanguageModeField::decode(flags_);
+ }
bool is_in_loop() const { return IsInLoop::decode(flags_); }
FunctionLiteral* function() const { return function_; }
Scope* scope() const { return scope_; }
+ Scope* global_scope() const { return global_scope_; }
Handle<Code> code() const { return code_; }
Handle<JSFunction> closure() const { return closure_; }
Handle<SharedFunctionInfo> shared_info() const { return shared_info_; }
@@ -73,11 +78,11 @@ class CompilationInfo BASE_EMBEDDED {
ASSERT(!is_lazy());
flags_ |= IsGlobal::encode(true);
}
- void MarkAsStrictMode() {
- flags_ |= IsStrictMode::encode(true);
- }
- StrictModeFlag StrictMode() {
- return is_strict_mode() ? kStrictMode : kNonStrictMode;
+ void SetLanguageMode(LanguageMode language_mode) {
+ ASSERT(this->language_mode() == CLASSIC_MODE ||
+ this->language_mode() == language_mode ||
+ language_mode == EXTENDED_MODE);
+ flags_ = LanguageModeField::update(flags_, language_mode);
}
void MarkAsInLoop() {
ASSERT(is_lazy());
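
SetLanguageMode only lets the mode stay put or tighten (classic to strict or extended), mirroring how strictness propagates lexically in source; a rough script-level illustration:

function outer() {
  "use strict";
  // inner is compiled in the language mode of its enclosing code; a
  // nested function can never fall back to classic mode.
  function inner() {
    return this;  // undefined in strict mode, not the global object
  }
  return inner();  // undefined
}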
@@ -97,6 +102,10 @@ class CompilationInfo BASE_EMBEDDED {
ASSERT(scope_ == NULL);
scope_ = scope;
}
+ void SetGlobalScope(Scope* global_scope) {
+ ASSERT(global_scope_ == NULL);
+ global_scope_ = global_scope;
+ }
void SetCode(Handle<Code> code) { code_ = code; }
void SetExtension(v8::Extension* extension) {
ASSERT(!is_lazy());
@@ -114,6 +123,19 @@ class CompilationInfo BASE_EMBEDDED {
ASSERT(IsOptimizing());
osr_ast_id_ = osr_ast_id;
}
+ void MarkCompilingForDebugging(Handle<Code> current_code) {
+ ASSERT(mode_ != OPTIMIZE);
+ ASSERT(current_code->kind() == Code::FUNCTION);
+ flags_ |= IsCompilingForDebugging::encode(true);
+ if (current_code->is_compiled_optimizable()) {
+ EnableDeoptimizationSupport();
+ } else {
+ mode_ = CompilationInfo::NONOPT;
+ }
+ }
+ bool IsCompilingForDebugging() {
+ return IsCompilingForDebugging::decode(flags_);
+ }
bool has_global_object() const {
return !closure().is_null() && (closure()->context()->global() != NULL);
@@ -133,10 +155,12 @@ class CompilationInfo BASE_EMBEDDED {
void DisableOptimization();
// Deoptimization support.
- bool HasDeoptimizationSupport() const { return supports_deoptimization_; }
+ bool HasDeoptimizationSupport() const {
+ return SupportsDeoptimization::decode(flags_);
+ }
void EnableDeoptimizationSupport() {
ASSERT(IsOptimizable());
- supports_deoptimization_ = true;
+ flags_ |= SupportsDeoptimization::encode(true);
}
// Determine whether or not we can adaptively optimize.
@@ -171,8 +195,9 @@ class CompilationInfo BASE_EMBEDDED {
if (script_->type()->value() == Script::TYPE_NATIVE) {
MarkAsNative();
}
- if (!shared_info_.is_null() && shared_info_->strict_mode()) {
- MarkAsStrictMode();
+ if (!shared_info_.is_null()) {
+ ASSERT(language_mode() == CLASSIC_MODE);
+ SetLanguageMode(shared_info_->language_mode());
}
}
@@ -192,9 +217,14 @@ class CompilationInfo BASE_EMBEDDED {
// Flags that can be set for lazy compilation.
class IsInLoop: public BitField<bool, 3, 1> {};
// Strict mode - used in eager compilation.
- class IsStrictMode: public BitField<bool, 4, 1> {};
+ class LanguageModeField: public BitField<LanguageMode, 4, 2> {};
// Is this a function from our natives.
class IsNative: public BitField<bool, 6, 1> {};
+ // Is this code being compiled with support for deoptimization.
+ class SupportsDeoptimization: public BitField<bool, 7, 1> {};
+ // If compiling for debugging, produce just full code matching the
+ // initial mode setting.
+ class IsCompilingForDebugging: public BitField<bool, 8, 1> {};
unsigned flags_;
@@ -205,6 +235,8 @@ class CompilationInfo BASE_EMBEDDED {
// The scope of the function literal as a convenience. Set to indicate
// that scopes have been analyzed.
Scope* scope_;
+ // The global scope provided as a convenience.
+ Scope* global_scope_;
// The compiled code.
Handle<Code> code_;
@@ -223,7 +255,6 @@ class CompilationInfo BASE_EMBEDDED {
// Compilation mode flag and whether deoptimization is allowed.
Mode mode_;
- bool supports_deoptimization_;
int osr_ast_id_;
DISALLOW_COPY_AND_ASSIGN(CompilationInfo);
@@ -267,7 +298,8 @@ class Compiler : public AllStatic {
static Handle<SharedFunctionInfo> CompileEval(Handle<String> source,
Handle<Context> context,
bool is_global,
- StrictModeFlag strict_mode);
+ LanguageMode language_mode,
+ int scope_position);
// Compile from function info (used for lazy compilation). Returns true on
// success and false if the compilation resulted in a stack overflow.
diff --git a/deps/v8/src/contexts.cc b/deps/v8/src/contexts.cc
index 4f93abdff..76784bd70 100644
--- a/deps/v8/src/contexts.cc
+++ b/deps/v8/src/contexts.cc
@@ -86,14 +86,14 @@ void Context::set_global_proxy(JSObject* object) {
Handle<Object> Context::Lookup(Handle<String> name,
ContextLookupFlags flags,
- int* index_,
+ int* index,
PropertyAttributes* attributes,
BindingFlags* binding_flags) {
Isolate* isolate = GetIsolate();
Handle<Context> context(this, isolate);
bool follow_context_chain = (flags & FOLLOW_CONTEXT_CHAIN) != 0;
- *index_ = -1;
+ *index = -1;
*attributes = ABSENT;
*binding_flags = MISSING_BINDING;
@@ -110,70 +110,51 @@ Handle<Object> Context::Lookup(Handle<String> name,
PrintF("\n");
}
- // Check extension/with/global object.
- if (!context->IsBlockContext() && context->has_extension()) {
- if (context->IsCatchContext()) {
- // Catch contexts have the variable name in the extension slot.
- if (name->Equals(String::cast(context->extension()))) {
- if (FLAG_trace_contexts) {
- PrintF("=> found in catch context\n");
- }
- *index_ = Context::THROWN_OBJECT_INDEX;
- *attributes = NONE;
- *binding_flags = MUTABLE_IS_INITIALIZED;
- return context;
- }
+ // 1. Check global objects, subjects of with, and extension objects.
+ if (context->IsGlobalContext() ||
+ context->IsWithContext() ||
+ (context->IsFunctionContext() && context->has_extension())) {
+ Handle<JSObject> object(JSObject::cast(context->extension()), isolate);
+ // Context extension objects need to behave as if they have no
+ // prototype. So even if we want to follow prototype chains, we need
+ // to only do a local lookup for context extension objects.
+ if ((flags & FOLLOW_PROTOTYPE_CHAIN) == 0 ||
+ object->IsJSContextExtensionObject()) {
+ *attributes = object->GetLocalPropertyAttribute(*name);
} else {
- ASSERT(context->IsGlobalContext() ||
- context->IsFunctionContext() ||
- context->IsWithContext());
- // Global, function, and with contexts may have an object in the
- // extension slot.
- Handle<JSObject> extension(JSObject::cast(context->extension()),
- isolate);
- // Context extension objects needs to behave as if they have no
- // prototype. So even if we want to follow prototype chains, we
- // need to only do a local lookup for context extension objects.
- if ((flags & FOLLOW_PROTOTYPE_CHAIN) == 0 ||
- extension->IsJSContextExtensionObject()) {
- *attributes = extension->GetLocalPropertyAttribute(*name);
- } else {
- *attributes = extension->GetPropertyAttribute(*name);
- }
- if (*attributes != ABSENT) {
- // property found
- if (FLAG_trace_contexts) {
- PrintF("=> found property in context object %p\n",
- reinterpret_cast<void*>(*extension));
- }
- return extension;
+ *attributes = object->GetPropertyAttribute(*name);
+ }
+ if (*attributes != ABSENT) {
+ if (FLAG_trace_contexts) {
+ PrintF("=> found property in context object %p\n",
+ reinterpret_cast<void*>(*object));
}
+ return object;
}
}
- // Check serialized scope information of functions and blocks. Only
- // functions can have parameters, and a function name.
+ // 2. Check the context proper if it has slots.
if (context->IsFunctionContext() || context->IsBlockContext()) {
- // We may have context-local slots. Check locals in the context.
- Handle<SerializedScopeInfo> scope_info;
+ // Use serialized scope information of functions and blocks to search
+ // for the context index.
+ Handle<ScopeInfo> scope_info;
if (context->IsFunctionContext()) {
- scope_info = Handle<SerializedScopeInfo>(
+ scope_info = Handle<ScopeInfo>(
context->closure()->shared()->scope_info(), isolate);
} else {
- ASSERT(context->IsBlockContext());
- scope_info = Handle<SerializedScopeInfo>(
- SerializedScopeInfo::cast(context->extension()), isolate);
+ scope_info = Handle<ScopeInfo>(
+ ScopeInfo::cast(context->extension()), isolate);
}
-
- Variable::Mode mode;
- int index = scope_info->ContextSlotIndex(*name, &mode);
- ASSERT(index < 0 || index >= MIN_CONTEXT_SLOTS);
- if (index >= 0) {
+ VariableMode mode;
+ InitializationFlag init_flag;
+ int slot_index = scope_info->ContextSlotIndex(*name, &mode, &init_flag);
+ ASSERT(slot_index < 0 || slot_index >= MIN_CONTEXT_SLOTS);
+ if (slot_index >= 0) {
if (FLAG_trace_contexts) {
PrintF("=> found local in context slot %d (mode = %d)\n",
- index, mode);
+ slot_index, mode);
}
- *index_ = index;
+ *index = slot_index;
// Note: Fixed context slots are statically allocated by the compiler.
// Statically allocated variables always have a statically known mode,
// which is the mode with which they were declared when added to the
@@ -181,23 +162,31 @@ Handle<Object> Context::Lookup(Handle<String> name,
// declared variables that were introduced through declaration nodes)
// must not appear here.
switch (mode) {
- case Variable::INTERNAL: // Fall through.
- case Variable::VAR:
+ case INTERNAL: // Fall through.
+ case VAR:
*attributes = NONE;
*binding_flags = MUTABLE_IS_INITIALIZED;
break;
- case Variable::LET:
+ case LET:
*attributes = NONE;
- *binding_flags = MUTABLE_CHECK_INITIALIZED;
+ *binding_flags = (init_flag == kNeedsInitialization)
+ ? MUTABLE_CHECK_INITIALIZED : MUTABLE_IS_INITIALIZED;
+ break;
+ case CONST:
+ *attributes = READ_ONLY;
+ *binding_flags = (init_flag == kNeedsInitialization)
+ ? IMMUTABLE_CHECK_INITIALIZED : IMMUTABLE_IS_INITIALIZED;
break;
- case Variable::CONST:
+ case CONST_HARMONY:
*attributes = READ_ONLY;
- *binding_flags = IMMUTABLE_CHECK_INITIALIZED;
+ *binding_flags = (init_flag == kNeedsInitialization)
+ ? IMMUTABLE_CHECK_INITIALIZED_HARMONY :
+ IMMUTABLE_IS_INITIALIZED_HARMONY;
break;
- case Variable::DYNAMIC:
- case Variable::DYNAMIC_GLOBAL:
- case Variable::DYNAMIC_LOCAL:
- case Variable::TEMPORARY:
+ case DYNAMIC:
+ case DYNAMIC_GLOBAL:
+ case DYNAMIC_LOCAL:
+ case TEMPORARY:
UNREACHABLE();
break;
}
@@ -206,22 +195,37 @@ Handle<Object> Context::Lookup(Handle<String> name,
// Check the slot corresponding to the intermediate context holding
// only the function name variable.
- if (follow_context_chain) {
- int index = scope_info->FunctionContextSlotIndex(*name);
- if (index >= 0) {
+ if (follow_context_chain && context->IsFunctionContext()) {
+ VariableMode mode;
+ int function_index = scope_info->FunctionContextSlotIndex(*name, &mode);
+ if (function_index >= 0) {
if (FLAG_trace_contexts) {
PrintF("=> found intermediate function in context slot %d\n",
- index);
+ function_index);
}
- *index_ = index;
+ *index = function_index;
*attributes = READ_ONLY;
- *binding_flags = IMMUTABLE_IS_INITIALIZED;
+ ASSERT(mode == CONST || mode == CONST_HARMONY);
+ *binding_flags = (mode == CONST)
+ ? IMMUTABLE_IS_INITIALIZED : IMMUTABLE_IS_INITIALIZED_HARMONY;
return context;
}
}
+
+ } else if (context->IsCatchContext()) {
+ // Catch contexts have the variable name in the extension slot.
+ if (name->Equals(String::cast(context->extension()))) {
+ if (FLAG_trace_contexts) {
+ PrintF("=> found in catch context\n");
+ }
+ *index = Context::THROWN_OBJECT_INDEX;
+ *attributes = NONE;
+ *binding_flags = MUTABLE_IS_INITIALIZED;
+ return context;
+ }
}
- // Proceed with the previous context.
+ // 3. Prepare to continue with the previous (next outermost) context.
if (context->IsGlobalContext()) {
follow_context_chain = false;
} else {
@@ -236,68 +240,6 @@ Handle<Object> Context::Lookup(Handle<String> name,
}
-bool Context::GlobalIfNotShadowedByEval(Handle<String> name) {
- Context* context = this;
-
- // Check that there is no local with the given name in contexts
- // before the global context and check that there are no context
- // extension objects (conservative check for with statements).
- while (!context->IsGlobalContext()) {
- // Check if the context is a catch or with context, or has introduced
- // bindings by calling non-strict eval.
- if (context->has_extension()) return false;
-
- // Not a with context so it must be a function context.
- ASSERT(context->IsFunctionContext());
-
- // Check non-parameter locals.
- Handle<SerializedScopeInfo> scope_info(
- context->closure()->shared()->scope_info());
- Variable::Mode mode;
- int index = scope_info->ContextSlotIndex(*name, &mode);
- ASSERT(index < 0 || index >= MIN_CONTEXT_SLOTS);
- if (index >= 0) return false;
-
- // Check parameter locals.
- int param_index = scope_info->ParameterIndex(*name);
- if (param_index >= 0) return false;
-
- // Check context only holding the function name variable.
- index = scope_info->FunctionContextSlotIndex(*name);
- if (index >= 0) return false;
- context = context->previous();
- }
-
- // No local or potential with statement found so the variable is
- // global unless it is shadowed by an eval-introduced variable.
- return true;
-}
-
-
-void Context::ComputeEvalScopeInfo(bool* outer_scope_calls_eval,
- bool* outer_scope_calls_non_strict_eval) {
- // Skip up the context chain checking all the function contexts to see
- // whether they call eval.
- Context* context = this;
- while (!context->IsGlobalContext()) {
- if (context->IsFunctionContext()) {
- Handle<SerializedScopeInfo> scope_info(
- context->closure()->shared()->scope_info());
- if (scope_info->CallsEval()) {
- *outer_scope_calls_eval = true;
- if (!scope_info->IsStrictMode()) {
- // No need to go further since the answers will not change from
- // here.
- *outer_scope_calls_non_strict_eval = true;
- return;
- }
- }
- }
- context = context->previous();
- }
-}
-
-
void Context::AddOptimizedFunction(JSFunction* function) {
ASSERT(IsGlobalContext());
#ifdef DEBUG
diff --git a/deps/v8/src/contexts.h b/deps/v8/src/contexts.h
index 505f86c8c..10ef33d1a 100644
--- a/deps/v8/src/contexts.h
+++ b/deps/v8/src/contexts.h
@@ -46,24 +46,43 @@ enum ContextLookupFlags {
// ES5 10.2 defines lexical environments with mutable and immutable bindings.
// Immutable bindings have two states, initialized and uninitialized, and
-// their state is changed by the InitializeImmutableBinding method.
+// their state is changed by the InitializeImmutableBinding method. The
+// BindingFlags enum indicates whether a binding has definitely been
+// initialized. A mutable binding does not need to be checked and thus has
+// the BindingFlag MUTABLE_IS_INITIALIZED.
+//
+// There are two possibilities for immutable bindings:
+// * 'const' declared variables. They are initialized when evaluating the
+// corresponding declaration statement. They need to be checked for being
+// initialized and thus get the flag IMMUTABLE_CHECK_INITIALIZED.
+// * The function name of a named function literal. The binding is immediately
+// initialized when entering the function and thus does not need to be
+// checked. It gets the BindingFlag IMMUTABLE_IS_INITIALIZED.
+// Accessing an uninitialized binding produces the undefined value.
//
// The harmony proposal for block scoped bindings also introduces the
-// uninitialized state for mutable bindings. A 'let' declared variable
-// is a mutable binding that is created uninitalized upon activation of its
-// lexical environment and it is initialized when evaluating its declaration
-// statement. Var declared variables are mutable bindings that are
-// immediately initialized upon creation. The BindingFlags enum represents
-// information if a binding has definitely been initialized. 'const' declared
-// variables are created as uninitialized immutable bindings.
-
-// In harmony mode accessing an uninitialized binding produces a reference
-// error.
+// uninitialized state for mutable bindings.
+// * A 'let' declared variable. It is initialized when evaluating the
+// corresponding declaration statement. It needs to be checked for being
+// initialized and thus gets the flag MUTABLE_CHECK_INITIALIZED.
+// * A 'var' declared variable. It is initialized immediately upon creation
+// and thus doesn't need to be checked. It gets the flag
+// MUTABLE_IS_INITIALIZED.
+// * Catch-bound variables, function parameters, and variables introduced by
+// function declarations are initialized immediately and do not need to be
+// checked. Thus they get the flag MUTABLE_IS_INITIALIZED.
+// Immutable bindings in harmony mode get the _HARMONY flag variants. Accessing
+// an uninitialized binding produces a reference error.
+//
+// In V8 uninitialized bindings are set to the hole value upon creation and set
+// to a different value upon initialization.
enum BindingFlags {
MUTABLE_IS_INITIALIZED,
MUTABLE_CHECK_INITIALIZED,
IMMUTABLE_IS_INITIALIZED,
IMMUTABLE_CHECK_INITIALIZED,
+ IMMUTABLE_IS_INITIALIZED_HARMONY,
+ IMMUTABLE_CHECK_INITIALIZED_HARMONY,
MISSING_BINDING
};
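
A hedged sketch of what the *_CHECK_INITIALIZED flags guard against, using block-scoped declarations (experimental in this V8 behind the harmony flags, later standardized in ES2015):

function varCase() {
  var seen = v;  // undefined: 'var' bindings are initialized on entry,
  var v = 1;     // so no runtime check is needed (MUTABLE_IS_INITIALIZED).
  return seen;
}

function letCase() {
  try {
    var seen = l;  // read before the declaration statement has run
  } catch (e) {
    return e instanceof ReferenceError;  // true under harmony semantics:
  }                                      // the binding is still the hole
  let l = 1;
}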
@@ -134,9 +153,13 @@ enum BindingFlags {
V(MAP_CACHE_INDEX, Object, map_cache) \
V(CONTEXT_DATA_INDEX, Object, data) \
V(ALLOW_CODE_GEN_FROM_STRINGS_INDEX, Object, allow_code_gen_from_strings) \
+ V(TO_COMPLETE_PROPERTY_DESCRIPTOR_INDEX, JSFunction, \
+ to_complete_property_descriptor) \
V(DERIVED_HAS_TRAP_INDEX, JSFunction, derived_has_trap) \
V(DERIVED_GET_TRAP_INDEX, JSFunction, derived_get_trap) \
- V(DERIVED_SET_TRAP_INDEX, JSFunction, derived_set_trap)
+ V(DERIVED_SET_TRAP_INDEX, JSFunction, derived_set_trap) \
+ V(PROXY_ENUMERATE, JSFunction, proxy_enumerate) \
+ V(RANDOM_SEED_INDEX, ByteArray, random_seed)
// JSFunctions are pairs (context, function code), sometimes also called
// closures. A Context object is used to represent function contexts and
@@ -192,7 +215,8 @@ class Context: public FixedArray {
PREVIOUS_INDEX,
// The extension slot is used for either the global object (in global
// contexts), eval extension object (function contexts), subject of with
- // (with contexts), or the variable name (catch contexts).
+ // (with contexts), the variable name (catch contexts), or the serialized
+ // scope info (block contexts).
EXTENSION_INDEX,
GLOBAL_INDEX,
MIN_CONTEXT_SLOTS,
@@ -252,9 +276,12 @@ class Context: public FixedArray {
OUT_OF_MEMORY_INDEX,
CONTEXT_DATA_INDEX,
ALLOW_CODE_GEN_FROM_STRINGS_INDEX,
+ TO_COMPLETE_PROPERTY_DESCRIPTOR_INDEX,
DERIVED_HAS_TRAP_INDEX,
DERIVED_GET_TRAP_INDEX,
DERIVED_SET_TRAP_INDEX,
+ PROXY_ENUMERATE,
+ RANDOM_SEED_INDEX,
// Properties from here are treated as weak references by the full GC.
// Scavenge treats them as strong references.
@@ -330,12 +357,6 @@ class Context: public FixedArray {
// Mark the global context with out of memory.
inline void mark_out_of_memory();
- // The exception holder is the object used as a with object in
- // the implementation of a catch block.
- bool is_exception_holder(Object* object) {
- return IsCatchContext() && extension() == object;
- }
-
// A global context hold a list of all functions which have been optimized.
void AddOptimizedFunction(JSFunction* function);
void RemoveOptimizedFunction(JSFunction* function);
@@ -355,46 +376,28 @@ class Context: public FixedArray {
#undef GLOBAL_CONTEXT_FIELD_ACCESSORS
// Lookup the slot called name, starting with the current context.
- // There are 4 possible outcomes:
- //
- // 1) index_ >= 0 && result->IsContext():
- // most common case, the result is a Context, and index is the
- // context slot index, and the slot exists.
- // attributes == READ_ONLY for the function name variable, NONE otherwise.
+ // There are three possibilities:
//
- // 2) index_ >= 0 && result->IsJSObject():
- // the result is the JSObject arguments object, the index is the parameter
- // index, i.e., key into the arguments object, and the property exists.
- // attributes != ABSENT.
+ // 1) result->IsContext():
+ // The binding was found in a context. *index is always the
+ // non-negative slot index. *attributes is NONE for var and let
+ // declarations, READ_ONLY for const declarations (never ABSENT).
//
- // 3) index_ < 0 && result->IsJSObject():
- // the result is the JSObject extension context or the global object,
- // and the name is the property name, and the property exists.
- // attributes != ABSENT.
+ // 2) result->IsJSObject():
+ // The binding was found as a named property in a context extension
+ // object (i.e., was introduced via eval), as a property on the subject
+ // of with, or as a property of the global object. *index is -1 and
+ // *attributes is not ABSENT.
//
- // 4) index_ < 0 && result.is_null():
- // there was no context found with the corresponding property.
- // attributes == ABSENT.
+ // 3) result.is_null():
+ // No binding was found; *index is always -1 and *attributes is
+ // always ABSENT.
Handle<Object> Lookup(Handle<String> name,
ContextLookupFlags flags,
- int* index_,
+ int* index,
PropertyAttributes* attributes,
BindingFlags* binding_flags);
- // Determine if a local variable with the given name exists in a
- // context. Do not consider context extension objects. This is
- // used for compiling code using eval. If the context surrounding
- // the eval call does not have a local variable with this name and
- // does not contain a with statement the property is global unless
- // it is shadowed by a property in an extension object introduced by
- // eval.
- bool GlobalIfNotShadowedByEval(Handle<String> name);
-
- // Determine if any function scope in the context call eval and if
- // any of those calls are in non-strict mode.
- void ComputeEvalScopeInfo(bool* outer_scope_calls_eval,
- bool* outer_scope_calls_non_strict_eval);
-
// Code generation support.
static int SlotOffset(int index) {
return kHeaderSize + index * kPointerSize - kHeapObjectTag;
diff --git a/deps/v8/src/conversions-inl.h b/deps/v8/src/conversions-inl.h
index 41cf0d54c..b098a1c29 100644
--- a/deps/v8/src/conversions-inl.h
+++ b/deps/v8/src/conversions-inl.h
@@ -46,15 +46,15 @@
namespace v8 {
namespace internal {
-static inline double JunkStringValue() {
- return std::numeric_limits<double>::quiet_NaN();
+inline double JunkStringValue() {
+ return BitCast<double, uint64_t>(kQuietNaNMask);
}
// The fast double-to-unsigned-int conversion routine does not guarantee
// rounding towards zero, or any reasonable value if the argument is larger
// than what fits in an unsigned 32-bit integer.
-static inline unsigned int FastD2UI(double x) {
+inline unsigned int FastD2UI(double x) {
// There is no unsigned version of lrint, so there is no fast path
// in this function as there is in FastD2I. Using lrint doesn't work
// for values of 2^31 and above.
@@ -80,7 +80,7 @@ static inline unsigned int FastD2UI(double x) {
}
-static inline double DoubleToInteger(double x) {
+inline double DoubleToInteger(double x) {
if (isnan(x)) return 0;
if (!isfinite(x) || x == 0) return x;
return (x >= 0) ? floor(x) : ceil(x);
@@ -103,9 +103,9 @@ int32_t DoubleToInt32(double x) {
template <class Iterator, class EndMark>
-static bool SubStringEquals(Iterator* current,
- EndMark end,
- const char* substring) {
+bool SubStringEquals(Iterator* current,
+ EndMark end,
+ const char* substring) {
ASSERT(**current == *substring);
for (substring++; *substring != '\0'; substring++) {
++*current;
@@ -119,9 +119,9 @@ static bool SubStringEquals(Iterator* current,
// Returns true if a nonspace character has been found and false if the
// end was reached before finding a nonspace character.
template <class Iterator, class EndMark>
-static inline bool AdvanceToNonspace(UnicodeCache* unicode_cache,
- Iterator* current,
- EndMark end) {
+inline bool AdvanceToNonspace(UnicodeCache* unicode_cache,
+ Iterator* current,
+ EndMark end) {
while (*current != end) {
if (!unicode_cache->IsWhiteSpace(**current)) return true;
++*current;
@@ -132,11 +132,11 @@ static inline bool AdvanceToNonspace(UnicodeCache* unicode_cache,
// Parsing integers with radix 2, 4, 8, 16, 32. Assumes current != end.
template <int radix_log_2, class Iterator, class EndMark>
-static double InternalStringToIntDouble(UnicodeCache* unicode_cache,
- Iterator current,
- EndMark end,
- bool negative,
- bool allow_trailing_junk) {
+double InternalStringToIntDouble(UnicodeCache* unicode_cache,
+ Iterator current,
+ EndMark end,
+ bool negative,
+ bool allow_trailing_junk) {
ASSERT(current != end);
// Skip leading 0s.
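
This specialized routine handles only power-of-two radices; from script those paths are reachable through parseInt and hex string conversion (illustrative, assuming the usual entry points funnel here):

parseInt("1010", 2);   // 10  (radix_log_2 == 1)
parseInt("777", 8);    // 511 (radix_log_2 == 3)
parseInt("ff", 16);    // 255 (radix_log_2 == 4)
parseInt("0x10");      // 16: the 0x prefix implies radix 16
Number("0x10");        // 16: hex string conversion also uses radix 16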
@@ -235,10 +235,10 @@ static double InternalStringToIntDouble(UnicodeCache* unicode_cache,
template <class Iterator, class EndMark>
-static double InternalStringToInt(UnicodeCache* unicode_cache,
- Iterator current,
- EndMark end,
- int radix) {
+double InternalStringToInt(UnicodeCache* unicode_cache,
+ Iterator current,
+ EndMark end,
+ int radix) {
const bool allow_trailing_junk = true;
const double empty_string_val = JunkStringValue();
@@ -430,11 +430,11 @@ static double InternalStringToInt(UnicodeCache* unicode_cache,
// 2. *current - gets the current character in the sequence.
// 3. ++current (advances the position).
template <class Iterator, class EndMark>
-static double InternalStringToDouble(UnicodeCache* unicode_cache,
- Iterator current,
- EndMark end,
- int flags,
- double empty_string_val) {
+double InternalStringToDouble(UnicodeCache* unicode_cache,
+ Iterator current,
+ EndMark end,
+ int flags,
+ double empty_string_val) {
// To make sure that iterator dereferencing is valid, the following
// convention is used:
// 1. Each '++current' statement is followed by check for equality to 'end'.
diff --git a/deps/v8/src/conversions.h b/deps/v8/src/conversions.h
index e51ad6501..70559c9e9 100644
--- a/deps/v8/src/conversions.h
+++ b/deps/v8/src/conversions.h
@@ -28,8 +28,6 @@
#ifndef V8_CONVERSIONS_H_
#define V8_CONVERSIONS_H_
-#include <limits>
-
#include "utils.h"
namespace v8 {
@@ -47,14 +45,14 @@ class UnicodeCache;
const int kMaxSignificantDigits = 772;
-static inline bool isDigit(int x, int radix) {
+inline bool isDigit(int x, int radix) {
return (x >= '0' && x <= '9' && x < '0' + radix)
|| (radix > 10 && x >= 'a' && x < 'a' + radix - 10)
|| (radix > 10 && x >= 'A' && x < 'A' + radix - 10);
}
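
The predicate mirrors what parseInt accepts per radix, which makes it easy to sanity-check from script (illustrative):

parseInt("7", 8);    // 7   -> '7' < '0' + 8, a valid octal digit
parseInt("8", 8);    // NaN -> '8' is outside radix 8
parseInt("a", 16);   // 10  -> lowercase digits cover radix > 10
parseInt("A", 16);   // 10  -> uppercase handled symmetrically
parseInt("g", 16);   // NaN -> 'g' >= 'a' + 16 - 10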
-static inline double SignedZero(bool negative) {
+inline double SignedZero(bool negative) {
return negative ? -0.0 : 0.0;
}
@@ -63,16 +61,16 @@ static inline double SignedZero(bool negative) {
// rounding towards zero.
// The result is unspecified if x is infinite or NaN, or if the rounded
// integer value is outside the range of type int.
-static inline int FastD2I(double x) {
+inline int FastD2I(double x) {
// The static_cast conversion from double to int used to be slow, but
// as new benchmarks show, now it is much faster than lrint().
return static_cast<int>(x);
}
-static inline unsigned int FastD2UI(double x);
+inline unsigned int FastD2UI(double x);
-static inline double FastI2D(int x) {
+inline double FastI2D(int x) {
// There is no rounding involved in converting an integer to a
// double, so this code should compile to a few instructions without
// any FPU pipeline stalls.
@@ -80,7 +78,7 @@ static inline double FastI2D(int x) {
}
-static inline double FastUI2D(unsigned x) {
+inline double FastUI2D(unsigned x) {
// There is no rounding involved in converting an unsigned integer to a
// double, so this code should compile to a few instructions without
// any FPU pipeline stalls.
@@ -89,15 +87,15 @@ static inline double FastUI2D(unsigned x) {
// This function should match the exact semantics of ECMA-262 9.4.
-static inline double DoubleToInteger(double x);
+inline double DoubleToInteger(double x);
// This function should match the exact semantics of ECMA-262 9.5.
-static inline int32_t DoubleToInt32(double x);
+inline int32_t DoubleToInt32(double x);
// This function should match the exact semantics of ECMA-262 9.6.
-static inline uint32_t DoubleToUint32(double x) {
+inline uint32_t DoubleToUint32(double x) {
return static_cast<uint32_t>(DoubleToInt32(x));
}
diff --git a/deps/v8/src/d8-debug.cc b/deps/v8/src/d8-debug.cc
index adefba732..8fbc876da 100644
--- a/deps/v8/src/d8-debug.cc
+++ b/deps/v8/src/d8-debug.cc
@@ -1,4 +1,4 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -25,6 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#ifdef ENABLE_DEBUGGER_SUPPORT
#include "d8.h"
#include "d8-debug.h"
@@ -367,3 +368,5 @@ void KeyboardThread::Run() {
} // namespace v8
+
+#endif // ENABLE_DEBUGGER_SUPPORT
diff --git a/deps/v8/src/d8.cc b/deps/v8/src/d8.cc
index 55f0d4c2a..9eccc7e4f 100644
--- a/deps/v8/src/d8.cc
+++ b/deps/v8/src/d8.cc
@@ -146,11 +146,11 @@ bool Shell::ExecuteString(Handle<String> source,
Handle<Value> name,
bool print_result,
bool report_exceptions) {
-#ifndef V8_SHARED
+#if !defined(V8_SHARED) && defined(ENABLE_DEBUGGER_SUPPORT)
bool FLAG_debugger = i::FLAG_debugger;
#else
bool FLAG_debugger = false;
-#endif // V8_SHARED
+#endif // !V8_SHARED && ENABLE_DEBUGGER_SUPPORT
HandleScope handle_scope;
TryCatch try_catch;
options.script_executed = true;
@@ -178,7 +178,8 @@ bool Shell::ExecuteString(Handle<String> source,
// If all went well and the result wasn't undefined then print
// the returned value.
v8::String::Utf8Value str(result);
- fwrite(*str, sizeof(**str), str.length(), stdout);
+ size_t count = fwrite(*str, sizeof(**str), str.length(), stdout);
+ (void) count; // Silence GCC-4.5.x "unused result" warning.
printf("\n");
}
return true;
@@ -594,6 +595,7 @@ void Shell::InstallUtilityScript() {
Context::Scope utility_scope(utility_context_);
#ifdef ENABLE_DEBUGGER_SUPPORT
+ if (i::FLAG_debugger) printf("JavaScript debugger enabled\n");
// Install the debugger object in the utility scope
i::Debug* debug = i::Isolate::Current()->debug();
debug->Load();
@@ -792,22 +794,47 @@ void Shell::Exit(int exit_code) {
#ifndef V8_SHARED
+struct CounterAndKey {
+ Counter* counter;
+ const char* key;
+};
+
+
+int CompareKeys(const void* a, const void* b) {
+ return strcmp(static_cast<const CounterAndKey*>(a)->key,
+ static_cast<const CounterAndKey*>(b)->key);
+}
+
+
void Shell::OnExit() {
if (console != NULL) console->Close();
if (i::FLAG_dump_counters) {
- printf("+----------------------------------------+-------------+\n");
- printf("| Name | Value |\n");
- printf("+----------------------------------------+-------------+\n");
+ int number_of_counters = 0;
for (CounterMap::Iterator i(counter_map_); i.More(); i.Next()) {
- Counter* counter = i.CurrentValue();
+ number_of_counters++;
+ }
+ CounterAndKey* counters = new CounterAndKey[number_of_counters];
+ int j = 0;
+ for (CounterMap::Iterator i(counter_map_); i.More(); i.Next(), j++) {
+ counters[j].counter = i.CurrentValue();
+ counters[j].key = i.CurrentKey();
+ }
+ qsort(counters, number_of_counters, sizeof(counters[0]), CompareKeys);
+ printf("+--------------------------------------------+-------------+\n");
+ printf("| Name | Value |\n");
+ printf("+--------------------------------------------+-------------+\n");
+ for (j = 0; j < number_of_counters; j++) {
+ Counter* counter = counters[j].counter;
+ const char* key = counters[j].key;
if (counter->is_histogram()) {
- printf("| c:%-36s | %11i |\n", i.CurrentKey(), counter->count());
- printf("| t:%-36s | %11i |\n", i.CurrentKey(), counter->sample_total());
+ printf("| c:%-40s | %11i |\n", key, counter->count());
+ printf("| t:%-40s | %11i |\n", key, counter->sample_total());
} else {
- printf("| %-38s | %11i |\n", i.CurrentKey(), counter->count());
+ printf("| %-42s | %11i |\n", key, counter->count());
}
}
- printf("+----------------------------------------+-------------+\n");
+ printf("+--------------------------------------------+-------------+\n");
+ delete [] counters;
}
if (counters_file_ != NULL)
delete counters_file_;
@@ -816,7 +843,7 @@ void Shell::OnExit() {
static FILE* FOpen(const char* path, const char* mode) {
-#if (defined(_WIN32) || defined(_WIN64))
+#if defined(_MSC_VER) && (defined(_WIN32) || defined(_WIN64))
FILE* result;
if (fopen_s(&result, path, mode) == 0) {
return result;
@@ -900,9 +927,6 @@ void Shell::RunShell() {
#ifndef V8_SHARED
console = LineEditor::Get();
printf("V8 version %s [console: %s]\n", V8::GetVersion(), console->name());
- if (i::FLAG_debugger) {
- printf("JavaScript debugger enabled\n");
- }
console->Open();
while (true) {
i::SmartArrayPointer<char> input = console->Prompt(Shell::kPrompt);
@@ -1253,14 +1277,22 @@ int Shell::RunMain(int argc, char* argv[]) {
Locker lock;
HandleScope scope;
Persistent<Context> context = CreateEvaluationContext();
+ if (options.last_run) {
+ // Keep using the same context in the interactive shell.
+ evaluation_context_ = context;
+#if !defined(V8_SHARED) && defined(ENABLE_DEBUGGER_SUPPORT)
+ // If the interactive debugger is enabled make sure to activate
+ // it before running the files passed on the command line.
+ if (i::FLAG_debugger) {
+ InstallUtilityScript();
+ }
+#endif // !V8_SHARED && ENABLE_DEBUGGER_SUPPORT
+ }
{
Context::Scope cscope(context);
options.isolate_sources[0].Execute();
}
- if (options.last_run) {
- // Keep using the same context in the interactive shell
- evaluation_context_ = context;
- } else {
+ if (!options.last_run) {
context.Dispose();
}
@@ -1331,9 +1363,11 @@ int Shell::Main(int argc, char* argv[]) {
if (( options.interactive_shell
|| !options.script_executed )
&& !options.test_shell ) {
-#ifndef V8_SHARED
- InstallUtilityScript();
-#endif // V8_SHARED
+#if !defined(V8_SHARED) && defined(ENABLE_DEBUGGER_SUPPORT)
+ if (!i::FLAG_debugger) {
+ InstallUtilityScript();
+ }
+#endif // !V8_SHARED && ENABLE_DEBUGGER_SUPPORT
RunShell();
}
diff --git a/deps/v8/src/d8.gyp b/deps/v8/src/d8.gyp
index 70186cfbd..bdc23a20e 100644
--- a/deps/v8/src/d8.gyp
+++ b/deps/v8/src/d8.gyp
@@ -65,7 +65,7 @@
'sources': [ 'd8-readline.cc' ],
}],
[ '(OS=="linux" or OS=="mac" or OS=="freebsd" \
- or OS=="openbsd" or OS=="solaris")', {
+ or OS=="openbsd" or OS=="solaris" or OS=="android")', {
'sources': [ 'd8-posix.cc', ]
}],
[ 'OS=="win"', {
diff --git a/deps/v8/src/d8.js b/deps/v8/src/d8.js
index 3009037e7..86b8c8106 100644
--- a/deps/v8/src/d8.js
+++ b/deps/v8/src/d8.js
@@ -26,10 +26,11 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
String.prototype.startsWith = function (str) {
- if (str.length > this.length)
+ if (str.length > this.length) {
return false;
+ }
return this.substr(0, str.length) == str;
-}
+};
function log10(num) {
return Math.log(num)/Math.log(10);
@@ -52,8 +53,9 @@ function GetCompletions(global, last, full) {
for (var i = 0; i < parts.length; i++) {
var part = parts[i];
var next = current[part];
- if (!next)
+ if (!next) {
return [];
+ }
current = next;
}
var result = [];
@@ -63,8 +65,9 @@ function GetCompletions(global, last, full) {
var properties = mirror.properties();
for (var i = 0; i < properties.length; i++) {
var name = properties[i].name();
- if (typeof name === 'string' && name.startsWith(last))
+ if (typeof name === 'string' && name.startsWith(last)) {
result.push(name);
+ }
}
current = ToInspectableObject(current.__proto__);
}
@@ -114,7 +117,7 @@ Debug.State = {
displaySourceStartLine: -1,
displaySourceEndLine: -1,
currentSourceLine: -1
-}
+};
var trace_compile = false; // Tracing all compile events?
var trace_debug_json = false; // Tracing all debug json packets?
var last_cmd_line = '';
@@ -150,7 +153,7 @@ function DebugMessageDetails(message) {
}
function DebugEventDetails(response) {
- details = {text:'', running:false}
+ details = {text:'', running:false};
// Get the running state.
details.running = response.running();
@@ -217,7 +220,7 @@ function DebugEventDetails(response) {
case 'afterCompile':
if (trace_compile) {
- result = 'Source ' + body.script.name + ' compiled:\n'
+ result = 'Source ' + body.script.name + ' compiled:\n';
var source = body.script.source;
if (!(source[source.length - 1] == '\n')) {
result += source;
@@ -237,7 +240,7 @@ function DebugEventDetails(response) {
}
return details;
-};
+}
function SourceInfo(body) {
@@ -279,7 +282,7 @@ function SourceUnderline(source_text, position) {
// Return the source line text with the underline beneath.
return source_text + '\n' + underline;
-};
+}
// Converts a text command to a JSON request.
@@ -289,7 +292,7 @@ function DebugCommandToJSONRequest(cmd_line) {
print("sending: '" + result + "'");
}
return result;
-};
+}
function DebugRequest(cmd_line) {
@@ -514,7 +517,7 @@ function DebugRequest(cmd_line) {
DebugRequest.prototype.JSONRequest = function() {
return this.request_;
-}
+};
function RequestPacket(command) {
@@ -536,14 +539,14 @@ RequestPacket.prototype.toJSONProtocol = function() {
json += ',"arguments":';
// Encode the arguments part.
if (this.arguments.toJSONProtocol) {
- json += this.arguments.toJSONProtocol()
+ json += this.arguments.toJSONProtocol();
} else {
json += SimpleObjectToJSON_(this.arguments);
}
}
json += '}';
return json;
-}
+};
DebugRequest.prototype.createRequest = function(command) {
@@ -1310,7 +1313,7 @@ DebugRequest.prototype.lolMakeListRequest =
}
return request;
-}
+};
function extractObjId(args) {
@@ -1499,7 +1502,7 @@ DebugRequest.prototype.traceCommand_ = function(args) {
} else {
throw new Error('Invalid trace arguments.');
}
-}
+};
// Handle the help command.
DebugRequest.prototype.helpCommand_ = function(args) {
@@ -1608,7 +1611,7 @@ DebugRequest.prototype.helpCommand_ = function(args) {
print('');
print('disconnect|exit|quit - disconnects and quits the debugger');
print('help - prints this help information');
-}
+};
function formatHandleReference_(value) {
@@ -1623,7 +1626,7 @@ function formatHandleReference_(value) {
function formatObject_(value, include_properties) {
var result = '';
result += formatHandleReference_(value);
- result += ', type: object'
+ result += ', type: object';
result += ', constructor ';
var ctor = value.constructorFunctionValue();
result += formatHandleReference_(ctor);
@@ -1943,7 +1946,7 @@ function roundNumber(num, length) {
// Convert a JSON response to text for display in a text based debugger.
function DebugResponseDetails(response) {
- details = {text:'', running:false}
+ details = { text: '', running: false };
try {
if (!response.success()) {
@@ -2308,7 +2311,7 @@ function DebugResponseDetails(response) {
}
return details;
-};
+}
/**
@@ -2334,7 +2337,7 @@ function ProtocolPackage(json) {
*/
ProtocolPackage.prototype.type = function() {
return this.packet_.type;
-}
+};
/**
@@ -2343,7 +2346,7 @@ ProtocolPackage.prototype.type = function() {
*/
ProtocolPackage.prototype.event = function() {
return this.packet_.event;
-}
+};
/**
@@ -2352,7 +2355,7 @@ ProtocolPackage.prototype.event = function() {
*/
ProtocolPackage.prototype.requestSeq = function() {
return this.packet_.request_seq;
-}
+};
/**
@@ -2361,27 +2364,27 @@ ProtocolPackage.prototype.requestSeq = function() {
*/
ProtocolPackage.prototype.running = function() {
return this.packet_.running ? true : false;
-}
+};
ProtocolPackage.prototype.success = function() {
return this.packet_.success ? true : false;
-}
+};
ProtocolPackage.prototype.message = function() {
return this.packet_.message;
-}
+};
ProtocolPackage.prototype.command = function() {
return this.packet_.command;
-}
+};
ProtocolPackage.prototype.body = function() {
return this.packet_.body;
-}
+};
ProtocolPackage.prototype.bodyValue = function(index) {
@@ -2390,12 +2393,12 @@ ProtocolPackage.prototype.bodyValue = function(index) {
} else {
return new ProtocolValue(this.packet_.body, this);
}
-}
+};
ProtocolPackage.prototype.body = function() {
return this.packet_.body;
-}
+};
ProtocolPackage.prototype.lookup = function(handle) {
@@ -2405,12 +2408,12 @@ ProtocolPackage.prototype.lookup = function(handle) {
} else {
return new ProtocolReference(handle);
}
-}
+};
ProtocolPackage.prototype.raw_json = function() {
return this.raw_json_;
-}
+};
function ProtocolValue(value, packet) {
@@ -2425,7 +2428,7 @@ function ProtocolValue(value, packet) {
*/
ProtocolValue.prototype.type = function() {
return this.value_.type;
-}
+};
/**
@@ -2434,7 +2437,7 @@ ProtocolValue.prototype.type = function() {
*/
ProtocolValue.prototype.field = function(name) {
return this.value_[name];
-}
+};
/**
@@ -2444,7 +2447,7 @@ ProtocolValue.prototype.field = function(name) {
ProtocolValue.prototype.isPrimitive = function() {
return this.isUndefined() || this.isNull() || this.isBoolean() ||
this.isNumber() || this.isString();
-}
+};
/**
@@ -2453,7 +2456,7 @@ ProtocolValue.prototype.isPrimitive = function() {
*/
ProtocolValue.prototype.handle = function() {
return this.value_.handle;
-}
+};
/**
@@ -2462,7 +2465,7 @@ ProtocolValue.prototype.handle = function() {
*/
ProtocolValue.prototype.isUndefined = function() {
return this.value_.type == 'undefined';
-}
+};
/**
@@ -2471,7 +2474,7 @@ ProtocolValue.prototype.isUndefined = function() {
*/
ProtocolValue.prototype.isNull = function() {
return this.value_.type == 'null';
-}
+};
/**
@@ -2480,7 +2483,7 @@ ProtocolValue.prototype.isNull = function() {
*/
ProtocolValue.prototype.isBoolean = function() {
return this.value_.type == 'boolean';
-}
+};
/**
@@ -2489,7 +2492,7 @@ ProtocolValue.prototype.isBoolean = function() {
*/
ProtocolValue.prototype.isNumber = function() {
return this.value_.type == 'number';
-}
+};
/**
@@ -2498,7 +2501,7 @@ ProtocolValue.prototype.isNumber = function() {
*/
ProtocolValue.prototype.isString = function() {
return this.value_.type == 'string';
-}
+};
/**
@@ -2508,7 +2511,7 @@ ProtocolValue.prototype.isString = function() {
ProtocolValue.prototype.isObject = function() {
return this.value_.type == 'object' || this.value_.type == 'function' ||
this.value_.type == 'error' || this.value_.type == 'regexp';
-}
+};
/**
@@ -2518,7 +2521,7 @@ ProtocolValue.prototype.isObject = function() {
ProtocolValue.prototype.constructorFunctionValue = function() {
var ctor = this.value_.constructorFunction;
return this.packet_.lookup(ctor.ref);
-}
+};
/**
@@ -2528,7 +2531,7 @@ ProtocolValue.prototype.constructorFunctionValue = function() {
ProtocolValue.prototype.protoObjectValue = function() {
var proto = this.value_.protoObject;
return this.packet_.lookup(proto.ref);
-}
+};
/**
@@ -2537,7 +2540,7 @@ ProtocolValue.prototype.protoObjectValue = function() {
*/
ProtocolValue.prototype.propertyCount = function() {
return this.value_.properties ? this.value_.properties.length : 0;
-}
+};
/**
@@ -2547,7 +2550,7 @@ ProtocolValue.prototype.propertyCount = function() {
ProtocolValue.prototype.propertyName = function(index) {
var property = this.value_.properties[index];
return property.name;
-}
+};
/**
@@ -2562,7 +2565,7 @@ ProtocolValue.prototype.propertyIndex = function(name) {
}
}
return null;
-}
+};
/**
@@ -2572,7 +2575,7 @@ ProtocolValue.prototype.propertyIndex = function(name) {
ProtocolValue.prototype.propertyValue = function(index) {
var property = this.value_.properties[index];
return this.packet_.lookup(property.ref);
-}
+};
/**
@@ -2581,12 +2584,12 @@ ProtocolValue.prototype.propertyValue = function(index) {
*/
ProtocolValue.prototype.value = function() {
return this.value_.value;
-}
+};
ProtocolValue.prototype.valueString = function() {
return this.value_.text;
-}
+};
function ProtocolReference(handle) {
@@ -2596,7 +2599,7 @@ function ProtocolReference(handle) {
ProtocolReference.prototype.handle = function() {
return this.handle_;
-}
+};
function MakeJSONPair_(name, value) {
@@ -2667,7 +2670,7 @@ function StringToJSON_(value) {
// Convert control character to unicode escape sequence.
return '\\u00' +
'0' + // TODO %NumberToRadixString(Math.floor(mapped / 16), 16) +
- '0' // TODO %NumberToRadixString(mapped % 16, 16);
+ '0'; // TODO %NumberToRadixString(mapped % 16, 16)
})
+ '"';
}
@@ -2738,7 +2741,7 @@ function SimpleObjectToJSON_(object) {
if (property_value === null) {
property_value_json = 'null';
} else if (typeof property_value.toJSONProtocol == 'function') {
- property_value_json = property_value.toJSONProtocol(true)
+ property_value_json = property_value.toJSONProtocol(true);
} else if (property_value.constructor.name == 'Array'){
property_value_json = SimpleArrayToJSON_(property_value);
} else {
@@ -2789,7 +2792,7 @@ function SimpleArrayToJSON_(array) {
}
var elem = array[i];
if (elem.toJSONProtocol) {
- json += elem.toJSONProtocol(true)
+ json += elem.toJSONProtocol(true);
} else if (typeof(elem) === 'object') {
json += SimpleObjectToJSON_(elem);
} else if (typeof(elem) === 'boolean') {
diff --git a/deps/v8/src/date.js b/deps/v8/src/date.js
index ccefce576..999009e86 100644
--- a/deps/v8/src/date.js
+++ b/deps/v8/src/date.js
@@ -294,8 +294,8 @@ function TimeInYear(year) {
}
-var ymd_from_time_cache = [$NaN, $NaN, $NaN];
-var ymd_from_time_cached_time = $NaN;
+var ymd_from_time_cache = [1970, 0, 1];
+var ymd_from_time_cached_time = 0;
function YearFromTime(t) {
if (t !== ymd_from_time_cached_time) {
@@ -304,7 +304,7 @@ function YearFromTime(t) {
}
%DateYMDFromTime(t, ymd_from_time_cache);
- ymd_from_time_cached_time = t
+ ymd_from_time_cached_time = t;
}
return ymd_from_time_cache[0];
@@ -316,7 +316,7 @@ function MonthFromTime(t) {
return $NaN;
}
%DateYMDFromTime(t, ymd_from_time_cache);
- ymd_from_time_cached_time = t
+ ymd_from_time_cached_time = t;
}
return ymd_from_time_cache[1];
@@ -329,7 +329,7 @@ function DateFromTime(t) {
}
%DateYMDFromTime(t, ymd_from_time_cache);
- ymd_from_time_cached_time = t
+ ymd_from_time_cached_time = t;
}
return ymd_from_time_cache[2];
@@ -351,13 +351,12 @@ function MakeDay(year, month, date) {
date = TO_INTEGER_MAP_MINUS_ZERO(date);
if (year < kMinYear || year > kMaxYear ||
- month < kMinMonth || month > kMaxMonth ||
- date < kMinDate || date > kMaxDate) {
+ month < kMinMonth || month > kMaxMonth) {
return $NaN;
}
- // Now we rely on year, month and date being SMIs.
- return %DateMakeDay(year, month, date);
+ // Now we rely on year and month being SMIs.
+ return %DateMakeDay(year, month) + date - 1;
}
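
Dropping the date-range check appears safe because %DateMakeDay(year, month) + date - 1 lets out-of-range day counts roll across month boundaries, matching the observable Date behavior:

new Date(2011, 0, 45);  // Feb 14 2011: day 45 overflows January's 31 days
new Date(2011, 0, 0);   // Dec 31 2010: day 0 underflows into December
new Date(2011, 13, 1);  // Feb 1 2012: month overflow normalizes separately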
@@ -446,8 +445,9 @@ var Date_cache = {
minutes = argc > 4 ? ToNumber(minutes) : 0;
seconds = argc > 5 ? ToNumber(seconds) : 0;
ms = argc > 6 ? ToNumber(ms) : 0;
- year = (!NUMBER_IS_NAN(year) && 0 <= TO_INTEGER(year) && TO_INTEGER(year) <= 99)
- ? 1900 + TO_INTEGER(year) : year;
+ year = (!NUMBER_IS_NAN(year) &&
+ 0 <= TO_INTEGER(year) &&
+ TO_INTEGER(year) <= 99) ? 1900 + TO_INTEGER(year) : year;
var day = MakeDay(year, month, date);
var time = MakeTime(hours, minutes, seconds, ms);
value = TimeClip(UTC(MakeDate(day, time)));
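
The reflowed condition keeps the legacy rule that integer years 0 through 99 are offset by 1900; a quick check:

new Date(95, 0, 1).getFullYear();    // 1995
new Date(0, 0, 1).getFullYear();     // 1900
new Date(100, 0, 1).getFullYear();   // 100: years >= 100 pass through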
@@ -460,7 +460,8 @@ var Date_cache = {
var WeekDays = ['Sun', 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat'];
-var Months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'];
+var Months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
+ 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'];
function TwoDigitString(value) {
@@ -476,8 +477,10 @@ function DateString(time) {
}
-var LongWeekDays = ['Sunday', 'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday'];
-var LongMonths = ['January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September', 'October', 'November', 'December'];
+var LongWeekDays = ['Sunday', 'Monday', 'Tuesday', 'Wednesday',
+ 'Thursday', 'Friday', 'Saturday'];
+var LongMonths = ['January', 'February', 'March', 'April', 'May', 'June',
+ 'July', 'August', 'September', 'October', 'November', 'December'];
function LongDateString(time) {
@@ -557,8 +560,9 @@ function DateUTC(year, month, date, hours, minutes, seconds, ms) {
minutes = argc > 4 ? ToNumber(minutes) : 0;
seconds = argc > 5 ? ToNumber(seconds) : 0;
ms = argc > 6 ? ToNumber(ms) : 0;
- year = (!NUMBER_IS_NAN(year) && 0 <= TO_INTEGER(year) && TO_INTEGER(year) <= 99)
- ? 1900 + TO_INTEGER(year) : year;
+ year = (!NUMBER_IS_NAN(year) &&
+ 0 <= TO_INTEGER(year) &&
+ TO_INTEGER(year) <= 99) ? 1900 + TO_INTEGER(year) : year;
var day = MakeDay(year, month, date);
var time = MakeTime(hours, minutes, seconds, ms);
return %_SetValueOf(this, TimeClip(MakeDate(day, time)));
@@ -778,7 +782,10 @@ function DateSetTime(ms) {
function DateSetMilliseconds(ms) {
var t = LocalTime(DATE_VALUE(this));
ms = ToNumber(ms);
- var time = MakeTime(HOUR_FROM_TIME(t), MIN_FROM_TIME(t), SEC_FROM_TIME(t), ms);
+ var time = MakeTime(HOUR_FROM_TIME(t),
+ MIN_FROM_TIME(t),
+ SEC_FROM_TIME(t),
+ ms);
return %_SetValueOf(this, TimeClip(UTC(MakeDate(DAY(t), time))));
}
@@ -787,7 +794,10 @@ function DateSetMilliseconds(ms) {
function DateSetUTCMilliseconds(ms) {
var t = DATE_VALUE(this);
ms = ToNumber(ms);
- var time = MakeTime(HOUR_FROM_TIME(t), MIN_FROM_TIME(t), SEC_FROM_TIME(t), ms);
+ var time = MakeTime(HOUR_FROM_TIME(t),
+ MIN_FROM_TIME(t),
+ SEC_FROM_TIME(t),
+ ms);
return %_SetValueOf(this, TimeClip(MakeDate(DAY(t), time)));
}
@@ -978,9 +988,10 @@ function PadInt(n, digits) {
}
+// ECMA 262 - 15.9.5.43
function DateToISOString() {
var t = DATE_VALUE(this);
- if (NUMBER_IS_NAN(t)) return kInvalidDate;
+ if (NUMBER_IS_NAN(t)) throw MakeRangeError("invalid_time_value", []);
var year = this.getUTCFullYear();
var year_string;
if (year >= 0 && year <= 9999) {
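
Previously an invalid date stringified to kInvalidDate; per ES5.1 15.9.5.43 it must throw a RangeError instead:

new Date(NaN).toISOString();        // throws RangeError
new Date("garbage").toISOString();  // throws RangeError: time value is NaN
new Date(0).toISOString();          // "1970-01-01T00:00:00.000Z"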
@@ -1062,7 +1073,7 @@ function SetUpDate() {
// Set up non-enumerable functions of the Date prototype object and
// set their names.
- InstallFunctionsOnHiddenPrototype($Date.prototype, DONT_ENUM, $Array(
+ InstallFunctions($Date.prototype, DONT_ENUM, $Array(
"toString", DateToString,
"toDateString", DateToDateString,
"toTimeString", DateToTimeString,
diff --git a/deps/v8/src/debug-debugger.js b/deps/v8/src/debug-debugger.js
index d254ee569..8cbe0b362 100644
--- a/deps/v8/src/debug-debugger.js
+++ b/deps/v8/src/debug-debugger.js
@@ -286,7 +286,7 @@ ScriptBreakPoint.prototype.cloneForOtherScript = function (other_script) {
copy.condition_ = this.condition_;
copy.ignoreCount_ = this.ignoreCount_;
return copy;
-}
+};
ScriptBreakPoint.prototype.number = function() {
@@ -335,13 +335,13 @@ ScriptBreakPoint.prototype.actual_locations = function() {
locations.push(this.break_points_[i].actual_location);
}
return locations;
-}
+};
ScriptBreakPoint.prototype.update_positions = function(line, column) {
this.line_ = line;
this.column_ = column;
-}
+};
ScriptBreakPoint.prototype.hit_count = function() {
@@ -477,9 +477,10 @@ ScriptBreakPoint.prototype.clear = function () {
// break points set in this script.
function UpdateScriptBreakPoints(script) {
for (var i = 0; i < script_break_points.length; i++) {
- if (script_break_points[i].type() == Debug.ScriptBreakPointType.ScriptName &&
- script_break_points[i].matchesScript(script)) {
- script_break_points[i].set(script);
+ var break_point = script_break_points[i];
+ if ((break_point.type() == Debug.ScriptBreakPointType.ScriptName) &&
+ break_point.matchesScript(script)) {
+ break_point.set(script);
}
}
}
@@ -585,7 +586,7 @@ Debug.findFunctionSourceLocation = function(func, opt_line, opt_column) {
var script = %FunctionGetScript(func);
var script_offset = %FunctionGetScriptSourcePosition(func);
return script.locationFromLine(opt_line, opt_column, script_offset);
-}
+};
// Returns the character position in a script based on a line number and an
@@ -593,7 +594,7 @@ Debug.findFunctionSourceLocation = function(func, opt_line, opt_column) {
Debug.findScriptSourcePosition = function(script, opt_line, opt_column) {
var location = script.locationFromLine(opt_line, opt_column);
return location ? location.position : null;
-}
+};
Debug.findBreakPoint = function(break_point_number, remove) {
@@ -627,7 +628,7 @@ Debug.findBreakPointActualLocations = function(break_point_number) {
}
}
return [];
-}
+};
Debug.setBreakPoint = function(func, opt_line, opt_column, opt_condition) {
if (!IS_FUNCTION(func)) throw new Error('Parameters have wrong types.');
@@ -677,8 +678,9 @@ Debug.setBreakPointByScriptIdAndPosition = function(script_id, position,
{
break_point = MakeBreakPoint(position);
break_point.setCondition(condition);
- if (!enabled)
+ if (!enabled) {
break_point.disable();
+ }
var scripts = this.scripts();
for (var i = 0; i < scripts.length; i++) {
if (script_id == scripts[i].id) {
@@ -771,7 +773,7 @@ Debug.findScriptBreakPoint = function(break_point_number, remove) {
}
}
return script_break_point;
-}
+};
// Sets a breakpoint in a script identified through id or name at the
@@ -799,7 +801,7 @@ Debug.setScriptBreakPoint = function(type, script_id_or_name,
}
return script_break_point.number();
-}
+};
Debug.setScriptBreakPointById = function(script_id,
@@ -808,7 +810,7 @@ Debug.setScriptBreakPointById = function(script_id,
return this.setScriptBreakPoint(Debug.ScriptBreakPointType.ScriptId,
script_id, opt_line, opt_column,
opt_condition, opt_groupId);
-}
+};
Debug.setScriptBreakPointByName = function(script_name,
@@ -817,7 +819,7 @@ Debug.setScriptBreakPointByName = function(script_name,
return this.setScriptBreakPoint(Debug.ScriptBreakPointType.ScriptName,
script_name, opt_line, opt_column,
opt_condition, opt_groupId);
-}
+};
Debug.setScriptBreakPointByRegExp = function(script_regexp,
@@ -826,7 +828,7 @@ Debug.setScriptBreakPointByRegExp = function(script_regexp,
return this.setScriptBreakPoint(Debug.ScriptBreakPointType.ScriptRegExp,
script_regexp, opt_line, opt_column,
opt_condition, opt_groupId);
-}
+};
Debug.enableScriptBreakPoint = function(break_point_number) {
@@ -841,13 +843,15 @@ Debug.disableScriptBreakPoint = function(break_point_number) {
};
-Debug.changeScriptBreakPointCondition = function(break_point_number, condition) {
+Debug.changeScriptBreakPointCondition = function(
+ break_point_number, condition) {
var script_break_point = this.findScriptBreakPoint(break_point_number, false);
script_break_point.setCondition(condition);
};
-Debug.changeScriptBreakPointIgnoreCount = function(break_point_number, ignoreCount) {
+Debug.changeScriptBreakPointIgnoreCount = function(
+ break_point_number, ignoreCount) {
if (ignoreCount < 0) {
throw new Error('Invalid argument');
}
@@ -858,12 +862,12 @@ Debug.changeScriptBreakPointIgnoreCount = function(break_point_number, ignoreCou
Debug.scriptBreakPoints = function() {
return script_break_points;
-}
+};
Debug.clearStepping = function() {
%ClearStepping();
-}
+};
Debug.setBreakOnException = function() {
return %ChangeBreakOnException(Debug.ExceptionBreak.Caught, true);
@@ -940,7 +944,7 @@ ExecutionState.prototype.prepareStep = function(opt_action, opt_count) {
var count = opt_count ? %ToNumber(opt_count) : 1;
return %PrepareStep(this.break_id, action, count);
-}
+};
ExecutionState.prototype.evaluateGlobal = function(source, disable_break,
opt_additional_context) {
@@ -960,8 +964,9 @@ ExecutionState.prototype.threadCount = function() {
ExecutionState.prototype.frame = function(opt_index) {
// If no index supplied return the selected frame.
if (opt_index == null) opt_index = this.selected_frame;
- if (opt_index < 0 || opt_index >= this.frameCount())
+ if (opt_index < 0 || opt_index >= this.frameCount()) {
throw new Error('Illegal frame index.');
+ }
return new FrameMirror(this.break_id, opt_index);
};
@@ -1088,12 +1093,12 @@ ExceptionEvent.prototype.eventType = function() {
ExceptionEvent.prototype.exception = function() {
return this.exception_;
-}
+};
ExceptionEvent.prototype.uncaught = function() {
return this.uncaught_;
-}
+};
ExceptionEvent.prototype.func = function() {
@@ -1185,7 +1190,7 @@ CompileEvent.prototype.toJSONProtocol = function() {
o.body.script = this.script_;
return o.toJSONProtocol();
-}
+};
function MakeNewFunctionEvent(func) {
@@ -1241,7 +1246,7 @@ ScriptCollectedEvent.prototype.toJSONProtocol = function() {
o.body = {};
o.body.script = { id: this.id() };
return o.toJSONProtocol();
-}
+};
function MakeScriptObject_(script, include_source) {
@@ -1258,18 +1263,18 @@ function MakeScriptObject_(script, include_source) {
o.source = script.source();
}
return o;
-};
+}
function DebugCommandProcessor(exec_state, opt_is_running) {
this.exec_state_ = exec_state;
this.running_ = opt_is_running || false;
-};
+}
DebugCommandProcessor.prototype.processDebugRequest = function (request) {
return this.processDebugJSONRequest(request);
-}
+};
function ProtocolMessage(request) {
@@ -1297,13 +1302,13 @@ ProtocolMessage.prototype.setOption = function(name, value) {
this.options_ = {};
}
this.options_[name] = value;
-}
+};
ProtocolMessage.prototype.failed = function(message) {
this.success = false;
this.message = message;
-}
+};
ProtocolMessage.prototype.toJSONProtocol = function() {
@@ -1351,7 +1356,7 @@ ProtocolMessage.prototype.toJSONProtocol = function() {
}
json.running = this.running;
return JSON.stringify(json);
-}
+};
DebugCommandProcessor.prototype.createResponse = function(request) {
@@ -1359,7 +1364,8 @@ DebugCommandProcessor.prototype.createResponse = function(request) {
};
-DebugCommandProcessor.prototype.processDebugJSONRequest = function(json_request) {
+DebugCommandProcessor.prototype.processDebugJSONRequest = function(
+ json_request) {
var request; // Current request.
var response; // Generated response.
try {
@@ -1646,7 +1652,7 @@ DebugCommandProcessor.prototype.setBreakPointRequest_ =
// Add the break point number to the response.
response.body = { type: type,
- breakpoint: break_point_number }
+ breakpoint: break_point_number };
// Add break point information to the response.
if (break_point instanceof ScriptBreakPoint) {
@@ -1660,7 +1666,8 @@ DebugCommandProcessor.prototype.setBreakPointRequest_ =
response.body.type = 'scriptRegExp';
response.body.script_regexp = break_point.script_regexp_object().source;
} else {
- throw new Error("Internal error: Unexpected breakpoint type: " + break_point.type());
+ throw new Error("Internal error: Unexpected breakpoint type: " +
+ break_point.type());
}
response.body.line = break_point.line();
response.body.column = break_point.column();
@@ -1672,7 +1679,8 @@ DebugCommandProcessor.prototype.setBreakPointRequest_ =
};
-DebugCommandProcessor.prototype.changeBreakPointRequest_ = function(request, response) {
+DebugCommandProcessor.prototype.changeBreakPointRequest_ = function(
+ request, response) {
// Check for legal request.
if (!request.arguments) {
response.failed('Missing arguments');
@@ -1709,10 +1717,11 @@ DebugCommandProcessor.prototype.changeBreakPointRequest_ = function(request, res
if (!IS_UNDEFINED(ignoreCount)) {
Debug.changeBreakPointIgnoreCount(break_point, ignoreCount);
}
-}
+};
-DebugCommandProcessor.prototype.clearBreakPointGroupRequest_ = function(request, response) {
+DebugCommandProcessor.prototype.clearBreakPointGroupRequest_ = function(
+ request, response) {
// Check for legal request.
if (!request.arguments) {
response.failed('Missing arguments');
@@ -1743,10 +1752,11 @@ DebugCommandProcessor.prototype.clearBreakPointGroupRequest_ = function(request,
// Add the cleared break point numbers to the response.
response.body = { breakpoints: cleared_break_points };
-}
+};
-DebugCommandProcessor.prototype.clearBreakPointRequest_ = function(request, response) {
+DebugCommandProcessor.prototype.clearBreakPointRequest_ = function(
+ request, response) {
// Check for legal request.
if (!request.arguments) {
response.failed('Missing arguments');
@@ -1766,11 +1776,12 @@ DebugCommandProcessor.prototype.clearBreakPointRequest_ = function(request, resp
Debug.clearBreakPoint(break_point);
// Add the cleared break point number to the response.
- response.body = { breakpoint: break_point }
-}
+ response.body = { breakpoint: break_point };
+};
-DebugCommandProcessor.prototype.listBreakpointsRequest_ = function(request, response) {
+DebugCommandProcessor.prototype.listBreakpointsRequest_ = function(
+ request, response) {
var array = [];
for (var i = 0; i < script_break_points.length; i++) {
var break_point = script_break_points[i];
@@ -1785,7 +1796,7 @@ DebugCommandProcessor.prototype.listBreakpointsRequest_ = function(request, resp
condition: break_point.condition(),
ignoreCount: break_point.ignoreCount(),
actual_locations: break_point.actual_locations()
- }
+ };
if (break_point.type() == Debug.ScriptBreakPointType.ScriptId) {
description.type = 'scriptId';
@@ -1797,7 +1808,8 @@ DebugCommandProcessor.prototype.listBreakpointsRequest_ = function(request, resp
description.type = 'scriptRegExp';
description.script_regexp = break_point.script_regexp_object().source;
} else {
- throw new Error("Internal error: Unexpected breakpoint type: " + break_point.type());
+ throw new Error("Internal error: Unexpected breakpoint type: " +
+ break_point.type());
}
array.push(description);
}
@@ -1806,15 +1818,15 @@ DebugCommandProcessor.prototype.listBreakpointsRequest_ = function(request, resp
breakpoints: array,
breakOnExceptions: Debug.isBreakOnException(),
breakOnUncaughtExceptions: Debug.isBreakOnUncaughtException()
- }
-}
+ };
+};
DebugCommandProcessor.prototype.disconnectRequest_ =
function(request, response) {
Debug.disableAllBreakPoints();
this.continueRequest_(request, response);
-}
+};
DebugCommandProcessor.prototype.setExceptionBreakRequest_ =
@@ -1859,10 +1871,11 @@ DebugCommandProcessor.prototype.setExceptionBreakRequest_ =
// Add the cleared break point number to the response.
response.body = { 'type': type, 'enabled': enabled };
-}
+};
-DebugCommandProcessor.prototype.backtraceRequest_ = function(request, response) {
+DebugCommandProcessor.prototype.backtraceRequest_ = function(
+ request, response) {
// Get the number of frames.
var total_frames = this.exec_state_.frameCount();
@@ -1870,12 +1883,12 @@ DebugCommandProcessor.prototype.backtraceRequest_ = function(request, response)
if (total_frames == 0) {
response.body = {
totalFrames: total_frames
- }
+ };
return;
}
// Default frame range to include in backtrace.
- var from_index = 0
+ var from_index = 0;
var to_index = kDefaultBacktraceLength;
// Get the range from the arguments.
@@ -1888,7 +1901,7 @@ DebugCommandProcessor.prototype.backtraceRequest_ = function(request, response)
}
if (request.arguments.bottom) {
var tmp_index = total_frames - from_index;
- from_index = total_frames - to_index
+ from_index = total_frames - to_index;
to_index = tmp_index;
}
if (from_index < 0 || to_index < 0) {
@@ -1914,7 +1927,7 @@ DebugCommandProcessor.prototype.backtraceRequest_ = function(request, response)
toFrame: to_index,
totalFrames: total_frames,
frames: frames
- }
+ };
};
@@ -1938,8 +1951,8 @@ DebugCommandProcessor.prototype.frameRequest_ = function(request, response) {
DebugCommandProcessor.prototype.frameForScopeRequest_ = function(request) {
- // Get the frame for which the scope or scopes are requested. With no frameNumber
- // argument use the currently selected frame.
+ // Get the frame for which the scope or scopes are requested.
+ // With no frameNumber argument use the currently selected frame.
if (request.arguments && !IS_UNDEFINED(request.arguments.frameNumber)) {
frame_index = request.arguments.frameNumber;
if (frame_index < 0 || this.exec_state_.frameCount() <= frame_index) {
@@ -1949,7 +1962,7 @@ DebugCommandProcessor.prototype.frameForScopeRequest_ = function(request) {
} else {
return this.exec_state_.frame();
}
-}
+};
DebugCommandProcessor.prototype.scopesRequest_ = function(request, response) {
@@ -1972,7 +1985,7 @@ DebugCommandProcessor.prototype.scopesRequest_ = function(request, response) {
toScope: total_scopes,
totalScopes: total_scopes,
scopes: scopes
- }
+ };
};
@@ -2217,7 +2230,8 @@ DebugCommandProcessor.prototype.scriptsRequest_ = function(request, response) {
if (!IS_UNDEFINED(request.arguments.types)) {
types = %ToNumber(request.arguments.types);
if (isNaN(types) || types < 0) {
- return response.failed('Invalid types "' + request.arguments.types + '"');
+ return response.failed('Invalid types "' +
+ request.arguments.types + '"');
}
}
@@ -2286,7 +2300,7 @@ DebugCommandProcessor.prototype.threadsRequest_ = function(request, response) {
var details = %GetThreadDetails(this.exec_state_.break_id, i);
var thread_info = { current: details[0],
id: details[1]
- }
+ };
threads.push(thread_info);
}
@@ -2294,7 +2308,7 @@ DebugCommandProcessor.prototype.threadsRequest_ = function(request, response) {
response.body = {
totalThreads: total_threads,
threads: threads
- }
+ };
};
@@ -2306,7 +2320,7 @@ DebugCommandProcessor.prototype.suspendRequest_ = function(request, response) {
DebugCommandProcessor.prototype.versionRequest_ = function(request, response) {
response.body = {
V8Version: %GetV8Version()
- }
+ };
};
@@ -2322,7 +2336,8 @@ DebugCommandProcessor.prototype.profileRequest_ = function(request, response) {
};
-DebugCommandProcessor.prototype.changeLiveRequest_ = function(request, response) {
+DebugCommandProcessor.prototype.changeLiveRequest_ = function(
+ request, response) {
if (!Debug.LiveEdit) {
return response.failed('LiveEdit feature is not supported');
}
@@ -2393,7 +2408,7 @@ DebugCommandProcessor.prototype.debuggerFlagsRequest_ = function(request,
response.body.flags.push({ name: name, value: value });
}
}
-}
+};
DebugCommandProcessor.prototype.v8FlagsRequest_ = function(request, response) {
@@ -2499,7 +2514,7 @@ DebugCommandProcessor.prototype.lolPrintRequest_ = function(request, response) {
// running.
DebugCommandProcessor.prototype.isRunning = function() {
return this.running_;
-}
+};
DebugCommandProcessor.prototype.systemBreak = function(cmd, args) {
@@ -2515,7 +2530,7 @@ function NumberToHex8Str(n) {
n = n >>> 4;
}
return r;
-};
+}
/**
@@ -2591,7 +2606,7 @@ function ValueToProtocolValue_(value, mirror_serializer) {
case 'string':
case 'number':
json = value;
- break
+ break;
default:
json = null;
diff --git a/deps/v8/src/debug.cc b/deps/v8/src/debug.cc
index a229d39c3..c654dfbd2 100644
--- a/deps/v8/src/debug.cc
+++ b/deps/v8/src/debug.cc
@@ -40,6 +40,7 @@
#include "global-handles.h"
#include "ic.h"
#include "ic-inl.h"
+#include "isolate-inl.h"
#include "list.h"
#include "messages.h"
#include "natives.h"
@@ -86,19 +87,13 @@ static void PrintLn(v8::Local<v8::Value> value) {
static Handle<Code> ComputeCallDebugBreak(int argc, Code::Kind kind) {
Isolate* isolate = Isolate::Current();
- CALL_HEAP_FUNCTION(
- isolate,
- isolate->stub_cache()->ComputeCallDebugBreak(argc, kind),
- Code);
+ return isolate->stub_cache()->ComputeCallDebugBreak(argc, kind);
}
static Handle<Code> ComputeCallDebugPrepareStepIn(int argc, Code::Kind kind) {
Isolate* isolate = Isolate::Current();
- CALL_HEAP_FUNCTION(
- isolate,
- isolate->stub_cache()->ComputeCallDebugPrepareStepIn(argc, kind),
- Code);
+ return isolate->stub_cache()->ComputeCallDebugPrepareStepIn(argc, kind);
}
@@ -401,15 +396,15 @@ void BreakLocationIterator::PrepareStepIn() {
// Step in can only be prepared if currently positioned on an IC call,
// construct call or CallFunction stub call.
Address target = rinfo()->target_address();
- Handle<Code> code(Code::GetCodeFromTargetAddress(target));
- if (code->is_call_stub() || code->is_keyed_call_stub()) {
+ Handle<Code> target_code(Code::GetCodeFromTargetAddress(target));
+ if (target_code->is_call_stub() || target_code->is_keyed_call_stub()) {
// Step in through IC call is handled by the runtime system. Therefore make
// sure that any current IC is cleared and the runtime system is
// called. If the executing code has a debug break at the location, change
// the call in the original code as it is the code there that will be
// executed in place of the debug break call.
- Handle<Code> stub = ComputeCallDebugPrepareStepIn(code->arguments_count(),
- code->kind());
+ Handle<Code> stub = ComputeCallDebugPrepareStepIn(
+ target_code->arguments_count(), target_code->kind());
if (IsDebugBreak()) {
original_rinfo()->set_target_address(stub->entry());
} else {
@@ -419,7 +414,7 @@ void BreakLocationIterator::PrepareStepIn() {
#ifdef DEBUG
// All the following stuff is needed only for assertion checks so the code
// is wrapped in ifdef.
- Handle<Code> maybe_call_function_stub = code;
+ Handle<Code> maybe_call_function_stub = target_code;
if (IsDebugBreak()) {
Address original_target = original_rinfo()->target_address();
maybe_call_function_stub =
@@ -436,8 +431,9 @@ void BreakLocationIterator::PrepareStepIn() {
// Step in through CallFunction stub should also be prepared by caller of
// this function (Debug::PrepareStep) which should flood target function
// with breakpoints.
- ASSERT(RelocInfo::IsConstructCall(rmode()) || code->is_inline_cache_stub()
- || is_call_function_stub);
+ ASSERT(RelocInfo::IsConstructCall(rmode()) ||
+ target_code->is_inline_cache_stub() ||
+ is_call_function_stub);
#endif
}
}
@@ -474,11 +470,11 @@ void BreakLocationIterator::SetDebugBreakAtIC() {
RelocInfo::Mode mode = rmode();
if (RelocInfo::IsCodeTarget(mode)) {
Address target = rinfo()->target_address();
- Handle<Code> code(Code::GetCodeFromTargetAddress(target));
+ Handle<Code> target_code(Code::GetCodeFromTargetAddress(target));
// Patch the code to invoke the builtin debug break function matching the
// calling convention used by the call site.
- Handle<Code> dbgbrk_code(Debug::FindDebugBreak(code, mode));
+ Handle<Code> dbgbrk_code(Debug::FindDebugBreak(target_code, mode));
rinfo()->set_target_address(dbgbrk_code->entry());
}
}
@@ -772,7 +768,7 @@ bool Debug::CompileDebuggerScript(int index) {
// Execute the shared function in the debugger context.
Handle<Context> context = isolate->global_context();
- bool caught_exception = false;
+ bool caught_exception;
Handle<JSFunction> function =
factory->NewFunctionFromSharedFunctionInfo(function_info, context);
@@ -1103,14 +1099,13 @@ bool Debug::CheckBreakPoint(Handle<Object> break_point_object) {
Handle<Object> break_id = factory->NewNumberFromInt(Debug::break_id());
// Call the JavaScript function that checks whether the break point is
// triggered.
- bool caught_exception = false;
- const int argc = 2;
- Object** argv[argc] = {
- break_id.location(),
- reinterpret_cast<Object**>(break_point_object.location())
- };
+ bool caught_exception;
+ Handle<Object> argv[] = { break_id, break_point_object };
Handle<Object> result = Execution::TryCall(check_break_point,
- isolate_->js_builtins_object(), argc, argv, &caught_exception);
+ isolate_->js_builtins_object(),
+ ARRAY_SIZE(argv),
+ argv,
+ &caught_exception);
// If an exception was thrown or the result is not a boolean, handle the
// break point as not triggered.
if (caught_exception || !result->IsBoolean()) {
@@ -1575,7 +1570,7 @@ Handle<Code> Debug::FindDebugBreak(Handle<Code> code, RelocInfo::Mode mode) {
if (code->kind() == Code::STUB) {
ASSERT(code->major_key() == CodeStub::CallFunction);
Handle<Code> result =
- Isolate::Current()->builtins()->StubNoRegisters_DebugBreak();
+ Isolate::Current()->builtins()->CallFunctionStub_DebugBreak();
return result;
}
@@ -1726,46 +1721,218 @@ void Debug::ClearStepNext() {
}
+// Helper function to compile full code for debugging. This code will
+// have debug break slots and deoptimization information. Deoptimization
+// information is required in case an optimized version of this function
+// is still activated on the stack. It will also make sure that the full
+// code is compiled with the same flags as the previous version - that
+// is, flags which can change the code generated. The current method of
+// mapping from already compiled full code without debug break slots to
+// full code with debug break slots depends on the generated code being
+// otherwise exactly the same.
+static bool CompileFullCodeForDebugging(Handle<SharedFunctionInfo> shared,
+ Handle<Code> current_code) {
+ ASSERT(!current_code->has_debug_break_slots());
+
+ CompilationInfo info(shared);
+ info.MarkCompilingForDebugging(current_code);
+ ASSERT(!info.shared_info()->is_compiled());
+ ASSERT(!info.isolate()->has_pending_exception());
+
+ // Use compile lazy which will end up compiling the full code in the
+ // configuration set up above.
+ bool result = Compiler::CompileLazy(&info);
+ ASSERT(result != Isolate::Current()->has_pending_exception());
+ info.isolate()->clear_pending_exception();
+#if DEBUG
+ if (result) {
+ Handle<Code> new_code(shared->code());
+ ASSERT(new_code->has_debug_break_slots());
+ ASSERT(current_code->is_compiled_optimizable() ==
+ new_code->is_compiled_optimizable());
+ ASSERT(current_code->instruction_size() <= new_code->instruction_size());
+ }
+#endif
+ return result;
+}
+
+
void Debug::PrepareForBreakPoints() {
// If preparing for the first break point make sure to deoptimize all
// functions as debugging does not work with optimized code.
if (!has_break_points_) {
Deoptimizer::DeoptimizeAll();
- AssertNoAllocation no_allocation;
- Builtins* builtins = isolate_->builtins();
- Code* lazy_compile = builtins->builtin(Builtins::kLazyCompile);
-
- // Find all non-optimized code functions with activation frames on
- // the stack.
- List<JSFunction*> active_functions(100);
- for (JavaScriptFrameIterator it(isolate_); !it.done(); it.Advance()) {
- JavaScriptFrame* frame = it.frame();
- if (frame->function()->IsJSFunction()) {
- JSFunction* function = JSFunction::cast(frame->function());
- if (function->code()->kind() == Code::FUNCTION)
- active_functions.Add(function);
+ Handle<Code> lazy_compile =
+ Handle<Code>(isolate_->builtins()->builtin(Builtins::kLazyCompile));
+
+ // Keep the list of activated functions in a handlified list as it
+ // is used both in GC and non-GC code.
+ List<Handle<JSFunction> > active_functions(100);
+
+ {
+ // We are going to iterate heap to find all functions without
+ // debug break slots.
+ isolate_->heap()->CollectAllGarbage(Heap::kMakeHeapIterableMask);
+
+ // Ensure no GC in this scope as we are comparing raw pointer
+ // values and performing a heap iteration.
+ AssertNoAllocation no_allocation;
+
+ // Find all non-optimized code functions with activation frames
+ // on the stack. This includes functions which have optimized
+ // activations (including inlined functions) on the stack as the
+ // non-optimized code is needed for the lazy deoptimization.
+ for (JavaScriptFrameIterator it(isolate_); !it.done(); it.Advance()) {
+ JavaScriptFrame* frame = it.frame();
+ if (frame->is_optimized()) {
+ List<JSFunction*> functions(Compiler::kMaxInliningLevels + 1);
+ frame->GetFunctions(&functions);
+ for (int i = 0; i < functions.length(); i++) {
+ if (!functions[i]->shared()->code()->has_debug_break_slots()) {
+ active_functions.Add(Handle<JSFunction>(functions[i]));
+ }
+ }
+ } else if (frame->function()->IsJSFunction()) {
+ JSFunction* function = JSFunction::cast(frame->function());
+ if (function->code()->kind() == Code::FUNCTION &&
+ !function->code()->has_debug_break_slots()) {
+ active_functions.Add(Handle<JSFunction>(function));
+ }
+ }
+ }
+
+ // Sort the functions on the object pointer value to prepare for
+ // the binary search below.
+ active_functions.Sort(HandleObjectPointerCompare<JSFunction>);
+
+ // Scan the heap for all non-optimized functions which have no
+ // debug break slots.
+ HeapIterator iterator;
+ HeapObject* obj = NULL;
+ while (((obj = iterator.next()) != NULL)) {
+ if (obj->IsJSFunction()) {
+ JSFunction* function = JSFunction::cast(obj);
+ if (function->shared()->allows_lazy_compilation() &&
+ function->shared()->script()->IsScript() &&
+ function->code()->kind() == Code::FUNCTION &&
+ !function->code()->has_debug_break_slots()) {
+ bool has_activation =
+ SortedListBSearch<Handle<JSFunction> >(
+ active_functions,
+ Handle<JSFunction>(function),
+ HandleObjectPointerCompare<JSFunction>) != -1;
+ if (!has_activation) {
+ function->set_code(*lazy_compile);
+ function->shared()->set_code(*lazy_compile);
+ }
+ }
+ }
}
}
- active_functions.Sort();
-
- // Scan the heap for all non-optimized functions which has no
- // debug break slots.
- HeapIterator iterator;
- HeapObject* obj = NULL;
- while (((obj = iterator.next()) != NULL)) {
- if (obj->IsJSFunction()) {
- JSFunction* function = JSFunction::cast(obj);
- if (function->shared()->allows_lazy_compilation() &&
- function->shared()->script()->IsScript() &&
- function->code()->kind() == Code::FUNCTION &&
- !function->code()->has_debug_break_slots()) {
- bool has_activation =
- SortedListBSearch<JSFunction*>(active_functions, function) != -1;
- if (!has_activation) {
- function->set_code(lazy_compile);
- function->shared()->set_code(lazy_compile);
+
+ // Now the non-GC scope is left, and the sorting of the functions
+ // in active_functions is no longer ensured. The code below does
+ // not rely on it.
+
+ // Now recompile all functions with activation frames and
+ // patch the return address to run in the new compiled code.
+ for (int i = 0; i < active_functions.length(); i++) {
+ Handle<JSFunction> function = active_functions[i];
+ Handle<SharedFunctionInfo> shared(function->shared());
+ // If recompilation is not possible just skip it.
+ if (shared->is_toplevel() ||
+ !shared->allows_lazy_compilation() ||
+ shared->code()->kind() == Code::BUILTIN) {
+ continue;
+ }
+
+ // Make sure that the shared full code is compiled with debug
+ // break slots.
+ if (function->code() == *lazy_compile) {
+ function->set_code(shared->code());
+ }
+ Handle<Code> current_code(function->code());
+ if (shared->code()->has_debug_break_slots()) {
+ // If the code has already been recompiled to have break slots, skip
+ // recompilation.
+ ASSERT(!function->code()->has_debug_break_slots());
+ } else {
+ // Try to compile the full code with debug break slots. If it
+ // fails just keep the current code.
+ ASSERT(shared->code() == *current_code);
+ ZoneScope zone_scope(isolate_, DELETE_ON_EXIT);
+ shared->set_code(*lazy_compile);
+ bool prev_force_debugger_active =
+ isolate_->debugger()->force_debugger_active();
+ isolate_->debugger()->set_force_debugger_active(true);
+ CompileFullCodeForDebugging(shared, current_code);
+ isolate_->debugger()->set_force_debugger_active(
+ prev_force_debugger_active);
+ if (!shared->is_compiled()) {
+ shared->set_code(*current_code);
+ continue;
+ }
+ }
+ Handle<Code> new_code(shared->code());
+
+ // Find the function and patch the return address.
+ for (JavaScriptFrameIterator it(isolate_); !it.done(); it.Advance()) {
+ JavaScriptFrame* frame = it.frame();
+ // If the current frame is for this function in its
+ // non-optimized form, rewrite the return address to continue
+ // in the newly compiled full code with debug break slots.
+ if (frame->function()->IsJSFunction() &&
+ frame->function() == *function &&
+ frame->LookupCode()->kind() == Code::FUNCTION) {
+ intptr_t delta = frame->pc() - current_code->instruction_start();
+ int debug_break_slot_count = 0;
+ int mask = RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT);
+ for (RelocIterator it(*new_code, mask); !it.done(); it.next()) {
+ // Check if the pc in the new code with debug break
+ // slots is before this slot.
+ RelocInfo* info = it.rinfo();
+ int debug_break_slot_bytes =
+ debug_break_slot_count * Assembler::kDebugBreakSlotLength;
+ intptr_t new_delta =
+ info->pc() -
+ new_code->instruction_start() -
+ debug_break_slot_bytes;
+ if (new_delta > delta) {
+ break;
+ }
+
+ // Passed a debug break slot in the full code with debug
+ // break slots.
+ debug_break_slot_count++;
}
+ int debug_break_slot_bytes =
+ debug_break_slot_count * Assembler::kDebugBreakSlotLength;
+ if (FLAG_trace_deopt) {
+ PrintF("Replacing code %08" V8PRIxPTR " - %08" V8PRIxPTR " (%d) "
+ "with %08" V8PRIxPTR " - %08" V8PRIxPTR " (%d) "
+ "for debugging, "
+ "changing pc from %08" V8PRIxPTR " to %08" V8PRIxPTR "\n",
+ reinterpret_cast<intptr_t>(
+ current_code->instruction_start()),
+ reinterpret_cast<intptr_t>(
+ current_code->instruction_start()) +
+ current_code->instruction_size(),
+ current_code->instruction_size(),
+ reinterpret_cast<intptr_t>(new_code->instruction_start()),
+ reinterpret_cast<intptr_t>(new_code->instruction_start()) +
+ new_code->instruction_size(),
+ new_code->instruction_size(),
+ reinterpret_cast<intptr_t>(frame->pc()),
+ reinterpret_cast<intptr_t>(new_code->instruction_start()) +
+ delta + debug_break_slot_bytes);
+ }
+
+ // Patch the return address to return into the code with
+ // debug break slots.
+ frame->set_pc(
+ new_code->instruction_start() + delta + debug_break_slot_bytes);
}
}
}
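
The return-address patching above reduces to simple pointer arithmetic: the new code equals the old code plus the debug break slots emitted before the return point, so the saved pc moves by exactly that many slot lengths. A standalone sketch follows; all addresses and the slot length are invented for illustration (the real length is a per-architecture Assembler constant, and the slot count is found with RelocIterator as in the hunk above).

#include <cstdint>
#include <cstdio>

int main() {
  const intptr_t kDebugBreakSlotLength = 6;  // assumed; per-port constant
  intptr_t old_start = 0x1000;  // current_code->instruction_start()
  intptr_t new_start = 0x2000;  // new_code->instruction_start()
  intptr_t old_pc    = 0x1040;  // saved return address in the old code
  int slots_before_pc = 3;      // debug break slots emitted before the pc

  // Same offset into the function, shifted by the inserted slot bytes.
  intptr_t delta = old_pc - old_start;
  intptr_t new_pc =
      new_start + delta + slots_before_pc * kDebugBreakSlotLength;
  std::printf("pc 0x%lx -> 0x%lx\n",
              static_cast<unsigned long>(old_pc),
              static_cast<unsigned long>(new_pc));
  return 0;
}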
@@ -1782,7 +1949,9 @@ bool Debug::EnsureDebugInfo(Handle<SharedFunctionInfo> shared) {
}
// Ensure shared is compiled. Return false if this failed.
- if (!EnsureCompiled(shared, CLEAR_EXCEPTION)) return false;
+ if (!SharedFunctionInfo::EnsureCompiled(shared, CLEAR_EXCEPTION)) {
+ return false;
+ }
// Create the debug info object.
Handle<DebugInfo> debug_info = FACTORY->NewDebugInfo(shared);
@@ -1997,9 +2166,10 @@ void Debug::CreateScriptCache() {
// Perform two GCs to get rid of all unreferenced scripts. The first GC gets
// rid of all the cached script wrappers and the second gets rid of the
- // scripts which are no longer referenced.
- heap->CollectAllGarbage(false);
- heap->CollectAllGarbage(false);
+ // scripts which are no longer referenced. The second also sweeps precisely,
+ // which saves us doing yet another GC to make the heap iterable.
+ heap->CollectAllGarbage(Heap::kNoGCFlags);
+ heap->CollectAllGarbage(Heap::kMakeHeapIterableMask);
ASSERT(script_cache_ == NULL);
script_cache_ = new ScriptCache();
@@ -2007,6 +2177,8 @@ void Debug::CreateScriptCache() {
// Scan heap for Script objects.
int count = 0;
HeapIterator iterator;
+ AssertNoAllocation no_allocation;
+
for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
if (obj->IsScript() && Script::cast(obj)->HasValidSource()) {
script_cache_->Add(Handle<Script>(Script::cast(obj)));
@@ -2047,7 +2219,7 @@ Handle<FixedArray> Debug::GetLoadedScripts() {
// Perform GC to get unreferenced scripts evicted from the cache before
// returning the content.
- isolate_->heap()->CollectAllGarbage(false);
+ isolate_->heap()->CollectAllGarbage(Heap::kNoGCFlags);
// Get the scripts from the cache.
return script_cache_->GetScripts();
@@ -2069,6 +2241,7 @@ Debugger::Debugger(Isolate* isolate)
compiling_natives_(false),
is_loading_debugger_(false),
never_unload_debugger_(false),
+ force_debugger_active_(false),
message_handler_(NULL),
debugger_unload_pending_(false),
host_dispatch_handler_(NULL),
@@ -2093,7 +2266,8 @@ Debugger::~Debugger() {
Handle<Object> Debugger::MakeJSObject(Vector<const char> constructor_name,
- int argc, Object*** argv,
+ int argc,
+ Handle<Object> argv[],
bool* caught_exception) {
ASSERT(isolate_->context() == *isolate_->debug()->debug_context());
@@ -2110,7 +2284,9 @@ Handle<Object> Debugger::MakeJSObject(Vector<const char> constructor_name,
Handle<Object> js_object = Execution::TryCall(
Handle<JSFunction>::cast(constructor),
Handle<JSObject>(isolate_->debug()->debug_context()->global()),
- argc, argv, caught_exception);
+ argc,
+ argv,
+ caught_exception);
return js_object;
}
@@ -2119,10 +2295,11 @@ Handle<Object> Debugger::MakeExecutionState(bool* caught_exception) {
// Create the execution state object.
Handle<Object> break_id = isolate_->factory()->NewNumberFromInt(
isolate_->debug()->break_id());
- const int argc = 1;
- Object** argv[argc] = { break_id.location() };
+ Handle<Object> argv[] = { break_id };
return MakeJSObject(CStrVector("MakeExecutionState"),
- argc, argv, caught_exception);
+ ARRAY_SIZE(argv),
+ argv,
+ caught_exception);
}
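
The recurring change in this file - replacing hand-counted `Object** argv[argc]` arrays of handle locations with a plain `Handle<Object> argv[]` plus ARRAY_SIZE - is sketched below in self-contained form. The Handle and TryCall stand-ins are hypothetical simplifications; only the element-count macro mirrors the spirit of V8's ARRAY_SIZE.

#include <cstddef>
#include <cstdio>

// Element-count macro in the spirit of V8's ARRAY_SIZE.
#define ARRAY_SIZE(a) (sizeof(a) / sizeof(*(a)))

struct Object {};
typedef Object* Handle;  // hypothetical stand-in for Handle<Object>

// Hypothetical stand-in for Execution::TryCall: just reports argc.
static size_t TryCall(Handle argv[], size_t argc) { return argc; }

int main() {
  Object exec_state, break_points_hit;
  // argc is derived from the initializer list, so it cannot drift from
  // the argument list the way a hand-maintained `const int argc = 2;`
  // could when arguments are added or removed.
  Handle argv[] = { &exec_state, &break_points_hit };
  std::printf("argc = %zu\n", TryCall(argv, ARRAY_SIZE(argv)));
  return 0;
}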
@@ -2130,11 +2307,9 @@ Handle<Object> Debugger::MakeBreakEvent(Handle<Object> exec_state,
Handle<Object> break_points_hit,
bool* caught_exception) {
// Create the new break event object.
- const int argc = 2;
- Object** argv[argc] = { exec_state.location(),
- break_points_hit.location() };
+ Handle<Object> argv[] = { exec_state, break_points_hit };
return MakeJSObject(CStrVector("MakeBreakEvent"),
- argc,
+ ARRAY_SIZE(argv),
argv,
caught_exception);
}
@@ -2146,23 +2321,24 @@ Handle<Object> Debugger::MakeExceptionEvent(Handle<Object> exec_state,
bool* caught_exception) {
Factory* factory = isolate_->factory();
// Create the new exception event object.
- const int argc = 3;
- Object** argv[argc] = { exec_state.location(),
- exception.location(),
- uncaught ? factory->true_value().location() :
- factory->false_value().location()};
+ Handle<Object> argv[] = { exec_state,
+ exception,
+ factory->ToBoolean(uncaught) };
return MakeJSObject(CStrVector("MakeExceptionEvent"),
- argc, argv, caught_exception);
+ ARRAY_SIZE(argv),
+ argv,
+ caught_exception);
}
Handle<Object> Debugger::MakeNewFunctionEvent(Handle<Object> function,
bool* caught_exception) {
// Create the new function event object.
- const int argc = 1;
- Object** argv[argc] = { function.location() };
+ Handle<Object> argv[] = { function };
return MakeJSObject(CStrVector("MakeNewFunctionEvent"),
- argc, argv, caught_exception);
+ ARRAY_SIZE(argv),
+ argv,
+ caught_exception);
}
@@ -2173,14 +2349,11 @@ Handle<Object> Debugger::MakeCompileEvent(Handle<Script> script,
// Create the compile event object.
Handle<Object> exec_state = MakeExecutionState(caught_exception);
Handle<Object> script_wrapper = GetScriptWrapper(script);
- const int argc = 3;
- Object** argv[argc] = { exec_state.location(),
- script_wrapper.location(),
- before ? factory->true_value().location() :
- factory->false_value().location() };
-
+ Handle<Object> argv[] = { exec_state,
+ script_wrapper,
+ factory->ToBoolean(before) };
return MakeJSObject(CStrVector("MakeCompileEvent"),
- argc,
+ ARRAY_SIZE(argv),
argv,
caught_exception);
}
@@ -2191,11 +2364,10 @@ Handle<Object> Debugger::MakeScriptCollectedEvent(int id,
// Create the script collected event object.
Handle<Object> exec_state = MakeExecutionState(caught_exception);
Handle<Object> id_object = Handle<Smi>(Smi::FromInt(id));
- const int argc = 2;
- Object** argv[argc] = { exec_state.location(), id_object.location() };
+ Handle<Object> argv[] = { exec_state, id_object };
return MakeJSObject(CStrVector("MakeScriptCollectedEvent"),
- argc,
+ ARRAY_SIZE(argv),
argv,
caught_exception);
}
@@ -2345,12 +2517,13 @@ void Debugger::OnAfterCompile(Handle<Script> script,
Handle<JSValue> wrapper = GetScriptWrapper(script);
// Call UpdateScriptBreakPoints, expecting no exceptions.
- bool caught_exception = false;
- const int argc = 1;
- Object** argv[argc] = { reinterpret_cast<Object**>(wrapper.location()) };
+ bool caught_exception;
+ Handle<Object> argv[] = { wrapper };
Execution::TryCall(Handle<JSFunction>::cast(update_script_break_points),
- Isolate::Current()->js_builtins_object(), argc, argv,
- &caught_exception);
+ Isolate::Current()->js_builtins_object(),
+ ARRAY_SIZE(argv),
+ argv,
+ &caught_exception);
if (caught_exception) {
return;
}
@@ -2463,7 +2636,8 @@ void Debugger::CallCEventCallback(v8::DebugEvent event,
v8::Debug::ClientData* client_data) {
Handle<Foreign> callback_obj(Handle<Foreign>::cast(event_listener_));
v8::Debug::EventCallback2 callback =
- FUNCTION_CAST<v8::Debug::EventCallback2>(callback_obj->address());
+ FUNCTION_CAST<v8::Debug::EventCallback2>(
+ callback_obj->foreign_address());
EventDetailsImpl event_details(
event,
Handle<JSObject>::cast(exec_state),
@@ -2481,13 +2655,16 @@ void Debugger::CallJSEventCallback(v8::DebugEvent event,
Handle<JSFunction> fun(Handle<JSFunction>::cast(event_listener_));
// Invoke the JavaScript debug event listener.
- const int argc = 4;
- Object** argv[argc] = { Handle<Object>(Smi::FromInt(event)).location(),
- exec_state.location(),
- Handle<Object>::cast(event_data).location(),
- event_listener_data_.location() };
- bool caught_exception = false;
- Execution::TryCall(fun, isolate_->global(), argc, argv, &caught_exception);
+ Handle<Object> argv[] = { Handle<Object>(Smi::FromInt(event)),
+ exec_state,
+ event_data,
+ event_listener_data_ };
+ bool caught_exception;
+ Execution::TryCall(fun,
+ isolate_->global(),
+ ARRAY_SIZE(argv),
+ argv,
+ &caught_exception);
// Silently ignore exceptions from debug event listeners.
}
@@ -2833,7 +3010,9 @@ void Debugger::EnqueueDebugCommand(v8::Debug::ClientData* client_data) {
bool Debugger::IsDebuggerActive() {
ScopedLock with(debugger_access_);
- return message_handler_ != NULL || !event_listener_.is_null();
+ return message_handler_ != NULL ||
+ !event_listener_.is_null() ||
+ force_debugger_active_;
}
@@ -2856,12 +3035,11 @@ Handle<Object> Debugger::Call(Handle<JSFunction> fun,
return isolate_->factory()->undefined_value();
}
- static const int kArgc = 2;
- Object** argv[kArgc] = { exec_state.location(), data.location() };
+ Handle<Object> argv[] = { exec_state, data };
Handle<Object> result = Execution::Call(
fun,
Handle<Object>(isolate_->debug()->debug_context_->global_proxy()),
- kArgc,
+ ARRAY_SIZE(argv),
argv,
pending_exception);
return result;
@@ -2929,6 +3107,94 @@ void Debugger::CallMessageDispatchHandler() {
}
+EnterDebugger::EnterDebugger()
+ : isolate_(Isolate::Current()),
+ prev_(isolate_->debug()->debugger_entry()),
+ it_(isolate_),
+ has_js_frames_(!it_.done()),
+ save_(isolate_) {
+ Debug* debug = isolate_->debug();
+ ASSERT(prev_ != NULL || !debug->is_interrupt_pending(PREEMPT));
+ ASSERT(prev_ != NULL || !debug->is_interrupt_pending(DEBUGBREAK));
+
+ // Link recursive debugger entry.
+ debug->set_debugger_entry(this);
+
+ // Store the previous break id and frame id.
+ break_id_ = debug->break_id();
+ break_frame_id_ = debug->break_frame_id();
+
+ // Create the new break info. If there are no JavaScript frames there is
+ // no break frame id.
+ if (has_js_frames_) {
+ debug->NewBreak(it_.frame()->id());
+ } else {
+ debug->NewBreak(StackFrame::NO_ID);
+ }
+
+ // Make sure that debugger is loaded and enter the debugger context.
+ load_failed_ = !debug->Load();
+ if (!load_failed_) {
+ // NOTE the member variable save which saves the previous context before
+ // this change.
+ isolate_->set_context(*debug->debug_context());
+ }
+}
+
+
+EnterDebugger::~EnterDebugger() {
+ ASSERT(Isolate::Current() == isolate_);
+ Debug* debug = isolate_->debug();
+
+ // Restore to the previous break state.
+ debug->SetBreak(break_frame_id_, break_id_);
+
+ // Check for leaving the debugger.
+ if (prev_ == NULL) {
+ // Clear mirror cache when leaving the debugger. Skip this if there is a
+ // pending exception as clearing the mirror cache calls back into
+ // JavaScript. This can happen if the v8::Debug::Call is used in which
+ // case the exception should end up in the calling code.
+ if (!isolate_->has_pending_exception()) {
+ // Try to avoid any pending debug break breaking in the clear mirror
+ // cache JavaScript code.
+ if (isolate_->stack_guard()->IsDebugBreak()) {
+ debug->set_interrupts_pending(DEBUGBREAK);
+ isolate_->stack_guard()->Continue(DEBUGBREAK);
+ }
+ debug->ClearMirrorCache();
+ }
+
+ // Request preemption and debug break when leaving the last debugger entry
+ // if any of these were recorded while debugging.
+ if (debug->is_interrupt_pending(PREEMPT)) {
+ // This re-scheduling of preemption is to avoid starvation in some
+ // debugging scenarios.
+ debug->clear_interrupt_pending(PREEMPT);
+ isolate_->stack_guard()->Preempt();
+ }
+ if (debug->is_interrupt_pending(DEBUGBREAK)) {
+ debug->clear_interrupt_pending(DEBUGBREAK);
+ isolate_->stack_guard()->DebugBreak();
+ }
+
+ // If there are commands in the queue when leaving the debugger request
+ // that these commands are processed.
+ if (isolate_->debugger()->HasCommands()) {
+ isolate_->stack_guard()->DebugCommand();
+ }
+
+ // If leaving the debugger with the debugger no longer active unload it.
+ if (!isolate_->debugger()->IsDebuggerActive()) {
+ isolate_->debugger()->UnloadDebugger();
+ }
+ }
+
+ // Leaving this debugger entry.
+ debug->set_debugger_entry(prev_);
+}
+
+
MessageImpl MessageImpl::NewEvent(DebugEvent event,
bool running,
Handle<JSObject> exec_state,
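
The EnterDebugger constructor/destructor pair moved out of the header above is a classic RAII linked-stack pattern. Here is a bare-bones sketch under hypothetical names, showing why nested entries unwind correctly on any exit path.

#include <cstdio>

// Hypothetical miniature of the EnterDebugger shape: the constructor
// links the new entry as the current one, and the destructor restores
// the previous entry when the scope exits.
class ScopedEntry {
 public:
  explicit ScopedEntry(ScopedEntry** top) : top_(top), prev_(*top) {
    *top_ = this;
  }
  ~ScopedEntry() { *top_ = prev_; }
 private:
  ScopedEntry** top_;
  ScopedEntry* prev_;
};

int main() {
  ScopedEntry* top = NULL;
  {
    ScopedEntry outer(&top);
    {
      ScopedEntry inner(&top);  // nests like a recursive debugger entry
      std::printf("innermost entry: %p\n", static_cast<void*>(top));
    }
    std::printf("back to outer:   %p\n", static_cast<void*>(top));
  }
  std::printf("fully unwound:   %p\n", static_cast<void*>(top));
  return 0;
}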
diff --git a/deps/v8/src/debug.h b/deps/v8/src/debug.h
index a098040c0..a39d8013e 100644
--- a/deps/v8/src/debug.h
+++ b/deps/v8/src/debug.h
@@ -402,7 +402,7 @@ class Debug {
static void GenerateKeyedStoreICDebugBreak(MacroAssembler* masm);
static void GenerateConstructCallDebugBreak(MacroAssembler* masm);
static void GenerateReturnDebugBreak(MacroAssembler* masm);
- static void GenerateStubNoRegistersDebugBreak(MacroAssembler* masm);
+ static void GenerateCallFunctionStubDebugBreak(MacroAssembler* masm);
static void GenerateSlotDebugBreak(MacroAssembler* masm);
static void GeneratePlainReturnLiveEdit(MacroAssembler* masm);
@@ -705,7 +705,8 @@ class Debugger {
void DebugRequest(const uint16_t* json_request, int length);
Handle<Object> MakeJSObject(Vector<const char> constructor_name,
- int argc, Object*** argv,
+ int argc,
+ Handle<Object> argv[],
bool* caught_exception);
Handle<Object> MakeExecutionState(bool* caught_exception);
Handle<Object> MakeBreakEvent(Handle<Object> exec_state,
@@ -809,11 +810,15 @@ class Debugger {
}
void set_compiling_natives(bool compiling_natives) {
- Debugger::compiling_natives_ = compiling_natives;
+ compiling_natives_ = compiling_natives;
}
bool compiling_natives() const { return compiling_natives_; }
void set_loading_debugger(bool v) { is_loading_debugger_ = v; }
bool is_loading_debugger() const { return is_loading_debugger_; }
+ void set_force_debugger_active(bool force_debugger_active) {
+ force_debugger_active_ = force_debugger_active;
+ }
+ bool force_debugger_active() const { return force_debugger_active_; }
bool IsDebuggerActive();
@@ -839,6 +844,7 @@ class Debugger {
bool compiling_natives_; // Are we compiling natives?
bool is_loading_debugger_; // Are we loading the debugger?
bool never_unload_debugger_; // Can we unload the debugger?
+ bool force_debugger_active_; // Activate debugger without event listeners.
v8::Debug::MessageHandler2 message_handler_;
bool debugger_unload_pending_; // Was message handler cleared?
v8::Debug::HostDispatchHandler host_dispatch_handler_;
@@ -869,91 +875,8 @@ class Debugger {
// some reason could not be entered, FailedToEnter will return true.
class EnterDebugger BASE_EMBEDDED {
public:
- EnterDebugger()
- : isolate_(Isolate::Current()),
- prev_(isolate_->debug()->debugger_entry()),
- it_(isolate_),
- has_js_frames_(!it_.done()),
- save_(isolate_) {
- Debug* debug = isolate_->debug();
- ASSERT(prev_ != NULL || !debug->is_interrupt_pending(PREEMPT));
- ASSERT(prev_ != NULL || !debug->is_interrupt_pending(DEBUGBREAK));
-
- // Link recursive debugger entry.
- debug->set_debugger_entry(this);
-
- // Store the previous break id and frame id.
- break_id_ = debug->break_id();
- break_frame_id_ = debug->break_frame_id();
-
- // Create the new break info. If there is no JavaScript frames there is no
- // break frame id.
- if (has_js_frames_) {
- debug->NewBreak(it_.frame()->id());
- } else {
- debug->NewBreak(StackFrame::NO_ID);
- }
-
- // Make sure that debugger is loaded and enter the debugger context.
- load_failed_ = !debug->Load();
- if (!load_failed_) {
- // NOTE the member variable save which saves the previous context before
- // this change.
- isolate_->set_context(*debug->debug_context());
- }
- }
-
- ~EnterDebugger() {
- ASSERT(Isolate::Current() == isolate_);
- Debug* debug = isolate_->debug();
-
- // Restore to the previous break state.
- debug->SetBreak(break_frame_id_, break_id_);
-
- // Check for leaving the debugger.
- if (prev_ == NULL) {
- // Clear mirror cache when leaving the debugger. Skip this if there is a
- // pending exception as clearing the mirror cache calls back into
- // JavaScript. This can happen if the v8::Debug::Call is used in which
- // case the exception should end up in the calling code.
- if (!isolate_->has_pending_exception()) {
- // Try to avoid any pending debug break breaking in the clear mirror
- // cache JavaScript code.
- if (isolate_->stack_guard()->IsDebugBreak()) {
- debug->set_interrupts_pending(DEBUGBREAK);
- isolate_->stack_guard()->Continue(DEBUGBREAK);
- }
- debug->ClearMirrorCache();
- }
-
- // Request preemption and debug break when leaving the last debugger entry
- // if any of these where recorded while debugging.
- if (debug->is_interrupt_pending(PREEMPT)) {
- // This re-scheduling of preemption is to avoid starvation in some
- // debugging scenarios.
- debug->clear_interrupt_pending(PREEMPT);
- isolate_->stack_guard()->Preempt();
- }
- if (debug->is_interrupt_pending(DEBUGBREAK)) {
- debug->clear_interrupt_pending(DEBUGBREAK);
- isolate_->stack_guard()->DebugBreak();
- }
-
- // If there are commands in the queue when leaving the debugger request
- // that these commands are processed.
- if (isolate_->debugger()->HasCommands()) {
- isolate_->stack_guard()->DebugCommand();
- }
-
- // If leaving the debugger with the debugger no longer active unload it.
- if (!isolate_->debugger()->IsDebuggerActive()) {
- isolate_->debugger()->UnloadDebugger();
- }
- }
-
- // Leaving this debugger entry.
- debug->set_debugger_entry(prev_);
- }
+ EnterDebugger();
+ ~EnterDebugger();
// Check whether the debugger could be entered.
inline bool FailedToEnter() { return load_failed_; }
diff --git a/deps/v8/src/deoptimizer.cc b/deps/v8/src/deoptimizer.cc
index 5feb73d73..108e547f2 100644
--- a/deps/v8/src/deoptimizer.cc
+++ b/deps/v8/src/deoptimizer.cc
@@ -52,11 +52,13 @@ DeoptimizerData::DeoptimizerData() {
DeoptimizerData::~DeoptimizerData() {
if (eager_deoptimization_entry_code_ != NULL) {
- eager_deoptimization_entry_code_->Free(EXECUTABLE);
+ Isolate::Current()->memory_allocator()->Free(
+ eager_deoptimization_entry_code_);
eager_deoptimization_entry_code_ = NULL;
}
if (lazy_deoptimization_entry_code_ != NULL) {
- lazy_deoptimization_entry_code_->Free(EXECUTABLE);
+ Isolate::Current()->memory_allocator()->Free(
+ lazy_deoptimization_entry_code_);
lazy_deoptimization_entry_code_ = NULL;
}
}
@@ -71,6 +73,8 @@ void DeoptimizerData::Iterate(ObjectVisitor* v) {
#endif
+// We rely on this function not causing a GC. It is called from generated code
+// without having a real stack frame in place.
Deoptimizer* Deoptimizer::New(JSFunction* function,
BailoutType type,
unsigned bailout_id,
@@ -112,25 +116,11 @@ DeoptimizedFrameInfo* Deoptimizer::DebuggerInspectableFrame(
// Get the function and code from the frame.
JSFunction* function = JSFunction::cast(frame->function());
Code* code = frame->LookupCode();
- Address code_start_address = code->instruction_start();
// Locate the deoptimization point in the code. As we are at a call the
// return address must be at a place in the code with deoptimization support.
- int deoptimization_index = Safepoint::kNoDeoptimizationIndex;
- // Scope this as the safe point constructor will disallow allocation.
- {
- SafepointTable table(code);
- for (unsigned i = 0; i < table.length(); ++i) {
- Address address = code_start_address + table.GetPcOffset(i);
- if (address == frame->pc()) {
- SafepointEntry safepoint_entry = table.GetEntry(i);
- ASSERT(safepoint_entry.deoptimization_index() !=
- Safepoint::kNoDeoptimizationIndex);
- deoptimization_index = safepoint_entry.deoptimization_index();
- break;
- }
- }
- }
+ SafepointEntry safepoint_entry = code->GetSafepointEntry(frame->pc());
+ int deoptimization_index = safepoint_entry.deoptimization_index();
ASSERT(deoptimization_index != Safepoint::kNoDeoptimizationIndex);
// Always use the actual stack slots when calculating the fp to sp
@@ -319,6 +309,8 @@ Deoptimizer::Deoptimizer(Isolate* isolate,
input_(NULL),
output_count_(0),
output_(NULL),
+ frame_alignment_marker_(isolate->heap()->frame_alignment_marker()),
+ has_alignment_padding_(0),
deferred_heap_numbers_(0) {
if (FLAG_trace_deopt && type != OSR) {
if (type == DEBUGGER) {
@@ -343,6 +335,26 @@ Deoptimizer::Deoptimizer(Isolate* isolate,
if (type == EAGER) {
ASSERT(from == NULL);
optimized_code_ = function_->code();
+ if (FLAG_trace_deopt && FLAG_code_comments) {
+ // Print instruction associated with this bailout.
+ const char* last_comment = NULL;
+ int mask = RelocInfo::ModeMask(RelocInfo::COMMENT)
+ | RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
+ for (RelocIterator it(optimized_code_, mask); !it.done(); it.next()) {
+ RelocInfo* info = it.rinfo();
+ if (info->rmode() == RelocInfo::COMMENT) {
+ last_comment = reinterpret_cast<const char*>(info->data());
+ }
+ if (info->rmode() == RelocInfo::RUNTIME_ENTRY) {
+ unsigned id = Deoptimizer::GetDeoptimizationId(
+ info->target_address(), Deoptimizer::EAGER);
+ if (id == bailout_id && last_comment != NULL) {
+ PrintF(" %s\n", last_comment);
+ break;
+ }
+ }
+ }
+ }
} else if (type == LAZY) {
optimized_code_ = FindDeoptimizingCodeFromAddress(from);
ASSERT(optimized_code_ != NULL);
@@ -386,7 +398,7 @@ void Deoptimizer::DeleteFrameDescriptions() {
Address Deoptimizer::GetDeoptimizationEntry(int id, BailoutType type) {
ASSERT(id >= 0);
if (id >= kNumberOfEntries) return NULL;
- LargeObjectChunk* base = NULL;
+ MemoryChunk* base = NULL;
DeoptimizerData* data = Isolate::Current()->deoptimizer_data();
if (type == EAGER) {
if (data->eager_deoptimization_entry_code_ == NULL) {
@@ -400,12 +412,12 @@ Address Deoptimizer::GetDeoptimizationEntry(int id, BailoutType type) {
base = data->lazy_deoptimization_entry_code_;
}
return
- static_cast<Address>(base->GetStartAddress()) + (id * table_entry_size_);
+ static_cast<Address>(base->body()) + (id * table_entry_size_);
}
int Deoptimizer::GetDeoptimizationId(Address addr, BailoutType type) {
- LargeObjectChunk* base = NULL;
+ MemoryChunk* base = NULL;
DeoptimizerData* data = Isolate::Current()->deoptimizer_data();
if (type == EAGER) {
base = data->eager_deoptimization_entry_code_;
@@ -413,14 +425,14 @@ int Deoptimizer::GetDeoptimizationId(Address addr, BailoutType type) {
base = data->lazy_deoptimization_entry_code_;
}
if (base == NULL ||
- addr < base->GetStartAddress() ||
- addr >= base->GetStartAddress() +
+ addr < base->body() ||
+ addr >= base->body() +
(kNumberOfEntries * table_entry_size_)) {
return kNotDeoptimizationEntry;
}
ASSERT_EQ(0,
- static_cast<int>(addr - base->GetStartAddress()) % table_entry_size_);
- return static_cast<int>(addr - base->GetStartAddress()) / table_entry_size_;
+ static_cast<int>(addr - base->body()) % table_entry_size_);
+ return static_cast<int>(addr - base->body()) / table_entry_size_;
}
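
GetDeoptimizationEntry and GetDeoptimizationId above are inverses over a fixed-stride table: entry i lives at base + i * stride, and a pc inside the table maps back to an id by the inverse division. A minimal sketch with invented values (both constants are assumed; in V8 they are per-port):

#include <cassert>
#include <cstdint>

int main() {
  const intptr_t kTableEntrySize = 10;  // assumed; per-port constant
  const int kNumberOfEntries = 16384;   // assumed table capacity
  intptr_t base = 0x4000;               // chunk->body() in the real code

  // Entry id -> address, and address -> entry id, must round-trip.
  int id = 7;
  intptr_t addr = base + id * kTableEntrySize;
  assert(addr >= base && addr < base + kNumberOfEntries * kTableEntrySize);
  assert((addr - base) % kTableEntrySize == 0);
  assert((addr - base) / kTableEntrySize == id);
  return 0;
}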
@@ -462,6 +474,8 @@ int Deoptimizer::GetDeoptimizedCodeCount(Isolate* isolate) {
}
+// We rely on this function not causing a GC. It is called from generated code
+// without having a real stack frame in place.
void Deoptimizer::DoComputeOutputFrames() {
if (bailout_type_ == OSR) {
DoComputeOsrOutputFrame();
@@ -613,11 +627,13 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
intptr_t input_value = input_->GetRegister(input_reg);
if (FLAG_trace_deopt) {
PrintF(
- " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08" V8PRIxPTR " ; %s\n",
+ " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08" V8PRIxPTR " ; %s ",
output_[frame_index]->GetTop() + output_offset,
output_offset,
input_value,
converter.NameOfCPURegister(input_reg));
+ reinterpret_cast<Object*>(input_value)->ShortPrint();
+ PrintF("\n");
}
output_[frame_index]->SetFrameSlot(output_offset, input_value);
return;
@@ -675,10 +691,12 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
if (FLAG_trace_deopt) {
PrintF(" 0x%08" V8PRIxPTR ": ",
output_[frame_index]->GetTop() + output_offset);
- PrintF("[top + %d] <- 0x%08" V8PRIxPTR " ; [esp + %d]\n",
+ PrintF("[top + %d] <- 0x%08" V8PRIxPTR " ; [esp + %d] ",
output_offset,
input_value,
input_offset);
+ reinterpret_cast<Object*>(input_value)->ShortPrint();
+ PrintF("\n");
}
output_[frame_index]->SetFrameSlot(output_offset, input_value);
return;
@@ -850,10 +868,12 @@ bool Deoptimizer::DoOsrTranslateCommand(TranslationIterator* iterator,
unsigned output_offset =
output->GetOffsetFromSlotIndex(this, output_index);
if (FLAG_trace_osr) {
- PrintF(" [sp + %d] <- 0x%08" V8PRIxPTR " ; [sp + %d]\n",
+ PrintF(" [sp + %d] <- 0x%08" V8PRIxPTR " ; [sp + %d] ",
output_offset,
input_value,
*input_offset);
+ reinterpret_cast<Object*>(input_value)->ShortPrint();
+ PrintF("\n");
}
output->SetFrameSlot(output_offset, input_value);
break;
@@ -953,7 +973,10 @@ void Deoptimizer::PatchStackCheckCode(Code* unoptimized_code,
for (uint32_t i = 0; i < table_length; ++i) {
uint32_t pc_offset = Memory::uint32_at(stack_check_cursor + kIntSize);
Address pc_after = unoptimized_code->instruction_start() + pc_offset;
- PatchStackCheckCodeAt(pc_after, check_code, replacement_code);
+ PatchStackCheckCodeAt(unoptimized_code,
+ pc_after,
+ check_code,
+ replacement_code);
stack_check_cursor += 2 * kIntSize;
}
}
@@ -972,7 +995,10 @@ void Deoptimizer::RevertStackCheckCode(Code* unoptimized_code,
for (uint32_t i = 0; i < table_length; ++i) {
uint32_t pc_offset = Memory::uint32_at(stack_check_cursor + kIntSize);
Address pc_after = unoptimized_code->instruction_start() + pc_offset;
- RevertStackCheckCodeAt(pc_after, check_code, replacement_code);
+ RevertStackCheckCodeAt(unoptimized_code,
+ pc_after,
+ check_code,
+ replacement_code);
stack_check_cursor += 2 * kIntSize;
}
}
@@ -1039,7 +1065,7 @@ void Deoptimizer::AddDoubleValue(intptr_t slot_address,
}
-LargeObjectChunk* Deoptimizer::CreateCode(BailoutType type) {
+MemoryChunk* Deoptimizer::CreateCode(BailoutType type) {
// We cannot run this if the serializer is enabled because this will
// cause us to emit relocation information for the external
// references. This is fine because the deoptimizer's code section
@@ -1053,12 +1079,15 @@ LargeObjectChunk* Deoptimizer::CreateCode(BailoutType type) {
masm.GetCode(&desc);
ASSERT(desc.reloc_size == 0);
- LargeObjectChunk* chunk = LargeObjectChunk::New(desc.instr_size, EXECUTABLE);
+ MemoryChunk* chunk =
+ Isolate::Current()->memory_allocator()->AllocateChunk(desc.instr_size,
+ EXECUTABLE,
+ NULL);
if (chunk == NULL) {
V8::FatalProcessOutOfMemory("Not enough memory for deoptimization table");
}
- memcpy(chunk->GetStartAddress(), desc.buffer, desc.instr_size);
- CPU::FlushICache(chunk->GetStartAddress(), desc.instr_size);
+ memcpy(chunk->body(), desc.buffer, desc.instr_size);
+ CPU::FlushICache(chunk->body(), desc.instr_size);
return chunk;
}
diff --git a/deps/v8/src/deoptimizer.h b/deps/v8/src/deoptimizer.h
index 8641261b1..284676c36 100644
--- a/deps/v8/src/deoptimizer.h
+++ b/deps/v8/src/deoptimizer.h
@@ -86,8 +86,8 @@ class DeoptimizerData {
#endif
private:
- LargeObjectChunk* eager_deoptimization_entry_code_;
- LargeObjectChunk* lazy_deoptimization_entry_code_;
+ MemoryChunk* eager_deoptimization_entry_code_;
+ MemoryChunk* lazy_deoptimization_entry_code_;
Deoptimizer* current_;
#ifdef ENABLE_DEBUGGER_SUPPORT
@@ -173,7 +173,8 @@ class Deoptimizer : public Malloced {
// Patch stack guard check at instruction before pc_after in
// the unoptimized code to unconditionally call replacement_code.
- static void PatchStackCheckCodeAt(Address pc_after,
+ static void PatchStackCheckCodeAt(Code* unoptimized_code,
+ Address pc_after,
Code* check_code,
Code* replacement_code);
@@ -185,7 +186,8 @@ class Deoptimizer : public Malloced {
// Change all patched stack guard checks in the unoptimized code
// back to a normal stack guard check.
- static void RevertStackCheckCodeAt(Address pc_after,
+ static void RevertStackCheckCodeAt(Code* unoptimized_code,
+ Address pc_after,
Code* check_code,
Code* replacement_code);
@@ -211,6 +213,11 @@ class Deoptimizer : public Malloced {
return OFFSET_OF(Deoptimizer, output_count_);
}
static int output_offset() { return OFFSET_OF(Deoptimizer, output_); }
+  static int frame_alignment_marker_offset() {
+    return OFFSET_OF(Deoptimizer, frame_alignment_marker_);
+  }
+  static int has_alignment_padding_offset() {
+    return OFFSET_OF(Deoptimizer, has_alignment_padding_);
+  }
static int GetDeoptimizedCodeCount(Isolate* isolate);
@@ -285,7 +292,7 @@ class Deoptimizer : public Malloced {
void AddDoubleValue(intptr_t slot_address, double value);
- static LargeObjectChunk* CreateCode(BailoutType type);
+ static MemoryChunk* CreateCode(BailoutType type);
static void GenerateDeoptimizationEntries(
MacroAssembler* masm, int count, BailoutType type);
@@ -315,6 +322,10 @@ class Deoptimizer : public Malloced {
// Array of output frame descriptions.
FrameDescription** output_;
+ // Frames can be dynamically padded on ia32 to align untagged doubles.
+ Object* frame_alignment_marker_;
+ intptr_t has_alignment_padding_;
+
List<HeapNumberMaterializationDescriptor> deferred_heap_numbers_;
static const int table_entry_size_;
@@ -358,7 +369,20 @@ class FrameDescription {
}
double GetDoubleFrameSlot(unsigned offset) {
- return *reinterpret_cast<double*>(GetFrameSlotPointer(offset));
+ intptr_t* ptr = GetFrameSlotPointer(offset);
+#if V8_TARGET_ARCH_MIPS
+ // Prevent gcc from using load-double (mips ldc1) on (possibly)
+ // non-64-bit aligned double. Uses two lwc1 instructions.
+ union conversion {
+ double d;
+ uint32_t u[2];
+ } c;
+ c.u[0] = *reinterpret_cast<uint32_t*>(ptr);
+ c.u[1] = *(reinterpret_cast<uint32_t*>(ptr) + 1);
+ return c.d;
+#else
+ return *reinterpret_cast<double*>(ptr);
+#endif
}
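
// Aside: a minimal, self-contained sketch of the unaligned-read idiom used by
// GetDoubleFrameSlot above. On targets where 64-bit loads require 8-byte
// alignment (e.g. the MIPS ldc1 instruction), reading through two 32-bit
// words -- or via memcpy, which compilers lower to alignment-safe loads --
// avoids the trap. Hypothetical helper, not part of this patch:

#include <string.h>

static inline double ReadUnalignedDouble(const void* p) {
  double d;
  memcpy(&d, p, sizeof(d));  // Safe for any alignment; no ldc1 on MIPS.
  return d;
}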
void SetFrameSlot(unsigned offset, intptr_t value) {
diff --git a/deps/v8/src/disassembler.cc b/deps/v8/src/disassembler.cc
index 1e67b4cb6..e3b40ab93 100644
--- a/deps/v8/src/disassembler.cc
+++ b/deps/v8/src/disassembler.cc
@@ -200,7 +200,7 @@ static int DecodeIt(FILE* f,
// Print all the reloc info for this instruction which are not comments.
for (int i = 0; i < pcs.length(); i++) {
// Put together the reloc info
- RelocInfo relocinfo(pcs[i], rmodes[i], datas[i]);
+ RelocInfo relocinfo(pcs[i], rmodes[i], datas[i], NULL);
// Indent the printing of the reloc info.
if (i == 0) {
diff --git a/deps/v8/src/double.h b/deps/v8/src/double.h
index 65eded998..16a3245e9 100644
--- a/deps/v8/src/double.h
+++ b/deps/v8/src/double.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -34,8 +34,8 @@ namespace v8 {
namespace internal {
// We assume that doubles and uint64_t have the same endianness.
-static uint64_t double_to_uint64(double d) { return BitCast<uint64_t>(d); }
-static double uint64_to_double(uint64_t d64) { return BitCast<double>(d64); }
+inline uint64_t double_to_uint64(double d) { return BitCast<uint64_t>(d); }
+inline double uint64_to_double(uint64_t d64) { return BitCast<double>(d64); }
// Helper functions for doubles.
class Double {
diff --git a/deps/v8/src/dtoa.h b/deps/v8/src/dtoa.h
index b3e79afa4..a2d6fdebd 100644
--- a/deps/v8/src/dtoa.h
+++ b/deps/v8/src/dtoa.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -47,7 +47,7 @@ enum DtoaMode {
// The maximal length of digits a double can have in base 10.
// Note that DoubleToAscii null-terminates its input. So the given buffer should
// be at least kBase10MaximalLength + 1 characters long.
-static const int kBase10MaximalLength = 17;
+const int kBase10MaximalLength = 17;
// Converts the given double 'v' to ascii.
// The result should be interpreted as buffer * 10^(point-length).
diff --git a/deps/v8/src/elements.cc b/deps/v8/src/elements.cc
index e4ecfe8dd..ef55d54ab 100644
--- a/deps/v8/src/elements.cc
+++ b/deps/v8/src/elements.cc
@@ -31,6 +31,30 @@
#include "elements.h"
#include "utils.h"
+
+// Each concrete ElementsAccessor can handle exactly one ElementsKind;
+// several abstract ElementsAccessor classes are used to allow sharing
+// of common code.
+//
+// Inheritance hierarchy:
+// - ElementsAccessorBase (abstract)
+// - FastElementsAccessor (abstract)
+// - FastObjectElementsAccessor
+// - FastDoubleElementsAccessor
+// - ExternalElementsAccessor (abstract)
+// - ExternalByteElementsAccessor
+// - ExternalUnsignedByteElementsAccessor
+// - ExternalShortElementsAccessor
+// - ExternalUnsignedShortElementsAccessor
+// - ExternalIntElementsAccessor
+// - ExternalUnsignedIntElementsAccessor
+// - ExternalFloatElementsAccessor
+// - ExternalDoubleElementsAccessor
+// - PixelElementsAccessor
+// - DictionaryElementsAccessor
+// - NonStrictArgumentsElementsAccessor
+
+
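
// Aside: the hierarchy above is built on the Curiously Recurring Template
// Pattern (CRTP): ElementsAccessorBase takes the concrete subclass as a
// template parameter, so its virtual entry points can forward to the
// subclass's static methods with no second virtual dispatch. A simplified,
// self-contained sketch with hypothetical names (not the real interface):

template <typename Subclass, typename BackingStore>
class AccessorBase {
 public:
  virtual ~AccessorBase() {}
  // Virtual shell; the actual work is delegated statically to the subclass.
  virtual int Get(BackingStore* store, int key) {
    return Subclass::GetImpl(store, key);
  }
};

class IntAccessor : public AccessorBase<IntAccessor, int> {
 public:
  static int GetImpl(int* store, int key) { return store[key]; }
};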
namespace v8 {
namespace internal {
@@ -38,7 +62,7 @@ namespace internal {
ElementsAccessor** ElementsAccessor::elements_accessors_;
-bool HasKey(FixedArray* array, Object* key) {
+static bool HasKey(FixedArray* array, Object* key) {
int len0 = array->length();
for (int i = 0; i < len0; i++) {
Object* element = array->get(i);
@@ -52,6 +76,14 @@ bool HasKey(FixedArray* array, Object* key) {
}
+static Failure* ThrowArrayLengthRangeError(Heap* heap) {
+ HandleScope scope(heap->isolate());
+ return heap->isolate()->Throw(
+ *heap->isolate()->factory()->NewRangeError("invalid_array_length",
+ HandleVector<Object>(NULL, 0)));
+}
+
+
// Base class for element handler implementations. Contains the
// the common logic for objects with different ElementsKinds.
// Subclasses must specialize method for which the element
@@ -91,6 +123,17 @@ class ElementsAccessorBase : public ElementsAccessor {
return backing_store->GetHeap()->the_hole_value();
}
+ virtual MaybeObject* SetLength(JSObject* obj,
+ Object* length) {
+ ASSERT(obj->IsJSArray());
+ return ElementsAccessorSubclass::SetLength(
+ BackingStoreClass::cast(obj->elements()), obj, length);
+ }
+
+ static MaybeObject* SetLength(BackingStoreClass* backing_store,
+ JSObject* obj,
+ Object* length);
+
virtual MaybeObject* Delete(JSObject* obj,
uint32_t key,
JSReceiver::DeleteMode mode) = 0;
@@ -222,12 +265,76 @@ class ElementsAccessorBase : public ElementsAccessor {
};
+// Superclass for all fast element arrays.
+template<typename FastElementsAccessorSubclass,
+ typename BackingStore,
+ int ElementSize>
class FastElementsAccessor
- : public ElementsAccessorBase<FastElementsAccessor, FixedArray> {
+ : public ElementsAccessorBase<FastElementsAccessorSubclass, BackingStore> {
+ protected:
+ friend class ElementsAccessorBase<FastElementsAccessorSubclass, BackingStore>;
+
+  // Adjusts the length of the fast backing store, returning the new length;
+  // returns undefined when conversion to a slow backing store is required.
+ static MaybeObject* SetLengthWithoutNormalize(BackingStore* backing_store,
+ JSArray* array,
+ Object* length_object,
+ uint32_t length) {
+ uint32_t old_capacity = backing_store->length();
+
+ // Check whether the backing store should be shrunk.
+ if (length <= old_capacity) {
+ if (array->HasFastTypeElements()) {
+ MaybeObject* maybe_obj = array->EnsureWritableFastElements();
+ if (!maybe_obj->To(&backing_store)) return maybe_obj;
+ }
+ if (2 * length <= old_capacity) {
+ // If more than half the elements won't be used, trim the array.
+ if (length == 0) {
+ array->initialize_elements();
+ } else {
+ backing_store->set_length(length);
+ Address filler_start = backing_store->address() +
+ BackingStore::OffsetOfElementAt(length);
+ int filler_size = (old_capacity - length) * ElementSize;
+ array->GetHeap()->CreateFillerObjectAt(filler_start, filler_size);
+ }
+ } else {
+ // Otherwise, fill the unused tail with holes.
+ int old_length = FastD2I(array->length()->Number());
+ for (int i = length; i < old_length; i++) {
+ backing_store->set_the_hole(i);
+ }
+ }
+ return length_object;
+ }
+
+ // Check whether the backing store should be expanded.
+ uint32_t min = JSObject::NewElementsCapacity(old_capacity);
+ uint32_t new_capacity = length > min ? length : min;
+ if (!array->ShouldConvertToSlowElements(new_capacity)) {
+ MaybeObject* result = FastElementsAccessorSubclass::
+ SetFastElementsCapacityAndLength(array, new_capacity, length);
+ if (result->IsFailure()) return result;
+ return length_object;
+ }
+
+ // Request conversion to slow elements.
+ return array->GetHeap()->undefined_value();
+ }
+};
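
// Aside: the resize policy above, in isolation. When shrinking, the backing
// store is physically trimmed only if at most half of it would remain in use
// (otherwise the tail is merely filled with holes); when growing, capacity
// increases by roughly 1.5x plus a constant so repeated appends stay
// amortized O(1). A simplified sketch with hypothetical names:

#include <stdint.h>

static inline uint32_t NextCapacity(uint32_t old_capacity) {
  return old_capacity + (old_capacity >> 1) + 16;  // ~1.5x growth step.
}

static inline uint32_t NewCapacityFor(uint32_t length, uint32_t old_capacity) {
  if (length <= old_capacity) {
    // Trim only when at least half of the store would go unused.
    return (2 * length <= old_capacity) ? length : old_capacity;
  }
  uint32_t min = NextCapacity(old_capacity);
  return length > min ? length : min;
}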
+
+
+class FastObjectElementsAccessor
+ : public FastElementsAccessor<FastObjectElementsAccessor,
+ FixedArray,
+ kPointerSize> {
public:
static MaybeObject* DeleteCommon(JSObject* obj,
uint32_t key) {
- ASSERT(obj->HasFastElements() || obj->HasFastArgumentsElements());
+ ASSERT(obj->HasFastElements() ||
+ obj->HasFastSmiOnlyElements() ||
+ obj->HasFastArgumentsElements());
Heap* heap = obj->GetHeap();
FixedArray* backing_store = FixedArray::cast(obj->elements());
if (backing_store->map() == heap->non_strict_arguments_elements_map()) {
@@ -270,6 +377,22 @@ class FastElementsAccessor
}
protected:
+ friend class FastElementsAccessor<FastObjectElementsAccessor,
+ FixedArray,
+ kPointerSize>;
+
+ static MaybeObject* SetFastElementsCapacityAndLength(JSObject* obj,
+ uint32_t capacity,
+ uint32_t length) {
+ JSObject::SetFastElementsCapacityMode set_capacity_mode =
+ obj->HasFastSmiOnlyElements()
+ ? JSObject::kAllowSmiOnlyElements
+ : JSObject::kDontAllowSmiOnlyElements;
+ return obj->SetFastElementsCapacityAndLength(capacity,
+ length,
+ set_capacity_mode);
+ }
+
virtual MaybeObject* Delete(JSObject* obj,
uint32_t key,
JSReceiver::DeleteMode mode) {
@@ -279,11 +402,21 @@ class FastElementsAccessor
class FastDoubleElementsAccessor
- : public ElementsAccessorBase<FastDoubleElementsAccessor,
- FixedDoubleArray> {
+ : public FastElementsAccessor<FastDoubleElementsAccessor,
+ FixedDoubleArray,
+ kDoubleSize> {
protected:
friend class ElementsAccessorBase<FastDoubleElementsAccessor,
FixedDoubleArray>;
+ friend class FastElementsAccessor<FastDoubleElementsAccessor,
+ FixedDoubleArray,
+ kDoubleSize>;
+
+ static MaybeObject* SetFastElementsCapacityAndLength(JSObject* obj,
+ uint32_t capacity,
+ uint32_t length) {
+ return obj->SetFastDoubleElementsCapacityAndLength(capacity, length);
+ }
virtual MaybeObject* Delete(JSObject* obj,
uint32_t key,
@@ -327,6 +460,14 @@ class ExternalElementsAccessor
}
}
+ static MaybeObject* SetLength(ExternalArray* backing_store,
+ JSObject* obj,
+ Object* length) {
+ // External arrays do not support changing their length.
+ UNREACHABLE();
+ return obj;
+ }
+
virtual MaybeObject* Delete(JSObject* obj,
uint32_t key,
JSReceiver::DeleteMode mode) {
@@ -394,6 +535,63 @@ class DictionaryElementsAccessor
: public ElementsAccessorBase<DictionaryElementsAccessor,
NumberDictionary> {
public:
+ // Adjusts the length of the dictionary backing store and returns the new
+ // length according to ES5 section 15.4.5.2 behavior.
+ static MaybeObject* SetLengthWithoutNormalize(NumberDictionary* dict,
+ JSArray* array,
+ Object* length_object,
+ uint32_t length) {
+ if (length == 0) {
+ // If the length of a slow array is reset to zero, we clear
+ // the array and flush backing storage. This has the added
+ // benefit that the array returns to fast mode.
+ Object* obj;
+ MaybeObject* maybe_obj = array->ResetElements();
+ if (!maybe_obj->ToObject(&obj)) return maybe_obj;
+ } else {
+ uint32_t new_length = length;
+ uint32_t old_length = static_cast<uint32_t>(array->length()->Number());
+ if (new_length < old_length) {
+ // Find last non-deletable element in range of elements to be
+ // deleted and adjust range accordingly.
+ Heap* heap = array->GetHeap();
+ int capacity = dict->Capacity();
+ for (int i = 0; i < capacity; i++) {
+ Object* key = dict->KeyAt(i);
+ if (key->IsNumber()) {
+ uint32_t number = static_cast<uint32_t>(key->Number());
+ if (new_length <= number && number < old_length) {
+ PropertyDetails details = dict->DetailsAt(i);
+ if (details.IsDontDelete()) new_length = number + 1;
+ }
+ }
+ }
+ if (new_length != length) {
+ MaybeObject* maybe_object = heap->NumberFromUint32(new_length);
+ if (!maybe_object->To(&length_object)) return maybe_object;
+ }
+
+ // Remove elements that should be deleted.
+ int removed_entries = 0;
+ Object* the_hole_value = heap->the_hole_value();
+ for (int i = 0; i < capacity; i++) {
+ Object* key = dict->KeyAt(i);
+ if (key->IsNumber()) {
+ uint32_t number = static_cast<uint32_t>(key->Number());
+ if (new_length <= number && number < old_length) {
+ dict->SetEntry(i, the_hole_value, the_hole_value);
+ removed_entries++;
+ }
+ }
+ }
+
+ // Update the number of elements.
+ dict->ElementsRemoved(removed_entries);
+ }
+ }
+ return length_object;
+ }
+
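
// Aside: the loops above implement the ES5 section 15.4.5.2 rule that
// shrinking 'length' must stop at the last non-deletable element. The
// observable JavaScript behavior, shown here in comments:
//
//   var a = [0, 1, 2, 3, 4];
//   Object.defineProperty(a, '2', { value: 2, configurable: false });
//   a.length = 0;
//   // a.length is now 3: indices 3 and 4 were removed, but the
//   // non-configurable element at index 2 could not be deleted, so the
//   // length is clamped to one past it.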
static MaybeObject* DeleteCommon(JSObject* obj,
uint32_t key,
JSReceiver::DeleteMode mode) {
@@ -503,9 +701,17 @@ class NonStrictArgumentsElementsAccessor
}
}
+ static MaybeObject* SetLength(FixedArray* parameter_map,
+ JSObject* obj,
+ Object* length) {
+ // TODO(mstarzinger): This was never implemented but will be used once we
+ // correctly implement [[DefineOwnProperty]] on arrays.
+ UNIMPLEMENTED();
+ return obj;
+ }
+
virtual MaybeObject* Delete(JSObject* obj,
- uint32_t key
- ,
+ uint32_t key,
JSReceiver::DeleteMode mode) {
FixedArray* parameter_map = FixedArray::cast(obj->elements());
Object* probe = GetParameterMapArg(parameter_map, key);
@@ -519,7 +725,7 @@ class NonStrictArgumentsElementsAccessor
if (arguments->IsDictionary()) {
return DictionaryElementsAccessor::DeleteCommon(obj, key, mode);
} else {
- return FastElementsAccessor::DeleteCommon(obj, key);
+ return FastObjectElementsAccessor::DeleteCommon(obj, key);
}
}
return obj->GetHeap()->true_value();
@@ -595,40 +801,108 @@ ElementsAccessor* ElementsAccessor::ForArray(FixedArrayBase* array) {
void ElementsAccessor::InitializeOncePerProcess() {
+  // The first argument in the list is the accessor class; the second argument
+  // can be any arbitrary unique identifier, in this case chosen to be the
+  // corresponding enum. Use the fast element handler for smi-only arrays;
+  // the implementation is currently identical. Note that the order must match
+  // that of the ElementsKind enum for the |accessor_array[]| below to work.
+#define ELEMENTS_LIST(V) \
+ V(FastObjectElementsAccessor, FAST_SMI_ONLY_ELEMENTS) \
+ V(FastObjectElementsAccessor, FAST_ELEMENTS) \
+ V(FastDoubleElementsAccessor, FAST_DOUBLE_ELEMENTS) \
+ V(DictionaryElementsAccessor, DICTIONARY_ELEMENTS) \
+ V(NonStrictArgumentsElementsAccessor, NON_STRICT_ARGUMENTS_ELEMENTS) \
+ V(ExternalByteElementsAccessor, EXTERNAL_BYTE_ELEMENTS) \
+ V(ExternalUnsignedByteElementsAccessor, EXTERNAL_UNSIGNED_BYTE_ELEMENTS) \
+ V(ExternalShortElementsAccessor, EXTERNAL_SHORT_ELEMENTS) \
+ V(ExternalUnsignedShortElementsAccessor, EXTERNAL_UNSIGNED_SHORT_ELEMENTS) \
+ V(ExternalIntElementsAccessor, EXTERNAL_INT_ELEMENTS) \
+ V(ExternalUnsignedIntElementsAccessor, EXTERNAL_UNSIGNED_INT_ELEMENTS) \
+ V(ExternalFloatElementsAccessor, EXTERNAL_FLOAT_ELEMENTS) \
+ V(ExternalDoubleElementsAccessor, EXTERNAL_DOUBLE_ELEMENTS) \
+ V(PixelElementsAccessor, EXTERNAL_PIXEL_ELEMENTS)
+
static struct ConcreteElementsAccessors {
- FastElementsAccessor fast_elements_handler;
- FastDoubleElementsAccessor fast_double_elements_handler;
- DictionaryElementsAccessor dictionary_elements_handler;
- NonStrictArgumentsElementsAccessor non_strict_arguments_elements_handler;
- ExternalByteElementsAccessor byte_elements_handler;
- ExternalUnsignedByteElementsAccessor unsigned_byte_elements_handler;
- ExternalShortElementsAccessor short_elements_handler;
- ExternalUnsignedShortElementsAccessor unsigned_short_elements_handler;
- ExternalIntElementsAccessor int_elements_handler;
- ExternalUnsignedIntElementsAccessor unsigned_int_elements_handler;
- ExternalFloatElementsAccessor float_elements_handler;
- ExternalDoubleElementsAccessor double_elements_handler;
- PixelElementsAccessor pixel_elements_handler;
- } element_accessors;
+#define ACCESSOR_STRUCT(Class, Name) Class* Name##_handler;
+ ELEMENTS_LIST(ACCESSOR_STRUCT)
+#undef ACCESSOR_STRUCT
+ } element_accessors = {
+#define ACCESSOR_INIT(Class, Name) new Class(),
+ ELEMENTS_LIST(ACCESSOR_INIT)
+#undef ACCESSOR_INIT
+ };
static ElementsAccessor* accessor_array[] = {
- &element_accessors.fast_elements_handler,
- &element_accessors.fast_double_elements_handler,
- &element_accessors.dictionary_elements_handler,
- &element_accessors.non_strict_arguments_elements_handler,
- &element_accessors.byte_elements_handler,
- &element_accessors.unsigned_byte_elements_handler,
- &element_accessors.short_elements_handler,
- &element_accessors.unsigned_short_elements_handler,
- &element_accessors.int_elements_handler,
- &element_accessors.unsigned_int_elements_handler,
- &element_accessors.float_elements_handler,
- &element_accessors.double_elements_handler,
- &element_accessors.pixel_elements_handler
+#define ACCESSOR_ARRAY(Class, Name) element_accessors.Name##_handler,
+ ELEMENTS_LIST(ACCESSOR_ARRAY)
+#undef ACCESSOR_ARRAY
};
+#undef ELEMENTS_LIST
+
+ STATIC_ASSERT((sizeof(accessor_array) / sizeof(*accessor_array)) ==
+ kElementsKindCount);
+
elements_accessors_ = accessor_array;
}
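
// Aside: ELEMENTS_LIST above is the classic X-macro pattern: a single list
// macro is expanded three times, to declare the struct fields, initialize
// them, and build the lookup array, so the three pieces cannot drift out of
// sync. A self-contained sketch with hypothetical names:

#define COLOR_LIST(V) \
  V(Red, 0xFF0000)    \
  V(Green, 0x00FF00)  \
  V(Blue, 0x0000FF)

#define COLOR_NAME(Name, Value) #Name,
static const char* color_names[] = { COLOR_LIST(COLOR_NAME) };
#undef COLOR_NAME

#define COLOR_VALUE(Name, Value) Value,
static const int color_values[] = { COLOR_LIST(COLOR_VALUE) };
#undef COLOR_VALUE
#undef COLOR_LIST
// color_names[i] and color_values[i] stay paired by construction.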
+template <typename ElementsAccessorSubclass, typename BackingStoreClass>
+MaybeObject* ElementsAccessorBase<ElementsAccessorSubclass, BackingStoreClass>::
+ SetLength(BackingStoreClass* backing_store,
+ JSObject* obj,
+ Object* length) {
+ JSArray* array = JSArray::cast(obj);
+
+ // Fast case: The new length fits into a Smi.
+ MaybeObject* maybe_smi_length = length->ToSmi();
+ Object* smi_length = Smi::FromInt(0);
+ if (maybe_smi_length->ToObject(&smi_length) && smi_length->IsSmi()) {
+ const int value = Smi::cast(smi_length)->value();
+ if (value >= 0) {
+ Object* new_length;
+ MaybeObject* result = ElementsAccessorSubclass::
+ SetLengthWithoutNormalize(backing_store, array, smi_length, value);
+ if (!result->ToObject(&new_length)) return result;
+ ASSERT(new_length->IsSmi() || new_length->IsUndefined());
+ if (new_length->IsSmi()) {
+ array->set_length(Smi::cast(new_length));
+ return array;
+ }
+ } else {
+ return ThrowArrayLengthRangeError(array->GetHeap());
+ }
+ }
+
+ // Slow case: The new length does not fit into a Smi or conversion
+ // to slow elements is needed for other reasons.
+ if (length->IsNumber()) {
+ uint32_t value;
+ if (length->ToArrayIndex(&value)) {
+ NumberDictionary* dictionary;
+ MaybeObject* maybe_object = array->NormalizeElements();
+ if (!maybe_object->To(&dictionary)) return maybe_object;
+ Object* new_length;
+ MaybeObject* result = DictionaryElementsAccessor::
+ SetLengthWithoutNormalize(dictionary, array, length, value);
+ if (!result->ToObject(&new_length)) return result;
+ ASSERT(new_length->IsNumber());
+ array->set_length(new_length);
+ return array;
+ } else {
+ return ThrowArrayLengthRangeError(array->GetHeap());
+ }
+ }
+
+  // Fall-back case: The new length is not a number, so make the array
+  // have size one and set its only element to the new length value.
+ FixedArray* new_backing_store;
+ MaybeObject* maybe_obj = array->GetHeap()->AllocateFixedArray(1);
+ if (!maybe_obj->To(&new_backing_store)) return maybe_obj;
+ new_backing_store->set(0, length);
+ array->SetContent(new_backing_store);
+ return array;
+}
+
+
} } // namespace v8::internal
diff --git a/deps/v8/src/elements.h b/deps/v8/src/elements.h
index 851c8c3d9..ed1ca5e58 100644
--- a/deps/v8/src/elements.h
+++ b/deps/v8/src/elements.h
@@ -44,6 +44,11 @@ class ElementsAccessor {
JSObject* holder,
Object* receiver) = 0;
+ // Modifies the length data property as specified for JSArrays and resizes
+ // the underlying backing store accordingly.
+ virtual MaybeObject* SetLength(JSObject* holder,
+ Object* new_length) = 0;
+
virtual MaybeObject* Delete(JSObject* holder,
uint32_t key,
JSReceiver::DeleteMode mode) = 0;
diff --git a/deps/v8/src/execution.cc b/deps/v8/src/execution.cc
index f36d4e491..b16e7396a 100644
--- a/deps/v8/src/execution.cc
+++ b/deps/v8/src/execution.cc
@@ -33,6 +33,7 @@
#include "bootstrapper.h"
#include "codegen.h"
#include "debug.h"
+#include "isolate-inl.h"
#include "runtime-profiler.h"
#include "simulator.h"
#include "v8threads.h"
@@ -65,13 +66,13 @@ void StackGuard::reset_limits(const ExecutionAccess& lock) {
}
-static Handle<Object> Invoke(bool construct,
- Handle<JSFunction> func,
+static Handle<Object> Invoke(bool is_construct,
+ Handle<JSFunction> function,
Handle<Object> receiver,
int argc,
- Object*** args,
+ Handle<Object> args[],
bool* has_pending_exception) {
- Isolate* isolate = func->GetIsolate();
+ Isolate* isolate = function->GetIsolate();
// Entering JavaScript.
VMState state(isolate, JS);
@@ -79,21 +80,15 @@ static Handle<Object> Invoke(bool construct,
// Placeholder for return value.
MaybeObject* value = reinterpret_cast<Object*>(kZapValue);
- typedef Object* (*JSEntryFunction)(
- byte* entry,
- Object* function,
- Object* receiver,
- int argc,
- Object*** args);
-
- Handle<Code> code;
- if (construct) {
- JSConstructEntryStub stub;
- code = stub.GetCode();
- } else {
- JSEntryStub stub;
- code = stub.GetCode();
- }
+ typedef Object* (*JSEntryFunction)(byte* entry,
+ Object* function,
+ Object* receiver,
+ int argc,
+ Object*** args);
+
+ Handle<Code> code = is_construct
+ ? isolate->factory()->js_construct_entry_code()
+ : isolate->factory()->js_entry_code();
// Convert calls on global objects to be calls on the global
// receiver instead to avoid having a 'this' pointer which refers
@@ -105,21 +100,22 @@ static Handle<Object> Invoke(bool construct,
// Make sure that the global object of the context we're about to
// make the current one is indeed a global object.
- ASSERT(func->context()->global()->IsGlobalObject());
+ ASSERT(function->context()->global()->IsGlobalObject());
{
// Save and restore context around invocation and block the
// allocation of handles without explicit handle scopes.
SaveContext save(isolate);
NoHandleAllocation na;
- JSEntryFunction entry = FUNCTION_CAST<JSEntryFunction>(code->entry());
+ JSEntryFunction stub_entry = FUNCTION_CAST<JSEntryFunction>(code->entry());
// Call the function through the right JS entry stub.
- byte* entry_address = func->code()->entry();
- JSFunction* function = *func;
- Object* receiver_pointer = *receiver;
- value = CALL_GENERATED_CODE(entry, entry_address, function,
- receiver_pointer, argc, args);
+ byte* function_entry = function->code()->entry();
+ JSFunction* func = *function;
+ Object* recv = *receiver;
+ Object*** argv = reinterpret_cast<Object***>(args);
+ value =
+ CALL_GENERATED_CODE(stub_entry, function_entry, func, recv, argc, argv);
}
#ifdef DEBUG
@@ -148,9 +144,11 @@ static Handle<Object> Invoke(bool construct,
Handle<Object> Execution::Call(Handle<Object> callable,
Handle<Object> receiver,
int argc,
- Object*** args,
+ Handle<Object> argv[],
bool* pending_exception,
bool convert_receiver) {
+ *pending_exception = false;
+
if (!callable->IsJSFunction()) {
callable = TryGetFunctionDelegate(callable, pending_exception);
if (*pending_exception) return callable;
@@ -159,7 +157,7 @@ Handle<Object> Execution::Call(Handle<Object> callable,
// In non-strict mode, convert receiver.
if (convert_receiver && !receiver->IsJSReceiver() &&
- !func->shared()->native() && !func->shared()->strict_mode()) {
+ !func->shared()->native() && func->shared()->is_classic_mode()) {
if (receiver->IsUndefined() || receiver->IsNull()) {
Object* global = func->context()->global()->global_receiver();
// Under some circumstances, 'global' can be the JSBuiltinsObject
@@ -172,13 +170,15 @@ Handle<Object> Execution::Call(Handle<Object> callable,
if (*pending_exception) return callable;
}
- return Invoke(false, func, receiver, argc, args, pending_exception);
+ return Invoke(false, func, receiver, argc, argv, pending_exception);
}
-Handle<Object> Execution::New(Handle<JSFunction> func, int argc,
- Object*** args, bool* pending_exception) {
- return Invoke(true, func, Isolate::Current()->global(), argc, args,
+Handle<Object> Execution::New(Handle<JSFunction> func,
+ int argc,
+ Handle<Object> argv[],
+ bool* pending_exception) {
+ return Invoke(true, func, Isolate::Current()->global(), argc, argv,
pending_exception);
}
@@ -186,7 +186,7 @@ Handle<Object> Execution::New(Handle<JSFunction> func, int argc,
Handle<Object> Execution::TryCall(Handle<JSFunction> func,
Handle<Object> receiver,
int argc,
- Object*** args,
+ Handle<Object> args[],
bool* caught_exception) {
// Enter a try-block while executing the JavaScript code. To avoid
// duplicate error printing it must be non-verbose. Also, to avoid
@@ -195,6 +195,7 @@ Handle<Object> Execution::TryCall(Handle<JSFunction> func,
v8::TryCatch catcher;
catcher.SetVerbose(false);
catcher.SetCaptureMessage(false);
+ *caught_exception = false;
Handle<Object> result = Invoke(false, func, receiver, argc, args,
caught_exception);
@@ -377,7 +378,7 @@ void StackGuard::DisableInterrupts() {
bool StackGuard::IsInterrupted() {
ExecutionAccess access(isolate_);
- return thread_local_.interrupt_flags_ & INTERRUPT;
+ return (thread_local_.interrupt_flags_ & INTERRUPT) != 0;
}
@@ -403,7 +404,7 @@ void StackGuard::Preempt() {
bool StackGuard::IsTerminateExecution() {
ExecutionAccess access(isolate_);
- return thread_local_.interrupt_flags_ & TERMINATE;
+ return (thread_local_.interrupt_flags_ & TERMINATE) != 0;
}
@@ -416,7 +417,7 @@ void StackGuard::TerminateExecution() {
bool StackGuard::IsRuntimeProfilerTick() {
ExecutionAccess access(isolate_);
- return thread_local_.interrupt_flags_ & RUNTIME_PROFILER_TICK;
+ return (thread_local_.interrupt_flags_ & RUNTIME_PROFILER_TICK) != 0;
}
@@ -433,6 +434,22 @@ void StackGuard::RequestRuntimeProfilerTick() {
}
+bool StackGuard::IsGCRequest() {
+ ExecutionAccess access(isolate_);
+ return (thread_local_.interrupt_flags_ & GC_REQUEST) != 0;
+}
+
+
+void StackGuard::RequestGC() {
+ ExecutionAccess access(isolate_);
+ thread_local_.interrupt_flags_ |= GC_REQUEST;
+ if (thread_local_.postpone_interrupts_nesting_ == 0) {
+ thread_local_.jslimit_ = thread_local_.climit_ = kInterruptLimit;
+ isolate_->heap()->SetStackLimits();
+ }
+}
+
+
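
// Aside: RequestGC follows the StackGuard's general interrupt scheme: set a
// bit in interrupt_flags_ and overwrite the stack limits with a sentinel so
// the very next stack check in generated code fails and calls into the
// runtime, which then tests and clears the bit. A minimal single-threaded
// sketch (hypothetical; the real code also takes ExecutionAccess locks):

#include <stdint.h>

struct Guard {
  uintptr_t limit;  // Stack checks fail when sp < limit.
  int flags;        // Pending InterruptFlag bits.
};

static const uintptr_t kSentinelLimit = static_cast<uintptr_t>(-1);

static inline void Request(Guard* g, int what) {
  g->flags |= what;           // Record which interrupt is pending.
  g->limit = kSentinelLimit;  // Force the next stack check to fail.
}

static inline bool TestAndClear(Guard* g, int what) {
  if ((g->flags & what) == 0) return false;
  g->flags &= ~what;          // Consume the request.
  return true;
}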
#ifdef ENABLE_DEBUGGER_SUPPORT
bool StackGuard::IsDebugBreak() {
ExecutionAccess access(isolate_);
@@ -555,14 +572,15 @@ void StackGuard::InitThread(const ExecutionAccess& lock) {
// --- C a l l s t o n a t i v e s ---
-#define RETURN_NATIVE_CALL(name, argc, argv, has_pending_exception) \
- do { \
- Isolate* isolate = Isolate::Current(); \
- Object** args[argc] = argv; \
- ASSERT(has_pending_exception != NULL); \
- return Call(isolate->name##_fun(), \
- isolate->js_builtins_object(), argc, args, \
- has_pending_exception); \
+#define RETURN_NATIVE_CALL(name, args, has_pending_exception) \
+ do { \
+ Isolate* isolate = Isolate::Current(); \
+ Handle<Object> argv[] = args; \
+ ASSERT(has_pending_exception != NULL); \
+ return Call(isolate->name##_fun(), \
+ isolate->js_builtins_object(), \
+ ARRAY_SIZE(argv), argv, \
+ has_pending_exception); \
} while (false)
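
// Aside: the macro now derives the argument count from the array itself
// rather than taking a separately maintained count, eliminating a class of
// argc/argv mismatch bugs. The underlying compile-time element-count idiom,
// as a sketch:

#include <stddef.h>

#define ELEMENT_COUNT(a) (sizeof(a) / sizeof(*(a)))  // Arrays only, never pointers.

static size_t Demo() {
  const char* argv[] = { "one", "two", "three" };
  return ELEMENT_COUNT(argv);  // 3, computed by the compiler.
}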
@@ -583,44 +601,44 @@ Handle<Object> Execution::ToBoolean(Handle<Object> obj) {
Handle<Object> Execution::ToNumber(Handle<Object> obj, bool* exc) {
- RETURN_NATIVE_CALL(to_number, 1, { obj.location() }, exc);
+ RETURN_NATIVE_CALL(to_number, { obj }, exc);
}
Handle<Object> Execution::ToString(Handle<Object> obj, bool* exc) {
- RETURN_NATIVE_CALL(to_string, 1, { obj.location() }, exc);
+ RETURN_NATIVE_CALL(to_string, { obj }, exc);
}
Handle<Object> Execution::ToDetailString(Handle<Object> obj, bool* exc) {
- RETURN_NATIVE_CALL(to_detail_string, 1, { obj.location() }, exc);
+ RETURN_NATIVE_CALL(to_detail_string, { obj }, exc);
}
Handle<Object> Execution::ToObject(Handle<Object> obj, bool* exc) {
if (obj->IsSpecObject()) return obj;
- RETURN_NATIVE_CALL(to_object, 1, { obj.location() }, exc);
+ RETURN_NATIVE_CALL(to_object, { obj }, exc);
}
Handle<Object> Execution::ToInteger(Handle<Object> obj, bool* exc) {
- RETURN_NATIVE_CALL(to_integer, 1, { obj.location() }, exc);
+ RETURN_NATIVE_CALL(to_integer, { obj }, exc);
}
Handle<Object> Execution::ToUint32(Handle<Object> obj, bool* exc) {
- RETURN_NATIVE_CALL(to_uint32, 1, { obj.location() }, exc);
+ RETURN_NATIVE_CALL(to_uint32, { obj }, exc);
}
Handle<Object> Execution::ToInt32(Handle<Object> obj, bool* exc) {
- RETURN_NATIVE_CALL(to_int32, 1, { obj.location() }, exc);
+ RETURN_NATIVE_CALL(to_int32, { obj }, exc);
}
Handle<Object> Execution::NewDate(double time, bool* exc) {
Handle<Object> time_obj = FACTORY->NewNumber(time);
- RETURN_NATIVE_CALL(create_date, 1, { time_obj.location() }, exc);
+ RETURN_NATIVE_CALL(create_date, { time_obj }, exc);
}
@@ -657,7 +675,7 @@ Handle<Object> Execution::CharAt(Handle<String> string, uint32_t index) {
bool caught_exception;
Handle<Object> index_object = factory->NewNumberFromInt(int_index);
- Object** index_arg[] = { index_object.location() };
+ Handle<Object> index_arg[] = { index_object };
Handle<Object> result = TryCall(Handle<JSFunction>::cast(char_at),
string,
ARRAY_SIZE(index_arg),
@@ -671,7 +689,8 @@ Handle<Object> Execution::CharAt(Handle<String> string, uint32_t index) {
Handle<JSFunction> Execution::InstantiateFunction(
- Handle<FunctionTemplateInfo> data, bool* exc) {
+ Handle<FunctionTemplateInfo> data,
+ bool* exc) {
Isolate* isolate = data->GetIsolate();
// Fast case: see if the function has already been instantiated
int serial_number = Smi::cast(data->serial_number())->value();
@@ -680,10 +699,12 @@ Handle<JSFunction> Execution::InstantiateFunction(
GetElementNoExceptionThrown(serial_number);
if (elm->IsJSFunction()) return Handle<JSFunction>(JSFunction::cast(elm));
// The function has not yet been instantiated in this context; do it.
- Object** args[1] = { Handle<Object>::cast(data).location() };
- Handle<Object> result =
- Call(isolate->instantiate_fun(),
- isolate->js_builtins_object(), 1, args, exc);
+ Handle<Object> args[] = { data };
+ Handle<Object> result = Call(isolate->instantiate_fun(),
+ isolate->js_builtins_object(),
+ ARRAY_SIZE(args),
+ args,
+ exc);
if (*exc) return Handle<JSFunction>::null();
return Handle<JSFunction>::cast(result);
}
@@ -710,10 +731,12 @@ Handle<JSObject> Execution::InstantiateObject(Handle<ObjectTemplateInfo> data,
ASSERT(!*exc);
return Handle<JSObject>(JSObject::cast(result));
} else {
- Object** args[1] = { Handle<Object>::cast(data).location() };
- Handle<Object> result =
- Call(isolate->instantiate_fun(),
- isolate->js_builtins_object(), 1, args, exc);
+ Handle<Object> args[] = { data };
+ Handle<Object> result = Call(isolate->instantiate_fun(),
+ isolate->js_builtins_object(),
+ ARRAY_SIZE(args),
+ args,
+ exc);
if (*exc) return Handle<JSObject>::null();
return Handle<JSObject>::cast(result);
}
@@ -724,9 +747,12 @@ void Execution::ConfigureInstance(Handle<Object> instance,
Handle<Object> instance_template,
bool* exc) {
Isolate* isolate = Isolate::Current();
- Object** args[2] = { instance.location(), instance_template.location() };
+ Handle<Object> args[] = { instance, instance_template };
Execution::Call(isolate->configure_instance_fun(),
- isolate->js_builtins_object(), 2, args, exc);
+ isolate->js_builtins_object(),
+ ARRAY_SIZE(args),
+ args,
+ exc);
}
@@ -735,16 +761,13 @@ Handle<String> Execution::GetStackTraceLine(Handle<Object> recv,
Handle<Object> pos,
Handle<Object> is_global) {
Isolate* isolate = fun->GetIsolate();
- const int argc = 4;
- Object** args[argc] = { recv.location(),
- Handle<Object>::cast(fun).location(),
- pos.location(),
- is_global.location() };
- bool caught_exception = false;
- Handle<Object> result =
- TryCall(isolate->get_stack_trace_line_fun(),
- isolate->js_builtins_object(), argc, args,
- &caught_exception);
+ Handle<Object> args[] = { recv, fun, pos, is_global };
+ bool caught_exception;
+ Handle<Object> result = TryCall(isolate->get_stack_trace_line_fun(),
+ isolate->js_builtins_object(),
+ ARRAY_SIZE(args),
+ args,
+ &caught_exception);
if (caught_exception || !result->IsString()) {
return isolate->factory()->empty_symbol();
}
@@ -852,6 +875,12 @@ void Execution::ProcessDebugMesssages(bool debug_command_only) {
MaybeObject* Execution::HandleStackGuardInterrupt() {
Isolate* isolate = Isolate::Current();
StackGuard* stack_guard = isolate->stack_guard();
+
+ if (stack_guard->IsGCRequest()) {
+ isolate->heap()->CollectAllGarbage(false);
+ stack_guard->Continue(GC_REQUEST);
+ }
+
isolate->counters()->stack_interrupts()->Increment();
if (stack_guard->IsRuntimeProfilerTick()) {
isolate->counters()->runtime_profiler_ticks()->Increment();
diff --git a/deps/v8/src/execution.h b/deps/v8/src/execution.h
index 5cd7141fc..f2d17d079 100644
--- a/deps/v8/src/execution.h
+++ b/deps/v8/src/execution.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -41,7 +41,8 @@ enum InterruptFlag {
DEBUGCOMMAND = 1 << 2,
PREEMPT = 1 << 3,
TERMINATE = 1 << 4,
- RUNTIME_PROFILER_TICK = 1 << 5
+ RUNTIME_PROFILER_TICK = 1 << 5,
+ GC_REQUEST = 1 << 6
};
class Execution : public AllStatic {
@@ -60,7 +61,7 @@ class Execution : public AllStatic {
static Handle<Object> Call(Handle<Object> callable,
Handle<Object> receiver,
int argc,
- Object*** args,
+ Handle<Object> argv[],
bool* pending_exception,
bool convert_receiver = false);
@@ -73,7 +74,7 @@ class Execution : public AllStatic {
//
static Handle<Object> New(Handle<JSFunction> func,
int argc,
- Object*** args,
+ Handle<Object> argv[],
bool* pending_exception);
// Call a function, just like Call(), but make sure to silently catch
@@ -83,7 +84,7 @@ class Execution : public AllStatic {
static Handle<Object> TryCall(Handle<JSFunction> func,
Handle<Object> receiver,
int argc,
- Object*** args,
+ Handle<Object> argv[],
bool* caught_exception);
// ECMA-262 9.2
@@ -196,6 +197,8 @@ class StackGuard {
bool IsDebugCommand();
void DebugCommand();
#endif
+ bool IsGCRequest();
+ void RequestGC();
void Continue(InterruptFlag after_what);
// This provides an asynchronous read of the stack limits for the current
diff --git a/deps/v8/src/extensions/gc-extension.cc b/deps/v8/src/extensions/gc-extension.cc
index 3740c27aa..54c8cdc0c 100644
--- a/deps/v8/src/extensions/gc-extension.cc
+++ b/deps/v8/src/extensions/gc-extension.cc
@@ -40,19 +40,15 @@ v8::Handle<v8::FunctionTemplate> GCExtension::GetNativeFunction(
v8::Handle<v8::Value> GCExtension::GC(const v8::Arguments& args) {
- bool compact = false;
- // All allocation spaces other than NEW_SPACE have the same effect.
- if (args.Length() >= 1 && args[0]->IsBoolean()) {
- compact = args[0]->BooleanValue();
- }
- HEAP->CollectAllGarbage(compact);
+ HEAP->CollectAllGarbage(Heap::kNoGCFlags);
return v8::Undefined();
}
void GCExtension::Register() {
- static GCExtension gc_extension;
- static v8::DeclareExtension gc_extension_declaration(&gc_extension);
+ static GCExtension* gc_extension = NULL;
+ if (gc_extension == NULL) gc_extension = new GCExtension();
+ static v8::DeclareExtension gc_extension_declaration(gc_extension);
}
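
// Aside: the rewrite above trades a function-local static object for a
// once-allocated, deliberately leaked heap object. This "leaky singleton"
// sidesteps static-destruction-order problems: a static object is destroyed
// at process exit, possibly while other static destructors still use it.
// Sketch of the shape, with a hypothetical type:

#include <stddef.h>

class Registry {};

static Registry* GetRegistry() {
  static Registry* instance = NULL;                 // Set on first call.
  if (instance == NULL) instance = new Registry();  // Never deleted.
  return instance;
}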
} } // namespace v8::internal
diff --git a/deps/v8/src/factory.cc b/deps/v8/src/factory.cc
index 97289266e..f1042a4c6 100644
--- a/deps/v8/src/factory.cc
+++ b/deps/v8/src/factory.cc
@@ -59,13 +59,13 @@ Handle<FixedArray> Factory::NewFixedArrayWithHoles(int size,
}
-Handle<FixedArray> Factory::NewFixedDoubleArray(int size,
- PretenureFlag pretenure) {
+Handle<FixedDoubleArray> Factory::NewFixedDoubleArray(int size,
+ PretenureFlag pretenure) {
ASSERT(0 <= size);
CALL_HEAP_FUNCTION(
isolate(),
isolate()->heap()->AllocateUninitializedFixedDoubleArray(size, pretenure),
- FixedArray);
+ FixedDoubleArray);
}
@@ -85,6 +85,14 @@ Handle<NumberDictionary> Factory::NewNumberDictionary(int at_least_space_for) {
}
+Handle<ObjectHashSet> Factory::NewObjectHashSet(int at_least_space_for) {
+ ASSERT(0 <= at_least_space_for);
+ CALL_HEAP_FUNCTION(isolate(),
+ ObjectHashSet::Allocate(at_least_space_for),
+ ObjectHashSet);
+}
+
+
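
// Aside: every factory method here funnels through CALL_HEAP_FUNCTION, which
// turns a raw heap allocation (one that may fail when the heap is full) into
// a handle by collecting garbage and retrying. A simplified sketch of that
// control flow with hypothetical stand-ins, not the real macro:

#include <stddef.h>

typedef void* RawObject;

extern RawObject TryAllocate(size_t size);  // Returns NULL when heap is full.
extern void CollectGarbage();

static RawObject AllocateWithRetry(size_t size) {
  RawObject result = TryAllocate(size);
  if (result == NULL) {
    CollectGarbage();            // Free up space, then retry.
    result = TryAllocate(size);  // The real code reports OOM if this fails.
  }
  return result;
}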
Handle<ObjectHashTable> Factory::NewObjectHashTable(int at_least_space_for) {
ASSERT(0 <= at_least_space_for);
CALL_HEAP_FUNCTION(isolate(),
@@ -234,7 +242,7 @@ Handle<String> Factory::NewProperSubString(Handle<String> str,
Handle<String> Factory::NewExternalStringFromAscii(
- ExternalAsciiString::Resource* resource) {
+ const ExternalAsciiString::Resource* resource) {
CALL_HEAP_FUNCTION(
isolate(),
isolate()->heap()->AllocateExternalStringFromAscii(resource),
@@ -243,7 +251,7 @@ Handle<String> Factory::NewExternalStringFromAscii(
Handle<String> Factory::NewExternalStringFromTwoByte(
- ExternalTwoByteString::Resource* resource) {
+ const ExternalTwoByteString::Resource* resource) {
CALL_HEAP_FUNCTION(
isolate(),
isolate()->heap()->AllocateExternalStringFromTwoByte(resource),
@@ -295,7 +303,7 @@ Handle<Context> Factory::NewWithContext(Handle<JSFunction> function,
Handle<Context> Factory::NewBlockContext(
Handle<JSFunction> function,
Handle<Context> previous,
- Handle<SerializedScopeInfo> scope_info) {
+ Handle<ScopeInfo> scope_info) {
CALL_HEAP_FUNCTION(
isolate(),
isolate()->heap()->AllocateBlockContext(*function,
@@ -404,10 +412,12 @@ Handle<JSGlobalPropertyCell> Factory::NewJSGlobalPropertyCell(
}
-Handle<Map> Factory::NewMap(InstanceType type, int instance_size) {
+Handle<Map> Factory::NewMap(InstanceType type,
+ int instance_size,
+ ElementsKind elements_kind) {
CALL_HEAP_FUNCTION(
isolate(),
- isolate()->heap()->AllocateMap(type, instance_size),
+ isolate()->heap()->AllocateMap(type, instance_size, elements_kind),
Map);
}
@@ -455,23 +465,11 @@ Handle<Map> Factory::CopyMapDropTransitions(Handle<Map> src) {
}
-Handle<Map> Factory::GetFastElementsMap(Handle<Map> src) {
- CALL_HEAP_FUNCTION(isolate(), src->GetFastElementsMap(), Map);
-}
-
-
-Handle<Map> Factory::GetSlowElementsMap(Handle<Map> src) {
- CALL_HEAP_FUNCTION(isolate(), src->GetSlowElementsMap(), Map);
-}
-
-
Handle<Map> Factory::GetElementsTransitionMap(
- Handle<Map> src,
- ElementsKind elements_kind,
- bool safe_to_add_transition) {
+ Handle<JSObject> src,
+ ElementsKind elements_kind) {
CALL_HEAP_FUNCTION(isolate(),
- src->GetElementsTransitionMap(elements_kind,
- safe_to_add_transition),
+ src->GetElementsTransitionMap(elements_kind),
Map);
}
@@ -481,6 +479,12 @@ Handle<FixedArray> Factory::CopyFixedArray(Handle<FixedArray> array) {
}
+Handle<FixedDoubleArray> Factory::CopyFixedDoubleArray(
+ Handle<FixedDoubleArray> array) {
+ CALL_HEAP_FUNCTION(isolate(), array->Copy(), FixedDoubleArray);
+}
+
+
Handle<JSFunction> Factory::BaseNewFunctionFromSharedFunctionInfo(
Handle<SharedFunctionInfo> function_info,
Handle<Map> function_map,
@@ -501,22 +505,26 @@ Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
PretenureFlag pretenure) {
Handle<JSFunction> result = BaseNewFunctionFromSharedFunctionInfo(
function_info,
- function_info->strict_mode()
- ? isolate()->strict_mode_function_map()
- : isolate()->function_map(),
+ function_info->is_classic_mode()
+ ? isolate()->function_map()
+ : isolate()->strict_mode_function_map(),
pretenure);
result->set_context(*context);
- int number_of_literals = function_info->num_literals();
- Handle<FixedArray> literals = NewFixedArray(number_of_literals, pretenure);
- if (number_of_literals > 0) {
- // Store the object, regexp and array functions in the literals
- // array prefix. These functions will be used when creating
- // object, regexp and array literals in this function.
- literals->set(JSFunction::kLiteralGlobalContextIndex,
- context->global_context());
+ if (!function_info->bound()) {
+ int number_of_literals = function_info->num_literals();
+ Handle<FixedArray> literals = NewFixedArray(number_of_literals, pretenure);
+ if (number_of_literals > 0) {
+ // Store the object, regexp and array functions in the literals
+ // array prefix. These functions will be used when creating
+ // object, regexp and array literals in this function.
+ literals->set(JSFunction::kLiteralGlobalContextIndex,
+ context->global_context());
+ }
+ result->set_literals(*literals);
+ } else {
+ result->set_function_bindings(isolate()->heap()->empty_fixed_array());
}
- result->set_literals(*literals);
result->set_next_function_link(isolate()->heap()->undefined_value());
if (V8::UseCrankshaft() &&
@@ -538,17 +546,19 @@ Handle<Object> Factory::NewNumber(double value,
}
-Handle<Object> Factory::NewNumberFromInt(int value) {
+Handle<Object> Factory::NewNumberFromInt(int32_t value,
+ PretenureFlag pretenure) {
CALL_HEAP_FUNCTION(
isolate(),
- isolate()->heap()->NumberFromInt32(value), Object);
+ isolate()->heap()->NumberFromInt32(value, pretenure), Object);
}
-Handle<Object> Factory::NewNumberFromUint(uint32_t value) {
+Handle<Object> Factory::NewNumberFromUint(uint32_t value,
+ PretenureFlag pretenure) {
CALL_HEAP_FUNCTION(
isolate(),
- isolate()->heap()->NumberFromUint32(value), Object);
+ isolate()->heap()->NumberFromUint32(value, pretenure), Object);
}
@@ -641,14 +651,16 @@ Handle<Object> Factory::NewError(const char* maker,
return undefined_value();
Handle<JSFunction> fun = Handle<JSFunction>::cast(fun_obj);
Handle<Object> type_obj = LookupAsciiSymbol(type);
- Object** argv[2] = { type_obj.location(),
- Handle<Object>::cast(args).location() };
+ Handle<Object> argv[] = { type_obj, args };
// Invoke the JavaScript factory method. If an exception is thrown while
// running the factory method, use the exception as the result.
bool caught_exception;
Handle<Object> result = Execution::TryCall(fun,
- isolate()->js_builtins_object(), 2, argv, &caught_exception);
+ isolate()->js_builtins_object(),
+ ARRAY_SIZE(argv),
+ argv,
+ &caught_exception);
return result;
}
@@ -664,13 +676,16 @@ Handle<Object> Factory::NewError(const char* constructor,
Handle<JSFunction> fun = Handle<JSFunction>(
JSFunction::cast(isolate()->js_builtins_object()->
GetPropertyNoExceptionThrown(*constr)));
- Object** argv[1] = { Handle<Object>::cast(message).location() };
+ Handle<Object> argv[] = { message };
// Invoke the JavaScript factory method. If an exception is thrown while
// running the factory method, use the exception as the result.
bool caught_exception;
Handle<Object> result = Execution::TryCall(fun,
- isolate()->js_builtins_object(), 1, argv, &caught_exception);
+ isolate()->js_builtins_object(),
+ ARRAY_SIZE(argv),
+ argv,
+ &caught_exception);
return result;
}
@@ -722,7 +737,12 @@ Handle<JSFunction> Factory::NewFunctionWithPrototype(Handle<String> name,
if (force_initial_map ||
type != JS_OBJECT_TYPE ||
instance_size != JSObject::kHeaderSize) {
- Handle<Map> initial_map = NewMap(type, instance_size);
+ ElementsKind default_elements_kind = FLAG_smi_only_arrays
+ ? FAST_SMI_ONLY_ELEMENTS
+ : FAST_ELEMENTS;
+ Handle<Map> initial_map = NewMap(type,
+ instance_size,
+ default_elements_kind);
function->set_initial_map(*initial_map);
initial_map->set_constructor(*function);
}
@@ -739,7 +759,7 @@ Handle<JSFunction> Factory::NewFunctionWithPrototype(Handle<String> name,
Handle<JSFunction> Factory::NewFunctionWithoutPrototype(Handle<String> name,
Handle<Code> code) {
Handle<JSFunction> function = NewFunctionWithoutPrototype(name,
- kNonStrictMode);
+ CLASSIC_MODE);
function->shared()->set_code(*code);
function->set_code(*code);
ASSERT(!function->has_initial_map());
@@ -748,11 +768,11 @@ Handle<JSFunction> Factory::NewFunctionWithoutPrototype(Handle<String> name,
}
-Handle<SerializedScopeInfo> Factory::NewSerializedScopeInfo(int length) {
+Handle<ScopeInfo> Factory::NewScopeInfo(int length) {
CALL_HEAP_FUNCTION(
isolate(),
- isolate()->heap()->AllocateSerializedScopeInfo(length),
- SerializedScopeInfo);
+ isolate()->heap()->AllocateScopeInfo(length),
+ ScopeInfo);
}
@@ -821,10 +841,13 @@ Handle<DescriptorArray> Factory::CopyAppendCallbackDescriptors(
// Number of descriptors added to the result so far.
int descriptor_count = 0;
+  // Ensure that marking will not progress and change the color of objects.
+ DescriptorArray::WhitenessWitness witness(*result);
+
// Copy the descriptors from the array.
for (int i = 0; i < array->number_of_descriptors(); i++) {
- if (array->GetType(i) != NULL_DESCRIPTOR) {
- result->CopyFrom(descriptor_count++, *array, i);
+ if (!array->IsNullDescriptor(i)) {
+ result->CopyFrom(descriptor_count++, *array, i, witness);
}
}
@@ -844,7 +867,7 @@ Handle<DescriptorArray> Factory::CopyAppendCallbackDescriptors(
if (result->LinearSearch(*key, descriptor_count) ==
DescriptorArray::kNotFound) {
CallbacksDescriptor desc(*key, *entry, entry->property_attributes());
- result->Set(descriptor_count, &desc);
+ result->Set(descriptor_count, &desc, witness);
descriptor_count++;
} else {
duplicates++;
@@ -858,13 +881,13 @@ Handle<DescriptorArray> Factory::CopyAppendCallbackDescriptors(
Handle<DescriptorArray> new_result =
NewDescriptorArray(number_of_descriptors);
for (int i = 0; i < number_of_descriptors; i++) {
- new_result->CopyFrom(i, *result, i);
+ new_result->CopyFrom(i, *result, i, witness);
}
result = new_result;
}
// Sort the result before returning.
- result->Sort();
+ result->Sort(witness);
return result;
}
@@ -908,11 +931,26 @@ Handle<JSArray> Factory::NewJSArrayWithElements(Handle<FixedArray> elements,
Handle<JSArray> result =
Handle<JSArray>::cast(NewJSObject(isolate()->array_function(),
pretenure));
- result->SetContent(*elements);
+ SetContent(result, elements);
return result;
}
+void Factory::SetContent(Handle<JSArray> array,
+ Handle<FixedArray> elements) {
+ CALL_HEAP_FUNCTION_VOID(
+ isolate(),
+ array->SetContent(*elements));
+}
+
+
+void Factory::EnsureCanContainNonSmiElements(Handle<JSArray> array) {
+ CALL_HEAP_FUNCTION_VOID(
+ isolate(),
+ array->EnsureCanContainNonSmiElements());
+}
+
+
Handle<JSProxy> Factory::NewJSProxy(Handle<Object> handler,
Handle<Object> prototype) {
CALL_HEAP_FUNCTION(
@@ -938,11 +976,18 @@ void Factory::BecomeJSFunction(Handle<JSReceiver> object) {
}
+void Factory::SetIdentityHash(Handle<JSObject> object, Object* hash) {
+ CALL_HEAP_FUNCTION_VOID(
+ isolate(),
+ object->SetIdentityHash(hash, ALLOW_CREATION));
+}
+
+
Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
Handle<String> name,
int number_of_literals,
Handle<Code> code,
- Handle<SerializedScopeInfo> scope_info) {
+ Handle<ScopeInfo> scope_info) {
Handle<SharedFunctionInfo> shared = NewSharedFunctionInfo(name);
shared->set_code(*code);
shared->set_scope_info(*scope_info);
@@ -990,6 +1035,12 @@ Handle<String> Factory::NumberToString(Handle<Object> number) {
}
+Handle<String> Factory::Uint32ToString(uint32_t value) {
+ CALL_HEAP_FUNCTION(isolate(),
+ isolate()->heap()->Uint32ToString(value), String);
+}
+
+
Handle<NumberDictionary> Factory::DictionaryAtNumberPut(
Handle<NumberDictionary> dictionary,
uint32_t key,
@@ -1022,11 +1073,11 @@ Handle<JSFunction> Factory::NewFunction(Handle<String> name,
Handle<JSFunction> Factory::NewFunctionWithoutPrototypeHelper(
Handle<String> name,
- StrictModeFlag strict_mode) {
+ LanguageMode language_mode) {
Handle<SharedFunctionInfo> function_share = NewSharedFunctionInfo(name);
- Handle<Map> map = strict_mode == kStrictMode
- ? isolate()->strict_mode_function_without_prototype_map()
- : isolate()->function_without_prototype_map();
+ Handle<Map> map = (language_mode == CLASSIC_MODE)
+ ? isolate()->function_without_prototype_map()
+ : isolate()->strict_mode_function_without_prototype_map();
CALL_HEAP_FUNCTION(isolate(),
isolate()->heap()->AllocateFunction(
*map,
@@ -1038,8 +1089,9 @@ Handle<JSFunction> Factory::NewFunctionWithoutPrototypeHelper(
Handle<JSFunction> Factory::NewFunctionWithoutPrototype(
Handle<String> name,
- StrictModeFlag strict_mode) {
- Handle<JSFunction> fun = NewFunctionWithoutPrototypeHelper(name, strict_mode);
+ LanguageMode language_mode) {
+ Handle<JSFunction> fun =
+ NewFunctionWithoutPrototypeHelper(name, language_mode);
fun->set_context(isolate()->context()->global_context());
return fun;
}
@@ -1299,4 +1351,20 @@ void Factory::ConfigureInstance(Handle<FunctionTemplateInfo> desc,
}
+Handle<Object> Factory::GlobalConstantFor(Handle<String> name) {
+ Heap* h = isolate()->heap();
+ if (name->Equals(h->undefined_symbol())) return undefined_value();
+ if (name->Equals(h->nan_symbol())) return nan_value();
+ if (name->Equals(h->infinity_symbol())) return infinity_value();
+ return Handle<Object>::null();
+}
+
+
+Handle<Object> Factory::ToBoolean(bool value) {
+ return Handle<Object>(value
+ ? isolate()->heap()->true_value()
+ : isolate()->heap()->false_value());
+}
+
+
} } // namespace v8::internal
diff --git a/deps/v8/src/factory.h b/deps/v8/src/factory.h
index 71ae750b3..0f028e5c5 100644
--- a/deps/v8/src/factory.h
+++ b/deps/v8/src/factory.h
@@ -50,7 +50,7 @@ class Factory {
PretenureFlag pretenure = NOT_TENURED);
// Allocate a new uninitialized fixed double array.
- Handle<FixedArray> NewFixedDoubleArray(
+ Handle<FixedDoubleArray> NewFixedDoubleArray(
int size,
PretenureFlag pretenure = NOT_TENURED);
@@ -58,6 +58,8 @@ class Factory {
Handle<StringDictionary> NewStringDictionary(int at_least_space_for);
+ Handle<ObjectHashSet> NewObjectHashSet(int at_least_space_for);
+
Handle<ObjectHashTable> NewObjectHashTable(int at_least_space_for);
Handle<DescriptorArray> NewDescriptorArray(int number_of_descriptors);
@@ -145,9 +147,9 @@ class Factory {
// not make sense to have a UTF-8 factory function for external strings,
// because we cannot change the underlying buffer.
Handle<String> NewExternalStringFromAscii(
- ExternalAsciiString::Resource* resource);
+ const ExternalAsciiString::Resource* resource);
Handle<String> NewExternalStringFromTwoByte(
- ExternalTwoByteString::Resource* resource);
+ const ExternalTwoByteString::Resource* resource);
// Create a global (but otherwise uninitialized) context.
Handle<Context> NewGlobalContext();
@@ -170,7 +172,7 @@ class Factory {
// Create a 'block' context.
Handle<Context> NewBlockContext(Handle<JSFunction> function,
Handle<Context> previous,
- Handle<SerializedScopeInfo> scope_info);
+ Handle<ScopeInfo> scope_info);
// Return the Symbol matching the passed in string.
Handle<String> SymbolFromString(Handle<String> value);
@@ -203,7 +205,9 @@ class Factory {
Handle<JSGlobalPropertyCell> NewJSGlobalPropertyCell(
Handle<Object> value);
- Handle<Map> NewMap(InstanceType type, int instance_size);
+ Handle<Map> NewMap(InstanceType type,
+ int instance_size,
+ ElementsKind elements_kind = FAST_ELEMENTS);
Handle<JSObject> NewFunctionPrototype(Handle<JSFunction> function);
@@ -215,22 +219,22 @@ class Factory {
Handle<Map> CopyMapDropTransitions(Handle<Map> map);
- Handle<Map> GetFastElementsMap(Handle<Map> map);
-
- Handle<Map> GetSlowElementsMap(Handle<Map> map);
-
- Handle<Map> GetElementsTransitionMap(Handle<Map> map,
- ElementsKind elements_kind,
- bool safe_to_add_transition);
+ Handle<Map> GetElementsTransitionMap(Handle<JSObject> object,
+ ElementsKind elements_kind);
Handle<FixedArray> CopyFixedArray(Handle<FixedArray> array);
+ Handle<FixedDoubleArray> CopyFixedDoubleArray(
+ Handle<FixedDoubleArray> array);
+
// Numbers (eg, literals) are pretenured by the parser.
Handle<Object> NewNumber(double value,
PretenureFlag pretenure = NOT_TENURED);
- Handle<Object> NewNumberFromInt(int value);
- Handle<Object> NewNumberFromUint(uint32_t value);
+ Handle<Object> NewNumberFromInt(int32_t value,
+ PretenureFlag pretenure = NOT_TENURED);
+ Handle<Object> NewNumberFromUint(uint32_t value,
+ PretenureFlag pretenure = NOT_TENURED);
// These objects are used by the api to create env-independent data
// structures in the heap.
@@ -258,18 +262,24 @@ class Factory {
Handle<FixedArray> elements,
PretenureFlag pretenure = NOT_TENURED);
+ void SetContent(Handle<JSArray> array, Handle<FixedArray> elements);
+
+ void EnsureCanContainNonSmiElements(Handle<JSArray> array);
+
Handle<JSProxy> NewJSProxy(Handle<Object> handler, Handle<Object> prototype);
// Change the type of the argument into a JS object/function and reinitialize.
void BecomeJSObject(Handle<JSReceiver> object);
void BecomeJSFunction(Handle<JSReceiver> object);
+ void SetIdentityHash(Handle<JSObject> object, Object* hash);
+
Handle<JSFunction> NewFunction(Handle<String> name,
Handle<Object> prototype);
Handle<JSFunction> NewFunctionWithoutPrototype(
Handle<String> name,
- StrictModeFlag strict_mode);
+ LanguageMode language_mode);
Handle<JSFunction> NewFunction(Handle<Object> super, bool is_global);
@@ -283,7 +293,7 @@ class Factory {
Handle<Context> context,
PretenureFlag pretenure = TENURED);
- Handle<SerializedScopeInfo> NewSerializedScopeInfo(int length);
+ Handle<ScopeInfo> NewScopeInfo(int length);
Handle<Code> NewCode(const CodeDesc& desc,
Code::Flags flags,
@@ -356,6 +366,7 @@ class Factory {
PropertyAttributes attributes);
Handle<String> NumberToString(Handle<Object> number);
+ Handle<String> Uint32ToString(uint32_t value);
enum ApiInstanceType {
JavaScriptObject,
@@ -400,7 +411,7 @@ class Factory {
Handle<String> name,
int number_of_literals,
Handle<Code> code,
- Handle<SerializedScopeInfo> scope_info);
+ Handle<ScopeInfo> scope_info);
Handle<SharedFunctionInfo> NewSharedFunctionInfo(Handle<String> name);
Handle<JSMessageObject> NewJSMessageObject(
@@ -442,6 +453,14 @@ class Factory {
JSRegExp::Flags flags,
int capture_count);
+ // Returns the value for a known global constant (a property of the global
+ // object which is neither configurable nor writable) like 'undefined'.
+ // Returns a null handle when the given name is unknown.
+ Handle<Object> GlobalConstantFor(Handle<String> name);
+
+  // Converts the given boolean condition to a JavaScript boolean value.
+ Handle<Object> ToBoolean(bool value);
+
private:
Isolate* isolate() { return reinterpret_cast<Isolate*>(this); }
@@ -450,7 +469,7 @@ class Factory {
Handle<JSFunction> NewFunctionWithoutPrototypeHelper(
Handle<String> name,
- StrictModeFlag strict_mode);
+ LanguageMode language_mode);
Handle<DescriptorArray> CopyAppendCallbackDescriptors(
Handle<DescriptorArray> array,
diff --git a/deps/v8/src/fast-dtoa.h b/deps/v8/src/fast-dtoa.h
index 94c22ecd7..ef2855793 100644
--- a/deps/v8/src/fast-dtoa.h
+++ b/deps/v8/src/fast-dtoa.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -43,7 +43,7 @@ enum FastDtoaMode {
// FastDtoa will produce at most kFastDtoaMaximalLength digits. This does not
// include the terminating '\0' character.
-static const int kFastDtoaMaximalLength = 17;
+const int kFastDtoaMaximalLength = 17;
// Provides a decimal representation of v.
// The result should be interpreted as buffer * 10^(point - length).
diff --git a/deps/v8/src/flag-definitions.h b/deps/v8/src/flag-definitions.h
index 7df2b0bf0..f145df751 100644
--- a/deps/v8/src/flag-definitions.h
+++ b/deps/v8/src/flag-definitions.h
@@ -98,20 +98,23 @@ private:
// Flags for experimental language features.
DEFINE_bool(harmony_typeof, false, "enable harmony semantics for typeof")
+DEFINE_bool(harmony_scoping, false, "enable harmony block scoping")
DEFINE_bool(harmony_proxies, false, "enable harmony proxies")
-DEFINE_bool(harmony_weakmaps, false, "enable harmony weak maps")
-DEFINE_bool(harmony_block_scoping, false, "enable harmony block scoping")
+DEFINE_bool(harmony_collections, false,
+ "enable harmony collections (sets, maps, and weak maps)")
+DEFINE_bool(harmony, false, "enable all harmony features")
// Flags for experimental implementation features.
DEFINE_bool(unbox_double_arrays, true, "automatically unbox arrays of doubles")
-DEFINE_bool(string_slices, false, "use string slices")
+DEFINE_bool(smi_only_arrays, false, "tracks arrays with only smi values")
+DEFINE_bool(string_slices, true, "use string slices")
+
+DEFINE_bool(clever_optimizations,
+ true,
+ "Optimize object size, Array shift, DOM strings and string +")
// Flags for Crankshaft.
-#ifdef V8_TARGET_ARCH_MIPS
- DEFINE_bool(crankshaft, false, "use crankshaft")
-#else
- DEFINE_bool(crankshaft, true, "use crankshaft")
-#endif
+DEFINE_bool(crankshaft, true, "use crankshaft")
DEFINE_string(hydrogen_filter, "", "hydrogen use/trace filter")
DEFINE_bool(use_hydrogen, true, "use generated hydrogen for compilation")
DEFINE_bool(build_lithium, true, "use lithium chunk builder")
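
// Aside: each DEFINE_bool(foo, default, "help") entry becomes a FLAG_foo
// variable that can be toggled on the command line as --foo or --nofoo. For
// example, with this patch the new umbrella flag can be passed to the d8
// shell as:
//
//   d8 --harmony script.js          # enable all harmony features
//   d8 --noharmony_proxies test.js  # turn one feature off explicitly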
@@ -125,6 +128,9 @@ DEFINE_bool(use_inlining, true, "use function inlining")
DEFINE_bool(limit_inlining, true, "limit code size growth from inlining")
DEFINE_bool(eliminate_empty_blocks, true, "eliminate empty blocks")
DEFINE_bool(loop_invariant_code_motion, true, "loop invariant code motion")
+DEFINE_bool(collect_megamorphic_maps_from_stub_cache,
+ true,
+ "crankshaft harvests type feedback from stub cache")
DEFINE_bool(hydrogen_stats, false, "print statistics for hydrogen")
DEFINE_bool(trace_hydrogen, false, "trace generated hydrogen to file")
DEFINE_bool(trace_inlining, false, "trace inlining decisions")
@@ -180,6 +186,8 @@ DEFINE_bool(expose_gc, false, "expose gc extension")
DEFINE_bool(expose_externalize_string, false,
"expose externalize string extension")
DEFINE_int(stack_trace_limit, 10, "number of stack frames to capture")
+DEFINE_bool(builtins_in_stack_traces, false,
+ "show built-in functions in stack traces")
DEFINE_bool(disable_native_files, false, "disable builtin natives files")
// builtins-ia32.cc
@@ -253,10 +261,16 @@ DEFINE_bool(print_cumulative_gc_stat, false,
"print cumulative GC statistics in name=value format on exit")
DEFINE_bool(trace_gc_verbose, false,
"print more details following each garbage collection")
+DEFINE_bool(trace_fragmentation, false,
+ "report fragmentation for old pointer and data pages")
DEFINE_bool(collect_maps, true,
"garbage collect maps from which no objects can be reached")
DEFINE_bool(flush_code, true,
"flush code that we expect not to use again before full gc")
+DEFINE_bool(incremental_marking, true, "use incremental marking")
+DEFINE_bool(incremental_marking_steps, true, "do incremental marking steps")
+DEFINE_bool(trace_incremental_marking, false,
+ "trace progress of the incremental marking")
// v8.cc
DEFINE_bool(use_idle_notification, true,
@@ -276,8 +290,13 @@ DEFINE_bool(native_code_counters, false,
// mark-compact.cc
DEFINE_bool(always_compact, false, "Perform compaction on every full GC")
+DEFINE_bool(lazy_sweeping, true,
+ "Use lazy sweeping for old pointer and data spaces")
+DEFINE_bool(cleanup_caches_in_maps_at_gc, true,
+ "Flush code caches in maps during mark compact cycle.")
DEFINE_bool(never_compact, false,
"Never perform compaction on full GC - testing only")
+DEFINE_bool(compact_code_space, false, "Compact code space")
DEFINE_bool(cleanup_code_caches_at_gc, true,
"Flush inline caches prior to mark compact collection and "
"flush code caches in maps during mark compact cycle.")
@@ -288,9 +307,6 @@ DEFINE_int(random_seed, 0,
DEFINE_bool(canonicalize_object_literal_maps, true,
"Canonicalize maps for object literals.")
-DEFINE_bool(use_big_map_space, true,
- "Use big map space, but don't compact if it grew too big.")
-
DEFINE_int(max_map_space_pages, MapSpace::kMaxMapPageIndex - 1,
"Maximum number of pages in map space which still allows to encode "
"forwarding pointers. That's actually a constant, but it's useful "
@@ -305,11 +321,11 @@ DEFINE_bool(use_verbose_printer, true, "allows verbose printing")
// parser.cc
DEFINE_bool(allow_natives_syntax, false, "allow natives syntax")
-DEFINE_bool(strict_mode, true, "allow strict mode directives")
// simulator-arm.cc and simulator-mips.cc
DEFINE_bool(trace_sim, false, "Trace simulator execution")
-DEFINE_bool(check_icache, false, "Check icache flushes in ARM simulator")
+DEFINE_bool(check_icache, false,
+ "Check icache flushes in ARM and MIPS simulator")
DEFINE_int(stop_sim_at, 0, "Simulator stop after x number of instructions")
DEFINE_int(sim_stack_alignment, 8,
"Stack alingment in bytes in simulator (4 or 8, 8 is default)")
@@ -326,7 +342,6 @@ DEFINE_bool(preemption, false,
// Regexp
DEFINE_bool(regexp_optimization, true, "generate optimized regexp code")
-DEFINE_bool(regexp_entry_native, true, "use native code to enter regexp")
// Testing flags test/cctest/test-{flags,api,serialization}.cc
DEFINE_bool(testing_bool_flag, true, "testing_bool_flag")
@@ -348,11 +363,15 @@ DEFINE_string(testing_serialization_file, "/tmp/serdes",
DEFINE_bool(help, false, "Print usage message, including flags, on console")
DEFINE_bool(dump_counters, false, "Dump counters on exit")
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
DEFINE_bool(debugger, false, "Enable JavaScript debugger")
DEFINE_bool(remote_debugger, false, "Connect JavaScript debugger to the "
"debugger agent in another process")
DEFINE_bool(debugger_agent, false, "Enable debugger agent")
DEFINE_int(debugger_port, 5858, "Port to use for remote debugging")
+#endif // ENABLE_DEBUGGER_SUPPORT
+
DEFINE_string(map_counters, "", "Map counters to a file")
DEFINE_args(js_arguments, JSArguments(),
"Pass all remaining arguments to the script. Alias for \"--\".")
@@ -378,6 +397,15 @@ DEFINE_bool(gdbjit_dump, false, "dump elf objects with debug info to disk")
DEFINE_string(gdbjit_dump_filter, "",
"dump only objects containing this substring")
+// mark-compact.cc
+DEFINE_bool(force_marking_deque_overflows, false,
+ "force overflows of marking deque by reducing it's size "
+ "to 64 words")
+
+DEFINE_bool(stress_compaction, false,
+ "stress the GC compactor to flush out bugs (implies "
+ "--force_marking_deque_overflows)")
+
//
// Debug only flags
//
@@ -404,7 +432,6 @@ DEFINE_bool(print_json_ast, false, "print source AST as JSON")
DEFINE_bool(print_builtin_json_ast, false,
"print source AST for builtins as JSON")
DEFINE_string(stop_at, "", "function name where to insert a breakpoint")
-DEFINE_bool(verify_stack_height, false, "verify stack height tracing on ia32")
// compiler.cc
DEFINE_bool(print_builtin_scopes, false, "print scopes for builtins")
@@ -501,6 +528,9 @@ DEFINE_bool(ll_prof, false, "Enable low-level linux profiler.")
#define FLAG FLAG_READONLY
#endif
+// elements.cc
+DEFINE_bool(trace_elements_transitions, false, "trace elements transitions")
+
// code-stubs.cc
DEFINE_bool(print_code_stubs, false, "print code stubs")
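Each DEFINE_bool entry above expands into a FLAG_<name> global that code consults at runtime, and the flag parser accepts --<name>/--no<name> on the command line (e.g. --trace_incremental_marking). A minimal consumption sketch, with an illustrative call site:

if (FLAG_trace_incremental_marking) {
  PrintF("[IncrementalMarking] Start\n");  // illustrative trace output
}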
diff --git a/deps/v8/src/frames-inl.h b/deps/v8/src/frames-inl.h
index 7ba79bf1b..af3ae3dfb 100644
--- a/deps/v8/src/frames-inl.h
+++ b/deps/v8/src/frames-inl.h
@@ -68,7 +68,7 @@ inline bool StackHandler::includes(Address address) const {
inline void StackHandler::Iterate(ObjectVisitor* v, Code* holder) const {
v->VisitPointer(context_address());
- StackFrame::IteratePc(v, pc_address(), holder);
+ v->VisitPointer(code_address());
}
@@ -77,9 +77,24 @@ inline StackHandler* StackHandler::FromAddress(Address address) {
}
-inline StackHandler::State StackHandler::state() const {
+inline bool StackHandler::is_entry() const {
+ return kind() == ENTRY;
+}
+
+
+inline bool StackHandler::is_try_catch() const {
+ return kind() == TRY_CATCH;
+}
+
+
+inline bool StackHandler::is_try_finally() const {
+ return kind() == TRY_FINALLY;
+}
+
+
+inline StackHandler::Kind StackHandler::kind() const {
const int offset = StackHandlerConstants::kStateOffset;
- return static_cast<State>(Memory::int_at(address() + offset));
+ return KindField::decode(Memory::unsigned_at(address() + offset));
}
@@ -89,9 +104,9 @@ inline Object** StackHandler::context_address() const {
}
-inline Address* StackHandler::pc_address() const {
- const int offset = StackHandlerConstants::kPCOffset;
- return reinterpret_cast<Address*>(address() + offset);
+inline Object** StackHandler::code_address() const {
+ const int offset = StackHandlerConstants::kCodeOffset;
+ return reinterpret_cast<Object**>(address() + offset);
}
@@ -105,8 +120,33 @@ inline StackHandler* StackFrame::top_handler() const {
}
+inline Code* StackFrame::LookupCode() const {
+ return GetContainingCode(isolate(), pc());
+}
+
+
inline Code* StackFrame::GetContainingCode(Isolate* isolate, Address pc) {
- return isolate->pc_to_code_cache()->GetCacheEntry(pc)->code;
+ return isolate->inner_pointer_to_code_cache()->GetCacheEntry(pc)->code;
+}
+
+
+inline EntryFrame::EntryFrame(StackFrameIterator* iterator)
+ : StackFrame(iterator) {
+}
+
+
+inline EntryConstructFrame::EntryConstructFrame(StackFrameIterator* iterator)
+ : EntryFrame(iterator) {
+}
+
+
+inline ExitFrame::ExitFrame(StackFrameIterator* iterator)
+ : StackFrame(iterator) {
+}
+
+
+inline StandardFrame::StandardFrame(StackFrameIterator* iterator)
+ : StackFrame(iterator) {
}
@@ -155,6 +195,11 @@ inline bool StandardFrame::IsConstructFrame(Address fp) {
}
+inline JavaScriptFrame::JavaScriptFrame(StackFrameIterator* iterator)
+ : StandardFrame(iterator) {
+}
+
+
Address JavaScriptFrame::GetParameterSlot(int index) const {
int param_count = ComputeParametersCount();
ASSERT(-1 <= index && index < param_count);
@@ -190,6 +235,26 @@ inline Object* JavaScriptFrame::function() const {
}
+inline OptimizedFrame::OptimizedFrame(StackFrameIterator* iterator)
+ : JavaScriptFrame(iterator) {
+}
+
+
+inline ArgumentsAdaptorFrame::ArgumentsAdaptorFrame(
+ StackFrameIterator* iterator) : JavaScriptFrame(iterator) {
+}
+
+
+inline InternalFrame::InternalFrame(StackFrameIterator* iterator)
+ : StandardFrame(iterator) {
+}
+
+
+inline ConstructFrame::ConstructFrame(StackFrameIterator* iterator)
+ : InternalFrame(iterator) {
+}
+
+
template<typename Iterator>
inline JavaScriptFrameIteratorTemp<Iterator>::JavaScriptFrameIteratorTemp(
Isolate* isolate)
@@ -197,6 +262,15 @@ inline JavaScriptFrameIteratorTemp<Iterator>::JavaScriptFrameIteratorTemp(
if (!done()) Advance();
}
+
+template<typename Iterator>
+inline JavaScriptFrameIteratorTemp<Iterator>::JavaScriptFrameIteratorTemp(
+ Isolate* isolate, ThreadLocalTop* top)
+ : iterator_(isolate, top) {
+ if (!done()) Advance();
+}
+
+
template<typename Iterator>
inline JavaScriptFrame* JavaScriptFrameIteratorTemp<Iterator>::frame() const {
// TODO(1233797): The frame hierarchy needs to change. It's
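For orientation, a hedged sketch of the iterator in use; the new (Isolate*, ThreadLocalTop*) overload performs the same walk over another thread's archived top:

// Walk the JavaScript frames of the current thread, top-down.
for (JavaScriptFrameIterator it(isolate); !it.done(); it.Advance()) {
  it.frame()->function()->ShortPrint(stdout);
}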
diff --git a/deps/v8/src/frames.cc b/deps/v8/src/frames.cc
index bebd10a80..9fd00422a 100644
--- a/deps/v8/src/frames.cc
+++ b/deps/v8/src/frames.cc
@@ -366,16 +366,17 @@ void SafeStackTraceFrameIterator::Advance() {
Code* StackFrame::GetSafepointData(Isolate* isolate,
- Address pc,
+ Address inner_pointer,
SafepointEntry* safepoint_entry,
unsigned* stack_slots) {
- PcToCodeCache::PcToCodeCacheEntry* entry =
- isolate->pc_to_code_cache()->GetCacheEntry(pc);
+ InnerPointerToCodeCache::InnerPointerToCodeCacheEntry* entry =
+ isolate->inner_pointer_to_code_cache()->GetCacheEntry(inner_pointer);
if (!entry->safepoint_entry.is_valid()) {
- entry->safepoint_entry = entry->code->GetSafepointEntry(pc);
+ entry->safepoint_entry = entry->code->GetSafepointEntry(inner_pointer);
ASSERT(entry->safepoint_entry.is_valid());
} else {
- ASSERT(entry->safepoint_entry.Equals(entry->code->GetSafepointEntry(pc)));
+ ASSERT(entry->safepoint_entry.Equals(
+ entry->code->GetSafepointEntry(inner_pointer)));
}
// Fill in the results and return the code.
@@ -392,11 +393,16 @@ bool StackFrame::HasHandler() const {
}
+#ifdef DEBUG
+static bool GcSafeCodeContains(HeapObject* object, Address addr);
+#endif
+
+
void StackFrame::IteratePc(ObjectVisitor* v,
Address* pc_address,
Code* holder) {
Address pc = *pc_address;
- ASSERT(holder->contains(pc));
+ ASSERT(GcSafeCodeContains(holder, pc));
unsigned pc_offset = static_cast<unsigned>(pc - holder->instruction_start());
Object* code = holder;
v->VisitPointer(&code);
@@ -705,6 +711,69 @@ void JavaScriptFrame::Summarize(List<FrameSummary>* functions) {
}
+void JavaScriptFrame::PrintTop(FILE* file,
+ bool print_args,
+ bool print_line_number) {
+ // Print the topmost JavaScript frame on the stack, if any.
+ HandleScope scope;
+ AssertNoAllocation no_allocation;
+ JavaScriptFrameIterator it;
+ while (!it.done()) {
+ if (it.frame()->is_java_script()) {
+ JavaScriptFrame* frame = it.frame();
+ if (frame->IsConstructor()) PrintF(file, "new ");
+ // function name
+ Object* fun = frame->function();
+ if (fun->IsJSFunction()) {
+ SharedFunctionInfo* shared = JSFunction::cast(fun)->shared();
+ shared->DebugName()->ShortPrint(file);
+ if (print_line_number) {
+ Address pc = frame->pc();
+ Code* code = Code::cast(
+ v8::internal::Isolate::Current()->heap()->FindCodeObject(pc));
+ int source_pos = code->SourcePosition(pc);
+ Object* maybe_script = shared->script();
+ if (maybe_script->IsScript()) {
+ Handle<Script> script(Script::cast(maybe_script));
+ int line = GetScriptLineNumberSafe(script, source_pos) + 1;
+ Object* script_name_raw = script->name();
+ if (script_name_raw->IsString()) {
+ String* script_name = String::cast(script->name());
+ SmartArrayPointer<char> c_script_name =
+ script_name->ToCString(DISALLOW_NULLS,
+ ROBUST_STRING_TRAVERSAL);
+ PrintF(file, " at %s:%d", *c_script_name, line);
+ } else {
+ PrintF(file, "at <unknown>:%d", line);
+ }
+ } else {
+ PrintF(file, " at <unknown>:<unknown>");
+ }
+ }
+ } else {
+ fun->ShortPrint(file);
+ }
+
+ if (print_args) {
+ // Function arguments: we intentionally print only the
+ // parameters that were actually supplied, not all of the
+ // formal parameters.
+ PrintF(file, "(this=");
+ frame->receiver()->ShortPrint(file);
+ const int length = frame->ComputeParametersCount();
+ for (int i = 0; i < length; i++) {
+ PrintF(file, ", ");
+ frame->GetParameter(i)->ShortPrint(file);
+ }
+ PrintF(file, ")");
+ }
+ break;
+ }
+ it.Advance();
+ }
+}
+
+
void FrameSummary::Print() {
PrintF("receiver: ");
receiver_->ShortPrint();
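A hypothetical call site for the new helper:

// Dump the topmost JavaScript frame, with arguments and line number.
JavaScriptFrame::PrintTop(stdout, true /* print_args */,
                          true /* print_line_number */);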
@@ -819,7 +888,8 @@ DeoptimizationInputData* OptimizedFrame::GetDeoptimizationData(
// back to a slow search in this case to find the original optimized
// code object.
if (!code->contains(pc())) {
- code = isolate()->pc_to_code_cache()->GcSafeFindCodeForPc(pc());
+ code = isolate()->inner_pointer_to_code_cache()->
+ GcSafeFindCodeForInnerPointer(pc());
}
ASSERT(code != NULL);
ASSERT(code->kind() == Code::OPTIMIZED_FUNCTION);
@@ -881,6 +951,11 @@ void OptimizedFrame::GetFunctions(List<JSFunction*>* functions) {
}
+int ArgumentsAdaptorFrame::GetNumberOfIncomingArguments() const {
+ return Smi::cast(GetExpression(0))->value();
+}
+
+
Address ArgumentsAdaptorFrame::GetCallerStackPointer() const {
return fp() + StandardFrameConstants::kCallerSPOffset;
}
@@ -927,11 +1002,15 @@ void JavaScriptFrame::Print(StringStream* accumulator,
if (IsConstructor()) accumulator->Add("new ");
accumulator->PrintFunction(function, receiver, &code);
- Handle<SerializedScopeInfo> scope_info(SerializedScopeInfo::Empty());
+ // Get scope information for nicer output, if possible. If code is NULL, or
+ // doesn't contain scope info, scope_info will return 0 for the number of
+ // parameters, stack local variables, context local variables, stack slots,
+ // or context slots.
+ Handle<ScopeInfo> scope_info(ScopeInfo::Empty());
if (function->IsJSFunction()) {
Handle<SharedFunctionInfo> shared(JSFunction::cast(function)->shared());
- scope_info = Handle<SerializedScopeInfo>(shared->scope_info());
+ scope_info = Handle<ScopeInfo>(shared->scope_info());
Object* script_obj = shared->script();
if (script_obj->IsScript()) {
Handle<Script> script(Script::cast(script_obj));
@@ -956,11 +1035,6 @@ void JavaScriptFrame::Print(StringStream* accumulator,
accumulator->Add("(this=%o", receiver);
- // Get scope information for nicer output, if possible. If code is
- // NULL, or doesn't contain scope info, info will return 0 for the
- // number of parameters, stack slots, or context slots.
- ScopeInfo<PreallocatedStorage> info(*scope_info);
-
// Print the parameters.
int parameters_count = ComputeParametersCount();
for (int i = 0; i < parameters_count; i++) {
@@ -968,8 +1042,8 @@ void JavaScriptFrame::Print(StringStream* accumulator,
// If we have a name for the parameter we print it. Nameless
// parameters are either because we have more actual parameters
// than formal parameters or because we have no scope information.
- if (i < info.number_of_parameters()) {
- accumulator->PrintName(*info.parameter_name(i));
+ if (i < scope_info->ParameterCount()) {
+ accumulator->PrintName(scope_info->ParameterName(i));
accumulator->Add("=");
}
accumulator->Add("%o", GetParameter(i));
@@ -987,8 +1061,8 @@ void JavaScriptFrame::Print(StringStream* accumulator,
accumulator->Add(" {\n");
// Compute the number of locals and expression stack elements.
- int stack_locals_count = info.number_of_stack_slots();
- int heap_locals_count = info.number_of_context_slots();
+ int stack_locals_count = scope_info->StackLocalCount();
+ int heap_locals_count = scope_info->ContextLocalCount();
int expressions_count = ComputeExpressionsCount();
// Print stack-allocated local variables.
@@ -997,7 +1071,7 @@ void JavaScriptFrame::Print(StringStream* accumulator,
}
for (int i = 0; i < stack_locals_count; i++) {
accumulator->Add(" var ");
- accumulator->PrintName(*info.stack_slot_name(i));
+ accumulator->PrintName(scope_info->StackLocalName(i));
accumulator->Add(" = ");
if (i < expressions_count) {
accumulator->Add("%o", GetExpression(i));
@@ -1014,16 +1088,16 @@ void JavaScriptFrame::Print(StringStream* accumulator,
}
// Print heap-allocated local variables.
- if (heap_locals_count > Context::MIN_CONTEXT_SLOTS) {
+ if (heap_locals_count > 0) {
accumulator->Add(" // heap-allocated locals\n");
}
- for (int i = Context::MIN_CONTEXT_SLOTS; i < heap_locals_count; i++) {
+ for (int i = 0; i < heap_locals_count; i++) {
accumulator->Add(" var ");
- accumulator->PrintName(*info.context_slot_name(i));
+ accumulator->PrintName(scope_info->ContextLocalName(i));
accumulator->Add(" = ");
if (context != NULL) {
if (i < context->length()) {
- accumulator->Add("%o", context->get(i));
+ accumulator->Add("%o", context->get(Context::MIN_CONTEXT_SLOTS + i));
} else {
accumulator->Add(
"// warning: missing context slot - inconsistent frame?");
@@ -1155,52 +1229,89 @@ JavaScriptFrame* StackFrameLocator::FindJavaScriptFrame(int n) {
// -------------------------------------------------------------------------
-Code* PcToCodeCache::GcSafeCastToCode(HeapObject* object, Address pc) {
+static Map* GcSafeMapOfCodeSpaceObject(HeapObject* object) {
+ MapWord map_word = object->map_word();
+ return map_word.IsForwardingAddress() ?
+ map_word.ToForwardingAddress()->map() : map_word.ToMap();
+}
+
+
+static int GcSafeSizeOfCodeSpaceObject(HeapObject* object) {
+ return object->SizeFromMap(GcSafeMapOfCodeSpaceObject(object));
+}
+
+
+#ifdef DEBUG
+static bool GcSafeCodeContains(HeapObject* code, Address addr) {
+ Map* map = GcSafeMapOfCodeSpaceObject(code);
+ ASSERT(map == code->GetHeap()->code_map());
+ Address start = code->address();
+ Address end = code->address() + code->SizeFromMap(map);
+ return start <= addr && addr < end;
+}
+#endif
+
+
+Code* InnerPointerToCodeCache::GcSafeCastToCode(HeapObject* object,
+ Address inner_pointer) {
Code* code = reinterpret_cast<Code*>(object);
- ASSERT(code != NULL && code->contains(pc));
+ ASSERT(code != NULL && GcSafeCodeContains(code, inner_pointer));
return code;
}
-Code* PcToCodeCache::GcSafeFindCodeForPc(Address pc) {
+Code* InnerPointerToCodeCache::GcSafeFindCodeForInnerPointer(
+ Address inner_pointer) {
Heap* heap = isolate_->heap();
- // Check if the pc points into a large object chunk.
- LargeObjectChunk* chunk = heap->lo_space()->FindChunkContainingPc(pc);
- if (chunk != NULL) return GcSafeCastToCode(chunk->GetObject(), pc);
-
- // Iterate through the 8K page until we reach the end or find an
- // object starting after the pc.
- Page* page = Page::FromAddress(pc);
- HeapObjectIterator iterator(page, heap->GcSafeSizeOfOldObjectFunction());
- HeapObject* previous = NULL;
+ // Check if the inner pointer points into a large object chunk.
+ LargePage* large_page = heap->lo_space()->FindPageContainingPc(inner_pointer);
+ if (large_page != NULL) {
+ return GcSafeCastToCode(large_page->GetObject(), inner_pointer);
+ }
+
+ // Iterate through the page until we reach the end or find an object starting
+ // after the inner pointer.
+ Page* page = Page::FromAddress(inner_pointer);
+
+ Address addr = page->skip_list()->StartFor(inner_pointer);
+
+ Address top = heap->code_space()->top();
+ Address limit = heap->code_space()->limit();
+
while (true) {
- HeapObject* next = iterator.next();
- if (next == NULL || next->address() >= pc) {
- return GcSafeCastToCode(previous, pc);
+ if (addr == top && addr != limit) {
+ addr = limit;
+ continue;
}
- previous = next;
+
+ HeapObject* obj = HeapObject::FromAddress(addr);
+ int obj_size = GcSafeSizeOfCodeSpaceObject(obj);
+ Address next_addr = addr + obj_size;
+ if (next_addr > inner_pointer) return GcSafeCastToCode(obj, inner_pointer);
+ addr = next_addr;
}
}
-PcToCodeCache::PcToCodeCacheEntry* PcToCodeCache::GetCacheEntry(Address pc) {
+InnerPointerToCodeCache::InnerPointerToCodeCacheEntry*
+ InnerPointerToCodeCache::GetCacheEntry(Address inner_pointer) {
isolate_->counters()->pc_to_code()->Increment();
- ASSERT(IsPowerOf2(kPcToCodeCacheSize));
+ ASSERT(IsPowerOf2(kInnerPointerToCodeCacheSize));
uint32_t hash = ComputeIntegerHash(
- static_cast<uint32_t>(reinterpret_cast<uintptr_t>(pc)));
- uint32_t index = hash & (kPcToCodeCacheSize - 1);
- PcToCodeCacheEntry* entry = cache(index);
- if (entry->pc == pc) {
+ static_cast<uint32_t>(reinterpret_cast<uintptr_t>(inner_pointer)));
+ uint32_t index = hash & (kInnerPointerToCodeCacheSize - 1);
+ InnerPointerToCodeCacheEntry* entry = cache(index);
+ if (entry->inner_pointer == inner_pointer) {
isolate_->counters()->pc_to_code_cached()->Increment();
- ASSERT(entry->code == GcSafeFindCodeForPc(pc));
+ ASSERT(entry->code == GcSafeFindCodeForInnerPointer(inner_pointer));
} else {
// Because this code may be interrupted by a profiling signal that
- // also queries the cache, we cannot update pc before the code has
- // been set. Otherwise, we risk trying to use a cache entry before
+ // also queries the cache, we cannot update inner_pointer before the code
+ // has been set. Otherwise, we risk trying to use a cache entry before
// the code has been computed.
- entry->code = GcSafeFindCodeForPc(pc);
+ entry->code = GcSafeFindCodeForInnerPointer(inner_pointer);
entry->safepoint_entry.Reset();
- entry->pc = pc;
+ entry->inner_pointer = inner_pointer;
}
return entry;
}
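The renamed cache keeps its direct-mapped, power-of-two layout; reduced to its essentials, the lookup above is (names condensed for illustration):

// Direct-mapped cache sketch: one entry per hash bucket, overwritten
// on miss. kSize must be a power of two so 'hash & (kSize - 1)' is a
// cheap modulo.
static const int kSize = 1024;
struct Entry { Address inner_pointer; Code* code; };
static Entry cache[kSize];

Entry* Lookup(Address inner_pointer) {
  uint32_t hash = ComputeIntegerHash(
      static_cast<uint32_t>(reinterpret_cast<uintptr_t>(inner_pointer)));
  return &cache[hash & (kSize - 1)];
}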
diff --git a/deps/v8/src/frames.h b/deps/v8/src/frames.h
index fed11c4fa..2c5e571ed 100644
--- a/deps/v8/src/frames.h
+++ b/deps/v8/src/frames.h
@@ -49,47 +49,52 @@ class StackFrameIterator;
class ThreadLocalTop;
class Isolate;
-class PcToCodeCache {
+class InnerPointerToCodeCache {
public:
- struct PcToCodeCacheEntry {
- Address pc;
+ struct InnerPointerToCodeCacheEntry {
+ Address inner_pointer;
Code* code;
SafepointEntry safepoint_entry;
};
- explicit PcToCodeCache(Isolate* isolate) : isolate_(isolate) {
+ explicit InnerPointerToCodeCache(Isolate* isolate) : isolate_(isolate) {
Flush();
}
- Code* GcSafeFindCodeForPc(Address pc);
- Code* GcSafeCastToCode(HeapObject* object, Address pc);
+ Code* GcSafeFindCodeForInnerPointer(Address inner_pointer);
+ Code* GcSafeCastToCode(HeapObject* object, Address inner_pointer);
void Flush() {
memset(&cache_[0], 0, sizeof(cache_));
}
- PcToCodeCacheEntry* GetCacheEntry(Address pc);
+ InnerPointerToCodeCacheEntry* GetCacheEntry(Address inner_pointer);
private:
- PcToCodeCacheEntry* cache(int index) { return &cache_[index]; }
+ InnerPointerToCodeCacheEntry* cache(int index) { return &cache_[index]; }
Isolate* isolate_;
- static const int kPcToCodeCacheSize = 1024;
- PcToCodeCacheEntry cache_[kPcToCodeCacheSize];
+ static const int kInnerPointerToCodeCacheSize = 1024;
+ InnerPointerToCodeCacheEntry cache_[kInnerPointerToCodeCacheSize];
- DISALLOW_COPY_AND_ASSIGN(PcToCodeCache);
+ DISALLOW_COPY_AND_ASSIGN(InnerPointerToCodeCache);
};
class StackHandler BASE_EMBEDDED {
public:
- enum State {
+ enum Kind {
ENTRY,
TRY_CATCH,
TRY_FINALLY
};
+ static const int kKindWidth = 2;
+ static const int kOffsetWidth = 32 - kKindWidth;
+ class KindField: public BitField<StackHandler::Kind, 0, kKindWidth> {};
+ class OffsetField: public BitField<unsigned, kKindWidth, kOffsetWidth> {};
+
// Get the address of this stack handler.
inline Address address() const;
@@ -106,16 +111,16 @@ class StackHandler BASE_EMBEDDED {
static inline StackHandler* FromAddress(Address address);
// Testers
- bool is_entry() { return state() == ENTRY; }
- bool is_try_catch() { return state() == TRY_CATCH; }
- bool is_try_finally() { return state() == TRY_FINALLY; }
+ inline bool is_entry() const;
+ inline bool is_try_catch() const;
+ inline bool is_try_finally() const;
private:
// Accessors.
- inline State state() const;
+ inline Kind kind() const;
inline Object** context_address() const;
- inline Address* pc_address() const;
+ inline Object** code_address() const;
DISALLOW_IMPLICIT_CONSTRUCTORS(StackHandler);
};
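With the State word replaced by a packed Kind/offset pair, encoding and decoding follow the usual BitField pattern (a sketch; handler_offset is a placeholder value):

// Pack a handler kind and a 30-bit offset into the single word that
// kStateOffset now holds (offset must fit in kOffsetWidth bits).
unsigned handler_offset = 0;  // placeholder for illustration
unsigned state =
    StackHandler::KindField::encode(StackHandler::TRY_CATCH) |
    StackHandler::OffsetField::encode(handler_offset);
// ...and recover them later, as frames-inl.h now does for kind():
StackHandler::Kind kind = StackHandler::KindField::decode(state);
unsigned offset = StackHandler::OffsetField::decode(state);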
@@ -139,7 +144,10 @@ class StackFrame BASE_EMBEDDED {
enum Type {
NONE = 0,
STACK_FRAME_TYPE_LIST(DECLARE_TYPE)
- NUMBER_OF_TYPES
+ NUMBER_OF_TYPES,
+ // Used by FrameScope to indicate that the stack frame is constructed
+ // manually and the FrameScope does not need to emit code.
+ MANUAL
};
#undef DECLARE_TYPE
@@ -215,9 +223,7 @@ class StackFrame BASE_EMBEDDED {
virtual Code* unchecked_code() const = 0;
// Get the code associated with this frame.
- Code* LookupCode() const {
- return GetContainingCode(isolate(), pc());
- }
+ inline Code* LookupCode() const;
// Get the code object that contains the given pc.
static inline Code* GetContainingCode(Isolate* isolate, Address pc);
@@ -299,7 +305,7 @@ class EntryFrame: public StackFrame {
virtual void SetCallerFp(Address caller_fp);
protected:
- explicit EntryFrame(StackFrameIterator* iterator) : StackFrame(iterator) { }
+ inline explicit EntryFrame(StackFrameIterator* iterator);
// The caller stack pointer for entry frames is always zero. The
// real information about the caller frame is available through the
@@ -326,8 +332,7 @@ class EntryConstructFrame: public EntryFrame {
}
protected:
- explicit EntryConstructFrame(StackFrameIterator* iterator)
- : EntryFrame(iterator) { }
+ inline explicit EntryConstructFrame(StackFrameIterator* iterator);
private:
friend class StackFrameIterator;
@@ -361,7 +366,7 @@ class ExitFrame: public StackFrame {
static void FillState(Address fp, Address sp, State* state);
protected:
- explicit ExitFrame(StackFrameIterator* iterator) : StackFrame(iterator) { }
+ inline explicit ExitFrame(StackFrameIterator* iterator);
virtual Address GetCallerStackPointer() const;
@@ -394,8 +399,7 @@ class StandardFrame: public StackFrame {
}
protected:
- explicit StandardFrame(StackFrameIterator* iterator)
- : StackFrame(iterator) { }
+ inline explicit StandardFrame(StackFrameIterator* iterator);
virtual void ComputeCallerState(State* state) const;
@@ -513,9 +517,10 @@ class JavaScriptFrame: public StandardFrame {
return static_cast<JavaScriptFrame*>(frame);
}
+ static void PrintTop(FILE* file, bool print_args, bool print_line_number);
+
protected:
- explicit JavaScriptFrame(StackFrameIterator* iterator)
- : StandardFrame(iterator) { }
+ inline explicit JavaScriptFrame(StackFrameIterator* iterator);
virtual Address GetCallerStackPointer() const;
@@ -552,8 +557,7 @@ class OptimizedFrame : public JavaScriptFrame {
DeoptimizationInputData* GetDeoptimizationData(int* deopt_index);
protected:
- explicit OptimizedFrame(StackFrameIterator* iterator)
- : JavaScriptFrame(iterator) { }
+ inline explicit OptimizedFrame(StackFrameIterator* iterator);
private:
friend class StackFrameIterator;
@@ -581,12 +585,9 @@ class ArgumentsAdaptorFrame: public JavaScriptFrame {
int index) const;
protected:
- explicit ArgumentsAdaptorFrame(StackFrameIterator* iterator)
- : JavaScriptFrame(iterator) { }
+ inline explicit ArgumentsAdaptorFrame(StackFrameIterator* iterator);
- virtual int GetNumberOfIncomingArguments() const {
- return Smi::cast(GetExpression(0))->value();
- }
+ virtual int GetNumberOfIncomingArguments() const;
virtual Address GetCallerStackPointer() const;
@@ -611,8 +612,7 @@ class InternalFrame: public StandardFrame {
}
protected:
- explicit InternalFrame(StackFrameIterator* iterator)
- : StandardFrame(iterator) { }
+ inline explicit InternalFrame(StackFrameIterator* iterator);
virtual Address GetCallerStackPointer() const;
@@ -633,8 +633,7 @@ class ConstructFrame: public InternalFrame {
}
protected:
- explicit ConstructFrame(StackFrameIterator* iterator)
- : InternalFrame(iterator) { }
+ inline explicit ConstructFrame(StackFrameIterator* iterator);
private:
friend class StackFrameIterator;
@@ -710,20 +709,26 @@ class JavaScriptFrameIteratorTemp BASE_EMBEDDED {
inline explicit JavaScriptFrameIteratorTemp(Isolate* isolate);
+ inline JavaScriptFrameIteratorTemp(Isolate* isolate, ThreadLocalTop* top);
+
// Skip frames until the frame with the given id is reached.
explicit JavaScriptFrameIteratorTemp(StackFrame::Id id) { AdvanceToId(id); }
inline JavaScriptFrameIteratorTemp(Isolate* isolate, StackFrame::Id id);
- JavaScriptFrameIteratorTemp(Address fp, Address sp,
- Address low_bound, Address high_bound) :
+ JavaScriptFrameIteratorTemp(Address fp,
+ Address sp,
+ Address low_bound,
+ Address high_bound) :
iterator_(fp, sp, low_bound, high_bound) {
if (!done()) Advance();
}
JavaScriptFrameIteratorTemp(Isolate* isolate,
- Address fp, Address sp,
- Address low_bound, Address high_bound) :
+ Address fp,
+ Address sp,
+ Address low_bound,
+ Address high_bound) :
iterator_(isolate, fp, sp, low_bound, high_bound) {
if (!done()) Advance();
}
diff --git a/deps/v8/src/full-codegen.cc b/deps/v8/src/full-codegen.cc
index 807387413..04086d483 100644
--- a/deps/v8/src/full-codegen.cc
+++ b/deps/v8/src/full-codegen.cc
@@ -244,11 +244,6 @@ void BreakableStatementChecker::VisitBinaryOperation(BinaryOperation* expr) {
}
-void BreakableStatementChecker::VisitCompareToNull(CompareToNull* expr) {
- Visit(expr->expression());
-}
-
-
void BreakableStatementChecker::VisitCompareOperation(CompareOperation* expr) {
Visit(expr->left());
Visit(expr->right());
@@ -291,12 +286,16 @@ bool FullCodeGenerator::MakeCode(CompilationInfo* info) {
code->set_optimizable(info->IsOptimizable());
cgen.PopulateDeoptimizationData(code);
code->set_has_deoptimization_support(info->HasDeoptimizationSupport());
+ code->set_handler_table(*cgen.handler_table());
+#ifdef ENABLE_DEBUGGER_SUPPORT
code->set_has_debug_break_slots(
info->isolate()->debugger()->IsDebuggerActive());
+ code->set_compiled_optimizable(info->IsOptimizable());
+#endif // ENABLE_DEBUGGER_SUPPORT
code->set_allow_osr_at_loop_nesting_level(0);
code->set_stack_check_table_offset(table_offset);
CodeGenerator::PrintCode(code, info);
- info->SetCode(code); // may be an empty handle.
+ info->SetCode(code); // May be an empty handle.
#ifdef ENABLE_GDB_JIT_INTERFACE
if (FLAG_gdbjit && !code.is_null()) {
GDBJITLineInfo* lineinfo =
@@ -363,7 +362,7 @@ void FullCodeGenerator::RecordJSReturnSite(Call* call) {
}
-void FullCodeGenerator::PrepareForBailoutForId(int id, State state) {
+void FullCodeGenerator::PrepareForBailoutForId(unsigned id, State state) {
// There's no need to prepare this code for bailouts from already optimized
// code or code that can't be optimized.
if (!FLAG_deopt || !info_->HasDeoptimizationSupport()) return;
@@ -384,10 +383,11 @@ void FullCodeGenerator::PrepareForBailoutForId(int id, State state) {
}
-void FullCodeGenerator::RecordStackCheck(int ast_id) {
+void FullCodeGenerator::RecordStackCheck(unsigned ast_id) {
// The pc offset does not need to be encoded and packed together with a
// state.
- BailoutEntry entry = { ast_id, masm_->pc_offset() };
+ ASSERT(masm_->pc_offset() > 0);
+ BailoutEntry entry = { ast_id, static_cast<unsigned>(masm_->pc_offset()) };
stack_checks_.Add(entry);
}
@@ -412,27 +412,24 @@ void FullCodeGenerator::AccumulatorValueContext::Plug(Register reg) const {
void FullCodeGenerator::StackValueContext::Plug(Register reg) const {
__ push(reg);
- codegen()->increment_stack_height();
}
void FullCodeGenerator::TestContext::Plug(Register reg) const {
// For simplicity we always test the accumulator register.
__ Move(result_register(), reg);
- codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
+ codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
codegen()->DoTest(this);
}
void FullCodeGenerator::EffectContext::PlugTOS() const {
__ Drop(1);
- codegen()->decrement_stack_height();
}
void FullCodeGenerator::AccumulatorValueContext::PlugTOS() const {
__ pop(result_register());
- codegen()->decrement_stack_height();
}
@@ -443,8 +440,7 @@ void FullCodeGenerator::StackValueContext::PlugTOS() const {
void FullCodeGenerator::TestContext::PlugTOS() const {
// For simplicity we always test the accumulator register.
__ pop(result_register());
- codegen()->decrement_stack_height();
- codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
+ codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
codegen()->DoTest(this);
}
@@ -523,8 +519,8 @@ void FullCodeGenerator::VisitDeclarations(
if (var->IsUnallocated()) {
array->set(j++, *(var->name()));
if (decl->fun() == NULL) {
- if (var->mode() == Variable::CONST) {
- // In case this is const property use the hole.
+ if (var->binding_needs_init()) {
+ // In case this binding needs initialization, use the hole.
array->set_the_hole(j++);
} else {
array->set_undefined(j++);
@@ -549,11 +545,10 @@ void FullCodeGenerator::VisitDeclarations(
int FullCodeGenerator::DeclareGlobalsFlags() {
- int flags = 0;
- if (is_eval()) flags |= kDeclareGlobalsEvalFlag;
- if (is_strict_mode()) flags |= kDeclareGlobalsStrictModeFlag;
- if (is_native()) flags |= kDeclareGlobalsNativeFlag;
- return flags;
+ ASSERT(DeclareGlobalsLanguageMode::is_valid(language_mode()));
+ return DeclareGlobalsEvalFlag::encode(is_eval()) |
+ DeclareGlobalsNativeFlag::encode(is_native()) |
+ DeclareGlobalsLanguageMode::encode(language_mode());
}
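The packed flags are decoded on the runtime side with the matching BitField accessors; schematically (a sketch, assuming the decoders mirror the encoders above):

bool is_eval = DeclareGlobalsEvalFlag::decode(flags);
bool is_native = DeclareGlobalsNativeFlag::decode(flags);
LanguageMode mode = DeclareGlobalsLanguageMode::decode(flags);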
@@ -659,14 +654,13 @@ FullCodeGenerator::InlineFunctionGenerator
}
-void FullCodeGenerator::EmitInlineRuntimeCall(CallRuntime* node) {
- ZoneList<Expression*>* args = node->arguments();
- const Runtime::Function* function = node->function();
+void FullCodeGenerator::EmitInlineRuntimeCall(CallRuntime* expr) {
+ const Runtime::Function* function = expr->function();
ASSERT(function != NULL);
ASSERT(function->intrinsic_type == Runtime::INLINE);
InlineFunctionGenerator generator =
FindInlineFunctionGenerator(function->function_id);
- ((*this).*(generator))(args);
+ ((*this).*(generator))(expr);
}
@@ -683,11 +677,25 @@ void FullCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) {
}
+void FullCodeGenerator::VisitInDuplicateContext(Expression* expr) {
+ if (context()->IsEffect()) {
+ VisitForEffect(expr);
+ } else if (context()->IsAccumulatorValue()) {
+ VisitForAccumulatorValue(expr);
+ } else if (context()->IsStackValue()) {
+ VisitForStackValue(expr);
+ } else if (context()->IsTest()) {
+ const TestContext* test = TestContext::cast(context());
+ VisitForControl(expr, test->true_label(), test->false_label(),
+ test->fall_through());
+ }
+}
+
+
void FullCodeGenerator::VisitComma(BinaryOperation* expr) {
Comment cmnt(masm_, "[ Comma");
VisitForEffect(expr->left());
- if (context()->IsTest()) ForwardBailoutToChild(expr);
- VisitInCurrentContext(expr->right());
+ VisitInDuplicateContext(expr->right());
}
@@ -709,7 +717,6 @@ void FullCodeGenerator::VisitLogicalExpression(BinaryOperation* expr) {
}
PrepareForBailoutForId(right_id, NO_REGISTERS);
__ bind(&eval_right);
- ForwardBailoutToChild(expr);
} else if (context()->IsAccumulatorValue()) {
VisitForAccumulatorValue(left);
@@ -717,7 +724,6 @@ void FullCodeGenerator::VisitLogicalExpression(BinaryOperation* expr) {
// case we need it.
__ push(result_register());
Label discard, restore;
- PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
if (is_logical_and) {
DoTest(left, &discard, &restore, &restore);
} else {
@@ -736,7 +742,6 @@ void FullCodeGenerator::VisitLogicalExpression(BinaryOperation* expr) {
// case we need it.
__ push(result_register());
Label discard;
- PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
if (is_logical_and) {
DoTest(left, &discard, &done, &discard);
} else {
@@ -758,7 +763,7 @@ void FullCodeGenerator::VisitLogicalExpression(BinaryOperation* expr) {
__ bind(&eval_right);
}
- VisitInCurrentContext(right);
+ VisitInDuplicateContext(right);
__ bind(&done);
}
@@ -785,34 +790,6 @@ void FullCodeGenerator::VisitArithmeticExpression(BinaryOperation* expr) {
}
-void FullCodeGenerator::ForwardBailoutToChild(Expression* expr) {
- if (!info_->HasDeoptimizationSupport()) return;
- ASSERT(context()->IsTest());
- ASSERT(expr == forward_bailout_stack_->expr());
- forward_bailout_pending_ = forward_bailout_stack_;
-}
-
-
-void FullCodeGenerator::VisitInCurrentContext(Expression* expr) {
- if (context()->IsTest()) {
- ForwardBailoutStack stack(expr, forward_bailout_pending_);
- ForwardBailoutStack* saved = forward_bailout_stack_;
- forward_bailout_pending_ = NULL;
- forward_bailout_stack_ = &stack;
- Visit(expr);
- forward_bailout_stack_ = saved;
- } else {
- ASSERT(forward_bailout_pending_ == NULL);
- Visit(expr);
- State state = context()->IsAccumulatorValue() ? TOS_REG : NO_REGISTERS;
- PrepareForBailout(expr, state);
- // Forwarding bailouts to children is a one shot operation. It should have
- // been processed at this point.
- ASSERT(forward_bailout_pending_ == NULL);
- }
-}
-
-
void FullCodeGenerator::VisitBlock(Block* stmt) {
Comment cmnt(masm_, "[ Block");
NestedBlock nested_block(this, stmt);
@@ -823,9 +800,18 @@ void FullCodeGenerator::VisitBlock(Block* stmt) {
if (stmt->block_scope() != NULL) {
{ Comment cmnt(masm_, "[ Extend block context");
scope_ = stmt->block_scope();
- __ Push(scope_->GetSerializedScopeInfo());
+ Handle<ScopeInfo> scope_info = scope_->GetScopeInfo();
+ int heap_slots = scope_info->ContextLength() - Context::MIN_CONTEXT_SLOTS;
+ __ Push(scope_info);
PushFunctionArgumentForContextAllocation();
- __ CallRuntime(Runtime::kPushBlockContext, 2);
+ if (heap_slots <= FastNewBlockContextStub::kMaximumSlots) {
+ FastNewBlockContextStub stub(heap_slots);
+ __ CallStub(&stub);
+ } else {
+ __ CallRuntime(Runtime::kPushBlockContext, 2);
+ }
+
+ // Replace the context stored in the frame.
StoreToFrameField(StandardFrameConstants::kContextOffset,
context_register());
}
@@ -972,7 +958,6 @@ void FullCodeGenerator::VisitWithStatement(WithStatement* stmt) {
VisitForStackValue(stmt->expression());
PushFunctionArgumentForContextAllocation();
__ CallRuntime(Runtime::kPushWithContext, 2);
- decrement_stack_height();
StoreToFrameField(StandardFrameConstants::kContextOffset, context_register());
{ WithOrCatch body(this);
@@ -1103,20 +1088,17 @@ void FullCodeGenerator::VisitForStatement(ForStatement* stmt) {
void FullCodeGenerator::VisitTryCatchStatement(TryCatchStatement* stmt) {
Comment cmnt(masm_, "[ TryCatchStatement");
SetStatementPosition(stmt);
- // The try block adds a handler to the exception handler chain
- // before entering, and removes it again when exiting normally.
- // If an exception is thrown during execution of the try block,
- // control is passed to the handler, which also consumes the handler.
- // At this point, the exception is in a register, and store it in
- // the temporary local variable (prints as ".catch-var") before
- // executing the catch block. The catch block has been rewritten
- // to introduce a new scope to bind the catch variable and to remove
- // that scope again afterwards.
-
- Label try_handler_setup, done;
- __ Call(&try_handler_setup);
- // Try handler code, exception in result register.
-
+ // The try block adds a handler to the exception handler chain before
+ // entering, and removes it again when exiting normally. If an exception
+ // is thrown during execution of the try block, the handler is consumed
+ // and control is passed to the catch block with the exception in the
+ // result register.
+
+ Label try_entry, handler_entry, exit;
+ __ jmp(&try_entry);
+ __ bind(&handler_entry);
+ handler_table()->set(stmt->index(), Smi::FromInt(handler_entry.pos()));
+ // Exception handler code; the exception is in the result register.
// Extend the context before executing the catch block.
{ Comment cmnt(masm_, "[ Extend catch context");
__ Push(stmt->variable()->name());
@@ -1130,27 +1112,23 @@ void FullCodeGenerator::VisitTryCatchStatement(TryCatchStatement* stmt) {
Scope* saved_scope = scope();
scope_ = stmt->scope();
ASSERT(scope_->declarations()->is_empty());
- { WithOrCatch body(this);
+ { WithOrCatch catch_body(this);
Visit(stmt->catch_block());
}
// Restore the context.
LoadContextField(context_register(), Context::PREVIOUS_INDEX);
StoreToFrameField(StandardFrameConstants::kContextOffset, context_register());
scope_ = saved_scope;
- __ jmp(&done);
+ __ jmp(&exit);
// Try block code. Sets up the exception handler chain.
- __ bind(&try_handler_setup);
- {
- const int delta = StackHandlerConstants::kSize / kPointerSize;
- TryCatch try_block(this);
- __ PushTryHandler(IN_JAVASCRIPT, TRY_CATCH_HANDLER);
- increment_stack_height(delta);
+ __ bind(&try_entry);
+ __ PushTryHandler(IN_JAVASCRIPT, TRY_CATCH_HANDLER, stmt->index());
+ { TryCatch try_body(this);
Visit(stmt->try_block());
- __ PopTryHandler();
- decrement_stack_height(delta);
}
- __ bind(&done);
+ __ PopTryHandler();
+ __ bind(&exit);
}
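Try/catch and try/finally now record each handler's code offset in a per-function handler table, indexed by the statement's handler index (see handler_table()->set(...) above). A hedged sketch of the reverse lookup an unwinder could perform (LookupHandlerOffset is hypothetical; the assumed layout is one Smi-encoded offset per index):

// Hypothetical helper: map a handler index back to the code offset
// stored by the generator above.
static int LookupHandlerOffset(FixedArray* handler_table, int handler_index) {
  return Smi::cast(handler_table->get(handler_index))->value();
}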
@@ -1162,12 +1140,12 @@ void FullCodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
//
// The try-finally construct can enter the finally block in three ways:
// 1. By exiting the try-block normally. This removes the try-handler and
- // calls the finally block code before continuing.
+ // calls the finally block code before continuing.
// 2. By exiting the try-block with a function-local control flow transfer
// (break/continue/return). The site of the, e.g., break removes the
// try handler and calls the finally block code before continuing
// its outward control transfer.
- // 3. by exiting the try-block with a thrown exception.
+ // 3. By exiting the try-block with a thrown exception.
// This can happen in nested function calls. It traverses the try-handler
// chain and consumes the try-handler entry before jumping to the
// handler code. The handler code then calls the finally-block before
@@ -1178,49 +1156,39 @@ void FullCodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
// exception) in the result register (rax/eax/r0), both of which must
// be preserved. The return address isn't GC-safe, so it should be
// cooked before GC.
- Label finally_entry;
- Label try_handler_setup;
- const int original_stack_height = stack_height();
-
- // Setup the try-handler chain. Use a call to
- // Jump to try-handler setup and try-block code. Use call to put try-handler
- // address on stack.
- __ Call(&try_handler_setup);
- // Try handler code. Return address of call is pushed on handler stack.
- {
- // This code is only executed during stack-handler traversal when an
- // exception is thrown. The exception is in the result register, which
- // is retained by the finally block.
- // Call the finally block and then rethrow the exception if it returns.
- __ Call(&finally_entry);
- __ push(result_register());
- __ CallRuntime(Runtime::kReThrow, 1);
- }
+ Label try_entry, handler_entry, finally_entry;
+
+ // Jump to try-handler setup and try-block code.
+ __ jmp(&try_entry);
+ __ bind(&handler_entry);
+ handler_table()->set(stmt->index(), Smi::FromInt(handler_entry.pos()));
+ // Exception handler code. This code is only executed when an exception
+ // is thrown. The exception is in the result register, and must be
+ // preserved by the finally block. Call the finally block and then
+ // rethrow the exception if it returns.
+ __ Call(&finally_entry);
+ __ push(result_register());
+ __ CallRuntime(Runtime::kReThrow, 1);
+ // Finally block implementation.
__ bind(&finally_entry);
- {
- // Finally block implementation.
- Finally finally_block(this);
- EnterFinallyBlock();
- set_stack_height(original_stack_height + Finally::kElementCount);
+ EnterFinallyBlock();
+ { Finally finally_body(this);
Visit(stmt->finally_block());
- ExitFinallyBlock(); // Return to the calling code.
}
+ ExitFinallyBlock(); // Return to the calling code.
- __ bind(&try_handler_setup);
- {
- // Setup try handler (stack pointer registers).
- const int delta = StackHandlerConstants::kSize / kPointerSize;
- TryFinally try_block(this, &finally_entry);
- __ PushTryHandler(IN_JAVASCRIPT, TRY_FINALLY_HANDLER);
- set_stack_height(original_stack_height + delta);
+ // Set up the try handler.
+ __ bind(&try_entry);
+ __ PushTryHandler(IN_JAVASCRIPT, TRY_FINALLY_HANDLER, stmt->index());
+ { TryFinally try_body(this, &finally_entry);
Visit(stmt->try_block());
- __ PopTryHandler();
- set_stack_height(original_stack_height);
}
+ __ PopTryHandler();
// Execute the finally block on the way out. Clobber the unpredictable
- // value in the accumulator with one that's safe for GC. The finally
- // block will unconditionally preserve the accumulator on the stack.
+ // value in the result register with one that's safe for GC because the
+ // finally block will unconditionally preserve the result register on the
+ // stack.
ClearAccumulator();
__ Call(&finally_entry);
}
@@ -1246,7 +1214,6 @@ void FullCodeGenerator::VisitConditional(Conditional* expr) {
__ bind(&true_case);
SetExpressionPosition(expr->then_expression(),
expr->then_expression_position());
- int start_stack_height = stack_height();
if (context()->IsTest()) {
const TestContext* for_test = TestContext::cast(context());
VisitForControl(expr->then_expression(),
@@ -1254,17 +1221,15 @@ void FullCodeGenerator::VisitConditional(Conditional* expr) {
for_test->false_label(),
NULL);
} else {
- VisitInCurrentContext(expr->then_expression());
+ VisitInDuplicateContext(expr->then_expression());
__ jmp(&done);
}
PrepareForBailoutForId(expr->ElseId(), NO_REGISTERS);
__ bind(&false_case);
- set_stack_height(start_stack_height);
- if (context()->IsTest()) ForwardBailoutToChild(expr);
SetExpressionPosition(expr->else_expression(),
expr->else_expression_position());
- VisitInCurrentContext(expr->else_expression());
+ VisitInDuplicateContext(expr->else_expression());
// If control flow falls through Visit, merge it with true case here.
if (!context()->IsTest()) {
__ bind(&done);
@@ -1301,11 +1266,8 @@ void FullCodeGenerator::VisitSharedFunctionInfoLiteral(
void FullCodeGenerator::VisitThrow(Throw* expr) {
Comment cmnt(masm_, "[ Throw");
- // Throw has no effect on the stack height or the current expression context.
- // Usually the expression context is null, because throw is a statement.
VisitForStackValue(expr->exception());
__ CallRuntime(Runtime::kThrow, 1);
- decrement_stack_height();
// Never returns here.
}
@@ -1321,19 +1283,21 @@ FullCodeGenerator::NestedStatement* FullCodeGenerator::TryCatch::Exit(
}
-bool FullCodeGenerator::TryLiteralCompare(CompareOperation* compare,
- Label* if_true,
- Label* if_false,
- Label* fall_through) {
- Expression *expr;
+bool FullCodeGenerator::TryLiteralCompare(CompareOperation* expr) {
+ Expression* sub_expr;
Handle<String> check;
- if (compare->IsLiteralCompareTypeof(&expr, &check)) {
- EmitLiteralCompareTypeof(expr, check, if_true, if_false, fall_through);
+ if (expr->IsLiteralCompareTypeof(&sub_expr, &check)) {
+ EmitLiteralCompareTypeof(expr, sub_expr, check);
+ return true;
+ }
+
+ if (expr->IsLiteralCompareUndefined(&sub_expr)) {
+ EmitLiteralCompareNil(expr, sub_expr, kUndefinedValue);
return true;
}
- if (compare->IsLiteralCompareUndefined(&expr)) {
- EmitLiteralCompareUndefined(expr, if_true, if_false, fall_through);
+ if (expr->IsLiteralCompareNull(&sub_expr)) {
+ EmitLiteralCompareNil(expr, sub_expr, kNullValue);
return true;
}
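The platform-specific full-codegen files are expected to call this matcher as a fast-path filter before emitting a generic comparison; schematically (a sketch of the call pattern, not a quote of any one platform file):

void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
  // Fast paths: 'typeof x == "..."', 'x === undefined', 'x == null'.
  if (TryLiteralCompare(expr)) return;
  // ... otherwise emit the generic compare stub call ...
}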
diff --git a/deps/v8/src/full-codegen.h b/deps/v8/src/full-codegen.h
index 803c61873..fbb697924 100644
--- a/deps/v8/src/full-codegen.h
+++ b/deps/v8/src/full-codegen.h
@@ -83,12 +83,9 @@ class FullCodeGenerator: public AstVisitor {
scope_(NULL),
nesting_stack_(NULL),
loop_depth_(0),
- stack_height_(0),
context_(NULL),
bailout_entries_(0),
- stack_checks_(2), // There's always at least one.
- forward_bailout_stack_(NULL),
- forward_bailout_pending_(NULL) {
+ stack_checks_(2) { // There's always at least one.
}
static bool MakeCode(CompilationInfo* info);
@@ -96,6 +93,8 @@ class FullCodeGenerator: public AstVisitor {
void Generate(CompilationInfo* info);
void PopulateDeoptimizationData(Handle<Code> code);
+ Handle<FixedArray> handler_table() { return handler_table_; }
+
class StateField : public BitField<State, 0, 8> { };
class PcField : public BitField<unsigned, 8, 32-8> { };
@@ -276,27 +275,8 @@ class FullCodeGenerator: public AstVisitor {
}
};
- // The forward bailout stack keeps track of the expressions that can
- // bail out to just before the control flow is split in a child
- // node. The stack elements are linked together through the parent
- // link when visiting expressions in test contexts after requesting
- // bailout in child forwarding.
- class ForwardBailoutStack BASE_EMBEDDED {
- public:
- ForwardBailoutStack(Expression* expr, ForwardBailoutStack* parent)
- : expr_(expr), parent_(parent) { }
-
- Expression* expr() const { return expr_; }
- ForwardBailoutStack* parent() const { return parent_; }
-
- private:
- Expression* const expr_;
- ForwardBailoutStack* const parent_;
- };
-
// Type of a member function that generates inline code for a native function.
- typedef void (FullCodeGenerator::*InlineFunctionGenerator)
- (ZoneList<Expression*>*);
+ typedef void (FullCodeGenerator::*InlineFunctionGenerator)(CallRuntime* expr);
static const InlineFunctionGenerator kInlineFunctionGenerators[];
@@ -357,23 +337,22 @@ class FullCodeGenerator: public AstVisitor {
// need the write barrier if location is CONTEXT.
MemOperand VarOperand(Variable* var, Register scratch);
- // Forward the bailout responsibility for the given expression to
- // the next child visited (which must be in a test context).
- void ForwardBailoutToChild(Expression* expr);
-
void VisitForEffect(Expression* expr) {
EffectContext context(this);
- VisitInCurrentContext(expr);
+ Visit(expr);
+ PrepareForBailout(expr, NO_REGISTERS);
}
void VisitForAccumulatorValue(Expression* expr) {
AccumulatorValueContext context(this);
- VisitInCurrentContext(expr);
+ Visit(expr);
+ PrepareForBailout(expr, TOS_REG);
}
void VisitForStackValue(Expression* expr) {
StackValueContext context(this);
- VisitInCurrentContext(expr);
+ Visit(expr);
+ PrepareForBailout(expr, NO_REGISTERS);
}
void VisitForControl(Expression* expr,
@@ -381,9 +360,14 @@ class FullCodeGenerator: public AstVisitor {
Label* if_false,
Label* fall_through) {
TestContext context(this, expr, if_true, if_false, fall_through);
- VisitInCurrentContext(expr);
+ Visit(expr);
+ // For test contexts, we prepare for bailout before branching, not at
+ // the end of the entire expression. This happens as part of visiting
+ // the expression.
}
+ void VisitInDuplicateContext(Expression* expr);
+
void VisitDeclarations(ZoneList<Declaration*>* declarations);
void DeclareGlobals(Handle<FixedArray> pairs);
int DeclareGlobalsFlags();
@@ -391,29 +375,22 @@ class FullCodeGenerator: public AstVisitor {
// Try to perform a comparison as a fast inlined literal compare if
// the operands allow it. Returns true if the compare operations
// has been matched and all code generated; false otherwise.
- bool TryLiteralCompare(CompareOperation* compare,
- Label* if_true,
- Label* if_false,
- Label* fall_through);
+ bool TryLiteralCompare(CompareOperation* compare);
// Platform-specific code for comparing the type of a value with
// a given literal string.
void EmitLiteralCompareTypeof(Expression* expr,
- Handle<String> check,
- Label* if_true,
- Label* if_false,
- Label* fall_through);
-
- // Platform-specific code for strict equality comparison with
- // the undefined value.
- void EmitLiteralCompareUndefined(Expression* expr,
- Label* if_true,
- Label* if_false,
- Label* fall_through);
+ Expression* sub_expr,
+ Handle<String> check);
+
+ // Platform-specific code for equality comparison with a nil-like value.
+ void EmitLiteralCompareNil(CompareOperation* expr,
+ Expression* sub_expr,
+ NilValue nil);
// Bailout support.
void PrepareForBailout(Expression* node, State state);
- void PrepareForBailoutForId(int id, State state);
+ void PrepareForBailoutForId(unsigned id, State state);
// Record a call's return site offset, used to rebuild the frame if the
// called function was inlined at the site.
@@ -424,7 +401,7 @@ class FullCodeGenerator: public AstVisitor {
// canonical JS true value so we will insert a (dead) test against true at
// the actual bailout target from the optimized code. If not
// should_normalize, the true and false labels are ignored.
- void PrepareForBailoutBeforeSplit(State state,
+ void PrepareForBailoutBeforeSplit(Expression* expr,
bool should_normalize,
Label* if_true,
Label* if_false);
@@ -432,7 +409,7 @@ class FullCodeGenerator: public AstVisitor {
// Platform-specific code for a variable, constant, or function
// declaration. Functions have an initial value.
void EmitDeclaration(VariableProxy* proxy,
- Variable::Mode mode,
+ VariableMode mode,
FunctionLiteral* function,
int* global_count);
@@ -440,7 +417,7 @@ class FullCodeGenerator: public AstVisitor {
// a loop.
void EmitStackCheck(IterationStatement* stmt);
// Record the OSR AST id corresponding to a stack check in the code.
- void RecordStackCheck(int osr_ast_id);
+ void RecordStackCheck(unsigned osr_ast_id);
// Emit a table of stack check ids and pcs into the code stream. Return
// the offset of the start of the table.
unsigned EmitStackCheckTable();
@@ -459,7 +436,7 @@ class FullCodeGenerator: public AstVisitor {
void EmitInlineRuntimeCall(CallRuntime* expr);
#define EMIT_INLINE_RUNTIME_CALL(name, x, y) \
- void Emit##name(ZoneList<Expression*>* arguments);
+ void Emit##name(CallRuntime* expr);
INLINE_FUNCTION_LIST(EMIT_INLINE_RUNTIME_CALL)
INLINE_RUNTIME_FUNCTION_LIST(EMIT_INLINE_RUNTIME_CALL)
#undef EMIT_INLINE_RUNTIME_CALL
@@ -475,13 +452,8 @@ class FullCodeGenerator: public AstVisitor {
Label* done);
void EmitVariableLoad(VariableProxy* proxy);
- enum ResolveEvalFlag {
- SKIP_CONTEXT_LOOKUP,
- PERFORM_CONTEXT_LOOKUP
- };
-
// Expects the arguments and the function already pushed.
- void EmitResolvePossiblyDirectEval(ResolveEvalFlag flag, int arg_count);
+ void EmitResolvePossiblyDirectEval(int arg_count);
// Platform-specific support for allocating a new closure based on
// the given function info.
@@ -548,35 +520,6 @@ class FullCodeGenerator: public AstVisitor {
loop_depth_--;
}
-#if defined(V8_TARGET_ARCH_IA32)
- int stack_height() { return stack_height_; }
- void set_stack_height(int depth) { stack_height_ = depth; }
- void increment_stack_height() { stack_height_++; }
- void increment_stack_height(int delta) { stack_height_ += delta; }
- void decrement_stack_height() {
- if (FLAG_verify_stack_height) {
- ASSERT(stack_height_ > 0);
- }
- stack_height_--;
- }
- void decrement_stack_height(int delta) {
- stack_height_-= delta;
- if (FLAG_verify_stack_height) {
- ASSERT(stack_height_ >= 0);
- }
- }
- // Call this function only if FLAG_verify_stack_height is true.
- void verify_stack_height(); // Generates a runtime check of esp - ebp.
-#else
- int stack_height() { return 0; }
- void set_stack_height(int depth) {}
- void increment_stack_height() {}
- void increment_stack_height(int delta) {}
- void decrement_stack_height() {}
- void decrement_stack_height(int delta) {}
- void verify_stack_height() {}
-#endif // V8_TARGET_ARCH_IA32
-
MacroAssembler* masm() { return masm_; }
class ExpressionContext;
@@ -586,9 +529,11 @@ class FullCodeGenerator: public AstVisitor {
Handle<Script> script() { return info_->script(); }
bool is_eval() { return info_->is_eval(); }
bool is_native() { return info_->is_native(); }
- bool is_strict_mode() { return function()->strict_mode(); }
- StrictModeFlag strict_mode_flag() {
- return is_strict_mode() ? kStrictMode : kNonStrictMode;
+ bool is_classic_mode() {
+ return language_mode() == CLASSIC_MODE;
+ }
+ LanguageMode language_mode() {
+ return function()->language_mode();
}
FunctionLiteral* function() { return info_->function(); }
Scope* scope() { return scope_; }
@@ -618,7 +563,6 @@ class FullCodeGenerator: public AstVisitor {
void VisitComma(BinaryOperation* expr);
void VisitLogicalExpression(BinaryOperation* expr);
void VisitArithmeticExpression(BinaryOperation* expr);
- void VisitInCurrentContext(Expression* expr);
void VisitForTypeofValue(Expression* expr);
@@ -637,10 +581,6 @@ class FullCodeGenerator: public AstVisitor {
virtual ~ExpressionContext() {
codegen_->set_new_context(old_);
- if (FLAG_verify_stack_height) {
- ASSERT_EQ(expected_stack_height_, codegen()->stack_height());
- codegen()->verify_stack_height();
- }
}
Isolate* isolate() const { return codegen_->isolate(); }
@@ -694,7 +634,6 @@ class FullCodeGenerator: public AstVisitor {
FullCodeGenerator* codegen() const { return codegen_; }
MacroAssembler* masm() const { return masm_; }
MacroAssembler* masm_;
- int expected_stack_height_; // The expected stack height esp - ebp on exit.
private:
const ExpressionContext* old_;
@@ -704,9 +643,7 @@ class FullCodeGenerator: public AstVisitor {
class AccumulatorValueContext : public ExpressionContext {
public:
explicit AccumulatorValueContext(FullCodeGenerator* codegen)
- : ExpressionContext(codegen) {
- expected_stack_height_ = codegen->stack_height();
- }
+ : ExpressionContext(codegen) { }
virtual void Plug(bool flag) const;
virtual void Plug(Register reg) const;
@@ -727,9 +664,7 @@ class FullCodeGenerator: public AstVisitor {
class StackValueContext : public ExpressionContext {
public:
explicit StackValueContext(FullCodeGenerator* codegen)
- : ExpressionContext(codegen) {
- expected_stack_height_ = codegen->stack_height() + 1;
- }
+ : ExpressionContext(codegen) { }
virtual void Plug(bool flag) const;
virtual void Plug(Register reg) const;
@@ -758,9 +693,7 @@ class FullCodeGenerator: public AstVisitor {
condition_(condition),
true_label_(true_label),
false_label_(false_label),
- fall_through_(fall_through) {
- expected_stack_height_ = codegen->stack_height();
- }
+ fall_through_(fall_through) { }
static const TestContext* cast(const ExpressionContext* context) {
ASSERT(context->IsTest());
@@ -797,10 +730,7 @@ class FullCodeGenerator: public AstVisitor {
class EffectContext : public ExpressionContext {
public:
explicit EffectContext(FullCodeGenerator* codegen)
- : ExpressionContext(codegen) {
- expected_stack_height_ = codegen->stack_height();
- }
-
+ : ExpressionContext(codegen) { }
virtual void Plug(bool flag) const;
virtual void Plug(Register reg) const;
@@ -824,12 +754,10 @@ class FullCodeGenerator: public AstVisitor {
Label return_label_;
NestedStatement* nesting_stack_;
int loop_depth_;
- int stack_height_;
const ExpressionContext* context_;
ZoneList<BailoutEntry> bailout_entries_;
ZoneList<BailoutEntry> stack_checks_;
- ForwardBailoutStack* forward_bailout_stack_;
- ForwardBailoutStack* forward_bailout_pending_;
+ Handle<FixedArray> handler_table_;
friend class NestedStatement;
diff --git a/deps/v8/src/gdb-jit.cc b/deps/v8/src/gdb-jit.cc
index 68cb0533b..b386bed17 100644
--- a/deps/v8/src/gdb-jit.cc
+++ b/deps/v8/src/gdb-jit.cc
@@ -1115,13 +1115,13 @@ class DebugInfoSection : public DebugSection {
int context_slots = scope_info.number_of_context_slots();
// The real slot ID is internal_slots + context_slot_id.
int internal_slots = Context::MIN_CONTEXT_SLOTS;
- int locals = scope_info.NumberOfLocals();
+ int locals = scope_info.LocalCount();
int current_abbreviation = 4;
for (int param = 0; param < params; ++param) {
w->WriteULEB128(current_abbreviation++);
w->WriteString(
- *scope_info.parameter_name(param)->ToCString(DISALLOW_NULLS));
+ *scope_info.ParameterName(param)->ToCString(DISALLOW_NULLS));
w->Write<uint32_t>(ty_offset);
Writer::Slot<uint32_t> block_size = w->CreateSlotHere<uint32_t>();
uintptr_t block_start = w->position();
@@ -1312,7 +1312,7 @@ class DebugAbbrevSection : public DebugSection {
int context_slots = scope_info.number_of_context_slots();
// The real slot ID is internal_slots + context_slot_id.
int internal_slots = Context::MIN_CONTEXT_SLOTS;
- int locals = scope_info.NumberOfLocals();
+ int locals = scope_info.LocalCount();
int total_children =
params + slots + context_slots + internal_slots + locals + 2;
diff --git a/deps/v8/src/globals.h b/deps/v8/src/globals.h
index 6c6966aee..30b676c8b 100644
--- a/deps/v8/src/globals.h
+++ b/deps/v8/src/globals.h
@@ -230,6 +230,9 @@ const int kPointerSize = sizeof(void*); // NOLINT
const int kDoubleSizeLog2 = 3;
+// Size of the state of the random number generator.
+const int kRandomStateSize = 2 * kIntSize;
+
#if V8_HOST_ARCH_64_BIT
const int kPointerSizeLog2 = 3;
const intptr_t kIntptrSignBit = V8_INT64_C(0x8000000000000000);
@@ -255,6 +258,10 @@ const int kBinary32MinExponent = 0x01;
const int kBinary32MantissaBits = 23;
const int kBinary32ExponentShift = 23;
+// Quiet NaNs have bits 51 to 62 set, possibly the sign bit, and no
+// other bits set.
+const uint64_t kQuietNaNMask = static_cast<uint64_t>(0xfff) << 51;
+
// ASCII/UC16 constants
// Code-point values in Unicode 4.0 are 21 bits wide.
typedef uint16_t uc16;
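To make the mask concrete, here is a minimal standalone sketch (not part of this patch) that tests whether a double's bit pattern has all of the quiet-NaN mask bits set; kQuietNaNMask is re-declared locally so the example compiles on its own, and everything else is illustrative.

#include <cmath>
#include <cstdio>
#include <cstring>
#include <stdint.h>

// Mirrors the constant added above: bits 51 through 62.
static const uint64_t kQuietNaNMask = static_cast<uint64_t>(0xfff) << 51;

static bool HasQuietNaNBits(double d) {
  uint64_t bits;
  memcpy(&bits, &d, sizeof(bits));  // Type-pun through memcpy.
  return (bits & kQuietNaNMask) == kQuietNaNMask;
}

int main() {
  printf("nan: %d\n", HasQuietNaNBits(std::nan("")));  // 1 on typical targets.
  printf("1.0: %d\n", HasQuietNaNBits(1.0));           // 0.
  return 0;
}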
@@ -287,7 +294,7 @@ const uint32_t kMaxAsciiCharCodeU = 0x7fu;
// The USE(x) template is used to silence C++ compiler warnings
// issued for (yet) unused variables (typically parameters).
template <typename T>
-static inline void USE(T) { }
+inline void USE(T) { }
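For illustration only (not in the patch), a typical call site of USE() looks like the sketch below; ComputeChecksum is a hypothetical helper.

// Hypothetical call site: USE() marks a deliberately unused value so the
// compiler does not warn about it.
template <typename T>
inline void USE(T) { }

static int ComputeChecksum() { return 42; }  // Hypothetical helper.

void Example() {
  int checksum = ComputeChecksum();
  USE(checksum);  // Silences the unused-variable warning.
}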
// FUNCTION_ADDR(f) gets the address of a C function f.
@@ -351,6 +358,39 @@ F FUNCTION_CAST(Address addr) {
class FreeStoreAllocationPolicy;
template <typename T, class P = FreeStoreAllocationPolicy> class List;
+// -----------------------------------------------------------------------------
+// Declarations for use in both the preparser and the rest of V8.
+
+// The different language modes that V8 implements. ES5 defines two language
+// modes: an unrestricted mode and a strict mode, indicated by CLASSIC_MODE
+// and STRICT_MODE in the enum, respectively. The harmony spec drafts for the
+// next ES standard specify a third mode, called 'extended
+// mode'. The extended mode is only available if the harmony flag is set. It is
+// based on the 'strict mode' and adds new functionality to it. This means that
+// most of the semantics of these two modes coincide.
+//
+// In the current draft the term 'base code' is used to refer to code that is
+// neither in strict nor extended mode. However, the more distinguishing term
+// 'classic mode' is used in V8 instead to avoid mix-ups.
+
+enum LanguageMode {
+ CLASSIC_MODE,
+ STRICT_MODE,
+ EXTENDED_MODE
+};
+
+
+// The Strict Mode (ECMA-262 5th edition, 4.2.2).
+//
+// This flag is used in the backend to represent the language mode. So far
+// there is no semantic difference between the strict and the extended mode in
+// the backend, so both modes are represented by the kStrictMode value.
+enum StrictModeFlag {
+ kNonStrictMode,
+ kStrictMode
+};
+
+
} } // namespace v8::internal
#endif // V8_GLOBALS_H_
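A hypothetical helper (not part of the patch) that makes the mapping described above explicit: only classic mode is non-strict, while strict and extended mode share the kStrictMode backend representation.

// Sketch of the language-mode to backend-flag mapping; the enums mirror the
// declarations added above, the function itself is hypothetical.
enum LanguageMode { CLASSIC_MODE, STRICT_MODE, EXTENDED_MODE };
enum StrictModeFlag { kNonStrictMode, kStrictMode };

inline StrictModeFlag ToStrictModeFlag(LanguageMode mode) {
  return mode == CLASSIC_MODE ? kNonStrictMode : kStrictMode;
}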
diff --git a/deps/v8/src/handles.cc b/deps/v8/src/handles.cc
index 35c363c10..2ff797d07 100644
--- a/deps/v8/src/handles.cc
+++ b/deps/v8/src/handles.cc
@@ -190,7 +190,11 @@ static int ExpectedNofPropertiesFromEstimate(int estimate) {
// Inobject slack tracking will reclaim redundant inobject space later,
// so we can afford to adjust the estimate generously.
- return estimate + 8;
+ if (FLAG_clever_optimizations) {
+ return estimate + 8;
+ } else {
+ return estimate + 3;
+ }
}
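Condensed into a standalone sketch (the name is hypothetical), the adjusted estimate above is simply:

// The slack added to the parser's property-count estimate now depends on a
// runtime flag; 8 and 3 are the values from the patch.
inline int ExpectedNofProperties(int estimate, bool clever_optimizations) {
  return estimate + (clever_optimizations ? 8 : 3);
}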
@@ -372,24 +376,6 @@ Handle<Object> GetProperty(Handle<Object> obj,
}
-Handle<Object> GetProperty(Handle<JSReceiver> obj,
- Handle<String> name,
- LookupResult* result) {
- PropertyAttributes attributes;
- Isolate* isolate = Isolate::Current();
- CALL_HEAP_FUNCTION(isolate,
- obj->GetProperty(*obj, result, *name, &attributes),
- Object);
-}
-
-
-Handle<Object> GetElement(Handle<Object> obj,
- uint32_t index) {
- Isolate* isolate = Isolate::Current();
- CALL_HEAP_FUNCTION(isolate, Runtime::GetElement(obj, index), Object);
-}
-
-
Handle<Object> GetPropertyWithInterceptor(Handle<JSObject> receiver,
Handle<JSObject> holder,
Handle<String> name,
@@ -421,17 +407,18 @@ Handle<Object> PreventExtensions(Handle<JSObject> object) {
}
-Handle<Object> GetHiddenProperties(Handle<JSObject> obj,
- JSObject::HiddenPropertiesFlag flag) {
+Handle<Object> SetHiddenProperty(Handle<JSObject> obj,
+ Handle<String> key,
+ Handle<Object> value) {
CALL_HEAP_FUNCTION(obj->GetIsolate(),
- obj->GetHiddenProperties(flag),
+ obj->SetHiddenProperty(*key, *value),
Object);
}
-int GetIdentityHash(Handle<JSObject> obj) {
+int GetIdentityHash(Handle<JSReceiver> obj) {
CALL_AND_RETRY(obj->GetIsolate(),
- obj->GetIdentityHash(JSObject::ALLOW_CREATION),
+ obj->GetIdentityHash(ALLOW_CREATION),
return Smi::cast(__object__)->value(),
return 0);
}
@@ -499,6 +486,14 @@ Handle<Object> SetOwnElement(Handle<JSObject> object,
}
+Handle<Object> TransitionElementsKind(Handle<JSObject> object,
+ ElementsKind to_kind) {
+ CALL_HEAP_FUNCTION(object->GetIsolate(),
+ object->TransitionElementsKind(to_kind),
+ Object);
+}
+
+
Handle<JSObject> Copy(Handle<JSObject> obj) {
Isolate* isolate = obj->GetIsolate();
CALL_HEAP_FUNCTION(isolate,
@@ -521,8 +516,9 @@ static void ClearWrapperCache(Persistent<v8::Value> handle, void*) {
Handle<Object> cache = Utils::OpenHandle(*handle);
JSValue* wrapper = JSValue::cast(*cache);
Foreign* foreign = Script::cast(wrapper->value())->wrapper();
- ASSERT(foreign->address() == reinterpret_cast<Address>(cache.location()));
- foreign->set_address(0);
+ ASSERT(foreign->foreign_address() ==
+ reinterpret_cast<Address>(cache.location()));
+ foreign->set_foreign_address(0);
Isolate* isolate = Isolate::Current();
isolate->global_handles()->Destroy(cache.location());
isolate->counters()->script_wrappers()->Decrement();
@@ -530,10 +526,10 @@ static void ClearWrapperCache(Persistent<v8::Value> handle, void*) {
Handle<JSValue> GetScriptWrapper(Handle<Script> script) {
- if (script->wrapper()->address() != NULL) {
+ if (script->wrapper()->foreign_address() != NULL) {
// Return the script wrapper directly from the cache.
return Handle<JSValue>(
- reinterpret_cast<JSValue**>(script->wrapper()->address()));
+ reinterpret_cast<JSValue**>(script->wrapper()->foreign_address()));
}
Isolate* isolate = Isolate::Current();
// Construct a new script wrapper.
@@ -549,7 +545,8 @@ Handle<JSValue> GetScriptWrapper(Handle<Script> script) {
Handle<Object> handle = isolate->global_handles()->Create(*result);
isolate->global_handles()->MakeWeak(handle.location(), NULL,
&ClearWrapperCache);
- script->wrapper()->set_address(reinterpret_cast<Address>(handle.location()));
+ script->wrapper()->set_foreign_address(
+ reinterpret_cast<Address>(handle.location()));
return result;
}
@@ -665,6 +662,19 @@ int GetScriptLineNumber(Handle<Script> script, int code_pos) {
return right + script->line_offset()->value();
}
+// Convert code position into column number.
+int GetScriptColumnNumber(Handle<Script> script, int code_pos) {
+ int line_number = GetScriptLineNumber(script, code_pos);
+ if (line_number == -1) return -1;
+
+ AssertNoAllocation no_allocation;
+ FixedArray* line_ends_array = FixedArray::cast(script->line_ends());
+ line_number = line_number - script->line_offset()->value();
+ if (line_number == 0) return code_pos + script->column_offset()->value();
+ int prev_line_end_pos =
+ Smi::cast(line_ends_array->get(line_number - 1))->value();
+ return code_pos - (prev_line_end_pos + 1);
+}
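A standalone sketch of the arithmetic above, with a plain array standing in for the script's line-ends data (values hypothetical): the column is the distance from the end of the previous line, except on the first line.

#include <cstdio>

static int ColumnFromPosition(const int* line_ends, int line_number,
                              int code_pos) {
  if (line_number == 0) return code_pos;  // No previous line end to subtract.
  int prev_line_end_pos = line_ends[line_number - 1];
  return code_pos - (prev_line_end_pos + 1);
}

int main() {
  static const int line_ends[] = { 9, 25, 47 };  // Hypothetical positions.
  printf("%d\n", ColumnFromPosition(line_ends, 2, 30));  // Prints 4.
  return 0;
}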
int GetScriptLineNumberSafe(Handle<Script> script, int code_pos) {
AssertNoAllocation no_allocation;
@@ -696,7 +706,7 @@ void CustomArguments::IterateInstance(ObjectVisitor* v) {
// Compute the property keys from the interceptor.
-v8::Handle<v8::Array> GetKeysForNamedInterceptor(Handle<JSObject> receiver,
+v8::Handle<v8::Array> GetKeysForNamedInterceptor(Handle<JSReceiver> receiver,
Handle<JSObject> object) {
Isolate* isolate = receiver->GetIsolate();
Handle<InterceptorInfo> interceptor(object->GetNamedInterceptor());
@@ -718,7 +728,7 @@ v8::Handle<v8::Array> GetKeysForNamedInterceptor(Handle<JSObject> receiver,
// Compute the element keys from the interceptor.
-v8::Handle<v8::Array> GetKeysForIndexedInterceptor(Handle<JSObject> receiver,
+v8::Handle<v8::Array> GetKeysForIndexedInterceptor(Handle<JSReceiver> receiver,
Handle<JSObject> object) {
Isolate* isolate = receiver->GetIsolate();
Handle<InterceptorInfo> interceptor(object->GetIndexedInterceptor());
@@ -749,8 +759,9 @@ static bool ContainsOnlyValidKeys(Handle<FixedArray> array) {
}
-Handle<FixedArray> GetKeysInFixedArrayFor(Handle<JSObject> object,
- KeyCollectionType type) {
+Handle<FixedArray> GetKeysInFixedArrayFor(Handle<JSReceiver> object,
+ KeyCollectionType type,
+ bool* threw) {
USE(ContainsOnlyValidKeys);
Isolate* isolate = object->GetIsolate();
Handle<FixedArray> content = isolate->factory()->empty_fixed_array();
@@ -765,6 +776,16 @@ Handle<FixedArray> GetKeysInFixedArrayFor(Handle<JSObject> object,
for (Handle<Object> p = object;
*p != isolate->heap()->null_value();
p = Handle<Object>(p->GetPrototype(), isolate)) {
+ if (p->IsJSProxy()) {
+ Handle<JSProxy> proxy(JSProxy::cast(*p), isolate);
+ Handle<Object> args[] = { proxy };
+ Handle<Object> names = Execution::Call(
+ isolate->proxy_enumerate(), object, ARRAY_SIZE(args), args, threw);
+ if (*threw) return content;
+ content = AddKeysFromJSArray(content, Handle<JSArray>::cast(names));
+ break;
+ }
+
Handle<JSObject> current(JSObject::cast(*p), isolate);
// Check access rights if required.
@@ -831,11 +852,11 @@ Handle<FixedArray> GetKeysInFixedArrayFor(Handle<JSObject> object,
}
-Handle<JSArray> GetKeysFor(Handle<JSObject> object) {
+Handle<JSArray> GetKeysFor(Handle<JSReceiver> object, bool* threw) {
Isolate* isolate = object->GetIsolate();
isolate->counters()->for_in()->Increment();
- Handle<FixedArray> elements = GetKeysInFixedArrayFor(object,
- INCLUDE_PROTOS);
+ Handle<FixedArray> elements =
+ GetKeysInFixedArrayFor(object, INCLUDE_PROTOS, threw);
return isolate->factory()->NewJSArrayWithElements(elements);
}
@@ -885,62 +906,29 @@ Handle<FixedArray> GetEnumPropertyKeys(Handle<JSObject> object,
}
-Handle<ObjectHashTable> PutIntoObjectHashTable(Handle<ObjectHashTable> table,
- Handle<JSObject> key,
- Handle<Object> value) {
+Handle<ObjectHashSet> ObjectHashSetAdd(Handle<ObjectHashSet> table,
+ Handle<Object> key) {
CALL_HEAP_FUNCTION(table->GetIsolate(),
- table->Put(*key, *value),
- ObjectHashTable);
-}
-
-
-bool EnsureCompiled(Handle<SharedFunctionInfo> shared,
- ClearExceptionFlag flag) {
- return shared->is_compiled() || CompileLazyShared(shared, flag);
-}
-
-
-static bool CompileLazyHelper(CompilationInfo* info,
- ClearExceptionFlag flag) {
- // Compile the source information to a code object.
- ASSERT(info->IsOptimizing() || !info->shared_info()->is_compiled());
- ASSERT(!info->isolate()->has_pending_exception());
- bool result = Compiler::CompileLazy(info);
- ASSERT(result != Isolate::Current()->has_pending_exception());
- if (!result && flag == CLEAR_EXCEPTION) {
- info->isolate()->clear_pending_exception();
- }
- return result;
+ table->Add(*key),
+ ObjectHashSet);
}
-bool CompileLazyShared(Handle<SharedFunctionInfo> shared,
- ClearExceptionFlag flag) {
- CompilationInfo info(shared);
- return CompileLazyHelper(&info, flag);
+Handle<ObjectHashSet> ObjectHashSetRemove(Handle<ObjectHashSet> table,
+ Handle<Object> key) {
+ CALL_HEAP_FUNCTION(table->GetIsolate(),
+ table->Remove(*key),
+ ObjectHashSet);
}
-bool CompileLazy(Handle<JSFunction> function, ClearExceptionFlag flag) {
- bool result = true;
- if (function->shared()->is_compiled()) {
- function->ReplaceCode(function->shared()->code());
- function->shared()->set_code_age(0);
- } else {
- CompilationInfo info(function);
- result = CompileLazyHelper(&info, flag);
- ASSERT(!result || function->is_compiled());
- }
- return result;
+Handle<ObjectHashTable> PutIntoObjectHashTable(Handle<ObjectHashTable> table,
+ Handle<Object> key,
+ Handle<Object> value) {
+ CALL_HEAP_FUNCTION(table->GetIsolate(),
+ table->Put(*key, *value),
+ ObjectHashTable);
}
-bool CompileOptimized(Handle<JSFunction> function,
- int osr_ast_id,
- ClearExceptionFlag flag) {
- CompilationInfo info(function);
- info.SetOptimizing(osr_ast_id);
- return CompileLazyHelper(&info, flag);
-}
-
} } // namespace v8::internal
diff --git a/deps/v8/src/handles.h b/deps/v8/src/handles.h
index 7eaf4de92..cfa65b378 100644
--- a/deps/v8/src/handles.h
+++ b/deps/v8/src/handles.h
@@ -224,12 +224,6 @@ void SetLocalPropertyNoThrow(Handle<JSObject> object,
Handle<Object> value,
PropertyAttributes attributes = NONE);
-Handle<Object> SetPropertyWithInterceptor(Handle<JSObject> object,
- Handle<String> key,
- Handle<Object> value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode);
-
MUST_USE_RESULT Handle<Object> SetElement(Handle<JSObject> object,
uint32_t index,
Handle<Object> value,
@@ -240,20 +234,15 @@ Handle<Object> SetOwnElement(Handle<JSObject> object,
Handle<Object> value,
StrictModeFlag strict_mode);
+Handle<Object> TransitionElementsKind(Handle<JSObject> object,
+ ElementsKind to_kind);
+
Handle<Object> GetProperty(Handle<JSReceiver> obj,
const char* name);
Handle<Object> GetProperty(Handle<Object> obj,
Handle<Object> key);
-Handle<Object> GetProperty(Handle<JSReceiver> obj,
- Handle<String> name,
- LookupResult* result);
-
-
-Handle<Object> GetElement(Handle<Object> obj,
- uint32_t index);
-
Handle<Object> GetPropertyWithInterceptor(Handle<JSObject> receiver,
Handle<JSObject> holder,
Handle<String> name,
@@ -263,14 +252,13 @@ Handle<Object> GetPrototype(Handle<Object> obj);
Handle<Object> SetPrototype(Handle<JSObject> obj, Handle<Object> value);
-// Return the object's hidden properties object. If the object has no hidden
-// properties and HiddenPropertiesFlag::ALLOW_CREATION is passed, then a new
-// hidden property object will be allocated. Otherwise Heap::undefined_value
-// is returned.
-Handle<Object> GetHiddenProperties(Handle<JSObject> obj,
- JSObject::HiddenPropertiesFlag flag);
+// Sets a hidden property on an object. Returns obj on success, undefined
+// if trying to set the property on a detached proxy.
+Handle<Object> SetHiddenProperty(Handle<JSObject> obj,
+ Handle<String> key,
+ Handle<Object> value);
-int GetIdentityHash(Handle<JSObject> obj);
+int GetIdentityHash(Handle<JSReceiver> obj);
Handle<Object> DeleteElement(Handle<JSObject> obj, uint32_t index);
Handle<Object> DeleteProperty(Handle<JSObject> obj, Handle<String> prop);
@@ -298,21 +286,23 @@ Handle<FixedArray> CalculateLineEnds(Handle<String> string,
int GetScriptLineNumber(Handle<Script> script, int code_position);
// The safe version does not make heap allocations but may work much slower.
int GetScriptLineNumberSafe(Handle<Script> script, int code_position);
+int GetScriptColumnNumber(Handle<Script> script, int code_position);
// Computes the enumerable keys from interceptors. Used for debug mirrors and
// by GetKeysInFixedArrayFor below.
-v8::Handle<v8::Array> GetKeysForNamedInterceptor(Handle<JSObject> receiver,
+v8::Handle<v8::Array> GetKeysForNamedInterceptor(Handle<JSReceiver> receiver,
Handle<JSObject> object);
-v8::Handle<v8::Array> GetKeysForIndexedInterceptor(Handle<JSObject> receiver,
+v8::Handle<v8::Array> GetKeysForIndexedInterceptor(Handle<JSReceiver> receiver,
Handle<JSObject> object);
enum KeyCollectionType { LOCAL_ONLY, INCLUDE_PROTOS };
// Computes the enumerable keys for a JSObject. Used for implementing
// "for (n in object) { }".
-Handle<FixedArray> GetKeysInFixedArrayFor(Handle<JSObject> object,
- KeyCollectionType type);
-Handle<JSArray> GetKeysFor(Handle<JSObject> object);
+Handle<FixedArray> GetKeysInFixedArrayFor(Handle<JSReceiver> object,
+ KeyCollectionType type,
+ bool* threw);
+Handle<JSArray> GetKeysFor(Handle<JSReceiver> object, bool* threw);
Handle<FixedArray> GetEnumPropertyKeys(Handle<JSObject> object,
bool cache_result);
@@ -347,25 +337,15 @@ Handle<Object> SetPrototype(Handle<JSFunction> function,
Handle<Object> PreventExtensions(Handle<JSObject> object);
-Handle<ObjectHashTable> PutIntoObjectHashTable(Handle<ObjectHashTable> table,
- Handle<JSObject> key,
- Handle<Object> value);
-
-// Does lazy compilation of the given function. Returns true on success and
-// false if the compilation resulted in a stack overflow.
-enum ClearExceptionFlag { KEEP_EXCEPTION, CLEAR_EXCEPTION };
-
-bool EnsureCompiled(Handle<SharedFunctionInfo> shared,
- ClearExceptionFlag flag);
+Handle<ObjectHashSet> ObjectHashSetAdd(Handle<ObjectHashSet> table,
+ Handle<Object> key);
-bool CompileLazyShared(Handle<SharedFunctionInfo> shared,
- ClearExceptionFlag flag);
+Handle<ObjectHashSet> ObjectHashSetRemove(Handle<ObjectHashSet> table,
+ Handle<Object> key);
-bool CompileLazy(Handle<JSFunction> function, ClearExceptionFlag flag);
-
-bool CompileOptimized(Handle<JSFunction> function,
- int osr_ast_id,
- ClearExceptionFlag flag);
+Handle<ObjectHashTable> PutIntoObjectHashTable(Handle<ObjectHashTable> table,
+ Handle<Object> key,
+ Handle<Object> value);
class NoHandleAllocation BASE_EMBEDDED {
public:
diff --git a/deps/v8/src/hashmap.cc b/deps/v8/src/hashmap.cc
index 1422afdc7..0b404a97e 100644
--- a/deps/v8/src/hashmap.cc
+++ b/deps/v8/src/hashmap.cc
@@ -36,13 +36,7 @@
namespace v8 {
namespace internal {
-Allocator HashMap::DefaultAllocator;
-
-
-HashMap::HashMap() {
- allocator_ = NULL;
- match_ = NULL;
-}
+Allocator* HashMap::DefaultAllocator = ::new Allocator();
HashMap::HashMap(MatchFun match,
diff --git a/deps/v8/src/hashmap.h b/deps/v8/src/hashmap.h
index 5c13212eb..d2d1fafa3 100644
--- a/deps/v8/src/hashmap.h
+++ b/deps/v8/src/hashmap.h
@@ -46,19 +46,14 @@ class Allocator BASE_EMBEDDED {
class HashMap {
public:
- static Allocator DefaultAllocator;
+ static Allocator* DefaultAllocator;
typedef bool (*MatchFun) (void* key1, void* key2);
- // Dummy constructor. This constructor doesn't set up the hash
- // map properly so don't use it unless you have good reason (e.g.,
- // you know that the HashMap will never be used).
- HashMap();
-
// initial_capacity is the size of the initial hash map;
// it must be a power of 2 (and thus must not be 0).
explicit HashMap(MatchFun match,
- Allocator* allocator = &DefaultAllocator,
+ Allocator* allocator = DefaultAllocator,
uint32_t initial_capacity = 8);
~HashMap();
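One plausible reading of this change (an assumption; the patch does not state its motivation) is the leaked-singleton pattern: an allocator created with ::new and never deleted cannot be destroyed before late users during static destruction. A generic sketch of the pattern:

#include <cstddef>
#include <new>

struct Allocator {
  void* New(size_t size) { return ::operator new(size); }
  void Delete(void* p) { ::operator delete(p); }
};

// Created once and intentionally never deleted, so it outlives every user,
// regardless of static initialization or destruction order.
static Allocator* DefaultAllocator = ::new Allocator();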
diff --git a/deps/v8/src/heap-inl.h b/deps/v8/src/heap-inl.h
index 7b666af5b..8977cdb46 100644
--- a/deps/v8/src/heap-inl.h
+++ b/deps/v8/src/heap-inl.h
@@ -33,15 +33,51 @@
#include "list-inl.h"
#include "objects.h"
#include "v8-counters.h"
+#include "store-buffer.h"
+#include "store-buffer-inl.h"
namespace v8 {
namespace internal {
void PromotionQueue::insert(HeapObject* target, int size) {
+ if (emergency_stack_ != NULL) {
+ emergency_stack_->Add(Entry(target, size));
+ return;
+ }
+
+ if (NewSpacePage::IsAtStart(reinterpret_cast<Address>(rear_))) {
+ NewSpacePage* rear_page =
+ NewSpacePage::FromAddress(reinterpret_cast<Address>(rear_));
+ ASSERT(!rear_page->prev_page()->is_anchor());
+ rear_ = reinterpret_cast<intptr_t*>(rear_page->prev_page()->body_limit());
+ ActivateGuardIfOnTheSamePage();
+ }
+
+ if (guard_) {
+ ASSERT(GetHeadPage() ==
+ Page::FromAllocationTop(reinterpret_cast<Address>(limit_)));
+
+ if ((rear_ - 2) < limit_) {
+ RelocateQueueHead();
+ emergency_stack_->Add(Entry(target, size));
+ return;
+ }
+ }
+
*(--rear_) = reinterpret_cast<intptr_t>(target);
*(--rear_) = size;
// Assert no overflow into live objects.
- ASSERT(reinterpret_cast<Address>(rear_) >= HEAP->new_space()->top());
+#ifdef DEBUG
+ SemiSpace::AssertValidRange(HEAP->new_space()->top(),
+ reinterpret_cast<Address>(rear_));
+#endif
+}
+
+
+void PromotionQueue::ActivateGuardIfOnTheSamePage() {
+ guard_ = guard_ ||
+ heap_->new_space()->active_space()->current_page()->address() ==
+ GetHeadPage()->address();
}
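Stripped of the guard and the emergency stack, the queue above is a downward-growing pair queue. A minimal sketch (not the real class) of that core:

#include <cassert>
#include <stdint.h>

class TinyPromotionQueue {
 public:
  TinyPromotionQueue(intptr_t* limit, intptr_t* end)
      : limit_(limit), front_(end), rear_(end) {}

  void insert(void* target, int size) {
    assert(rear_ - 2 >= limit_);  // The real code spills to an emergency stack.
    *(--rear_) = reinterpret_cast<intptr_t>(target);
    *(--rear_) = size;
  }

  bool is_empty() const { return front_ == rear_; }

  void remove(void** target, int* size) {
    *target = reinterpret_cast<void*>(*(--front_));  // Oldest entry first.
    *size = static_cast<int>(*(--front_));
  }

 private:
  intptr_t* limit_;  // Lowest usable slot.
  intptr_t* front_;  // Entries are popped here, moving downward.
  intptr_t* rear_;   // Entries are pushed here, moving downward.
};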
@@ -84,7 +120,7 @@ MaybeObject* Heap::AllocateAsciiSymbol(Vector<const char> str,
// Allocate string.
Object* result;
{ MaybeObject* maybe_result = (size > MaxObjectSizeInPagedSpace())
- ? lo_space_->AllocateRaw(size)
+ ? lo_space_->AllocateRaw(size, NOT_EXECUTABLE)
: old_data_space_->AllocateRaw(size);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
@@ -117,7 +153,7 @@ MaybeObject* Heap::AllocateTwoByteSymbol(Vector<const uc16> str,
// Allocate string.
Object* result;
{ MaybeObject* maybe_result = (size > MaxObjectSizeInPagedSpace())
- ? lo_space_->AllocateRaw(size)
+ ? lo_space_->AllocateRaw(size, NOT_EXECUTABLE)
: old_data_space_->AllocateRaw(size);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
@@ -181,7 +217,7 @@ MaybeObject* Heap::AllocateRaw(int size_in_bytes,
} else if (CODE_SPACE == space) {
result = code_space_->AllocateRaw(size_in_bytes);
} else if (LO_SPACE == space) {
- result = lo_space_->AllocateRaw(size_in_bytes);
+ result = lo_space_->AllocateRaw(size_in_bytes, NOT_EXECUTABLE);
} else if (CELL_SPACE == space) {
result = cell_space_->AllocateRaw(size_in_bytes);
} else {
@@ -193,19 +229,21 @@ MaybeObject* Heap::AllocateRaw(int size_in_bytes,
}
-MaybeObject* Heap::NumberFromInt32(int32_t value) {
+MaybeObject* Heap::NumberFromInt32(
+ int32_t value, PretenureFlag pretenure) {
if (Smi::IsValid(value)) return Smi::FromInt(value);
// Bypass NumberFromDouble to avoid various redundant checks.
- return AllocateHeapNumber(FastI2D(value));
+ return AllocateHeapNumber(FastI2D(value), pretenure);
}
-MaybeObject* Heap::NumberFromUint32(uint32_t value) {
+MaybeObject* Heap::NumberFromUint32(
+ uint32_t value, PretenureFlag pretenure) {
if ((int32_t)value >= 0 && Smi::IsValid((int32_t)value)) {
return Smi::FromInt((int32_t)value);
}
// Bypass NumberFromDouble to avoid various redundant checks.
- return AllocateHeapNumber(FastUI2D(value));
+ return AllocateHeapNumber(FastUI2D(value), pretenure);
}
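The fast path above avoids allocation whenever the value fits in a Smi. As a sketch, assuming the 31-bit signed Smi payload used on 32-bit targets (an assumption; 64-bit targets have a wider payload):

#include <stdint.h>

// Hypothetical stand-in for the Smi::IsValid check with a 31-bit payload:
// values in [-2^30, 2^30 - 1] need no heap-number allocation.
inline bool SmiIsValid31(int32_t value) {
  return value >= -(1 << 30) && value < (1 << 30);
}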
@@ -220,10 +258,8 @@ void Heap::FinalizeExternalString(String* string) {
// Dispose of the C++ object if it has not already been disposed.
if (*resource_addr != NULL) {
(*resource_addr)->Dispose();
+ *resource_addr = NULL;
}
-
- // Clear the resource pointer in the string.
- *resource_addr = NULL;
}
@@ -265,6 +301,11 @@ bool Heap::InNewSpace(Object* object) {
}
+bool Heap::InNewSpace(Address addr) {
+ return new_space_.Contains(addr);
+}
+
+
bool Heap::InFromSpace(Object* object) {
return new_space_.FromSpaceContains(object);
}
@@ -275,29 +316,36 @@ bool Heap::InToSpace(Object* object) {
}
+bool Heap::OldGenerationAllocationLimitReached() {
+ if (!incremental_marking()->IsStopped()) return false;
+ return OldGenerationSpaceAvailable() < 0;
+}
+
+
bool Heap::ShouldBePromoted(Address old_address, int object_size) {
// An object should be promoted if:
// - the object has survived a scavenge operation or
// - to space is already 25% full.
- return old_address < new_space_.age_mark()
- || (new_space_.Size() + object_size) >= (new_space_.Capacity() >> 2);
+ NewSpacePage* page = NewSpacePage::FromAddress(old_address);
+ Address age_mark = new_space_.age_mark();
+ bool below_mark = page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK) &&
+ (!page->ContainsLimit(age_mark) || old_address < age_mark);
+ return below_mark || (new_space_.Size() + object_size) >=
+ (new_space_.EffectiveCapacity() >> 2);
}
void Heap::RecordWrite(Address address, int offset) {
- if (new_space_.Contains(address)) return;
- ASSERT(!new_space_.FromSpaceContains(address));
- SLOW_ASSERT(Contains(address + offset));
- Page::FromAddress(address)->MarkRegionDirty(address + offset);
+ if (!InNewSpace(address)) store_buffer_.Mark(address + offset);
}
void Heap::RecordWrites(Address address, int start, int len) {
- if (new_space_.Contains(address)) return;
- ASSERT(!new_space_.FromSpaceContains(address));
- Page* page = Page::FromAddress(address);
- page->SetRegionMarks(page->GetRegionMarks() |
- page->GetRegionMaskForSpan(address + start, len * kPointerSize));
+ if (!InNewSpace(address)) {
+ for (int i = 0; i < len; i++) {
+ store_buffer_.Mark(address + start + i * kPointerSize);
+ }
+ }
}
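A toy model (an assumption, not the real StoreBuffer) of what the write barrier above relies on: Mark() records the address of a slot that may now hold an old-to-new pointer, for the next scavenge to revisit.

#include <cstddef>

class ToyStoreBuffer {
 public:
  explicit ToyStoreBuffer(size_t capacity)
      : slots_(new void*[capacity]), top_(slots_), limit_(slots_ + capacity) {}
  ~ToyStoreBuffer() { delete[] slots_; }

  void Mark(void* slot_address) {
    if (top_ == limit_) HandleOverflow();
    *top_++ = slot_address;
  }

 private:
  // Placeholder: the real code deduplicates entries or flags whole pages as
  // scan-on-scavenge instead of discarding them.
  void HandleOverflow() { top_ = slots_; }

  void** slots_;
  void** top_;
  void** limit_;
};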
@@ -336,38 +384,12 @@ AllocationSpace Heap::TargetSpaceId(InstanceType type) {
void Heap::CopyBlock(Address dst, Address src, int byte_size) {
- ASSERT(IsAligned(byte_size, kPointerSize));
CopyWords(reinterpret_cast<Object**>(dst),
reinterpret_cast<Object**>(src),
byte_size / kPointerSize);
}
-void Heap::CopyBlockToOldSpaceAndUpdateRegionMarks(Address dst,
- Address src,
- int byte_size) {
- ASSERT(IsAligned(byte_size, kPointerSize));
-
- Page* page = Page::FromAddress(dst);
- uint32_t marks = page->GetRegionMarks();
-
- for (int remaining = byte_size / kPointerSize;
- remaining > 0;
- remaining--) {
- Memory::Object_at(dst) = Memory::Object_at(src);
-
- if (InNewSpace(Memory::Object_at(dst))) {
- marks |= page->GetRegionMaskForAddress(dst);
- }
-
- dst += kPointerSize;
- src += kPointerSize;
- }
-
- page->SetRegionMarks(marks);
-}
-
-
void Heap::MoveBlock(Address dst, Address src, int byte_size) {
ASSERT(IsAligned(byte_size, kPointerSize));
@@ -387,16 +409,6 @@ void Heap::MoveBlock(Address dst, Address src, int byte_size) {
}
-void Heap::MoveBlockToOldSpaceAndUpdateRegionMarks(Address dst,
- Address src,
- int byte_size) {
- ASSERT(IsAligned(byte_size, kPointerSize));
- ASSERT((dst < src) || (dst >= (src + byte_size)));
-
- CopyBlockToOldSpaceAndUpdateRegionMarks(dst, src, byte_size);
-}
-
-
void Heap::ScavengePointer(HeapObject** p) {
ScavengeObject(p, *p);
}
@@ -414,7 +426,9 @@ void Heap::ScavengeObject(HeapObject** p, HeapObject* object) {
// If the first word is a forwarding address, the object has already been
// copied.
if (first_word.IsForwardingAddress()) {
- *p = first_word.ToForwardingAddress();
+ HeapObject* dest = first_word.ToForwardingAddress();
+ ASSERT(HEAP->InFromSpace(*p));
+ *p = dest;
return;
}
@@ -459,7 +473,7 @@ int Heap::AdjustAmountOfExternalAllocatedMemory(int change_in_bytes) {
amount_of_external_allocated_memory_ -
amount_of_external_allocated_memory_at_last_global_gc_;
if (amount_since_last_global_gc > external_allocation_limit_) {
- CollectAllGarbage(false);
+ CollectAllGarbage(kNoGCFlags);
}
} else {
// Avoid underflow.
@@ -476,6 +490,7 @@ void Heap::SetLastScriptId(Object* last_script_id) {
roots_[kLastScriptIdRootIndex] = last_script_id;
}
+
Isolate* Heap::isolate() {
return reinterpret_cast<Isolate*>(reinterpret_cast<intptr_t>(this) -
reinterpret_cast<size_t>(reinterpret_cast<Isolate*>(4)->heap()) + 4);
@@ -581,11 +596,11 @@ void ExternalStringTable::Verify() {
#ifdef DEBUG
for (int i = 0; i < new_space_strings_.length(); ++i) {
ASSERT(heap_->InNewSpace(new_space_strings_[i]));
- ASSERT(new_space_strings_[i] != HEAP->raw_unchecked_null_value());
+ ASSERT(new_space_strings_[i] != HEAP->raw_unchecked_the_hole_value());
}
for (int i = 0; i < old_space_strings_.length(); ++i) {
ASSERT(!heap_->InNewSpace(old_space_strings_[i]));
- ASSERT(old_space_strings_[i] != HEAP->raw_unchecked_null_value());
+ ASSERT(old_space_strings_[i] != HEAP->raw_unchecked_the_hole_value());
}
#endif
}
@@ -600,7 +615,9 @@ void ExternalStringTable::AddOldString(String* string) {
void ExternalStringTable::ShrinkNewStrings(int position) {
new_space_strings_.Rewind(position);
- Verify();
+ if (FLAG_verify_heap) {
+ Verify();
+ }
}
@@ -688,15 +705,6 @@ Heap* _inline_get_heap_() {
}
-void MarkCompactCollector::SetMark(HeapObject* obj) {
- tracer_->increment_marked_count();
-#ifdef DEBUG
- UpdateLiveObjectCount(obj);
-#endif
- obj->SetMark();
-}
-
-
} } // namespace v8::internal
#endif // V8_HEAP_INL_H_
diff --git a/deps/v8/src/heap-profiler.cc b/deps/v8/src/heap-profiler.cc
index 7e613e917..46c63c27c 100644
--- a/deps/v8/src/heap-profiler.cc
+++ b/deps/v8/src/heap-profiler.cc
@@ -114,7 +114,6 @@ HeapSnapshot* HeapProfiler::TakeSnapshotImpl(const char* name,
bool generation_completed = true;
switch (s_type) {
case HeapSnapshot::kFull: {
- HEAP->CollectAllGarbage(true);
HeapSnapshotGenerator generator(result, control);
generation_completed = generator.GenerateSnapshot();
break;
diff --git a/deps/v8/src/heap.cc b/deps/v8/src/heap.cc
index d0185930b..f948c6c88 100644
--- a/deps/v8/src/heap.cc
+++ b/deps/v8/src/heap.cc
@@ -36,13 +36,16 @@
#include "deoptimizer.h"
#include "global-handles.h"
#include "heap-profiler.h"
+#include "incremental-marking.h"
#include "liveobjectlist-inl.h"
#include "mark-compact.h"
#include "natives.h"
#include "objects-visiting.h"
+#include "objects-visiting-inl.h"
#include "runtime-profiler.h"
#include "scopeinfo.h"
#include "snapshot.h"
+#include "store-buffer.h"
#include "v8threads.h"
#include "vm-state-inl.h"
#if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
@@ -58,10 +61,6 @@ namespace v8 {
namespace internal {
-static const intptr_t kMinimumPromotionLimit = 2 * MB;
-static const intptr_t kMinimumAllocationLimit = 8 * MB;
-
-
static Mutex* gc_initializer_mutex = OS::CreateMutex();
@@ -70,27 +69,21 @@ Heap::Heap()
// semispace_size_ should be a power of 2 and old_generation_size_ should be
// a multiple of Page::kPageSize.
#if defined(ANDROID)
- reserved_semispace_size_(2*MB),
- max_semispace_size_(2*MB),
- initial_semispace_size_(128*KB),
- max_old_generation_size_(192*MB),
- max_executable_size_(max_old_generation_size_),
+#define LUMP_OF_MEMORY (128 * KB)
code_range_size_(0),
#elif defined(V8_TARGET_ARCH_X64)
- reserved_semispace_size_(16*MB),
- max_semispace_size_(16*MB),
- initial_semispace_size_(1*MB),
- max_old_generation_size_(1400*MB),
- max_executable_size_(256*MB),
+#define LUMP_OF_MEMORY (2 * MB)
code_range_size_(512*MB),
#else
- reserved_semispace_size_(8*MB),
- max_semispace_size_(8*MB),
- initial_semispace_size_(512*KB),
- max_old_generation_size_(700*MB),
- max_executable_size_(128*MB),
+#define LUMP_OF_MEMORY MB
code_range_size_(0),
#endif
+ reserved_semispace_size_(8 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
+ max_semispace_size_(8 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
+ initial_semispace_size_(Max(LUMP_OF_MEMORY, Page::kPageSize)),
+ max_old_generation_size_(700ul * LUMP_OF_MEMORY),
+ max_executable_size_(128l * LUMP_OF_MEMORY),
+
// Variables set based on semispace_size_ and old_generation_size_ in
// ConfigureHeap (survived_since_last_expansion_, external_allocation_limit_)
// Will be 4 * reserved_semispace_size_ to ensure that young
@@ -100,6 +93,7 @@ Heap::Heap()
always_allocate_scope_depth_(0),
linear_allocation_scope_depth_(0),
contexts_disposed_(0),
+ scan_on_scavenge_pages_(0),
new_space_(this),
old_pointer_space_(NULL),
old_data_space_(NULL),
@@ -109,7 +103,6 @@ Heap::Heap()
lo_space_(NULL),
gc_state_(NOT_IN_GC),
gc_post_processing_depth_(0),
- mc_count_(0),
ms_count_(0),
gc_count_(0),
unflattened_strings_length_(0),
@@ -119,12 +112,16 @@ Heap::Heap()
disallow_allocation_failure_(false),
debug_utils_(NULL),
#endif // DEBUG
+ new_space_high_promotion_mode_active_(false),
old_gen_promotion_limit_(kMinimumPromotionLimit),
old_gen_allocation_limit_(kMinimumAllocationLimit),
+ old_gen_limit_factor_(1),
+ size_of_old_gen_at_last_old_space_gc_(0),
external_allocation_limit_(0),
amount_of_external_allocated_memory_(0),
amount_of_external_allocated_memory_at_last_global_gc_(0),
old_gen_exhausted_(false),
+ store_buffer_rebuilder_(store_buffer()),
hidden_symbol_(NULL),
global_gc_prologue_callback_(NULL),
global_gc_epilogue_callback_(NULL),
@@ -141,12 +138,20 @@ Heap::Heap()
min_in_mutator_(kMaxInt),
alive_after_last_gc_(0),
last_gc_end_timestamp_(0.0),
- page_watermark_invalidated_mark_(1 << Page::WATERMARK_INVALIDATED),
+ store_buffer_(this),
+ marking_(this),
+ incremental_marking_(this),
number_idle_notifications_(0),
last_idle_notification_gc_count_(0),
last_idle_notification_gc_count_init_(false),
+ idle_notification_will_schedule_next_gc_(false),
+ mark_sweeps_since_idle_round_started_(0),
+ ms_count_at_last_idle_notification_(0),
+ gc_count_at_last_idle_gc_(0),
+ scavenges_since_last_idle_round_(kIdleScavengeThreshold),
+ promotion_queue_(this),
configured_(false),
- is_safe_to_read_maps_(true) {
+ chunks_queued_for_free_(NULL) {
// Allow build-time customization of the max semispace size. Building
// V8 with snapshots and a non-default max semispace size is much
// easier if you can define it as part of the build environment.
@@ -224,29 +229,10 @@ bool Heap::HasBeenSetup() {
int Heap::GcSafeSizeOfOldObject(HeapObject* object) {
- ASSERT(!HEAP->InNewSpace(object)); // Code only works for old objects.
- ASSERT(!HEAP->mark_compact_collector()->are_map_pointers_encoded());
- MapWord map_word = object->map_word();
- map_word.ClearMark();
- map_word.ClearOverflow();
- return object->SizeFromMap(map_word.ToMap());
-}
-
-
-int Heap::GcSafeSizeOfOldObjectWithEncodedMap(HeapObject* object) {
- ASSERT(!HEAP->InNewSpace(object)); // Code only works for old objects.
- ASSERT(HEAP->mark_compact_collector()->are_map_pointers_encoded());
- uint32_t marker = Memory::uint32_at(object->address());
- if (marker == MarkCompactCollector::kSingleFreeEncoding) {
- return kIntSize;
- } else if (marker == MarkCompactCollector::kMultiFreeEncoding) {
- return Memory::int_at(object->address() + kIntSize);
- } else {
- MapWord map_word = object->map_word();
- Address map_address = map_word.DecodeMapAddress(HEAP->map_space());
- Map* map = reinterpret_cast<Map*>(HeapObject::FromAddress(map_address));
- return object->SizeFromMap(map);
+ if (IntrusiveMarking::IsMarked(object)) {
+ return IntrusiveMarking::SizeOfMarkedObject(object);
}
+ return object->SizeFromMap(object->map());
}
@@ -400,6 +386,7 @@ void Heap::GarbageCollectionPrologue() {
#endif // DEBUG
LiveObjectList::GCPrologue();
+ store_buffer()->GCPrologue();
}
intptr_t Heap::SizeOfObjects() {
@@ -412,6 +399,7 @@ intptr_t Heap::SizeOfObjects() {
}
void Heap::GarbageCollectionEpilogue() {
+ store_buffer()->GCEpilogue();
LiveObjectList::GCEpilogue();
#ifdef DEBUG
allow_allocation(true);
@@ -443,13 +431,13 @@ void Heap::GarbageCollectionEpilogue() {
}
-void Heap::CollectAllGarbage(bool force_compaction) {
+void Heap::CollectAllGarbage(int flags) {
// Since we are ignoring the return value, the exact choice of space does
// not matter, so long as we do not specify NEW_SPACE, which would not
// cause a full GC.
- mark_compact_collector_.SetForceCompaction(force_compaction);
+ mark_compact_collector_.SetFlags(flags);
CollectGarbage(OLD_POINTER_SPACE);
- mark_compact_collector_.SetForceCompaction(false);
+ mark_compact_collector_.SetFlags(kNoGCFlags);
}
@@ -457,8 +445,6 @@ void Heap::CollectAllAvailableGarbage() {
// Since we are ignoring the return value, the exact choice of space does
// not matter, so long as we do not specify NEW_SPACE, which would not
// cause a full GC.
- mark_compact_collector()->SetForceCompaction(true);
-
// Major GC would invoke weak handle callbacks on weakly reachable
// handles, but won't collect weakly reachable objects until next
// major GC. Therefore if we collect aggressively and weak handle callback
@@ -467,13 +453,17 @@ void Heap::CollectAllAvailableGarbage() {
// Note: as weak callbacks can execute arbitrary code, we cannot
// hope that eventually there will be no weak callbacks invocations.
// Therefore stop recollecting after several attempts.
+ mark_compact_collector()->SetFlags(kMakeHeapIterableMask);
+ isolate_->compilation_cache()->Clear();
const int kMaxNumberOfAttempts = 7;
for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
if (!CollectGarbage(OLD_POINTER_SPACE, MARK_COMPACTOR)) {
break;
}
}
- mark_compact_collector()->SetForceCompaction(false);
+ mark_compact_collector()->SetFlags(kNoGCFlags);
+ new_space_.Shrink();
+ incremental_marking()->UncommitMarkingDeque();
}
@@ -490,6 +480,23 @@ bool Heap::CollectGarbage(AllocationSpace space, GarbageCollector collector) {
allocation_timeout_ = Max(6, FLAG_gc_interval);
#endif
+ if (collector == SCAVENGER && !incremental_marking()->IsStopped()) {
+ if (FLAG_trace_incremental_marking) {
+ PrintF("[IncrementalMarking] Scavenge during marking.\n");
+ }
+ }
+
+ if (collector == MARK_COMPACTOR &&
+ !mark_compact_collector()->PreciseSweepingRequired() &&
+ !incremental_marking()->IsStopped() &&
+ !incremental_marking()->should_hurry() &&
+ FLAG_incremental_marking_steps) {
+ if (FLAG_trace_incremental_marking) {
+ PrintF("[IncrementalMarking] Delaying MarkSweep.\n");
+ }
+ collector = SCAVENGER;
+ }
+
bool next_gc_likely_to_collect_more = false;
{ GCTracer tracer(this);
@@ -512,13 +519,24 @@ bool Heap::CollectGarbage(AllocationSpace space, GarbageCollector collector) {
GarbageCollectionEpilogue();
}
+ ASSERT(collector == SCAVENGER || incremental_marking()->IsStopped());
+ if (incremental_marking()->IsStopped()) {
+ if (incremental_marking()->WorthActivating() && NextGCIsLikelyToBeFull()) {
+ incremental_marking()->Start();
+ }
+ }
+
return next_gc_likely_to_collect_more;
}
void Heap::PerformScavenge() {
GCTracer tracer(this);
- PerformGarbageCollection(SCAVENGER, &tracer);
+ if (incremental_marking()->IsStopped()) {
+ PerformGarbageCollection(SCAVENGER, &tracer);
+ } else {
+ PerformGarbageCollection(MARK_COMPACTOR, &tracer);
+ }
}
@@ -531,7 +549,7 @@ class SymbolTableVerifier : public ObjectVisitor {
for (Object** p = start; p < end; p++) {
if ((*p)->IsHeapObject()) {
// Check that the symbol is actually a symbol.
- ASSERT((*p)->IsNull() || (*p)->IsUndefined() || (*p)->IsSymbol());
+ ASSERT((*p)->IsTheHole() || (*p)->IsUndefined() || (*p)->IsSymbol());
}
}
}
@@ -610,13 +628,6 @@ void Heap::EnsureFromSpaceIsCommitted() {
// Committing memory to from space failed.
// Try shrinking and try again.
- PagedSpaces spaces;
- for (PagedSpace* space = spaces.next();
- space != NULL;
- space = spaces.next()) {
- space->RelinkPageListInChunkOrder(true);
- }
-
Shrink();
if (new_space_.CommitFromSpaceIfNeeded()) return;
@@ -647,7 +658,10 @@ void Heap::ClearJSFunctionResultCaches() {
void Heap::ClearNormalizedMapCaches() {
- if (isolate_->bootstrapper()->IsActive()) return;
+ if (isolate_->bootstrapper()->IsActive() &&
+ !incremental_marking()->IsMarking()) {
+ return;
+ }
Object* context = global_contexts_list_;
while (!context->IsUndefined()) {
@@ -657,24 +671,6 @@ void Heap::ClearNormalizedMapCaches() {
}
-#ifdef DEBUG
-
-enum PageWatermarkValidity {
- ALL_VALID,
- ALL_INVALID
-};
-
-static void VerifyPageWatermarkValidity(PagedSpace* space,
- PageWatermarkValidity validity) {
- PageIterator it(space, PageIterator::PAGES_IN_USE);
- bool expected_value = (validity == ALL_VALID);
- while (it.has_next()) {
- Page* page = it.next();
- ASSERT(page->IsWatermarkValid() == expected_value);
- }
-}
-#endif
-
void Heap::UpdateSurvivalRateTrend(int start_new_space_size) {
double survival_rate =
(static_cast<double>(young_survivors_after_last_gc_) * 100) /
@@ -707,7 +703,9 @@ bool Heap::PerformGarbageCollection(GarbageCollector collector,
PROFILE(isolate_, CodeMovingGCEvent());
}
- VerifySymbolTable();
+ if (FLAG_verify_heap) {
+ VerifySymbolTable();
+ }
if (collector == MARK_COMPACTOR && global_gc_prologue_callback_) {
ASSERT(!allocation_allowed_);
GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
@@ -727,6 +725,13 @@ bool Heap::PerformGarbageCollection(GarbageCollector collector,
int start_new_space_size = Heap::new_space()->SizeAsInt();
+ if (IsHighSurvivalRate()) {
+ // We speed up the incremental marker if it is running so that it
+ // does not fall behind the rate of promotion, which would cause a
+ // constantly growing old space.
+ incremental_marking()->NotifyOfHighPromotionRate();
+ }
+
if (collector == MARK_COMPACTOR) {
// Perform mark-sweep with optional compaction.
MarkCompact(tracer);
@@ -736,11 +741,33 @@ bool Heap::PerformGarbageCollection(GarbageCollector collector,
UpdateSurvivalRateTrend(start_new_space_size);
- intptr_t old_gen_size = PromotedSpaceSize();
- old_gen_promotion_limit_ =
- old_gen_size + Max(kMinimumPromotionLimit, old_gen_size / 3);
- old_gen_allocation_limit_ =
- old_gen_size + Max(kMinimumAllocationLimit, old_gen_size / 2);
+ if (!new_space_high_promotion_mode_active_ &&
+ new_space_.Capacity() == new_space_.MaximumCapacity() &&
+ IsStableOrIncreasingSurvivalTrend() &&
+ IsHighSurvivalRate()) {
+    // Stable high survival rates even though the young generation is at
+    // maximum capacity indicate that most objects will be promoted.
+ // To decrease scavenger pauses and final mark-sweep pauses, we
+ // have to limit maximal capacity of the young generation.
+ new_space_high_promotion_mode_active_ = true;
+ if (FLAG_trace_gc) {
+ PrintF("Limited new space size due to high promotion rate: %d MB\n",
+ new_space_.InitialCapacity() / MB);
+ }
+ } else if (new_space_high_promotion_mode_active_ &&
+ IsDecreasingSurvivalTrend() &&
+ !IsHighSurvivalRate()) {
+    // A decreasing, low survival rate may indicate that the high promotion
+    // mode above is over and we should allow the young generation to grow
+    // again.
+ new_space_high_promotion_mode_active_ = false;
+ if (FLAG_trace_gc) {
+ PrintF("Unlimited new space size due to low promotion rate: %d MB\n",
+ new_space_.MaximumCapacity() / MB);
+ }
+ }
+
+ size_of_old_gen_at_last_old_space_gc_ = PromotedSpaceSize();
if (high_survival_rate_during_scavenges &&
IsStableOrIncreasingSurvivalTrend()) {
@@ -750,10 +777,16 @@ bool Heap::PerformGarbageCollection(GarbageCollector collector,
// In this case we aggressively raise old generation memory limits to
// postpone subsequent mark-sweep collection and thus trade memory
// space for the mutation speed.
- old_gen_promotion_limit_ *= 2;
- old_gen_allocation_limit_ *= 2;
+ old_gen_limit_factor_ = 2;
+ } else {
+ old_gen_limit_factor_ = 1;
}
+ old_gen_promotion_limit_ =
+ OldGenPromotionLimit(size_of_old_gen_at_last_old_space_gc_);
+ old_gen_allocation_limit_ =
+ OldGenAllocationLimit(size_of_old_gen_at_last_old_space_gc_);
+
old_gen_exhausted_ = false;
} else {
tracer_ = tracer;
@@ -763,6 +796,11 @@ bool Heap::PerformGarbageCollection(GarbageCollector collector,
UpdateSurvivalRateTrend(start_new_space_size);
}
+ if (new_space_high_promotion_mode_active_ &&
+ new_space_.Capacity() > new_space_.InitialCapacity()) {
+ new_space_.Shrink();
+ }
+
isolate_->counters()->objs_since_last_young()->Set(0);
gc_post_processing_depth_++;
@@ -782,9 +820,7 @@ bool Heap::PerformGarbageCollection(GarbageCollector collector,
amount_of_external_allocated_memory_;
}
- GCCallbackFlags callback_flags = tracer->is_compacting()
- ? kGCCallbackFlagCompacted
- : kNoGCCallbackFlags;
+ GCCallbackFlags callback_flags = kNoGCCallbackFlags;
for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
if (gc_type & gc_epilogue_callbacks_[i].gc_type) {
gc_epilogue_callbacks_[i].callback(gc_type, callback_flags);
@@ -796,7 +832,9 @@ bool Heap::PerformGarbageCollection(GarbageCollector collector,
GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
global_gc_epilogue_callback_();
}
- VerifySymbolTable();
+ if (FLAG_verify_heap) {
+ VerifySymbolTable();
+ }
return next_gc_likely_to_collect_more;
}
@@ -808,34 +846,24 @@ void Heap::MarkCompact(GCTracer* tracer) {
mark_compact_collector_.Prepare(tracer);
- bool is_compacting = mark_compact_collector_.IsCompacting();
-
- if (is_compacting) {
- mc_count_++;
- } else {
- ms_count_++;
- }
- tracer->set_full_gc_count(mc_count_ + ms_count_);
+ ms_count_++;
+ tracer->set_full_gc_count(ms_count_);
- MarkCompactPrologue(is_compacting);
+ MarkCompactPrologue();
- is_safe_to_read_maps_ = false;
mark_compact_collector_.CollectGarbage();
- is_safe_to_read_maps_ = true;
LOG(isolate_, ResourceEvent("markcompact", "end"));
gc_state_ = NOT_IN_GC;
- Shrink();
-
isolate_->counters()->objs_since_last_full()->Set(0);
contexts_disposed_ = 0;
}
-void Heap::MarkCompactPrologue(bool is_compacting) {
+void Heap::MarkCompactPrologue() {
// At any old GC clear the keyed lookup cache to enable collection of unused
// maps.
isolate_->keyed_lookup_cache()->Clear();
@@ -847,7 +875,8 @@ void Heap::MarkCompactPrologue(bool is_compacting) {
CompletelyClearInstanceofCache();
- if (is_compacting) FlushNumberStringCache();
+  // TODO(1605): Select a heuristic for flushing the NumberString cache with
+  // FlushNumberStringCache.
if (FLAG_cleanup_code_caches_at_gc) {
polymorphic_code_cache()->set_cache(undefined_value());
}
@@ -857,13 +886,8 @@ void Heap::MarkCompactPrologue(bool is_compacting) {
Object* Heap::FindCodeObject(Address a) {
- Object* obj = NULL; // Initialization to please compiler.
- { MaybeObject* maybe_obj = code_space_->FindObject(a);
- if (!maybe_obj->ToObject(&obj)) {
- obj = lo_space_->FindObject(a)->ToObjectUnchecked();
- }
- }
- return obj;
+ return isolate()->inner_pointer_to_code_cache()->
+ GcSafeFindCodeForInnerPointer(a);
}
@@ -911,23 +935,29 @@ static void VerifyNonPointerSpacePointers() {
// do not expect them.
VerifyNonPointerSpacePointersVisitor v;
HeapObjectIterator code_it(HEAP->code_space());
- for (HeapObject* object = code_it.next();
- object != NULL; object = code_it.next())
+ for (HeapObject* object = code_it.Next();
+ object != NULL; object = code_it.Next())
object->Iterate(&v);
- HeapObjectIterator data_it(HEAP->old_data_space());
- for (HeapObject* object = data_it.next();
- object != NULL; object = data_it.next())
- object->Iterate(&v);
+  // The old data space is normally swept conservatively, in which case the
+  // iterator does not work, so we normally skip the next bit.
+ if (!HEAP->old_data_space()->was_swept_conservatively()) {
+ HeapObjectIterator data_it(HEAP->old_data_space());
+ for (HeapObject* object = data_it.Next();
+ object != NULL; object = data_it.Next())
+ object->Iterate(&v);
+ }
}
#endif
void Heap::CheckNewSpaceExpansionCriteria() {
if (new_space_.Capacity() < new_space_.MaximumCapacity() &&
- survived_since_last_expansion_ > new_space_.Capacity()) {
- // Grow the size of new space if there is room to grow and enough
- // data has survived scavenge since the last expansion.
+ survived_since_last_expansion_ > new_space_.Capacity() &&
+ !new_space_high_promotion_mode_active_) {
+ // Grow the size of new space if there is room to grow, enough data
+ // has survived scavenge since the last expansion and we are not in
+ // high promotion mode.
new_space_.Grow();
survived_since_last_expansion_ = 0;
}
@@ -940,28 +970,106 @@ static bool IsUnscavengedHeapObject(Heap* heap, Object** p) {
}
-void Heap::Scavenge() {
-#ifdef DEBUG
- if (FLAG_enable_slow_asserts) VerifyNonPointerSpacePointers();
-#endif
+void Heap::ScavengeStoreBufferCallback(
+ Heap* heap,
+ MemoryChunk* page,
+ StoreBufferEvent event) {
+ heap->store_buffer_rebuilder_.Callback(page, event);
+}
+
+
+void StoreBufferRebuilder::Callback(MemoryChunk* page, StoreBufferEvent event) {
+ if (event == kStoreBufferStartScanningPagesEvent) {
+ start_of_current_page_ = NULL;
+ current_page_ = NULL;
+ } else if (event == kStoreBufferScanningPageEvent) {
+ if (current_page_ != NULL) {
+      // If this page already overflowed the store buffer during this iteration,
+      if (current_page_->scan_on_scavenge()) {
+        // then wipe out the entries that have been added for it.
+ store_buffer_->SetTop(start_of_current_page_);
+ } else if (store_buffer_->Top() - start_of_current_page_ >=
+ (store_buffer_->Limit() - store_buffer_->Top()) >> 2) {
+ // Did we find too many pointers in the previous page? The heuristic is
+        // that no page may take more than a quarter of the remaining slots
+        // in the store buffer.
+ current_page_->set_scan_on_scavenge(true);
+ store_buffer_->SetTop(start_of_current_page_);
+ } else {
+ // In this case the page we scanned took a reasonable number of slots in
+ // the store buffer. It has now been rehabilitated and is no longer
+ // marked scan_on_scavenge.
+ ASSERT(!current_page_->scan_on_scavenge());
+ }
+ }
+ start_of_current_page_ = store_buffer_->Top();
+ current_page_ = page;
+ } else if (event == kStoreBufferFullEvent) {
+ // The current page overflowed the store buffer again. Wipe out its entries
+ // in the store buffer and mark it scan-on-scavenge again. This may happen
+ // several times while scanning.
+ if (current_page_ == NULL) {
+ // Store Buffer overflowed while scanning promoted objects. These are not
+ // in any particular page, though they are likely to be clustered by the
+ // allocation routines.
+ store_buffer_->HandleFullness();
+ } else {
+ // Store Buffer overflowed while scanning a particular old space page for
+ // pointers to new space.
+ ASSERT(current_page_ == page);
+ ASSERT(page != NULL);
+ current_page_->set_scan_on_scavenge(true);
+ ASSERT(start_of_current_page_ != store_buffer_->Top());
+ store_buffer_->SetTop(start_of_current_page_);
+ }
+ } else {
+ UNREACHABLE();
+ }
+}
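To make the flagging heuristic concrete (numbers hypothetical): with 1000 slots remaining in the store buffer, a page contributing 300 entries crosses the quarter threshold and is flagged scan-on-scavenge.

#include <cstdio>

int main() {
  int entries_from_page = 300;  // Top() - start_of_current_page_
  int remaining_slots = 1000;   // Limit() - Top()
  bool flag_page = entries_from_page >= (remaining_slots >> 2);
  printf("scan_on_scavenge: %s\n", flag_page ? "yes" : "no");  // yes, 300 >= 250
  return 0;
}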
- gc_state_ = SCAVENGE;
- SwitchScavengingVisitorsTableIfProfilingWasEnabled();
+void PromotionQueue::Initialize() {
+  // Assumes that a NewSpacePage fits an exact number of promotion queue
+  // entries (where each is a pair of intptr_t). This allows us to simplify
+  // the test for when to switch pages.
+ ASSERT((Page::kPageSize - MemoryChunk::kBodyOffset) % (2 * kPointerSize)
+ == 0);
+ limit_ = reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceStart());
+ front_ = rear_ =
+ reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceEnd());
+ emergency_stack_ = NULL;
+ guard_ = false;
+}
+
- Page::FlipMeaningOfInvalidatedWatermarkFlag(this);
+void PromotionQueue::RelocateQueueHead() {
+ ASSERT(emergency_stack_ == NULL);
+
+ Page* p = Page::FromAllocationTop(reinterpret_cast<Address>(rear_));
+ intptr_t* head_start = rear_;
+ intptr_t* head_end =
+ Min(front_, reinterpret_cast<intptr_t*>(p->body_limit()));
+
+ int entries_count =
+ static_cast<int>(head_end - head_start) / kEntrySizeInWords;
+
+ emergency_stack_ = new List<Entry>(2 * entries_count);
+
+ while (head_start != head_end) {
+ int size = static_cast<int>(*(head_start++));
+ HeapObject* obj = reinterpret_cast<HeapObject*>(*(head_start++));
+ emergency_stack_->Add(Entry(obj, size));
+ }
+ rear_ = head_end;
+}
+
+
+void Heap::Scavenge() {
#ifdef DEBUG
- VerifyPageWatermarkValidity(old_pointer_space_, ALL_VALID);
- VerifyPageWatermarkValidity(map_space_, ALL_VALID);
+ if (FLAG_verify_heap) VerifyNonPointerSpacePointers();
#endif
- // We do not update an allocation watermark of the top page during linear
- // allocation to avoid overhead. So to maintain the watermark invariant
- // we have to manually cache the watermark and mark the top page as having an
- // invalid watermark. This guarantees that dirty regions iteration will use a
- // correct watermark even if a linear allocation happens.
- old_pointer_space_->FlushTopPageWatermark();
- map_space_->FlushTopPageWatermark();
+ gc_state_ = SCAVENGE;
// Implements Cheney's copying algorithm
LOG(isolate_, ResourceEvent("scavenge", "begin"));
@@ -974,6 +1082,12 @@ void Heap::Scavenge() {
CheckNewSpaceExpansionCriteria();
+ SelectScavengingVisitorsTable();
+
+ incremental_marking()->PrepareForScavenge();
+
+ AdvanceSweepers(static_cast<int>(new_space_.Size()));
+
// Flip the semispaces. After flipping, to space is empty, from space has
// live objects.
new_space_.Flip();
@@ -996,32 +1110,29 @@ void Heap::Scavenge() {
// for the addresses of promoted objects: every object promoted
// frees up its size in bytes from the top of the new space, and
// objects are at least one pointer in size.
- Address new_space_front = new_space_.ToSpaceLow();
- promotion_queue_.Initialize(new_space_.ToSpaceHigh());
+ Address new_space_front = new_space_.ToSpaceStart();
+ promotion_queue_.Initialize();
+
+#ifdef DEBUG
+ store_buffer()->Clean();
+#endif
- is_safe_to_read_maps_ = false;
ScavengeVisitor scavenge_visitor(this);
// Copy roots.
IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE);
- // Copy objects reachable from the old generation. By definition,
- // there are no intergenerational pointers in code or data spaces.
- IterateDirtyRegions(old_pointer_space_,
- &Heap::IteratePointersInDirtyRegion,
- &ScavengePointer,
- WATERMARK_CAN_BE_INVALID);
-
- IterateDirtyRegions(map_space_,
- &IteratePointersInDirtyMapsRegion,
- &ScavengePointer,
- WATERMARK_CAN_BE_INVALID);
-
- lo_space_->IterateDirtyRegions(&ScavengePointer);
+ // Copy objects reachable from the old generation.
+ {
+ StoreBufferRebuildScope scope(this,
+ store_buffer(),
+ &ScavengeStoreBufferCallback);
+ store_buffer()->IteratePointersToNewSpace(&ScavengeObject);
+ }
// Copy objects reachable from cells by scavenging cell values directly.
HeapObjectIterator cell_iterator(cell_space_);
- for (HeapObject* cell = cell_iterator.next();
- cell != NULL; cell = cell_iterator.next()) {
+ for (HeapObject* cell = cell_iterator.Next();
+ cell != NULL; cell = cell_iterator.Next()) {
if (cell->IsJSGlobalPropertyCell()) {
Address value_address =
reinterpret_cast<Address>(cell) +
@@ -1040,20 +1151,23 @@ void Heap::Scavenge() {
&scavenge_visitor);
new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
-
UpdateNewSpaceReferencesInExternalStringTable(
&UpdateNewSpaceReferenceInExternalStringTableEntry);
+ promotion_queue_.Destroy();
+
LiveObjectList::UpdateReferencesForScavengeGC();
isolate()->runtime_profiler()->UpdateSamplesAfterScavenge();
+ incremental_marking()->UpdateMarkingDequeAfterScavenge();
ASSERT(new_space_front == new_space_.top());
- is_safe_to_read_maps_ = true;
-
// Set age mark.
new_space_.set_age_mark(new_space_.top());
+ new_space_.LowerInlineAllocationLimit(
+ new_space_.inline_allocation_limit_step());
+
// Update how much has survived scavenge.
IncrementYoungSurvivorsCounter(static_cast<int>(
(PromotedSpaceSize() - survived_watermark) + new_space_.Size()));
@@ -1061,6 +1175,8 @@ void Heap::Scavenge() {
LOG(isolate_, ResourceEvent("scavenge", "end"));
gc_state_ = NOT_IN_GC;
+
+ scavenges_since_last_idle_round_++;
}
@@ -1081,7 +1197,9 @@ String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
void Heap::UpdateNewSpaceReferencesInExternalStringTable(
ExternalStringTableUpdaterCallback updater_func) {
- external_string_table_.Verify();
+ if (FLAG_verify_heap) {
+ external_string_table_.Verify();
+ }
if (external_string_table_.new_space_strings_.is_empty()) return;
@@ -1112,35 +1230,56 @@ void Heap::UpdateNewSpaceReferencesInExternalStringTable(
}
+void Heap::UpdateReferencesInExternalStringTable(
+ ExternalStringTableUpdaterCallback updater_func) {
+
+ // Update old space string references.
+ if (external_string_table_.old_space_strings_.length() > 0) {
+ Object** start = &external_string_table_.old_space_strings_[0];
+ Object** end = start + external_string_table_.old_space_strings_.length();
+ for (Object** p = start; p < end; ++p) *p = updater_func(this, p);
+ }
+
+ UpdateNewSpaceReferencesInExternalStringTable(updater_func);
+}
+
+
static Object* ProcessFunctionWeakReferences(Heap* heap,
Object* function,
WeakObjectRetainer* retainer) {
- Object* head = heap->undefined_value();
+ Object* undefined = heap->undefined_value();
+ Object* head = undefined;
JSFunction* tail = NULL;
Object* candidate = function;
- while (candidate != heap->undefined_value()) {
+ while (candidate != undefined) {
// Check whether to keep the candidate in the list.
JSFunction* candidate_function = reinterpret_cast<JSFunction*>(candidate);
Object* retain = retainer->RetainAs(candidate);
if (retain != NULL) {
- if (head == heap->undefined_value()) {
+ if (head == undefined) {
// First element in the list.
- head = candidate_function;
+ head = retain;
} else {
// Subsequent elements in the list.
ASSERT(tail != NULL);
- tail->set_next_function_link(candidate_function);
+ tail->set_next_function_link(retain);
}
// Retained function is new tail.
+ candidate_function = reinterpret_cast<JSFunction*>(retain);
tail = candidate_function;
+
+ ASSERT(retain->IsUndefined() || retain->IsJSFunction());
+
+ if (retain == undefined) break;
}
+
// Move to next element in the list.
candidate = candidate_function->next_function_link();
}
// Terminate the list if there are one or more elements.
if (tail != NULL) {
- tail->set_next_function_link(heap->undefined_value());
+ tail->set_next_function_link(undefined);
}
return head;
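
ProcessFunctionWeakReferences and ProcessWeakReferences below are two instances of the same weak-list filtering pass: walk a singly linked list, ask a retainer which nodes survive, and splice the survivors back together. A condensed sketch with a hypothetical Node type; as in the real code, `retain` may return a forwarded copy of the node, so the loop advances through the retained pointer:

    struct Node { Node* next; };

    // retain() returns the (possibly forwarded) node to keep, or NULL to
    // drop the node from the list entirely.
    Node* ProcessWeakList(Node* list, Node* (*retain)(Node*)) {
      Node* head = NULL;
      Node* tail = NULL;
      while (list != NULL) {
        Node* kept = retain(list);
        if (kept != NULL) {
          if (head == NULL) {
            head = kept;        // first survivor becomes the new head
          } else {
            tail->next = kept;  // splice after the previous survivor
          }
          tail = kept;
          list = kept->next;    // advance via the retained copy's link
        } else {
          list = list->next;    // dead node: skip over it
        }
      }
      if (tail != NULL) tail->next = NULL;  // terminate the rebuilt list
      return head;
    }
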
@@ -1148,28 +1287,32 @@ static Object* ProcessFunctionWeakReferences(Heap* heap,
void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
- Object* head = undefined_value();
+ Object* undefined = undefined_value();
+ Object* head = undefined;
Context* tail = NULL;
Object* candidate = global_contexts_list_;
- while (candidate != undefined_value()) {
+ while (candidate != undefined) {
// Check whether to keep the candidate in the list.
Context* candidate_context = reinterpret_cast<Context*>(candidate);
Object* retain = retainer->RetainAs(candidate);
if (retain != NULL) {
- if (head == undefined_value()) {
+ if (head == undefined) {
// First element in the list.
- head = candidate_context;
+ head = retain;
} else {
// Subsequent elements in the list.
ASSERT(tail != NULL);
tail->set_unchecked(this,
Context::NEXT_CONTEXT_LINK,
- candidate_context,
+ retain,
UPDATE_WRITE_BARRIER);
}
// Retained context is new tail.
+ candidate_context = reinterpret_cast<Context*>(retain);
tail = candidate_context;
+ if (retain == undefined) break;
+
// Process the weak list of optimized functions for the context.
Object* function_list_head =
ProcessFunctionWeakReferences(
@@ -1181,6 +1324,7 @@ void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
function_list_head,
UPDATE_WRITE_BARRIER);
}
+
// Move to next element in the list.
candidate = candidate_context->get(Context::NEXT_CONTEXT_LINK);
}
@@ -1212,35 +1356,45 @@ class NewSpaceScavenger : public StaticNewSpaceVisitor<NewSpaceScavenger> {
Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
Address new_space_front) {
do {
- ASSERT(new_space_front <= new_space_.top());
-
+ SemiSpace::AssertValidRange(new_space_front, new_space_.top());
// The addresses new_space_front and new_space_.top() define a
// queue of unprocessed copied objects. Process them until the
// queue is empty.
- while (new_space_front < new_space_.top()) {
- HeapObject* object = HeapObject::FromAddress(new_space_front);
- new_space_front += NewSpaceScavenger::IterateBody(object->map(), object);
+ while (new_space_front != new_space_.top()) {
+ if (!NewSpacePage::IsAtEnd(new_space_front)) {
+ HeapObject* object = HeapObject::FromAddress(new_space_front);
+ new_space_front +=
+ NewSpaceScavenger::IterateBody(object->map(), object);
+ } else {
+ new_space_front =
+ NewSpacePage::FromLimit(new_space_front)->next_page()->body();
+ }
}
// Promote and process all the to-be-promoted objects.
- while (!promotion_queue_.is_empty()) {
- HeapObject* target;
- int size;
- promotion_queue_.remove(&target, &size);
-
- // Promoted object might be already partially visited
- // during dirty regions iteration. Thus we search specificly
- // for pointers to from semispace instead of looking for pointers
- // to new space.
- ASSERT(!target->IsMap());
- IterateAndMarkPointersToFromSpace(target->address(),
- target->address() + size,
- &ScavengePointer);
+ {
+ StoreBufferRebuildScope scope(this,
+ store_buffer(),
+ &ScavengeStoreBufferCallback);
+ while (!promotion_queue()->is_empty()) {
+ HeapObject* target;
+ int size;
+ promotion_queue()->remove(&target, &size);
+
+ // A promoted object might already be partially visited during
+ // old space pointer iteration. Thus we search specifically for
+ // pointers to the from semispace instead of looking for pointers
+ // to new space.
+ ASSERT(!target->IsMap());
+ IterateAndMarkPointersToFromSpace(target->address(),
+ target->address() + size,
+ &ScavengeObject);
+ }
}
// Take another spin if there are now unswept objects in new space
// (there are currently no more unswept promoted objects).
- } while (new_space_front < new_space_.top());
+ } while (new_space_front != new_space_.top());
return new_space_front;
}
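
DoScavenge is the classic Cheney scheme: the region between new_space_front and the allocation top is itself the work queue, since every object copied into to-space lands at the top and thereby extends the queue. A condensed sketch of that loop over a hypothetical flat to-space (the page hopping for the new paged semispaces above is elided, and VisitBody stands in for the scavenging visitor):

    typedef unsigned char* Address;

    // Hypothetical visitor: scavenges the pointers inside the object at
    // `front`, possibly copying more objects to the allocation top, and
    // returns the object's size.
    extern int VisitBody(Address front);
    extern Address* allocation_top;  // to-space bump pointer

    Address DrainCopiedObjects(Address front) {
      // Everything between `front` and the top is copied but unscanned.
      while (front != *allocation_top) {
        front += VisitBody(front);  // scanning may push the top further out
      }
      return front;
    }
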
@@ -1252,26 +1406,11 @@ enum LoggingAndProfiling {
};
-typedef void (*ScavengingCallback)(Map* map,
- HeapObject** slot,
- HeapObject* object);
-
+enum MarksHandling { TRANSFER_MARKS, IGNORE_MARKS };
-static Atomic32 scavenging_visitors_table_mode_;
-static VisitorDispatchTable<ScavengingCallback> scavenging_visitors_table_;
-
-
-INLINE(static void DoScavengeObject(Map* map,
- HeapObject** slot,
- HeapObject* obj));
-
-void DoScavengeObject(Map* map, HeapObject** slot, HeapObject* obj) {
- scavenging_visitors_table_.GetVisitor(map)(map, slot, obj);
-}
-
-
-template<LoggingAndProfiling logging_and_profiling_mode>
+template<MarksHandling marks_handling,
+ LoggingAndProfiling logging_and_profiling_mode>
class ScavengingVisitor : public StaticVisitorBase {
public:
static void Initialize() {
@@ -1306,9 +1445,13 @@ class ScavengingVisitor : public StaticVisitorBase {
&ObjectEvacuationStrategy<POINTER_OBJECT>::
Visit);
- table_.Register(kVisitJSFunction,
- &ObjectEvacuationStrategy<POINTER_OBJECT>::
- template VisitSpecialized<JSFunction::kSize>);
+ if (marks_handling == IGNORE_MARKS) {
+ table_.Register(kVisitJSFunction,
+ &ObjectEvacuationStrategy<POINTER_OBJECT>::
+ template VisitSpecialized<JSFunction::kSize>);
+ } else {
+ table_.Register(kVisitJSFunction, &EvacuateJSFunction);
+ }
table_.RegisterSpecializations<ObjectEvacuationStrategy<DATA_OBJECT>,
kVisitDataObject,
@@ -1349,10 +1492,10 @@ class ScavengingVisitor : public StaticVisitorBase {
// Helper function used by CopyObject to copy a source object to an
// allocated target object and update the forwarding pointer in the source
// object.
- INLINE(static HeapObject* MigrateObject(Heap* heap,
- HeapObject* source,
- HeapObject* target,
- int size)) {
+ INLINE(static void MigrateObject(Heap* heap,
+ HeapObject* source,
+ HeapObject* target,
+ int size)) {
// Copy the content of source to target.
heap->CopyBlock(target->address(), source->address(), size);
@@ -1373,26 +1516,30 @@ class ScavengingVisitor : public StaticVisitorBase {
}
}
- return target;
+ if (marks_handling == TRANSFER_MARKS) {
+ if (Marking::TransferColor(source, target)) {
+ MemoryChunk::IncrementLiveBytes(target->address(), size);
+ }
+ }
}
-
template<ObjectContents object_contents, SizeRestriction size_restriction>
static inline void EvacuateObject(Map* map,
HeapObject** slot,
HeapObject* object,
int object_size) {
- ASSERT((size_restriction != SMALL) ||
- (object_size <= Page::kMaxHeapObjectSize));
- ASSERT(object->Size() == object_size);
+ SLOW_ASSERT((size_restriction != SMALL) ||
+ (object_size <= Page::kMaxHeapObjectSize));
+ SLOW_ASSERT(object->Size() == object_size);
- Heap* heap = map->heap();
+ Heap* heap = map->GetHeap();
if (heap->ShouldBePromoted(object->address(), object_size)) {
MaybeObject* maybe_result;
if ((size_restriction != SMALL) &&
(object_size > Page::kMaxHeapObjectSize)) {
- maybe_result = heap->lo_space()->AllocateRawFixedArray(object_size);
+ maybe_result = heap->lo_space()->AllocateRaw(object_size,
+ NOT_EXECUTABLE);
} else {
if (object_contents == DATA_OBJECT) {
maybe_result = heap->old_data_space()->AllocateRaw(object_size);
@@ -1404,7 +1551,12 @@ class ScavengingVisitor : public StaticVisitorBase {
Object* result = NULL; // Initialization to please compiler.
if (maybe_result->ToObject(&result)) {
HeapObject* target = HeapObject::cast(result);
- *slot = MigrateObject(heap, object , target, object_size);
+
+ // Order is important: the slot might be inside the target if the
+ // target was allocated over a dead object and the slot comes from
+ // the store buffer.
+ *slot = target;
+ MigrateObject(heap, object, target, object_size);
if (object_contents == POINTER_OBJECT) {
heap->promotion_queue()->insert(target, object_size);
@@ -1414,13 +1566,42 @@ class ScavengingVisitor : public StaticVisitorBase {
return;
}
}
- Object* result =
- heap->new_space()->AllocateRaw(object_size)->ToObjectUnchecked();
- *slot = MigrateObject(heap, object, HeapObject::cast(result), object_size);
+ MaybeObject* allocation = heap->new_space()->AllocateRaw(object_size);
+ heap->promotion_queue()->SetNewLimit(heap->new_space()->top());
+ Object* result = allocation->ToObjectUnchecked();
+ HeapObject* target = HeapObject::cast(result);
+
+ // Order is important: the slot might be inside the target if the
+ // target was allocated over a dead object and the slot comes from
+ // the store buffer.
+ *slot = target;
+ MigrateObject(heap, object, target, object_size);
return;
}
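
The `*slot = target` store ahead of MigrateObject in EvacuateObject is deliberate: a store-buffer slot can lie inside the freshly allocated target when the target sits on top of a dead object that contained the slot. Writing the forwarding pointer first means the subsequent copy can only clobber a dead slot; the reverse order would instead smash one word of the just-migrated object. A hypothetical illustration of the constraint:

    #include <cstring>

    struct HeapObject;

    // `slot` may point into [target, target + size) when the target was
    // allocated over a dead object. Updating the slot first is safe: if the
    // copy then overwrites it, the slot was dead anyway. Copying first and
    // updating the slot afterwards could corrupt the migrated object.
    void EvacuateSketch(HeapObject** slot, HeapObject* source,
                        HeapObject* target, int size) {
      *slot = target;
      std::memcpy(target, source, size);
    }
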
+ static inline void EvacuateJSFunction(Map* map,
+ HeapObject** slot,
+ HeapObject* object) {
+ ObjectEvacuationStrategy<POINTER_OBJECT>::
+ template VisitSpecialized<JSFunction::kSize>(map, slot, object);
+
+ HeapObject* target = *slot;
+ MarkBit mark_bit = Marking::MarkBitFrom(target);
+ if (Marking::IsBlack(mark_bit)) {
+ // This object is black and it might not be rescanned by the marker.
+ // We should explicitly record the code entry slot for compaction
+ // because promotion queue processing (IterateAndMarkPointersToFromSpace)
+ // will miss it as it is not HeapObject-tagged.
+ Address code_entry_slot =
+ target->address() + JSFunction::kCodeEntryOffset;
+ Code* code = Code::cast(Code::GetObjectFromEntryAddress(code_entry_slot));
+ map->GetHeap()->mark_compact_collector()->
+ RecordCodeEntrySlot(code_entry_slot, code);
+ }
+ }
+
+
static inline void EvacuateFixedArray(Map* map,
HeapObject** slot,
HeapObject* object) {
@@ -1479,14 +1660,17 @@ class ScavengingVisitor : public StaticVisitorBase {
HeapObject* object) {
ASSERT(IsShortcutCandidate(map->instance_type()));
- if (ConsString::cast(object)->unchecked_second() ==
- map->heap()->empty_string()) {
+ Heap* heap = map->GetHeap();
+
+ if (marks_handling == IGNORE_MARKS &&
+ ConsString::cast(object)->unchecked_second() ==
+ heap->empty_string()) {
HeapObject* first =
HeapObject::cast(ConsString::cast(object)->unchecked_first());
*slot = first;
- if (!map->heap()->InNewSpace(first)) {
+ if (!heap->InNewSpace(first)) {
object->set_map_word(MapWord::FromForwardingAddress(first));
return;
}
@@ -1500,7 +1684,7 @@ class ScavengingVisitor : public StaticVisitorBase {
return;
}
- DoScavengeObject(first->map(), slot, first);
+ heap->DoScavengeObject(first->map(), slot, first);
object->set_map_word(MapWord::FromForwardingAddress(*slot));
return;
}
@@ -1531,55 +1715,70 @@ class ScavengingVisitor : public StaticVisitorBase {
};
-template<LoggingAndProfiling logging_and_profiling_mode>
+template<MarksHandling marks_handling,
+ LoggingAndProfiling logging_and_profiling_mode>
VisitorDispatchTable<ScavengingCallback>
- ScavengingVisitor<logging_and_profiling_mode>::table_;
+ ScavengingVisitor<marks_handling, logging_and_profiling_mode>::table_;
static void InitializeScavengingVisitorsTables() {
- ScavengingVisitor<LOGGING_AND_PROFILING_DISABLED>::Initialize();
- ScavengingVisitor<LOGGING_AND_PROFILING_ENABLED>::Initialize();
- scavenging_visitors_table_.CopyFrom(
- ScavengingVisitor<LOGGING_AND_PROFILING_DISABLED>::GetTable());
- scavenging_visitors_table_mode_ = LOGGING_AND_PROFILING_DISABLED;
+ ScavengingVisitor<TRANSFER_MARKS,
+ LOGGING_AND_PROFILING_DISABLED>::Initialize();
+ ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_DISABLED>::Initialize();
+ ScavengingVisitor<TRANSFER_MARKS,
+ LOGGING_AND_PROFILING_ENABLED>::Initialize();
+ ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_ENABLED>::Initialize();
}
-void Heap::SwitchScavengingVisitorsTableIfProfilingWasEnabled() {
- if (scavenging_visitors_table_mode_ == LOGGING_AND_PROFILING_ENABLED) {
- // Table was already updated by some isolate.
- return;
- }
-
- if (isolate()->logger()->is_logging() |
+void Heap::SelectScavengingVisitorsTable() {
+ bool logging_and_profiling =
+ isolate()->logger()->is_logging() ||
CpuProfiler::is_profiling(isolate()) ||
(isolate()->heap_profiler() != NULL &&
- isolate()->heap_profiler()->is_profiling())) {
- // If one of the isolates is doing scavenge at this moment of time
- // it might see this table in an inconsitent state when
- // some of the callbacks point to
- // ScavengingVisitor<LOGGING_AND_PROFILING_ENABLED> and others
- // to ScavengingVisitor<LOGGING_AND_PROFILING_DISABLED>.
- // However this does not lead to any bugs as such isolate does not have
- // profiling enabled and any isolate with enabled profiling is guaranteed
- // to see the table in the consistent state.
- scavenging_visitors_table_.CopyFrom(
- ScavengingVisitor<LOGGING_AND_PROFILING_ENABLED>::GetTable());
+ isolate()->heap_profiler()->is_profiling());
- // We use Release_Store to prevent reordering of this write before writes
- // to the table.
- Release_Store(&scavenging_visitors_table_mode_,
- LOGGING_AND_PROFILING_ENABLED);
+ if (!incremental_marking()->IsMarking()) {
+ if (!logging_and_profiling) {
+ scavenging_visitors_table_.CopyFrom(
+ ScavengingVisitor<IGNORE_MARKS,
+ LOGGING_AND_PROFILING_DISABLED>::GetTable());
+ } else {
+ scavenging_visitors_table_.CopyFrom(
+ ScavengingVisitor<IGNORE_MARKS,
+ LOGGING_AND_PROFILING_ENABLED>::GetTable());
+ }
+ } else {
+ if (!logging_and_profiling) {
+ scavenging_visitors_table_.CopyFrom(
+ ScavengingVisitor<TRANSFER_MARKS,
+ LOGGING_AND_PROFILING_DISABLED>::GetTable());
+ } else {
+ scavenging_visitors_table_.CopyFrom(
+ ScavengingVisitor<TRANSFER_MARKS,
+ LOGGING_AND_PROFILING_ENABLED>::GetTable());
+ }
+
+ if (incremental_marking()->IsCompacting()) {
+ // When compacting, forbid short-circuiting of cons strings.
+ // The scavenging code relies on the fact that a new space object
+ // can't be evacuated into an evacuation candidate, but
+ // short-circuiting violates this assumption.
+ scavenging_visitors_table_.Register(
+ StaticVisitorBase::kVisitShortcutCandidate,
+ scavenging_visitors_table_.GetVisitorById(
+ StaticVisitorBase::kVisitConsString));
+ }
}
}
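
SelectScavengingVisitorsTable picks one of four visitor tables that were instantiated at compile time, so the per-object hot path never branches on marking or profiling state. A minimal sketch of that specialization pattern with hypothetical names:

    enum MarksHandling { TRANSFER_MARKS, IGNORE_MARKS };

    template <MarksHandling marks>
    struct VisitorSketch {
      static void Visit() {
        // Resolved at compile time: each instantiation contains only the
        // code for its own mode, with no runtime branch per object.
        if (marks == TRANSFER_MARKS) {
          // ... also carry the mark bit over to the new copy ...
        }
      }
    };

    typedef void (*Callback)();

    // The mode is chosen once per scavenge, not once per object.
    Callback Select(bool incremental_marking_active) {
      return incremental_marking_active
          ? &VisitorSketch<TRANSFER_MARKS>::Visit
          : &VisitorSketch<IGNORE_MARKS>::Visit;
    }
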
void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
- ASSERT(HEAP->InFromSpace(object));
+ SLOW_ASSERT(HEAP->InFromSpace(object));
MapWord first_word = object->map_word();
- ASSERT(!first_word.IsForwardingAddress());
+ SLOW_ASSERT(!first_word.IsForwardingAddress());
Map* map = first_word.ToMap();
- DoScavengeObject(map, p, object);
+ map->GetHeap()->DoScavengeObject(map, p, object);
}
@@ -1605,29 +1804,31 @@ MaybeObject* Heap::AllocatePartialMap(InstanceType instance_type,
}
-MaybeObject* Heap::AllocateMap(InstanceType instance_type, int instance_size) {
+MaybeObject* Heap::AllocateMap(InstanceType instance_type,
+ int instance_size,
+ ElementsKind elements_kind) {
Object* result;
{ MaybeObject* maybe_result = AllocateRawMap();
if (!maybe_result->ToObject(&result)) return maybe_result;
}
Map* map = reinterpret_cast<Map*>(result);
- map->set_map(meta_map());
+ map->set_map_unsafe(meta_map());
map->set_instance_type(instance_type);
map->set_visitor_id(
StaticVisitorBase::GetVisitorId(instance_type, instance_size));
- map->set_prototype(null_value());
- map->set_constructor(null_value());
+ map->set_prototype(null_value(), SKIP_WRITE_BARRIER);
+ map->set_constructor(null_value(), SKIP_WRITE_BARRIER);
map->set_instance_size(instance_size);
map->set_inobject_properties(0);
map->set_pre_allocated_property_fields(0);
map->init_instance_descriptors();
- map->set_code_cache(empty_fixed_array());
- map->set_prototype_transitions(empty_fixed_array());
+ map->set_code_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
+ map->set_prototype_transitions(empty_fixed_array(), SKIP_WRITE_BARRIER);
map->set_unused_property_fields(0);
map->set_bit_field(0);
map->set_bit_field2(1 << Map::kIsExtensible);
- map->set_elements_kind(FAST_ELEMENTS);
+ map->set_elements_kind(elements_kind);
// If the map object is aligned, fill the padding area with Smi 0 objects.
if (Map::kPadStart < Map::kSize) {
@@ -1645,8 +1846,8 @@ MaybeObject* Heap::AllocateCodeCache() {
if (!maybe_result->ToObject(&result)) return maybe_result;
}
CodeCache* code_cache = CodeCache::cast(result);
- code_cache->set_default_cache(empty_fixed_array());
- code_cache->set_normal_type_cache(undefined_value());
+ code_cache->set_default_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
+ code_cache->set_normal_type_cache(undefined_value(), SKIP_WRITE_BARRIER);
return code_cache;
}
@@ -1707,12 +1908,19 @@ bool Heap::CreateInitialMaps() {
}
set_empty_fixed_array(FixedArray::cast(obj));
- { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_DATA_SPACE);
+ { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_POINTER_SPACE);
if (!maybe_obj->ToObject(&obj)) return false;
}
- set_null_value(obj);
+ set_null_value(Oddball::cast(obj));
Oddball::cast(obj)->set_kind(Oddball::kNull);
+ { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_POINTER_SPACE);
+ if (!maybe_obj->ToObject(&obj)) return false;
+ }
+ set_undefined_value(Oddball::cast(obj));
+ Oddball::cast(obj)->set_kind(Oddball::kUndefined);
+ ASSERT(!InNewSpace(undefined_value()));
+
// Allocate the empty descriptor array.
{ MaybeObject* maybe_obj = AllocateEmptyFixedArray();
if (!maybe_obj->ToObject(&obj)) return false;
@@ -1753,7 +1961,7 @@ bool Heap::CreateInitialMaps() {
AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
if (!maybe_obj->ToObject(&obj)) return false;
}
- set_serialized_scope_info_map(Map::cast(obj));
+ set_scope_info_map(Map::cast(obj));
{ MaybeObject* maybe_obj = AllocateMap(HEAP_NUMBER_TYPE, HeapNumber::kSize);
if (!maybe_obj->ToObject(&obj)) return false;
@@ -1798,6 +2006,12 @@ bool Heap::CreateInitialMaps() {
}
set_byte_array_map(Map::cast(obj));
+ { MaybeObject* maybe_obj =
+ AllocateMap(FREE_SPACE_TYPE, kVariableSizeSentinel);
+ if (!maybe_obj->ToObject(&obj)) return false;
+ }
+ set_free_space_map(Map::cast(obj));
+
{ MaybeObject* maybe_obj = AllocateByteArray(0, TENURED);
if (!maybe_obj->ToObject(&obj)) return false;
}
@@ -1959,7 +2173,7 @@ MaybeObject* Heap::AllocateHeapNumber(double value, PretenureFlag pretenure) {
if (!maybe_result->ToObject(&result)) return maybe_result;
}
- HeapObject::cast(result)->set_map(heap_number_map());
+ HeapObject::cast(result)->set_map_unsafe(heap_number_map());
HeapNumber::cast(result)->set_value(value);
return result;
}
@@ -1977,7 +2191,7 @@ MaybeObject* Heap::AllocateHeapNumber(double value) {
{ MaybeObject* maybe_result = new_space_.AllocateRaw(HeapNumber::kSize);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
- HeapObject::cast(result)->set_map(heap_number_map());
+ HeapObject::cast(result)->set_map_unsafe(heap_number_map());
HeapNumber::cast(result)->set_value(value);
return result;
}
@@ -1988,7 +2202,7 @@ MaybeObject* Heap::AllocateJSGlobalPropertyCell(Object* value) {
{ MaybeObject* maybe_result = AllocateRawCell();
if (!maybe_result->ToObject(&result)) return maybe_result;
}
- HeapObject::cast(result)->set_map(global_property_cell_map());
+ HeapObject::cast(result)->set_map_unsafe(global_property_cell_map());
JSGlobalPropertyCell::cast(result)->set_value(value);
return result;
}
@@ -1998,7 +2212,7 @@ MaybeObject* Heap::CreateOddball(const char* to_string,
Object* to_number,
byte kind) {
Object* result;
- { MaybeObject* maybe_result = Allocate(oddball_map(), OLD_DATA_SPACE);
+ { MaybeObject* maybe_result = Allocate(oddball_map(), OLD_POINTER_SPACE);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
return Oddball::cast(result)->Initialize(to_string, to_number, kind);
@@ -2011,7 +2225,13 @@ bool Heap::CreateApiObjects() {
{ MaybeObject* maybe_obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
if (!maybe_obj->ToObject(&obj)) return false;
}
- set_neander_map(Map::cast(obj));
+ // Don't use Smi-only elements optimizations for objects with the neander
+ // map. There are too many cases where element values are set directly,
+ // with no bottleneck to trap the Smi-only -> fast elements transition,
+ // and there appears to be no benefit in optimizing this case.
+ Map* new_neander_map = Map::cast(obj);
+ new_neander_map->set_elements_kind(FAST_ELEMENTS);
+ set_neander_map(new_neander_map);
{ MaybeObject* maybe_obj = AllocateJSObjectFromMap(neander_map());
if (!maybe_obj->ToObject(&obj)) return false;
@@ -2056,6 +2276,12 @@ void Heap::CreateFixedStubs() {
// To workaround the problem, make separate functions without inlining.
Heap::CreateJSEntryStub();
Heap::CreateJSConstructEntryStub();
+
+ // Create stubs that are known to be needed ahead of time, so we don't
+ // unexpectedly have to create them during the creation of another stub.
+ // Stub creation mixes raw pointers and handles in an unsafe manner, so
+ // we cannot create stubs while we are creating stubs.
+ CodeStub::GenerateStubsAheadOfTime();
}
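
GenerateStubsAheadOfTime pre-populates the stub cache so that later stub generation never needs to create another stub recursively, which is unsafe while raw pointers and handles are mixed. A hypothetical cache sketch of the same warm-up pattern (StubCacheSketch and GenerateStub are illustrative, not V8's CodeStub machinery):

    #include <cassert>
    #include <map>

    struct Code;
    extern Code* GenerateStub(int key);  // hypothetical codegen entry point

    class StubCacheSketch {
     public:
      StubCacheSketch() : generating_(false) {}

      Code* Get(int key) {
        std::map<int, Code*>::iterator it = cache_.find(key);
        if (it != cache_.end()) return it->second;
        // Creating a stub while another is being created is the unsafe
        // case; pre-generating needed stubs keeps this from happening.
        assert(!generating_);
        generating_ = true;
        Code* code = GenerateStub(key);
        generating_ = false;
        return cache_[key] = code;
      }

      // Warm the cache up front, before any nested generation can occur.
      void GenerateAheadOfTime(const int* keys, int count) {
        for (int i = 0; i < count; ++i) Get(keys[i]);
      }

     private:
      std::map<int, Code*> cache_;
      bool generating_;
    };
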
@@ -2066,20 +2292,18 @@ bool Heap::CreateInitialObjects() {
{ MaybeObject* maybe_obj = AllocateHeapNumber(-0.0, TENURED);
if (!maybe_obj->ToObject(&obj)) return false;
}
- set_minus_zero_value(obj);
+ set_minus_zero_value(HeapNumber::cast(obj));
ASSERT(signbit(minus_zero_value()->Number()) != 0);
{ MaybeObject* maybe_obj = AllocateHeapNumber(OS::nan_value(), TENURED);
if (!maybe_obj->ToObject(&obj)) return false;
}
- set_nan_value(obj);
+ set_nan_value(HeapNumber::cast(obj));
- { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_DATA_SPACE);
+ { MaybeObject* maybe_obj = AllocateHeapNumber(V8_INFINITY, TENURED);
if (!maybe_obj->ToObject(&obj)) return false;
}
- set_undefined_value(obj);
- Oddball::cast(obj)->set_kind(Oddball::kUndefined);
- ASSERT(!InNewSpace(undefined_value()));
+ set_infinity_value(HeapNumber::cast(obj));
// Allocate initial symbol table.
{ MaybeObject* maybe_obj = SymbolTable::Allocate(kInitialSymbolTableSize);
@@ -2088,19 +2312,17 @@ bool Heap::CreateInitialObjects() {
// Don't use set_symbol_table() due to asserts.
roots_[kSymbolTableRootIndex] = obj;
- // Assign the print strings for oddballs after creating symboltable.
- Object* symbol;
- { MaybeObject* maybe_symbol = LookupAsciiSymbol("undefined");
- if (!maybe_symbol->ToObject(&symbol)) return false;
+ // Finish initializing oddballs after creating the symbol table.
+ { MaybeObject* maybe_obj =
+ undefined_value()->Initialize("undefined",
+ nan_value(),
+ Oddball::kUndefined);
+ if (!maybe_obj->ToObject(&obj)) return false;
}
- Oddball::cast(undefined_value())->set_to_string(String::cast(symbol));
- Oddball::cast(undefined_value())->set_to_number(nan_value());
- // Allocate the null_value
+ // Initialize the null_value.
{ MaybeObject* maybe_obj =
- Oddball::cast(null_value())->Initialize("null",
- Smi::FromInt(0),
- Oddball::kNull);
+ null_value()->Initialize("null", Smi::FromInt(0), Oddball::kNull);
if (!maybe_obj->ToObject(&obj)) return false;
}
@@ -2109,43 +2331,51 @@ bool Heap::CreateInitialObjects() {
Oddball::kTrue);
if (!maybe_obj->ToObject(&obj)) return false;
}
- set_true_value(obj);
+ set_true_value(Oddball::cast(obj));
{ MaybeObject* maybe_obj = CreateOddball("false",
Smi::FromInt(0),
Oddball::kFalse);
if (!maybe_obj->ToObject(&obj)) return false;
}
- set_false_value(obj);
+ set_false_value(Oddball::cast(obj));
{ MaybeObject* maybe_obj = CreateOddball("hole",
Smi::FromInt(-1),
Oddball::kTheHole);
if (!maybe_obj->ToObject(&obj)) return false;
}
- set_the_hole_value(obj);
+ set_the_hole_value(Oddball::cast(obj));
{ MaybeObject* maybe_obj = CreateOddball("arguments_marker",
- Smi::FromInt(-4),
+ Smi::FromInt(-2),
Oddball::kArgumentMarker);
if (!maybe_obj->ToObject(&obj)) return false;
}
- set_arguments_marker(obj);
+ set_arguments_marker(Oddball::cast(obj));
{ MaybeObject* maybe_obj = CreateOddball("no_interceptor_result_sentinel",
- Smi::FromInt(-2),
+ Smi::FromInt(-3),
Oddball::kOther);
if (!maybe_obj->ToObject(&obj)) return false;
}
set_no_interceptor_result_sentinel(obj);
{ MaybeObject* maybe_obj = CreateOddball("termination_exception",
- Smi::FromInt(-3),
+ Smi::FromInt(-4),
Oddball::kOther);
if (!maybe_obj->ToObject(&obj)) return false;
}
set_termination_exception(obj);
+ { MaybeObject* maybe_obj = CreateOddball("frame_alignment_marker",
+ Smi::FromInt(-5),
+ Oddball::kOther);
+ if (!maybe_obj->ToObject(&obj)) return false;
+ }
+ set_frame_alignment_marker(Oddball::cast(obj));
+ STATIC_ASSERT(Oddball::kLeastHiddenOddballNumber == -5);
+
// Allocate the empty string.
{ MaybeObject* maybe_obj = AllocateRawAsciiString(0, TENURED);
if (!maybe_obj->ToObject(&obj)) return false;
@@ -2422,6 +2652,15 @@ MaybeObject* Heap::NumberToString(Object* number,
}
+MaybeObject* Heap::Uint32ToString(uint32_t value,
+ bool check_number_string_cache) {
+ Object* number;
+ MaybeObject* maybe = NumberFromUint32(value);
+ if (!maybe->To<Object>(&number)) return maybe;
+ return NumberToString(number, check_number_string_cache);
+}
+
+
Map* Heap::MapForExternalArrayType(ExternalArrayType array_type) {
return Map::cast(roots_[RootIndexForExternalArrayType(array_type)]);
}
@@ -2480,12 +2719,10 @@ MaybeObject* Heap::AllocateForeign(Address address, PretenureFlag pretenure) {
// Statically ensure that it is safe to allocate foreigns in paged spaces.
STATIC_ASSERT(Foreign::kSize <= Page::kMaxHeapObjectSize);
AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
- Object* result;
- { MaybeObject* maybe_result = Allocate(foreign_map(), space);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
-
- Foreign::cast(result)->set_address(address);
+ Foreign* result;
+ MaybeObject* maybe_result = Allocate(foreign_map(), space);
+ if (!maybe_result->To(&result)) return maybe_result;
+ result->set_foreign_address(address);
return result;
}
@@ -2499,17 +2736,17 @@ MaybeObject* Heap::AllocateSharedFunctionInfo(Object* name) {
share->set_name(name);
Code* illegal = isolate_->builtins()->builtin(Builtins::kIllegal);
share->set_code(illegal);
- share->set_scope_info(SerializedScopeInfo::Empty());
+ share->set_scope_info(ScopeInfo::Empty());
Code* construct_stub =
isolate_->builtins()->builtin(Builtins::kJSConstructStubGeneric);
share->set_construct_stub(construct_stub);
share->set_instance_class_name(Object_symbol());
- share->set_function_data(undefined_value());
- share->set_script(undefined_value());
- share->set_debug_info(undefined_value());
- share->set_inferred_name(empty_string());
- share->set_initial_map(undefined_value());
- share->set_this_property_assignments(undefined_value());
+ share->set_function_data(undefined_value(), SKIP_WRITE_BARRIER);
+ share->set_script(undefined_value(), SKIP_WRITE_BARRIER);
+ share->set_debug_info(undefined_value(), SKIP_WRITE_BARRIER);
+ share->set_inferred_name(empty_string(), SKIP_WRITE_BARRIER);
+ share->set_initial_map(undefined_value(), SKIP_WRITE_BARRIER);
+ share->set_this_property_assignments(undefined_value(), SKIP_WRITE_BARRIER);
share->set_deopt_counter(Smi::FromInt(FLAG_deopt_every_n_times));
// Set integer fields (smi or int, depending on the architecture).
@@ -2541,8 +2778,8 @@ MaybeObject* Heap::AllocateJSMessageObject(String* type,
if (!maybe_result->ToObject(&result)) return maybe_result;
}
JSMessageObject* message = JSMessageObject::cast(result);
- message->set_properties(Heap::empty_fixed_array());
- message->set_elements(Heap::empty_fixed_array());
+ message->set_properties(Heap::empty_fixed_array(), SKIP_WRITE_BARRIER);
+ message->set_elements(Heap::empty_fixed_array(), SKIP_WRITE_BARRIER);
message->set_type(type);
message->set_arguments(arguments);
message->set_start_position(start_position);
@@ -2658,14 +2895,14 @@ MaybeObject* Heap::AllocateConsString(String* first, String* second) {
// Copy first part.
const char* src;
if (first->IsExternalString()) {
- src = ExternalAsciiString::cast(first)->resource()->data();
+ src = ExternalAsciiString::cast(first)->GetChars();
} else {
src = SeqAsciiString::cast(first)->GetChars();
}
for (int i = 0; i < first_length; i++) *dest++ = src[i];
// Copy second part.
if (second->IsExternalString()) {
- src = ExternalAsciiString::cast(second)->resource()->data();
+ src = ExternalAsciiString::cast(second)->GetChars();
} else {
src = SeqAsciiString::cast(second)->GetChars();
}
@@ -2737,25 +2974,23 @@ MaybeObject* Heap::AllocateSubString(String* buffer,
// Make an attempt to flatten the buffer to reduce access time.
buffer = buffer->TryFlattenGetString();
- // TODO(1626): For now slicing external strings is not supported. However,
- // a flat cons string can have an external string as first part in some cases.
- // Therefore we have to single out this case as well.
if (!FLAG_string_slices ||
- (buffer->IsConsString() &&
- (!buffer->IsFlat() ||
- !ConsString::cast(buffer)->first()->IsSeqString())) ||
- buffer->IsExternalString() ||
+ !buffer->IsFlat() ||
length < SlicedString::kMinLength ||
pretenure == TENURED) {
Object* result;
- { MaybeObject* maybe_result = buffer->IsAsciiRepresentation()
- ? AllocateRawAsciiString(length, pretenure)
- : AllocateRawTwoByteString(length, pretenure);
+ // WriteToFlat takes care of the case when an indirect string has a
+ // different encoding from its underlying string. These encodings may
+ // differ because of externalization.
+ bool is_ascii = buffer->IsAsciiRepresentation();
+ { MaybeObject* maybe_result = is_ascii
+ ? AllocateRawAsciiString(length, pretenure)
+ : AllocateRawTwoByteString(length, pretenure);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
String* string_result = String::cast(result);
// Copy the characters into the new object.
- if (buffer->IsAsciiRepresentation()) {
+ if (is_ascii) {
ASSERT(string_result->IsAsciiRepresentation());
char* dest = SeqAsciiString::cast(string_result)->GetChars();
String::WriteToFlat(buffer, dest, start, end);
@@ -2768,12 +3003,19 @@ MaybeObject* Heap::AllocateSubString(String* buffer,
}
ASSERT(buffer->IsFlat());
- ASSERT(!buffer->IsExternalString());
#if DEBUG
- buffer->StringVerify();
+ if (FLAG_verify_heap) {
+ buffer->StringVerify();
+ }
#endif
Object* result;
+ // When slicing an indirect string we use its encoding for a newly created
+ // slice and don't check the encoding of the underlying string. This is safe
+ // even if the encodings are different because of externalization. If an
+ // indirect ASCII string is pointing to a two-byte string, the two-byte char
+ // codes of the underlying string must still fit into ASCII (because
+ // externalization must not change char codes).
{ Map* map = buffer->IsAsciiRepresentation()
? sliced_ascii_string_map()
: sliced_string_map();
@@ -2799,13 +3041,14 @@ MaybeObject* Heap::AllocateSubString(String* buffer,
sliced_string->set_parent(buffer);
sliced_string->set_offset(start);
}
- ASSERT(sliced_string->parent()->IsSeqString());
+ ASSERT(sliced_string->parent()->IsSeqString() ||
+ sliced_string->parent()->IsExternalString());
return result;
}
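
With flat external strings now sliceable, AllocateSubString only copies characters when the buffer is not flat, the result is short, or the allocation is tenured; otherwise the substring is a (parent, offset, length) triple that shares the parent's storage. A simplified sketch of that representation (the types and threshold constant are hypothetical):

    // Hypothetical flat parent string.
    struct FlatString {
      const char* chars;
      int length;
    };

    // A slice shares the parent's characters instead of copying them.
    struct SliceSketch {
      FlatString* parent;
      int offset;
      int length;
    };

    const int kMinSliceLength = 13;  // below this, copying is cheaper

    // Returns false when the caller should fall back to a character copy.
    bool TryMakeSlice(FlatString* parent, int start, int length,
                      SliceSketch* out) {
      if (length < kMinSliceLength) return false;
      out->parent = parent;
      out->offset = start;
      out->length = length;
      return true;
    }
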
MaybeObject* Heap::AllocateExternalStringFromAscii(
- ExternalAsciiString::Resource* resource) {
+ const ExternalAsciiString::Resource* resource) {
size_t length = resource->length();
if (length > static_cast<size_t>(String::kMaxLength)) {
isolate()->context()->mark_out_of_memory();
@@ -2828,7 +3071,7 @@ MaybeObject* Heap::AllocateExternalStringFromAscii(
MaybeObject* Heap::AllocateExternalStringFromTwoByte(
- ExternalTwoByteString::Resource* resource) {
+ const ExternalTwoByteString::Resource* resource) {
size_t length = resource->length();
if (length > static_cast<size_t>(String::kMaxLength)) {
isolate()->context()->mark_out_of_memory();
@@ -2892,11 +3135,11 @@ MaybeObject* Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
Object* result;
{ MaybeObject* maybe_result = (size <= MaxObjectSizeInPagedSpace())
? old_data_space_->AllocateRaw(size)
- : lo_space_->AllocateRaw(size);
+ : lo_space_->AllocateRaw(size, NOT_EXECUTABLE);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
- reinterpret_cast<ByteArray*>(result)->set_map(byte_array_map());
+ reinterpret_cast<ByteArray*>(result)->set_map_unsafe(byte_array_map());
reinterpret_cast<ByteArray*>(result)->set_length(length);
return result;
}
@@ -2914,7 +3157,7 @@ MaybeObject* Heap::AllocateByteArray(int length) {
if (!maybe_result->ToObject(&result)) return maybe_result;
}
- reinterpret_cast<ByteArray*>(result)->set_map(byte_array_map());
+ reinterpret_cast<ByteArray*>(result)->set_map_unsafe(byte_array_map());
reinterpret_cast<ByteArray*>(result)->set_length(length);
return result;
}
@@ -2924,12 +3167,12 @@ void Heap::CreateFillerObjectAt(Address addr, int size) {
if (size == 0) return;
HeapObject* filler = HeapObject::FromAddress(addr);
if (size == kPointerSize) {
- filler->set_map(one_pointer_filler_map());
+ filler->set_map_unsafe(one_pointer_filler_map());
} else if (size == 2 * kPointerSize) {
- filler->set_map(two_pointer_filler_map());
+ filler->set_map_unsafe(two_pointer_filler_map());
} else {
- filler->set_map(byte_array_map());
- ByteArray::cast(filler)->set_length(ByteArray::LengthFor(size));
+ filler->set_map_unsafe(free_space_map());
+ FreeSpace::cast(filler)->set_size(size);
}
}
@@ -2946,7 +3189,7 @@ MaybeObject* Heap::AllocateExternalArray(int length,
if (!maybe_result->ToObject(&result)) return maybe_result;
}
- reinterpret_cast<ExternalArray*>(result)->set_map(
+ reinterpret_cast<ExternalArray*>(result)->set_map_unsafe(
MapForExternalArrayType(array_type));
reinterpret_cast<ExternalArray*>(result)->set_length(length);
reinterpret_cast<ExternalArray*>(result)->set_external_pointer(
@@ -2962,10 +3205,9 @@ MaybeObject* Heap::CreateCode(const CodeDesc& desc,
bool immovable) {
// Allocate ByteArray before the Code object, so that we do not risk
// leaving an uninitialized Code object (and breaking the heap).
- Object* reloc_info;
- { MaybeObject* maybe_reloc_info = AllocateByteArray(desc.reloc_size, TENURED);
- if (!maybe_reloc_info->ToObject(&reloc_info)) return maybe_reloc_info;
- }
+ ByteArray* reloc_info;
+ MaybeObject* maybe_reloc_info = AllocateByteArray(desc.reloc_size, TENURED);
+ if (!maybe_reloc_info->To(&reloc_info)) return maybe_reloc_info;
// Compute size.
int body_size = RoundUp(desc.instr_size, kObjectAlignment);
@@ -2975,7 +3217,7 @@ MaybeObject* Heap::CreateCode(const CodeDesc& desc,
// Large code objects and code objects which should stay at a fixed address
// are allocated in large object space.
if (obj_size > MaxObjectSizeInPagedSpace() || immovable) {
- maybe_result = lo_space_->AllocateRawCode(obj_size);
+ maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
} else {
maybe_result = code_space_->AllocateRaw(obj_size);
}
@@ -2984,17 +3226,18 @@ MaybeObject* Heap::CreateCode(const CodeDesc& desc,
if (!maybe_result->ToObject(&result)) return maybe_result;
// Initialize the object
- HeapObject::cast(result)->set_map(code_map());
+ HeapObject::cast(result)->set_map_unsafe(code_map());
Code* code = Code::cast(result);
ASSERT(!isolate_->code_range()->exists() ||
isolate_->code_range()->contains(code->address()));
code->set_instruction_size(desc.instr_size);
- code->set_relocation_info(ByteArray::cast(reloc_info));
+ code->set_relocation_info(reloc_info);
code->set_flags(flags);
if (code->is_call_stub() || code->is_keyed_call_stub()) {
code->set_check_type(RECEIVER_MAP_CHECK);
}
- code->set_deoptimization_data(empty_fixed_array());
+ code->set_deoptimization_data(empty_fixed_array(), SKIP_WRITE_BARRIER);
+ code->set_handler_table(empty_fixed_array(), SKIP_WRITE_BARRIER);
code->set_next_code_flushing_candidate(undefined_value());
// Allow self references to created code object by patching the handle to
// point to the newly allocated Code object.
@@ -3009,7 +3252,9 @@ MaybeObject* Heap::CreateCode(const CodeDesc& desc,
code->CopyFrom(desc);
#ifdef DEBUG
- code->Verify();
+ if (FLAG_verify_heap) {
+ code->Verify();
+ }
#endif
return code;
}
@@ -3020,7 +3265,7 @@ MaybeObject* Heap::CopyCode(Code* code) {
int obj_size = code->Size();
MaybeObject* maybe_result;
if (obj_size > MaxObjectSizeInPagedSpace()) {
- maybe_result = lo_space_->AllocateRawCode(obj_size);
+ maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
} else {
maybe_result = code_space_->AllocateRaw(obj_size);
}
@@ -3063,7 +3308,7 @@ MaybeObject* Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
MaybeObject* maybe_result;
if (new_obj_size > MaxObjectSizeInPagedSpace()) {
- maybe_result = lo_space_->AllocateRawCode(new_obj_size);
+ maybe_result = lo_space_->AllocateRaw(new_obj_size, EXECUTABLE);
} else {
maybe_result = code_space_->AllocateRaw(new_obj_size);
}
@@ -3089,7 +3334,9 @@ MaybeObject* Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
new_code->Relocate(new_addr - old_addr);
#ifdef DEBUG
- code->Verify();
+ if (FLAG_verify_heap) {
+ code->Verify();
+ }
#endif
return new_code;
}
@@ -3107,14 +3354,15 @@ MaybeObject* Heap::Allocate(Map* map, AllocationSpace space) {
AllocateRaw(map->instance_size(), space, retry_space);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
- HeapObject::cast(result)->set_map(map);
+ // No need for a write barrier since the object is white and the map is in old space.
+ HeapObject::cast(result)->set_map_unsafe(map);
return result;
}
-MaybeObject* Heap::InitializeFunction(JSFunction* function,
- SharedFunctionInfo* shared,
- Object* prototype) {
+void Heap::InitializeFunction(JSFunction* function,
+ SharedFunctionInfo* shared,
+ Object* prototype) {
ASSERT(!prototype->IsMap());
function->initialize_properties();
function->initialize_elements();
@@ -3122,9 +3370,8 @@ MaybeObject* Heap::InitializeFunction(JSFunction* function,
function->set_code(shared->code());
function->set_prototype_or_initial_map(prototype);
function->set_context(undefined_value());
- function->set_literals(empty_fixed_array());
+ function->set_literals_or_bindings(empty_fixed_array());
function->set_next_function_link(undefined_value());
- return function;
}
@@ -3134,8 +3381,18 @@ MaybeObject* Heap::AllocateFunctionPrototype(JSFunction* function) {
// different context.
JSFunction* object_function =
function->context()->global_context()->object_function();
+
+ // Each function prototype gets a copy of the object function map.
+ // This avoids unwanted sharing of maps between prototypes of different
+ // constructors.
+ Map* new_map;
+ ASSERT(object_function->has_initial_map());
+ { MaybeObject* maybe_map =
+ object_function->initial_map()->CopyDropTransitions();
+ if (!maybe_map->To<Map>(&new_map)) return maybe_map;
+ }
Object* prototype;
- { MaybeObject* maybe_prototype = AllocateJSObject(object_function);
+ { MaybeObject* maybe_prototype = AllocateJSObjectFromMap(new_map);
if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;
}
// When creating the prototype for the function we must set its
@@ -3160,7 +3417,8 @@ MaybeObject* Heap::AllocateFunction(Map* function_map,
{ MaybeObject* maybe_result = Allocate(function_map, space);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
- return InitializeFunction(JSFunction::cast(result), shared, prototype);
+ InitializeFunction(JSFunction::cast(result), shared, prototype);
+ return result;
}
@@ -3171,7 +3429,7 @@ MaybeObject* Heap::AllocateArgumentsObject(Object* callee, int length) {
JSObject* boilerplate;
int arguments_object_size;
bool strict_mode_callee = callee->IsJSFunction() &&
- JSFunction::cast(callee)->shared()->strict_mode();
+ !JSFunction::cast(callee)->shared()->is_classic_mode();
if (strict_mode_callee) {
boilerplate =
isolate()->context()->global_context()->
@@ -3277,22 +3535,22 @@ MaybeObject* Heap::AllocateInitialMap(JSFunction* fun) {
// Inline constructor can only handle inobject properties.
fun->shared()->ForbidInlineConstructor();
} else {
- Object* descriptors_obj;
+ DescriptorArray* descriptors;
{ MaybeObject* maybe_descriptors_obj = DescriptorArray::Allocate(count);
- if (!maybe_descriptors_obj->ToObject(&descriptors_obj)) {
+ if (!maybe_descriptors_obj->To<DescriptorArray>(&descriptors)) {
return maybe_descriptors_obj;
}
}
- DescriptorArray* descriptors = DescriptorArray::cast(descriptors_obj);
+ DescriptorArray::WhitenessWitness witness(descriptors);
for (int i = 0; i < count; i++) {
String* name = fun->shared()->GetThisPropertyAssignmentName(i);
ASSERT(name->IsSymbol());
FieldDescriptor field(name, i, NONE);
field.SetEnumerationIndex(i);
- descriptors->Set(i, &field);
+ descriptors->Set(i, &field, witness);
}
descriptors->SetNextEnumerationIndex(count);
- descriptors->SortUnchecked();
+ descriptors->SortUnchecked(witness);
// The descriptors may contain duplicates because the compiler does not
// guarantee the uniqueness of property names (it would have required
@@ -3330,6 +3588,9 @@ void Heap::InitializeJSObjectFromMap(JSObject* obj,
// We cannot always fill with one_pointer_filler_map because objects
// created from API functions expect their internal fields to be initialized
// with undefined_value.
+ // Pre-allocated fields need to be initialized with undefined_value as well
+ // so that object accesses before the constructor completes (e.g. in the
+ // debugger) will not cause a crash.
if (map->constructor()->IsJSFunction() &&
JSFunction::cast(map->constructor())->shared()->
IsInobjectSlackTrackingInProgress()) {
@@ -3339,7 +3600,7 @@ void Heap::InitializeJSObjectFromMap(JSObject* obj,
} else {
filler = Heap::undefined_value();
}
- obj->InitializeBody(map->instance_size(), filler);
+ obj->InitializeBody(map, Heap::undefined_value(), filler);
}
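
The new three-argument InitializeBody call encodes the comment above: pre-allocated property fields always get a readable value, while only the trailing slack receives the tracking filler. A sketch over a plain field array, with hypothetical types:

    struct Object;

    // Fields the constructor is expected to assign get a real value so that
    // reading the object mid-construction (e.g. from the debugger) is safe;
    // the remaining slack gets `filler`, which slack tracking may reclaim.
    void InitializeBodySketch(Object** fields, int pre_allocated, int total,
                              Object* undefined_value, Object* filler) {
      int i = 0;
      for (; i < pre_allocated; ++i) fields[i] = undefined_value;
      for (; i < total; ++i) fields[i] = filler;
    }
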
@@ -3377,7 +3638,8 @@ MaybeObject* Heap::AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure) {
InitializeJSObjectFromMap(JSObject::cast(obj),
FixedArray::cast(properties),
map);
- ASSERT(JSObject::cast(obj)->HasFastElements());
+ ASSERT(JSObject::cast(obj)->HasFastSmiOnlyElements() ||
+ JSObject::cast(obj)->HasFastElements());
return obj;
}
@@ -3420,6 +3682,7 @@ MaybeObject* Heap::AllocateJSProxy(Object* handler, Object* prototype) {
if (!maybe_result->To<JSProxy>(&result)) return maybe_result;
result->InitializeBody(map->instance_size(), Smi::FromInt(0));
result->set_handler(handler);
+ result->set_hash(undefined_value(), SKIP_WRITE_BARRIER);
return result;
}
@@ -3443,6 +3706,7 @@ MaybeObject* Heap::AllocateJSFunctionProxy(Object* handler,
if (!maybe_result->To<JSFunctionProxy>(&result)) return maybe_result;
result->InitializeBody(map->instance_size(), Smi::FromInt(0));
result->set_handler(handler);
+ result->set_hash(undefined_value(), SKIP_WRITE_BARRIER);
result->set_call_trap(call_trap);
result->set_construct_trap(construct_trap);
return result;
@@ -3525,13 +3789,15 @@ MaybeObject* Heap::AllocateGlobalObject(JSFunction* constructor) {
MaybeObject* Heap::CopyJSObject(JSObject* source) {
// Never used to copy functions. If functions need to be copied we
// have to be careful to clear the literals array.
- ASSERT(!source->IsJSFunction());
+ SLOW_ASSERT(!source->IsJSFunction());
// Make the clone.
Map* map = source->map();
int object_size = map->instance_size();
Object* clone;
+ WriteBarrierMode wb_mode = UPDATE_WRITE_BARRIER;
+
// If we're forced to always allocate, we use the general allocation
// functions which may leave us with an object in old space.
if (always_allocate()) {
@@ -3548,10 +3814,11 @@ MaybeObject* Heap::CopyJSObject(JSObject* source) {
JSObject::kHeaderSize,
(object_size - JSObject::kHeaderSize) / kPointerSize);
} else {
+ wb_mode = SKIP_WRITE_BARRIER;
{ MaybeObject* maybe_clone = new_space_.AllocateRaw(object_size);
if (!maybe_clone->ToObject(&clone)) return maybe_clone;
}
- ASSERT(InNewSpace(clone));
+ SLOW_ASSERT(InNewSpace(clone));
// Since we know the clone is allocated in new space, we can copy
// the contents without worrying about updating the write barrier.
CopyBlock(HeapObject::cast(clone)->address(),
@@ -3559,6 +3826,8 @@ MaybeObject* Heap::CopyJSObject(JSObject* source) {
object_size);
}
+ SLOW_ASSERT(
+ JSObject::cast(clone)->GetElementsKind() == source->GetElementsKind());
FixedArrayBase* elements = FixedArrayBase::cast(source->elements());
FixedArray* properties = FixedArray::cast(source->properties());
// Update elements if necessary.
@@ -3574,7 +3843,7 @@ MaybeObject* Heap::CopyJSObject(JSObject* source) {
}
if (!maybe_elem->ToObject(&elem)) return maybe_elem;
}
- JSObject::cast(clone)->set_elements(FixedArrayBase::cast(elem));
+ JSObject::cast(clone)->set_elements(FixedArrayBase::cast(elem), wb_mode);
}
// Update properties if necessary.
if (properties->length() > 0) {
@@ -3582,7 +3851,7 @@ MaybeObject* Heap::CopyJSObject(JSObject* source) {
{ MaybeObject* maybe_prop = CopyFixedArray(properties);
if (!maybe_prop->ToObject(&prop)) return maybe_prop;
}
- JSObject::cast(clone)->set_properties(FixedArray::cast(prop));
+ JSObject::cast(clone)->set_properties(FixedArray::cast(prop), wb_mode);
}
// Return the new clone.
return clone;
@@ -3591,13 +3860,13 @@ MaybeObject* Heap::CopyJSObject(JSObject* source) {
MaybeObject* Heap::ReinitializeJSReceiver(
JSReceiver* object, InstanceType type, int size) {
- ASSERT(type >= FIRST_JS_RECEIVER_TYPE);
+ ASSERT(type >= FIRST_JS_OBJECT_TYPE);
// Allocate fresh map.
// TODO(rossberg): Once we optimize proxies, cache these maps.
Map* map;
- MaybeObject* maybe_map_obj = AllocateMap(type, size);
- if (!maybe_map_obj->To<Map>(&map)) return maybe_map_obj;
+ MaybeObject* maybe = AllocateMap(type, size);
+ if (!maybe->To<Map>(&map)) return maybe;
// Check that the receiver has at least the size of the fresh object.
int size_difference = object->map()->instance_size() - map->instance_size();
@@ -3608,30 +3877,35 @@ MaybeObject* Heap::ReinitializeJSReceiver(
// Allocate the backing storage for the properties.
int prop_size = map->unused_property_fields() - map->inobject_properties();
Object* properties;
- { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, TENURED);
- if (!maybe_properties->ToObject(&properties)) return maybe_properties;
+ maybe = AllocateFixedArray(prop_size, TENURED);
+ if (!maybe->ToObject(&properties)) return maybe;
+
+ // Functions require some allocation, which might fail here.
+ SharedFunctionInfo* shared = NULL;
+ if (type == JS_FUNCTION_TYPE) {
+ String* name;
+ maybe = LookupAsciiSymbol("<freezing call trap>");
+ if (!maybe->To<String>(&name)) return maybe;
+ maybe = AllocateSharedFunctionInfo(name);
+ if (!maybe->To<SharedFunctionInfo>(&shared)) return maybe;
}
+ // Because this function can be retried after a failure, we must
+ // NOT fail beyond this point, where the object's type gets changed!
+
// Reset the map for the object.
object->set_map(map);
+ JSObject* jsobj = JSObject::cast(object);
// Reinitialize the object from the constructor map.
- InitializeJSObjectFromMap(JSObject::cast(object),
- FixedArray::cast(properties), map);
+ InitializeJSObjectFromMap(jsobj, FixedArray::cast(properties), map);
// Functions require some minimal initialization.
if (type == JS_FUNCTION_TYPE) {
- String* name;
- MaybeObject* maybe_name = LookupAsciiSymbol("<freezing call trap>");
- if (!maybe_name->To<String>(&name)) return maybe_name;
- SharedFunctionInfo* shared;
- MaybeObject* maybe_shared = AllocateSharedFunctionInfo(name);
- if (!maybe_shared->To<SharedFunctionInfo>(&shared)) return maybe_shared;
- JSFunction* func;
- MaybeObject* maybe_func =
- InitializeFunction(JSFunction::cast(object), shared, the_hole_value());
- if (!maybe_func->To<JSFunction>(&func)) return maybe_func;
- func->set_context(isolate()->context()->global_context());
+ map->set_function_with_prototype(true);
+ InitializeFunction(JSFunction::cast(object), shared, the_hole_value());
+ JSFunction::cast(object)->set_context(
+ isolate()->context()->global_context());
}
// Put in filler if the new object is smaller than the old.
@@ -3749,31 +4023,22 @@ Map* Heap::SymbolMapForString(String* string) {
if (InNewSpace(string)) return NULL;
// Find the corresponding symbol map for strings.
- Map* map = string->map();
- if (map == ascii_string_map()) {
- return ascii_symbol_map();
- }
- if (map == string_map()) {
- return symbol_map();
- }
- if (map == cons_string_map()) {
- return cons_symbol_map();
- }
- if (map == cons_ascii_string_map()) {
- return cons_ascii_symbol_map();
+ switch (string->map()->instance_type()) {
+ case STRING_TYPE: return symbol_map();
+ case ASCII_STRING_TYPE: return ascii_symbol_map();
+ case CONS_STRING_TYPE: return cons_symbol_map();
+ case CONS_ASCII_STRING_TYPE: return cons_ascii_symbol_map();
+ case EXTERNAL_STRING_TYPE: return external_symbol_map();
+ case EXTERNAL_ASCII_STRING_TYPE: return external_ascii_symbol_map();
+ case EXTERNAL_STRING_WITH_ASCII_DATA_TYPE:
+ return external_symbol_with_ascii_data_map();
+ case SHORT_EXTERNAL_STRING_TYPE: return short_external_symbol_map();
+ case SHORT_EXTERNAL_ASCII_STRING_TYPE:
+ return short_external_ascii_symbol_map();
+ case SHORT_EXTERNAL_STRING_WITH_ASCII_DATA_TYPE:
+ return short_external_symbol_with_ascii_data_map();
+ default: return NULL; // No match found.
}
- if (map == external_string_map()) {
- return external_symbol_map();
- }
- if (map == external_ascii_string_map()) {
- return external_ascii_symbol_map();
- }
- if (map == external_string_with_ascii_data_map()) {
- return external_symbol_with_ascii_data_map();
- }
-
- // No match found.
- return NULL;
}
@@ -3814,12 +4079,12 @@ MaybeObject* Heap::AllocateInternalSymbol(unibrow::CharacterStream* buffer,
// Allocate string.
Object* result;
{ MaybeObject* maybe_result = (size > MaxObjectSizeInPagedSpace())
- ? lo_space_->AllocateRaw(size)
+ ? lo_space_->AllocateRaw(size, NOT_EXECUTABLE)
: old_data_space_->AllocateRaw(size);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
- reinterpret_cast<HeapObject*>(result)->set_map(map);
+ reinterpret_cast<HeapObject*>(result)->set_map_unsafe(map);
// Set length and hash fields of the allocated string.
String* answer = String::cast(result);
answer->set_length(chars);
@@ -3863,7 +4128,7 @@ MaybeObject* Heap::AllocateRawAsciiString(int length, PretenureFlag pretenure) {
}
// Partially initialize the object.
- HeapObject::cast(result)->set_map(ascii_string_map());
+ HeapObject::cast(result)->set_map_unsafe(ascii_string_map());
String::cast(result)->set_length(length);
String::cast(result)->set_hash_field(String::kEmptyHashField);
ASSERT_EQ(size, HeapObject::cast(result)->Size());
@@ -3898,7 +4163,7 @@ MaybeObject* Heap::AllocateRawTwoByteString(int length,
}
// Partially initialize the object.
- HeapObject::cast(result)->set_map(string_map());
+ HeapObject::cast(result)->set_map_unsafe(string_map());
String::cast(result)->set_length(length);
String::cast(result)->set_hash_field(String::kEmptyHashField);
ASSERT_EQ(size, HeapObject::cast(result)->Size());
@@ -3914,7 +4179,7 @@ MaybeObject* Heap::AllocateEmptyFixedArray() {
if (!maybe_result->ToObject(&result)) return maybe_result;
}
// Initialize the object.
- reinterpret_cast<FixedArray*>(result)->set_map(fixed_array_map());
+ reinterpret_cast<FixedArray*>(result)->set_map_unsafe(fixed_array_map());
reinterpret_cast<FixedArray*>(result)->set_length(0);
return result;
}
@@ -3931,7 +4196,7 @@ MaybeObject* Heap::AllocateRawFixedArray(int length) {
int size = FixedArray::SizeFor(length);
return size <= kMaxObjectSizeInNewSpace
? new_space_.AllocateRaw(size)
- : lo_space_->AllocateRawFixedArray(size);
+ : lo_space_->AllocateRaw(size, NOT_EXECUTABLE);
}
@@ -3943,13 +4208,13 @@ MaybeObject* Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) {
}
if (InNewSpace(obj)) {
HeapObject* dst = HeapObject::cast(obj);
- dst->set_map(map);
+ dst->set_map_unsafe(map);
CopyBlock(dst->address() + kPointerSize,
src->address() + kPointerSize,
FixedArray::SizeFor(len) - kPointerSize);
return obj;
}
- HeapObject::cast(obj)->set_map(map);
+ HeapObject::cast(obj)->set_map_unsafe(map);
FixedArray* result = FixedArray::cast(obj);
result->set_length(len);
@@ -3969,7 +4234,7 @@ MaybeObject* Heap::CopyFixedDoubleArrayWithMap(FixedDoubleArray* src,
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
HeapObject* dst = HeapObject::cast(obj);
- dst->set_map(map);
+ dst->set_map_unsafe(map);
CopyBlock(
dst->address() + FixedDoubleArray::kLengthOffset,
src->address() + FixedDoubleArray::kLengthOffset,
@@ -3987,7 +4252,7 @@ MaybeObject* Heap::AllocateFixedArray(int length) {
}
// Initialize header.
FixedArray* array = reinterpret_cast<FixedArray*>(result);
- array->set_map(fixed_array_map());
+ array->set_map_unsafe(fixed_array_map());
array->set_length(length);
// Initialize body.
ASSERT(!InNewSpace(undefined_value()));
@@ -4035,7 +4300,7 @@ MUST_USE_RESULT static MaybeObject* AllocateFixedArrayWithFiller(
if (!maybe_result->ToObject(&result)) return maybe_result;
}
- HeapObject::cast(result)->set_map(heap->fixed_array_map());
+ HeapObject::cast(result)->set_map_unsafe(heap->fixed_array_map());
FixedArray* array = FixedArray::cast(result);
array->set_length(length);
MemsetPointer(array->data_start(), filler, length);
@@ -4068,7 +4333,7 @@ MaybeObject* Heap::AllocateUninitializedFixedArray(int length) {
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
- reinterpret_cast<FixedArray*>(obj)->set_map(fixed_array_map());
+ reinterpret_cast<FixedArray*>(obj)->set_map_unsafe(fixed_array_map());
FixedArray::cast(obj)->set_length(length);
return obj;
}
@@ -4082,7 +4347,7 @@ MaybeObject* Heap::AllocateEmptyFixedDoubleArray() {
if (!maybe_result->ToObject(&result)) return maybe_result;
}
// Initialize the object.
- reinterpret_cast<FixedDoubleArray*>(result)->set_map(
+ reinterpret_cast<FixedDoubleArray*>(result)->set_map_unsafe(
fixed_double_array_map());
reinterpret_cast<FixedDoubleArray*>(result)->set_length(0);
return result;
@@ -4099,7 +4364,8 @@ MaybeObject* Heap::AllocateUninitializedFixedDoubleArray(
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
- reinterpret_cast<FixedDoubleArray*>(obj)->set_map(fixed_double_array_map());
+ reinterpret_cast<FixedDoubleArray*>(obj)->set_map_unsafe(
+ fixed_double_array_map());
FixedDoubleArray::cast(obj)->set_length(length);
return obj;
}
@@ -4135,7 +4401,7 @@ MaybeObject* Heap::AllocateHashTable(int length, PretenureFlag pretenure) {
{ MaybeObject* maybe_result = AllocateFixedArray(length, pretenure);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
- reinterpret_cast<HeapObject*>(result)->set_map(hash_table_map());
+ reinterpret_cast<HeapObject*>(result)->set_map_unsafe(hash_table_map());
ASSERT(result->IsHashTable());
return result;
}
@@ -4148,7 +4414,7 @@ MaybeObject* Heap::AllocateGlobalContext() {
if (!maybe_result->ToObject(&result)) return maybe_result;
}
Context* context = reinterpret_cast<Context*>(result);
- context->set_map(global_context_map());
+ context->set_map_unsafe(global_context_map());
ASSERT(context->IsGlobalContext());
ASSERT(result->IsContext());
return result;
@@ -4162,7 +4428,7 @@ MaybeObject* Heap::AllocateFunctionContext(int length, JSFunction* function) {
if (!maybe_result->ToObject(&result)) return maybe_result;
}
Context* context = reinterpret_cast<Context*>(result);
- context->set_map(function_context_map());
+ context->set_map_unsafe(function_context_map());
context->set_closure(function);
context->set_previous(function->context());
context->set_extension(NULL);
@@ -4182,7 +4448,7 @@ MaybeObject* Heap::AllocateCatchContext(JSFunction* function,
if (!maybe_result->ToObject(&result)) return maybe_result;
}
Context* context = reinterpret_cast<Context*>(result);
- context->set_map(catch_context_map());
+ context->set_map_unsafe(catch_context_map());
context->set_closure(function);
context->set_previous(previous);
context->set_extension(name);
@@ -4200,7 +4466,7 @@ MaybeObject* Heap::AllocateWithContext(JSFunction* function,
if (!maybe_result->ToObject(&result)) return maybe_result;
}
Context* context = reinterpret_cast<Context*>(result);
- context->set_map(with_context_map());
+ context->set_map_unsafe(with_context_map());
context->set_closure(function);
context->set_previous(previous);
context->set_extension(extension);
@@ -4211,14 +4477,14 @@ MaybeObject* Heap::AllocateWithContext(JSFunction* function,
MaybeObject* Heap::AllocateBlockContext(JSFunction* function,
Context* previous,
- SerializedScopeInfo* scope_info) {
+ ScopeInfo* scope_info) {
Object* result;
{ MaybeObject* maybe_result =
- AllocateFixedArrayWithHoles(scope_info->NumberOfContextSlots());
+ AllocateFixedArrayWithHoles(scope_info->ContextLength());
if (!maybe_result->ToObject(&result)) return maybe_result;
}
Context* context = reinterpret_cast<Context*>(result);
- context->set_map(block_context_map());
+ context->set_map_unsafe(block_context_map());
context->set_closure(function);
context->set_previous(previous);
context->set_extension(scope_info);
@@ -4227,14 +4493,11 @@ MaybeObject* Heap::AllocateBlockContext(JSFunction* function,
}
-MaybeObject* Heap::AllocateSerializedScopeInfo(int length) {
- Object* result;
- { MaybeObject* maybe_result = AllocateFixedArray(length, TENURED);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- SerializedScopeInfo* scope_info =
- reinterpret_cast<SerializedScopeInfo*>(result);
- scope_info->set_map(serialized_scope_info_map());
+MaybeObject* Heap::AllocateScopeInfo(int length) {
+ FixedArray* scope_info;
+ MaybeObject* maybe_scope_info = AllocateFixedArray(length, TENURED);
+ if (!maybe_scope_info->To(&scope_info)) return maybe_scope_info;
+ scope_info->set_map_unsafe(scope_info_map());
return scope_info;
}
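
All of the set_map_unsafe() call sites above share one allocate-then-initialize shape: the map is stored into an object that was allocated a moment earlier, so the store cannot create a reference the incremental-marking write barrier needs to see (an inference from the _unsafe suffix and the barrier machinery added elsewhere in this patch, not a claim the patch itself makes). A condensed sketch of the pattern, with RawArray as a hypothetical stand-in for the concrete FixedArray variants:

    // Sketch only; RawArray is hypothetical, and error handling mirrors the
    // MaybeObject idiom used throughout this file.
    MaybeObject* AllocateArrayLike(Heap* heap, int length) {
      Object* obj;
      { MaybeObject* maybe_obj = heap->AllocateRawFixedArray(length, NOT_TENURED);
        if (!maybe_obj->ToObject(&obj)) return maybe_obj;  // propagate failure
      }
      // Freshly allocated object: the map store needs no write barrier.
      reinterpret_cast<RawArray*>(obj)->set_map_unsafe(heap->fixed_array_map());
      RawArray::cast(obj)->set_length(length);
      return obj;
    }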
@@ -4262,7 +4525,95 @@ STRUCT_LIST(MAKE_CASE)
}
-bool Heap::IdleNotification() {
+bool Heap::IsHeapIterable() {
+ return (!old_pointer_space()->was_swept_conservatively() &&
+ !old_data_space()->was_swept_conservatively());
+}
+
+
+void Heap::EnsureHeapIsIterable() {
+ ASSERT(IsAllocationAllowed());
+ if (!IsHeapIterable()) {
+ CollectAllGarbage(kMakeHeapIterableMask);
+ }
+ ASSERT(IsHeapIterable());
+}
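
EnsureHeapIsIterable() is the intended entry point for code that wants to walk every object; it pairs with the kMakeHeapIterableMask flag declared in heap.h later in this patch. A minimal usage sketch, following the HeapIterator idiom used elsewhere in this file:

    void VisitAllLiveObjects(Heap* heap) {
      // May trigger CollectAllGarbage(kMakeHeapIterableMask), forcing the
      // precise sweeper over conservatively swept pages.
      heap->EnsureHeapIsIterable();
      HeapIterator iterator;
      for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
        // Every page is now parseable object by object.
      }
    }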
+
+
+bool Heap::IdleNotification(int hint) {
+ if (!FLAG_incremental_marking || FLAG_expose_gc || Serializer::enabled()) {
+ return hint < 1000 ? true : IdleGlobalGC();
+ }
+
+  // We do small chunks of GC work in each IdleNotification: we perform
+  // a round of incremental GCs and after that wait until the mutator
+  // creates enough garbage to justify a new round.
+ // An incremental GC progresses as follows:
+ // 1. many incremental marking steps,
+ // 2. one old space mark-sweep-compact,
+ // 3. many lazy sweep steps.
+ // Use mark-sweep-compact events to count incremental GCs in a round.
+
+ intptr_t size_factor = Min(Max(hint, 30), 1000) / 10;
+ // The size factor is in range [3..100].
+ intptr_t step_size = size_factor * IncrementalMarking::kAllocatedThreshold;
+
+ if (incremental_marking()->IsStopped()) {
+ if (!IsSweepingComplete() &&
+ !AdvanceSweepers(static_cast<int>(step_size))) {
+ return false;
+ }
+ }
+
+ if (mark_sweeps_since_idle_round_started_ >= kMaxMarkSweepsInIdleRound) {
+ if (EnoughGarbageSinceLastIdleRound()) {
+ StartIdleRound();
+ } else {
+ return true;
+ }
+ }
+
+ int new_mark_sweeps = ms_count_ - ms_count_at_last_idle_notification_;
+ mark_sweeps_since_idle_round_started_ += new_mark_sweeps;
+ ms_count_at_last_idle_notification_ = ms_count_;
+
+ if (mark_sweeps_since_idle_round_started_ >= kMaxMarkSweepsInIdleRound) {
+ FinishIdleRound();
+ return true;
+ }
+
+ if (incremental_marking()->IsStopped()) {
+ if (hint < 1000 && !WorthStartingGCWhenIdle()) {
+ FinishIdleRound();
+ return true;
+ }
+ incremental_marking()->Start();
+ }
+
+  // This flag prevents incremental marking from requesting GC via stack guard.
+ idle_notification_will_schedule_next_gc_ = true;
+ incremental_marking()->Step(step_size);
+ idle_notification_will_schedule_next_gc_ = false;
+
+ if (incremental_marking()->IsComplete()) {
+ bool uncommit = false;
+ if (gc_count_at_last_idle_gc_ == gc_count_) {
+      // No GC since the last full GC; the mutator is probably not active.
+ isolate_->compilation_cache()->Clear();
+ uncommit = true;
+ }
+ CollectAllGarbage(kNoGCFlags);
+ gc_count_at_last_idle_gc_ = gc_count_;
+ if (uncommit) {
+ new_space_.Shrink();
+ UncommitFromSpace();
+ }
+ }
+ return false;
+}
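
The step-size clamp above is easy to verify by hand; a self-contained restatement in plain C++ with worked values:

    #include <algorithm>
    // Mirrors: size_factor = Min(Max(hint, 30), 1000) / 10
    int IdleStepFactor(int hint) {
      return std::min(std::max(hint, 30), 1000) / 10;
    }
    // IdleStepFactor(10)   == 3    (short hints raised to the 30 ms floor)
    // IdleStepFactor(500)  == 50
    // IdleStepFactor(5000) == 100  (long hints capped at 1000 ms)
    // step_size is therefore always in [3..100] * kAllocatedThreshold.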
+
+
+bool Heap::IdleGlobalGC() {
static const int kIdlesBeforeScavenge = 4;
static const int kIdlesBeforeMarkSweep = 7;
static const int kIdlesBeforeMarkCompact = 8;
@@ -4292,7 +4643,7 @@ bool Heap::IdleNotification() {
if (number_idle_notifications_ == kIdlesBeforeScavenge) {
if (contexts_disposed_ > 0) {
HistogramTimerScope scope(isolate_->counters()->gc_context());
- CollectAllGarbage(false);
+ CollectAllGarbage(kNoGCFlags);
} else {
CollectGarbage(NEW_SPACE);
}
@@ -4304,12 +4655,12 @@ bool Heap::IdleNotification() {
// generated code for cached functions.
isolate_->compilation_cache()->Clear();
- CollectAllGarbage(false);
+ CollectAllGarbage(kNoGCFlags);
new_space_.Shrink();
last_idle_notification_gc_count_ = gc_count_;
} else if (number_idle_notifications_ == kIdlesBeforeMarkCompact) {
- CollectAllGarbage(true);
+ CollectAllGarbage(kNoGCFlags);
new_space_.Shrink();
last_idle_notification_gc_count_ = gc_count_;
number_idle_notifications_ = 0;
@@ -4319,7 +4670,7 @@ bool Heap::IdleNotification() {
contexts_disposed_ = 0;
} else {
HistogramTimerScope scope(isolate_->counters()->gc_context());
- CollectAllGarbage(false);
+ CollectAllGarbage(kNoGCFlags);
last_idle_notification_gc_count_ = gc_count_;
}
// If this is the first idle notification, we reset the
@@ -4339,8 +4690,11 @@ bool Heap::IdleNotification() {
// Make sure that we have no pending context disposals and
// conditionally uncommit from space.
- ASSERT(contexts_disposed_ == 0);
+ // Take into account that we might have decided to delay full collection
+ // because incremental marking is in progress.
+ ASSERT((contexts_disposed_ == 0) || !incremental_marking()->IsStopped());
if (uncommit) UncommitFromSpace();
+
return finished;
}
@@ -4374,11 +4728,11 @@ void Heap::ReportHeapStatistics(const char* title) {
USE(title);
PrintF(">>>>>> =============== %s (%d) =============== >>>>>>\n",
title, gc_count_);
- PrintF("mark-compact GC : %d\n", mc_count_);
PrintF("old_gen_promotion_limit_ %" V8_PTR_PREFIX "d\n",
old_gen_promotion_limit_);
PrintF("old_gen_allocation_limit_ %" V8_PTR_PREFIX "d\n",
old_gen_allocation_limit_);
+ PrintF("old_gen_limit_factor_ %d\n", old_gen_limit_factor_);
PrintF("\n");
PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles());
@@ -4455,69 +4809,18 @@ bool Heap::InSpace(Address addr, AllocationSpace space) {
#ifdef DEBUG
-static void DummyScavengePointer(HeapObject** p) {
-}
-
-
-static void VerifyPointersUnderWatermark(
- PagedSpace* space,
- DirtyRegionCallback visit_dirty_region) {
- PageIterator it(space, PageIterator::PAGES_IN_USE);
-
- while (it.has_next()) {
- Page* page = it.next();
- Address start = page->ObjectAreaStart();
- Address end = page->AllocationWatermark();
-
- HEAP->IterateDirtyRegions(Page::kAllRegionsDirtyMarks,
- start,
- end,
- visit_dirty_region,
- &DummyScavengePointer);
- }
-}
-
-
-static void VerifyPointersUnderWatermark(LargeObjectSpace* space) {
- LargeObjectIterator it(space);
- for (HeapObject* object = it.next(); object != NULL; object = it.next()) {
- if (object->IsFixedArray()) {
- Address slot_address = object->address();
- Address end = object->address() + object->Size();
-
- while (slot_address < end) {
- HeapObject** slot = reinterpret_cast<HeapObject**>(slot_address);
- // When we are not in GC the Heap::InNewSpace() predicate
- // checks that pointers which satisfy predicate point into
- // the active semispace.
- HEAP->InNewSpace(*slot);
- slot_address += kPointerSize;
- }
- }
- }
-}
-
-
void Heap::Verify() {
ASSERT(HasBeenSetup());
+ store_buffer()->Verify();
+
VerifyPointersVisitor visitor;
IterateRoots(&visitor, VISIT_ONLY_STRONG);
new_space_.Verify();
- VerifyPointersAndDirtyRegionsVisitor dirty_regions_visitor;
- old_pointer_space_->Verify(&dirty_regions_visitor);
- map_space_->Verify(&dirty_regions_visitor);
-
- VerifyPointersUnderWatermark(old_pointer_space_,
- &IteratePointersInDirtyRegion);
- VerifyPointersUnderWatermark(map_space_,
- &IteratePointersInDirtyMapsRegion);
- VerifyPointersUnderWatermark(lo_space_);
-
- VerifyPageWatermarkValidity(old_pointer_space_, ALL_INVALID);
- VerifyPageWatermarkValidity(map_space_, ALL_INVALID);
+ old_pointer_space_->Verify(&visitor);
+ map_space_->Verify(&visitor);
VerifyPointersVisitor no_dirty_regions_visitor;
old_data_space_->Verify(&no_dirty_regions_visitor);
@@ -4526,6 +4829,7 @@ void Heap::Verify() {
lo_space_->Verify();
}
+
#endif // DEBUG
@@ -4621,275 +4925,221 @@ bool Heap::LookupSymbolIfExists(String* string, String** symbol) {
#ifdef DEBUG
void Heap::ZapFromSpace() {
- ASSERT(reinterpret_cast<Object*>(kFromSpaceZapValue)->IsFailure());
- for (Address a = new_space_.FromSpaceLow();
- a < new_space_.FromSpaceHigh();
- a += kPointerSize) {
- Memory::Address_at(a) = kFromSpaceZapValue;
+ NewSpacePageIterator it(new_space_.FromSpaceStart(),
+ new_space_.FromSpaceEnd());
+ while (it.has_next()) {
+ NewSpacePage* page = it.next();
+ for (Address cursor = page->body(), limit = page->body_limit();
+ cursor < limit;
+ cursor += kPointerSize) {
+ Memory::Address_at(cursor) = kFromSpaceZapValue;
+ }
}
}
#endif // DEBUG
-bool Heap::IteratePointersInDirtyRegion(Heap* heap,
- Address start,
- Address end,
- ObjectSlotCallback copy_object_func) {
+void Heap::IterateAndMarkPointersToFromSpace(Address start,
+ Address end,
+ ObjectSlotCallback callback) {
Address slot_address = start;
- bool pointers_to_new_space_found = false;
+
+  // We do not collect slots on new space objects during mutation, so we
+  // have to scan for pointers to evacuation candidates when we promote
+  // objects. But we should not record any slots in non-black objects:
+  // grey objects' slots would be rescanned anyway, and a white object
+  // might not survive until the end of the collection, so recording its
+  // slots would violate the invariant.
+ bool record_slots = false;
+ if (incremental_marking()->IsCompacting()) {
+ MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::FromAddress(start));
+ record_slots = Marking::IsBlack(mark_bit);
+ }
while (slot_address < end) {
Object** slot = reinterpret_cast<Object**>(slot_address);
- if (heap->InNewSpace(*slot)) {
- ASSERT((*slot)->IsHeapObject());
- copy_object_func(reinterpret_cast<HeapObject**>(slot));
- if (heap->InNewSpace(*slot)) {
- ASSERT((*slot)->IsHeapObject());
- pointers_to_new_space_found = true;
+ Object* object = *slot;
+ // If the store buffer becomes overfull we mark pages as being exempt from
+ // the store buffer. These pages are scanned to find pointers that point
+ // to the new space. In that case we may hit newly promoted objects and
+ // fix the pointers before the promotion queue gets to them. Thus the 'if'.
+ if (object->IsHeapObject()) {
+ if (Heap::InFromSpace(object)) {
+ callback(reinterpret_cast<HeapObject**>(slot),
+ HeapObject::cast(object));
+ Object* new_object = *slot;
+ if (InNewSpace(new_object)) {
+ SLOW_ASSERT(Heap::InToSpace(new_object));
+ SLOW_ASSERT(new_object->IsHeapObject());
+ store_buffer_.EnterDirectlyIntoStoreBuffer(
+ reinterpret_cast<Address>(slot));
+ }
+ SLOW_ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(new_object));
+ } else if (record_slots &&
+ MarkCompactCollector::IsOnEvacuationCandidate(object)) {
+ mark_compact_collector()->RecordSlot(slot, slot, object);
}
}
slot_address += kPointerSize;
}
- return pointers_to_new_space_found;
}
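
The tri-color rule in the comment block above condenses to a single predicate; a sketch built from the same helpers this function calls, not patch code:

    // Record a slot for the compactor only when its holder is black: grey
    // holders will be rescanned anyway, and white holders may not survive.
    bool ShouldRecordSlot(Heap* heap, HeapObject* holder, Object* value) {
      if (!heap->incremental_marking()->IsCompacting()) return false;
      if (!Marking::IsBlack(Marking::MarkBitFrom(holder))) return false;
      return MarkCompactCollector::IsOnEvacuationCandidate(value);
    }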
-// Compute start address of the first map following given addr.
-static inline Address MapStartAlign(Address addr) {
- Address page = Page::FromAddress(addr)->ObjectAreaStart();
- return page + (((addr - page) + (Map::kSize - 1)) / Map::kSize * Map::kSize);
-}
+#ifdef DEBUG
+typedef bool (*CheckStoreBufferFilter)(Object** addr);
-// Compute end address of the first map preceding given addr.
-static inline Address MapEndAlign(Address addr) {
- Address page = Page::FromAllocationTop(addr)->ObjectAreaStart();
- return page + ((addr - page) / Map::kSize * Map::kSize);
+bool IsAMapPointerAddress(Object** addr) {
+ uintptr_t a = reinterpret_cast<uintptr_t>(addr);
+ int mod = a % Map::kSize;
+ return mod >= Map::kPointerFieldsBeginOffset &&
+ mod < Map::kPointerFieldsEndOffset;
}
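
A worked example of the modulo test, with hypothetical constants (the real offsets live in objects.h and are not quoted in this patch): suppose Map::kSize == 64, kPointerFieldsBeginOffset == 8 and kPointerFieldsEndOffset == 64. A slot at offset 40 into a map gives 40 % 64 == 40, and 8 <= 40 < 64, so it passes the filter; a slot at offset 4 gives 4 < 8 and is skipped. Taking the raw address modulo Map::kSize recovers the field offset presumably because maps are laid out at Map::kSize strides in map space (compare the MapStartAlign helper removed above).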
-static bool IteratePointersInDirtyMaps(Address start,
- Address end,
- ObjectSlotCallback copy_object_func) {
- ASSERT(MapStartAlign(start) == start);
- ASSERT(MapEndAlign(end) == end);
-
- Address map_address = start;
- bool pointers_to_new_space_found = false;
-
- Heap* heap = HEAP;
- while (map_address < end) {
- ASSERT(!heap->InNewSpace(Memory::Object_at(map_address)));
- ASSERT(Memory::Object_at(map_address)->IsMap());
+bool EverythingsAPointer(Object** addr) {
+ return true;
+}
- Address pointer_fields_start = map_address + Map::kPointerFieldsBeginOffset;
- Address pointer_fields_end = map_address + Map::kPointerFieldsEndOffset;
- if (Heap::IteratePointersInDirtyRegion(heap,
- pointer_fields_start,
- pointer_fields_end,
- copy_object_func)) {
- pointers_to_new_space_found = true;
+static void CheckStoreBuffer(Heap* heap,
+ Object** current,
+ Object** limit,
+ Object**** store_buffer_position,
+ Object*** store_buffer_top,
+ CheckStoreBufferFilter filter,
+ Address special_garbage_start,
+ Address special_garbage_end) {
+ Map* free_space_map = heap->free_space_map();
+ for ( ; current < limit; current++) {
+ Object* o = *current;
+ Address current_address = reinterpret_cast<Address>(current);
+ // Skip free space.
+ if (o == free_space_map) {
+ FreeSpace* free_space =
+ FreeSpace::cast(HeapObject::FromAddress(current_address));
+ int skip = free_space->Size();
+ ASSERT(current_address + skip <= reinterpret_cast<Address>(limit));
+ ASSERT(skip > 0);
+ current_address += skip - kPointerSize;
+ current = reinterpret_cast<Object**>(current_address);
+ continue;
+ }
+    // Skip the current linear allocation space between top and limit, which
+    // is not marked with the free space map but can contain junk.
+ if (current_address == special_garbage_start &&
+ special_garbage_end != special_garbage_start) {
+ current_address = special_garbage_end - kPointerSize;
+ current = reinterpret_cast<Object**>(current_address);
+ continue;
+ }
+ if (!(*filter)(current)) continue;
+ ASSERT(current_address < special_garbage_start ||
+ current_address >= special_garbage_end);
+ ASSERT(reinterpret_cast<uintptr_t>(o) != kFreeListZapValue);
+ // We have to check that the pointer does not point into new space
+ // without trying to cast it to a heap object since the hash field of
+ // a string can contain values like 1 and 3 which are tagged null
+ // pointers.
+ if (!heap->InNewSpace(o)) continue;
+ while (**store_buffer_position < current &&
+ *store_buffer_position < store_buffer_top) {
+ (*store_buffer_position)++;
+ }
+ if (**store_buffer_position != current ||
+ *store_buffer_position == store_buffer_top) {
+ Object** obj_start = current;
+ while (!(*obj_start)->IsMap()) obj_start--;
+ UNREACHABLE();
}
-
- map_address += Map::kSize;
}
-
- return pointers_to_new_space_found;
}
-bool Heap::IteratePointersInDirtyMapsRegion(
- Heap* heap,
- Address start,
- Address end,
- ObjectSlotCallback copy_object_func) {
- Address map_aligned_start = MapStartAlign(start);
- Address map_aligned_end = MapEndAlign(end);
-
- bool contains_pointers_to_new_space = false;
-
- if (map_aligned_start != start) {
- Address prev_map = map_aligned_start - Map::kSize;
- ASSERT(Memory::Object_at(prev_map)->IsMap());
-
- Address pointer_fields_start =
- Max(start, prev_map + Map::kPointerFieldsBeginOffset);
+// Check that the store buffer contains all intergenerational pointers by
+// scanning a page and ensuring that all pointers to young space are in the
+// store buffer.
+void Heap::OldPointerSpaceCheckStoreBuffer() {
+ OldSpace* space = old_pointer_space();
+ PageIterator pages(space);
- Address pointer_fields_end =
- Min(prev_map + Map::kPointerFieldsEndOffset, end);
+ store_buffer()->SortUniq();
- contains_pointers_to_new_space =
- IteratePointersInDirtyRegion(heap,
- pointer_fields_start,
- pointer_fields_end,
- copy_object_func)
- || contains_pointers_to_new_space;
- }
-
- contains_pointers_to_new_space =
- IteratePointersInDirtyMaps(map_aligned_start,
- map_aligned_end,
- copy_object_func)
- || contains_pointers_to_new_space;
-
- if (map_aligned_end != end) {
- ASSERT(Memory::Object_at(map_aligned_end)->IsMap());
-
- Address pointer_fields_start =
- map_aligned_end + Map::kPointerFieldsBeginOffset;
-
- Address pointer_fields_end =
- Min(end, map_aligned_end + Map::kPointerFieldsEndOffset);
-
- contains_pointers_to_new_space =
- IteratePointersInDirtyRegion(heap,
- pointer_fields_start,
- pointer_fields_end,
- copy_object_func)
- || contains_pointers_to_new_space;
- }
+ while (pages.has_next()) {
+ Page* page = pages.next();
+ Object** current = reinterpret_cast<Object**>(page->ObjectAreaStart());
- return contains_pointers_to_new_space;
-}
-
-
-void Heap::IterateAndMarkPointersToFromSpace(Address start,
- Address end,
- ObjectSlotCallback callback) {
- Address slot_address = start;
- Page* page = Page::FromAddress(start);
+ Address end = page->ObjectAreaEnd();
- uint32_t marks = page->GetRegionMarks();
+ Object*** store_buffer_position = store_buffer()->Start();
+ Object*** store_buffer_top = store_buffer()->Top();
- while (slot_address < end) {
- Object** slot = reinterpret_cast<Object**>(slot_address);
- if (InFromSpace(*slot)) {
- ASSERT((*slot)->IsHeapObject());
- callback(reinterpret_cast<HeapObject**>(slot));
- if (InNewSpace(*slot)) {
- ASSERT((*slot)->IsHeapObject());
- marks |= page->GetRegionMaskForAddress(slot_address);
- }
- }
- slot_address += kPointerSize;
+ Object** limit = reinterpret_cast<Object**>(end);
+ CheckStoreBuffer(this,
+ current,
+ limit,
+ &store_buffer_position,
+ store_buffer_top,
+ &EverythingsAPointer,
+ space->top(),
+ space->limit());
}
-
- page->SetRegionMarks(marks);
}
-uint32_t Heap::IterateDirtyRegions(
- uint32_t marks,
- Address area_start,
- Address area_end,
- DirtyRegionCallback visit_dirty_region,
- ObjectSlotCallback copy_object_func) {
- uint32_t newmarks = 0;
- uint32_t mask = 1;
-
- if (area_start >= area_end) {
- return newmarks;
- }
-
- Address region_start = area_start;
-
- // area_start does not necessarily coincide with start of the first region.
- // Thus to calculate the beginning of the next region we have to align
- // area_start by Page::kRegionSize.
- Address second_region =
- reinterpret_cast<Address>(
- reinterpret_cast<intptr_t>(area_start + Page::kRegionSize) &
- ~Page::kRegionAlignmentMask);
-
- // Next region might be beyond area_end.
- Address region_end = Min(second_region, area_end);
-
- if (marks & mask) {
- if (visit_dirty_region(this, region_start, region_end, copy_object_func)) {
- newmarks |= mask;
- }
- }
- mask <<= 1;
+void Heap::MapSpaceCheckStoreBuffer() {
+ MapSpace* space = map_space();
+ PageIterator pages(space);
- // Iterate subsequent regions which fully lay inside [area_start, area_end[.
- region_start = region_end;
- region_end = region_start + Page::kRegionSize;
+ store_buffer()->SortUniq();
- while (region_end <= area_end) {
- if (marks & mask) {
- if (visit_dirty_region(this,
- region_start,
- region_end,
- copy_object_func)) {
- newmarks |= mask;
- }
- }
+ while (pages.has_next()) {
+ Page* page = pages.next();
+ Object** current = reinterpret_cast<Object**>(page->ObjectAreaStart());
- region_start = region_end;
- region_end = region_start + Page::kRegionSize;
+ Address end = page->ObjectAreaEnd();
- mask <<= 1;
- }
+ Object*** store_buffer_position = store_buffer()->Start();
+ Object*** store_buffer_top = store_buffer()->Top();
- if (region_start != area_end) {
- // A small piece of area left uniterated because area_end does not coincide
- // with region end. Check whether region covering last part of area is
- // dirty.
- if (marks & mask) {
- if (visit_dirty_region(this, region_start, area_end, copy_object_func)) {
- newmarks |= mask;
- }
- }
+ Object** limit = reinterpret_cast<Object**>(end);
+ CheckStoreBuffer(this,
+ current,
+ limit,
+ &store_buffer_position,
+ store_buffer_top,
+ &IsAMapPointerAddress,
+ space->top(),
+ space->limit());
}
-
- return newmarks;
}
-
-void Heap::IterateDirtyRegions(
- PagedSpace* space,
- DirtyRegionCallback visit_dirty_region,
- ObjectSlotCallback copy_object_func,
- ExpectedPageWatermarkState expected_page_watermark_state) {
-
- PageIterator it(space, PageIterator::PAGES_IN_USE);
-
- while (it.has_next()) {
- Page* page = it.next();
- uint32_t marks = page->GetRegionMarks();
-
- if (marks != Page::kAllRegionsCleanMarks) {
- Address start = page->ObjectAreaStart();
-
- // Do not try to visit pointers beyond page allocation watermark.
- // Page can contain garbage pointers there.
- Address end;
-
- if ((expected_page_watermark_state == WATERMARK_SHOULD_BE_VALID) ||
- page->IsWatermarkValid()) {
- end = page->AllocationWatermark();
- } else {
- end = page->CachedAllocationWatermark();
- }
-
- ASSERT(space == old_pointer_space_ ||
- (space == map_space_ &&
- ((page->ObjectAreaStart() - end) % Map::kSize == 0)));
-
- page->SetRegionMarks(IterateDirtyRegions(marks,
- start,
- end,
- visit_dirty_region,
- copy_object_func));
+void Heap::LargeObjectSpaceCheckStoreBuffer() {
+ LargeObjectIterator it(lo_space());
+ for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
+ // We only have code, sequential strings, or fixed arrays in large
+ // object space, and only fixed arrays can possibly contain pointers to
+ // the young generation.
+ if (object->IsFixedArray()) {
+ Object*** store_buffer_position = store_buffer()->Start();
+ Object*** store_buffer_top = store_buffer()->Top();
+ Object** current = reinterpret_cast<Object**>(object->address());
+ Object** limit =
+ reinterpret_cast<Object**>(object->address() + object->Size());
+ CheckStoreBuffer(this,
+ current,
+ limit,
+ &store_buffer_position,
+ store_buffer_top,
+ &EverythingsAPointer,
+ NULL,
+ NULL);
}
-
- // Mark page watermark as invalid to maintain watermark validity invariant.
- // See Page::FlipMeaningOfInvalidatedWatermarkFlag() for details.
- page->InvalidateWatermark(true);
}
}
+#endif
void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) {
@@ -4941,8 +5191,7 @@ void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
// Iterate over the builtin code objects and code stubs in the
// heap. Note that it is not necessary to iterate over code objects
// on scavenge collections.
- if (mode != VISIT_ALL_IN_SCAVENGE &&
- mode != VISIT_ALL_IN_SWEEP_NEWSPACE) {
+ if (mode != VISIT_ALL_IN_SCAVENGE) {
isolate_->builtins()->IterateBuiltins(v);
}
v->Synchronize("builtins");
@@ -4986,11 +5235,20 @@ void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
// and through the API, we should gracefully handle the case that the heap
// size is not big enough to fit all the initial objects.
bool Heap::ConfigureHeap(int max_semispace_size,
- int max_old_gen_size,
- int max_executable_size) {
+ intptr_t max_old_gen_size,
+ intptr_t max_executable_size) {
if (HasBeenSetup()) return false;
- if (max_semispace_size > 0) max_semispace_size_ = max_semispace_size;
+ if (max_semispace_size > 0) {
+ if (max_semispace_size < Page::kPageSize) {
+ max_semispace_size = Page::kPageSize;
+ if (FLAG_trace_gc) {
+ PrintF("Max semispace size cannot be less than %dkbytes\n",
+ Page::kPageSize >> 10);
+ }
+ }
+ max_semispace_size_ = max_semispace_size;
+ }
if (Snapshot::IsEnabled()) {
// If we are using a snapshot we always reserve the default amount
@@ -5000,6 +5258,10 @@ bool Heap::ConfigureHeap(int max_semispace_size,
// than the default reserved semispace size.
if (max_semispace_size_ > reserved_semispace_size_) {
max_semispace_size_ = reserved_semispace_size_;
+ if (FLAG_trace_gc) {
+ PrintF("Max semispace size cannot be more than %dkbytes\n",
+ reserved_semispace_size_ >> 10);
+ }
}
} else {
// If we are not using snapshots we reserve space for the actual
@@ -5025,8 +5287,12 @@ bool Heap::ConfigureHeap(int max_semispace_size,
initial_semispace_size_ = Min(initial_semispace_size_, max_semispace_size_);
external_allocation_limit_ = 10 * max_semispace_size_;
- // The old generation is paged.
- max_old_generation_size_ = RoundUp(max_old_generation_size_, Page::kPageSize);
+ // The old generation is paged and needs at least one page for each space.
+ int paged_space_count = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1;
+ max_old_generation_size_ = Max(static_cast<intptr_t>(paged_space_count *
+ Page::kPageSize),
+ RoundUp(max_old_generation_size_,
+ Page::kPageSize));
configured_ = true;
return true;
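
A worked example of the new floor (values illustrative; the patch does not quote them): assume Page::kPageSize == 1 MB and five paged spaces (old pointer, old data, code, map, cell). A requested max_old_gen_size of 3 MB rounds up to 3 MB, but the per-space minimum is 5 * 1 MB = 5 MB, so the Max() raises the result to 5 MB: configurations too small to give each paged space one page are silently bumped rather than rejected.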
@@ -5034,9 +5300,9 @@ bool Heap::ConfigureHeap(int max_semispace_size,
bool Heap::ConfigureHeapDefault() {
- return ConfigureHeap(FLAG_max_new_space_size / 2 * KB,
- FLAG_max_old_space_size * MB,
- FLAG_max_executable_size * MB);
+ return ConfigureHeap(static_cast<intptr_t>(FLAG_max_new_space_size / 2) * KB,
+ static_cast<intptr_t>(FLAG_max_old_space_size) * MB,
+ static_cast<intptr_t>(FLAG_max_executable_size) * MB);
}
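
The static_casts are not cosmetic: the flags are plain ints, and int stays 32 bits on typical 64-bit builds, so the multiplications must be widened before they happen (on 32-bit targets intptr_t is no wider, so there the casts simply make the limit explicit). A minimal demonstration:

    #include <cstdint>
    const intptr_t MB = 1024 * 1024;
    // 2048 * 1048576 == 2^31, which overflows 32-bit int before any widening:
    //   intptr_t bad = 2048 * static_cast<int>(MB);   // undefined behavior
    intptr_t good = static_cast<intptr_t>(2048) * MB;  // 2 GB, 64-bit math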
@@ -5064,7 +5330,7 @@ void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
*stats->os_error = OS::GetLastError();
isolate()->memory_allocator()->Available();
if (take_snapshot) {
- HeapIterator iterator(HeapIterator::kFilterFreeListNodes);
+ HeapIterator iterator;
for (HeapObject* obj = iterator.next();
obj != NULL;
obj = iterator.next()) {
@@ -5262,6 +5528,7 @@ class HeapDebugUtils {
bool Heap::Setup(bool create_heap_objects) {
#ifdef DEBUG
+ allocation_timeout_ = FLAG_gc_interval;
debug_utils_ = new HeapDebugUtils(this);
#endif
@@ -5280,31 +5547,21 @@ bool Heap::Setup(bool create_heap_objects) {
gc_initializer_mutex->Lock();
static bool initialized_gc = false;
if (!initialized_gc) {
- initialized_gc = true;
- InitializeScavengingVisitorsTables();
- NewSpaceScavenger::Initialize();
- MarkCompactCollector::Initialize();
+ initialized_gc = true;
+ InitializeScavengingVisitorsTables();
+ NewSpaceScavenger::Initialize();
+ MarkCompactCollector::Initialize();
}
gc_initializer_mutex->Unlock();
MarkMapPointersAsEncoded(false);
- // Setup memory allocator and reserve a chunk of memory for new
- // space. The chunk is double the size of the requested reserved
- // new space size to ensure that we can find a pair of semispaces that
- // are contiguous and aligned to their size.
+ // Setup memory allocator.
if (!isolate_->memory_allocator()->Setup(MaxReserved(), MaxExecutableSize()))
return false;
- void* chunk =
- isolate_->memory_allocator()->ReserveInitialChunk(
- 4 * reserved_semispace_size_);
- if (chunk == NULL) return false;
-
- // Align the pair of semispaces to their size, which must be a power
- // of 2.
- Address new_space_start =
- RoundUp(reinterpret_cast<byte*>(chunk), 2 * reserved_semispace_size_);
- if (!new_space_.Setup(new_space_start, 2 * reserved_semispace_size_)) {
+
+ // Setup new space.
+ if (!new_space_.Setup(reserved_semispace_size_, max_semispace_size_)) {
return false;
}
@@ -5315,7 +5572,7 @@ bool Heap::Setup(bool create_heap_objects) {
OLD_POINTER_SPACE,
NOT_EXECUTABLE);
if (old_pointer_space_ == NULL) return false;
- if (!old_pointer_space_->Setup(NULL, 0)) return false;
+ if (!old_pointer_space_->Setup()) return false;
// Initialize old data space.
old_data_space_ =
@@ -5324,7 +5581,7 @@ bool Heap::Setup(bool create_heap_objects) {
OLD_DATA_SPACE,
NOT_EXECUTABLE);
if (old_data_space_ == NULL) return false;
- if (!old_data_space_->Setup(NULL, 0)) return false;
+ if (!old_data_space_->Setup()) return false;
// Initialize the code space, set its maximum capacity to the old
// generation size. It needs executable memory.
@@ -5339,29 +5596,27 @@ bool Heap::Setup(bool create_heap_objects) {
code_space_ =
new OldSpace(this, max_old_generation_size_, CODE_SPACE, EXECUTABLE);
if (code_space_ == NULL) return false;
- if (!code_space_->Setup(NULL, 0)) return false;
+ if (!code_space_->Setup()) return false;
// Initialize map space.
- map_space_ = new MapSpace(this, FLAG_use_big_map_space
- ? max_old_generation_size_
- : MapSpace::kMaxMapPageIndex * Page::kPageSize,
- FLAG_max_map_space_pages,
- MAP_SPACE);
+ map_space_ = new MapSpace(this,
+ max_old_generation_size_,
+ FLAG_max_map_space_pages,
+ MAP_SPACE);
if (map_space_ == NULL) return false;
- if (!map_space_->Setup(NULL, 0)) return false;
+ if (!map_space_->Setup()) return false;
// Initialize global property cell space.
cell_space_ = new CellSpace(this, max_old_generation_size_, CELL_SPACE);
if (cell_space_ == NULL) return false;
- if (!cell_space_->Setup(NULL, 0)) return false;
+ if (!cell_space_->Setup()) return false;
// The large object code space may contain code or data. We set the memory
// to be non-executable here for safety, but this means we need to enable it
// explicitly when allocating large code objects.
- lo_space_ = new LargeObjectSpace(this, LO_SPACE);
+ lo_space_ = new LargeObjectSpace(this, max_old_generation_size_, LO_SPACE);
if (lo_space_ == NULL) return false;
if (!lo_space_->Setup()) return false;
-
if (create_heap_objects) {
// Create initial maps.
if (!CreateInitialMaps()) return false;
@@ -5376,6 +5631,8 @@ bool Heap::Setup(bool create_heap_objects) {
LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
LOG(isolate_, IntPtrTEvent("heap-available", Available()));
+ store_buffer()->Setup();
+
return true;
}
@@ -5402,7 +5659,6 @@ void Heap::TearDown() {
PrintF("\n\n");
PrintF("gc_count=%d ", gc_count_);
PrintF("mark_sweep_count=%d ", ms_count_);
- PrintF("mark_compact_count=%d ", mc_count_);
PrintF("max_gc_pause=%d ", get_max_gc_pause());
PrintF("min_in_mutator=%d ", get_min_in_mutator());
PrintF("max_alive_after_gc=%" V8_PTR_PREFIX "d ",
@@ -5452,6 +5708,9 @@ void Heap::TearDown() {
lo_space_ = NULL;
}
+ store_buffer()->TearDown();
+ incremental_marking()->TearDown();
+
isolate_->memory_allocator()->TearDown();
#ifdef DEBUG
@@ -5464,8 +5723,11 @@ void Heap::TearDown() {
void Heap::Shrink() {
// Try to shrink all paged spaces.
PagedSpaces spaces;
- for (PagedSpace* space = spaces.next(); space != NULL; space = spaces.next())
- space->Shrink();
+ for (PagedSpace* space = spaces.next();
+ space != NULL;
+ space = spaces.next()) {
+ space->ReleaseAllUnusedPages();
+ }
}
@@ -5668,98 +5930,54 @@ class HeapObjectsFilter {
};
-class FreeListNodesFilter : public HeapObjectsFilter {
- public:
- FreeListNodesFilter() {
- MarkFreeListNodes();
- }
-
- bool SkipObject(HeapObject* object) {
- if (object->IsMarked()) {
- object->ClearMark();
- return true;
- } else {
- return false;
- }
- }
-
- private:
- void MarkFreeListNodes() {
- Heap* heap = HEAP;
- heap->old_pointer_space()->MarkFreeListNodes();
- heap->old_data_space()->MarkFreeListNodes();
- MarkCodeSpaceFreeListNodes(heap);
- heap->map_space()->MarkFreeListNodes();
- heap->cell_space()->MarkFreeListNodes();
- }
-
- void MarkCodeSpaceFreeListNodes(Heap* heap) {
- // For code space, using FreeListNode::IsFreeListNode is OK.
- HeapObjectIterator iter(heap->code_space());
- for (HeapObject* obj = iter.next_object();
- obj != NULL;
- obj = iter.next_object()) {
- if (FreeListNode::IsFreeListNode(obj)) obj->SetMark();
- }
- }
-
- AssertNoAllocation no_alloc;
-};
-
-
class UnreachableObjectsFilter : public HeapObjectsFilter {
public:
UnreachableObjectsFilter() {
- MarkUnreachableObjects();
+ MarkReachableObjects();
+ }
+
+ ~UnreachableObjectsFilter() {
+ Isolate::Current()->heap()->mark_compact_collector()->ClearMarkbits();
}
bool SkipObject(HeapObject* object) {
- if (object->IsMarked()) {
- object->ClearMark();
- return true;
- } else {
- return false;
- }
+ MarkBit mark_bit = Marking::MarkBitFrom(object);
+ return !mark_bit.Get();
}
private:
- class UnmarkingVisitor : public ObjectVisitor {
+ class MarkingVisitor : public ObjectVisitor {
public:
- UnmarkingVisitor() : list_(10) {}
+ MarkingVisitor() : marking_stack_(10) {}
void VisitPointers(Object** start, Object** end) {
for (Object** p = start; p < end; p++) {
if (!(*p)->IsHeapObject()) continue;
HeapObject* obj = HeapObject::cast(*p);
- if (obj->IsMarked()) {
- obj->ClearMark();
- list_.Add(obj);
+ MarkBit mark_bit = Marking::MarkBitFrom(obj);
+ if (!mark_bit.Get()) {
+ mark_bit.Set();
+ marking_stack_.Add(obj);
}
}
}
- bool can_process() { return !list_.is_empty(); }
-
- void ProcessNext() {
- HeapObject* obj = list_.RemoveLast();
- obj->Iterate(this);
+ void TransitiveClosure() {
+ while (!marking_stack_.is_empty()) {
+ HeapObject* obj = marking_stack_.RemoveLast();
+ obj->Iterate(this);
+ }
}
private:
- List<HeapObject*> list_;
+ List<HeapObject*> marking_stack_;
};
- void MarkUnreachableObjects() {
- HeapIterator iterator;
- for (HeapObject* obj = iterator.next();
- obj != NULL;
- obj = iterator.next()) {
- obj->SetMark();
- }
- UnmarkingVisitor visitor;
- HEAP->IterateRoots(&visitor, VISIT_ALL);
- while (visitor.can_process())
- visitor.ProcessNext();
+ void MarkReachableObjects() {
+ Heap* heap = Isolate::Current()->heap();
+ MarkingVisitor visitor;
+ heap->IterateRoots(&visitor, VISIT_ALL);
+ visitor.TransitiveClosure();
}
AssertNoAllocation no_alloc;
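
Usage sketch for the rewritten filter (the constructor plumbing appears in HeapIterator::Init below):

    HeapIterator iterator(HeapIterator::kFilterUnreachable);
    for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
      // Only objects reachable from the roots are visited: SkipObject()
      // tests the mark bit, and ~UnreachableObjectsFilter() clears all
      // mark bits once iteration is done.
    }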
@@ -5787,12 +6005,8 @@ HeapIterator::~HeapIterator() {
void HeapIterator::Init() {
// Start the iteration.
- space_iterator_ = filtering_ == kNoFiltering ? new SpaceIterator :
- new SpaceIterator(MarkCompactCollector::SizeOfMarkedObject);
+ space_iterator_ = new SpaceIterator;
switch (filtering_) {
- case kFilterFreeListNodes:
- filter_ = new FreeListNodesFilter;
- break;
case kFilterUnreachable:
filter_ = new UnreachableObjectsFilter;
break;
@@ -5928,6 +6142,11 @@ void PathTracer::TracePathFrom(Object** root) {
}
+static bool SafeIsGlobalContext(HeapObject* obj) {
+ return obj->map() == obj->GetHeap()->raw_unchecked_global_context_map();
+}
+
+
void PathTracer::MarkRecursively(Object** p, MarkVisitor* mark_visitor) {
if (!(*p)->IsHeapObject()) return;
@@ -5946,7 +6165,7 @@ void PathTracer::MarkRecursively(Object** p, MarkVisitor* mark_visitor) {
return;
}
- bool is_global_context = obj->IsGlobalContext();
+ bool is_global_context = SafeIsGlobalContext(obj);
// not visited yet
Map* map_p = reinterpret_cast<Map*>(HeapObject::cast(map));
@@ -6054,7 +6273,7 @@ static intptr_t CountTotalHolesSize() {
for (OldSpace* space = spaces.next();
space != NULL;
space = spaces.next()) {
- holes_size += space->Waste() + space->AvailableFree();
+ holes_size += space->Waste() + space->Available();
}
return holes_size;
}
@@ -6065,17 +6284,10 @@ GCTracer::GCTracer(Heap* heap)
start_size_(0),
gc_count_(0),
full_gc_count_(0),
- is_compacting_(false),
- marked_count_(0),
allocated_since_last_gc_(0),
spent_in_mutator_(0),
promoted_objects_size_(0),
heap_(heap) {
- // These two fields reflect the state of the previous full collection.
- // Set them before they are changed by the collector.
- previous_has_compacted_ = heap_->mark_compact_collector_.HasCompacted();
- previous_marked_count_ =
- heap_->mark_compact_collector_.previous_marked_count();
if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
start_time_ = OS::TimeCurrentMillis();
start_size_ = heap_->SizeOfObjects();
@@ -6092,6 +6304,14 @@ GCTracer::GCTracer(Heap* heap)
if (heap_->last_gc_end_timestamp_ > 0) {
spent_in_mutator_ = Max(start_time_ - heap_->last_gc_end_timestamp_, 0.0);
}
+
+ steps_count_ = heap_->incremental_marking()->steps_count();
+ steps_took_ = heap_->incremental_marking()->steps_took();
+ longest_step_ = heap_->incremental_marking()->longest_step();
+ steps_count_since_last_gc_ =
+ heap_->incremental_marking()->steps_count_since_last_gc();
+ steps_took_since_last_gc_ =
+ heap_->incremental_marking()->steps_took_since_last_gc();
}
@@ -6126,7 +6346,21 @@ GCTracer::~GCTracer() {
SizeOfHeapObjects());
if (external_time > 0) PrintF("%d / ", external_time);
- PrintF("%d ms.\n", time);
+ PrintF("%d ms", time);
+ if (steps_count_ > 0) {
+ if (collector_ == SCAVENGER) {
+ PrintF(" (+ %d ms in %d steps since last GC)",
+ static_cast<int>(steps_took_since_last_gc_),
+ steps_count_since_last_gc_);
+ } else {
+ PrintF(" (+ %d ms in %d steps since start of marking, "
+ "biggest step %f ms)",
+ static_cast<int>(steps_took_),
+ steps_count_,
+ longest_step_);
+ }
+ }
+ PrintF(".\n");
} else {
PrintF("pause=%d ", time);
PrintF("mutator=%d ",
@@ -6138,8 +6372,7 @@ GCTracer::~GCTracer() {
PrintF("s");
break;
case MARK_COMPACTOR:
- PrintF("%s",
- heap_->mark_compact_collector_.HasCompacted() ? "mc" : "ms");
+ PrintF("ms");
break;
default:
UNREACHABLE();
@@ -6150,7 +6383,19 @@ GCTracer::~GCTracer() {
PrintF("mark=%d ", static_cast<int>(scopes_[Scope::MC_MARK]));
PrintF("sweep=%d ", static_cast<int>(scopes_[Scope::MC_SWEEP]));
PrintF("sweepns=%d ", static_cast<int>(scopes_[Scope::MC_SWEEP_NEWSPACE]));
- PrintF("compact=%d ", static_cast<int>(scopes_[Scope::MC_COMPACT]));
+ PrintF("evacuate=%d ", static_cast<int>(scopes_[Scope::MC_EVACUATE_PAGES]));
+ PrintF("new_new=%d ",
+ static_cast<int>(scopes_[Scope::MC_UPDATE_NEW_TO_NEW_POINTERS]));
+ PrintF("root_new=%d ",
+ static_cast<int>(scopes_[Scope::MC_UPDATE_ROOT_TO_NEW_POINTERS]));
+ PrintF("old_new=%d ",
+ static_cast<int>(scopes_[Scope::MC_UPDATE_OLD_TO_NEW_POINTERS]));
+ PrintF("compaction_ptrs=%d ",
+ static_cast<int>(scopes_[Scope::MC_UPDATE_POINTERS_TO_EVACUATED]));
+ PrintF("intracompaction_ptrs=%d ", static_cast<int>(scopes_[
+ Scope::MC_UPDATE_POINTERS_BETWEEN_EVACUATED]));
+ PrintF("misc_compaction=%d ",
+ static_cast<int>(scopes_[Scope::MC_UPDATE_MISC_POINTERS]));
PrintF("total_size_before=%" V8_PTR_PREFIX "d ", start_size_);
PrintF("total_size_after=%" V8_PTR_PREFIX "d ", heap_->SizeOfObjects());
@@ -6161,6 +6406,14 @@ GCTracer::~GCTracer() {
PrintF("allocated=%" V8_PTR_PREFIX "d ", allocated_since_last_gc_);
PrintF("promoted=%" V8_PTR_PREFIX "d ", promoted_objects_size_);
+ if (collector_ == SCAVENGER) {
+ PrintF("stepscount=%d ", steps_count_since_last_gc_);
+ PrintF("stepstook=%d ", static_cast<int>(steps_took_since_last_gc_));
+ } else {
+ PrintF("stepscount=%d ", steps_count_);
+ PrintF("stepstook=%d ", static_cast<int>(steps_took_));
+ }
+
PrintF("\n");
}
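
Taken together, the PrintF changes extend a --trace-gc pause line like this (numbers illustrative, format taken from the calls above):

    ... 12 ms (+ 4 ms in 17 steps since last GC).
    ... 31 ms (+ 25 ms in 66 steps since start of marking, biggest step 2.300000 ms).

and the name-value-pair output gains the matching stepscount= and stepstook= fields.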
@@ -6173,8 +6426,7 @@ const char* GCTracer::CollectorString() {
case SCAVENGER:
return "Scavenge";
case MARK_COMPACTOR:
- return heap_->mark_compact_collector_.HasCompacted() ? "Mark-compact"
- : "Mark-sweep";
+ return "Mark-sweep";
}
return "Unknown GC";
}
@@ -6256,7 +6508,9 @@ void TranscendentalCache::Clear() {
void ExternalStringTable::CleanUp() {
int last = 0;
for (int i = 0; i < new_space_strings_.length(); ++i) {
- if (new_space_strings_[i] == heap_->raw_unchecked_null_value()) continue;
+ if (new_space_strings_[i] == heap_->raw_unchecked_the_hole_value()) {
+ continue;
+ }
if (heap_->InNewSpace(new_space_strings_[i])) {
new_space_strings_[last++] = new_space_strings_[i];
} else {
@@ -6266,12 +6520,16 @@ void ExternalStringTable::CleanUp() {
new_space_strings_.Rewind(last);
last = 0;
for (int i = 0; i < old_space_strings_.length(); ++i) {
- if (old_space_strings_[i] == heap_->raw_unchecked_null_value()) continue;
+ if (old_space_strings_[i] == heap_->raw_unchecked_the_hole_value()) {
+ continue;
+ }
ASSERT(!heap_->InNewSpace(old_space_strings_[i]));
old_space_strings_[last++] = old_space_strings_[i];
}
old_space_strings_.Rewind(last);
- Verify();
+ if (FLAG_verify_heap) {
+ Verify();
+ }
}
@@ -6281,4 +6539,53 @@ void ExternalStringTable::TearDown() {
}
+void Heap::QueueMemoryChunkForFree(MemoryChunk* chunk) {
+ chunk->set_next_chunk(chunks_queued_for_free_);
+ chunks_queued_for_free_ = chunk;
+}
+
+
+void Heap::FreeQueuedChunks() {
+ if (chunks_queued_for_free_ == NULL) return;
+ MemoryChunk* next;
+ MemoryChunk* chunk;
+ for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
+ next = chunk->next_chunk();
+ chunk->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
+
+ if (chunk->owner()->identity() == LO_SPACE) {
+ // StoreBuffer::Filter relies on MemoryChunk::FromAnyPointerAddress.
+      // If FromAnyPointerAddress encounters a slot that belongs to a large
+      // chunk queued for deletion, it will fail to find the chunk because
+      // it searches the list of pages owned by the large object space, and
+      // queued chunks were detached from that list.
+      // To work around this we split the large chunk into normal
+      // kPageSize-aligned pieces and initialize the size, owner and flags
+      // fields of every piece. If FromAnyPointerAddress then encounters a
+      // slot in one of these smaller pieces, it treats it as a slot on a
+      // normal Page.
+ MemoryChunk* inner = MemoryChunk::FromAddress(
+ chunk->address() + Page::kPageSize);
+ MemoryChunk* inner_last = MemoryChunk::FromAddress(
+ chunk->address() + chunk->size() - 1);
+ while (inner <= inner_last) {
+ // Size of a large chunk is always a multiple of
+ // OS::AllocateAlignment() so there is always
+ // enough space for a fake MemoryChunk header.
+ inner->set_size(Page::kPageSize);
+ inner->set_owner(lo_space());
+ inner->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
+ inner = MemoryChunk::FromAddress(
+ inner->address() + Page::kPageSize);
+ }
+ }
+ }
+ isolate_->heap()->store_buffer()->Compact();
+ isolate_->heap()->store_buffer()->Filter(MemoryChunk::ABOUT_TO_BE_FREED);
+ for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
+ next = chunk->next_chunk();
+ isolate_->memory_allocator()->Free(chunk);
+ }
+ chunks_queued_for_free_ = NULL;
+}
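
The interior-header walk above amounts to simple page arithmetic: for a large chunk spanning N pages, fake MemoryChunk headers are written at chunk->address() + k * Page::kPageSize for k = 1 .. N-1. A 5-page chunk, for example, gets 4 fake headers, each claiming size Page::kPageSize, owner lo_space() and the ABOUT_TO_BE_FREED flag, so that MemoryChunk::FromAnyPointerAddress can mask any interior address down to something that looks like a normal Page header.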
+
} } // namespace v8::internal
diff --git a/deps/v8/src/heap.h b/deps/v8/src/heap.h
index d81ff6cad..741e3d977 100644
--- a/deps/v8/src/heap.h
+++ b/deps/v8/src/heap.h
@@ -32,11 +32,15 @@
#include "allocation.h"
#include "globals.h"
+#include "incremental-marking.h"
#include "list.h"
#include "mark-compact.h"
+#include "objects-visiting.h"
#include "spaces.h"
#include "splay-tree-inl.h"
+#include "store-buffer.h"
#include "v8-counters.h"
+#include "v8globals.h"
namespace v8 {
namespace internal {
@@ -48,30 +52,43 @@ inline Heap* _inline_get_heap_();
// Defines all the roots in Heap.
-#define STRONG_ROOT_LIST(V) \
- /* Put the byte array map early. We need it to be in place by the time */ \
- /* the deserializer hits the next page, since it wants to put a byte */ \
- /* array in the unused space at the end of the page. */ \
+#define STRONG_ROOT_LIST(V) \
V(Map, byte_array_map, ByteArrayMap) \
+ V(Map, free_space_map, FreeSpaceMap) \
V(Map, one_pointer_filler_map, OnePointerFillerMap) \
V(Map, two_pointer_filler_map, TwoPointerFillerMap) \
/* Cluster the most popular ones in a few cache lines here at the top. */ \
- V(Object, undefined_value, UndefinedValue) \
- V(Object, the_hole_value, TheHoleValue) \
- V(Object, null_value, NullValue) \
- V(Object, true_value, TrueValue) \
- V(Object, false_value, FalseValue) \
- V(Object, arguments_marker, ArgumentsMarker) \
+ V(Smi, store_buffer_top, StoreBufferTop) \
+ V(Oddball, undefined_value, UndefinedValue) \
+ V(Oddball, the_hole_value, TheHoleValue) \
+ V(Oddball, null_value, NullValue) \
+ V(Oddball, true_value, TrueValue) \
+ V(Oddball, false_value, FalseValue) \
+ V(Map, global_property_cell_map, GlobalPropertyCellMap) \
+ V(Map, shared_function_info_map, SharedFunctionInfoMap) \
+ V(Map, meta_map, MetaMap) \
+ V(Map, ascii_symbol_map, AsciiSymbolMap) \
+ V(Map, ascii_string_map, AsciiStringMap) \
V(Map, heap_number_map, HeapNumberMap) \
V(Map, global_context_map, GlobalContextMap) \
V(Map, fixed_array_map, FixedArrayMap) \
- V(Map, serialized_scope_info_map, SerializedScopeInfoMap) \
+ V(Map, code_map, CodeMap) \
+ V(Map, scope_info_map, ScopeInfoMap) \
V(Map, fixed_cow_array_map, FixedCOWArrayMap) \
V(Map, fixed_double_array_map, FixedDoubleArrayMap) \
V(Object, no_interceptor_result_sentinel, NoInterceptorResultSentinel) \
- V(Map, meta_map, MetaMap) \
V(Map, hash_table_map, HashTableMap) \
+ V(FixedArray, empty_fixed_array, EmptyFixedArray) \
+ V(ByteArray, empty_byte_array, EmptyByteArray) \
+ V(FixedDoubleArray, empty_fixed_double_array, EmptyFixedDoubleArray) \
+ V(String, empty_string, EmptyString) \
+ V(DescriptorArray, empty_descriptor_array, EmptyDescriptorArray) \
V(Smi, stack_limit, StackLimit) \
+ V(Oddball, frame_alignment_marker, FrameAlignmentMarker) \
+ V(Oddball, arguments_marker, ArgumentsMarker) \
+ /* The first 32 roots above this line should be boring from a GC point of */ \
+ /* view. This means they are never in new space and never on a page that */ \
+ /* is being compacted. */ \
V(FixedArray, number_string_cache, NumberStringCache) \
V(Object, instanceof_cache_function, InstanceofCacheFunction) \
V(Object, instanceof_cache_map, InstanceofCacheMap) \
@@ -79,19 +96,12 @@ inline Heap* _inline_get_heap_();
V(FixedArray, single_character_string_cache, SingleCharacterStringCache) \
V(FixedArray, string_split_cache, StringSplitCache) \
V(Object, termination_exception, TerminationException) \
- V(FixedArray, empty_fixed_array, EmptyFixedArray) \
- V(ByteArray, empty_byte_array, EmptyByteArray) \
- V(FixedDoubleArray, empty_fixed_double_array, EmptyFixedDoubleArray) \
- V(String, empty_string, EmptyString) \
- V(DescriptorArray, empty_descriptor_array, EmptyDescriptorArray) \
V(Map, string_map, StringMap) \
- V(Map, ascii_string_map, AsciiStringMap) \
V(Map, symbol_map, SymbolMap) \
V(Map, cons_string_map, ConsStringMap) \
V(Map, cons_ascii_string_map, ConsAsciiStringMap) \
V(Map, sliced_string_map, SlicedStringMap) \
V(Map, sliced_ascii_string_map, SlicedAsciiStringMap) \
- V(Map, ascii_symbol_map, AsciiSymbolMap) \
V(Map, cons_symbol_map, ConsSymbolMap) \
V(Map, cons_ascii_symbol_map, ConsAsciiSymbolMap) \
V(Map, external_symbol_map, ExternalSymbolMap) \
@@ -100,6 +110,16 @@ inline Heap* _inline_get_heap_();
V(Map, external_string_map, ExternalStringMap) \
V(Map, external_string_with_ascii_data_map, ExternalStringWithAsciiDataMap) \
V(Map, external_ascii_string_map, ExternalAsciiStringMap) \
+ V(Map, short_external_symbol_map, ShortExternalSymbolMap) \
+ V(Map, \
+ short_external_symbol_with_ascii_data_map, \
+ ShortExternalSymbolWithAsciiDataMap) \
+ V(Map, short_external_ascii_symbol_map, ShortExternalAsciiSymbolMap) \
+ V(Map, short_external_string_map, ShortExternalStringMap) \
+ V(Map, \
+ short_external_string_with_ascii_data_map, \
+ ShortExternalStringWithAsciiDataMap) \
+ V(Map, short_external_ascii_string_map, ShortExternalAsciiStringMap) \
V(Map, undetectable_string_map, UndetectableStringMap) \
V(Map, undetectable_ascii_string_map, UndetectableAsciiStringMap) \
V(Map, external_pixel_array_map, ExternalPixelArrayMap) \
@@ -116,14 +136,12 @@ inline Heap* _inline_get_heap_();
V(Map, catch_context_map, CatchContextMap) \
V(Map, with_context_map, WithContextMap) \
V(Map, block_context_map, BlockContextMap) \
- V(Map, code_map, CodeMap) \
V(Map, oddball_map, OddballMap) \
- V(Map, global_property_cell_map, GlobalPropertyCellMap) \
- V(Map, shared_function_info_map, SharedFunctionInfoMap) \
V(Map, message_object_map, JSMessageObjectMap) \
V(Map, foreign_map, ForeignMap) \
- V(Object, nan_value, NanValue) \
- V(Object, minus_zero_value, MinusZeroValue) \
+ V(HeapNumber, nan_value, NanValue) \
+ V(HeapNumber, infinity_value, InfinityValue) \
+ V(HeapNumber, minus_zero_value, MinusZeroValue) \
V(Map, neander_map, NeanderMap) \
V(JSObject, message_listeners, MessageListeners) \
V(Foreign, prototype_accessors, PrototypeAccessors) \
@@ -226,7 +244,9 @@ inline Heap* _inline_get_heap_();
V(closure_symbol, "(closure)") \
V(use_strict, "use strict") \
V(dot_symbol, ".") \
- V(anonymous_function_symbol, "(anonymous function)")
+ V(anonymous_function_symbol, "(anonymous function)") \
+ V(infinity_symbol, "Infinity") \
+ V(minus_infinity_symbol, "-Infinity")
// Forward declarations.
class GCTracer;
@@ -238,10 +258,26 @@ class WeakObjectRetainer;
typedef String* (*ExternalStringTableUpdaterCallback)(Heap* heap,
Object** pointer);
-typedef bool (*DirtyRegionCallback)(Heap* heap,
- Address start,
- Address end,
- ObjectSlotCallback copy_object_func);
+class StoreBufferRebuilder {
+ public:
+ explicit StoreBufferRebuilder(StoreBuffer* store_buffer)
+ : store_buffer_(store_buffer) {
+ }
+
+ void Callback(MemoryChunk* page, StoreBufferEvent event);
+
+ private:
+ StoreBuffer* store_buffer_;
+
+ // We record in this variable how full the store buffer was when we started
+ // iterating over the current page, finding pointers to new space. If the
+ // store buffer overflows again we can exempt the page from the store buffer
+ // by rewinding to this point instead of having to search the store buffer.
+ Object*** start_of_current_page_;
+ // The current page we are scanning in the store buffer iterator.
+ MemoryChunk* current_page_;
+};
+
// The all static Heap captures the interface to the global object heap.
@@ -256,32 +292,103 @@ class HeapDebugUtils;
// by its size to avoid dereferencing a map pointer for scanning.
class PromotionQueue {
public:
- PromotionQueue() : front_(NULL), rear_(NULL) { }
+ explicit PromotionQueue(Heap* heap)
+ : front_(NULL),
+ rear_(NULL),
+ limit_(NULL),
+ emergency_stack_(0),
+ heap_(heap) { }
+
+ void Initialize();
+
+ void Destroy() {
+ ASSERT(is_empty());
+ delete emergency_stack_;
+ emergency_stack_ = NULL;
+ }
- void Initialize(Address start_address) {
- front_ = rear_ = reinterpret_cast<intptr_t*>(start_address);
+ inline void ActivateGuardIfOnTheSamePage();
+
+ Page* GetHeadPage() {
+ return Page::FromAllocationTop(reinterpret_cast<Address>(rear_));
}
- bool is_empty() { return front_ <= rear_; }
+ void SetNewLimit(Address limit) {
+ if (!guard_) {
+ return;
+ }
+
+ ASSERT(GetHeadPage() == Page::FromAllocationTop(limit));
+ limit_ = reinterpret_cast<intptr_t*>(limit);
+
+ if (limit_ <= rear_) {
+ return;
+ }
+
+ RelocateQueueHead();
+ }
+
+ bool is_empty() {
+ return (front_ == rear_) &&
+ (emergency_stack_ == NULL || emergency_stack_->length() == 0);
+ }
inline void insert(HeapObject* target, int size);
void remove(HeapObject** target, int* size) {
+ ASSERT(!is_empty());
+ if (front_ == rear_) {
+ Entry e = emergency_stack_->RemoveLast();
+ *target = e.obj_;
+ *size = e.size_;
+ return;
+ }
+
+ if (NewSpacePage::IsAtStart(reinterpret_cast<Address>(front_))) {
+ NewSpacePage* front_page =
+ NewSpacePage::FromAddress(reinterpret_cast<Address>(front_));
+ ASSERT(!front_page->prev_page()->is_anchor());
+ front_ =
+ reinterpret_cast<intptr_t*>(front_page->prev_page()->body_limit());
+ }
*target = reinterpret_cast<HeapObject*>(*(--front_));
*size = static_cast<int>(*(--front_));
// Assert no underflow.
- ASSERT(front_ >= rear_);
+ SemiSpace::AssertValidRange(reinterpret_cast<Address>(rear_),
+ reinterpret_cast<Address>(front_));
}
private:
- // The front of the queue is higher in memory than the rear.
+ // The front of the queue is higher in the memory page chain than the rear.
intptr_t* front_;
intptr_t* rear_;
+ intptr_t* limit_;
+
+ bool guard_;
+
+ static const int kEntrySizeInWords = 2;
+
+ struct Entry {
+ Entry(HeapObject* obj, int size) : obj_(obj), size_(size) { }
+
+ HeapObject* obj_;
+ int size_;
+ };
+ List<Entry>* emergency_stack_;
+
+ Heap* heap_;
+
+ void RelocateQueueHead();
DISALLOW_COPY_AND_ASSIGN(PromotionQueue);
};
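
remove() above fixes the entry layout: each entry occupies kEntrySizeInWords == 2 words and is popped target-first, then size. The matching insert() body is not part of this hunk; a sketch of what it presumably looks like, inferred from remove() rather than quoted:

    void PromotionQueue::insert(HeapObject* target, int size) {
      // Page-boundary and guard handling elided in this sketch.
      *(--rear_) = reinterpret_cast<intptr_t>(target);
      *(--rear_) = static_cast<intptr_t>(size);
    }

rear_ moves downward, writing the target above the size, so remove()'s two front_ decrements find them in the right order.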
+typedef void (*ScavengingCallback)(Map* map,
+ HeapObject** slot,
+ HeapObject* object);
+
+
// External strings table is a place where all external strings are
// registered. We need to keep track of such strings to properly
// finalize them.
@@ -327,8 +434,8 @@ class Heap {
// Configure heap size before setup. Return false if the heap has been
// setup already.
bool ConfigureHeap(int max_semispace_size,
- int max_old_gen_size,
- int max_executable_size);
+ intptr_t max_old_gen_size,
+ intptr_t max_executable_size);
bool ConfigureHeapDefault();
// Initializes the global object heap. If create_heap_objects is true,
@@ -456,6 +563,7 @@ class Heap {
// size, but keeping the original prototype. The receiver must have at least
// the size of the new object. The object is reinitialized and behaves as an
// object that has been freshly allocated.
+  // Returns failure if an error occurred, otherwise the object.
MUST_USE_RESULT MaybeObject* ReinitializeJSReceiver(JSReceiver* object,
InstanceType type,
int size);
@@ -484,8 +592,10 @@ class Heap {
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this function does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* AllocateMap(InstanceType instance_type,
- int instance_size);
+ MUST_USE_RESULT MaybeObject* AllocateMap(
+ InstanceType instance_type,
+ int instance_size,
+ ElementsKind elements_kind = FAST_ELEMENTS);
// Allocates a partial map for bootstrapping.
MUST_USE_RESULT MaybeObject* AllocatePartialMap(InstanceType instance_type,
@@ -498,7 +608,7 @@ class Heap {
MUST_USE_RESULT MaybeObject* AllocateCodeCache();
// Allocates a serialized scope info.
- MUST_USE_RESULT MaybeObject* AllocateSerializedScopeInfo(int length);
+ MUST_USE_RESULT MaybeObject* AllocateScopeInfo(int length);
// Allocates an empty PolymorphicCodeCache.
MUST_USE_RESULT MaybeObject* AllocatePolymorphicCodeCache();
@@ -688,7 +798,7 @@ class Heap {
// Allocate a block context.
MUST_USE_RESULT MaybeObject* AllocateBlockContext(JSFunction* function,
Context* previous,
- SerializedScopeInfo* info);
+ ScopeInfo* info);
// Allocates a new utility object in the old generation.
MUST_USE_RESULT MaybeObject* AllocateStruct(InstanceType type);
@@ -737,13 +847,15 @@ class Heap {
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this does not perform a garbage collection.
- MUST_USE_RESULT inline MaybeObject* NumberFromInt32(int32_t value);
+ MUST_USE_RESULT inline MaybeObject* NumberFromInt32(
+ int32_t value, PretenureFlag pretenure = NOT_TENURED);
// Converts an int into either a Smi or a HeapNumber object.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this does not perform a garbage collection.
- MUST_USE_RESULT inline MaybeObject* NumberFromUint32(uint32_t value);
+ MUST_USE_RESULT inline MaybeObject* NumberFromUint32(
+ uint32_t value, PretenureFlag pretenure = NOT_TENURED);
// Allocates a new foreign object.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
@@ -796,9 +908,9 @@ class Heap {
// failed.
// Please note this does not perform a garbage collection.
MUST_USE_RESULT MaybeObject* AllocateExternalStringFromAscii(
- ExternalAsciiString::Resource* resource);
+ const ExternalAsciiString::Resource* resource);
MUST_USE_RESULT MaybeObject* AllocateExternalStringFromTwoByte(
- ExternalTwoByteString::Resource* resource);
+ const ExternalTwoByteString::Resource* resource);
// Finalizes an external string by deleting the associated external
// data and clearing the resource pointer.
@@ -885,13 +997,24 @@ class Heap {
// collect more garbage.
inline bool CollectGarbage(AllocationSpace space);
- // Performs a full garbage collection. Force compaction if the
- // parameter is true.
- void CollectAllGarbage(bool force_compaction);
+ static const int kNoGCFlags = 0;
+ static const int kMakeHeapIterableMask = 1;
+
+ // Performs a full garbage collection. If (flags & kMakeHeapIterableMask) is
+ // non-zero, then the slower precise sweeper is used, which leaves the heap
+ // in a state where we can iterate over the heap visiting all objects.
+ void CollectAllGarbage(int flags);
// Last hope GC, should try to squeeze as much as possible.
void CollectAllAvailableGarbage();
+ // Check whether the heap is currently iterable.
+ bool IsHeapIterable();
+
+ // Ensure that we have swept all spaces in such a way that we can iterate
+ // over all objects. May cause a GC.
+ void EnsureHeapIsIterable();
+
// Notify the heap that a context has been disposed.
int NotifyContextDisposed() { return ++contexts_disposed_; }
@@ -899,6 +1022,20 @@ class Heap {
// ensure correct callback for weak global handles.
void PerformScavenge();
+ inline void increment_scan_on_scavenge_pages() {
+ scan_on_scavenge_pages_++;
+ if (FLAG_gc_verbose) {
+ PrintF("Scan-on-scavenge pages: %d\n", scan_on_scavenge_pages_);
+ }
+ }
+
+ inline void decrement_scan_on_scavenge_pages() {
+ scan_on_scavenge_pages_--;
+ if (FLAG_gc_verbose) {
+ PrintF("Scan-on-scavenge pages: %d\n", scan_on_scavenge_pages_);
+ }
+ }
+
PromotionQueue* promotion_queue() { return &promotion_queue_; }
#ifdef DEBUG
@@ -925,6 +1062,8 @@ class Heap {
// Heap root getters. We have versions with and without type::cast() here.
// You can't use type::cast during GC because the assert fails.
+ // TODO(1490): Try removing the unchecked accessors, now that GC marking does
+ // not corrupt the stack.
#define ROOT_ACCESSOR(type, name, camel_name) \
type* name() { \
return type::cast(roots_[k##camel_name##RootIndex]); \
@@ -958,6 +1097,9 @@ class Heap {
}
Object* global_contexts_list() { return global_contexts_list_; }
+ // Number of mark-sweeps.
+ int ms_count() { return ms_count_; }
+
// Iterates over all roots in the heap.
void IterateRoots(ObjectVisitor* v, VisitMode mode);
// Iterates over all strong roots in the heap.
@@ -965,60 +1107,16 @@ class Heap {
// Iterates over all the other roots in the heap.
void IterateWeakRoots(ObjectVisitor* v, VisitMode mode);
- enum ExpectedPageWatermarkState {
- WATERMARK_SHOULD_BE_VALID,
- WATERMARK_CAN_BE_INVALID
- };
-
- // For each dirty region on a page in use from an old space call
- // visit_dirty_region callback.
- // If either visit_dirty_region or callback can cause an allocation
- // in old space and changes in allocation watermark then
- // can_preallocate_during_iteration should be set to true.
- // All pages will be marked as having invalid watermark upon
- // iteration completion.
- void IterateDirtyRegions(
- PagedSpace* space,
- DirtyRegionCallback visit_dirty_region,
- ObjectSlotCallback callback,
- ExpectedPageWatermarkState expected_page_watermark_state);
-
- // Interpret marks as a bitvector of dirty marks for regions of size
- // Page::kRegionSize aligned by Page::kRegionAlignmentMask and covering
- // memory interval from start to top. For each dirty region call a
- // visit_dirty_region callback. Return updated bitvector of dirty marks.
- uint32_t IterateDirtyRegions(uint32_t marks,
- Address start,
- Address end,
- DirtyRegionCallback visit_dirty_region,
- ObjectSlotCallback callback);
-
  // Iterate pointers to the from-semispace of new space found in the memory
  // interval from start to end.
- // Update dirty marks for page containing start address.
void IterateAndMarkPointersToFromSpace(Address start,
Address end,
ObjectSlotCallback callback);
- // Iterate pointers to new space found in memory interval from start to end.
-  // Return true if pointers to new space were found.
- static bool IteratePointersInDirtyRegion(Heap* heap,
- Address start,
- Address end,
- ObjectSlotCallback callback);
-
-
- // Iterate pointers to new space found in memory interval from start to end.
- // This interval is considered to belong to the map space.
-  // Return true if pointers to new space were found.
- static bool IteratePointersInDirtyMapsRegion(Heap* heap,
- Address start,
- Address end,
- ObjectSlotCallback callback);
-
-
// Returns whether the object resides in new space.
inline bool InNewSpace(Object* object);
+ inline bool InNewSpace(Address addr);
+ inline bool InNewSpacePage(Address addr);
inline bool InFromSpace(Object* object);
inline bool InToSpace(Object* object);
@@ -1057,11 +1155,19 @@ class Heap {
roots_[kEmptyScriptRootIndex] = script;
}
+ void public_set_store_buffer_top(Address* top) {
+ roots_[kStoreBufferTopRootIndex] = reinterpret_cast<Smi*>(top);
+ }
+
// Update the next script id.
inline void SetLastScriptId(Object* last_script_id);
// Generated code can embed this address to get access to the roots.
- Object** roots_address() { return roots_; }
+ Object** roots_array_start() { return roots_; }
+
+ Address* store_buffer_top_address() {
+ return reinterpret_cast<Address*>(&roots_[kStoreBufferTopRootIndex]);
+ }
// Get address of global contexts list for serialization support.
Object** global_contexts_list_address() {
@@ -1075,6 +1181,10 @@ class Heap {
// Verify the heap is in its normal state before or after a GC.
void Verify();
+ void OldPointerSpaceCheckStoreBuffer();
+ void MapSpaceCheckStoreBuffer();
+ void LargeObjectSpaceCheckStoreBuffer();
+
// Report heap statistics.
void ReportHeapStatistics(const char* title);
void ReportCodeStatistics(const char* title);
@@ -1170,26 +1280,59 @@ class Heap {
MUST_USE_RESULT MaybeObject* AllocateRawFixedArray(int length,
PretenureFlag pretenure);
+ inline intptr_t PromotedTotalSize() {
+ return PromotedSpaceSize() + PromotedExternalMemorySize();
+ }
+
// True if we have reached the allocation limit in the old generation that
// should force the next GC (caused normally) to be a full one.
- bool OldGenerationPromotionLimitReached() {
- return (PromotedSpaceSize() + PromotedExternalMemorySize())
- > old_gen_promotion_limit_;
+ inline bool OldGenerationPromotionLimitReached() {
+ return PromotedTotalSize() > old_gen_promotion_limit_;
}
- intptr_t OldGenerationSpaceAvailable() {
- return old_gen_allocation_limit_ -
- (PromotedSpaceSize() + PromotedExternalMemorySize());
+ inline intptr_t OldGenerationSpaceAvailable() {
+ return old_gen_allocation_limit_ - PromotedTotalSize();
}
- // True if we have reached the allocation limit in the old generation that
- // should artificially cause a GC right now.
- bool OldGenerationAllocationLimitReached() {
- return OldGenerationSpaceAvailable() < 0;
+ static const intptr_t kMinimumPromotionLimit = 5 * Page::kPageSize;
+ static const intptr_t kMinimumAllocationLimit =
+ 8 * (Page::kPageSize > MB ? Page::kPageSize : MB);
+
+ // When we sweep lazily we initially guess that there is no garbage on the
+ // heap and set the limits for the next GC accordingly. As we sweep we find
+ // out that some of the pages contained garbage and we have to adjust
+ // downwards the size of the heap. This means the limits that control the
+ // timing of the next GC also need to be adjusted downwards.
+ void LowerOldGenLimits(intptr_t adjustment) {
+ size_of_old_gen_at_last_old_space_gc_ -= adjustment;
+ old_gen_promotion_limit_ =
+ OldGenPromotionLimit(size_of_old_gen_at_last_old_space_gc_);
+ old_gen_allocation_limit_ =
+ OldGenAllocationLimit(size_of_old_gen_at_last_old_space_gc_);
}
- // Can be called when the embedding application is idle.
- bool IdleNotification();
+ intptr_t OldGenPromotionLimit(intptr_t old_gen_size) {
+ const int divisor = FLAG_stress_compaction ? 10 : 3;
+ intptr_t limit =
+ Max(old_gen_size + old_gen_size / divisor, kMinimumPromotionLimit);
+ limit += new_space_.Capacity();
+ limit *= old_gen_limit_factor_;
+ intptr_t halfway_to_the_max = (old_gen_size + max_old_generation_size_) / 2;
+ return Min(limit, halfway_to_the_max);
+ }
+
+ intptr_t OldGenAllocationLimit(intptr_t old_gen_size) {
+ const int divisor = FLAG_stress_compaction ? 8 : 2;
+ intptr_t limit =
+ Max(old_gen_size + old_gen_size / divisor, kMinimumAllocationLimit);
+ limit += new_space_.Capacity();
+ limit *= old_gen_limit_factor_;
+ intptr_t halfway_to_the_max = (old_gen_size + max_old_generation_size_) / 2;
+ return Min(limit, halfway_to_the_max);
+ }
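// [Worked example, assuming default flags and old_gen_limit_factor_ == 1]
// With old_gen_size == 30 MB, a 1 MB new space capacity and a 700 MB
// max_old_generation_size_, OldGenAllocationLimit yields
//   Max(30 MB + 30 MB / 2, kMinimumAllocationLimit) + 1 MB == 46 MB,
// below the halfway cap (30 MB + 700 MB) / 2 == 365 MB, so the next full
// GC is due once roughly 16 MB more has been promoted.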
+
+ // Implements the corresponding V8 API function.
+ bool IdleNotification(int hint);
// Declare all the root indices.
enum RootListIndex {
@@ -1213,6 +1356,8 @@ class Heap {
MUST_USE_RESULT MaybeObject* NumberToString(
Object* number, bool check_number_string_cache = true);
+ MUST_USE_RESULT MaybeObject* Uint32ToString(
+ uint32_t value, bool check_number_string_cache = true);
Map* MapForExternalArrayType(ExternalArrayType array_type);
RootListIndex RootIndexForExternalArrayType(
@@ -1224,18 +1369,10 @@ class Heap {
// by pointer size.
static inline void CopyBlock(Address dst, Address src, int byte_size);
- inline void CopyBlockToOldSpaceAndUpdateRegionMarks(Address dst,
- Address src,
- int byte_size);
-
// Optimized version of memmove for blocks with pointer size aligned sizes and
// pointer size aligned addresses.
static inline void MoveBlock(Address dst, Address src, int byte_size);
- inline void MoveBlockToOldSpaceAndUpdateRegionMarks(Address dst,
- Address src,
- int byte_size);
-
// Check new space expansion criteria and expand semispaces if it was hit.
void CheckNewSpaceExpansionCriteria();
@@ -1244,9 +1381,31 @@ class Heap {
survived_since_last_expansion_ += survived;
}
+ inline bool NextGCIsLikelyToBeFull() {
+ if (FLAG_gc_global) return true;
+
+ intptr_t total_promoted = PromotedTotalSize();
+
+ intptr_t adjusted_promotion_limit =
+ old_gen_promotion_limit_ - new_space_.Capacity();
+
+ if (total_promoted >= adjusted_promotion_limit) return true;
+
+ intptr_t adjusted_allocation_limit =
+ old_gen_allocation_limit_ - new_space_.Capacity() / 5;
+
+ if (PromotedSpaceSize() >= adjusted_allocation_limit) return true;
+
+ return false;
+ }
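// [Illustrative note] The limits are reduced by (a fraction of) the new
// space capacity, presumably because a single scavenge can promote roughly
// that much: checking against the slackened limit predicts whether the
// next GC is likely to cross the real limit and so need to be a full one.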
+
+
void UpdateNewSpaceReferencesInExternalStringTable(
ExternalStringTableUpdaterCallback updater_func);
+ void UpdateReferencesInExternalStringTable(
+ ExternalStringTableUpdaterCallback updater_func);
+
void ProcessWeakReferences(WeakObjectRetainer* retainer);
// Helper function that governs the promotion policy from new space to
@@ -1263,6 +1422,9 @@ class Heap {
GCTracer* tracer() { return tracer_; }
+ // Returns the size of objects residing in non new spaces.
+ intptr_t PromotedSpaceSize();
+
double total_regexp_code_generated() { return total_regexp_code_generated_; }
void IncreaseTotalRegexpCodeGenerated(int size) {
total_regexp_code_generated_ += size;
@@ -1281,6 +1443,29 @@ class Heap {
return &mark_compact_collector_;
}
+ StoreBuffer* store_buffer() {
+ return &store_buffer_;
+ }
+
+ Marking* marking() {
+ return &marking_;
+ }
+
+ IncrementalMarking* incremental_marking() {
+ return &incremental_marking_;
+ }
+
+ bool IsSweepingComplete() {
+ return old_data_space()->IsSweepingComplete() &&
+ old_pointer_space()->IsSweepingComplete();
+ }
+
+ bool AdvanceSweepers(int step_size) {
+ bool sweeping_complete = old_data_space()->AdvanceSweeper(step_size);
+ sweeping_complete &= old_pointer_space()->AdvanceSweeper(step_size);
+ return sweeping_complete;
+ }
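// [Illustrative sketch] Lazy sweeping is advanced in bounded steps; a
// driver loop might look like:
//
//   while (!heap->AdvanceSweepers(KB)) {
//     // interleave a bounded slice of other work
//   }
//   ASSERT(heap->IsSweepingComplete());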
+
ExternalStringTable* external_string_table() {
return &external_string_table_;
}
@@ -1291,16 +1476,35 @@ class Heap {
}
inline Isolate* isolate();
- bool is_safe_to_read_maps() { return is_safe_to_read_maps_; }
- void CallGlobalGCPrologueCallback() {
+ inline void CallGlobalGCPrologueCallback() {
if (global_gc_prologue_callback_ != NULL) global_gc_prologue_callback_();
}
- void CallGlobalGCEpilogueCallback() {
+ inline void CallGlobalGCEpilogueCallback() {
if (global_gc_epilogue_callback_ != NULL) global_gc_epilogue_callback_();
}
+ inline bool OldGenerationAllocationLimitReached();
+
+ inline void DoScavengeObject(Map* map, HeapObject** slot, HeapObject* obj) {
+ scavenging_visitors_table_.GetVisitor(map)(map, slot, obj);
+ }
+
+ void QueueMemoryChunkForFree(MemoryChunk* chunk);
+ void FreeQueuedChunks();
+
+ // Completely clear the Instanceof cache (to stop it keeping objects alive
+ // around a GC).
+ inline void CompletelyClearInstanceofCache();
+
+ // The roots that have an index less than this are always in old space.
+ static const int kOldSpaceRoots = 0x20;
+
+ bool idle_notification_will_schedule_next_gc() {
+ return idle_notification_will_schedule_next_gc_;
+ }
+
private:
Heap();
@@ -1308,12 +1512,12 @@ class Heap {
// more expedient to get at the isolate directly from within Heap methods.
Isolate* isolate_;
+ intptr_t code_range_size_;
int reserved_semispace_size_;
int max_semispace_size_;
int initial_semispace_size_;
intptr_t max_old_generation_size_;
intptr_t max_executable_size_;
- intptr_t code_range_size_;
// For keeping track of how much data has survived
// scavenge since last new space expansion.
@@ -1328,6 +1532,8 @@ class Heap {
// For keeping track of context disposals.
int contexts_disposed_;
+ int scan_on_scavenge_pages_;
+
#if defined(V8_TARGET_ARCH_X64)
static const int kMaxObjectSizeInNewSpace = 1024*KB;
#else
@@ -1344,13 +1550,9 @@ class Heap {
HeapState gc_state_;
int gc_post_processing_depth_;
- // Returns the size of object residing in non new spaces.
- intptr_t PromotedSpaceSize();
-
// Returns the amount of external memory registered since last global gc.
int PromotedExternalMemorySize();
- int mc_count_; // how many mark-compact collections happened
int ms_count_; // how many mark-sweep collections happened
unsigned int gc_count_; // how many gc happened
@@ -1358,7 +1560,10 @@ class Heap {
int unflattened_strings_length_;
#define ROOT_ACCESSOR(type, name, camel_name) \
- inline void set_##name(type* value) { \
+ inline void set_##name(type* value) { \
+ /* The deserializer makes use of the fact that these common roots are */ \
+ /* never in new space and never on a page that is being compacted. */ \
+ ASSERT(k##camel_name##RootIndex >= kOldSpaceRoots || !InNewSpace(value)); \
roots_[k##camel_name##RootIndex] = value; \
}
ROOT_LIST(ROOT_ACCESSOR)
@@ -1379,6 +1584,10 @@ class Heap {
HeapDebugUtils* debug_utils_;
#endif // DEBUG
+ // Indicates that the new space should be kept small due to high promotion
+ // rates caused by the mutator allocating a lot of long-lived objects.
+ bool new_space_high_promotion_mode_active_;
+
// Limit that triggers a global GC on the next (normally caused) GC. This
// is checked when we have already decided to do a GC to help determine
// which collector to invoke.
@@ -1389,6 +1598,13 @@ class Heap {
// every allocation in large object space.
intptr_t old_gen_allocation_limit_;
+ // Sometimes the heuristics dictate that those limits are increased. This
+ // variable records that fact.
+ int old_gen_limit_factor_;
+
+ // Used to adjust the limits that control the timing of the next GC.
+ intptr_t size_of_old_gen_at_last_old_space_gc_;
+
// Limit on the amount of externally allocated memory allowed
// between global GCs. If reached a global GC is forced.
intptr_t external_allocation_limit_;
@@ -1408,6 +1624,8 @@ class Heap {
Object* global_contexts_list_;
+ StoreBufferRebuilder store_buffer_rebuilder_;
+
struct StringTypeTable {
InstanceType type;
int size;
@@ -1465,13 +1683,11 @@ class Heap {
// Support for computing object sizes during GC.
HeapObjectCallback gc_safe_size_of_old_object_;
static int GcSafeSizeOfOldObject(HeapObject* object);
- static int GcSafeSizeOfOldObjectWithEncodedMap(HeapObject* object);
// Update the GC state. Called from the mark-compact collector.
void MarkMapPointersAsEncoded(bool encoded) {
- gc_safe_size_of_old_object_ = encoded
- ? &GcSafeSizeOfOldObjectWithEncodedMap
- : &GcSafeSizeOfOldObject;
+ ASSERT(!encoded);
+ gc_safe_size_of_old_object_ = &GcSafeSizeOfOldObject;
}
// Checks whether a global GC is necessary
@@ -1483,11 +1699,10 @@ class Heap {
bool PerformGarbageCollection(GarbageCollector collector,
GCTracer* tracer);
- static const intptr_t kMinimumPromotionLimit = 2 * MB;
- static const intptr_t kMinimumAllocationLimit = 8 * MB;
inline void UpdateOldSpaceLimits();
+
// Allocate an uninitialized object in map space. The behavior is identical
// to Heap::AllocateRaw(size_in_bytes, MAP_SPACE), except that (a) it doesn't
// have to test the allocation space argument and (b) can reduce code size
@@ -1522,8 +1737,6 @@ class Heap {
// Allocate empty fixed double array.
MUST_USE_RESULT MaybeObject* AllocateEmptyFixedDoubleArray();
- void SwitchScavengingVisitorsTableIfProfilingWasEnabled();
-
// Performs a minor collection in new generation.
void Scavenge();
@@ -1532,16 +1745,15 @@ class Heap {
Object** pointer);
Address DoScavenge(ObjectVisitor* scavenge_visitor, Address new_space_front);
+ static void ScavengeStoreBufferCallback(Heap* heap,
+ MemoryChunk* page,
+ StoreBufferEvent event);
// Performs a major collection in the whole heap.
void MarkCompact(GCTracer* tracer);
// Code to be run before and after mark-compact.
- void MarkCompactPrologue(bool is_compacting);
-
- // Completely clear the Instanceof cache (to stop it keeping objects alive
- // around a GC).
- inline void CompletelyClearInstanceofCache();
+ void MarkCompactPrologue();
// Record statistics before and after garbage collection.
void ReportStatisticsBeforeGC();
@@ -1551,12 +1763,11 @@ class Heap {
static void ScavengeObjectSlow(HeapObject** p, HeapObject* object);
// Initializes a function with a shared part and prototype.
- // Returns the function.
// Note: this code was factored out of AllocateFunction such that
// other parts of the VM could use it. Specifically, a function that creates
  // instances of type JS_FUNCTION_TYPE benefits from the use of this function.
// Please note this does not perform a garbage collection.
- MUST_USE_RESULT inline MaybeObject* InitializeFunction(
+ inline void InitializeFunction(
JSFunction* function,
SharedFunctionInfo* shared,
Object* prototype);
@@ -1617,10 +1828,40 @@ class Heap {
return survival_rate_trend() == INCREASING;
}
+ bool IsDecreasingSurvivalTrend() {
+ return survival_rate_trend() == DECREASING;
+ }
+
bool IsHighSurvivalRate() {
return high_survival_rate_period_length_ > 0;
}
+ void SelectScavengingVisitorsTable();
+
+ void StartIdleRound() {
+ mark_sweeps_since_idle_round_started_ = 0;
+ ms_count_at_last_idle_notification_ = ms_count_;
+ }
+
+ void FinishIdleRound() {
+ mark_sweeps_since_idle_round_started_ = kMaxMarkSweepsInIdleRound;
+ scavenges_since_last_idle_round_ = 0;
+ }
+
+ bool EnoughGarbageSinceLastIdleRound() {
+ return (scavenges_since_last_idle_round_ >= kIdleScavengeThreshold);
+ }
+
+ bool WorthStartingGCWhenIdle() {
+ if (contexts_disposed_ > 0) {
+ return true;
+ }
+ return incremental_marking()->WorthActivating();
+ }
+
+ // Returns true if no more GC work is left.
+ bool IdleGlobalGC();
+
static const int kInitialSymbolTableSize = 2048;
static const int kInitialEvalCacheSize = 64;
@@ -1640,15 +1881,25 @@ class Heap {
MarkCompactCollector mark_compact_collector_;
- // This field contains the meaning of the WATERMARK_INVALIDATED flag.
- // Instead of clearing this flag from all pages we just flip
- // its meaning at the beginning of a scavenge.
- intptr_t page_watermark_invalidated_mark_;
+ StoreBuffer store_buffer_;
+
+ Marking marking_;
+
+ IncrementalMarking incremental_marking_;
int number_idle_notifications_;
unsigned int last_idle_notification_gc_count_;
bool last_idle_notification_gc_count_init_;
+ bool idle_notification_will_schedule_next_gc_;
+ int mark_sweeps_since_idle_round_started_;
+ int ms_count_at_last_idle_notification_;
+ unsigned int gc_count_at_last_idle_gc_;
+ int scavenges_since_last_idle_round_;
+
+ static const int kMaxMarkSweepsInIdleRound = 7;
+ static const int kIdleScavengeThreshold = 5;
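// [Illustrative note] With these defaults an idle round performs at most
// seven mark-sweeps (FinishIdleRound saturates the counter), and a fresh
// round is only worth starting once at least five scavenges have happened
// since the previous round (EnoughGarbageSinceLastIdleRound above).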
+
// Shared state read by the scavenge collector and set by ScavengeObject.
PromotionQueue promotion_queue_;
@@ -1658,7 +1909,9 @@ class Heap {
ExternalStringTable external_string_table_;
- bool is_safe_to_read_maps_;
+ VisitorDispatchTable<ScavengingCallback> scavenging_visitors_table_;
+
+ MemoryChunk* chunks_queued_for_free_;
friend class Factory;
friend class GCTracer;
@@ -1757,29 +2010,6 @@ class VerifyPointersVisitor: public ObjectVisitor {
}
}
};
-
-
-// Visitor class to verify interior pointers in spaces that use region marks
-// to keep track of intergenerational references.
-// As VerifyPointersVisitor but also checks that dirty marks are set
-// for regions covering intergenerational references.
-class VerifyPointersAndDirtyRegionsVisitor: public ObjectVisitor {
- public:
- void VisitPointers(Object** start, Object** end) {
- for (Object** current = start; current < end; current++) {
- if ((*current)->IsHeapObject()) {
- HeapObject* object = HeapObject::cast(*current);
- ASSERT(HEAP->Contains(object));
- ASSERT(object->map()->IsMap());
- if (HEAP->InNewSpace(object)) {
- ASSERT(HEAP->InToSpace(object));
- Address addr = reinterpret_cast<Address>(current);
- ASSERT(Page::FromAddress(addr)->IsRegionDirty(addr));
- }
- }
- }
- }
-};
#endif
@@ -1854,7 +2084,6 @@ class HeapIterator BASE_EMBEDDED {
public:
enum HeapObjectsFiltering {
kNoFiltering,
- kFilterFreeListNodes,
kFilterUnreachable
};
@@ -2078,7 +2307,13 @@ class GCTracer BASE_EMBEDDED {
MC_MARK,
MC_SWEEP,
MC_SWEEP_NEWSPACE,
- MC_COMPACT,
+ MC_EVACUATE_PAGES,
+ MC_UPDATE_NEW_TO_NEW_POINTERS,
+ MC_UPDATE_ROOT_TO_NEW_POINTERS,
+ MC_UPDATE_OLD_TO_NEW_POINTERS,
+ MC_UPDATE_POINTERS_TO_EVACUATED,
+ MC_UPDATE_POINTERS_BETWEEN_EVACUATED,
+ MC_UPDATE_MISC_POINTERS,
MC_FLUSH_CODE,
kNumberOfScopes
};
@@ -2112,16 +2347,6 @@ class GCTracer BASE_EMBEDDED {
// Sets the full GC count.
void set_full_gc_count(int count) { full_gc_count_ = count; }
- // Sets the flag that this is a compacting full GC.
- void set_is_compacting() { is_compacting_ = true; }
- bool is_compacting() const { return is_compacting_; }
-
- // Increment and decrement the count of marked objects.
- void increment_marked_count() { ++marked_count_; }
- void decrement_marked_count() { --marked_count_; }
-
- int marked_count() { return marked_count_; }
-
void increment_promoted_objects_size(int object_size) {
promoted_objects_size_ += object_size;
}
@@ -2146,23 +2371,6 @@ class GCTracer BASE_EMBEDDED {
// A count (including this one) of the number of full garbage collections.
int full_gc_count_;
- // True if the current GC is a compacting full collection, false
- // otherwise.
- bool is_compacting_;
-
-  // True if the *previous* full GC was a compacting collection (will be
- // false if there has not been a previous full GC).
- bool previous_has_compacted_;
-
- // On a full GC, a count of the number of marked objects. Incremented
- // when an object is marked and decremented when an object's mark bit is
- // cleared. Will be zero on a scavenge collection.
- int marked_count_;
-
- // The count from the end of the previous full GC. Will be zero if there
- // was no previous full GC.
- int previous_marked_count_;
-
// Amounts of time spent in different scopes during GC.
double scopes_[Scope::kNumberOfScopes];
@@ -2181,6 +2389,13 @@ class GCTracer BASE_EMBEDDED {
// Size of objects promoted during the current collection.
intptr_t promoted_objects_size_;
+ // Incremental marking steps counters.
+ int steps_count_;
+ double steps_took_;
+ double longest_step_;
+ int steps_count_since_last_gc_;
+ double steps_took_since_last_gc_;
+
Heap* heap_;
};
@@ -2292,6 +2507,46 @@ class WeakObjectRetainer {
};
+// Intrusive object marking uses the least significant bit of a
+// heap object's map word to mark objects.
+// Normally all map words have the least significant bit set
+// because they contain a tagged map pointer.
+// If the bit is not set, the object is marked.
+// All objects should be unmarked before resuming
+// JavaScript execution.
+class IntrusiveMarking {
+ public:
+ static bool IsMarked(HeapObject* object) {
+ return (object->map_word().ToRawValue() & kNotMarkedBit) == 0;
+ }
+
+ static void ClearMark(HeapObject* object) {
+ uintptr_t map_word = object->map_word().ToRawValue();
+ object->set_map_word(MapWord::FromRawValue(map_word | kNotMarkedBit));
+ ASSERT(!IsMarked(object));
+ }
+
+ static void SetMark(HeapObject* object) {
+ uintptr_t map_word = object->map_word().ToRawValue();
+ object->set_map_word(MapWord::FromRawValue(map_word & ~kNotMarkedBit));
+ ASSERT(IsMarked(object));
+ }
+
+ static Map* MapOfMarkedObject(HeapObject* object) {
+ uintptr_t map_word = object->map_word().ToRawValue();
+ return MapWord::FromRawValue(map_word | kNotMarkedBit).ToMap();
+ }
+
+ static int SizeOfMarkedObject(HeapObject* object) {
+ return object->SizeFromMap(MapOfMarkedObject(object));
+ }
+
+ private:
+ static const uintptr_t kNotMarkedBit = 0x1;
+ STATIC_ASSERT((kHeapObjectTag & kNotMarkedBit) != 0);
+};
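// [Illustrative sketch] The mark is simply the map word's tag bit, so
// marking is reversible without side tables (assumes kHeapObjectTag has
// its low bit set, as the STATIC_ASSERT above requires):
//
//   uintptr_t word = 0x12345679;                     // tagged map pointer
//   uintptr_t marked = word & ~kNotMarkedBit;        // SetMark: LSB cleared
//   bool is_marked = (marked & kNotMarkedBit) == 0;  // true
//   uintptr_t restored = marked | kNotMarkedBit;     // ClearMark: original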
+
+
#if defined(DEBUG) || defined(LIVE_OBJECT_LIST)
// Helper class for tracing paths to a search target Object from all roots.
// The TracePathFrom() method can be used to trace paths from a specific
@@ -2350,7 +2605,6 @@ class PathTracer : public ObjectVisitor {
};
#endif // DEBUG || LIVE_OBJECT_LIST
-
} } // namespace v8::internal
#undef HEAP
diff --git a/deps/v8/src/hydrogen-instructions.cc b/deps/v8/src/hydrogen-instructions.cc
index 5630ce391..32c3abfe7 100644
--- a/deps/v8/src/hydrogen-instructions.cc
+++ b/deps/v8/src/hydrogen-instructions.cc
@@ -126,7 +126,9 @@ void Range::AddConstant(int32_t value) {
bool may_overflow = false; // Overflow is ignored here.
lower_ = AddWithoutOverflow(lower_, value, &may_overflow);
upper_ = AddWithoutOverflow(upper_, value, &may_overflow);
+#ifdef DEBUG
Verify();
+#endif
}
@@ -173,7 +175,9 @@ bool Range::AddAndCheckOverflow(Range* other) {
lower_ = AddWithoutOverflow(lower_, other->lower(), &may_overflow);
upper_ = AddWithoutOverflow(upper_, other->upper(), &may_overflow);
KeepOrder();
+#ifdef DEBUG
Verify();
+#endif
return may_overflow;
}
@@ -183,7 +187,9 @@ bool Range::SubAndCheckOverflow(Range* other) {
lower_ = SubWithoutOverflow(lower_, other->upper(), &may_overflow);
upper_ = SubWithoutOverflow(upper_, other->lower(), &may_overflow);
KeepOrder();
+#ifdef DEBUG
Verify();
+#endif
return may_overflow;
}
@@ -197,9 +203,11 @@ void Range::KeepOrder() {
}
+#ifdef DEBUG
void Range::Verify() const {
ASSERT(lower_ <= upper_);
}
+#endif
bool Range::MulAndCheckOverflow(Range* other) {
@@ -210,7 +218,9 @@ bool Range::MulAndCheckOverflow(Range* other) {
int v4 = MulWithoutOverflow(upper_, other->upper(), &may_overflow);
lower_ = Min(Min(v1, v2), Min(v3, v4));
upper_ = Max(Max(v1, v2), Max(v3, v4));
+#ifdef DEBUG
Verify();
+#endif
return may_overflow;
}
@@ -234,25 +244,6 @@ const char* HType::ToString() {
}
-const char* HType::ToShortString() {
- switch (type_) {
- case kTagged: return "t";
- case kTaggedPrimitive: return "p";
- case kTaggedNumber: return "n";
- case kSmi: return "m";
- case kHeapNumber: return "h";
- case kString: return "s";
- case kBoolean: return "b";
- case kNonPrimitive: return "r";
- case kJSArray: return "a";
- case kJSObject: return "o";
- case kUninitialized: return "z";
- }
- UNREACHABLE();
- return "Unreachable code";
-}
-
-
HType HType::TypeFromValue(Handle<Object> value) {
HType result = HType::Tagged();
if (value->IsSmi()) {
@@ -564,7 +555,7 @@ void HInstruction::InsertAfter(HInstruction* previous) {
// followed by a simulate instruction, we need to insert after the
// simulate instruction instead.
HInstruction* next = previous->next_;
- if (previous->HasSideEffects() && next != NULL) {
+ if (previous->HasObservableSideEffects() && next != NULL) {
ASSERT(next->IsSimulate());
previous = next;
next = previous->next_;
@@ -587,11 +578,10 @@ void HInstruction::Verify() {
HBasicBlock* other_block = other_operand->block();
if (cur_block == other_block) {
if (!other_operand->IsPhi()) {
- HInstruction* cur = cur_block->first();
+ HInstruction* cur = this->previous();
while (cur != NULL) {
- ASSERT(cur != this); // We should reach other_operand before!
if (cur == other_operand) break;
- cur = cur->next();
+ cur = cur->previous();
}
// Must reach other operand in the same block!
ASSERT(cur == other_operand);
@@ -605,7 +595,7 @@ void HInstruction::Verify() {
// Verify that instructions that may have side-effects are followed
// by a simulate instruction.
- if (HasSideEffects() && !IsOsrEntry()) {
+ if (HasObservableSideEffects() && !IsOsrEntry()) {
ASSERT(next()->IsSimulate());
}
@@ -707,6 +697,14 @@ void HUnaryControlInstruction::PrintDataTo(StringStream* stream) {
}
+void HIsNilAndBranch::PrintDataTo(StringStream* stream) {
+ value()->PrintNameTo(stream);
+ stream->Add(kind() == kStrictEquality ? " === " : " == ");
+ stream->Add(nil() == kNullValue ? "null" : "undefined");
+ HControlInstruction::PrintDataTo(stream);
+}
+
+
void HReturn::PrintDataTo(StringStream* stream) {
value()->PrintNameTo(stream);
}
@@ -775,17 +773,33 @@ void HHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) {
void HTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
value()->PrintNameTo(stream);
- stream->Add(" == ");
- stream->Add(type_literal_->GetFlatContent().ToAsciiVector());
+ stream->Add(" == %o", *type_literal_);
+ HControlInstruction::PrintDataTo(stream);
+}
+
+
+HValue* HConstant::Canonicalize() {
+ return HasNoUses() && !IsBlockEntry() ? NULL : this;
+}
+
+
+HValue* HTypeof::Canonicalize() {
+ return HasNoUses() && !IsBlockEntry() ? NULL : this;
+}
+
+
+void HTypeof::PrintDataTo(StringStream* stream) {
+ value()->PrintNameTo(stream);
}
void HChange::PrintDataTo(StringStream* stream) {
HUnaryOperation::PrintDataTo(stream);
- stream->Add(" %s to %s", from_.Mnemonic(), to().Mnemonic());
+ stream->Add(" %s to %s", from().Mnemonic(), to().Mnemonic());
if (CanTruncateToInt32()) stream->Add(" truncating-int32");
if (CheckFlag(kBailoutOnMinusZero)) stream->Add(" -0?");
+ if (CheckFlag(kDeoptimizeOnUndefined)) stream->Add(" deopt-on-undefined");
}
@@ -857,6 +871,23 @@ void HCheckFunction::PrintDataTo(StringStream* stream) {
}
+const char* HCheckInstanceType::GetCheckName() {
+ switch (check_) {
+ case IS_SPEC_OBJECT: return "object";
+ case IS_JS_ARRAY: return "array";
+ case IS_STRING: return "string";
+ case IS_SYMBOL: return "symbol";
+ }
+ UNREACHABLE();
+ return "";
+}
+
+void HCheckInstanceType::PrintDataTo(StringStream* stream) {
+ stream->Add("%s ", GetCheckName());
+ HUnaryOperation::PrintDataTo(stream);
+}
+
+
void HCallStub::PrintDataTo(StringStream* stream) {
stream->Add("%s ",
CodeStub::MajorName(major_key_, false));
@@ -1106,15 +1137,16 @@ void HPhi::AddIndirectUsesTo(int* dest) {
void HSimulate::PrintDataTo(StringStream* stream) {
- stream->Add("id=%d ", ast_id());
- if (pop_count_ > 0) stream->Add("pop %d", pop_count_);
+ stream->Add("id=%d", ast_id());
+ if (pop_count_ > 0) stream->Add(" pop %d", pop_count_);
if (values_.length() > 0) {
if (pop_count_ > 0) stream->Add(" /");
for (int i = 0; i < values_.length(); ++i) {
- if (!HasAssignedIndexAt(i)) {
- stream->Add(" push ");
- } else {
+ if (i > 0) stream->Add(",");
+ if (HasAssignedIndexAt(i)) {
stream->Add(" var[%d] = ", GetAssignedIndexAt(i));
+ } else {
+ stream->Add(" push ");
}
values_[i]->PrintNameTo(stream);
}
@@ -1195,7 +1227,10 @@ void HConstant::PrintDataTo(StringStream* stream) {
bool HArrayLiteral::IsCopyOnWrite() const {
- return constant_elements()->map() == HEAP->fixed_cow_array_map();
+ Handle<FixedArray> constant_elements = this->constant_elements();
+ FixedArrayBase* constant_elements_values =
+ FixedArrayBase::cast(constant_elements->get(1));
+ return constant_elements_values->map() == HEAP->fixed_cow_array_map();
}
@@ -1208,28 +1243,17 @@ void HBinaryOperation::PrintDataTo(StringStream* stream) {
}
-Range* HBitAnd::InferRange() {
+Range* HBitwise::InferRange() {
+ if (op() == Token::BIT_XOR) return HValue::InferRange();
int32_t left_mask = (left()->range() != NULL)
? left()->range()->Mask()
: 0xffffffff;
int32_t right_mask = (right()->range() != NULL)
? right()->range()->Mask()
: 0xffffffff;
- int32_t result_mask = left_mask & right_mask;
- return (result_mask >= 0)
- ? new Range(0, result_mask)
- : HValue::InferRange();
-}
-
-
-Range* HBitOr::InferRange() {
- int32_t left_mask = (left()->range() != NULL)
- ? left()->range()->Mask()
- : 0xffffffff;
- int32_t right_mask = (right()->range() != NULL)
- ? right()->range()->Mask()
- : 0xffffffff;
- int32_t result_mask = left_mask | right_mask;
+ int32_t result_mask = (op() == Token::BIT_AND)
+ ? left_mask & right_mask
+ : left_mask | right_mask;
return (result_mask >= 0)
? new Range(0, result_mask)
: HValue::InferRange();
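// [Worked example] For `x & 0xff` with x of unknown range, left_mask is
// 0xffffffff (-1) and right_mask is 0xff, so result_mask == 0xff >= 0 and
// the inferred range is [0, 255]. A negative result_mask means the sign
// bit may survive the operation, so the generic HValue::InferRange() is
// used instead; BIT_XOR always takes that path.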
@@ -1301,6 +1325,13 @@ void HCompareGeneric::PrintDataTo(StringStream* stream) {
}
+void HStringCompareAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add(Token::Name(token()));
+ stream->Add(" ");
+ HControlInstruction::PrintDataTo(stream);
+}
+
+
void HCompareIDAndBranch::PrintDataTo(StringStream* stream) {
stream->Add(Token::Name(token()));
stream->Add(" ");
@@ -1311,6 +1342,14 @@ void HCompareIDAndBranch::PrintDataTo(StringStream* stream) {
}
+void HCompareObjectEqAndBranch::PrintDataTo(StringStream* stream) {
+ left()->PrintNameTo(stream);
+ stream->Add(" ");
+ right()->PrintNameTo(stream);
+ HControlInstruction::PrintDataTo(stream);
+}
+
+
void HGoto::PrintDataTo(StringStream* stream) {
stream->Add("B%d", SuccessorAt(0)->block_id());
}
@@ -1352,7 +1391,7 @@ HLoadNamedFieldPolymorphic::HLoadNamedFieldPolymorphic(HValue* context,
i < types->length() && types_.length() < kMaxLoadPolymorphism;
++i) {
Handle<Map> map = types->at(i);
- LookupResult lookup;
+ LookupResult lookup(map->GetIsolate());
map->LookupInDescriptors(NULL, *name, &lookup);
if (lookup.IsProperty()) {
switch (lookup.type()) {
@@ -1405,14 +1444,14 @@ bool HLoadNamedFieldPolymorphic::DataEquals(HValue* value) {
void HLoadNamedFieldPolymorphic::PrintDataTo(StringStream* stream) {
object()->PrintNameTo(stream);
- stream->Add(" .");
+ stream->Add(".");
stream->Add(*String::cast(*name())->ToCString());
}
void HLoadNamedGeneric::PrintDataTo(StringStream* stream) {
object()->PrintNameTo(stream);
- stream->Add(" .");
+ stream->Add(".");
stream->Add(*String::cast(*name())->ToCString());
}
@@ -1425,7 +1464,7 @@ void HLoadKeyedFastElement::PrintDataTo(StringStream* stream) {
}
-bool HLoadKeyedFastElement::RequiresHoleCheck() const {
+bool HLoadKeyedFastElement::RequiresHoleCheck() {
for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
HValue* use = it.value();
if (!use->IsChange()) return true;
@@ -1442,11 +1481,6 @@ void HLoadKeyedFastDoubleElement::PrintDataTo(StringStream* stream) {
}
-bool HLoadKeyedFastDoubleElement::RequiresHoleCheck() const {
- return true;
-}
-
-
void HLoadKeyedGeneric::PrintDataTo(StringStream* stream) {
object()->PrintNameTo(stream);
stream->Add("[");
@@ -1488,6 +1522,7 @@ void HLoadKeyedSpecializedArrayElement::PrintDataTo(
stream->Add("pixel");
break;
case FAST_ELEMENTS:
+ case FAST_SMI_ONLY_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
@@ -1513,10 +1548,10 @@ void HStoreNamedGeneric::PrintDataTo(StringStream* stream) {
void HStoreNamedField::PrintDataTo(StringStream* stream) {
object()->PrintNameTo(stream);
stream->Add(".");
- ASSERT(name()->IsString());
stream->Add(*String::cast(*name())->ToCString());
stream->Add(" = ");
value()->PrintNameTo(stream);
+ stream->Add(" @%d%s", offset(), is_in_object() ? "[in-object]" : "");
if (!transition().is_null()) {
stream->Add(" (transition map %p)", *transition());
}
@@ -1582,6 +1617,7 @@ void HStoreKeyedSpecializedArrayElement::PrintDataTo(
case EXTERNAL_PIXEL_ELEMENTS:
stream->Add("pixel");
break;
+ case FAST_SMI_ONLY_ELEMENTS:
case FAST_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
@@ -1596,9 +1632,26 @@ void HStoreKeyedSpecializedArrayElement::PrintDataTo(
}
+void HTransitionElementsKind::PrintDataTo(StringStream* stream) {
+ object()->PrintNameTo(stream);
+ stream->Add(" %p -> %p", *original_map(), *transitioned_map());
+}
+
+
void HLoadGlobalCell::PrintDataTo(StringStream* stream) {
stream->Add("[%p]", *cell());
- if (check_hole_value()) stream->Add(" (deleteable/read-only)");
+ if (!details_.IsDontDelete()) stream->Add(" (deleteable)");
+ if (details_.IsReadOnly()) stream->Add(" (read-only)");
+}
+
+
+bool HLoadGlobalCell::RequiresHoleCheck() {
+ if (details_.IsDontDelete() && !details_.IsReadOnly()) return false;
+ for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
+ HValue* use = it.value();
+ if (!use->IsChange()) return true;
+ }
+ return false;
}
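// [Illustrative note] A non-deletable, writable cell can never hold the
// hole, so no check is needed there. For other cells the check can still
// be dropped when every use is an HChange; presumably the conversion
// deoptimizes on the hole (a non-number) anyway, mirroring the logic of
// HLoadKeyedFastElement::RequiresHoleCheck() earlier in this file.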
@@ -1610,6 +1663,8 @@ void HLoadGlobalGeneric::PrintDataTo(StringStream* stream) {
void HStoreGlobalCell::PrintDataTo(StringStream* stream) {
stream->Add("[%p] = ", *cell());
value()->PrintNameTo(stream);
+ if (!details_.IsDontDelete()) stream->Add(" (deleteable)");
+ if (details_.IsReadOnly()) stream->Add(" (read-only)");
}
@@ -1696,6 +1751,12 @@ HType HInstanceOfKnownGlobal::CalculateInferredType() {
}
+HType HChange::CalculateInferredType() {
+ if (from().IsDouble() && to().IsTagged()) return HType::HeapNumber();
+ return type();
+}
+
+
HType HBitwiseBinaryOperation::CalculateInferredType() {
return HType::TaggedNumber();
}
@@ -1711,43 +1772,43 @@ HType HAdd::CalculateInferredType() {
}
-HType HBitAnd::CalculateInferredType() {
+HType HBitNot::CalculateInferredType() {
return HType::TaggedNumber();
}
-HType HBitXor::CalculateInferredType() {
+HType HUnaryMathOperation::CalculateInferredType() {
return HType::TaggedNumber();
}
-HType HBitOr::CalculateInferredType() {
- return HType::TaggedNumber();
+HType HStringCharFromCode::CalculateInferredType() {
+ return HType::String();
}
-HType HBitNot::CalculateInferredType() {
- return HType::TaggedNumber();
+HType HArrayLiteral::CalculateInferredType() {
+ return HType::JSArray();
}
-HType HUnaryMathOperation::CalculateInferredType() {
- return HType::TaggedNumber();
+HType HObjectLiteralFast::CalculateInferredType() {
+ return HType::JSObject();
}
-HType HShl::CalculateInferredType() {
- return HType::TaggedNumber();
+HType HObjectLiteralGeneric::CalculateInferredType() {
+ return HType::JSObject();
}
-HType HShr::CalculateInferredType() {
- return HType::TaggedNumber();
+HType HRegExpLiteral::CalculateInferredType() {
+ return HType::JSObject();
}
-HType HSar::CalculateInferredType() {
- return HType::TaggedNumber();
+HType HFunctionLiteral::CalculateInferredType() {
+ return HType::JSObject();
}
@@ -1838,6 +1899,167 @@ HValue* HAdd::EnsureAndPropagateNotMinusZero(BitVector* visited) {
}
+#define H_CONSTANT_INT32(val) \
+new(zone) HConstant(FACTORY->NewNumberFromInt(val, TENURED), \
+ Representation::Integer32())
+#define H_CONSTANT_DOUBLE(val) \
+new(zone) HConstant(FACTORY->NewNumber(val, TENURED), \
+ Representation::Double())
+
+#define DEFINE_NEW_H_SIMPLE_ARITHMETIC_INSTR(HInstr, op) \
+HInstruction* HInstr::New##HInstr(Zone* zone, \
+ HValue* context, \
+ HValue* left, \
+ HValue* right) { \
+ if (left->IsConstant() && right->IsConstant()) { \
+ HConstant* c_left = HConstant::cast(left); \
+ HConstant* c_right = HConstant::cast(right); \
+ if ((c_left->HasNumberValue() && c_right->HasNumberValue())) { \
+ double double_res = c_left->DoubleValue() op c_right->DoubleValue(); \
+ if (TypeInfo::IsInt32Double(double_res)) { \
+ return H_CONSTANT_INT32(static_cast<int32_t>(double_res)); \
+ } \
+ return H_CONSTANT_DOUBLE(double_res); \
+ } \
+ } \
+ return new(zone) HInstr(context, left, right); \
+}
+
+
+DEFINE_NEW_H_SIMPLE_ARITHMETIC_INSTR(HAdd, +)
+DEFINE_NEW_H_SIMPLE_ARITHMETIC_INSTR(HMul, *)
+DEFINE_NEW_H_SIMPLE_ARITHMETIC_INSTR(HSub, -)
+
+#undef DEFINE_NEW_H_SIMPLE_ARITHMETIC_INSTR
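// [Worked example] Through the macro above, HAdd::NewHAdd over the integer
// constants 2 and 3 folds to HConstant(5) in Integer32 representation,
// while 0.5 + 0.25 folds to the double constant 0.75, since
// TypeInfo::IsInt32Double(0.75) is false.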
+
+
+HInstruction* HMod::NewHMod(Zone* zone,
+ HValue* context,
+ HValue* left,
+ HValue* right) {
+ if (left->IsConstant() && right->IsConstant()) {
+ HConstant* c_left = HConstant::cast(left);
+ HConstant* c_right = HConstant::cast(right);
+ if (c_left->HasInteger32Value() && c_right->HasInteger32Value()) {
+ int32_t dividend = c_left->Integer32Value();
+ int32_t divisor = c_right->Integer32Value();
+ if (divisor != 0) {
+ int32_t res = dividend % divisor;
+ if ((res == 0) && (dividend < 0)) {
+ return H_CONSTANT_DOUBLE(-0.0);
+ }
+ return H_CONSTANT_INT32(res);
+ }
+ }
+ }
+ return new(zone) HMod(context, left, right);
+}
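// [Worked example] `7 % 3` folds to HConstant(1). The (res == 0 &&
// dividend < 0) case preserves JavaScript semantics: `-4 % 2` evaluates to
// -0, which has no Integer32 encoding and is therefore emitted as the
// double constant -0.0.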
+
+
+HInstruction* HDiv::NewHDiv(Zone* zone,
+ HValue* context,
+ HValue* left,
+ HValue* right) {
+ // If left and right are constant values, try to return a constant value.
+ if (left->IsConstant() && right->IsConstant()) {
+ HConstant* c_left = HConstant::cast(left);
+ HConstant* c_right = HConstant::cast(right);
+ if ((c_left->HasNumberValue() && c_right->HasNumberValue())) {
+ if (c_right->DoubleValue() != 0) {
+ double double_res = c_left->DoubleValue() / c_right->DoubleValue();
+ if (TypeInfo::IsInt32Double(double_res)) {
+ return H_CONSTANT_INT32(static_cast<int32_t>(double_res));
+ }
+ return H_CONSTANT_DOUBLE(double_res);
+ }
+ }
+ }
+ return new(zone) HDiv(context, left, right);
+}
+
+
+HInstruction* HBitwise::NewHBitwise(Zone* zone,
+ Token::Value op,
+ HValue* context,
+ HValue* left,
+ HValue* right) {
+ if (left->IsConstant() && right->IsConstant()) {
+ HConstant* c_left = HConstant::cast(left);
+ HConstant* c_right = HConstant::cast(right);
+ if ((c_left->HasNumberValue() && c_right->HasNumberValue())) {
+ int32_t result;
+ int32_t v_left = c_left->NumberValueAsInteger32();
+ int32_t v_right = c_right->NumberValueAsInteger32();
+ switch (op) {
+ case Token::BIT_XOR:
+ result = v_left ^ v_right;
+ break;
+ case Token::BIT_AND:
+ result = v_left & v_right;
+ break;
+ case Token::BIT_OR:
+ result = v_left | v_right;
+ break;
+ default:
+ result = 0; // Please the compiler.
+ UNREACHABLE();
+ }
+ return H_CONSTANT_INT32(result);
+ }
+ }
+ return new(zone) HBitwise(op, context, left, right);
+}
+
+
+#define DEFINE_NEW_H_BITWISE_INSTR(HInstr, result) \
+HInstruction* HInstr::New##HInstr(Zone* zone, \
+ HValue* context, \
+ HValue* left, \
+ HValue* right) { \
+ if (left->IsConstant() && right->IsConstant()) { \
+ HConstant* c_left = HConstant::cast(left); \
+ HConstant* c_right = HConstant::cast(right); \
+ if ((c_left->HasNumberValue() && c_right->HasNumberValue())) { \
+ return H_CONSTANT_INT32(result); \
+ } \
+ } \
+ return new(zone) HInstr(context, left, right); \
+}
+
+
+DEFINE_NEW_H_BITWISE_INSTR(HSar,
+c_left->NumberValueAsInteger32() >> (c_right->NumberValueAsInteger32() & 0x1f))
+DEFINE_NEW_H_BITWISE_INSTR(HShl,
+c_left->NumberValueAsInteger32() << (c_right->NumberValueAsInteger32() & 0x1f))
+
+#undef DEFINE_NEW_H_BITWISE_INSTR
+
+
+HInstruction* HShr::NewHShr(Zone* zone,
+ HValue* context,
+ HValue* left,
+ HValue* right) {
+ if (left->IsConstant() && right->IsConstant()) {
+ HConstant* c_left = HConstant::cast(left);
+ HConstant* c_right = HConstant::cast(right);
+ if ((c_left->HasNumberValue() && c_right->HasNumberValue())) {
+ int32_t left_val = c_left->NumberValueAsInteger32();
+ int32_t right_val = c_right->NumberValueAsInteger32() & 0x1f;
+ if ((right_val == 0) && (left_val < 0)) {
+ return H_CONSTANT_DOUBLE(
+ static_cast<double>(static_cast<uint32_t>(left_val)));
+ }
+ return H_CONSTANT_INT32(static_cast<uint32_t>(left_val) >> right_val);
+ }
+ }
+ return new(zone) HShr(context, left, right);
+}
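// [Worked example] `-1 >>> 0` in JavaScript is 4294967295, which does not
// fit in a signed Integer32 constant; hence the double result for a zero
// shift of a negative value. Any non-zero shift count clears the sign bit,
// so `-1 >>> 1` folds to HConstant(0x7fffffff).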
+
+
+#undef H_CONSTANT_INT32
+#undef H_CONSTANT_DOUBLE
+
+
void HIn::PrintDataTo(StringStream* stream) {
key()->PrintNameTo(stream);
stream->Add(" ");
diff --git a/deps/v8/src/hydrogen-instructions.h b/deps/v8/src/hydrogen-instructions.h
index 1bc28ba82..52fed8844 100644
--- a/deps/v8/src/hydrogen-instructions.h
+++ b/deps/v8/src/hydrogen-instructions.h
@@ -67,10 +67,8 @@ class LChunkBuilder;
V(ArgumentsLength) \
V(ArgumentsObject) \
V(ArrayLiteral) \
- V(BitAnd) \
+ V(Bitwise) \
V(BitNot) \
- V(BitOr) \
- V(BitXor) \
V(BlockEntry) \
V(BoundsCheck) \
V(Branch) \
@@ -118,10 +116,12 @@ class LChunkBuilder;
V(InstanceOfKnownGlobal) \
V(InvokeFunction) \
V(IsConstructCallAndBranch) \
- V(IsNullAndBranch) \
+ V(IsNilAndBranch) \
V(IsObjectAndBranch) \
+ V(IsStringAndBranch) \
V(IsSmiAndBranch) \
V(IsUndetectableAndBranch) \
+ V(StringCompareAndBranch) \
V(JSArrayLength) \
V(LeaveInlined) \
V(LoadContextSlot) \
@@ -139,7 +139,8 @@ class LChunkBuilder;
V(LoadNamedGeneric) \
V(Mod) \
V(Mul) \
- V(ObjectLiteral) \
+ V(ObjectLiteralFast) \
+ V(ObjectLiteralGeneric) \
V(OsrEntry) \
V(OuterContext) \
V(Parameter) \
@@ -171,6 +172,7 @@ class LChunkBuilder;
V(Throw) \
V(ToFastProperties) \
V(ToInt32) \
+ V(TransitionElementsKind) \
V(Typeof) \
V(TypeofIsAndBranch) \
V(UnaryMathOperation) \
@@ -182,6 +184,7 @@ class LChunkBuilder;
V(Calls) \
V(InobjectFields) \
V(BackingStoreFields) \
+ V(ElementsKind) \
V(ArrayElements) \
V(DoubleArrayElements) \
V(SpecializedArrayElements) \
@@ -245,7 +248,9 @@ class Range: public ZoneObject {
return lower_ >= Smi::kMinValue && upper_ <= Smi::kMaxValue;
}
void KeepOrder();
+#ifdef DEBUG
void Verify() const;
+#endif
void StackUpon(Range* other) {
Intersect(other);
@@ -397,10 +402,14 @@ class HType {
return type_ == kUninitialized;
}
+ bool IsHeapObject() {
+ ASSERT(type_ != kUninitialized);
+ return IsHeapNumber() || IsString() || IsNonPrimitive();
+ }
+
static HType TypeFromValue(Handle<Object> value);
const char* ToString();
- const char* ToShortString();
private:
enum Type {
@@ -615,8 +624,14 @@ class HValue: public ZoneObject {
void SetAllSideEffects() { flags_ |= AllSideEffects(); }
void ClearAllSideEffects() { flags_ &= ~AllSideEffects(); }
bool HasSideEffects() const { return (flags_ & AllSideEffects()) != 0; }
+ bool HasObservableSideEffects() const {
+ return (flags_ & ObservableSideEffects()) != 0;
+ }
int ChangesFlags() const { return flags_ & ChangesFlagsMask(); }
+ int ObservableChangesFlags() const {
+ return flags_ & ChangesFlagsMask() & ObservableSideEffects();
+ }
Range* range() const { return range_; }
bool HasRange() const { return range_ != NULL; }
@@ -625,7 +640,7 @@ class HValue: public ZoneObject {
void ComputeInitialRange();
// Representation helpers.
- virtual Representation RequiredInputRepresentation(int index) const = 0;
+ virtual Representation RequiredInputRepresentation(int index) = 0;
virtual Representation InferredRepresentation() {
return representation();
@@ -696,6 +711,12 @@ class HValue: public ZoneObject {
return ChangesFlagsMask() & ~(1 << kChangesOsrEntries);
}
+ // A flag mask of all side effects that can make observable changes in
+  // an executing program (i.e. are not safe to repeat, move or remove).
+ static int ObservableSideEffects() {
+ return ChangesFlagsMask() & ~(1 << kChangesElementsKind);
+ }
+
// Remove the matching use from the use list if present. Returns the
// removed list node or NULL.
HUseListNode* RemoveUse(HValue* value, int index);
@@ -841,7 +862,7 @@ class HTemplateControlInstruction: public HControlInstruction {
class HBlockEntry: public HTemplateInstruction<0> {
public:
- virtual Representation RequiredInputRepresentation(int index) const {
+ virtual Representation RequiredInputRepresentation(int index) {
return Representation::None();
}
@@ -854,7 +875,7 @@ class HBlockEntry: public HTemplateInstruction<0> {
// HSoftDeoptimize does not end a basic block as opposed to HDeoptimize.
class HSoftDeoptimize: public HTemplateInstruction<0> {
public:
- virtual Representation RequiredInputRepresentation(int index) const {
+ virtual Representation RequiredInputRepresentation(int index) {
return Representation::None();
}
@@ -866,7 +887,7 @@ class HDeoptimize: public HControlInstruction {
public:
explicit HDeoptimize(int environment_length) : values_(environment_length) { }
- virtual Representation RequiredInputRepresentation(int index) const {
+ virtual Representation RequiredInputRepresentation(int index) {
return Representation::None();
}
@@ -908,10 +929,10 @@ class HDeoptimize: public HControlInstruction {
class HGoto: public HTemplateControlInstruction<1, 0> {
public:
explicit HGoto(HBasicBlock* target) {
- SetSuccessorAt(0, target);
- }
+ SetSuccessorAt(0, target);
+ }
- virtual Representation RequiredInputRepresentation(int index) const {
+ virtual Representation RequiredInputRepresentation(int index) {
return Representation::None();
}
@@ -951,7 +972,7 @@ class HBranch: public HUnaryControlInstruction {
: HUnaryControlInstruction(value, NULL, NULL) { }
- virtual Representation RequiredInputRepresentation(int index) const {
+ virtual Representation RequiredInputRepresentation(int index) {
return Representation::None();
}
@@ -983,7 +1004,7 @@ class HCompareMap: public HUnaryControlInstruction {
Handle<Map> map() const { return map_; }
- virtual Representation RequiredInputRepresentation(int index) const {
+ virtual Representation RequiredInputRepresentation(int index) {
return Representation::Tagged();
}
@@ -1000,7 +1021,7 @@ class HReturn: public HTemplateControlInstruction<0, 1> {
SetOperandAt(0, value);
}
- virtual Representation RequiredInputRepresentation(int index) const {
+ virtual Representation RequiredInputRepresentation(int index) {
return Representation::Tagged();
}
@@ -1014,7 +1035,7 @@ class HReturn: public HTemplateControlInstruction<0, 1> {
class HAbnormalExit: public HTemplateControlInstruction<0, 0> {
public:
- virtual Representation RequiredInputRepresentation(int index) const {
+ virtual Representation RequiredInputRepresentation(int index) {
return Representation::None();
}
@@ -1049,7 +1070,7 @@ class HThrow: public HTemplateInstruction<2> {
SetAllSideEffects();
}
- virtual Representation RequiredInputRepresentation(int index) const {
+ virtual Representation RequiredInputRepresentation(int index) {
return Representation::Tagged();
}
@@ -1064,7 +1085,7 @@ class HUseConst: public HUnaryOperation {
public:
explicit HUseConst(HValue* old_value) : HUnaryOperation(old_value) { }
- virtual Representation RequiredInputRepresentation(int index) const {
+ virtual Representation RequiredInputRepresentation(int index) {
return Representation::None();
}
@@ -1083,7 +1104,7 @@ class HForceRepresentation: public HTemplateInstruction<1> {
virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
- virtual Representation RequiredInputRepresentation(int index) const {
+ virtual Representation RequiredInputRepresentation(int index) {
return representation(); // Same as the output representation.
}
@@ -1094,27 +1115,29 @@ class HForceRepresentation: public HTemplateInstruction<1> {
class HChange: public HUnaryOperation {
public:
HChange(HValue* value,
- Representation from,
Representation to,
bool is_truncating,
bool deoptimize_on_undefined)
- : HUnaryOperation(value),
- from_(from),
- deoptimize_on_undefined_(deoptimize_on_undefined) {
- ASSERT(!from.IsNone() && !to.IsNone());
- ASSERT(!from.Equals(to));
+ : HUnaryOperation(value) {
+ ASSERT(!value->representation().IsNone() && !to.IsNone());
+ ASSERT(!value->representation().Equals(to));
set_representation(to);
+ set_type(HType::TaggedNumber());
SetFlag(kUseGVN);
+ if (deoptimize_on_undefined) SetFlag(kDeoptimizeOnUndefined);
if (is_truncating) SetFlag(kTruncatingToInt32);
}
virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
+ virtual HType CalculateInferredType();
- Representation from() const { return from_; }
- Representation to() const { return representation(); }
- bool deoptimize_on_undefined() const { return deoptimize_on_undefined_; }
- virtual Representation RequiredInputRepresentation(int index) const {
- return from_;
+ Representation from() { return value()->representation(); }
+ Representation to() { return representation(); }
+ bool deoptimize_on_undefined() const {
+ return CheckFlag(kDeoptimizeOnUndefined);
+ }
+ virtual Representation RequiredInputRepresentation(int index) {
+ return from();
}
virtual Range* InferRange();
@@ -1124,16 +1147,7 @@ class HChange: public HUnaryOperation {
DECLARE_CONCRETE_INSTRUCTION(Change)
protected:
- virtual bool DataEquals(HValue* other) {
- if (!other->IsChange()) return false;
- HChange* change = HChange::cast(other);
- return to().Equals(change->to())
- && deoptimize_on_undefined() == change->deoptimize_on_undefined();
- }
-
- private:
- Representation from_;
- bool deoptimize_on_undefined_;
+ virtual bool DataEquals(HValue* other) { return true; }
};
@@ -1145,7 +1159,7 @@ class HClampToUint8: public HUnaryOperation {
SetFlag(kUseGVN);
}
- virtual Representation RequiredInputRepresentation(int index) const {
+ virtual Representation RequiredInputRepresentation(int index) {
return Representation::None();
}
@@ -1164,7 +1178,7 @@ class HToInt32: public HUnaryOperation {
SetFlag(kUseGVN);
}
- virtual Representation RequiredInputRepresentation(int index) const {
+ virtual Representation RequiredInputRepresentation(int index) {
return Representation::None();
}
@@ -1223,7 +1237,7 @@ class HSimulate: public HInstruction {
virtual int OperandCount() { return values_.length(); }
virtual HValue* OperandAt(int index) { return values_[index]; }
- virtual Representation RequiredInputRepresentation(int index) const {
+ virtual Representation RequiredInputRepresentation(int index) {
return Representation::None();
}
@@ -1268,7 +1282,7 @@ class HStackCheck: public HTemplateInstruction<1> {
HValue* context() { return OperandAt(0); }
- virtual Representation RequiredInputRepresentation(int index) const {
+ virtual Representation RequiredInputRepresentation(int index) {
return Representation::Tagged();
}
@@ -1306,7 +1320,7 @@ class HEnterInlined: public HTemplateInstruction<0> {
FunctionLiteral* function() const { return function_; }
CallKind call_kind() const { return call_kind_; }
- virtual Representation RequiredInputRepresentation(int index) const {
+ virtual Representation RequiredInputRepresentation(int index) {
return Representation::None();
}
@@ -1323,7 +1337,7 @@ class HLeaveInlined: public HTemplateInstruction<0> {
public:
HLeaveInlined() {}
- virtual Representation RequiredInputRepresentation(int index) const {
+ virtual Representation RequiredInputRepresentation(int index) {
return Representation::None();
}
@@ -1337,7 +1351,7 @@ class HPushArgument: public HUnaryOperation {
set_representation(Representation::Tagged());
}
- virtual Representation RequiredInputRepresentation(int index) const {
+ virtual Representation RequiredInputRepresentation(int index) {
return Representation::Tagged();
}
@@ -1349,19 +1363,27 @@ class HPushArgument: public HUnaryOperation {
class HThisFunction: public HTemplateInstruction<0> {
public:
- HThisFunction() {
+ explicit HThisFunction(Handle<JSFunction> closure) : closure_(closure) {
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
}
- virtual Representation RequiredInputRepresentation(int index) const {
+ virtual Representation RequiredInputRepresentation(int index) {
return Representation::None();
}
+ Handle<JSFunction> closure() const { return closure_; }
+
DECLARE_CONCRETE_INSTRUCTION(ThisFunction)
protected:
- virtual bool DataEquals(HValue* other) { return true; }
+ virtual bool DataEquals(HValue* other) {
+ HThisFunction* b = HThisFunction::cast(other);
+ return *closure() == *b->closure();
+ }
+
+ private:
+ Handle<JSFunction> closure_;
};
@@ -1372,7 +1394,7 @@ class HContext: public HTemplateInstruction<0> {
SetFlag(kUseGVN);
}
- virtual Representation RequiredInputRepresentation(int index) const {
+ virtual Representation RequiredInputRepresentation(int index) {
return Representation::None();
}
@@ -1392,7 +1414,7 @@ class HOuterContext: public HUnaryOperation {
DECLARE_CONCRETE_INSTRUCTION(OuterContext);
- virtual Representation RequiredInputRepresentation(int index) const {
+ virtual Representation RequiredInputRepresentation(int index) {
return Representation::Tagged();
}
@@ -1410,7 +1432,7 @@ class HGlobalObject: public HUnaryOperation {
DECLARE_CONCRETE_INSTRUCTION(GlobalObject)
- virtual Representation RequiredInputRepresentation(int index) const {
+ virtual Representation RequiredInputRepresentation(int index) {
return Representation::Tagged();
}
@@ -1429,7 +1451,7 @@ class HGlobalReceiver: public HUnaryOperation {
DECLARE_CONCRETE_INSTRUCTION(GlobalReceiver)
- virtual Representation RequiredInputRepresentation(int index) const {
+ virtual Representation RequiredInputRepresentation(int index) {
return Representation::Tagged();
}
@@ -1465,7 +1487,7 @@ class HUnaryCall: public HCall<1> {
SetOperandAt(0, value);
}
- virtual Representation RequiredInputRepresentation(int index) const {
+ virtual Representation RequiredInputRepresentation(int index) {
return Representation::Tagged();
}
@@ -1485,7 +1507,7 @@ class HBinaryCall: public HCall<2> {
virtual void PrintDataTo(StringStream* stream);
- virtual Representation RequiredInputRepresentation(int index) const {
+ virtual Representation RequiredInputRepresentation(int index) {
return Representation::Tagged();
}
@@ -1500,7 +1522,7 @@ class HInvokeFunction: public HBinaryCall {
: HBinaryCall(context, function, argument_count) {
}
- virtual Representation RequiredInputRepresentation(int index) const {
+ virtual Representation RequiredInputRepresentation(int index) {
return Representation::Tagged();
}
@@ -1525,7 +1547,7 @@ class HCallConstantFunction: public HCall<0> {
virtual void PrintDataTo(StringStream* stream);
- virtual Representation RequiredInputRepresentation(int index) const {
+ virtual Representation RequiredInputRepresentation(int index) {
return Representation::None();
}
@@ -1542,7 +1564,7 @@ class HCallKeyed: public HBinaryCall {
: HBinaryCall(context, key, argument_count) {
}
- virtual Representation RequiredInputRepresentation(int index) const {
+ virtual Representation RequiredInputRepresentation(int index) {
return Representation::Tagged();
}
@@ -1566,7 +1588,7 @@ class HCallNamed: public HUnaryCall {
DECLARE_CONCRETE_INSTRUCTION(CallNamed)
- virtual Representation RequiredInputRepresentation(int index) const {
+ virtual Representation RequiredInputRepresentation(int index) {
return Representation::Tagged();
}
@@ -1575,15 +1597,16 @@ class HCallNamed: public HUnaryCall {
};
-class HCallFunction: public HUnaryCall {
+class HCallFunction: public HBinaryCall {
public:
- HCallFunction(HValue* context, int argument_count)
- : HUnaryCall(context, argument_count) {
+ HCallFunction(HValue* context, HValue* function, int argument_count)
+ : HBinaryCall(context, function, argument_count) {
}
- HValue* context() { return value(); }
+ HValue* context() { return first(); }
+ HValue* function() { return second(); }
- virtual Representation RequiredInputRepresentation(int index) const {
+ virtual Representation RequiredInputRepresentation(int index) {
return Representation::Tagged();
}
@@ -1602,7 +1625,7 @@ class HCallGlobal: public HUnaryCall {
HValue* context() { return value(); }
Handle<String> name() const { return name_; }
- virtual Representation RequiredInputRepresentation(int index) const {
+ virtual Representation RequiredInputRepresentation(int index) {
return Representation::Tagged();
}
@@ -1622,7 +1645,7 @@ class HCallKnownGlobal: public HCall<0> {
Handle<JSFunction> target() const { return target_; }
- virtual Representation RequiredInputRepresentation(int index) const {
+ virtual Representation RequiredInputRepresentation(int index) {
return Representation::None();
}
@@ -1639,7 +1662,7 @@ class HCallNew: public HBinaryCall {
: HBinaryCall(context, constructor, argument_count) {
}
- virtual Representation RequiredInputRepresentation(int index) const {
+ virtual Representation RequiredInputRepresentation(int index) {
return Representation::Tagged();
}
@@ -1666,7 +1689,7 @@ class HCallRuntime: public HCall<1> {
const Runtime::Function* function() const { return c_function_; }
Handle<String> name() const { return name_; }
- virtual Representation RequiredInputRepresentation(int index) const {
+ virtual Representation RequiredInputRepresentation(int index) {
return Representation::Tagged();
}
@@ -1692,7 +1715,7 @@ class HJSArrayLength: public HTemplateInstruction<2> {
SetFlag(kDependsOnMaps);
}
- virtual Representation RequiredInputRepresentation(int index) const {
+ virtual Representation RequiredInputRepresentation(int index) {
return Representation::Tagged();
}
@@ -1716,7 +1739,7 @@ class HFixedArrayBaseLength: public HUnaryOperation {
SetFlag(kDependsOnArrayLengths);
}
- virtual Representation RequiredInputRepresentation(int index) const {
+ virtual Representation RequiredInputRepresentation(int index) {
return Representation::Tagged();
}
@@ -1732,10 +1755,10 @@ class HElementsKind: public HUnaryOperation {
explicit HElementsKind(HValue* value) : HUnaryOperation(value) {
set_representation(Representation::Integer32());
SetFlag(kUseGVN);
- SetFlag(kDependsOnMaps);
+ SetFlag(kDependsOnElementsKind);
}
- virtual Representation RequiredInputRepresentation(int index) const {
+ virtual Representation RequiredInputRepresentation(int index) {
return Representation::Tagged();
}
@@ -1754,7 +1777,7 @@ class HBitNot: public HUnaryOperation {
SetFlag(kTruncatingToInt32);
}
- virtual Representation RequiredInputRepresentation(int index) const {
+ virtual Representation RequiredInputRepresentation(int index) {
return Representation::Integer32();
}
virtual HType CalculateInferredType();
@@ -1804,7 +1827,7 @@ class HUnaryMathOperation: public HTemplateInstruction<2> {
virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
- virtual Representation RequiredInputRepresentation(int index) const {
+ virtual Representation RequiredInputRepresentation(int index) {
if (index == 0) {
return Representation::Tagged();
} else {
@@ -1859,9 +1882,10 @@ class HLoadElements: public HUnaryOperation {
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
SetFlag(kDependsOnMaps);
+ SetFlag(kDependsOnElementsKind);
}
- virtual Representation RequiredInputRepresentation(int index) const {
+ virtual Representation RequiredInputRepresentation(int index) {
return Representation::Tagged();
}
@@ -1884,7 +1908,7 @@ class HLoadExternalArrayPointer: public HUnaryOperation {
SetFlag(kUseGVN);
}
- virtual Representation RequiredInputRepresentation(int index) const {
+ virtual Representation RequiredInputRepresentation(int index) {
return Representation::Tagged();
}
@@ -1908,7 +1932,7 @@ class HCheckMap: public HTemplateInstruction<2> {
SetFlag(kDependsOnMaps);
}
- virtual Representation RequiredInputRepresentation(int index) const {
+ virtual Representation RequiredInputRepresentation(int index) {
return Representation::Tagged();
}
virtual void PrintDataTo(StringStream* stream);
@@ -1938,7 +1962,7 @@ class HCheckFunction: public HUnaryOperation {
SetFlag(kUseGVN);
}
- virtual Representation RequiredInputRepresentation(int index) const {
+ virtual Representation RequiredInputRepresentation(int index) {
return Representation::Tagged();
}
virtual void PrintDataTo(StringStream* stream);
@@ -1978,7 +2002,9 @@ class HCheckInstanceType: public HUnaryOperation {
return new HCheckInstanceType(value, IS_SYMBOL);
}
- virtual Representation RequiredInputRepresentation(int index) const {
+ virtual void PrintDataTo(StringStream* stream);
+
+ virtual Representation RequiredInputRepresentation(int index) {
return Representation::Tagged();
}
@@ -2008,6 +2034,8 @@ class HCheckInstanceType: public HUnaryOperation {
LAST_INTERVAL_CHECK = IS_JS_ARRAY
};
+ const char* GetCheckName();
+
HCheckInstanceType(HValue* value, Check check)
: HUnaryOperation(value), check_(check) {
set_representation(Representation::Tagged());
@@ -2025,7 +2053,7 @@ class HCheckNonSmi: public HUnaryOperation {
SetFlag(kUseGVN);
}
- virtual Representation RequiredInputRepresentation(int index) const {
+ virtual Representation RequiredInputRepresentation(int index) {
return Representation::Tagged();
}
@@ -2071,7 +2099,7 @@ class HCheckPrototypeMaps: public HTemplateInstruction<0> {
DECLARE_CONCRETE_INSTRUCTION(CheckPrototypeMaps)
- virtual Representation RequiredInputRepresentation(int index) const {
+ virtual Representation RequiredInputRepresentation(int index) {
return Representation::None();
}
@@ -2102,7 +2130,7 @@ class HCheckSmi: public HUnaryOperation {
SetFlag(kUseGVN);
}
- virtual Representation RequiredInputRepresentation(int index) const {
+ virtual Representation RequiredInputRepresentation(int index) {
return Representation::Tagged();
}
virtual HType CalculateInferredType();
@@ -2151,7 +2179,7 @@ class HPhi: public HValue {
}
virtual Range* InferRange();
- virtual Representation RequiredInputRepresentation(int index) const {
+ virtual Representation RequiredInputRepresentation(int index) {
return representation();
}
virtual HType CalculateInferredType();
@@ -2243,7 +2271,7 @@ class HArgumentsObject: public HTemplateInstruction<0> {
SetFlag(kIsArguments);
}
- virtual Representation RequiredInputRepresentation(int index) const {
+ virtual Representation RequiredInputRepresentation(int index) {
return Representation::None();
}
@@ -2259,7 +2287,20 @@ class HConstant: public HTemplateInstruction<0> {
bool InOldSpace() const { return !HEAP->InNewSpace(*handle_); }
- virtual Representation RequiredInputRepresentation(int index) const {
+ bool ImmortalImmovable() const {
+ Heap* heap = HEAP;
+ if (*handle_ == heap->undefined_value()) return true;
+ if (*handle_ == heap->null_value()) return true;
+ if (*handle_ == heap->true_value()) return true;
+ if (*handle_ == heap->false_value()) return true;
+ if (*handle_ == heap->the_hole_value()) return true;
+ if (*handle_ == heap->minus_zero_value()) return true;
+ if (*handle_ == heap->nan_value()) return true;
+ if (*handle_ == heap->empty_string()) return true;
+ return false;
+ }
+
+ virtual Representation RequiredInputRepresentation(int index) {
return Representation::None();
}
@@ -2272,6 +2313,7 @@ class HConstant: public HTemplateInstruction<0> {
}
virtual bool EmitAtUses() { return !representation().IsDouble(); }
+ virtual HValue* Canonicalize();
virtual void PrintDataTo(StringStream* stream);
virtual HType CalculateInferredType();
bool IsInteger() const { return handle_->IsSmi(); }
@@ -2287,6 +2329,12 @@ class HConstant: public HTemplateInstruction<0> {
ASSERT(HasDoubleValue());
return double_value_;
}
+ bool HasNumberValue() const { return has_int32_value_ || has_double_value_; }
+ int32_t NumberValueAsInteger32() const {
+ ASSERT(HasNumberValue());
+ if (has_int32_value_) return int32_value_;
+ return DoubleToInt32(double_value_);
+ }
bool HasStringValue() const { return handle_->IsString(); }
bool ToBoolean() const;
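
The HasNumberValue/NumberValueAsInteger32 pair above lets constant folding treat int32 and double constants uniformly by funnelling doubles through DoubleToInt32. That helper is not part of this hunk; the standalone sketch below assumes it performs the standard ECMA-262 ToInt32 conversion (NaN and infinities become 0, everything else is truncated and reduced modulo 2^32 into the signed 32-bit range):

    #include <cmath>
    #include <cstdint>
    #include <cstdio>

    // Sketch of the ToInt32 conversion assumed above (ECMA-262 9.5).
    static int32_t ToInt32(double value) {
      if (std::isnan(value) || std::isinf(value)) return 0;
      double truncated = std::trunc(value);
      // fmod keeps the sign of the dividend, so fold into [0, 2^32) first.
      double modulo = std::fmod(truncated, 4294967296.0);
      if (modulo < 0) modulo += 4294967296.0;
      return static_cast<int32_t>(static_cast<uint32_t>(modulo));
    }

    int main() {
      std::printf("%d\n", ToInt32(4294967301.0));   // 5  (2^32 + 5)
      std::printf("%d\n", ToInt32(-1.5));           // -1
      std::printf("%d\n", ToInt32(2147483648.0));   // -2147483648
    }
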
@@ -2367,7 +2415,7 @@ class HApplyArguments: public HTemplateInstruction<4> {
SetAllSideEffects();
}
- virtual Representation RequiredInputRepresentation(int index) const {
+ virtual Representation RequiredInputRepresentation(int index) {
// The length is untagged, all other inputs are tagged.
return (index == 2)
? Representation::Integer32()
@@ -2394,7 +2442,7 @@ class HArgumentsElements: public HTemplateInstruction<0> {
DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements)
- virtual Representation RequiredInputRepresentation(int index) const {
+ virtual Representation RequiredInputRepresentation(int index) {
return Representation::None();
}
@@ -2410,7 +2458,7 @@ class HArgumentsLength: public HUnaryOperation {
SetFlag(kUseGVN);
}
- virtual Representation RequiredInputRepresentation(int index) const {
+ virtual Representation RequiredInputRepresentation(int index) {
return Representation::Tagged();
}
@@ -2433,7 +2481,7 @@ class HAccessArgumentsAt: public HTemplateInstruction<3> {
virtual void PrintDataTo(StringStream* stream);
- virtual Representation RequiredInputRepresentation(int index) const {
+ virtual Representation RequiredInputRepresentation(int index) {
// The arguments elements is considered tagged.
return index == 0
? Representation::Tagged()
@@ -2459,7 +2507,7 @@ class HBoundsCheck: public HTemplateInstruction<2> {
SetFlag(kUseGVN);
}
- virtual Representation RequiredInputRepresentation(int index) const {
+ virtual Representation RequiredInputRepresentation(int index) {
return Representation::Integer32();
}
@@ -2484,7 +2532,7 @@ class HBitwiseBinaryOperation: public HBinaryOperation {
SetAllSideEffects();
}
- virtual Representation RequiredInputRepresentation(int index) const {
+ virtual Representation RequiredInputRepresentation(int index) {
return index == 0
? Representation::Tagged()
: representation();
@@ -2522,7 +2570,7 @@ class HArithmeticBinaryOperation: public HBinaryOperation {
}
virtual HType CalculateInferredType();
- virtual Representation RequiredInputRepresentation(int index) const {
+ virtual Representation RequiredInputRepresentation(int index) {
return index == 0
? Representation::Tagged()
: representation();
@@ -2549,7 +2597,7 @@ class HCompareGeneric: public HBinaryOperation {
SetAllSideEffects();
}
- virtual Representation RequiredInputRepresentation(int index) const {
+ virtual Representation RequiredInputRepresentation(int index) {
return Representation::Tagged();
}
@@ -2587,7 +2635,7 @@ class HCompareIDAndBranch: public HTemplateControlInstruction<2, 2> {
return input_representation_;
}
- virtual Representation RequiredInputRepresentation(int index) const {
+ virtual Representation RequiredInputRepresentation(int index) {
return input_representation_;
}
virtual void PrintDataTo(StringStream* stream);
@@ -2610,7 +2658,9 @@ class HCompareObjectEqAndBranch: public HTemplateControlInstruction<2, 2> {
HValue* left() { return OperandAt(0); }
HValue* right() { return OperandAt(1); }
- virtual Representation RequiredInputRepresentation(int index) const {
+ virtual void PrintDataTo(StringStream* stream);
+
+ virtual Representation RequiredInputRepresentation(int index) {
return Representation::Tagged();
}
@@ -2629,7 +2679,7 @@ class HCompareConstantEqAndBranch: public HUnaryControlInstruction {
HValue* left() { return value(); }
int right() const { return right_; }
- virtual Representation RequiredInputRepresentation(int index) const {
+ virtual Representation RequiredInputRepresentation(int index) {
return Representation::Integer32();
}
@@ -2641,21 +2691,25 @@ class HCompareConstantEqAndBranch: public HUnaryControlInstruction {
};
-class HIsNullAndBranch: public HUnaryControlInstruction {
+class HIsNilAndBranch: public HUnaryControlInstruction {
public:
- HIsNullAndBranch(HValue* value, bool is_strict)
- : HUnaryControlInstruction(value, NULL, NULL), is_strict_(is_strict) { }
+ HIsNilAndBranch(HValue* value, EqualityKind kind, NilValue nil)
+ : HUnaryControlInstruction(value, NULL, NULL), kind_(kind), nil_(nil) { }
+
+ EqualityKind kind() const { return kind_; }
+ NilValue nil() const { return nil_; }
- bool is_strict() const { return is_strict_; }
+ virtual void PrintDataTo(StringStream* stream);
- virtual Representation RequiredInputRepresentation(int index) const {
+ virtual Representation RequiredInputRepresentation(int index) {
return Representation::Tagged();
}
- DECLARE_CONCRETE_INSTRUCTION(IsNullAndBranch)
+ DECLARE_CONCRETE_INSTRUCTION(IsNilAndBranch)
private:
- bool is_strict_;
+ EqualityKind kind_;
+ NilValue nil_;
};
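
Folding the old is_strict bit into an (EqualityKind, NilValue) pair lets one instruction cover x == null, x === null, x == undefined, and x === undefined. A toy model of the predicate those four combinations select, with illustrative enums (full JS semantics also lets the non-strict case match undetectable objects, omitted here):

    enum class EqualityKind { kStrict, kNonStrict };
    enum class NilValue { kNull, kUndefined };

    // Non-strict nil comparison matches both nil values; strict comparison
    // matches exactly the one named by the instruction.
    static bool MatchesNil(bool is_null, bool is_undefined,
                           EqualityKind kind, NilValue nil) {
      if (kind == EqualityKind::kNonStrict) return is_null || is_undefined;
      return nil == NilValue::kNull ? is_null : is_undefined;
    }
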
@@ -2664,13 +2718,25 @@ class HIsObjectAndBranch: public HUnaryControlInstruction {
explicit HIsObjectAndBranch(HValue* value)
: HUnaryControlInstruction(value, NULL, NULL) { }
- virtual Representation RequiredInputRepresentation(int index) const {
+ virtual Representation RequiredInputRepresentation(int index) {
return Representation::Tagged();
}
DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch)
};
+class HIsStringAndBranch: public HUnaryControlInstruction {
+ public:
+ explicit HIsStringAndBranch(HValue* value)
+ : HUnaryControlInstruction(value, NULL, NULL) { }
+
+ virtual Representation RequiredInputRepresentation(int index) {
+ return Representation::Tagged();
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch)
+};
+
class HIsSmiAndBranch: public HUnaryControlInstruction {
public:
@@ -2679,7 +2745,7 @@ class HIsSmiAndBranch: public HUnaryControlInstruction {
DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch)
- virtual Representation RequiredInputRepresentation(int index) const {
+ virtual Representation RequiredInputRepresentation(int index) {
return Representation::Tagged();
}
@@ -2693,7 +2759,7 @@ class HIsUndetectableAndBranch: public HUnaryControlInstruction {
explicit HIsUndetectableAndBranch(HValue* value)
: HUnaryControlInstruction(value, NULL, NULL) { }
- virtual Representation RequiredInputRepresentation(int index) const {
+ virtual Representation RequiredInputRepresentation(int index) {
return Representation::Tagged();
}
@@ -2701,9 +2767,45 @@ class HIsUndetectableAndBranch: public HUnaryControlInstruction {
};
+class HStringCompareAndBranch: public HTemplateControlInstruction<2, 3> {
+ public:
+ HStringCompareAndBranch(HValue* context,
+ HValue* left,
+ HValue* right,
+ Token::Value token)
+ : token_(token) {
+ ASSERT(Token::IsCompareOp(token));
+ SetOperandAt(0, context);
+ SetOperandAt(1, left);
+ SetOperandAt(2, right);
+ set_representation(Representation::Tagged());
+ }
+
+ HValue* context() { return OperandAt(0); }
+ HValue* left() { return OperandAt(1); }
+ HValue* right() { return OperandAt(2); }
+ Token::Value token() const { return token_; }
+
+ virtual void PrintDataTo(StringStream* stream);
+
+ virtual Representation RequiredInputRepresentation(int index) {
+ return Representation::Tagged();
+ }
+
+ Representation GetInputRepresentation() const {
+ return Representation::Tagged();
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(StringCompareAndBranch)
+
+ private:
+ Token::Value token_;
+};
+
+
class HIsConstructCallAndBranch: public HTemplateControlInstruction<2, 0> {
public:
- virtual Representation RequiredInputRepresentation(int index) const {
+ virtual Representation RequiredInputRepresentation(int index) {
return Representation::None();
}
@@ -2725,7 +2827,7 @@ class HHasInstanceTypeAndBranch: public HUnaryControlInstruction {
virtual void PrintDataTo(StringStream* stream);
- virtual Representation RequiredInputRepresentation(int index) const {
+ virtual Representation RequiredInputRepresentation(int index) {
return Representation::Tagged();
}
@@ -2742,7 +2844,7 @@ class HHasCachedArrayIndexAndBranch: public HUnaryControlInstruction {
explicit HHasCachedArrayIndexAndBranch(HValue* value)
: HUnaryControlInstruction(value, NULL, NULL) { }
- virtual Representation RequiredInputRepresentation(int index) const {
+ virtual Representation RequiredInputRepresentation(int index) {
return Representation::Tagged();
}
@@ -2757,7 +2859,7 @@ class HGetCachedArrayIndex: public HUnaryOperation {
SetFlag(kUseGVN);
}
- virtual Representation RequiredInputRepresentation(int index) const {
+ virtual Representation RequiredInputRepresentation(int index) {
return Representation::Tagged();
}
@@ -2776,7 +2878,7 @@ class HClassOfTestAndBranch: public HUnaryControlInstruction {
DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch)
- virtual Representation RequiredInputRepresentation(int index) const {
+ virtual Representation RequiredInputRepresentation(int index) {
return Representation::Tagged();
}
@@ -2800,7 +2902,7 @@ class HTypeofIsAndBranch: public HUnaryControlInstruction {
DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch)
- virtual Representation RequiredInputRepresentation(int index) const {
+ virtual Representation RequiredInputRepresentation(int index) {
return Representation::Tagged();
}
@@ -2817,7 +2919,7 @@ class HInstanceOf: public HBinaryOperation {
SetAllSideEffects();
}
- virtual Representation RequiredInputRepresentation(int index) const {
+ virtual Representation RequiredInputRepresentation(int index) {
return Representation::Tagged();
}
@@ -2845,7 +2947,7 @@ class HInstanceOfKnownGlobal: public HTemplateInstruction<2> {
HValue* left() { return OperandAt(1); }
Handle<JSFunction> function() { return function_; }
- virtual Representation RequiredInputRepresentation(int index) const {
+ virtual Representation RequiredInputRepresentation(int index) {
return Representation::Tagged();
}
@@ -2870,7 +2972,7 @@ class HPower: public HTemplateInstruction<2> {
HValue* left() { return OperandAt(0); }
HValue* right() { return OperandAt(1); }
- virtual Representation RequiredInputRepresentation(int index) const {
+ virtual Representation RequiredInputRepresentation(int index) {
return index == 0
? Representation::Double()
: Representation::None();
@@ -2898,6 +3000,11 @@ class HAdd: public HArithmeticBinaryOperation {
virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
+ static HInstruction* NewHAdd(Zone* zone,
+ HValue* context,
+ HValue* left,
+ HValue* right);
+
virtual HType CalculateInferredType();
DECLARE_CONCRETE_INSTRUCTION(Add)
@@ -2918,6 +3025,11 @@ class HSub: public HArithmeticBinaryOperation {
virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
+ static HInstruction* NewHSub(Zone* zone,
+ HValue* context,
+ HValue* left,
+ HValue* right);
+
DECLARE_CONCRETE_INSTRUCTION(Sub)
protected:
@@ -2941,6 +3053,11 @@ class HMul: public HArithmeticBinaryOperation {
return !representation().IsTagged();
}
+ static HInstruction* NewHMul(Zone* zone,
+ HValue* context,
+ HValue* left,
+ HValue* right);
+
DECLARE_CONCRETE_INSTRUCTION(Mul)
protected:
@@ -2969,6 +3086,11 @@ class HMod: public HArithmeticBinaryOperation {
virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
+ static HInstruction* NewHMod(Zone* zone,
+ HValue* context,
+ HValue* left,
+ HValue* right);
+
DECLARE_CONCRETE_INSTRUCTION(Mod)
protected:
@@ -2988,24 +3110,13 @@ class HDiv: public HArithmeticBinaryOperation {
virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
- DECLARE_CONCRETE_INSTRUCTION(Div)
- protected:
- virtual bool DataEquals(HValue* other) { return true; }
-
- virtual Range* InferRange();
-};
-
-
-class HBitAnd: public HBitwiseBinaryOperation {
- public:
- HBitAnd(HValue* context, HValue* left, HValue* right)
- : HBitwiseBinaryOperation(context, left, right) { }
-
- virtual bool IsCommutative() const { return true; }
- virtual HType CalculateInferredType();
+ static HInstruction* NewHDiv(Zone* zone,
+ HValue* context,
+ HValue* left,
+ HValue* right);
- DECLARE_CONCRETE_INSTRUCTION(BitAnd)
+ DECLARE_CONCRETE_INSTRUCTION(Div)
protected:
virtual bool DataEquals(HValue* other) { return true; }
@@ -3014,35 +3125,36 @@ class HBitAnd: public HBitwiseBinaryOperation {
};
-class HBitXor: public HBitwiseBinaryOperation {
+class HBitwise: public HBitwiseBinaryOperation {
public:
- HBitXor(HValue* context, HValue* left, HValue* right)
- : HBitwiseBinaryOperation(context, left, right) { }
-
- virtual bool IsCommutative() const { return true; }
- virtual HType CalculateInferredType();
-
- DECLARE_CONCRETE_INSTRUCTION(BitXor)
-
- protected:
- virtual bool DataEquals(HValue* other) { return true; }
-};
-
+ HBitwise(Token::Value op, HValue* context, HValue* left, HValue* right)
+ : HBitwiseBinaryOperation(context, left, right), op_(op) {
+ ASSERT(op == Token::BIT_AND ||
+ op == Token::BIT_OR ||
+ op == Token::BIT_XOR);
+ }
-class HBitOr: public HBitwiseBinaryOperation {
- public:
- HBitOr(HValue* context, HValue* left, HValue* right)
- : HBitwiseBinaryOperation(context, left, right) { }
+ Token::Value op() const { return op_; }
virtual bool IsCommutative() const { return true; }
- virtual HType CalculateInferredType();
- DECLARE_CONCRETE_INSTRUCTION(BitOr)
+ static HInstruction* NewHBitwise(Zone* zone,
+ Token::Value op,
+ HValue* context,
+ HValue* left,
+ HValue* right);
+
+ DECLARE_CONCRETE_INSTRUCTION(Bitwise)
protected:
- virtual bool DataEquals(HValue* other) { return true; }
+ virtual bool DataEquals(HValue* other) {
+ return op() == HBitwise::cast(other)->op();
+ }
virtual Range* InferRange();
+
+ private:
+ Token::Value op_;
};
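
Merging HBitAnd/HBitOr/HBitXor into one HBitwise parameterized by Token::Value shrinks the class hierarchy, but it puts a new obligation on value numbering: two HBitwise nodes are interchangeable only when their operators match, which is exactly what the overridden DataEquals enforces. An illustrative reduction of that rule (operand equality is assumed to be checked by the generic equality layer, as in the real class):

    enum class BitOp { kAnd, kOr, kXor };

    struct BitwiseNode {
      BitOp op;
      // GVN may only merge two nodes of the same kind with equal operands
      // *and* equal instruction-specific data; here that data is the op.
      bool DataEquals(const BitwiseNode& other) const {
        return op == other.op;
      }
    };
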
@@ -3052,7 +3164,11 @@ class HShl: public HBitwiseBinaryOperation {
: HBitwiseBinaryOperation(context, left, right) { }
virtual Range* InferRange();
- virtual HType CalculateInferredType();
+
+ static HInstruction* NewHShl(Zone* zone,
+ HValue* context,
+ HValue* left,
+ HValue* right);
DECLARE_CONCRETE_INSTRUCTION(Shl)
@@ -3067,7 +3183,11 @@ class HShr: public HBitwiseBinaryOperation {
: HBitwiseBinaryOperation(context, left, right) { }
virtual Range* InferRange();
- virtual HType CalculateInferredType();
+
+ static HInstruction* NewHShr(Zone* zone,
+ HValue* context,
+ HValue* left,
+ HValue* right);
DECLARE_CONCRETE_INSTRUCTION(Shr)
@@ -3082,7 +3202,11 @@ class HSar: public HBitwiseBinaryOperation {
: HBitwiseBinaryOperation(context, left, right) { }
virtual Range* InferRange();
- virtual HType CalculateInferredType();
+
+ static HInstruction* NewHSar(Zone* zone,
+ HValue* context,
+ HValue* left,
+ HValue* right);
DECLARE_CONCRETE_INSTRUCTION(Sar)
@@ -3099,7 +3223,7 @@ class HOsrEntry: public HTemplateInstruction<0> {
int ast_id() const { return ast_id_; }
- virtual Representation RequiredInputRepresentation(int index) const {
+ virtual Representation RequiredInputRepresentation(int index) {
return Representation::None();
}
@@ -3120,7 +3244,7 @@ class HParameter: public HTemplateInstruction<0> {
virtual void PrintDataTo(StringStream* stream);
- virtual Representation RequiredInputRepresentation(int index) const {
+ virtual Representation RequiredInputRepresentation(int index) {
return Representation::None();
}
@@ -3152,7 +3276,7 @@ class HCallStub: public HUnaryCall {
virtual void PrintDataTo(StringStream* stream);
- virtual Representation RequiredInputRepresentation(int index) const {
+ virtual Representation RequiredInputRepresentation(int index) {
return Representation::Tagged();
}
@@ -3168,7 +3292,7 @@ class HUnknownOSRValue: public HTemplateInstruction<0> {
public:
HUnknownOSRValue() { set_representation(Representation::Tagged()); }
- virtual Representation RequiredInputRepresentation(int index) const {
+ virtual Representation RequiredInputRepresentation(int index) {
return Representation::None();
}
@@ -3178,15 +3302,15 @@ class HUnknownOSRValue: public HTemplateInstruction<0> {
class HLoadGlobalCell: public HTemplateInstruction<0> {
public:
- HLoadGlobalCell(Handle<JSGlobalPropertyCell> cell, bool check_hole_value)
- : cell_(cell), check_hole_value_(check_hole_value) {
+ HLoadGlobalCell(Handle<JSGlobalPropertyCell> cell, PropertyDetails details)
+ : cell_(cell), details_(details) {
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
SetFlag(kDependsOnGlobalVars);
}
Handle<JSGlobalPropertyCell> cell() const { return cell_; }
- bool check_hole_value() const { return check_hole_value_; }
+ bool RequiresHoleCheck();
virtual void PrintDataTo(StringStream* stream);
@@ -3195,7 +3319,7 @@ class HLoadGlobalCell: public HTemplateInstruction<0> {
return reinterpret_cast<intptr_t>(*cell_);
}
- virtual Representation RequiredInputRepresentation(int index) const {
+ virtual Representation RequiredInputRepresentation(int index) {
return Representation::None();
}
@@ -3209,7 +3333,7 @@ class HLoadGlobalCell: public HTemplateInstruction<0> {
private:
Handle<JSGlobalPropertyCell> cell_;
- bool check_hole_value_;
+ PropertyDetails details_;
};
@@ -3234,7 +3358,7 @@ class HLoadGlobalGeneric: public HTemplateInstruction<2> {
virtual void PrintDataTo(StringStream* stream);
- virtual Representation RequiredInputRepresentation(int index) const {
+ virtual Representation RequiredInputRepresentation(int index) {
return Representation::Tagged();
}
@@ -3246,21 +3370,33 @@ class HLoadGlobalGeneric: public HTemplateInstruction<2> {
};
+inline bool StoringValueNeedsWriteBarrier(HValue* value) {
+ return !value->type().IsBoolean()
+ && !value->type().IsSmi()
+ && !(value->IsConstant() && HConstant::cast(value)->ImmortalImmovable());
+}
+
+
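
Note the predicate change relative to the deleted static helper further down: the constant exemption is now ImmortalImmovable() rather than InOldSpace(). The roots listed in ImmortalImmovable() never move and never die, so storing one of them can never create a pointer the collector must track, while an arbitrary old-space object gives no such guarantee (under compaction or incremental marking, for instance). A runnable toy model of the decision, with illustrative names rather than V8's:

    #include <cstdio>

    // A store needs no barrier when the value is a smi (not a pointer) or
    // one of the immortal immovable roots (never relocated or collected).
    struct ValueModel {
      bool is_smi;
      bool is_immortal_immovable_root;  // undefined, null, true, false, ...
    };

    static bool StoreNeedsWriteBarrier(const ValueModel& v) {
      return !v.is_smi && !v.is_immortal_immovable_root;
    }

    int main() {
      std::printf("%d\n", StoreNeedsWriteBarrier({true, false}));   // 0
      std::printf("%d\n", StoreNeedsWriteBarrier({false, true}));   // 0
      std::printf("%d\n", StoreNeedsWriteBarrier({false, false}));  // 1
    }
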
class HStoreGlobalCell: public HUnaryOperation {
public:
HStoreGlobalCell(HValue* value,
Handle<JSGlobalPropertyCell> cell,
- bool check_hole_value)
+ PropertyDetails details)
: HUnaryOperation(value),
cell_(cell),
- check_hole_value_(check_hole_value) {
+ details_(details) {
SetFlag(kChangesGlobalVars);
}
Handle<JSGlobalPropertyCell> cell() const { return cell_; }
- bool check_hole_value() const { return check_hole_value_; }
+ bool RequiresHoleCheck() {
+ return !details_.IsDontDelete() || details_.IsReadOnly();
+ }
+ bool NeedsWriteBarrier() {
+ return StoringValueNeedsWriteBarrier(value());
+ }
- virtual Representation RequiredInputRepresentation(int index) const {
+ virtual Representation RequiredInputRepresentation(int index) {
return Representation::Tagged();
}
virtual void PrintDataTo(StringStream* stream);
@@ -3269,7 +3405,7 @@ class HStoreGlobalCell: public HUnaryOperation {
private:
Handle<JSGlobalPropertyCell> cell_;
- bool check_hole_value_;
+ PropertyDetails details_;
};
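
Replacing the precomputed check_hole_value_ bit with the cell's PropertyDetails lets RequiresHoleCheck derive the answer from the property's attributes: only a global that can be deleted (its cell may hold the hole) or is read-only (the store must not overwrite it) needs the check. The same rule as a runnable sketch, using a stand-in details type:

    #include <cstdio>

    struct DetailsModel {
      bool dont_delete;  // property cannot be deleted
      bool read_only;    // property cannot be overwritten
    };

    // Mirrors HStoreGlobalCell::RequiresHoleCheck() above.
    static bool RequiresHoleCheck(const DetailsModel& d) {
      return !d.dont_delete || d.read_only;
    }

    int main() {
      std::printf("%d\n", RequiresHoleCheck({true, false}));   // 0: fast path
      std::printf("%d\n", RequiresHoleCheck({false, false}));  // 1: deletable
      std::printf("%d\n", RequiresHoleCheck({true, true}));    // 1: read-only
    }
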
@@ -3279,9 +3415,9 @@ class HStoreGlobalGeneric: public HTemplateInstruction<3> {
HValue* global_object,
Handle<Object> name,
HValue* value,
- bool strict_mode)
+ StrictModeFlag strict_mode_flag)
: name_(name),
- strict_mode_(strict_mode) {
+ strict_mode_flag_(strict_mode_flag) {
SetOperandAt(0, context);
SetOperandAt(1, global_object);
SetOperandAt(2, value);
@@ -3293,11 +3429,11 @@ class HStoreGlobalGeneric: public HTemplateInstruction<3> {
HValue* global_object() { return OperandAt(1); }
Handle<Object> name() const { return name_; }
HValue* value() { return OperandAt(2); }
- bool strict_mode() { return strict_mode_; }
+ StrictModeFlag strict_mode_flag() { return strict_mode_flag_; }
virtual void PrintDataTo(StringStream* stream);
- virtual Representation RequiredInputRepresentation(int index) const {
+ virtual Representation RequiredInputRepresentation(int index) {
return Representation::Tagged();
}
@@ -3305,7 +3441,7 @@ class HStoreGlobalGeneric: public HTemplateInstruction<3> {
private:
Handle<Object> name_;
- bool strict_mode_;
+ StrictModeFlag strict_mode_flag_;
};
@@ -3320,7 +3456,7 @@ class HLoadContextSlot: public HUnaryOperation {
int slot_index() const { return slot_index_; }
- virtual Representation RequiredInputRepresentation(int index) const {
+ virtual Representation RequiredInputRepresentation(int index) {
return Representation::Tagged();
}
@@ -3339,13 +3475,6 @@ class HLoadContextSlot: public HUnaryOperation {
};
-static inline bool StoringValueNeedsWriteBarrier(HValue* value) {
- return !value->type().IsBoolean()
- && !value->type().IsSmi()
- && !(value->IsConstant() && HConstant::cast(value)->InOldSpace());
-}
-
-
class HStoreContextSlot: public HTemplateInstruction<2> {
public:
HStoreContextSlot(HValue* context, int slot_index, HValue* value)
@@ -3363,7 +3492,7 @@ class HStoreContextSlot: public HTemplateInstruction<2> {
return StoringValueNeedsWriteBarrier(value());
}
- virtual Representation RequiredInputRepresentation(int index) const {
+ virtual Representation RequiredInputRepresentation(int index) {
return Representation::Tagged();
}
@@ -3396,7 +3525,7 @@ class HLoadNamedField: public HUnaryOperation {
bool is_in_object() const { return is_in_object_; }
int offset() const { return offset_; }
- virtual Representation RequiredInputRepresentation(int index) const {
+ virtual Representation RequiredInputRepresentation(int index) {
return Representation::Tagged();
}
virtual void PrintDataTo(StringStream* stream);
@@ -3428,7 +3557,7 @@ class HLoadNamedFieldPolymorphic: public HTemplateInstruction<2> {
Handle<String> name() { return name_; }
bool need_generic() { return need_generic_; }
- virtual Representation RequiredInputRepresentation(int index) const {
+ virtual Representation RequiredInputRepresentation(int index) {
return Representation::Tagged();
}
@@ -3463,7 +3592,7 @@ class HLoadNamedGeneric: public HTemplateInstruction<2> {
HValue* object() { return OperandAt(1); }
Handle<Object> name() const { return name_; }
- virtual Representation RequiredInputRepresentation(int index) const {
+ virtual Representation RequiredInputRepresentation(int index) {
return Representation::Tagged();
}
@@ -3487,7 +3616,7 @@ class HLoadFunctionPrototype: public HUnaryOperation {
HValue* function() { return OperandAt(0); }
- virtual Representation RequiredInputRepresentation(int index) const {
+ virtual Representation RequiredInputRepresentation(int index) {
return Representation::Tagged();
}
@@ -3511,7 +3640,7 @@ class HLoadKeyedFastElement: public HTemplateInstruction<2> {
HValue* object() { return OperandAt(0); }
HValue* key() { return OperandAt(1); }
- virtual Representation RequiredInputRepresentation(int index) const {
+ virtual Representation RequiredInputRepresentation(int index) {
// The key is supposed to be Integer32.
return index == 0
? Representation::Tagged()
@@ -3520,7 +3649,7 @@ class HLoadKeyedFastElement: public HTemplateInstruction<2> {
virtual void PrintDataTo(StringStream* stream);
- bool RequiresHoleCheck() const;
+ bool RequiresHoleCheck();
DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastElement)
@@ -3542,7 +3671,7 @@ class HLoadKeyedFastDoubleElement: public HTemplateInstruction<2> {
HValue* elements() { return OperandAt(0); }
HValue* key() { return OperandAt(1); }
- virtual Representation RequiredInputRepresentation(int index) const {
+ virtual Representation RequiredInputRepresentation(int index) {
// The key is supposed to be Integer32.
return index == 0
? Representation::Tagged()
@@ -3551,8 +3680,6 @@ class HLoadKeyedFastDoubleElement: public HTemplateInstruction<2> {
virtual void PrintDataTo(StringStream* stream);
- bool RequiresHoleCheck() const;
-
DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastDoubleElement)
protected:
@@ -3582,7 +3709,7 @@ class HLoadKeyedSpecializedArrayElement: public HTemplateInstruction<2> {
virtual void PrintDataTo(StringStream* stream);
- virtual Representation RequiredInputRepresentation(int index) const {
+ virtual Representation RequiredInputRepresentation(int index) {
// The key is supposed to be Integer32, but the base pointer
// for the element load is a naked pointer.
return index == 0
@@ -3625,7 +3752,7 @@ class HLoadKeyedGeneric: public HTemplateInstruction<3> {
virtual void PrintDataTo(StringStream* stream);
- virtual Representation RequiredInputRepresentation(int index) const {
+ virtual Representation RequiredInputRepresentation(int index) {
return Representation::Tagged();
}
@@ -3654,7 +3781,7 @@ class HStoreNamedField: public HTemplateInstruction<2> {
DECLARE_CONCRETE_INSTRUCTION(StoreNamedField)
- virtual Representation RequiredInputRepresentation(int index) const {
+ virtual Representation RequiredInputRepresentation(int index) {
return Representation::Tagged();
}
virtual void PrintDataTo(StringStream* stream);
@@ -3686,9 +3813,9 @@ class HStoreNamedGeneric: public HTemplateInstruction<3> {
HValue* object,
Handle<String> name,
HValue* value,
- bool strict_mode)
+ StrictModeFlag strict_mode_flag)
: name_(name),
- strict_mode_(strict_mode) {
+ strict_mode_flag_(strict_mode_flag) {
SetOperandAt(0, object);
SetOperandAt(1, value);
SetOperandAt(2, context);
@@ -3699,11 +3826,11 @@ class HStoreNamedGeneric: public HTemplateInstruction<3> {
HValue* value() { return OperandAt(1); }
HValue* context() { return OperandAt(2); }
Handle<String> name() { return name_; }
- bool strict_mode() { return strict_mode_; }
+ StrictModeFlag strict_mode_flag() { return strict_mode_flag_; }
virtual void PrintDataTo(StringStream* stream);
- virtual Representation RequiredInputRepresentation(int index) const {
+ virtual Representation RequiredInputRepresentation(int index) {
return Representation::Tagged();
}
@@ -3711,20 +3838,22 @@ class HStoreNamedGeneric: public HTemplateInstruction<3> {
private:
Handle<String> name_;
- bool strict_mode_;
+ StrictModeFlag strict_mode_flag_;
};
class HStoreKeyedFastElement: public HTemplateInstruction<3> {
public:
- HStoreKeyedFastElement(HValue* obj, HValue* key, HValue* val) {
+ HStoreKeyedFastElement(HValue* obj, HValue* key, HValue* val,
+ ElementsKind elements_kind = FAST_ELEMENTS)
+ : elements_kind_(elements_kind) {
SetOperandAt(0, obj);
SetOperandAt(1, key);
SetOperandAt(2, val);
SetFlag(kChangesArrayElements);
}
- virtual Representation RequiredInputRepresentation(int index) const {
+ virtual Representation RequiredInputRepresentation(int index) {
// The key is supposed to be Integer32.
return index == 1
? Representation::Integer32()
@@ -3734,14 +3863,28 @@ class HStoreKeyedFastElement: public HTemplateInstruction<3> {
HValue* object() { return OperandAt(0); }
HValue* key() { return OperandAt(1); }
HValue* value() { return OperandAt(2); }
+ bool value_is_smi() {
+ return elements_kind_ == FAST_SMI_ONLY_ELEMENTS;
+ }
bool NeedsWriteBarrier() {
- return StoringValueNeedsWriteBarrier(value());
+ if (value_is_smi()) {
+ return false;
+ } else {
+ return StoringValueNeedsWriteBarrier(value());
+ }
+ }
+
+ bool ValueNeedsSmiCheck() {
+ return value_is_smi();
}
virtual void PrintDataTo(StringStream* stream);
DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastElement)
+
+ private:
+ ElementsKind elements_kind_;
};
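
Threading an ElementsKind into HStoreKeyedFastElement encodes the smi-only-arrays trade-off: a FAST_SMI_ONLY_ELEMENTS backing store can never hold heap pointers, so the write barrier is provably dead, but the store site must instead defend the invariant with a smi check on the incoming value. A compact model of the two predicates above:

    enum class Kind { kFastSmiOnly, kFast };

    struct KeyedStoreModel {
      Kind kind;
      bool NeedsWriteBarrier(bool value_may_need_barrier) const {
        // Smi-only stores write no pointers, so no barrier is ever needed.
        if (kind == Kind::kFastSmiOnly) return false;
        return value_may_need_barrier;  // defer to the generic predicate
      }
      bool ValueNeedsSmiCheck() const {
        // The flip side: the invariant must be checked at the store.
        return kind == Kind::kFastSmiOnly;
      }
    };
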
@@ -3756,7 +3899,7 @@ class HStoreKeyedFastDoubleElement: public HTemplateInstruction<3> {
SetFlag(kChangesDoubleArrayElements);
}
- virtual Representation RequiredInputRepresentation(int index) const {
+ virtual Representation RequiredInputRepresentation(int index) {
if (index == 1) {
return Representation::Integer32();
} else if (index == 2) {
@@ -3795,7 +3938,7 @@ class HStoreKeyedSpecializedArrayElement: public HTemplateInstruction<3> {
virtual void PrintDataTo(StringStream* stream);
- virtual Representation RequiredInputRepresentation(int index) const {
+ virtual Representation RequiredInputRepresentation(int index) {
if (index == 0) {
return Representation::External();
} else {
@@ -3828,8 +3971,8 @@ class HStoreKeyedGeneric: public HTemplateInstruction<4> {
HValue* object,
HValue* key,
HValue* value,
- bool strict_mode)
- : strict_mode_(strict_mode) {
+ StrictModeFlag strict_mode_flag)
+ : strict_mode_flag_(strict_mode_flag) {
SetOperandAt(0, object);
SetOperandAt(1, key);
SetOperandAt(2, value);
@@ -3841,9 +3984,9 @@ class HStoreKeyedGeneric: public HTemplateInstruction<4> {
HValue* key() { return OperandAt(1); }
HValue* value() { return OperandAt(2); }
HValue* context() { return OperandAt(3); }
- bool strict_mode() { return strict_mode_; }
+ StrictModeFlag strict_mode_flag() { return strict_mode_flag_; }
- virtual Representation RequiredInputRepresentation(int index) const {
+ virtual Representation RequiredInputRepresentation(int index) {
return Representation::Tagged();
}
@@ -3852,7 +3995,45 @@ class HStoreKeyedGeneric: public HTemplateInstruction<4> {
DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric)
private:
- bool strict_mode_;
+ StrictModeFlag strict_mode_flag_;
+};
+
+
+class HTransitionElementsKind: public HTemplateInstruction<1> {
+ public:
+ HTransitionElementsKind(HValue* object,
+ Handle<Map> original_map,
+ Handle<Map> transitioned_map)
+ : original_map_(original_map),
+ transitioned_map_(transitioned_map) {
+ SetOperandAt(0, object);
+ SetFlag(kUseGVN);
+ SetFlag(kChangesElementsKind);
+ set_representation(Representation::Tagged());
+ }
+
+ virtual Representation RequiredInputRepresentation(int index) {
+ return Representation::Tagged();
+ }
+
+ HValue* object() { return OperandAt(0); }
+ Handle<Map> original_map() { return original_map_; }
+ Handle<Map> transitioned_map() { return transitioned_map_; }
+
+ virtual void PrintDataTo(StringStream* stream);
+
+ DECLARE_CONCRETE_INSTRUCTION(TransitionElementsKind)
+
+ protected:
+ virtual bool DataEquals(HValue* other) {
+ HTransitionElementsKind* instr = HTransitionElementsKind::cast(other);
+ return original_map_.is_identical_to(instr->original_map()) &&
+ transitioned_map_.is_identical_to(instr->transitioned_map());
+ }
+
+ private:
+ Handle<Map> original_map_;
+ Handle<Map> transitioned_map_;
};
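
HTransitionElementsKind is marked kUseGVN with DataEquals comparing the (original map, transitioned map) pair, so repeated transitions of the same object between the same two maps can be commoned. It advertises kChangesElementsKind, which is why HElementsKind and HLoadElements earlier in this patch now depend on that flag rather than only on kDependsOnMaps. A minimal model of the flag interaction, assuming the usual GVN rule that a cached value is killed only by intervening instructions that change something it depends on:

    #include <bitset>

    enum Flag { kMaps, kElementsKind, kNumFlags };
    using Effects = std::bitset<kNumFlags>;

    // A cached instruction is invalidated only if an intervening
    // instruction's "changes" set overlaps its "depends on" set.
    static bool Kills(const Effects& changes, const Effects& depends_on) {
      return (changes & depends_on).any();
    }
    // With the split, a pure map change (changes = {kMaps}) no longer kills
    // a load that depends only on {kElementsKind}, and vice versa.
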
@@ -3865,7 +4046,7 @@ class HStringAdd: public HBinaryOperation {
SetFlag(kDependsOnMaps);
}
- virtual Representation RequiredInputRepresentation(int index) const {
+ virtual Representation RequiredInputRepresentation(int index) {
return Representation::Tagged();
}
@@ -3891,7 +4072,7 @@ class HStringCharCodeAt: public HTemplateInstruction<3> {
SetFlag(kDependsOnMaps);
}
- virtual Representation RequiredInputRepresentation(int index) const {
+ virtual Representation RequiredInputRepresentation(int index) {
// The index is supposed to be Integer32.
return index == 2
? Representation::Integer32()
@@ -3918,15 +4099,16 @@ class HStringCharFromCode: public HTemplateInstruction<2> {
HStringCharFromCode(HValue* context, HValue* char_code) {
SetOperandAt(0, context);
SetOperandAt(1, char_code);
- set_representation(Representation::Tagged());
+ set_representation(Representation::Tagged());
SetFlag(kUseGVN);
}
- virtual Representation RequiredInputRepresentation(int index) const {
+ virtual Representation RequiredInputRepresentation(int index) {
return index == 0
? Representation::Tagged()
: Representation::Integer32();
}
+ virtual HType CalculateInferredType();
HValue* context() { return OperandAt(0); }
HValue* value() { return OperandAt(1); }
@@ -3945,7 +4127,7 @@ class HStringLength: public HUnaryOperation {
SetFlag(kDependsOnMaps);
}
- virtual Representation RequiredInputRepresentation(int index) const {
+ virtual Representation RequiredInputRepresentation(int index) {
return Representation::Tagged();
}
@@ -4001,9 +4183,10 @@ class HArrayLiteral: public HMaterializedLiteral<1> {
bool IsCopyOnWrite() const;
- virtual Representation RequiredInputRepresentation(int index) const {
+ virtual Representation RequiredInputRepresentation(int index) {
return Representation::Tagged();
}
+ virtual HType CalculateInferredType();
DECLARE_CONCRETE_INSTRUCTION(ArrayLiteral)
@@ -4013,14 +4196,49 @@ class HArrayLiteral: public HMaterializedLiteral<1> {
};
-class HObjectLiteral: public HMaterializedLiteral<1> {
+class HObjectLiteralFast: public HMaterializedLiteral<1> {
public:
- HObjectLiteral(HValue* context,
- Handle<FixedArray> constant_properties,
- bool fast_elements,
- int literal_index,
- int depth,
- bool has_function)
+ HObjectLiteralFast(HValue* context,
+ Handle<JSObject> boilerplate,
+ int total_size,
+ int literal_index,
+ int depth)
+ : HMaterializedLiteral<1>(literal_index, depth),
+ boilerplate_(boilerplate),
+ total_size_(total_size) {
+ SetOperandAt(0, context);
+ }
+
+ // Maximum depth and total number of properties for object literal
+ // graphs to be considered for fast deep-copying.
+ static const int kMaxObjectLiteralDepth = 3;
+ static const int kMaxObjectLiteralProperties = 8;
+
+ HValue* context() { return OperandAt(0); }
+ Handle<JSObject> boilerplate() const { return boilerplate_; }
+ int total_size() const { return total_size_; }
+
+ virtual Representation RequiredInputRepresentation(int index) {
+ return Representation::Tagged();
+ }
+ virtual HType CalculateInferredType();
+
+ DECLARE_CONCRETE_INSTRUCTION(ObjectLiteralFast)
+
+ private:
+ Handle<JSObject> boilerplate_;
+ int total_size_;
+};
+
+
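
The two constants bound the fast path: a literal qualifies for boilerplate deep-copying only while its nesting depth stays at or under 3 and its cumulative property count at or under 8; anything larger takes HObjectLiteralGeneric below. A toy walker showing the shape of such an eligibility check (the real one lives in the graph builder, which this hunk does not show):

    #include <vector>

    struct ToyLiteral {
      int own_properties = 0;
      std::vector<ToyLiteral> nested;  // object-valued properties
    };

    // Returns false as soon as either limit is exceeded.
    static bool WithinFastLimits(const ToyLiteral& lit, int depth, int* total) {
      if (depth > 3) return false;                   // kMaxObjectLiteralDepth
      *total += lit.own_properties + static_cast<int>(lit.nested.size());
      if (*total > 8) return false;                  // kMaxObjectLiteralProperties
      for (const ToyLiteral& child : lit.nested) {
        if (!WithinFastLimits(child, depth + 1, total)) return false;
      }
      return true;
    }
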
+class HObjectLiteralGeneric: public HMaterializedLiteral<1> {
+ public:
+ HObjectLiteralGeneric(HValue* context,
+ Handle<FixedArray> constant_properties,
+ bool fast_elements,
+ int literal_index,
+ int depth,
+ bool has_function)
: HMaterializedLiteral<1>(literal_index, depth),
constant_properties_(constant_properties),
fast_elements_(fast_elements),
@@ -4035,11 +4253,12 @@ class HObjectLiteral: public HMaterializedLiteral<1> {
bool fast_elements() const { return fast_elements_; }
bool has_function() const { return has_function_; }
- virtual Representation RequiredInputRepresentation(int index) const {
+ virtual Representation RequiredInputRepresentation(int index) {
return Representation::Tagged();
}
+ virtual HType CalculateInferredType();
- DECLARE_CONCRETE_INSTRUCTION(ObjectLiteral)
+ DECLARE_CONCRETE_INSTRUCTION(ObjectLiteralGeneric)
private:
Handle<FixedArray> constant_properties_;
@@ -4058,15 +4277,17 @@ class HRegExpLiteral: public HMaterializedLiteral<1> {
pattern_(pattern),
flags_(flags) {
SetOperandAt(0, context);
+ SetAllSideEffects();
}
HValue* context() { return OperandAt(0); }
Handle<String> pattern() { return pattern_; }
Handle<String> flags() { return flags_; }
- virtual Representation RequiredInputRepresentation(int index) const {
+ virtual Representation RequiredInputRepresentation(int index) {
return Representation::Tagged();
}
+ virtual HType CalculateInferredType();
DECLARE_CONCRETE_INSTRUCTION(RegExpLiteral)
@@ -4088,9 +4309,10 @@ class HFunctionLiteral: public HTemplateInstruction<1> {
HValue* context() { return OperandAt(0); }
- virtual Representation RequiredInputRepresentation(int index) const {
+ virtual Representation RequiredInputRepresentation(int index) {
return Representation::Tagged();
}
+ virtual HType CalculateInferredType();
DECLARE_CONCRETE_INSTRUCTION(FunctionLiteral)
@@ -4114,7 +4336,10 @@ class HTypeof: public HTemplateInstruction<2> {
HValue* context() { return OperandAt(0); }
HValue* value() { return OperandAt(1); }
- virtual Representation RequiredInputRepresentation(int index) const {
+ virtual HValue* Canonicalize();
+ virtual void PrintDataTo(StringStream* stream);
+
+ virtual Representation RequiredInputRepresentation(int index) {
return Representation::Tagged();
}
@@ -4128,11 +4353,11 @@ class HToFastProperties: public HUnaryOperation {
// This instruction is not marked as having side effects, but
// changes the map of the input operand. Use it only when creating
// object literals.
- ASSERT(value->IsObjectLiteral());
+ ASSERT(value->IsObjectLiteralGeneric() || value->IsObjectLiteralFast());
set_representation(Representation::Tagged());
}
- virtual Representation RequiredInputRepresentation(int index) const {
+ virtual Representation RequiredInputRepresentation(int index) {
return Representation::Tagged();
}
@@ -4146,7 +4371,7 @@ class HValueOf: public HUnaryOperation {
set_representation(Representation::Tagged());
}
- virtual Representation RequiredInputRepresentation(int index) const {
+ virtual Representation RequiredInputRepresentation(int index) {
return Representation::Tagged();
}
@@ -4162,7 +4387,7 @@ class HDeleteProperty: public HBinaryOperation {
SetAllSideEffects();
}
- virtual Representation RequiredInputRepresentation(int index) const {
+ virtual Representation RequiredInputRepresentation(int index) {
return Representation::Tagged();
}
@@ -4189,7 +4414,7 @@ class HIn: public HTemplateInstruction<3> {
HValue* key() { return OperandAt(1); }
HValue* object() { return OperandAt(2); }
- virtual Representation RequiredInputRepresentation(int index) const {
+ virtual Representation RequiredInputRepresentation(int index) {
return Representation::Tagged();
}
diff --git a/deps/v8/src/hydrogen.cc b/deps/v8/src/hydrogen.cc
index c625fba8d..5c0703bc3 100644
--- a/deps/v8/src/hydrogen.cc
+++ b/deps/v8/src/hydrogen.cc
@@ -164,10 +164,11 @@ void HBasicBlock::Finish(HControlInstruction* end) {
}
-void HBasicBlock::Goto(HBasicBlock* block) {
+void HBasicBlock::Goto(HBasicBlock* block, bool drop_extra) {
if (block->IsInlineReturnTarget()) {
AddInstruction(new(zone()) HLeaveInlined);
last_environment_ = last_environment()->outer();
+ if (drop_extra) last_environment_->Drop(1);
}
AddSimulate(AstNode::kNoNumber);
HGoto* instr = new(zone()) HGoto(block);
@@ -175,11 +176,14 @@ void HBasicBlock::Goto(HBasicBlock* block) {
}
-void HBasicBlock::AddLeaveInlined(HValue* return_value, HBasicBlock* target) {
+void HBasicBlock::AddLeaveInlined(HValue* return_value,
+ HBasicBlock* target,
+ bool drop_extra) {
ASSERT(target->IsInlineReturnTarget());
ASSERT(return_value != NULL);
AddInstruction(new(zone()) HLeaveInlined);
last_environment_ = last_environment()->outer();
+ if (drop_extra) last_environment_->Drop(1);
last_environment()->Push(return_value);
AddSimulate(AstNode::kNoNumber);
HGoto* instr = new(zone()) HGoto(target);
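
The new drop_extra flag covers returns from an inlined function in positions where the caller's environment carries one extra pushed value around the call: on leaving the inlined frame, the outer environment drops that slot before, for value returns, pushing the result. A toy model of the bookkeeping in AddLeaveInlined:

    #include <vector>

    struct EnvModel {
      std::vector<int> stack;   // simulated expression stack
      EnvModel* outer = nullptr;
      void Drop(int n) { stack.resize(stack.size() - n); }
      void Push(int v) { stack.push_back(v); }
    };

    // Mirrors the AddLeaveInlined sequence above: pop back to the caller's
    // environment, drop the extra slot if requested, push the return value.
    static EnvModel* LeaveInlined(EnvModel* env, int return_value,
                                  bool drop_extra) {
      EnvModel* caller = env->outer;
      if (drop_extra) caller->Drop(1);
      caller->Push(return_value);
      return caller;
    }
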
@@ -422,7 +426,7 @@ class ReachabilityAnalyzer BASE_EMBEDDED {
};
-void HGraph::Verify() const {
+void HGraph::Verify(bool do_full_verify) const {
for (int i = 0; i < blocks_.length(); i++) {
HBasicBlock* block = blocks_.at(i);
@@ -473,25 +477,27 @@ void HGraph::Verify() const {
// Check special property of first block to have no predecessors.
ASSERT(blocks_.at(0)->predecessors()->is_empty());
- // Check that the graph is fully connected.
- ReachabilityAnalyzer analyzer(entry_block_, blocks_.length(), NULL);
- ASSERT(analyzer.visited_count() == blocks_.length());
+ if (do_full_verify) {
+ // Check that the graph is fully connected.
+ ReachabilityAnalyzer analyzer(entry_block_, blocks_.length(), NULL);
+ ASSERT(analyzer.visited_count() == blocks_.length());
- // Check that entry block dominator is NULL.
- ASSERT(entry_block_->dominator() == NULL);
+ // Check that entry block dominator is NULL.
+ ASSERT(entry_block_->dominator() == NULL);
- // Check dominators.
- for (int i = 0; i < blocks_.length(); ++i) {
- HBasicBlock* block = blocks_.at(i);
- if (block->dominator() == NULL) {
- // Only start block may have no dominator assigned to.
- ASSERT(i == 0);
- } else {
- // Assert that block is unreachable if dominator must not be visited.
- ReachabilityAnalyzer dominator_analyzer(entry_block_,
- blocks_.length(),
- block->dominator());
- ASSERT(!dominator_analyzer.reachable()->Contains(block->block_id()));
+ // Check dominators.
+ for (int i = 0; i < blocks_.length(); ++i) {
+ HBasicBlock* block = blocks_.at(i);
+ if (block->dominator() == NULL) {
+ // Only the start block may have no dominator assigned.
+ ASSERT(i == 0);
+ } else {
+ // Assert that block is unreachable if dominator must not be visited.
+ ReachabilityAnalyzer dominator_analyzer(entry_block_,
+ blocks_.length(),
+ block->dominator());
+ ASSERT(!dominator_analyzer.reachable()->Contains(block->block_id()));
+ }
}
}
}
@@ -539,7 +545,7 @@ HConstant* HGraph::GetConstantHole() {
HGraphBuilder::HGraphBuilder(CompilationInfo* info,
TypeFeedbackOracle* oracle)
: function_state_(NULL),
- initial_function_state_(this, info, oracle),
+ initial_function_state_(this, info, oracle, false),
ast_context_(NULL),
break_scope_(NULL),
graph_(NULL),
@@ -728,6 +734,7 @@ void HGraph::Postorder(HBasicBlock* block,
Postorder(it.Current(), visited, order, block);
}
} else {
+ ASSERT(block->IsFinished());
for (HSuccessorIterator it(block->end()); !it.Done(); it.Advance()) {
Postorder(it.Current(), visited, order, loop_header);
}
@@ -750,7 +757,7 @@ void HGraph::AssignDominators() {
// All others are back edges, and thus cannot dominate the loop header.
blocks_[i]->AssignCommonDominator(blocks_[i]->predecessors()->first());
} else {
- for (int j = 0; j < blocks_[i]->predecessors()->length(); ++j) {
+ for (int j = blocks_[i]->predecessors()->length() - 1; j >= 0; --j) {
blocks_[i]->AssignCommonDominator(blocks_[i]->predecessors()->at(j));
}
}
@@ -850,7 +857,7 @@ void HGraph::EliminateUnreachablePhis() {
}
-bool HGraph::CheckPhis() {
+bool HGraph::CheckArgumentsPhiUses() {
int block_count = blocks_.length();
for (int i = 0; i < block_count; ++i) {
for (int j = 0; j < blocks_[i]->phis()->length(); ++j) {
@@ -863,13 +870,11 @@ bool HGraph::CheckPhis() {
}
-bool HGraph::CollectPhis() {
+bool HGraph::CheckConstPhiUses() {
int block_count = blocks_.length();
- phi_list_ = new ZoneList<HPhi*>(block_count);
for (int i = 0; i < block_count; ++i) {
for (int j = 0; j < blocks_[i]->phis()->length(); ++j) {
HPhi* phi = blocks_[i]->phis()->at(j);
- phi_list_->Add(phi);
// Check for the hole value (from an uninitialized const).
for (int k = 0; k < phi->OperandCount(); k++) {
if (phi->OperandAt(k) == GetConstantHole()) return false;
@@ -880,6 +885,18 @@ bool HGraph::CollectPhis() {
}
+void HGraph::CollectPhis() {
+ int block_count = blocks_.length();
+ phi_list_ = new ZoneList<HPhi*>(block_count);
+ for (int i = 0; i < block_count; ++i) {
+ for (int j = 0; j < blocks_[i]->phis()->length(); ++j) {
+ HPhi* phi = blocks_[i]->phis()->at(j);
+ phi_list_->Add(phi);
+ }
+ }
+}
+
+
void HGraph::InferTypes(ZoneList<HValue*>* worklist) {
BitVector in_worklist(GetMaximumValueID());
for (int i = 0; i < worklist->length(); ++i) {
@@ -1330,6 +1347,7 @@ class HGlobalValueNumberer BASE_EMBEDDED {
explicit HGlobalValueNumberer(HGraph* graph, CompilationInfo* info)
: graph_(graph),
info_(info),
+ removed_side_effects_(false),
block_side_effects_(graph->blocks()->length()),
loop_side_effects_(graph->blocks()->length()),
visited_on_paths_(graph->zone(), graph->blocks()->length()) {
@@ -1341,7 +1359,8 @@ class HGlobalValueNumberer BASE_EMBEDDED {
ASSERT(!info_->isolate()->heap()->allow_allocation(true));
}
- void Analyze();
+ // Returns true if values with side effects are removed.
+ bool Analyze();
private:
int CollectSideEffectsOnPathsToDominatedBlock(HBasicBlock* dominator,
@@ -1361,6 +1380,7 @@ class HGlobalValueNumberer BASE_EMBEDDED {
HGraph* graph_;
CompilationInfo* info_;
+ bool removed_side_effects_;
// A map of block IDs to their side effects.
ZoneList<int> block_side_effects_;
@@ -1374,13 +1394,14 @@ class HGlobalValueNumberer BASE_EMBEDDED {
};
-void HGlobalValueNumberer::Analyze() {
+bool HGlobalValueNumberer::Analyze() {
ComputeBlockSideEffects();
if (FLAG_loop_invariant_code_motion) {
LoopInvariantCodeMotion();
}
HValueMap* map = new(zone()) HValueMap();
AnalyzeBlock(graph_->entry_block(), map);
+ return removed_side_effects_;
}
@@ -1514,11 +1535,12 @@ void HGlobalValueNumberer::AnalyzeBlock(HBasicBlock* block, HValueMap* map) {
HInstruction* next = instr->next();
int flags = instr->ChangesFlags();
if (flags != 0) {
- ASSERT(!instr->CheckFlag(HValue::kUseGVN));
// Clear all instructions in the map that are affected by side effects.
map->Kill(flags);
TraceGVN("Instruction %d kills\n", instr->id());
- } else if (instr->CheckFlag(HValue::kUseGVN)) {
+ }
+ if (instr->CheckFlag(HValue::kUseGVN)) {
+ ASSERT(!instr->HasObservableSideEffects());
HValue* other = map->Lookup(instr);
if (other != NULL) {
ASSERT(instr->Equals(other) && other->Equals(instr));
@@ -1527,6 +1549,7 @@ void HGlobalValueNumberer::AnalyzeBlock(HBasicBlock* block, HValueMap* map) {
instr->Mnemonic(),
other->id(),
other->Mnemonic());
+ if (instr->HasSideEffects()) removed_side_effects_ = true;
instr->DeleteAndReplaceWith(other);
} else {
map->Add(instr);
@@ -1656,7 +1679,7 @@ Representation HInferRepresentation::TryChange(HValue* value) {
}
// Prefer unboxing over boxing, the latter is more expensive.
- if (tagged_count > non_tagged_count) Representation::None();
+ if (tagged_count > non_tagged_count) return Representation::None();
// Prefer Integer32 over Double, if possible.
if (int32_count > 0 && value->IsConvertibleToInteger()) {
@@ -1851,7 +1874,7 @@ void HGraph::InsertRepresentationChangeForUse(HValue* value,
}
if (new_value == NULL) {
- new_value = new(zone()) HChange(value, value->representation(), to,
+ new_value = new(zone()) HChange(value, to,
is_truncating, deoptimize_on_undefined);
}
@@ -1996,11 +2019,13 @@ void HGraph::ComputeMinusZeroChecks() {
// a (possibly inlined) function.
FunctionState::FunctionState(HGraphBuilder* owner,
CompilationInfo* info,
- TypeFeedbackOracle* oracle)
+ TypeFeedbackOracle* oracle,
+ bool drop_extra)
: owner_(owner),
compilation_info_(info),
oracle_(oracle),
call_context_(NULL),
+ drop_extra_(drop_extra),
function_return_(NULL),
test_context_(NULL),
outer_(owner->function_state()) {
@@ -2090,12 +2115,12 @@ void TestContext::ReturnValue(HValue* value) {
void EffectContext::ReturnInstruction(HInstruction* instr, int ast_id) {
ASSERT(!instr->IsControlInstruction());
owner()->AddInstruction(instr);
- if (instr->HasSideEffects()) owner()->AddSimulate(ast_id);
+ if (instr->HasObservableSideEffects()) owner()->AddSimulate(ast_id);
}
void EffectContext::ReturnControl(HControlInstruction* instr, int ast_id) {
- ASSERT(!instr->HasSideEffects());
+ ASSERT(!instr->HasObservableSideEffects());
HBasicBlock* empty_true = owner()->graph()->CreateBasicBlock();
HBasicBlock* empty_false = owner()->graph()->CreateBasicBlock();
instr->SetSuccessorAt(0, empty_true);
@@ -2113,12 +2138,12 @@ void ValueContext::ReturnInstruction(HInstruction* instr, int ast_id) {
}
owner()->AddInstruction(instr);
owner()->Push(instr);
- if (instr->HasSideEffects()) owner()->AddSimulate(ast_id);
+ if (instr->HasObservableSideEffects()) owner()->AddSimulate(ast_id);
}
void ValueContext::ReturnControl(HControlInstruction* instr, int ast_id) {
- ASSERT(!instr->HasSideEffects());
+ ASSERT(!instr->HasObservableSideEffects());
if (!arguments_allowed() && instr->CheckFlag(HValue::kIsArguments)) {
return owner()->Bailout("bad value context for arguments object value");
}
@@ -2143,7 +2168,7 @@ void TestContext::ReturnInstruction(HInstruction* instr, int ast_id) {
builder->AddInstruction(instr);
// We expect a simulate after every expression with side effects, though
// this one isn't actually needed (and wouldn't work if it were targeted).
- if (instr->HasSideEffects()) {
+ if (instr->HasObservableSideEffects()) {
builder->Push(instr);
builder->AddSimulate(ast_id);
builder->Pop();
@@ -2153,14 +2178,14 @@ void TestContext::ReturnInstruction(HInstruction* instr, int ast_id) {
void TestContext::ReturnControl(HControlInstruction* instr, int ast_id) {
- ASSERT(!instr->HasSideEffects());
+ ASSERT(!instr->HasObservableSideEffects());
HBasicBlock* empty_true = owner()->graph()->CreateBasicBlock();
HBasicBlock* empty_false = owner()->graph()->CreateBasicBlock();
instr->SetSuccessorAt(0, empty_true);
instr->SetSuccessorAt(1, empty_false);
owner()->current_block()->Finish(instr);
- empty_true->Goto(if_true());
- empty_false->Goto(if_false());
+ empty_true->Goto(if_true(), owner()->function_state()->drop_extra());
+ empty_false->Goto(if_false(), owner()->function_state()->drop_extra());
owner()->set_current_block(NULL);
}
@@ -2181,8 +2206,8 @@ void TestContext::BuildBranch(HValue* value) {
HBranch* test = new(zone()) HBranch(value, empty_true, empty_false, expected);
builder->current_block()->Finish(test);
- empty_true->Goto(if_true());
- empty_false->Goto(if_false());
+ empty_true->Goto(if_true(), owner()->function_state()->drop_extra());
+ empty_false->Goto(if_false(), owner()->function_state()->drop_extra());
builder->set_current_block(NULL);
}
@@ -2302,7 +2327,7 @@ HGraph* HGraphBuilder::CreateGraph() {
// Handle implicit declaration of the function name in named function
// expressions before other declarations.
if (scope->is_function_scope() && scope->function() != NULL) {
- HandleDeclaration(scope->function(), Variable::CONST, NULL);
+ HandleDeclaration(scope->function(), CONST, NULL);
}
VisitDeclarations(scope->declarations());
AddSimulate(AstNode::kDeclarationsId);
@@ -2323,17 +2348,24 @@ HGraph* HGraphBuilder::CreateGraph() {
graph()->OrderBlocks();
graph()->AssignDominators();
+
+#ifdef DEBUG
+ // Do a full verify after building the graph and computing dominators.
+ graph()->Verify(true);
+#endif
+
graph()->PropagateDeoptimizingMark();
- graph()->EliminateRedundantPhis();
- if (!graph()->CheckPhis()) {
- Bailout("Unsupported phi use of arguments object");
+ if (!graph()->CheckConstPhiUses()) {
+ Bailout("Unsupported phi use of const variable");
return NULL;
}
- if (FLAG_eliminate_dead_phis) graph()->EliminateUnreachablePhis();
- if (!graph()->CollectPhis()) {
- Bailout("Unsupported phi use of uninitialized constant");
+ graph()->EliminateRedundantPhis();
+ if (!graph()->CheckArgumentsPhiUses()) {
+ Bailout("Unsupported phi use of arguments");
return NULL;
}
+ if (FLAG_eliminate_dead_phis) graph()->EliminateUnreachablePhis();
+ graph()->CollectPhis();
HInferRepresentation rep(graph());
rep.Analyze();
@@ -2348,7 +2380,13 @@ HGraph* HGraphBuilder::CreateGraph() {
if (FLAG_use_gvn) {
HPhase phase("Global value numbering", graph());
HGlobalValueNumberer gvn(graph(), info());
- gvn.Analyze();
+ bool removed_side_effects = gvn.Analyze();
+ // Trigger a second analysis pass to further eliminate duplicate values that
+ // could only be discovered by removing side-effect-generating instructions
+ // during the first pass.
+ if (FLAG_smi_only_arrays && removed_side_effects) {
+ gvn.Analyze();
+ }
}
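
The rerun exists because GVN can now delete instructions that carry (unobservable) side effects; each such deletion shrinks the set of effects that kill value-map entries, which can expose matches the first pass missed. The patch caps this at one conditional rerun; the general shape is a fixed-point loop:

    // Generic shape of the idea (illustrative): repeat a pass while it
    // reports having removed side-effecting instructions, since each
    // removal can unlock further redundancy elimination.
    template <typename Pass>
    void RunToFixedPoint(Pass* pass, int max_passes) {
      for (int i = 0; i < max_passes; ++i) {
        if (!pass->Analyze()) break;  // Analyze() returns removed_side_effects
      }
    }
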
if (FLAG_use_range) {
@@ -2636,12 +2674,14 @@ void HGraphBuilder::VisitReturnStatement(ReturnStatement* stmt) {
test->if_false());
} else if (context->IsEffect()) {
CHECK_ALIVE(VisitForEffect(stmt->expression()));
- current_block()->Goto(function_return());
+ current_block()->Goto(function_return(), function_state()->drop_extra());
} else {
ASSERT(context->IsValue());
CHECK_ALIVE(VisitForValue(stmt->expression()));
HValue* return_value = environment()->Pop();
- current_block()->AddLeaveInlined(return_value, function_return());
+ current_block()->AddLeaveInlined(return_value,
+ function_return(),
+ function_state()->drop_extra());
}
set_current_block(NULL);
}
@@ -2669,43 +2709,95 @@ void HGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
return Bailout("SwitchStatement: too many clauses");
}
+ HValue* context = environment()->LookupContext();
+
CHECK_ALIVE(VisitForValue(stmt->tag()));
AddSimulate(stmt->EntryId());
HValue* tag_value = Pop();
HBasicBlock* first_test_block = current_block();
- // 1. Build all the tests, with dangling true branches. Unconditionally
- // deoptimize if we encounter a non-smi comparison.
+ SwitchType switch_type = UNKNOWN_SWITCH;
+
+ // 1. Extract clause type
for (int i = 0; i < clause_count; ++i) {
CaseClause* clause = clauses->at(i);
if (clause->is_default()) continue;
- if (!clause->label()->IsSmiLiteral()) {
- return Bailout("SwitchStatement: non-literal switch label");
+
+ if (switch_type == UNKNOWN_SWITCH) {
+ if (clause->label()->IsSmiLiteral()) {
+ switch_type = SMI_SWITCH;
+ } else if (clause->label()->IsStringLiteral()) {
+ switch_type = STRING_SWITCH;
+ } else {
+ return Bailout("SwitchStatement: non-literal switch label");
+ }
+ } else if ((switch_type == STRING_SWITCH &&
+ !clause->label()->IsStringLiteral()) ||
+ (switch_type == SMI_SWITCH &&
+ !clause->label()->IsSmiLiteral())) {
+ return Bailout("SwitchStatemnt: mixed label types are not supported");
}
+ }
- // Unconditionally deoptimize on the first non-smi compare.
- clause->RecordTypeFeedback(oracle());
- if (!clause->IsSmiCompare()) {
- // Finish with deoptimize and add uses of enviroment values to
- // account for invisible uses.
- current_block()->FinishExitWithDeoptimization(HDeoptimize::kUseAll);
- set_current_block(NULL);
- break;
+ HUnaryControlInstruction* string_check = NULL;
+ HBasicBlock* not_string_block = NULL;
+
+ // Test the switch's tag value if all clauses are string literals.
+ if (switch_type == STRING_SWITCH) {
+ string_check = new(zone()) HIsStringAndBranch(tag_value);
+ first_test_block = graph()->CreateBasicBlock();
+ not_string_block = graph()->CreateBasicBlock();
+
+ string_check->SetSuccessorAt(0, first_test_block);
+ string_check->SetSuccessorAt(1, not_string_block);
+ current_block()->Finish(string_check);
+
+ set_current_block(first_test_block);
+ }
+
+ // 2. Build all the tests, with dangling true branches.
+ for (int i = 0; i < clause_count; ++i) {
+ CaseClause* clause = clauses->at(i);
+ if (clause->is_default()) continue;
+
+ if (switch_type == SMI_SWITCH) {
+ clause->RecordTypeFeedback(oracle());
}
- // Otherwise generate a compare and branch.
+ // Generate a compare and branch.
CHECK_ALIVE(VisitForValue(clause->label()));
HValue* label_value = Pop();
- HCompareIDAndBranch* compare =
- new(zone()) HCompareIDAndBranch(tag_value,
- label_value,
- Token::EQ_STRICT);
- compare->SetInputRepresentation(Representation::Integer32());
- HBasicBlock* body_block = graph()->CreateBasicBlock();
+
HBasicBlock* next_test_block = graph()->CreateBasicBlock();
+ HBasicBlock* body_block = graph()->CreateBasicBlock();
+
+ HControlInstruction* compare;
+
+ if (switch_type == SMI_SWITCH) {
+ if (!clause->IsSmiCompare()) {
+ // Finish with deoptimize and add uses of environment values to
+ // account for invisible uses.
+ current_block()->FinishExitWithDeoptimization(HDeoptimize::kUseAll);
+ set_current_block(NULL);
+ break;
+ }
+
+ HCompareIDAndBranch* compare_ =
+ new(zone()) HCompareIDAndBranch(tag_value,
+ label_value,
+ Token::EQ_STRICT);
+ compare_->SetInputRepresentation(Representation::Integer32());
+ compare = compare_;
+ } else {
+ compare = new(zone()) HStringCompareAndBranch(context, tag_value,
+ label_value,
+ Token::EQ_STRICT);
+ }
+
compare->SetSuccessorAt(0, body_block);
compare->SetSuccessorAt(1, next_test_block);
current_block()->Finish(compare);
+
set_current_block(next_test_block);
}
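
As a rough model of the classification step above (hypothetical Clause type, not V8's AST): the builder bails out unless every labeled clause is a Smi literal or every labeled clause is a string literal.

    #include <vector>

    enum SwitchType { UNKNOWN_SWITCH, SMI_SWITCH, STRING_SWITCH, UNSUPPORTED };
    struct Clause { bool is_default; bool is_smi_literal; bool is_string_literal; };

    SwitchType Classify(const std::vector<Clause>& clauses) {
      SwitchType type = UNKNOWN_SWITCH;
      for (const Clause& c : clauses) {
        if (c.is_default) continue;  // the default clause carries no label
        SwitchType label = c.is_smi_literal    ? SMI_SWITCH
                         : c.is_string_literal ? STRING_SWITCH
                                               : UNSUPPORTED;
        if (label == UNSUPPORTED) return UNSUPPORTED;  // non-literal label
        if (type == UNKNOWN_SWITCH) type = label;
        else if (type != label) return UNSUPPORTED;    // mixed label types
      }
      return type;
    }
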
@@ -2713,10 +2805,15 @@ void HGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
// exit. This block is NULL if we deoptimized.
HBasicBlock* last_block = current_block();
- // 2. Loop over the clauses and the linked list of tests in lockstep,
+ if (not_string_block != NULL) {
+ last_block = CreateJoin(last_block, not_string_block, stmt->ExitId());
+ }
+
+ // 3. Loop over the clauses and the linked list of tests in lockstep,
// translating the clause bodies.
HBasicBlock* curr_test_block = first_test_block;
HBasicBlock* fall_through_block = NULL;
+
BreakAndContinueInfo break_info(stmt);
{ BreakAndContinueScope push(&break_info, this);
for (int i = 0; i < clause_count; ++i) {
@@ -3125,12 +3222,22 @@ void HGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
Variable* variable = expr->var();
- if (variable->mode() == Variable::LET) {
+ if (variable->mode() == LET) {
return Bailout("reference to let variable");
}
switch (variable->location()) {
case Variable::UNALLOCATED: {
- LookupResult lookup;
+ // Handle known global constants like 'undefined' specially to avoid a
+ // load from a global cell for them.
+ Handle<Object> constant_value =
+ isolate()->factory()->GlobalConstantFor(variable->name());
+ if (!constant_value.is_null()) {
+ HConstant* instr =
+ new(zone()) HConstant(constant_value, Representation::Tagged());
+ return ast_context()->ReturnInstruction(instr, expr->id());
+ }
+
+ LookupResult lookup(isolate());
GlobalPropertyAccess type =
LookupGlobalProperty(variable, &lookup, false);
@@ -3142,8 +3249,8 @@ void HGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
if (type == kUseCell) {
Handle<GlobalObject> global(info()->global_object());
Handle<JSGlobalPropertyCell> cell(global->GetPropertyCell(&lookup));
- bool check_hole = !lookup.IsDontDelete() || lookup.IsReadOnly();
- HLoadGlobalCell* instr = new(zone()) HLoadGlobalCell(cell, check_hole);
+ HLoadGlobalCell* instr =
+ new(zone()) HLoadGlobalCell(cell, lookup.GetPropertyDetails());
return ast_context()->ReturnInstruction(instr, expr->id());
} else {
HValue* context = environment()->LookupContext();
@@ -3162,7 +3269,7 @@ void HGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
case Variable::PARAMETER:
case Variable::LOCAL: {
HValue* value = environment()->Lookup(variable);
- if (variable->mode() == Variable::CONST &&
+ if (variable->mode() == CONST &&
value == graph()->GetConstantHole()) {
return Bailout("reference to uninitialized const variable");
}
@@ -3170,7 +3277,7 @@ void HGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
}
case Variable::CONTEXT: {
- if (variable->mode() == Variable::CONST) {
+ if (variable->mode() == CONST) {
return Bailout("reference to const context slot");
}
HValue* context = BuildContextChainWalk(variable);
@@ -3209,18 +3316,78 @@ void HGraphBuilder::VisitRegExpLiteral(RegExpLiteral* expr) {
}
+// Determines whether the given object literal boilerplate satisfies all
+// limits to be considered for fast deep-copying and computes the total
+// size of all objects that are part of the graph.
+static bool IsFastObjectLiteral(Handle<JSObject> boilerplate,
+ int max_depth,
+ int* max_properties,
+ int* total_size) {
+ if (max_depth <= 0) return false;
+
+ FixedArrayBase* elements = boilerplate->elements();
+ if (elements->length() > 0 &&
+ elements->map() != HEAP->fixed_cow_array_map()) {
+ return false;
+ }
+
+ FixedArray* properties = boilerplate->properties();
+ if (properties->length() > 0) {
+ return false;
+ } else {
+ int nof = boilerplate->map()->inobject_properties();
+ for (int i = 0; i < nof; i++) {
+ if ((*max_properties)-- <= 0) return false;
+ Handle<Object> value(boilerplate->InObjectPropertyAt(i));
+ if (value->IsJSObject()) {
+ Handle<JSObject> value_object = Handle<JSObject>::cast(value);
+ if (!IsFastObjectLiteral(value_object,
+ max_depth - 1,
+ max_properties,
+ total_size)) {
+ return false;
+ }
+ }
+ }
+ }
+
+ *total_size += boilerplate->map()->instance_size();
+ return true;
+}
+
+
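A simplified stand-alone model of the budget logic above (hypothetical Node type): the depth budget is per-path while the property budget is shared across the whole object graph, matching how max_depth is decremented per recursion and *max_properties through a pointer.

    #include <vector>

    struct Node { bool is_object; std::vector<Node> properties; };

    static bool WithinLiteralBudget(const Node& n, int max_depth,
                                    int* max_properties) {
      if (max_depth <= 0) return false;
      for (const Node& p : n.properties) {
        if ((*max_properties)-- <= 0) return false;  // budget shared globally
        if (p.is_object &&
            !WithinLiteralBudget(p, max_depth - 1, max_properties)) {
          return false;
        }
      }
      return true;
    }
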
void HGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
+ Handle<JSFunction> closure = function_state()->compilation_info()->closure();
HValue* context = environment()->LookupContext();
- HObjectLiteral* literal =
- new(zone()) HObjectLiteral(context,
- expr->constant_properties(),
- expr->fast_elements(),
- expr->literal_index(),
- expr->depth(),
- expr->has_function());
+ HInstruction* literal;
+
+ // Check whether to use fast or slow deep-copying for boilerplate.
+ int total_size = 0;
+ int max_properties = HObjectLiteralFast::kMaxObjectLiteralProperties;
+ Handle<Object> boilerplate(closure->literals()->get(expr->literal_index()));
+ if (boilerplate->IsJSObject() &&
+ IsFastObjectLiteral(Handle<JSObject>::cast(boilerplate),
+ HObjectLiteralFast::kMaxObjectLiteralDepth,
+ &max_properties,
+ &total_size)) {
+ Handle<JSObject> boilerplate_object = Handle<JSObject>::cast(boilerplate);
+ literal = new(zone()) HObjectLiteralFast(context,
+ boilerplate_object,
+ total_size,
+ expr->literal_index(),
+ expr->depth());
+ } else {
+ literal = new(zone()) HObjectLiteralGeneric(context,
+ expr->constant_properties(),
+ expr->fast_elements(),
+ expr->literal_index(),
+ expr->depth(),
+ expr->has_function());
+ }
+
// The object is expected in the bailout environment during computation
// of the property values and is the value of the entire expression.
PushAndAdd(literal);
@@ -3250,7 +3417,7 @@ void HGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
literal,
name,
value,
- function_strict_mode());
+ function_strict_mode_flag());
AddInstruction(store);
AddSimulate(key->id());
} else {
@@ -3311,16 +3478,49 @@ void HGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
HValue* value = Pop();
if (!Smi::IsValid(i)) return Bailout("Non-smi key in array literal");
- // Load the elements array before the first store.
- if (elements == NULL) {
- elements = new(zone()) HLoadElements(literal);
- AddInstruction(elements);
- }
+ elements = new(zone()) HLoadElements(literal);
+ AddInstruction(elements);
HValue* key = AddInstruction(
new(zone()) HConstant(Handle<Object>(Smi::FromInt(i)),
Representation::Integer32()));
+ HInstruction* elements_kind =
+ AddInstruction(new(zone()) HElementsKind(literal));
+ HBasicBlock* store_fast = graph()->CreateBasicBlock();
+ // Two empty blocks to satisfy edge split form.
+ HBasicBlock* store_fast_edgesplit1 = graph()->CreateBasicBlock();
+ HBasicBlock* store_fast_edgesplit2 = graph()->CreateBasicBlock();
+ HBasicBlock* store_generic = graph()->CreateBasicBlock();
+ HBasicBlock* check_smi_only_elements = graph()->CreateBasicBlock();
+ HBasicBlock* join = graph()->CreateBasicBlock();
+
+ HIsSmiAndBranch* smicheck = new(zone()) HIsSmiAndBranch(value);
+ smicheck->SetSuccessorAt(0, store_fast_edgesplit1);
+ smicheck->SetSuccessorAt(1, check_smi_only_elements);
+ current_block()->Finish(smicheck);
+ store_fast_edgesplit1->Finish(new(zone()) HGoto(store_fast));
+
+ set_current_block(check_smi_only_elements);
+ HCompareConstantEqAndBranch* smi_elements_check =
+ new(zone()) HCompareConstantEqAndBranch(elements_kind,
+ FAST_ELEMENTS,
+ Token::EQ_STRICT);
+ smi_elements_check->SetSuccessorAt(0, store_fast_edgesplit2);
+ smi_elements_check->SetSuccessorAt(1, store_generic);
+ current_block()->Finish(smi_elements_check);
+ store_fast_edgesplit2->Finish(new(zone()) HGoto(store_fast));
+
+ set_current_block(store_fast);
AddInstruction(new(zone()) HStoreKeyedFastElement(elements, key, value));
+ store_fast->Goto(join);
+
+ set_current_block(store_generic);
+ AddInstruction(BuildStoreKeyedGeneric(literal, key, value));
+ store_generic->Goto(join);
+
+ join->SetJoinId(expr->id());
+ set_current_block(join);
+
AddSimulate(expr->GetIdForElement(i));
}
return ast_context()->ReturnValue(Pop());
@@ -3395,7 +3595,7 @@ HInstruction* HGraphBuilder::BuildStoreNamedGeneric(HValue* object,
object,
name,
value,
- function_strict_mode());
+ function_strict_mode_flag());
}
@@ -3409,7 +3609,7 @@ HInstruction* HGraphBuilder::BuildStoreNamed(HValue* object,
Handle<String> name = Handle<String>::cast(key->handle());
ASSERT(!name.is_null());
- LookupResult lookup;
+ LookupResult lookup(isolate());
SmallMapList* types = expr->GetReceiverTypes();
bool is_monomorphic = expr->IsMonomorphic() &&
ComputeStoredField(types->first(), name, &lookup);
@@ -3433,7 +3633,7 @@ void HGraphBuilder::HandlePolymorphicStoreNamedField(Assignment* expr,
HBasicBlock* join = NULL;
for (int i = 0; i < types->length() && count < kMaxStorePolymorphism; ++i) {
Handle<Map> map = types->at(i);
- LookupResult lookup;
+ LookupResult lookup(isolate());
if (ComputeStoredField(map, name, &lookup)) {
if (count == 0) {
AddInstruction(new(zone()) HCheckNonSmi(object)); // Only needed once.
@@ -3476,7 +3676,7 @@ void HGraphBuilder::HandlePolymorphicStoreNamedField(Assignment* expr,
// The HSimulate for the store should not see the stored value in
// effect contexts (it is not materialized at expr->id() in the
// unoptimized code).
- if (instr->HasSideEffects()) {
+ if (instr->HasObservableSideEffects()) {
if (ast_context()->IsEffect()) {
AddSimulate(expr->id());
} else {
@@ -3516,7 +3716,7 @@ void HGraphBuilder::HandlePropertyAssignment(Assignment* expr) {
ASSERT(!name.is_null());
SmallMapList* types = expr->GetReceiverTypes();
- LookupResult lookup;
+ LookupResult lookup(isolate());
if (expr->IsMonomorphic()) {
instr = BuildStoreNamed(object, value, expr);
@@ -3549,7 +3749,7 @@ void HGraphBuilder::HandlePropertyAssignment(Assignment* expr) {
Push(value);
instr->set_position(expr->position());
AddInstruction(instr);
- if (instr->HasSideEffects()) AddSimulate(expr->AssignmentId());
+ if (instr->HasObservableSideEffects()) AddSimulate(expr->AssignmentId());
return ast_context()->ReturnValue(Pop());
}
@@ -3561,16 +3761,16 @@ void HGraphBuilder::HandleGlobalVariableAssignment(Variable* var,
HValue* value,
int position,
int ast_id) {
- LookupResult lookup;
+ LookupResult lookup(isolate());
GlobalPropertyAccess type = LookupGlobalProperty(var, &lookup, true);
if (type == kUseCell) {
- bool check_hole = !lookup.IsDontDelete() || lookup.IsReadOnly();
Handle<GlobalObject> global(info()->global_object());
Handle<JSGlobalPropertyCell> cell(global->GetPropertyCell(&lookup));
- HInstruction* instr = new(zone()) HStoreGlobalCell(value, cell, check_hole);
+ HInstruction* instr =
+ new(zone()) HStoreGlobalCell(value, cell, lookup.GetPropertyDetails());
instr->set_position(position);
AddInstruction(instr);
- if (instr->HasSideEffects()) AddSimulate(ast_id);
+ if (instr->HasObservableSideEffects()) AddSimulate(ast_id);
} else {
HValue* context = environment()->LookupContext();
HGlobalObject* global_object = new(zone()) HGlobalObject(context);
@@ -3580,11 +3780,11 @@ void HGraphBuilder::HandleGlobalVariableAssignment(Variable* var,
global_object,
var->name(),
value,
- function_strict_mode());
+ function_strict_mode_flag());
instr->set_position(position);
AddInstruction(instr);
- ASSERT(instr->HasSideEffects());
- if (instr->HasSideEffects()) AddSimulate(ast_id);
+ ASSERT(instr->HasObservableSideEffects());
+ if (instr->HasObservableSideEffects()) AddSimulate(ast_id);
}
}
@@ -3601,7 +3801,7 @@ void HGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
if (proxy != NULL) {
Variable* var = proxy->var();
- if (var->mode() == Variable::CONST || var->mode() == Variable::LET) {
+ if (var->mode() == CONST || var->mode() == LET) {
return Bailout("unsupported let or const compound assignment");
}
@@ -3641,7 +3841,9 @@ void HGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
HStoreContextSlot* instr =
new(zone()) HStoreContextSlot(context, var->index(), Top());
AddInstruction(instr);
- if (instr->HasSideEffects()) AddSimulate(expr->AssignmentId());
+ if (instr->HasObservableSideEffects()) {
+ AddSimulate(expr->AssignmentId());
+ }
break;
}
@@ -3667,7 +3869,7 @@ void HGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
load = BuildLoadNamedGeneric(obj, prop);
}
PushAndAdd(load);
- if (load->HasSideEffects()) AddSimulate(expr->CompoundLoadId());
+ if (load->HasObservableSideEffects()) AddSimulate(expr->CompoundLoadId());
CHECK_ALIVE(VisitForValue(expr->value()));
HValue* right = Pop();
@@ -3675,14 +3877,14 @@ void HGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
HInstruction* instr = BuildBinaryOperation(operation, left, right);
PushAndAdd(instr);
- if (instr->HasSideEffects()) AddSimulate(operation->id());
+ if (instr->HasObservableSideEffects()) AddSimulate(operation->id());
HInstruction* store = BuildStoreNamed(obj, instr, prop);
AddInstruction(store);
// Drop the simulated receiver and value. Return the value.
Drop(2);
Push(instr);
- if (store->HasSideEffects()) AddSimulate(expr->AssignmentId());
+ if (store->HasObservableSideEffects()) AddSimulate(expr->AssignmentId());
return ast_context()->ReturnValue(Pop());
} else {
@@ -3707,7 +3909,7 @@ void HGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
HInstruction* instr = BuildBinaryOperation(operation, left, right);
PushAndAdd(instr);
- if (instr->HasSideEffects()) AddSimulate(operation->id());
+ if (instr->HasObservableSideEffects()) AddSimulate(operation->id());
expr->RecordTypeFeedback(oracle());
HandleKeyedElementAccess(obj, key, instr, expr, expr->AssignmentId(),
@@ -3746,7 +3948,7 @@ void HGraphBuilder::VisitAssignment(Assignment* expr) {
HandlePropertyAssignment(expr);
} else if (proxy != NULL) {
Variable* var = proxy->var();
- if (var->mode() == Variable::CONST) {
+ if (var->mode() == CONST) {
if (expr->op() != Token::INIT_CONST) {
return Bailout("non-initializer assignment to const");
}
@@ -3757,7 +3959,7 @@ void HGraphBuilder::VisitAssignment(Assignment* expr) {
// variables (e.g. initialization inside a loop).
HValue* old_value = environment()->Lookup(var);
AddInstruction(new HUseConst(old_value));
- } else if (var->mode() == Variable::LET) {
+ } else if (var->mode() == LET) {
return Bailout("unsupported assignment to let");
}
@@ -3785,7 +3987,7 @@ void HGraphBuilder::VisitAssignment(Assignment* expr) {
}
case Variable::CONTEXT: {
- ASSERT(var->mode() != Variable::CONST);
+ ASSERT(var->mode() != CONST);
// Bail out if we try to mutate a parameter value in a function using
// the arguments object. We do not (yet) correctly handle the
// arguments property of the function.
@@ -3805,7 +4007,9 @@ void HGraphBuilder::VisitAssignment(Assignment* expr) {
HStoreContextSlot* instr =
new(zone()) HStoreContextSlot(context, var->index(), Top());
AddInstruction(instr);
- if (instr->HasSideEffects()) AddSimulate(expr->AssignmentId());
+ if (instr->HasObservableSideEffects()) {
+ AddSimulate(expr->AssignmentId());
+ }
return ast_context()->ReturnValue(Pop());
}
@@ -3876,7 +4080,7 @@ HInstruction* HGraphBuilder::BuildLoadNamed(HValue* obj,
Property* expr,
Handle<Map> map,
Handle<String> name) {
- LookupResult lookup;
+ LookupResult lookup(isolate());
map->LookupInDescriptors(NULL, *name, &lookup);
if (lookup.IsProperty() && lookup.type() == FIELD) {
return BuildLoadNamedField(obj,
@@ -3931,6 +4135,7 @@ HInstruction* HGraphBuilder::BuildExternalArrayElementAccess(
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
break;
+ case FAST_SMI_ONLY_ELEMENTS:
case FAST_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
@@ -3947,24 +4152,48 @@ HInstruction* HGraphBuilder::BuildExternalArrayElementAccess(
}
+HInstruction* HGraphBuilder::BuildFastElementAccess(HValue* elements,
+ HValue* checked_key,
+ HValue* val,
+ ElementsKind elements_kind,
+ bool is_store) {
+ if (is_store) {
+ ASSERT(val != NULL);
+ if (elements_kind == FAST_DOUBLE_ELEMENTS) {
+ return new(zone()) HStoreKeyedFastDoubleElement(
+ elements, checked_key, val);
+ } else { // FAST_ELEMENTS or FAST_SMI_ONLY_ELEMENTS.
+ return new(zone()) HStoreKeyedFastElement(
+ elements, checked_key, val, elements_kind);
+ }
+ }
+ // It's an element load (!is_store).
+ if (elements_kind == FAST_DOUBLE_ELEMENTS) {
+ return new(zone()) HLoadKeyedFastDoubleElement(elements, checked_key);
+ } else { // FAST_ELEMENTS or FAST_SMI_ONLY_ELEMENTS.
+ return new(zone()) HLoadKeyedFastElement(elements, checked_key);
+ }
+}
+
+
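The helper above folds the former four-way if/else ladder into one place; a condensed model of its dispatch, with string names standing in for the H-instructions:

    enum FastKind { FAST_SMI_ONLY, FAST, FAST_DOUBLE };

    static const char* FastElementAccess(FastKind kind, bool is_store) {
      if (is_store) {
        return kind == FAST_DOUBLE ? "HStoreKeyedFastDoubleElement"
                                   : "HStoreKeyedFastElement";
      }
      return kind == FAST_DOUBLE ? "HLoadKeyedFastDoubleElement"
                                 : "HLoadKeyedFastElement";
    }
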
HInstruction* HGraphBuilder::BuildMonomorphicElementAccess(HValue* object,
HValue* key,
HValue* val,
- Expression* expr,
+ Handle<Map> map,
bool is_store) {
- ASSERT(expr->IsMonomorphic());
- Handle<Map> map = expr->GetMonomorphicReceiverType();
- if (!map->has_fast_elements() &&
- !map->has_fast_double_elements() &&
+ HInstruction* mapcheck = AddInstruction(new(zone()) HCheckMap(object, map));
+ bool fast_smi_only_elements = map->has_fast_smi_only_elements();
+ bool fast_elements = map->has_fast_elements();
+ bool fast_double_elements = map->has_fast_double_elements();
+ if (!fast_smi_only_elements &&
+ !fast_elements &&
+ !fast_double_elements &&
!map->has_external_array_elements()) {
return is_store ? BuildStoreKeyedGeneric(object, key, val)
: BuildLoadKeyedGeneric(object, key);
}
- AddInstruction(new(zone()) HCheckNonSmi(object));
- HInstruction* mapcheck = AddInstruction(new(zone()) HCheckMap(object, map));
HInstruction* elements = AddInstruction(new(zone()) HLoadElements(object));
- bool fast_double_elements = map->has_fast_double_elements();
- if (is_store && map->has_fast_elements()) {
+ if (is_store && (fast_elements || fast_smi_only_elements)) {
AddInstruction(new(zone()) HCheckMap(
elements, isolate()->factory()->fixed_array_map()));
}
@@ -3979,28 +4208,15 @@ HInstruction* HGraphBuilder::BuildMonomorphicElementAccess(HValue* object,
return BuildExternalArrayElementAccess(external_elements, checked_key,
val, map->elements_kind(), is_store);
}
- ASSERT(map->has_fast_elements() || fast_double_elements);
+ ASSERT(fast_smi_only_elements || fast_elements || fast_double_elements);
if (map->instance_type() == JS_ARRAY_TYPE) {
length = AddInstruction(new(zone()) HJSArrayLength(object, mapcheck));
} else {
length = AddInstruction(new(zone()) HFixedArrayBaseLength(elements));
}
checked_key = AddInstruction(new(zone()) HBoundsCheck(key, length));
- if (is_store) {
- if (fast_double_elements) {
- return new(zone()) HStoreKeyedFastDoubleElement(elements,
- checked_key,
- val);
- } else {
- return new(zone()) HStoreKeyedFastElement(elements, checked_key, val);
- }
- } else {
- if (fast_double_elements) {
- return new(zone()) HLoadKeyedFastDoubleElement(elements, checked_key);
- } else {
- return new(zone()) HLoadKeyedFastElement(elements, checked_key);
- }
- }
+ return BuildFastElementAccess(elements, checked_key, val,
+ map->elements_kind(), is_store);
}
@@ -4014,7 +4230,6 @@ HValue* HGraphBuilder::HandlePolymorphicElementAccess(HValue* object,
bool* has_side_effects) {
*has_side_effects = false;
AddInstruction(new(zone()) HCheckNonSmi(object));
- AddInstruction(HCheckInstanceType::NewIsSpecObject(object));
SmallMapList* maps = prop->GetReceiverTypes();
bool todo_external_array = false;
@@ -4024,15 +4239,55 @@ HValue* HGraphBuilder::HandlePolymorphicElementAccess(HValue* object,
type_todo[i] = false;
}
+ // Elements_kind transition support.
+ MapHandleList transition_target(maps->length());
+ // Collect possible transition targets.
+ MapHandleList possible_transitioned_maps(maps->length());
+ for (int i = 0; i < maps->length(); ++i) {
+ Handle<Map> map = maps->at(i);
+ ElementsKind elements_kind = map->elements_kind();
+ if (elements_kind == FAST_DOUBLE_ELEMENTS ||
+ elements_kind == FAST_ELEMENTS) {
+ possible_transitioned_maps.Add(map);
+ }
+ }
+ // Get transition target for each map (NULL == no transition).
+ for (int i = 0; i < maps->length(); ++i) {
+ Handle<Map> map = maps->at(i);
+ Handle<Map> transitioned_map =
+ map->FindTransitionedMap(&possible_transitioned_maps);
+ transition_target.Add(transitioned_map);
+ }
+
+ int num_untransitionable_maps = 0;
+ Handle<Map> untransitionable_map;
for (int i = 0; i < maps->length(); ++i) {
- ASSERT(maps->at(i)->IsMap());
- type_todo[maps->at(i)->elements_kind()] = true;
- if (maps->at(i)->elements_kind()
- >= FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND) {
- todo_external_array = true;
+ Handle<Map> map = maps->at(i);
+ ASSERT(map->IsMap());
+ if (!transition_target.at(i).is_null()) {
+ object = AddInstruction(new(zone()) HTransitionElementsKind(
+ object, map, transition_target.at(i)));
+ } else {
+ type_todo[map->elements_kind()] = true;
+ if (map->elements_kind() >= FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND) {
+ todo_external_array = true;
+ }
+ num_untransitionable_maps++;
+ untransitionable_map = map;
}
}
+ // If only one map is left after transitioning, handle this case
+ // monomorphically.
+ if (num_untransitionable_maps == 1) {
+ HInstruction* instr = AddInstruction(BuildMonomorphicElementAccess(
+ object, key, val, untransitionable_map, is_store));
+ *has_side_effects |= instr->HasObservableSideEffects();
+ instr->set_position(position);
+ return is_store ? NULL : instr;
+ }
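+ // Sketch of the collapse logic above (hypothetical map representation):
+ // after rewriting every map that has a transition target, the access only
+ // stays polymorphic if more than one untransitionable map remains.
+ //
+ //   struct MapInfo { bool has_transition_target; };
+ //
+ //   // Index of the sole untransitionable map, or -1 if the access must
+ //   // stay polymorphic (zero or several such maps).
+ //   static int SoleUntransitionableMap(const std::vector<MapInfo>& maps) {
+ //     int count = 0, index = -1;
+ //     for (int i = 0; i < static_cast<int>(maps.size()); ++i) {
+ //       if (!maps[i].has_transition_target) { ++count; index = i; }
+ //     }
+ //     return count == 1 ? index : -1;
+ //   }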
+
+ AddInstruction(HCheckInstanceType::NewIsSpecObject(object));
HBasicBlock* join = graph()->CreateBasicBlock();
HInstruction* elements_kind_instr =
@@ -4042,14 +4297,20 @@ HValue* HGraphBuilder::HandlePolymorphicElementAccess(HValue* object,
HLoadExternalArrayPointer* external_elements = NULL;
HInstruction* checked_key = NULL;
- // FAST_ELEMENTS is assumed to be the first case.
- STATIC_ASSERT(FAST_ELEMENTS == 0);
+ // Generated code assumes that FAST_SMI_ONLY_ELEMENTS, FAST_ELEMENTS,
+ // FAST_DOUBLE_ELEMENTS and DICTIONARY_ELEMENTS are handled before external
+ // arrays.
+ STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS < FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND);
+ STATIC_ASSERT(FAST_ELEMENTS < FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND);
+ STATIC_ASSERT(FAST_DOUBLE_ELEMENTS < FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND);
+ STATIC_ASSERT(DICTIONARY_ELEMENTS < FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND);
- for (ElementsKind elements_kind = FAST_ELEMENTS;
+ for (ElementsKind elements_kind = FIRST_ELEMENTS_KIND;
elements_kind <= LAST_ELEMENTS_KIND;
elements_kind = ElementsKind(elements_kind + 1)) {
- // After having handled FAST_ELEMENTS and DICTIONARY_ELEMENTS, we
- // need to add some code that's executed for all external array cases.
+ // After having handled FAST_ELEMENTS, FAST_SMI_ONLY_ELEMENTS,
+ // FAST_DOUBLE_ELEMENTS and DICTIONARY_ELEMENTS, we need to add some code
+ // that's executed for all external array cases.
STATIC_ASSERT(LAST_EXTERNAL_ARRAY_ELEMENTS_KIND ==
LAST_ELEMENTS_KIND);
if (elements_kind == FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND
@@ -4071,15 +4332,25 @@ HValue* HGraphBuilder::HandlePolymorphicElementAccess(HValue* object,
set_current_block(if_true);
HInstruction* access;
- if (elements_kind == FAST_ELEMENTS ||
+ if (elements_kind == FAST_SMI_ONLY_ELEMENTS ||
+ elements_kind == FAST_ELEMENTS ||
elements_kind == FAST_DOUBLE_ELEMENTS) {
- bool fast_double_elements =
- elements_kind == FAST_DOUBLE_ELEMENTS;
- if (is_store && elements_kind == FAST_ELEMENTS) {
+ if (is_store && elements_kind == FAST_SMI_ONLY_ELEMENTS) {
+ AddInstruction(new(zone()) HCheckSmi(val));
+ }
+ if (is_store && elements_kind != FAST_DOUBLE_ELEMENTS) {
AddInstruction(new(zone()) HCheckMap(
elements, isolate()->factory()->fixed_array_map(),
elements_kind_branch));
}
+ // TODO(jkummerow): The need for these two blocks could be avoided
+ // in one of two ways:
+ // (1) Introduce ElementsKinds for JSArrays that are distinct from
+ // those for fast objects.
+ // (2) Put the common instructions into a third "join" block. This
+ // requires additional AST IDs that we can deopt to from inside
+ // that join block. They must be added to the Property class (when
+ // it's a keyed property) and registered in the full codegen.
HBasicBlock* if_jsarray = graph()->CreateBasicBlock();
HBasicBlock* if_fastobject = graph()->CreateBasicBlock();
HHasInstanceTypeAndBranch* typecheck =
@@ -4089,30 +4360,16 @@ HValue* HGraphBuilder::HandlePolymorphicElementAccess(HValue* object,
current_block()->Finish(typecheck);
set_current_block(if_jsarray);
- HInstruction* length = new(zone()) HJSArrayLength(object, typecheck);
- AddInstruction(length);
+ HInstruction* length;
+ length = AddInstruction(new(zone()) HJSArrayLength(object, typecheck));
checked_key = AddInstruction(new(zone()) HBoundsCheck(key, length));
- if (is_store) {
- if (fast_double_elements) {
- access = AddInstruction(
- new(zone()) HStoreKeyedFastDoubleElement(elements,
- checked_key,
- val));
- } else {
- access = AddInstruction(
- new(zone()) HStoreKeyedFastElement(elements, checked_key, val));
- }
- } else {
- if (fast_double_elements) {
- access = AddInstruction(
- new(zone()) HLoadKeyedFastDoubleElement(elements, checked_key));
- } else {
- access = AddInstruction(
- new(zone()) HLoadKeyedFastElement(elements, checked_key));
- }
+ access = AddInstruction(BuildFastElementAccess(
+ elements, checked_key, val, elements_kind, is_store));
+ if (!is_store) {
Push(access);
}
- *has_side_effects |= access->HasSideEffects();
+
+ *has_side_effects |= access->HasObservableSideEffects();
if (position != -1) {
access->set_position(position);
}
@@ -4121,25 +4378,8 @@ HValue* HGraphBuilder::HandlePolymorphicElementAccess(HValue* object,
set_current_block(if_fastobject);
length = AddInstruction(new(zone()) HFixedArrayBaseLength(elements));
checked_key = AddInstruction(new(zone()) HBoundsCheck(key, length));
- if (is_store) {
- if (fast_double_elements) {
- access = AddInstruction(
- new(zone()) HStoreKeyedFastDoubleElement(elements,
- checked_key,
- val));
- } else {
- access = AddInstruction(
- new(zone()) HStoreKeyedFastElement(elements, checked_key, val));
- }
- } else {
- if (fast_double_elements) {
- access = AddInstruction(
- new(zone()) HLoadKeyedFastDoubleElement(elements, checked_key));
- } else {
- access = AddInstruction(
- new(zone()) HLoadKeyedFastElement(elements, checked_key));
- }
- }
+ access = AddInstruction(BuildFastElementAccess(
+ elements, checked_key, val, elements_kind, is_store));
} else if (elements_kind == DICTIONARY_ELEMENTS) {
if (is_store) {
access = AddInstruction(BuildStoreKeyedGeneric(object, key, val));
@@ -4150,7 +4390,7 @@ HValue* HGraphBuilder::HandlePolymorphicElementAccess(HValue* object,
access = AddInstruction(BuildExternalArrayElementAccess(
external_elements, checked_key, val, elements_kind, is_store));
}
- *has_side_effects |= access->HasSideEffects();
+ *has_side_effects |= access->HasObservableSideEffects();
access->set_position(position);
if (!is_store) {
Push(access);
@@ -4179,7 +4419,9 @@ HValue* HGraphBuilder::HandleKeyedElementAccess(HValue* obj,
ASSERT(!expr->IsPropertyName());
HInstruction* instr = NULL;
if (expr->IsMonomorphic()) {
- instr = BuildMonomorphicElementAccess(obj, key, val, expr, is_store);
+ Handle<Map> map = expr->GetMonomorphicReceiverType();
+ AddInstruction(new(zone()) HCheckNonSmi(obj));
+ instr = BuildMonomorphicElementAccess(obj, key, val, map, is_store);
} else if (expr->GetReceiverTypes() != NULL &&
!expr->GetReceiverTypes()->is_empty()) {
return HandlePolymorphicElementAccess(
@@ -4193,7 +4435,7 @@ HValue* HGraphBuilder::HandleKeyedElementAccess(HValue* obj,
}
instr->set_position(position);
AddInstruction(instr);
- *has_side_effects = instr->HasSideEffects();
+ *has_side_effects = instr->HasObservableSideEffects();
return instr;
}
@@ -4207,7 +4449,7 @@ HInstruction* HGraphBuilder::BuildStoreKeyedGeneric(HValue* object,
object,
key,
value,
- function_strict_mode());
+ function_strict_mode_flag());
}
bool HGraphBuilder::TryArgumentsAccess(Property* expr) {
@@ -4260,7 +4502,7 @@ void HGraphBuilder::VisitProperty(Property* expr) {
CHECK_ALIVE(VisitForValue(expr->obj()));
HInstruction* instr = NULL;
- if (expr->IsArrayLength()) {
+ if (expr->AsProperty()->IsArrayLength()) {
HValue* array = Pop();
AddInstruction(new(zone()) HCheckNonSmi(array));
HInstruction* mapcheck =
@@ -4449,7 +4691,7 @@ void HGraphBuilder::TraceInline(Handle<JSFunction> target,
}
-bool HGraphBuilder::TryInline(Call* expr) {
+bool HGraphBuilder::TryInline(Call* expr, bool drop_extra) {
if (!FLAG_use_inlining) return false;
// The function call we are inlining is a method call if the call
@@ -4477,7 +4719,8 @@ bool HGraphBuilder::TryInline(Call* expr) {
return false;
}
- // No context change required.
+#if !defined(V8_TARGET_ARCH_IA32)
+ // Target must be able to use caller's context.
CompilationInfo* outer_info = info();
if (target->context() != outer_info->closure()->context() ||
outer_info->scope()->contains_with() ||
@@ -4485,6 +4728,8 @@ bool HGraphBuilder::TryInline(Call* expr) {
TraceInline(target, caller, "target requires context change");
return false;
}
+#endif
+
// Don't inline deeper than kMaxInliningLevels calls.
HEnvironment* env = environment();
@@ -4499,9 +4744,13 @@ bool HGraphBuilder::TryInline(Call* expr) {
}
// Don't inline recursive functions.
- if (*target_shared == outer_info->closure()->shared()) {
- TraceInline(target, caller, "target is recursive");
- return false;
+ for (FunctionState* state = function_state();
+ state != NULL;
+ state = state->outer()) {
+ if (state->compilation_info()->closure()->shared() == *target_shared) {
+ TraceInline(target, caller, "target is recursive");
+ return false;
+ }
}
// We don't want to add more than a certain number of nodes from inlining.
@@ -4514,7 +4763,7 @@ bool HGraphBuilder::TryInline(Call* expr) {
// Parse and allocate variables.
CompilationInfo target_info(target);
- if (!ParserApi::Parse(&target_info) ||
+ if (!ParserApi::Parse(&target_info, kNoParsingFlags) ||
!Scope::Analyze(&target_info)) {
if (target_info.isolate()->has_pending_exception()) {
// Parse or scope error, never optimize this function.
@@ -4574,11 +4823,11 @@ bool HGraphBuilder::TryInline(Call* expr) {
TraceInline(target, caller, "could not generate deoptimization info");
return false;
}
- if (target_shared->scope_info() == SerializedScopeInfo::Empty()) {
+ if (target_shared->scope_info() == ScopeInfo::Empty()) {
// The scope info might not have been set if a lazily compiled
// function is inlined before being called for the first time.
- Handle<SerializedScopeInfo> target_scope_info =
- SerializedScopeInfo::Create(target_info.scope());
+ Handle<ScopeInfo> target_scope_info =
+ ScopeInfo::Create(target_info.scope());
target_shared->set_scope_info(*target_scope_info);
}
target_shared->EnableDeoptimizationSupport(*target_info.code());
@@ -4596,8 +4845,12 @@ bool HGraphBuilder::TryInline(Call* expr) {
ASSERT(target_shared->has_deoptimization_support());
TypeFeedbackOracle target_oracle(
Handle<Code>(target_shared->code()),
- Handle<Context>(target->context()->global_context()));
- FunctionState target_state(this, &target_info, &target_oracle);
+ Handle<Context>(target->context()->global_context()),
+ isolate());
+ // The function state is new-allocated because we need to delete it
+ // in two different places.
+ FunctionState* target_state =
+ new FunctionState(this, &target_info, &target_oracle, drop_extra);
HConstant* undefined = graph()->GetConstantUndefined();
HEnvironment* inner_env =
@@ -4605,6 +4858,17 @@ bool HGraphBuilder::TryInline(Call* expr) {
function,
undefined,
call_kind);
+#ifdef V8_TARGET_ARCH_IA32
+ // IA32 only, overwrite the caller's context in the deoptimization
+ // environment with the correct one.
+ //
+ // TODO(kmillikin): implement the same inlining on other platforms so we
+ // can remove the unsightly ifdefs in this function.
+ HConstant* context = new HConstant(Handle<Context>(target->context()),
+ Representation::Tagged());
+ AddInstruction(context);
+ inner_env->BindContext(context);
+#endif
HBasicBlock* body_entry = CreateBasicBlock(inner_env);
current_block()->Goto(body_entry);
body_entry->SetJoinId(expr->ReturnId());
@@ -4620,6 +4884,7 @@ bool HGraphBuilder::TryInline(Call* expr) {
TraceInline(target, caller, "inline graph construction failed");
target_shared->DisableOptimization(*target);
inline_bailout_ = true;
+ delete target_state;
return true;
}
@@ -4635,9 +4900,11 @@ bool HGraphBuilder::TryInline(Call* expr) {
ASSERT(function_return() != NULL);
ASSERT(call_context()->IsEffect() || call_context()->IsValue());
if (call_context()->IsEffect()) {
- current_block()->Goto(function_return());
+ current_block()->Goto(function_return(), drop_extra);
} else {
- current_block()->AddLeaveInlined(undefined, function_return());
+ current_block()->AddLeaveInlined(undefined,
+ function_return(),
+ drop_extra);
}
} else {
// The graph builder assumes control can reach both branches of a
@@ -4645,13 +4912,14 @@ bool HGraphBuilder::TryInline(Call* expr) {
// simply jumping to the false target.
//
// TODO(3168478): refactor to avoid this.
+ ASSERT(call_context()->IsTest());
HBasicBlock* empty_true = graph()->CreateBasicBlock();
HBasicBlock* empty_false = graph()->CreateBasicBlock();
HBranch* test = new(zone()) HBranch(undefined, empty_true, empty_false);
current_block()->Finish(test);
- empty_true->Goto(inlined_test_context()->if_true());
- empty_false->Goto(inlined_test_context()->if_false());
+ empty_true->Goto(inlined_test_context()->if_true(), drop_extra);
+ empty_false->Goto(inlined_test_context()->if_false(), drop_extra);
}
}
@@ -4663,19 +4931,21 @@ bool HGraphBuilder::TryInline(Call* expr) {
// Pop the return test context from the expression context stack.
ASSERT(ast_context() == inlined_test_context());
ClearInlinedTestContext();
+ delete target_state;
// Forward to the real test context.
if (if_true->HasPredecessor()) {
if_true->SetJoinId(expr->id());
HBasicBlock* true_target = TestContext::cast(ast_context())->if_true();
- if_true->Goto(true_target);
+ if_true->Goto(true_target, function_state()->drop_extra());
}
if (if_false->HasPredecessor()) {
if_false->SetJoinId(expr->id());
HBasicBlock* false_target = TestContext::cast(ast_context())->if_false();
- if_false->Goto(false_target);
+ if_false->Goto(false_target, function_state()->drop_extra());
}
set_current_block(NULL);
+ return true;
} else if (function_return()->HasPredecessor()) {
function_return()->SetJoinId(expr->id());
@@ -4683,7 +4953,7 @@ bool HGraphBuilder::TryInline(Call* expr) {
} else {
set_current_block(NULL);
}
-
+ delete target_state;
return true;
}
@@ -4764,7 +5034,7 @@ bool HGraphBuilder::TryInlineBuiltinFunction(Call* expr,
AddInstruction(square_root);
// MathPowHalf doesn't have side effects so there's no need for
// an environment simulation here.
- ASSERT(!square_root->HasSideEffects());
+ ASSERT(!square_root->HasObservableSideEffects());
result = new(zone()) HDiv(context, double_one, square_root);
} else if (exponent == 2.0) {
result = new(zone()) HMul(context, left, left);
@@ -4897,7 +5167,7 @@ void HGraphBuilder::VisitCall(Call* expr) {
return;
}
- if (CallStubCompiler::HasCustomCallGenerator(*expr->target()) ||
+ if (CallStubCompiler::HasCustomCallGenerator(expr->target()) ||
expr->check_type() != RECEIVER_MAP_CHECK) {
// When the target has a custom call IC generator, use the IC,
// because it is likely to generate better code. Also use the IC
@@ -4925,8 +5195,8 @@ void HGraphBuilder::VisitCall(Call* expr) {
}
} else {
+ expr->RecordTypeFeedback(oracle(), CALL_AS_FUNCTION);
VariableProxy* proxy = expr->expression()->AsVariableProxy();
- // FIXME.
bool global_call = proxy != NULL && proxy->var()->IsUnallocated();
if (global_call) {
@@ -4935,7 +5205,7 @@ void HGraphBuilder::VisitCall(Call* expr) {
// If there is a global property cell for the name at compile time and
// access check is not enabled we assume that the function will not change
// and generate optimized code for calling the function.
- LookupResult lookup;
+ LookupResult lookup(isolate());
GlobalPropertyAccess type = LookupGlobalProperty(var, &lookup, false);
if (type == kUseCell &&
!info()->global_object()->IsAccessCheckNeeded()) {
@@ -4978,8 +5248,30 @@ void HGraphBuilder::VisitCall(Call* expr) {
Drop(argument_count);
}
+ } else if (expr->IsMonomorphic()) {
+ // The function is on the stack in the unoptimized code during
+ // evaluation of the arguments.
+ CHECK_ALIVE(VisitForValue(expr->expression()));
+ HValue* function = Top();
+ HValue* context = environment()->LookupContext();
+ HGlobalObject* global = new(zone()) HGlobalObject(context);
+ HGlobalReceiver* receiver = new(zone()) HGlobalReceiver(global);
+ AddInstruction(global);
+ PushAndAdd(receiver);
+ CHECK_ALIVE(VisitExpressions(expr->arguments()));
+ AddInstruction(new(zone()) HCheckFunction(function, expr->target()));
+ if (TryInline(expr, true)) { // Drop function from environment.
+ return;
+ } else {
+ call = PreProcessCall(new(zone()) HInvokeFunction(context,
+ function,
+ argument_count));
+ Drop(1); // The function.
+ }
+
} else {
- CHECK_ALIVE(VisitArgument(expr->expression()));
+ CHECK_ALIVE(VisitForValue(expr->expression()));
+ HValue* function = Top();
HValue* context = environment()->LookupContext();
HGlobalObject* global_object = new(zone()) HGlobalObject(context);
HGlobalReceiver* receiver = new(zone()) HGlobalReceiver(global_object);
@@ -4988,9 +5280,7 @@ void HGraphBuilder::VisitCall(Call* expr) {
PushAndAdd(new(zone()) HPushArgument(receiver));
CHECK_ALIVE(VisitArgumentList(expr->arguments()));
- // The function to call is treated as an argument to the call function
- // stub.
- call = new(zone()) HCallFunction(context, argument_count + 1);
+ call = new(zone()) HCallFunction(context, function, argument_count);
Drop(argument_count + 1);
}
}
@@ -5185,7 +5475,6 @@ void HGraphBuilder::VisitBitNot(UnaryOperation* expr) {
void HGraphBuilder::VisitNot(UnaryOperation* expr) {
- // TODO(svenpanne) Perhaps a switch/virtual function is nicer here.
if (ast_context()->IsTest()) {
TestContext* context = TestContext::cast(ast_context());
VisitForControl(expr->expression(),
@@ -5207,7 +5496,7 @@ void HGraphBuilder::VisitNot(UnaryOperation* expr) {
materialize_true));
if (materialize_false->HasPredecessor()) {
- materialize_false->SetJoinId(expr->expression()->id());
+ materialize_false->SetJoinId(expr->MaterializeFalseId());
set_current_block(materialize_false);
Push(graph()->GetConstantFalse());
} else {
@@ -5215,7 +5504,7 @@ void HGraphBuilder::VisitNot(UnaryOperation* expr) {
}
if (materialize_true->HasPredecessor()) {
- materialize_true->SetJoinId(expr->expression()->id());
+ materialize_true->SetJoinId(expr->MaterializeTrueId());
set_current_block(materialize_true);
Push(graph()->GetConstantTrue());
} else {
@@ -5284,7 +5573,7 @@ void HGraphBuilder::VisitCountOperation(CountOperation* expr) {
if (proxy != NULL) {
Variable* var = proxy->var();
- if (var->mode() == Variable::CONST) {
+ if (var->mode() == CONST) {
return Bailout("unsupported count operation with const");
}
// Argument of the count operation is a variable, not a property.
@@ -5328,7 +5617,9 @@ void HGraphBuilder::VisitCountOperation(CountOperation* expr) {
HStoreContextSlot* instr =
new(zone()) HStoreContextSlot(context, var->index(), after);
AddInstruction(instr);
- if (instr->HasSideEffects()) AddSimulate(expr->AssignmentId());
+ if (instr->HasObservableSideEffects()) {
+ AddSimulate(expr->AssignmentId());
+ }
break;
}
@@ -5357,7 +5648,7 @@ void HGraphBuilder::VisitCountOperation(CountOperation* expr) {
load = BuildLoadNamedGeneric(obj, prop);
}
PushAndAdd(load);
- if (load->HasSideEffects()) AddSimulate(expr->CountId());
+ if (load->HasObservableSideEffects()) AddSimulate(expr->CountId());
after = BuildIncrement(returns_original_input, expr);
input = Pop();
@@ -5370,7 +5661,7 @@ void HGraphBuilder::VisitCountOperation(CountOperation* expr) {
// necessary.
environment()->SetExpressionStackAt(0, after);
if (returns_original_input) environment()->SetExpressionStackAt(1, input);
- if (store->HasSideEffects()) AddSimulate(expr->AssignmentId());
+ if (store->HasObservableSideEffects()) AddSimulate(expr->AssignmentId());
} else {
// Keyed property.
@@ -5447,38 +5738,34 @@ HInstruction* HGraphBuilder::BuildBinaryOperation(BinaryOperation* expr,
AddInstruction(HCheckInstanceType::NewIsString(right));
instr = new(zone()) HStringAdd(context, left, right);
} else {
- instr = new(zone()) HAdd(context, left, right);
+ instr = HAdd::NewHAdd(zone(), context, left, right);
}
break;
case Token::SUB:
- instr = new(zone()) HSub(context, left, right);
+ instr = HSub::NewHSub(zone(), context, left, right);
break;
case Token::MUL:
- instr = new(zone()) HMul(context, left, right);
+ instr = HMul::NewHMul(zone(), context, left, right);
break;
case Token::MOD:
- instr = new(zone()) HMod(context, left, right);
+ instr = HMod::NewHMod(zone(), context, left, right);
break;
case Token::DIV:
- instr = new(zone()) HDiv(context, left, right);
+ instr = HDiv::NewHDiv(zone(), context, left, right);
break;
case Token::BIT_XOR:
- instr = new(zone()) HBitXor(context, left, right);
- break;
case Token::BIT_AND:
- instr = new(zone()) HBitAnd(context, left, right);
- break;
case Token::BIT_OR:
- instr = new(zone()) HBitOr(context, left, right);
+ instr = HBitwise::NewHBitwise(zone(), expr->op(), context, left, right);
break;
case Token::SAR:
- instr = new(zone()) HSar(context, left, right);
+ instr = HSar::NewHSar(zone(), context, left, right);
break;
case Token::SHR:
- instr = new(zone()) HShr(context, left, right);
+ instr = HShr::NewHShr(zone(), context, left, right);
break;
case Token::SHL:
- instr = new(zone()) HShl(context, left, right);
+ instr = HShl::NewHShl(zone(), context, left, right);
break;
default:
UNREACHABLE();
@@ -5671,26 +5958,66 @@ Representation HGraphBuilder::ToRepresentation(TypeInfo info) {
}
-void HGraphBuilder::HandleLiteralCompareTypeof(CompareOperation* compare_expr,
- Expression* expr,
+void HGraphBuilder::HandleLiteralCompareTypeof(CompareOperation* expr,
+ HTypeof* typeof_expr,
Handle<String> check) {
- CHECK_ALIVE(VisitForTypeOf(expr));
- HValue* expr_value = Pop();
- HTypeofIsAndBranch* instr = new(zone()) HTypeofIsAndBranch(expr_value, check);
- instr->set_position(compare_expr->position());
- return ast_context()->ReturnControl(instr, compare_expr->id());
+ // Note: The HTypeof itself is removed during canonicalization, if possible.
+ HValue* value = typeof_expr->value();
+ HTypeofIsAndBranch* instr = new(zone()) HTypeofIsAndBranch(value, check);
+ instr->set_position(expr->position());
+ return ast_context()->ReturnControl(instr, expr->id());
}
-void HGraphBuilder::HandleLiteralCompareUndefined(
- CompareOperation* compare_expr, Expression* expr) {
- CHECK_ALIVE(VisitForValue(expr));
- HValue* lhs = Pop();
- HValue* rhs = graph()->GetConstantUndefined();
- HCompareObjectEqAndBranch* instr =
- new(zone()) HCompareObjectEqAndBranch(lhs, rhs);
- instr->set_position(compare_expr->position());
- return ast_context()->ReturnControl(instr, compare_expr->id());
+static bool MatchLiteralCompareNil(HValue* left,
+ Token::Value op,
+ HValue* right,
+ Handle<Object> nil,
+ HValue** expr) {
+ if (left->IsConstant() &&
+ HConstant::cast(left)->handle().is_identical_to(nil) &&
+ Token::IsEqualityOp(op)) {
+ *expr = right;
+ return true;
+ }
+ return false;
+}
+
+
+static bool MatchLiteralCompareTypeof(HValue* left,
+ Token::Value op,
+ HValue* right,
+ HTypeof** typeof_expr,
+ Handle<String>* check) {
+ if (left->IsTypeof() &&
+ Token::IsEqualityOp(op) &&
+ right->IsConstant() &&
+ HConstant::cast(right)->HasStringValue()) {
+ *typeof_expr = HTypeof::cast(left);
+ *check = Handle<String>::cast(HConstant::cast(right)->handle());
+ return true;
+ }
+ return false;
+}
+
+
+static bool IsLiteralCompareTypeof(HValue* left,
+ Token::Value op,
+ HValue* right,
+ HTypeof** typeof_expr,
+ Handle<String>* check) {
+ return MatchLiteralCompareTypeof(left, op, right, typeof_expr, check) ||
+ MatchLiteralCompareTypeof(right, op, left, typeof_expr, check);
+}
+
+
+static bool IsLiteralCompareNil(HValue* left,
+ Token::Value op,
+ HValue* right,
+ Handle<Object> nil,
+ HValue** expr) {
+ return MatchLiteralCompareNil(left, op, right, nil, expr) ||
+ MatchLiteralCompareNil(right, op, left, nil, expr);
}
@@ -5711,21 +6038,9 @@ void HGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
return ast_context()->ReturnControl(instr, expr->id());
}
- // Check for special cases that compare against literals.
- Expression *sub_expr;
- Handle<String> check;
- if (expr->IsLiteralCompareTypeof(&sub_expr, &check)) {
- HandleLiteralCompareTypeof(expr, sub_expr, check);
- return;
- }
-
- if (expr->IsLiteralCompareUndefined(&sub_expr)) {
- HandleLiteralCompareUndefined(expr, sub_expr);
- return;
- }
-
TypeInfo type_info = oracle()->CompareType(expr);
// Check if this expression was ever executed according to type feedback.
+ // Note that for the special typeof/null/undefined cases we get unknown here.
if (type_info.IsUninitialized()) {
AddInstruction(new(zone()) HSoftDeoptimize);
current_block()->MarkAsDeoptimizing();
@@ -5740,6 +6055,20 @@ void HGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
HValue* left = Pop();
Token::Value op = expr->op();
+ HTypeof* typeof_expr = NULL;
+ Handle<String> check;
+ if (IsLiteralCompareTypeof(left, op, right, &typeof_expr, &check)) {
+ return HandleLiteralCompareTypeof(expr, typeof_expr, check);
+ }
+ HValue* sub_expr = NULL;
+ Factory* f = graph()->isolate()->factory();
+ if (IsLiteralCompareNil(left, op, right, f->undefined_value(), &sub_expr)) {
+ return HandleLiteralCompareNil(expr, sub_expr, kUndefinedValue);
+ }
+ if (IsLiteralCompareNil(left, op, right, f->null_value(), &sub_expr)) {
+ return HandleLiteralCompareNil(expr, sub_expr, kNullValue);
+ }
+
if (op == Token::INSTANCEOF) {
// Check to see if the rhs of the instanceof is a global function not
// residing in new space. If it is we assume that the function will stay the
@@ -5752,7 +6081,7 @@ void HGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
!info()->global_object()->IsAccessCheckNeeded()) {
Handle<String> name = proxy->name();
Handle<GlobalObject> global(info()->global_object());
- LookupResult lookup;
+ LookupResult lookup(isolate());
global->Lookup(*name, &lookup);
if (lookup.IsProperty() &&
lookup.type() == NORMAL &&
@@ -5827,14 +6156,16 @@ void HGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
}
-void HGraphBuilder::VisitCompareToNull(CompareToNull* expr) {
+void HGraphBuilder::HandleLiteralCompareNil(CompareOperation* expr,
+ HValue* value,
+ NilValue nil) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
- CHECK_ALIVE(VisitForValue(expr->expression()));
- HValue* value = Pop();
- HIsNullAndBranch* instr =
- new(zone()) HIsNullAndBranch(value, expr->is_strict());
+ EqualityKind kind =
+ expr->op() == Token::EQ_STRICT ? kStrictEquality : kNonStrictEquality;
+ HIsNilAndBranch* instr = new(zone()) HIsNilAndBranch(value, kind, nil);
+ instr->set_position(expr->position());
return ast_context()->ReturnControl(instr, expr->id());
}
@@ -5843,7 +6174,8 @@ void HGraphBuilder::VisitThisFunction(ThisFunction* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
- HThisFunction* self = new(zone()) HThisFunction;
+ HThisFunction* self = new(zone()) HThisFunction(
+ function_state()->compilation_info()->closure());
return ast_context()->ReturnInstruction(self, expr->id());
}
@@ -5854,9 +6186,11 @@ void HGraphBuilder::VisitDeclaration(Declaration* decl) {
void HGraphBuilder::HandleDeclaration(VariableProxy* proxy,
- Variable::Mode mode,
+ VariableMode mode,
FunctionLiteral* function) {
- if (mode == Variable::LET) return Bailout("unsupported let declaration");
+ if (mode == LET || mode == CONST_HARMONY) {
+ return Bailout("unsupported harmony declaration");
+ }
Variable* var = proxy->var();
switch (var->location()) {
case Variable::UNALLOCATED:
@@ -5864,9 +6198,9 @@ void HGraphBuilder::HandleDeclaration(VariableProxy* proxy,
case Variable::PARAMETER:
case Variable::LOCAL:
case Variable::CONTEXT:
- if (mode == Variable::CONST || function != NULL) {
+ if (mode == CONST || function != NULL) {
HValue* value = NULL;
- if (mode == Variable::CONST) {
+ if (mode == CONST) {
value = graph()->GetConstantHole();
} else {
VisitForValue(function);
@@ -5877,7 +6211,7 @@ void HGraphBuilder::HandleDeclaration(VariableProxy* proxy,
HStoreContextSlot* store =
new HStoreContextSlot(context, var->index(), value);
AddInstruction(store);
- if (store->HasSideEffects()) AddSimulate(proxy->id());
+ if (store->HasObservableSideEffects()) AddSimulate(proxy->id());
} else {
environment()->Bind(var, value);
}
@@ -5917,9 +6251,7 @@ void HGraphBuilder::GenerateIsFunction(CallRuntime* call) {
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
HHasInstanceTypeAndBranch* result =
- new(zone()) HHasInstanceTypeAndBranch(value,
- JS_FUNCTION_TYPE,
- JS_FUNCTION_PROXY_TYPE);
+ new(zone()) HHasInstanceTypeAndBranch(value, JS_FUNCTION_TYPE);
return ast_context()->ReturnControl(result, call->id());
}
@@ -6047,7 +6379,44 @@ void HGraphBuilder::GenerateValueOf(CallRuntime* call) {
void HGraphBuilder::GenerateSetValueOf(CallRuntime* call) {
- return Bailout("inlined runtime function: SetValueOf");
+ ASSERT(call->arguments()->length() == 2);
+ CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+ CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
+ HValue* value = Pop();
+ HValue* object = Pop();
+ // Check if object is not a smi.
+ HIsSmiAndBranch* smicheck = new(zone()) HIsSmiAndBranch(object);
+ HBasicBlock* if_smi = graph()->CreateBasicBlock();
+ HBasicBlock* if_heap_object = graph()->CreateBasicBlock();
+ HBasicBlock* join = graph()->CreateBasicBlock();
+ smicheck->SetSuccessorAt(0, if_smi);
+ smicheck->SetSuccessorAt(1, if_heap_object);
+ current_block()->Finish(smicheck);
+ if_smi->Goto(join);
+
+ // Check if object is a JSValue.
+ set_current_block(if_heap_object);
+ HHasInstanceTypeAndBranch* typecheck =
+ new(zone()) HHasInstanceTypeAndBranch(object, JS_VALUE_TYPE);
+ HBasicBlock* if_js_value = graph()->CreateBasicBlock();
+ HBasicBlock* not_js_value = graph()->CreateBasicBlock();
+ typecheck->SetSuccessorAt(0, if_js_value);
+ typecheck->SetSuccessorAt(1, not_js_value);
+ current_block()->Finish(typecheck);
+ not_js_value->Goto(join);
+
+ // Create in-object property store to kValueOffset.
+ set_current_block(if_js_value);
+ Handle<String> name = isolate()->factory()->undefined_symbol();
+ AddInstruction(new HStoreNamedField(object,
+ name,
+ value,
+ true, // in-object store.
+ JSValue::kValueOffset));
+ if_js_value->Goto(join);
+ join->SetJoinId(call->id());
+ set_current_block(join);
+ return ast_context()->ReturnValue(value);
}
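
A stand-alone model of the control flow just built (hypothetical wrapper type): smis and non-JSValue receivers fall through untouched, JSValue receivers get their internal slot written, and every path yields the stored value, matching the single join block.

    struct JSValueLike { int value; };

    static int SetValueOf(JSValueLike* receiver_or_null, int new_value) {
      if (receiver_or_null != nullptr) {
        receiver_or_null->value = new_value;  // the if_js_value path
      }
      return new_value;  // all paths return the value, as at the join
    }
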
@@ -6210,12 +6579,37 @@ void HGraphBuilder::GenerateCallFunction(CallRuntime* call) {
CHECK_ALIVE(VisitArgument(call->arguments()->at(i)));
}
CHECK_ALIVE(VisitForValue(call->arguments()->last()));
+
HValue* function = Pop();
HValue* context = environment()->LookupContext();
- HInvokeFunction* result =
- new(zone()) HInvokeFunction(context, function, arg_count);
+
+ // Branch for function proxies, or other non-functions.
+ HHasInstanceTypeAndBranch* typecheck =
+ new(zone()) HHasInstanceTypeAndBranch(function, JS_FUNCTION_TYPE);
+ HBasicBlock* if_jsfunction = graph()->CreateBasicBlock();
+ HBasicBlock* if_nonfunction = graph()->CreateBasicBlock();
+ HBasicBlock* join = graph()->CreateBasicBlock();
+ typecheck->SetSuccessorAt(0, if_jsfunction);
+ typecheck->SetSuccessorAt(1, if_nonfunction);
+ current_block()->Finish(typecheck);
+
+ set_current_block(if_jsfunction);
+ HInstruction* invoke_result = AddInstruction(
+ new(zone()) HInvokeFunction(context, function, arg_count));
Drop(arg_count);
- return ast_context()->ReturnInstruction(result, call->id());
+ Push(invoke_result);
+ if_jsfunction->Goto(join);
+
+ set_current_block(if_nonfunction);
+ HInstruction* call_result = AddInstruction(
+ new(zone()) HCallFunction(context, function, arg_count));
+ Drop(arg_count);
+ Push(call_result);
+ if_nonfunction->Goto(join);
+
+ set_current_block(join);
+ join->SetJoinId(call->id());
+ return ast_context()->ReturnValue(Pop());
}
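
Condensed model of the new dispatch (hypothetical Callable type): genuine JSFunctions take the direct invoke path, while proxies and other callables fall back to the generic call stub; both branches meet at the join and leave one result.

    #include <functional>

    struct Callable {
      bool is_js_function;
      std::function<int()> invoke;  // fast path: HInvokeFunction
      std::function<int()> call;    // generic path: HCallFunction
    };

    static int CallFunction(const Callable& f) {
      return f.is_js_function ? f.invoke() : f.call();
    }
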
@@ -6255,6 +6649,18 @@ void HGraphBuilder::GenerateMathCos(CallRuntime* call) {
}
+void HGraphBuilder::GenerateMathTan(CallRuntime* call) {
+ ASSERT_EQ(1, call->arguments()->length());
+ CHECK_ALIVE(VisitArgumentList(call->arguments()));
+ HValue* context = environment()->LookupContext();
+ HCallStub* result =
+ new(zone()) HCallStub(context, CodeStub::TranscendentalCache, 1);
+ result->set_transcendental_type(TranscendentalCache::TAN);
+ Drop(1);
+ return ast_context()->ReturnInstruction(result, call->id());
+}
+
+
void HGraphBuilder::GenerateMathLog(CallRuntime* call) {
ASSERT_EQ(1, call->arguments()->length());
CHECK_ALIVE(VisitArgumentList(call->arguments()));
@@ -6472,7 +6878,7 @@ HEnvironment* HEnvironment::CopyForInlining(
// If the function we are inlining is a strict mode function or a
// builtin function, pass undefined as the receiver for function
// calls (instead of the global receiver).
- if ((target->shared()->native() || function->strict_mode()) &&
+ if ((target->shared()->native() || !function->is_classic_mode()) &&
call_kind == CALL_AS_FUNCTION) {
inner->SetValueAt(0, undefined);
}
@@ -6819,7 +7225,7 @@ void HPhase::End() const {
}
#ifdef DEBUG
- if (graph_ != NULL) graph_->Verify();
+ if (graph_ != NULL) graph_->Verify(false); // No full verify.
if (allocator_ != NULL) allocator_->Verify();
#endif
}
diff --git a/deps/v8/src/hydrogen.h b/deps/v8/src/hydrogen.h
index 03fbc7322..ded1356d1 100644
--- a/deps/v8/src/hydrogen.h
+++ b/deps/v8/src/hydrogen.h
@@ -121,7 +121,7 @@ class HBasicBlock: public ZoneObject {
void Finish(HControlInstruction* last);
void FinishExit(HControlInstruction* instruction);
- void Goto(HBasicBlock* block);
+ void Goto(HBasicBlock* block, bool drop_extra = false);
int PredecessorIndexOf(HBasicBlock* predecessor) const;
void AddSimulate(int ast_id) { AddInstruction(CreateSimulate(ast_id)); }
@@ -133,7 +133,9 @@ class HBasicBlock: public ZoneObject {
// Add the inlined function exit sequence, adding an HLeaveInlined
// instruction and updating the bailout environment.
- void AddLeaveInlined(HValue* return_value, HBasicBlock* target);
+ void AddLeaveInlined(HValue* return_value,
+ HBasicBlock* target,
+ bool drop_extra = false);
// If a target block is tagged as an inline function return, all
// predecessors should contain the inlined exit sequence:
@@ -243,11 +245,13 @@ class HGraph: public ZoneObject {
// Returns false if there are phi-uses of the arguments-object
// which are not supported by the optimizing compiler.
- bool CheckPhis();
+ bool CheckArgumentsPhiUses();
-  // Returns false if there are phi-uses of hole values coming
-  // from uninitialized consts.
- bool CollectPhis();
+ // Returns false if there are phi-uses of an uninitialized const
+ // which are not supported by the optimizing compiler.
+ bool CheckConstPhiUses();
+
+ void CollectPhis();
Handle<Code> Compile(CompilationInfo* info);
@@ -283,7 +287,7 @@ class HGraph: public ZoneObject {
}
#ifdef DEBUG
- void Verify() const;
+ void Verify(bool do_full_verify) const;
#endif
private:
@@ -601,16 +605,18 @@ class TestContext: public AstContext {
};
-class FunctionState BASE_EMBEDDED {
+class FunctionState {
public:
FunctionState(HGraphBuilder* owner,
CompilationInfo* info,
- TypeFeedbackOracle* oracle);
+ TypeFeedbackOracle* oracle,
+ bool drop_extra);
~FunctionState();
CompilationInfo* compilation_info() { return compilation_info_; }
TypeFeedbackOracle* oracle() { return oracle_; }
AstContext* call_context() { return call_context_; }
+ bool drop_extra() { return drop_extra_; }
HBasicBlock* function_return() { return function_return_; }
TestContext* test_context() { return test_context_; }
void ClearInlinedTestContext() {
@@ -630,6 +636,10 @@ class FunctionState BASE_EMBEDDED {
// inlined. NULL when not inlining.
AstContext* call_context_;
+  // Indicates whether we have to drop an extra value from the environment
+  // on return from inlined functions.
+ bool drop_extra_;
+
// When inlining in an effect of value context, this is the return block.
// It is NULL otherwise. When inlining in a test context, there are a
// pair of return blocks in the context. When not inlining, there is no
@@ -647,6 +657,7 @@ class FunctionState BASE_EMBEDDED {
class HGraphBuilder: public AstVisitor {
public:
enum BreakType { BREAK, CONTINUE };
+ enum SwitchType { UNKNOWN_SWITCH, SMI_SWITCH, STRING_SWITCH };
// A class encapsulating (lazily-allocated) break and continue blocks for
// a breakable statement. Separated from BreakAndContinueScope so that it
@@ -726,6 +737,8 @@ class HGraphBuilder: public AstVisitor {
TypeFeedbackOracle* oracle() const { return function_state()->oracle(); }
+ FunctionState* function_state() const { return function_state_; }
+
private:
// Type of a member function that generates inline code for a native function.
typedef void (HGraphBuilder::*InlineFunctionGenerator)(CallRuntime* call);
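
// Self-contained sketch of the member-function-pointer dispatch behind
// InlineFunctionGenerator (Builder and the Generate* names below are
// illustrative stand-ins, not V8's): the generators live in a table and are
// invoked through ->* on the builder instance.
#include <cstdio>

struct Builder {
  void GenerateMathSin() { std::puts("sin generator"); }
  void GenerateMathTan() { std::puts("tan generator"); }
};

typedef void (Builder::*Generator)();

int main() {
  static const Generator kGenerators[] = {
      &Builder::GenerateMathSin,
      &Builder::GenerateMathTan,
  };
  Builder builder;
  (builder.*kGenerators[1])();  // dispatches to the tan generator
  return 0;
}
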
@@ -744,7 +757,6 @@ class HGraphBuilder: public AstVisitor {
static const int kMaxSourceSize = 600;
// Simple accessors.
- FunctionState* function_state() const { return function_state_; }
void set_function_state(FunctionState* state) { function_state_ = state; }
AstContext* ast_context() const { return ast_context_; }
@@ -767,8 +779,9 @@ class HGraphBuilder: public AstVisitor {
void ClearInlinedTestContext() {
function_state()->ClearInlinedTestContext();
}
- bool function_strict_mode() {
- return function_state()->compilation_info()->is_strict_mode();
+ StrictModeFlag function_strict_mode_flag() {
+ return function_state()->compilation_info()->is_classic_mode()
+ ? kNonStrictMode : kStrictMode;
}
// Generators for inline runtime functions.
@@ -780,7 +793,7 @@ class HGraphBuilder: public AstVisitor {
#undef INLINE_FUNCTION_GENERATOR_DECLARATION
void HandleDeclaration(VariableProxy* proxy,
- Variable::Mode mode,
+ VariableMode mode,
FunctionLiteral* function);
void VisitDelete(UnaryOperation* expr);
@@ -881,7 +894,7 @@ class HGraphBuilder: public AstVisitor {
// Try to optimize fun.apply(receiver, arguments) pattern.
bool TryCallApply(Call* expr);
- bool TryInline(Call* expr);
+ bool TryInline(Call* expr, bool drop_extra = false);
bool TryInlineBuiltinFunction(Call* expr,
HValue* receiver,
Handle<Map> receiver_map,
@@ -910,11 +923,12 @@ class HGraphBuilder: public AstVisitor {
HValue* receiver,
SmallMapList* types,
Handle<String> name);
- void HandleLiteralCompareTypeof(CompareOperation* compare_expr,
- Expression* expr,
+ void HandleLiteralCompareTypeof(CompareOperation* expr,
+ HTypeof* typeof_expr,
Handle<String> check);
- void HandleLiteralCompareUndefined(CompareOperation* compare_expr,
- Expression* expr);
+ void HandleLiteralCompareNil(CompareOperation* expr,
+ HValue* value,
+ NilValue nil);
HStringCharCodeAt* BuildStringCharCodeAt(HValue* context,
HValue* string,
@@ -938,11 +952,16 @@ class HGraphBuilder: public AstVisitor {
HValue* val,
ElementsKind elements_kind,
bool is_store);
+ HInstruction* BuildFastElementAccess(HValue* elements,
+ HValue* checked_key,
+ HValue* val,
+ ElementsKind elements_kind,
+ bool is_store);
HInstruction* BuildMonomorphicElementAccess(HValue* object,
HValue* key,
HValue* val,
- Expression* expr,
+ Handle<Map> map,
bool is_store);
HValue* HandlePolymorphicElementAccess(HValue* object,
HValue* key,
diff --git a/deps/v8/src/ia32/assembler-ia32-inl.h b/deps/v8/src/ia32/assembler-ia32-inl.h
index 0ca2d6b4a..5f67077ad 100644
--- a/deps/v8/src/ia32/assembler-ia32-inl.h
+++ b/deps/v8/src/ia32/assembler-ia32-inl.h
@@ -78,7 +78,9 @@ Address RelocInfo::target_address() {
Address RelocInfo::target_address_address() {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
+ ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY
+ || rmode_ == EMBEDDED_OBJECT
+ || rmode_ == EXTERNAL_REFERENCE);
return reinterpret_cast<Address>(pc_);
}
@@ -88,9 +90,14 @@ int RelocInfo::target_address_size() {
}
-void RelocInfo::set_target_address(Address target) {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
+void RelocInfo::set_target_address(Address target, WriteBarrierMode mode) {
Assembler::set_target_address_at(pc_, target);
+ ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
+ if (mode == UPDATE_WRITE_BARRIER && host() != NULL && IsCodeTarget(rmode_)) {
+ Object* target_code = Code::GetCodeFromTargetAddress(target);
+ host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
+ host(), this, HeapObject::cast(target_code));
+ }
}
@@ -112,10 +119,16 @@ Object** RelocInfo::target_object_address() {
}
-void RelocInfo::set_target_object(Object* target) {
+void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
Memory::Object_at(pc_) = target;
CPU::FlushICache(pc_, sizeof(Address));
+ if (mode == UPDATE_WRITE_BARRIER &&
+ host() != NULL &&
+ target->IsHeapObject()) {
+ host()->GetHeap()->incremental_marking()->RecordWrite(
+ host(), &Memory::Object_at(pc_), HeapObject::cast(target));
+ }
}
@@ -142,11 +155,18 @@ JSGlobalPropertyCell* RelocInfo::target_cell() {
}
-void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell) {
+void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell,
+ WriteBarrierMode mode) {
ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
Address address = cell->address() + JSGlobalPropertyCell::kValueOffset;
Memory::Address_at(pc_) = address;
CPU::FlushICache(pc_, sizeof(Address));
+ if (mode == UPDATE_WRITE_BARRIER && host() != NULL) {
+    // TODO(1550) We are passing NULL as a slot because a cell can never be
+    // on an evacuation candidate.
+ host()->GetHeap()->incremental_marking()->RecordWrite(
+ host(), NULL, cell);
+ }
}
@@ -161,6 +181,11 @@ void RelocInfo::set_call_address(Address target) {
ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
Assembler::set_target_address_at(pc_ + 1, target);
+ if (host() != NULL) {
+ Object* target_code = Code::GetCodeFromTargetAddress(target);
+ host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
+ host(), this, HeapObject::cast(target_code));
+ }
}
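
// Toy model of the incremental-marking write barrier that the RelocInfo
// setters above now invoke (every name here is illustrative, not V8's API):
// while marking is active, each mutated slot is recorded so the collector
// can re-scan it and never miss a newly created edge.
#include <cstdio>
#include <vector>

struct Marker {
  bool active = false;
  std::vector<void**> dirty_slots;
  void RecordWrite(void** slot) {
    if (active) dirty_slots.push_back(slot);
  }
};

static void StorePointer(void** slot, void* value, Marker* marker) {
  *slot = value;              // the raw store
  marker->RecordWrite(slot);  // barrier: remember the mutated slot
}

int main() {
  Marker marker;
  marker.active = true;
  void* object = &marker;     // stand-in heap object
  void* slot = nullptr;
  StorePointer(&slot, object, &marker);
  std::printf("recorded %zu dirty slot(s)\n", marker.dirty_slots.size());
  return 0;
}
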
@@ -194,14 +219,14 @@ bool RelocInfo::IsPatchedDebugBreakSlotSequence() {
void RelocInfo::Visit(ObjectVisitor* visitor) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
- visitor->VisitPointer(target_object_address());
+ visitor->VisitEmbeddedPointer(this);
CPU::FlushICache(pc_, sizeof(Address));
} else if (RelocInfo::IsCodeTarget(mode)) {
visitor->VisitCodeTarget(this);
} else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
visitor->VisitGlobalPropertyCell(this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
- visitor->VisitExternalReference(target_reference_address());
+ visitor->VisitExternalReference(this);
CPU::FlushICache(pc_, sizeof(Address));
#ifdef ENABLE_DEBUGGER_SUPPORT
// TODO(isolates): Get a cached isolate below.
@@ -222,14 +247,14 @@ template<typename StaticVisitor>
void RelocInfo::Visit(Heap* heap) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
- StaticVisitor::VisitPointer(heap, target_object_address());
+ StaticVisitor::VisitEmbeddedPointer(heap, this);
CPU::FlushICache(pc_, sizeof(Address));
} else if (RelocInfo::IsCodeTarget(mode)) {
StaticVisitor::VisitCodeTarget(heap, this);
} else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
StaticVisitor::VisitGlobalPropertyCell(heap, this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
- StaticVisitor::VisitExternalReference(target_reference_address());
+ StaticVisitor::VisitExternalReference(this);
CPU::FlushICache(pc_, sizeof(Address));
#ifdef ENABLE_DEBUGGER_SUPPORT
} else if (heap->isolate()->debug()->has_break_points() &&
diff --git a/deps/v8/src/ia32/assembler-ia32.cc b/deps/v8/src/ia32/assembler-ia32.cc
index 999647487..fb625fb24 100644
--- a/deps/v8/src/ia32/assembler-ia32.cc
+++ b/deps/v8/src/ia32/assembler-ia32.cc
@@ -55,6 +55,8 @@ uint64_t CpuFeatures::supported_ = 0;
uint64_t CpuFeatures::found_by_runtime_probing_ = 0;
+// The Probe method needs executable memory, so it uses Heap::CreateCode.
+// Allocation failure is silent and leads to a safe default.
void CpuFeatures::Probe() {
ASSERT(!initialized_);
ASSERT(supported_ == 0);
@@ -86,23 +88,23 @@ void CpuFeatures::Probe() {
__ pushfd();
__ push(ecx);
__ push(ebx);
- __ mov(ebp, Operand(esp));
+ __ mov(ebp, esp);
// If we can modify bit 21 of the EFLAGS register, then CPUID is supported.
__ pushfd();
__ pop(eax);
- __ mov(edx, Operand(eax));
+ __ mov(edx, eax);
__ xor_(eax, 0x200000); // Flip bit 21.
__ push(eax);
__ popfd();
__ pushfd();
__ pop(eax);
- __ xor_(eax, Operand(edx)); // Different if CPUID is supported.
+ __ xor_(eax, edx); // Different if CPUID is supported.
__ j(not_zero, &cpuid);
// CPUID not supported. Clear the supported features in edx:eax.
- __ xor_(eax, Operand(eax));
- __ xor_(edx, Operand(edx));
+ __ xor_(eax, eax);
+ __ xor_(edx, edx);
__ jmp(&done);
// Invoke CPUID with 1 in eax to get feature information in
@@ -118,13 +120,13 @@ void CpuFeatures::Probe() {
// Move the result from ecx:edx to edx:eax and make sure to mark the
// CPUID feature as supported.
- __ mov(eax, Operand(edx));
+ __ mov(eax, edx);
__ or_(eax, 1 << CPUID);
- __ mov(edx, Operand(ecx));
+ __ mov(edx, ecx);
// Done.
__ bind(&done);
- __ mov(esp, Operand(ebp));
+ __ mov(esp, ebp);
__ pop(ebx);
__ pop(ecx);
__ popfd();
@@ -286,6 +288,18 @@ bool Operand::is_reg(Register reg) const {
&& ((buf_[0] & 0x07) == reg.code()); // register codes match.
}
+
+bool Operand::is_reg_only() const {
+ return (buf_[0] & 0xF8) == 0xC0; // Addressing mode is register only.
+}
+
+
+Register Operand::reg() const {
+ ASSERT(is_reg_only());
+ return Register::from_code(buf_[0] & 0x07);
+}
+
+
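
// Standalone sketch of the ModRM decoding these helpers rely on: the top
// two bits of the ModRM byte select the addressing mode (0b11 = register
// direct) and, since the Operand buffer leaves the /reg field zero until
// emit_operand fills it in, masking with 0xF8 checks both conditions at
// once; the low three bits then hold the register code.
#include <cassert>
#include <cstdint>

static bool IsRegOnly(uint8_t modrm) { return (modrm & 0xF8) == 0xC0; }
static int RegCode(uint8_t modrm) {
  assert(IsRegOnly(modrm));
  return modrm & 0x07;
}

int main() {
  assert(IsRegOnly(0xC3) && RegCode(0xC3) == 3);  // register-direct ebx
  assert(!IsRegOnly(0x03));                       // memory operand [ebx]
  return 0;
}
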
// -----------------------------------------------------------------------------
// Implementation of Assembler.
@@ -614,26 +628,6 @@ void Assembler::movzx_w(Register dst, const Operand& src) {
}
-void Assembler::cmov(Condition cc, Register dst, int32_t imm32) {
- ASSERT(CpuFeatures::IsEnabled(CMOV));
- EnsureSpace ensure_space(this);
- UNIMPLEMENTED();
- USE(cc);
- USE(dst);
- USE(imm32);
-}
-
-
-void Assembler::cmov(Condition cc, Register dst, Handle<Object> handle) {
- ASSERT(CpuFeatures::IsEnabled(CMOV));
- EnsureSpace ensure_space(this);
- UNIMPLEMENTED();
- USE(cc);
- USE(dst);
- USE(handle);
-}
-
-
void Assembler::cmov(Condition cc, Register dst, const Operand& src) {
ASSERT(CpuFeatures::IsEnabled(CMOV));
EnsureSpace ensure_space(this);
@@ -701,6 +695,13 @@ void Assembler::add(Register dst, const Operand& src) {
}
+void Assembler::add(const Operand& dst, Register src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x01);
+ emit_operand(src, dst);
+}
+
+
void Assembler::add(const Operand& dst, const Immediate& x) {
ASSERT(reloc_info_writer.last_pc() != NULL);
EnsureSpace ensure_space(this);
@@ -741,25 +742,29 @@ void Assembler::and_(const Operand& dst, Register src) {
void Assembler::cmpb(const Operand& op, int8_t imm8) {
EnsureSpace ensure_space(this);
- EMIT(0x80);
- emit_operand(edi, op); // edi == 7
+ if (op.is_reg(eax)) {
+ EMIT(0x3C);
+ } else {
+ EMIT(0x80);
+ emit_operand(edi, op); // edi == 7
+ }
EMIT(imm8);
}
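
// Sketch of the encoding choice the cmpb change above makes: comparing an
// 8-bit immediate against al has a dedicated two-byte form (3C ib), while
// the general form (80 /7 ib) also needs a ModRM byte. Register codes are
// the usual ia32 ones; eax/al is code 0 and /7 is the CMP opcode extension.
#include <cassert>
#include <cstdint>
#include <vector>

static std::vector<uint8_t> EncodeCmpbImm(int reg_code, uint8_t imm8) {
  if (reg_code == 0) {
    return {0x3C, imm8};                             // cmp al, imm8
  }
  uint8_t modrm = 0xC0 | (7 << 3) | (reg_code & 7);  // register direct, /7
  return {0x80, modrm, imm8};                        // cmp r/m8, imm8
}

int main() {
  assert(EncodeCmpbImm(0, 0x10).size() == 2);  // short form for al
  assert(EncodeCmpbImm(3, 0x10).size() == 3);  // general form for bl
  return 0;
}
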
-void Assembler::cmpb(const Operand& dst, Register src) {
- ASSERT(src.is_byte_register());
+void Assembler::cmpb(const Operand& op, Register reg) {
+ ASSERT(reg.is_byte_register());
EnsureSpace ensure_space(this);
EMIT(0x38);
- emit_operand(src, dst);
+ emit_operand(reg, op);
}
-void Assembler::cmpb(Register dst, const Operand& src) {
- ASSERT(dst.is_byte_register());
+void Assembler::cmpb(Register reg, const Operand& op) {
+ ASSERT(reg.is_byte_register());
EnsureSpace ensure_space(this);
EMIT(0x3A);
- emit_operand(dst, src);
+ emit_operand(reg, op);
}
@@ -1069,18 +1074,6 @@ void Assembler::shr_cl(Register dst) {
}
-void Assembler::subb(const Operand& op, int8_t imm8) {
- EnsureSpace ensure_space(this);
- if (op.is_reg(eax)) {
- EMIT(0x2c);
- } else {
- EMIT(0x80);
- emit_operand(ebp, op); // ebp == 5
- }
- EMIT(imm8);
-}
-
-
void Assembler::sub(const Operand& dst, const Immediate& x) {
EnsureSpace ensure_space(this);
emit_arith(5, dst, x);
@@ -1094,14 +1087,6 @@ void Assembler::sub(Register dst, const Operand& src) {
}
-void Assembler::subb(Register dst, const Operand& src) {
- ASSERT(dst.code() < 4);
- EnsureSpace ensure_space(this);
- EMIT(0x2A);
- emit_operand(dst, src);
-}
-
-
void Assembler::sub(const Operand& dst, Register src) {
EnsureSpace ensure_space(this);
EMIT(0x29);
@@ -1158,6 +1143,10 @@ void Assembler::test(const Operand& op, const Immediate& imm) {
void Assembler::test_b(const Operand& op, uint8_t imm8) {
+ if (op.is_reg_only() && op.reg().code() >= 4) {
+ test(op, Immediate(imm8));
+ return;
+ }
EnsureSpace ensure_space(this);
EMIT(0xF6);
emit_operand(eax, op);
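
// Sketch of the constraint behind the new test_b fallback: on ia32 only
// register codes 0-3 (al, cl, dl, bl) are byte-addressable; codes 4-7 would
// select ah/ch/dh/bh in a byte encoding, so for those registers the encoder
// falls back to the 32-bit test with the zero-extended immediate, which
// yields the same zero-flag result for an 8-bit mask.
#include <cassert>

static bool HasByteForm(int reg_code) {
  return reg_code >= 0 && reg_code < 4;
}

int main() {
  assert(HasByteForm(0));    // eax -> al works
  assert(!HasByteForm(4));   // esp has no low-byte form on ia32
  assert(!HasByteForm(7));   // edi has no low-byte form on ia32
  return 0;
}
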
@@ -1178,10 +1167,10 @@ void Assembler::xor_(Register dst, const Operand& src) {
}
-void Assembler::xor_(const Operand& src, Register dst) {
+void Assembler::xor_(const Operand& dst, Register src) {
EnsureSpace ensure_space(this);
EMIT(0x31);
- emit_operand(dst, src);
+ emit_operand(src, dst);
}
@@ -1637,6 +1626,13 @@ void Assembler::fsin() {
}
+void Assembler::fptan() {
+ EnsureSpace ensure_space(this);
+ EMIT(0xD9);
+ EMIT(0xF2);
+}
+
+
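
// GCC inline-asm sketch (assumes an x86 target with an x87 FPU; this is a
// demonstration, not V8 code) showing why code using the new fptan must
// clean up afterwards: FPTAN replaces st0 with tan(st0) and then pushes
// 1.0, so the constant has to be popped to leave the tangent on top.
#include <cstdio>

static double TanX87(double x) {
  double result;
  __asm__("fptan\n\t"        // st0 = 1.0, st1 = tan(x)
          "fstp %%st(0)"     // pop the 1.0; tan(x) is now st0
          : "=t"(result)
          : "0"(x));
  return result;
}

int main() {
  std::printf("%f\n", TanX87(0.5));  // ~0.546302
  return 0;
}
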
void Assembler::fyl2x() {
EnsureSpace ensure_space(this);
EMIT(0xD9);
@@ -2471,7 +2467,7 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
return;
}
}
- RelocInfo rinfo(pc_, rmode, data);
+ RelocInfo rinfo(pc_, rmode, data, NULL);
reloc_info_writer.Write(&rinfo);
}
diff --git a/deps/v8/src/ia32/assembler-ia32.h b/deps/v8/src/ia32/assembler-ia32.h
index 4698e3ed1..d798f818a 100644
--- a/deps/v8/src/ia32/assembler-ia32.h
+++ b/deps/v8/src/ia32/assembler-ia32.h
@@ -75,6 +75,8 @@ struct Register {
static inline Register FromAllocationIndex(int index);
static Register from_code(int code) {
+ ASSERT(code >= 0);
+ ASSERT(code < kNumRegisters);
Register r = { code };
return r;
}
@@ -300,9 +302,6 @@ enum ScaleFactor {
class Operand BASE_EMBEDDED {
public:
- // reg
- INLINE(explicit Operand(Register reg));
-
// XMM reg
INLINE(explicit Operand(XMMRegister xmm_reg));
@@ -347,12 +346,16 @@ class Operand BASE_EMBEDDED {
// Returns true if this Operand is a wrapper for the specified register.
bool is_reg(Register reg) const;
+ // Returns true if this Operand is a wrapper for one register.
+ bool is_reg_only() const;
+
+ // Asserts that this Operand is a wrapper for one register and returns the
+ // register.
+ Register reg() const;
+
private:
- byte buf_[6];
- // The number of bytes in buf_.
- unsigned int len_;
- // Only valid if len_ > 4.
- RelocInfo::Mode rmode_;
+ // reg
+ INLINE(explicit Operand(Register reg));
// Set the ModRM byte without an encoded 'reg' register. The
// register is encoded later as part of the emit_operand operation.
@@ -362,7 +365,15 @@ class Operand BASE_EMBEDDED {
inline void set_disp8(int8_t disp);
inline void set_dispr(int32_t disp, RelocInfo::Mode rmode);
+ byte buf_[6];
+ // The number of bytes in buf_.
+ unsigned int len_;
+ // Only valid if len_ > 4.
+ RelocInfo::Mode rmode_;
+
friend class Assembler;
+ friend class MacroAssembler;
+ friend class LCodeGen;
};
@@ -671,7 +682,9 @@ class Assembler : public AssemblerBase {
void leave();
// Moves
+ void mov_b(Register dst, Register src) { mov_b(dst, Operand(src)); }
void mov_b(Register dst, const Operand& src);
+ void mov_b(Register dst, int8_t imm8) { mov_b(Operand(dst), imm8); }
void mov_b(const Operand& dst, int8_t imm8);
void mov_b(const Operand& dst, Register src);
@@ -687,17 +700,22 @@ class Assembler : public AssemblerBase {
void mov(const Operand& dst, Handle<Object> handle);
void mov(const Operand& dst, Register src);
+ void movsx_b(Register dst, Register src) { movsx_b(dst, Operand(src)); }
void movsx_b(Register dst, const Operand& src);
+ void movsx_w(Register dst, Register src) { movsx_w(dst, Operand(src)); }
void movsx_w(Register dst, const Operand& src);
+ void movzx_b(Register dst, Register src) { movzx_b(dst, Operand(src)); }
void movzx_b(Register dst, const Operand& src);
+ void movzx_w(Register dst, Register src) { movzx_w(dst, Operand(src)); }
void movzx_w(Register dst, const Operand& src);
// Conditional moves
- void cmov(Condition cc, Register dst, int32_t imm32);
- void cmov(Condition cc, Register dst, Handle<Object> handle);
+ void cmov(Condition cc, Register dst, Register src) {
+ cmov(cc, dst, Operand(src));
+ }
void cmov(Condition cc, Register dst, const Operand& src);
// Flag management.
@@ -715,24 +733,31 @@ class Assembler : public AssemblerBase {
void adc(Register dst, int32_t imm32);
void adc(Register dst, const Operand& src);
+ void add(Register dst, Register src) { add(dst, Operand(src)); }
void add(Register dst, const Operand& src);
+ void add(const Operand& dst, Register src);
+ void add(Register dst, const Immediate& imm) { add(Operand(dst), imm); }
void add(const Operand& dst, const Immediate& x);
void and_(Register dst, int32_t imm32);
void and_(Register dst, const Immediate& x);
+ void and_(Register dst, Register src) { and_(dst, Operand(src)); }
void and_(Register dst, const Operand& src);
- void and_(const Operand& src, Register dst);
+ void and_(const Operand& dst, Register src);
void and_(const Operand& dst, const Immediate& x);
+ void cmpb(Register reg, int8_t imm8) { cmpb(Operand(reg), imm8); }
void cmpb(const Operand& op, int8_t imm8);
- void cmpb(Register src, const Operand& dst);
- void cmpb(const Operand& dst, Register src);
+ void cmpb(Register reg, const Operand& op);
+ void cmpb(const Operand& op, Register reg);
void cmpb_al(const Operand& op);
void cmpw_ax(const Operand& op);
void cmpw(const Operand& op, Immediate imm16);
void cmp(Register reg, int32_t imm32);
void cmp(Register reg, Handle<Object> handle);
+ void cmp(Register reg0, Register reg1) { cmp(reg0, Operand(reg1)); }
void cmp(Register reg, const Operand& op);
+ void cmp(Register reg, const Immediate& imm) { cmp(Operand(reg), imm); }
void cmp(const Operand& op, const Immediate& imm);
void cmp(const Operand& op, Handle<Object> handle);
@@ -748,6 +773,7 @@ class Assembler : public AssemblerBase {
// Signed multiply instructions.
void imul(Register src); // edx:eax = eax * src.
+ void imul(Register dst, Register src) { imul(dst, Operand(src)); }
void imul(Register dst, const Operand& src); // dst = dst * src.
void imul(Register dst, Register src, int32_t imm32); // dst = src * imm32.
@@ -764,8 +790,10 @@ class Assembler : public AssemblerBase {
void not_(Register dst);
void or_(Register dst, int32_t imm32);
+ void or_(Register dst, Register src) { or_(dst, Operand(src)); }
void or_(Register dst, const Operand& src);
void or_(const Operand& dst, Register src);
+ void or_(Register dst, const Immediate& imm) { or_(Operand(dst), imm); }
void or_(const Operand& dst, const Immediate& x);
void rcl(Register dst, uint8_t imm8);
@@ -776,35 +804,42 @@ class Assembler : public AssemblerBase {
void sbb(Register dst, const Operand& src);
+ void shld(Register dst, Register src) { shld(dst, Operand(src)); }
void shld(Register dst, const Operand& src);
void shl(Register dst, uint8_t imm8);
void shl_cl(Register dst);
+ void shrd(Register dst, Register src) { shrd(dst, Operand(src)); }
void shrd(Register dst, const Operand& src);
void shr(Register dst, uint8_t imm8);
void shr_cl(Register dst);
- void subb(const Operand& dst, int8_t imm8);
- void subb(Register dst, const Operand& src);
+ void sub(Register dst, const Immediate& imm) { sub(Operand(dst), imm); }
void sub(const Operand& dst, const Immediate& x);
+ void sub(Register dst, Register src) { sub(dst, Operand(src)); }
void sub(Register dst, const Operand& src);
void sub(const Operand& dst, Register src);
void test(Register reg, const Immediate& imm);
+ void test(Register reg0, Register reg1) { test(reg0, Operand(reg1)); }
void test(Register reg, const Operand& op);
void test_b(Register reg, const Operand& op);
void test(const Operand& op, const Immediate& imm);
+ void test_b(Register reg, uint8_t imm8) { test_b(Operand(reg), imm8); }
void test_b(const Operand& op, uint8_t imm8);
void xor_(Register dst, int32_t imm32);
+ void xor_(Register dst, Register src) { xor_(dst, Operand(src)); }
void xor_(Register dst, const Operand& src);
- void xor_(const Operand& src, Register dst);
+ void xor_(const Operand& dst, Register src);
+ void xor_(Register dst, const Immediate& imm) { xor_(Operand(dst), imm); }
void xor_(const Operand& dst, const Immediate& x);
// Bit operations.
void bt(const Operand& dst, Register src);
+ void bts(Register dst, Register src) { bts(Operand(dst), src); }
void bts(const Operand& dst, Register src);
// Miscellaneous
@@ -835,6 +870,7 @@ class Assembler : public AssemblerBase {
void call(Label* L);
void call(byte* entry, RelocInfo::Mode rmode);
int CallSize(const Operand& adr);
+ void call(Register reg) { call(Operand(reg)); }
void call(const Operand& adr);
int CallSize(Handle<Code> code, RelocInfo::Mode mode);
void call(Handle<Code> code,
@@ -845,6 +881,7 @@ class Assembler : public AssemblerBase {
// unconditional jump to L
void jmp(Label* L, Label::Distance distance = Label::kFar);
void jmp(byte* entry, RelocInfo::Mode rmode);
+ void jmp(Register reg) { jmp(Operand(reg)); }
void jmp(const Operand& adr);
void jmp(Handle<Code> code, RelocInfo::Mode rmode);
@@ -887,6 +924,7 @@ class Assembler : public AssemblerBase {
void fchs();
void fcos();
void fsin();
+ void fptan();
void fyl2x();
void fadd(int i);
@@ -929,6 +967,7 @@ class Assembler : public AssemblerBase {
void cvttss2si(Register dst, const Operand& src);
void cvttsd2si(Register dst, const Operand& src);
+ void cvtsi2sd(XMMRegister dst, Register src) { cvtsi2sd(dst, Operand(src)); }
void cvtsi2sd(XMMRegister dst, const Operand& src);
void cvtss2sd(XMMRegister dst, XMMRegister src);
void cvtsd2ss(XMMRegister dst, XMMRegister src);
@@ -969,12 +1008,14 @@ class Assembler : public AssemblerBase {
void movdbl(XMMRegister dst, const Operand& src);
void movdbl(const Operand& dst, XMMRegister src);
+ void movd(XMMRegister dst, Register src) { movd(dst, Operand(src)); }
void movd(XMMRegister dst, const Operand& src);
- void movd(const Operand& src, XMMRegister dst);
+ void movd(Register dst, XMMRegister src) { movd(Operand(dst), src); }
+ void movd(const Operand& dst, XMMRegister src);
void movsd(XMMRegister dst, XMMRegister src);
void movss(XMMRegister dst, const Operand& src);
- void movss(const Operand& src, XMMRegister dst);
+ void movss(const Operand& dst, XMMRegister src);
void movss(XMMRegister dst, XMMRegister src);
void pand(XMMRegister dst, XMMRegister src);
@@ -987,11 +1028,17 @@ class Assembler : public AssemblerBase {
void psrlq(XMMRegister reg, int8_t shift);
void psrlq(XMMRegister dst, XMMRegister src);
void pshufd(XMMRegister dst, XMMRegister src, int8_t shuffle);
+ void pextrd(Register dst, XMMRegister src, int8_t offset) {
+ pextrd(Operand(dst), src, offset);
+ }
void pextrd(const Operand& dst, XMMRegister src, int8_t offset);
+ void pinsrd(XMMRegister dst, Register src, int8_t offset) {
+ pinsrd(dst, Operand(src), offset);
+ }
void pinsrd(XMMRegister dst, const Operand& src, int8_t offset);
// Parallel XMM operations.
- void movntdqa(XMMRegister src, const Operand& dst);
+ void movntdqa(XMMRegister dst, const Operand& src);
void movntdq(const Operand& dst, XMMRegister src);
// Prefetch src position into cache level.
// Level 1, 2 or 3 specifies CPU cache level. Level 0 specifies a
@@ -1045,6 +1092,9 @@ class Assembler : public AssemblerBase {
static const int kMaximalBufferSize = 512*MB;
static const int kMinimalBufferSize = 4*KB;
+ byte byte_at(int pos) { return buffer_[pos]; }
+ void set_byte_at(int pos, byte value) { buffer_[pos] = value; }
+
protected:
bool emit_debug_code() const { return emit_debug_code_; }
@@ -1057,9 +1107,8 @@ class Assembler : public AssemblerBase {
byte* addr_at(int pos) { return buffer_ + pos; }
+
private:
- byte byte_at(int pos) { return buffer_[pos]; }
- void set_byte_at(int pos, byte value) { buffer_[pos] = value; }
uint32_t long_at(int pos) {
return *reinterpret_cast<uint32_t*>(addr_at(pos));
}
diff --git a/deps/v8/src/ia32/builtins-ia32.cc b/deps/v8/src/ia32/builtins-ia32.cc
index 310ea3d12..e12e79af7 100644
--- a/deps/v8/src/ia32/builtins-ia32.cc
+++ b/deps/v8/src/ia32/builtins-ia32.cc
@@ -69,7 +69,7 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
// JumpToExternalReference expects eax to contain the number of arguments
// including the receiver and the extra arguments.
- __ add(Operand(eax), Immediate(num_extra_args + 1));
+ __ add(eax, Immediate(num_extra_args + 1));
__ JumpToExternalReference(ExternalReference(id, masm->isolate()));
}
@@ -80,25 +80,34 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
// -- edi: constructor function
// -----------------------------------
- Label non_function_call;
+ Label slow, non_function_call;
// Check that function is not a smi.
__ JumpIfSmi(edi, &non_function_call);
// Check that function is a JSFunction.
__ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
- __ j(not_equal, &non_function_call);
+ __ j(not_equal, &slow);
// Jump to the function-specific construct stub.
__ mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
__ mov(ebx, FieldOperand(ebx, SharedFunctionInfo::kConstructStubOffset));
__ lea(ebx, FieldOperand(ebx, Code::kHeaderSize));
- __ jmp(Operand(ebx));
+ __ jmp(ebx);
// edi: called object
// eax: number of arguments
+ // ecx: object map
+ Label do_call;
+ __ bind(&slow);
+ __ CmpInstanceType(ecx, JS_FUNCTION_PROXY_TYPE);
+ __ j(not_equal, &non_function_call);
+ __ GetBuiltinEntry(edx, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
+ __ jmp(&do_call);
+
__ bind(&non_function_call);
+ __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
+ __ bind(&do_call);
// Set expected number of arguments to zero (not changing eax).
__ Set(ebx, Immediate(0));
- __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
Handle<Code> arguments_adaptor =
masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
__ SetCallKind(ecx, CALL_AS_METHOD);
@@ -113,264 +122,271 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
ASSERT(!is_api_function || !count_constructions);
// Enter a construct frame.
- __ EnterConstructFrame();
+ {
+ FrameScope scope(masm, StackFrame::CONSTRUCT);
- // Store a smi-tagged arguments count on the stack.
- __ SmiTag(eax);
- __ push(eax);
+ // Store a smi-tagged arguments count on the stack.
+ __ SmiTag(eax);
+ __ push(eax);
- // Push the function to invoke on the stack.
- __ push(edi);
+ // Push the function to invoke on the stack.
+ __ push(edi);
- // Try to allocate the object without transitioning into C code. If any of the
- // preconditions is not met, the code bails out to the runtime call.
- Label rt_call, allocated;
- if (FLAG_inline_new) {
- Label undo_allocation;
+ // Try to allocate the object without transitioning into C code. If any of
+ // the preconditions is not met, the code bails out to the runtime call.
+ Label rt_call, allocated;
+ if (FLAG_inline_new) {
+ Label undo_allocation;
#ifdef ENABLE_DEBUGGER_SUPPORT
- ExternalReference debug_step_in_fp =
- ExternalReference::debug_step_in_fp_address(masm->isolate());
- __ cmp(Operand::StaticVariable(debug_step_in_fp), Immediate(0));
- __ j(not_equal, &rt_call);
+ ExternalReference debug_step_in_fp =
+ ExternalReference::debug_step_in_fp_address(masm->isolate());
+ __ cmp(Operand::StaticVariable(debug_step_in_fp), Immediate(0));
+ __ j(not_equal, &rt_call);
#endif
- // Verified that the constructor is a JSFunction.
- // Load the initial map and verify that it is in fact a map.
- // edi: constructor
- __ mov(eax, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
- // Will both indicate a NULL and a Smi
- __ JumpIfSmi(eax, &rt_call);
- // edi: constructor
- // eax: initial map (if proven valid below)
- __ CmpObjectType(eax, MAP_TYPE, ebx);
- __ j(not_equal, &rt_call);
-
- // Check that the constructor is not constructing a JSFunction (see comments
- // in Runtime_NewObject in runtime.cc). In which case the initial map's
- // instance type would be JS_FUNCTION_TYPE.
- // edi: constructor
- // eax: initial map
- __ CmpInstanceType(eax, JS_FUNCTION_TYPE);
- __ j(equal, &rt_call);
-
- if (count_constructions) {
- Label allocate;
- // Decrease generous allocation count.
- __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ dec_b(FieldOperand(ecx, SharedFunctionInfo::kConstructionCountOffset));
- __ j(not_zero, &allocate);
+ // Verified that the constructor is a JSFunction.
+ // Load the initial map and verify that it is in fact a map.
+ // edi: constructor
+ __ mov(eax, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
+      // A smi check will catch both a NULL and a Smi.
+ __ JumpIfSmi(eax, &rt_call);
+ // edi: constructor
+ // eax: initial map (if proven valid below)
+ __ CmpObjectType(eax, MAP_TYPE, ebx);
+ __ j(not_equal, &rt_call);
+
+      // Check that the constructor is not constructing a JSFunction (see
+      // comments in Runtime_NewObject in runtime.cc); in that case the
+      // initial map's instance type would be JS_FUNCTION_TYPE.
+ // edi: constructor
+ // eax: initial map
+ __ CmpInstanceType(eax, JS_FUNCTION_TYPE);
+ __ j(equal, &rt_call);
- __ push(eax);
- __ push(edi);
+ if (count_constructions) {
+ Label allocate;
+ // Decrease generous allocation count.
+ __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ __ dec_b(FieldOperand(ecx,
+ SharedFunctionInfo::kConstructionCountOffset));
+ __ j(not_zero, &allocate);
- __ push(edi); // constructor
- // The call will replace the stub, so the countdown is only done once.
- __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
+ __ push(eax);
+ __ push(edi);
- __ pop(edi);
- __ pop(eax);
+ __ push(edi); // constructor
+ // The call will replace the stub, so the countdown is only done once.
+ __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
- __ bind(&allocate);
- }
+ __ pop(edi);
+ __ pop(eax);
- // Now allocate the JSObject on the heap.
- // edi: constructor
- // eax: initial map
- __ movzx_b(edi, FieldOperand(eax, Map::kInstanceSizeOffset));
- __ shl(edi, kPointerSizeLog2);
- __ AllocateInNewSpace(edi, ebx, edi, no_reg, &rt_call, NO_ALLOCATION_FLAGS);
- // Allocated the JSObject, now initialize the fields.
- // eax: initial map
- // ebx: JSObject
- // edi: start of next object
- __ mov(Operand(ebx, JSObject::kMapOffset), eax);
- Factory* factory = masm->isolate()->factory();
- __ mov(ecx, factory->empty_fixed_array());
- __ mov(Operand(ebx, JSObject::kPropertiesOffset), ecx);
- __ mov(Operand(ebx, JSObject::kElementsOffset), ecx);
- // Set extra fields in the newly allocated object.
- // eax: initial map
- // ebx: JSObject
- // edi: start of next object
- { Label loop, entry;
- // To allow for truncation.
+ __ bind(&allocate);
+ }
+
+ // Now allocate the JSObject on the heap.
+ // edi: constructor
+ // eax: initial map
+ __ movzx_b(edi, FieldOperand(eax, Map::kInstanceSizeOffset));
+ __ shl(edi, kPointerSizeLog2);
+ __ AllocateInNewSpace(
+ edi, ebx, edi, no_reg, &rt_call, NO_ALLOCATION_FLAGS);
+ // Allocated the JSObject, now initialize the fields.
+ // eax: initial map
+ // ebx: JSObject
+ // edi: start of next object
+ __ mov(Operand(ebx, JSObject::kMapOffset), eax);
+ Factory* factory = masm->isolate()->factory();
+ __ mov(ecx, factory->empty_fixed_array());
+ __ mov(Operand(ebx, JSObject::kPropertiesOffset), ecx);
+ __ mov(Operand(ebx, JSObject::kElementsOffset), ecx);
+ // Set extra fields in the newly allocated object.
+ // eax: initial map
+ // ebx: JSObject
+ // edi: start of next object
+ __ lea(ecx, Operand(ebx, JSObject::kHeaderSize));
+ __ mov(edx, factory->undefined_value());
if (count_constructions) {
+ __ movzx_b(esi,
+ FieldOperand(eax, Map::kPreAllocatedPropertyFieldsOffset));
+ __ lea(esi,
+ Operand(ebx, esi, times_pointer_size, JSObject::kHeaderSize));
+ // esi: offset of first field after pre-allocated fields
+ if (FLAG_debug_code) {
+ __ cmp(esi, edi);
+ __ Assert(less_equal,
+ "Unexpected number of pre-allocated property fields.");
+ }
+ __ InitializeFieldsWithFiller(ecx, esi, edx);
__ mov(edx, factory->one_pointer_filler_map());
- } else {
+ }
+ __ InitializeFieldsWithFiller(ecx, edi, edx);
+
+ // Add the object tag to make the JSObject real, so that we can continue
+ // and jump into the continuation code at any time from now on. Any
+ // failures need to undo the allocation, so that the heap is in a
+ // consistent state and verifiable.
+ // eax: initial map
+ // ebx: JSObject
+ // edi: start of next object
+ __ or_(ebx, Immediate(kHeapObjectTag));
+
+ // Check if a non-empty properties array is needed.
+ // Allocate and initialize a FixedArray if it is.
+ // eax: initial map
+ // ebx: JSObject
+ // edi: start of next object
+ // Calculate the total number of properties described by the map.
+ __ movzx_b(edx, FieldOperand(eax, Map::kUnusedPropertyFieldsOffset));
+ __ movzx_b(ecx,
+ FieldOperand(eax, Map::kPreAllocatedPropertyFieldsOffset));
+ __ add(edx, ecx);
+ // Calculate unused properties past the end of the in-object properties.
+ __ movzx_b(ecx, FieldOperand(eax, Map::kInObjectPropertiesOffset));
+ __ sub(edx, ecx);
+ // Done if no extra properties are to be allocated.
+ __ j(zero, &allocated);
+ __ Assert(positive, "Property allocation count failed.");
+
+ // Scale the number of elements by pointer size and add the header for
+ // FixedArrays to the start of the next object calculation from above.
+ // ebx: JSObject
+ // edi: start of next object (will be start of FixedArray)
+ // edx: number of elements in properties array
+ __ AllocateInNewSpace(FixedArray::kHeaderSize,
+ times_pointer_size,
+ edx,
+ edi,
+ ecx,
+ no_reg,
+ &undo_allocation,
+ RESULT_CONTAINS_TOP);
+
+ // Initialize the FixedArray.
+ // ebx: JSObject
+ // edi: FixedArray
+ // edx: number of elements
+ // ecx: start of next object
+ __ mov(eax, factory->fixed_array_map());
+      __ mov(Operand(edi, FixedArray::kMapOffset), eax);  // set up the map
+ __ SmiTag(edx);
+ __ mov(Operand(edi, FixedArray::kLengthOffset), edx); // and length
+
+ // Initialize the fields to undefined.
+ // ebx: JSObject
+ // edi: FixedArray
+ // ecx: start of next object
+ { Label loop, entry;
__ mov(edx, factory->undefined_value());
+ __ lea(eax, Operand(edi, FixedArray::kHeaderSize));
+ __ jmp(&entry);
+ __ bind(&loop);
+ __ mov(Operand(eax, 0), edx);
+ __ add(eax, Immediate(kPointerSize));
+ __ bind(&entry);
+ __ cmp(eax, ecx);
+ __ j(below, &loop);
}
- __ lea(ecx, Operand(ebx, JSObject::kHeaderSize));
- __ jmp(&entry);
- __ bind(&loop);
- __ mov(Operand(ecx, 0), edx);
- __ add(Operand(ecx), Immediate(kPointerSize));
- __ bind(&entry);
- __ cmp(ecx, Operand(edi));
- __ j(less, &loop);
- }
-
- // Add the object tag to make the JSObject real, so that we can continue and
- // jump into the continuation code at any time from now on. Any failures
- // need to undo the allocation, so that the heap is in a consistent state
- // and verifiable.
- // eax: initial map
- // ebx: JSObject
- // edi: start of next object
- __ or_(Operand(ebx), Immediate(kHeapObjectTag));
-
- // Check if a non-empty properties array is needed.
- // Allocate and initialize a FixedArray if it is.
- // eax: initial map
- // ebx: JSObject
- // edi: start of next object
- // Calculate the total number of properties described by the map.
- __ movzx_b(edx, FieldOperand(eax, Map::kUnusedPropertyFieldsOffset));
- __ movzx_b(ecx, FieldOperand(eax, Map::kPreAllocatedPropertyFieldsOffset));
- __ add(edx, Operand(ecx));
- // Calculate unused properties past the end of the in-object properties.
- __ movzx_b(ecx, FieldOperand(eax, Map::kInObjectPropertiesOffset));
- __ sub(edx, Operand(ecx));
- // Done if no extra properties are to be allocated.
- __ j(zero, &allocated);
- __ Assert(positive, "Property allocation count failed.");
-
- // Scale the number of elements by pointer size and add the header for
- // FixedArrays to the start of the next object calculation from above.
- // ebx: JSObject
- // edi: start of next object (will be start of FixedArray)
- // edx: number of elements in properties array
- __ AllocateInNewSpace(FixedArray::kHeaderSize,
- times_pointer_size,
- edx,
- edi,
- ecx,
- no_reg,
- &undo_allocation,
- RESULT_CONTAINS_TOP);
-
- // Initialize the FixedArray.
- // ebx: JSObject
- // edi: FixedArray
- // edx: number of elements
- // ecx: start of next object
- __ mov(eax, factory->fixed_array_map());
- __ mov(Operand(edi, FixedArray::kMapOffset), eax); // setup the map
- __ SmiTag(edx);
- __ mov(Operand(edi, FixedArray::kLengthOffset), edx); // and length
-
- // Initialize the fields to undefined.
- // ebx: JSObject
- // edi: FixedArray
- // ecx: start of next object
- { Label loop, entry;
- __ mov(edx, factory->undefined_value());
- __ lea(eax, Operand(edi, FixedArray::kHeaderSize));
- __ jmp(&entry);
- __ bind(&loop);
- __ mov(Operand(eax, 0), edx);
- __ add(Operand(eax), Immediate(kPointerSize));
- __ bind(&entry);
- __ cmp(eax, Operand(ecx));
- __ j(below, &loop);
- }
- // Store the initialized FixedArray into the properties field of
- // the JSObject
- // ebx: JSObject
- // edi: FixedArray
- __ or_(Operand(edi), Immediate(kHeapObjectTag)); // add the heap tag
- __ mov(FieldOperand(ebx, JSObject::kPropertiesOffset), edi);
+ // Store the initialized FixedArray into the properties field of
+ // the JSObject
+ // ebx: JSObject
+ // edi: FixedArray
+ __ or_(edi, Immediate(kHeapObjectTag)); // add the heap tag
+ __ mov(FieldOperand(ebx, JSObject::kPropertiesOffset), edi);
- // Continue with JSObject being successfully allocated
- // ebx: JSObject
- __ jmp(&allocated);
+ // Continue with JSObject being successfully allocated
+ // ebx: JSObject
+ __ jmp(&allocated);
- // Undo the setting of the new top so that the heap is verifiable. For
- // example, the map's unused properties potentially do not match the
- // allocated objects unused properties.
- // ebx: JSObject (previous new top)
- __ bind(&undo_allocation);
- __ UndoAllocationInNewSpace(ebx);
- }
+ // Undo the setting of the new top so that the heap is verifiable. For
+ // example, the map's unused properties potentially do not match the
+      // allocated object's unused properties.
+ // ebx: JSObject (previous new top)
+ __ bind(&undo_allocation);
+ __ UndoAllocationInNewSpace(ebx);
+ }
- // Allocate the new receiver object using the runtime call.
- __ bind(&rt_call);
- // Must restore edi (constructor) before calling runtime.
- __ mov(edi, Operand(esp, 0));
- // edi: function (constructor)
- __ push(edi);
- __ CallRuntime(Runtime::kNewObject, 1);
- __ mov(ebx, Operand(eax)); // store result in ebx
+ // Allocate the new receiver object using the runtime call.
+ __ bind(&rt_call);
+ // Must restore edi (constructor) before calling runtime.
+ __ mov(edi, Operand(esp, 0));
+ // edi: function (constructor)
+ __ push(edi);
+ __ CallRuntime(Runtime::kNewObject, 1);
+ __ mov(ebx, eax); // store result in ebx
- // New object allocated.
- // ebx: newly allocated object
- __ bind(&allocated);
- // Retrieve the function from the stack.
- __ pop(edi);
+ // New object allocated.
+ // ebx: newly allocated object
+ __ bind(&allocated);
+ // Retrieve the function from the stack.
+ __ pop(edi);
- // Retrieve smi-tagged arguments count from the stack.
- __ mov(eax, Operand(esp, 0));
- __ SmiUntag(eax);
+ // Retrieve smi-tagged arguments count from the stack.
+ __ mov(eax, Operand(esp, 0));
+ __ SmiUntag(eax);
- // Push the allocated receiver to the stack. We need two copies
- // because we may have to return the original one and the calling
- // conventions dictate that the called function pops the receiver.
- __ push(ebx);
- __ push(ebx);
+ // Push the allocated receiver to the stack. We need two copies
+ // because we may have to return the original one and the calling
+ // conventions dictate that the called function pops the receiver.
+ __ push(ebx);
+ __ push(ebx);
- // Setup pointer to last argument.
- __ lea(ebx, Operand(ebp, StandardFrameConstants::kCallerSPOffset));
+    // Set up the pointer to the last argument.
+ __ lea(ebx, Operand(ebp, StandardFrameConstants::kCallerSPOffset));
- // Copy arguments and receiver to the expression stack.
- Label loop, entry;
- __ mov(ecx, Operand(eax));
- __ jmp(&entry);
- __ bind(&loop);
- __ push(Operand(ebx, ecx, times_4, 0));
- __ bind(&entry);
- __ dec(ecx);
- __ j(greater_equal, &loop);
+ // Copy arguments and receiver to the expression stack.
+ Label loop, entry;
+ __ mov(ecx, eax);
+ __ jmp(&entry);
+ __ bind(&loop);
+ __ push(Operand(ebx, ecx, times_4, 0));
+ __ bind(&entry);
+ __ dec(ecx);
+ __ j(greater_equal, &loop);
+
+ // Call the function.
+ if (is_api_function) {
+ __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
+ Handle<Code> code =
+ masm->isolate()->builtins()->HandleApiCallConstruct();
+ ParameterCount expected(0);
+ __ InvokeCode(code, expected, expected, RelocInfo::CODE_TARGET,
+ CALL_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
+ } else {
+ ParameterCount actual(eax);
+ __ InvokeFunction(edi, actual, CALL_FUNCTION,
+ NullCallWrapper(), CALL_AS_METHOD);
+ }
- // Call the function.
- if (is_api_function) {
- __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
- Handle<Code> code =
- masm->isolate()->builtins()->HandleApiCallConstruct();
- ParameterCount expected(0);
- __ InvokeCode(code, expected, expected, RelocInfo::CODE_TARGET,
- CALL_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
- } else {
- ParameterCount actual(eax);
- __ InvokeFunction(edi, actual, CALL_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
- }
+ // Restore context from the frame.
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- // Restore context from the frame.
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ // If the result is an object (in the ECMA sense), we should get rid
+ // of the receiver and use the result; see ECMA-262 section 13.2.2-7
+ // on page 74.
+ Label use_receiver, exit;
- // If the result is an object (in the ECMA sense), we should get rid
- // of the receiver and use the result; see ECMA-262 section 13.2.2-7
- // on page 74.
- Label use_receiver, exit;
+ // If the result is a smi, it is *not* an object in the ECMA sense.
+ __ JumpIfSmi(eax, &use_receiver);
- // If the result is a smi, it is *not* an object in the ECMA sense.
- __ JumpIfSmi(eax, &use_receiver);
+ // If the type of the result (stored in its map) is less than
+ // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense.
+ __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
+ __ j(above_equal, &exit);
- // If the type of the result (stored in its map) is less than
- // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense.
- __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
- __ j(above_equal, &exit);
+ // Throw away the result of the constructor invocation and use the
+ // on-stack receiver as the result.
+ __ bind(&use_receiver);
+ __ mov(eax, Operand(esp, 0));
- // Throw away the result of the constructor invocation and use the
- // on-stack receiver as the result.
- __ bind(&use_receiver);
- __ mov(eax, Operand(esp, 0));
+ // Restore the arguments count and leave the construct frame.
+ __ bind(&exit);
+ __ mov(ebx, Operand(esp, kPointerSize)); // Get arguments count.
- // Restore the arguments count and leave the construct frame.
- __ bind(&exit);
- __ mov(ebx, Operand(esp, kPointerSize)); // get arguments count
- __ LeaveConstructFrame();
+ // Leave construct frame.
+ }
// Remove caller arguments from the stack and return.
STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
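
// Standalone sketch of the RAII pattern FrameScope introduces in the hunk
// above (the types here are stand-ins, not V8's implementation):
// constructing the scope emits the frame prologue and leaving the C++ block
// emits the epilogue, so no code path can pair an entered frame with a
// forgotten leave.
#include <cstdio>

struct Assembler {
  void EnterFrame() { std::puts("enter frame"); }
  void LeaveFrame() { std::puts("leave frame"); }
};

class FrameScope {
 public:
  explicit FrameScope(Assembler* masm) : masm_(masm) { masm_->EnterFrame(); }
  ~FrameScope() { masm_->LeaveFrame(); }
 private:
  Assembler* masm_;
};

int main() {
  Assembler masm;
  {
    FrameScope scope(&masm);   // prologue emitted here
    std::puts("emit body");
  }                            // epilogue emitted when the scope closes
  return 0;
}
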
@@ -399,57 +415,58 @@ void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
bool is_construct) {
- // Clear the context before we push it when entering the JS frame.
+ // Clear the context before we push it when entering the internal frame.
__ Set(esi, Immediate(0));
- // Enter an internal frame.
- __ EnterInternalFrame();
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
- // Load the previous frame pointer (ebx) to access C arguments
- __ mov(ebx, Operand(ebp, 0));
+ // Load the previous frame pointer (ebx) to access C arguments
+ __ mov(ebx, Operand(ebp, 0));
- // Get the function from the frame and setup the context.
- __ mov(ecx, Operand(ebx, EntryFrameConstants::kFunctionArgOffset));
- __ mov(esi, FieldOperand(ecx, JSFunction::kContextOffset));
-
- // Push the function and the receiver onto the stack.
- __ push(ecx);
- __ push(Operand(ebx, EntryFrameConstants::kReceiverArgOffset));
+    // Get the function from the frame and set up the context.
+ __ mov(ecx, Operand(ebx, EntryFrameConstants::kFunctionArgOffset));
+ __ mov(esi, FieldOperand(ecx, JSFunction::kContextOffset));
- // Load the number of arguments and setup pointer to the arguments.
- __ mov(eax, Operand(ebx, EntryFrameConstants::kArgcOffset));
- __ mov(ebx, Operand(ebx, EntryFrameConstants::kArgvOffset));
+ // Push the function and the receiver onto the stack.
+ __ push(ecx);
+ __ push(Operand(ebx, EntryFrameConstants::kReceiverArgOffset));
- // Copy arguments to the stack in a loop.
- Label loop, entry;
- __ Set(ecx, Immediate(0));
- __ jmp(&entry);
- __ bind(&loop);
- __ mov(edx, Operand(ebx, ecx, times_4, 0)); // push parameter from argv
- __ push(Operand(edx, 0)); // dereference handle
- __ inc(Operand(ecx));
- __ bind(&entry);
- __ cmp(ecx, Operand(eax));
- __ j(not_equal, &loop);
+    // Load the number of arguments and set up the pointer to the arguments.
+ __ mov(eax, Operand(ebx, EntryFrameConstants::kArgcOffset));
+ __ mov(ebx, Operand(ebx, EntryFrameConstants::kArgvOffset));
- // Get the function from the stack and call it.
- __ mov(edi, Operand(esp, eax, times_4, +1 * kPointerSize)); // +1 ~ receiver
+ // Copy arguments to the stack in a loop.
+ Label loop, entry;
+ __ Set(ecx, Immediate(0));
+ __ jmp(&entry);
+ __ bind(&loop);
+ __ mov(edx, Operand(ebx, ecx, times_4, 0)); // push parameter from argv
+ __ push(Operand(edx, 0)); // dereference handle
+ __ inc(ecx);
+ __ bind(&entry);
+ __ cmp(ecx, eax);
+ __ j(not_equal, &loop);
+
+ // Get the function from the stack and call it.
+ // kPointerSize for the receiver.
+ __ mov(edi, Operand(esp, eax, times_4, kPointerSize));
+
+ // Invoke the code.
+ if (is_construct) {
+ __ call(masm->isolate()->builtins()->JSConstructCall(),
+ RelocInfo::CODE_TARGET);
+ } else {
+ ParameterCount actual(eax);
+ __ InvokeFunction(edi, actual, CALL_FUNCTION,
+ NullCallWrapper(), CALL_AS_METHOD);
+ }
- // Invoke the code.
- if (is_construct) {
- __ call(masm->isolate()->builtins()->JSConstructCall(),
- RelocInfo::CODE_TARGET);
- } else {
- ParameterCount actual(eax);
- __ InvokeFunction(edi, actual, CALL_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
+    // Exit the internal frame. Notice that this also removes the empty
+ // context and the function left on the stack by the code
+ // invocation.
}
-
- // Exit the JS frame. Notice that this also removes the empty
- // context and the function left on the stack by the code
- // invocation.
- __ LeaveInternalFrame();
- __ ret(1 * kPointerSize); // remove receiver
+ __ ret(kPointerSize); // Remove receiver.
}
@@ -464,68 +481,68 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
void Builtins::Generate_LazyCompile(MacroAssembler* masm) {
- // Enter an internal frame.
- __ EnterInternalFrame();
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
- // Push a copy of the function.
- __ push(edi);
- // Push call kind information.
- __ push(ecx);
+ // Push a copy of the function.
+ __ push(edi);
+ // Push call kind information.
+ __ push(ecx);
- __ push(edi); // Function is also the parameter to the runtime call.
- __ CallRuntime(Runtime::kLazyCompile, 1);
+ __ push(edi); // Function is also the parameter to the runtime call.
+ __ CallRuntime(Runtime::kLazyCompile, 1);
- // Restore call kind information.
- __ pop(ecx);
- // Restore receiver.
- __ pop(edi);
+ // Restore call kind information.
+ __ pop(ecx);
+ // Restore receiver.
+ __ pop(edi);
- // Tear down temporary frame.
- __ LeaveInternalFrame();
+ // Tear down internal frame.
+ }
// Do a tail-call of the compiled function.
__ lea(eax, FieldOperand(eax, Code::kHeaderSize));
- __ jmp(Operand(eax));
+ __ jmp(eax);
}
void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
- // Enter an internal frame.
- __ EnterInternalFrame();
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
- // Push a copy of the function onto the stack.
- __ push(edi);
- // Push call kind information.
- __ push(ecx);
+ // Push a copy of the function onto the stack.
+ __ push(edi);
+ // Push call kind information.
+ __ push(ecx);
- __ push(edi); // Function is also the parameter to the runtime call.
- __ CallRuntime(Runtime::kLazyRecompile, 1);
+ __ push(edi); // Function is also the parameter to the runtime call.
+ __ CallRuntime(Runtime::kLazyRecompile, 1);
- // Restore call kind information.
- __ pop(ecx);
- // Restore receiver.
- __ pop(edi);
+ // Restore call kind information.
+ __ pop(ecx);
+ // Restore receiver.
+ __ pop(edi);
- // Tear down temporary frame.
- __ LeaveInternalFrame();
+ // Tear down internal frame.
+ }
// Do a tail-call of the compiled function.
__ lea(eax, FieldOperand(eax, Code::kHeaderSize));
- __ jmp(Operand(eax));
+ __ jmp(eax);
}
static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
Deoptimizer::BailoutType type) {
- // Enter an internal frame.
- __ EnterInternalFrame();
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
- // Pass the function and deoptimization type to the runtime system.
- __ push(Immediate(Smi::FromInt(static_cast<int>(type))));
- __ CallRuntime(Runtime::kNotifyDeoptimized, 1);
+ // Pass the function and deoptimization type to the runtime system.
+ __ push(Immediate(Smi::FromInt(static_cast<int>(type))));
+ __ CallRuntime(Runtime::kNotifyDeoptimized, 1);
- // Tear down temporary frame.
- __ LeaveInternalFrame();
+ // Tear down internal frame.
+ }
// Get the full codegen state from the stack and untag it.
__ mov(ecx, Operand(esp, 1 * kPointerSize));
@@ -566,9 +583,10 @@ void Builtins::Generate_NotifyOSR(MacroAssembler* masm) {
// the registers without worrying about which of them contain
// pointers. This seems a bit fragile.
__ pushad();
- __ EnterInternalFrame();
- __ CallRuntime(Runtime::kNotifyOSR, 0);
- __ LeaveInternalFrame();
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kNotifyOSR, 0);
+ }
__ popad();
__ ret(0);
}
@@ -579,7 +597,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
// 1. Make sure we have at least one argument.
{ Label done;
- __ test(eax, Operand(eax));
+ __ test(eax, eax);
__ j(not_zero, &done);
__ pop(ebx);
__ push(Immediate(factory->undefined_value()));
@@ -631,18 +649,21 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ j(above_equal, &shift_arguments);
__ bind(&convert_to_object);
- __ EnterInternalFrame(); // In order to preserve argument count.
- __ SmiTag(eax);
- __ push(eax);
- __ push(ebx);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
- __ mov(ebx, eax);
- __ Set(edx, Immediate(0)); // restore
+ { // In order to preserve argument count.
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ SmiTag(eax);
+ __ push(eax);
+
+ __ push(ebx);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ mov(ebx, eax);
+ __ Set(edx, Immediate(0)); // restore
+
+ __ pop(eax);
+ __ SmiUntag(eax);
+ }
- __ pop(eax);
- __ SmiUntag(eax);
- __ LeaveInternalFrame();
// Restore the function to edi.
__ mov(edi, Operand(esp, eax, times_4, 1 * kPointerSize));
__ jmp(&patch_receiver);
@@ -695,22 +716,23 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
// 5a. Call non-function via tail call to CALL_NON_FUNCTION builtin,
// or a function proxy via CALL_FUNCTION_PROXY.
{ Label function, non_proxy;
- __ test(edx, Operand(edx));
+ __ test(edx, edx);
__ j(zero, &function);
__ Set(ebx, Immediate(0));
- __ SetCallKind(ecx, CALL_AS_METHOD);
- __ cmp(Operand(edx), Immediate(1));
+ __ cmp(edx, Immediate(1));
__ j(not_equal, &non_proxy);
__ pop(edx); // return address
__ push(edi); // re-add proxy object as additional argument
__ push(edx);
__ inc(eax);
+ __ SetCallKind(ecx, CALL_AS_FUNCTION);
__ GetBuiltinEntry(edx, Builtins::CALL_FUNCTION_PROXY);
__ jmp(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
RelocInfo::CODE_TARGET);
__ bind(&non_proxy);
+ __ SetCallKind(ecx, CALL_AS_METHOD);
__ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION);
__ jmp(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
RelocInfo::CODE_TARGET);
@@ -726,13 +748,13 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ mov(edx, FieldOperand(edi, JSFunction::kCodeEntryOffset));
__ SmiUntag(ebx);
__ SetCallKind(ecx, CALL_AS_METHOD);
- __ cmp(eax, Operand(ebx));
+ __ cmp(eax, ebx);
__ j(not_equal,
masm->isolate()->builtins()->ArgumentsAdaptorTrampoline());
ParameterCount expected(0);
- __ InvokeCode(Operand(edx), expected, expected, JUMP_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
+ __ InvokeCode(edx, expected, expected, JUMP_FUNCTION, NullCallWrapper(),
+ CALL_AS_METHOD);
}
@@ -740,161 +762,158 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
static const int kArgumentsOffset = 2 * kPointerSize;
static const int kReceiverOffset = 3 * kPointerSize;
static const int kFunctionOffset = 4 * kPointerSize;
+ {
+ FrameScope frame_scope(masm, StackFrame::INTERNAL);
+
+ __ push(Operand(ebp, kFunctionOffset)); // push this
+ __ push(Operand(ebp, kArgumentsOffset)); // push arguments
+ __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
+
+ // Check the stack for overflow. We are not trying to catch
+ // interruptions (e.g. debug break and preemption) here, so the "real stack
+ // limit" is checked.
+ Label okay;
+ ExternalReference real_stack_limit =
+ ExternalReference::address_of_real_stack_limit(masm->isolate());
+ __ mov(edi, Operand::StaticVariable(real_stack_limit));
+ // Make ecx the space we have left. The stack might already be overflowed
+ // here which will cause ecx to become negative.
+ __ mov(ecx, esp);
+ __ sub(ecx, edi);
+ // Make edx the space we need for the array when it is unrolled onto the
+ // stack.
+ __ mov(edx, eax);
+ __ shl(edx, kPointerSizeLog2 - kSmiTagSize);
+ // Check if the arguments will overflow the stack.
+ __ cmp(ecx, edx);
+ __ j(greater, &okay); // Signed comparison.
+
+ // Out of stack space.
+ __ push(Operand(ebp, 4 * kPointerSize)); // push this
+ __ push(eax);
+ __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
+ __ bind(&okay);
+ // End of stack check.
+
+ // Push current index and limit.
+ const int kLimitOffset =
+ StandardFrameConstants::kExpressionsOffset - 1 * kPointerSize;
+ const int kIndexOffset = kLimitOffset - 1 * kPointerSize;
+ __ push(eax); // limit
+ __ push(Immediate(0)); // index
+
+ // Get the receiver.
+ __ mov(ebx, Operand(ebp, kReceiverOffset));
+
+ // Check that the function is a JS function (otherwise it must be a proxy).
+ Label push_receiver;
+ __ mov(edi, Operand(ebp, kFunctionOffset));
+ __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
+ __ j(not_equal, &push_receiver);
+
+ // Change context eagerly to get the right global object if necessary.
+ __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
- __ EnterInternalFrame();
-
- __ push(Operand(ebp, kFunctionOffset)); // push this
- __ push(Operand(ebp, kArgumentsOffset)); // push arguments
- __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
-
- // Check the stack for overflow. We are not trying to catch
- // interruptions (e.g. debug break and preemption) here, so the "real stack
- // limit" is checked.
- Label okay;
- ExternalReference real_stack_limit =
- ExternalReference::address_of_real_stack_limit(masm->isolate());
- __ mov(edi, Operand::StaticVariable(real_stack_limit));
- // Make ecx the space we have left. The stack might already be overflowed
- // here which will cause ecx to become negative.
- __ mov(ecx, Operand(esp));
- __ sub(ecx, Operand(edi));
- // Make edx the space we need for the array when it is unrolled onto the
- // stack.
- __ mov(edx, Operand(eax));
- __ shl(edx, kPointerSizeLog2 - kSmiTagSize);
- // Check if the arguments will overflow the stack.
- __ cmp(ecx, Operand(edx));
- __ j(greater, &okay); // Signed comparison.
-
- // Out of stack space.
- __ push(Operand(ebp, 4 * kPointerSize)); // push this
- __ push(eax);
- __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
- __ bind(&okay);
- // End of stack check.
-
- // Push current index and limit.
- const int kLimitOffset =
- StandardFrameConstants::kExpressionsOffset - 1 * kPointerSize;
- const int kIndexOffset = kLimitOffset - 1 * kPointerSize;
- __ push(eax); // limit
- __ push(Immediate(0)); // index
-
- // Get the receiver.
- __ mov(ebx, Operand(ebp, kReceiverOffset));
-
- // Check that the function is a JS function (otherwise it must be a proxy).
- Label push_receiver;
- __ mov(edi, Operand(ebp, kFunctionOffset));
- __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
- __ j(not_equal, &push_receiver);
+ // Compute the receiver.
+ // Do not transform the receiver for strict mode functions.
+ Label call_to_object, use_global_receiver;
+ __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ __ test_b(FieldOperand(ecx, SharedFunctionInfo::kStrictModeByteOffset),
+ 1 << SharedFunctionInfo::kStrictModeBitWithinByte);
+ __ j(not_equal, &push_receiver);
- // Change context eagerly to get the right global object if necessary.
- __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
+ Factory* factory = masm->isolate()->factory();
- // Compute the receiver.
- // Do not transform the receiver for strict mode functions.
- Label call_to_object, use_global_receiver;
- __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ test_b(FieldOperand(ecx, SharedFunctionInfo::kStrictModeByteOffset),
- 1 << SharedFunctionInfo::kStrictModeBitWithinByte);
- __ j(not_equal, &push_receiver);
+ // Do not transform the receiver for natives (shared already in ecx).
+ __ test_b(FieldOperand(ecx, SharedFunctionInfo::kNativeByteOffset),
+ 1 << SharedFunctionInfo::kNativeBitWithinByte);
+ __ j(not_equal, &push_receiver);
- Factory* factory = masm->isolate()->factory();
+ // Compute the receiver in non-strict mode.
+ // Call ToObject on the receiver if it is not an object, or use the
+ // global object if it is null or undefined.
+ __ JumpIfSmi(ebx, &call_to_object);
+ __ cmp(ebx, factory->null_value());
+ __ j(equal, &use_global_receiver);
+ __ cmp(ebx, factory->undefined_value());
+ __ j(equal, &use_global_receiver);
+ STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+ __ CmpObjectType(ebx, FIRST_SPEC_OBJECT_TYPE, ecx);
+ __ j(above_equal, &push_receiver);
- // Do not transform the receiver for natives (shared already in ecx).
- __ test_b(FieldOperand(ecx, SharedFunctionInfo::kNativeByteOffset),
- 1 << SharedFunctionInfo::kNativeBitWithinByte);
- __ j(not_equal, &push_receiver);
-
- // Compute the receiver in non-strict mode.
- // Call ToObject on the receiver if it is not an object, or use the
- // global object if it is null or undefined.
- __ JumpIfSmi(ebx, &call_to_object);
- __ cmp(ebx, factory->null_value());
- __ j(equal, &use_global_receiver);
- __ cmp(ebx, factory->undefined_value());
- __ j(equal, &use_global_receiver);
- STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
- __ CmpObjectType(ebx, FIRST_SPEC_OBJECT_TYPE, ecx);
- __ j(above_equal, &push_receiver);
-
- __ bind(&call_to_object);
- __ push(ebx);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
- __ mov(ebx, Operand(eax));
- __ jmp(&push_receiver);
-
- // Use the current global receiver object as the receiver.
- __ bind(&use_global_receiver);
- const int kGlobalOffset =
- Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
- __ mov(ebx, FieldOperand(esi, kGlobalOffset));
- __ mov(ebx, FieldOperand(ebx, GlobalObject::kGlobalContextOffset));
- __ mov(ebx, FieldOperand(ebx, kGlobalOffset));
- __ mov(ebx, FieldOperand(ebx, GlobalObject::kGlobalReceiverOffset));
-
- // Push the receiver.
- __ bind(&push_receiver);
- __ push(ebx);
-
- // Copy all arguments from the array to the stack.
- Label entry, loop;
- __ mov(eax, Operand(ebp, kIndexOffset));
- __ jmp(&entry);
- __ bind(&loop);
- __ mov(edx, Operand(ebp, kArgumentsOffset)); // load arguments
+ __ bind(&call_to_object);
+ __ push(ebx);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ mov(ebx, eax);
+ __ jmp(&push_receiver);
+
+ // Use the current global receiver object as the receiver.
+ __ bind(&use_global_receiver);
+ const int kGlobalOffset =
+ Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
+ __ mov(ebx, FieldOperand(esi, kGlobalOffset));
+ __ mov(ebx, FieldOperand(ebx, GlobalObject::kGlobalContextOffset));
+ __ mov(ebx, FieldOperand(ebx, kGlobalOffset));
+ __ mov(ebx, FieldOperand(ebx, GlobalObject::kGlobalReceiverOffset));
- // Use inline caching to speed up access to arguments.
- Handle<Code> ic = masm->isolate()->builtins()->KeyedLoadIC_Initialize();
- __ call(ic, RelocInfo::CODE_TARGET);
- // It is important that we do not have a test instruction after the
- // call. A test instruction after the call is used to indicate that
- // we have generated an inline version of the keyed load. In this
- // case, we know that we are not generating a test instruction next.
+ // Push the receiver.
+ __ bind(&push_receiver);
+ __ push(ebx);
- // Push the nth argument.
- __ push(eax);
+ // Copy all arguments from the array to the stack.
+ Label entry, loop;
+ __ mov(eax, Operand(ebp, kIndexOffset));
+ __ jmp(&entry);
+ __ bind(&loop);
+ __ mov(edx, Operand(ebp, kArgumentsOffset)); // load arguments
- // Update the index on the stack and in register eax.
- __ mov(eax, Operand(ebp, kIndexOffset));
- __ add(Operand(eax), Immediate(1 << kSmiTagSize));
- __ mov(Operand(ebp, kIndexOffset), eax);
+ // Use inline caching to speed up access to arguments.
+ Handle<Code> ic = masm->isolate()->builtins()->KeyedLoadIC_Initialize();
+ __ call(ic, RelocInfo::CODE_TARGET);
+ // It is important that we do not have a test instruction after the
+ // call. A test instruction after the call is used to indicate that
+ // we have generated an inline version of the keyed load. In this
+ // case, we know that we are not generating a test instruction next.
- __ bind(&entry);
- __ cmp(eax, Operand(ebp, kLimitOffset));
- __ j(not_equal, &loop);
+ // Push the nth argument.
+ __ push(eax);
- // Invoke the function.
- Label call_proxy;
- ParameterCount actual(eax);
- __ SmiUntag(eax);
- __ mov(edi, Operand(ebp, kFunctionOffset));
- __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
- __ j(not_equal, &call_proxy);
- __ InvokeFunction(edi, actual, CALL_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
+ // Update the index on the stack and in register eax.
+ __ mov(eax, Operand(ebp, kIndexOffset));
+ __ add(eax, Immediate(1 << kSmiTagSize));
+ __ mov(Operand(ebp, kIndexOffset), eax);
- __ LeaveInternalFrame();
- __ ret(3 * kPointerSize); // remove this, receiver, and arguments
+ __ bind(&entry);
+ __ cmp(eax, Operand(ebp, kLimitOffset));
+ __ j(not_equal, &loop);
- // Invoke the function proxy.
- __ bind(&call_proxy);
- __ push(edi); // add function proxy as last argument
- __ inc(eax);
- __ Set(ebx, Immediate(0));
- __ SetCallKind(ecx, CALL_AS_METHOD);
- __ GetBuiltinEntry(edx, Builtins::CALL_FUNCTION_PROXY);
- __ call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
- RelocInfo::CODE_TARGET);
+ // Invoke the function.
+ Label call_proxy;
+ ParameterCount actual(eax);
+ __ SmiUntag(eax);
+ __ mov(edi, Operand(ebp, kFunctionOffset));
+ __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
+ __ j(not_equal, &call_proxy);
+ __ InvokeFunction(edi, actual, CALL_FUNCTION,
+ NullCallWrapper(), CALL_AS_METHOD);
- __ LeaveInternalFrame();
- __ ret(3 * kPointerSize); // remove this, receiver, and arguments
-}
+ frame_scope.GenerateLeaveFrame();
+ __ ret(3 * kPointerSize); // remove this, receiver, and arguments
+ // Invoke the function proxy.
+ __ bind(&call_proxy);
+ __ push(edi); // add function proxy as last argument
+ __ inc(eax);
+ __ Set(ebx, Immediate(0));
+ __ SetCallKind(ecx, CALL_AS_METHOD);
+ __ GetBuiltinEntry(edx, Builtins::CALL_FUNCTION_PROXY);
+ __ call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+ RelocInfo::CODE_TARGET);
-// Number of empty elements to allocate for an empty array.
-static const int kPreallocatedArrayElements = 4;
+ // Leave internal frame.
+ }
+ __ ret(3 * kPointerSize); // remove this, receiver, and arguments
+}
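
The stack check at the top of Generate_FunctionApply is worth spelling out: it compares the space left below esp against the space the unrolled argument array will need, and because the argument count is still a smi at that point, the byte count falls out of a single shift by kPointerSizeLog2 - kSmiTagSize instead of a multiply. A sketch under ia32 assumptions (4-byte pointers, 1-bit smi tag):

    #include <cstdint>

    const int kPointerSizeLog2 = 2;  // ia32: 4-byte pointers
    const int kSmiTagSize = 1;

    // Returns true when the arguments fit; mirrors the signed j(greater, &okay).
    bool ArgumentsFitOnStack(uintptr_t esp, uintptr_t real_stack_limit,
                             int32_t smi_argument_count) {
      // ecx = esp - limit; may already be negative if the stack has overflowed.
      intptr_t space_left = static_cast<intptr_t>(esp) -
                            static_cast<intptr_t>(real_stack_limit);
      // edx = argc * kPointerSize, computed directly from the smi encoding.
      intptr_t space_needed = static_cast<intptr_t>(smi_argument_count)
                              << (kPointerSizeLog2 - kSmiTagSize);
      return space_left > space_needed;
    }
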
// Allocate an empty JSArray. The allocated array is put into the result
@@ -907,10 +926,9 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
Register scratch1,
Register scratch2,
Register scratch3,
- int initial_capacity,
Label* gc_required) {
- ASSERT(initial_capacity >= 0);
-
+ const int initial_capacity = JSArray::kPreallocatedArrayElements;
+ STATIC_ASSERT(initial_capacity >= 0);
// Load the initial map from the array function.
__ mov(scratch1, FieldOperand(array_function,
JSFunction::kPrototypeOrInitialMapOffset));
@@ -968,7 +986,6 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
// Fill the FixedArray with the hole value. Inline the code if short.
// Reconsider loop unfolding if kPreallocatedArrayElements gets changed.
static const int kLoopUnfoldLimit = 4;
- STATIC_ASSERT(kPreallocatedArrayElements <= kLoopUnfoldLimit);
if (initial_capacity <= kLoopUnfoldLimit) {
// Use a scratch register here to have only one reloc info when unfolding
// the loop.
@@ -980,13 +997,17 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
}
} else {
Label loop, entry;
+ __ mov(scratch2, Immediate(initial_capacity));
__ jmp(&entry);
__ bind(&loop);
- __ mov(Operand(scratch1, 0), factory->the_hole_value());
- __ add(Operand(scratch1), Immediate(kPointerSize));
+ __ mov(FieldOperand(scratch1,
+ scratch2,
+ times_pointer_size,
+ FixedArray::kHeaderSize),
+ factory->the_hole_value());
__ bind(&entry);
- __ cmp(scratch1, Operand(scratch2));
- __ j(below, &loop);
+ __ dec(scratch2);
+ __ j(not_sign, &loop);
}
}
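
The rewritten fill loop above switches from walking a pointer forward to counting an index down: scratch2 starts at initial_capacity, and the dec plus j(not_sign, &loop) pair keeps storing the hole until the index goes negative, so slots initial_capacity - 1 down to 0 are each written. The equivalent loop in plain C++:

    #include <vector>

    // `hole` stands in for whatever the_hole_value() returns.
    void FillWithHoles(std::vector<const void*>& elements, int initial_capacity,
                       const void* hole) {
      for (int i = initial_capacity - 1; i >= 0; --i) {  // dec + j(not_sign)
        elements[i] = hole;
      }
    }

Indexing off the base register also means the loop no longer has to maintain a separate end pointer.
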
@@ -1082,7 +1103,7 @@ static void AllocateJSArray(MacroAssembler* masm,
__ bind(&loop);
__ stos();
__ bind(&entry);
- __ cmp(edi, Operand(elements_array_end));
+ __ cmp(edi, elements_array_end);
__ j(below, &loop);
__ bind(&done);
}
@@ -1120,7 +1141,7 @@ static void ArrayNativeCode(MacroAssembler* masm,
__ push(eax);
// Check for array construction with zero arguments.
- __ test(eax, Operand(eax));
+ __ test(eax, eax);
__ j(not_zero, &argc_one_or_more);
__ bind(&empty_array);
@@ -1131,7 +1152,6 @@ static void ArrayNativeCode(MacroAssembler* masm,
ebx,
ecx,
edi,
- kPreallocatedArrayElements,
&prepare_generic_code_call);
__ IncrementCounter(masm->isolate()->counters()->array_function_native(), 1);
__ pop(ebx);
@@ -1147,7 +1167,7 @@ static void ArrayNativeCode(MacroAssembler* masm,
__ j(not_equal, &argc_two_or_more);
STATIC_ASSERT(kSmiTag == 0);
__ mov(ecx, Operand(esp, (push_count + 1) * kPointerSize));
- __ test(ecx, Operand(ecx));
+ __ test(ecx, ecx);
__ j(not_zero, &not_empty_array);
// The single argument passed is zero, so we jump to the code above used to
@@ -1160,7 +1180,7 @@ static void ArrayNativeCode(MacroAssembler* masm,
__ mov(eax, Operand(esp, i * kPointerSize));
__ mov(Operand(esp, (i + 1) * kPointerSize), eax);
}
- __ add(Operand(esp), Immediate(2 * kPointerSize)); // Drop two stack slots.
+ __ Drop(2); // Drop two stack slots.
__ push(Immediate(0)); // Treat this as a call with argc of zero.
__ jmp(&empty_array);
@@ -1250,7 +1270,7 @@ static void ArrayNativeCode(MacroAssembler* masm,
__ bind(&loop);
__ mov(eax, Operand(edi, ecx, times_pointer_size, 0));
__ mov(Operand(edx, 0), eax);
- __ add(Operand(edx), Immediate(kPointerSize));
+ __ add(edx, Immediate(kPointerSize));
__ bind(&entry);
__ dec(ecx);
__ j(greater_equal, &loop);
@@ -1356,14 +1376,14 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
if (FLAG_debug_code) {
__ LoadGlobalFunction(Context::STRING_FUNCTION_INDEX, ecx);
- __ cmp(edi, Operand(ecx));
+ __ cmp(edi, ecx);
__ Assert(equal, "Unexpected String function");
}
// Load the first argument into eax and get rid of the rest
// (including the receiver).
Label no_arguments;
- __ test(eax, Operand(eax));
+ __ test(eax, eax);
__ j(zero, &no_arguments);
__ mov(ebx, Operand(esp, eax, times_pointer_size, 0));
__ pop(ecx);
@@ -1439,12 +1459,13 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
// Invoke the conversion builtin and put the result into ebx.
__ bind(&convert_argument);
__ IncrementCounter(counters->string_ctor_conversions(), 1);
- __ EnterInternalFrame();
- __ push(edi); // Preserve the function.
- __ push(eax);
- __ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION);
- __ pop(edi);
- __ LeaveInternalFrame();
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ push(edi); // Preserve the function.
+ __ push(eax);
+ __ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION);
+ __ pop(edi);
+ }
__ mov(ebx, eax);
__ jmp(&argument_is_string);
@@ -1461,17 +1482,18 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
// create a string wrapper.
__ bind(&gc_required);
__ IncrementCounter(counters->string_ctor_gc_required(), 1);
- __ EnterInternalFrame();
- __ push(ebx);
- __ CallRuntime(Runtime::kNewStringWrapper, 1);
- __ LeaveInternalFrame();
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ push(ebx);
+ __ CallRuntime(Runtime::kNewStringWrapper, 1);
+ }
__ ret(0);
}
static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
__ push(ebp);
- __ mov(ebp, Operand(esp));
+ __ mov(ebp, esp);
// Store the arguments adaptor context sentinel.
__ push(Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
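
The sentinel above is pushed as a smi so that the value sitting in the frame is never mistaken for a heap pointer by a stack walker. On ia32 a smi keeps a 31-bit payload above a zero tag bit; a small sketch of the encoding (the tag constants are the real ia32 values, the payload below is a stand-in for the StackFrame enum):

    #include <cassert>
    #include <cstdint>

    const int kSmiTagSize = 1;
    const int kSmiTag = 0;

    int32_t SmiFromInt(int32_t value) { return value << kSmiTagSize; }
    int32_t SmiValue(int32_t smi) { return smi >> kSmiTagSize; }  // arithmetic shift

    int main() {
      int32_t sentinel = SmiFromInt(4);   // stand-in payload, not the real enum value
      assert((sentinel & 1) == kSmiTag);  // tag bit clear: scanned as a smi
      assert(SmiValue(sentinel) == 4);
      return 0;
    }
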
@@ -1515,7 +1537,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ IncrementCounter(masm->isolate()->counters()->arguments_adaptors(), 1);
Label enough, too_few;
- __ cmp(eax, Operand(ebx));
+ __ cmp(eax, ebx);
__ j(less, &too_few);
__ cmp(ebx, SharedFunctionInfo::kDontAdaptArgumentsSentinel);
__ j(equal, &dont_adapt_arguments);
@@ -1533,8 +1555,8 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ bind(&copy);
__ inc(edi);
__ push(Operand(eax, 0));
- __ sub(Operand(eax), Immediate(kPointerSize));
- __ cmp(edi, Operand(ebx));
+ __ sub(eax, Immediate(kPointerSize));
+ __ cmp(edi, ebx);
__ j(less, &copy);
__ jmp(&invoke);
}
@@ -1547,17 +1569,17 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
const int offset = StandardFrameConstants::kCallerSPOffset;
__ lea(edi, Operand(ebp, eax, times_4, offset));
// ebx = expected - actual.
- __ sub(ebx, Operand(eax));
+ __ sub(ebx, eax);
// eax = -actual - 1
__ neg(eax);
- __ sub(Operand(eax), Immediate(1));
+ __ sub(eax, Immediate(1));
Label copy;
__ bind(&copy);
__ inc(eax);
__ push(Operand(edi, 0));
- __ sub(Operand(edi), Immediate(kPointerSize));
- __ test(eax, Operand(eax));
+ __ sub(edi, Immediate(kPointerSize));
+ __ test(eax, eax);
__ j(not_zero, &copy);
// Fill remaining expected arguments with undefined values.
@@ -1565,7 +1587,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ bind(&fill);
__ inc(eax);
__ push(Immediate(masm->isolate()->factory()->undefined_value()));
- __ cmp(eax, Operand(ebx));
+ __ cmp(eax, ebx);
__ j(less, &fill);
}
@@ -1573,7 +1595,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ bind(&invoke);
// Restore function pointer.
__ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- __ call(Operand(edx));
+ __ call(edx);
// Leave frame and return.
LeaveArgumentsAdaptorFrame(masm);
@@ -1583,13 +1605,13 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// Don't adapt arguments.
// -------------------------------------------
__ bind(&dont_adapt_arguments);
- __ jmp(Operand(edx));
+ __ jmp(edx);
}
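
Taken together, the copy and fill loops above implement the adaptor's contract: push the actual arguments, then pad with undefined until the expected count is reached. The same logic at the C++ level, as a model rather than the stack layout the trampoline really builds:

    #include <cstddef>
    #include <string>
    #include <vector>

    std::vector<std::string> AdaptArguments(const std::vector<std::string>& actual,
                                            std::size_t expected) {
      std::vector<std::string> adapted(actual.begin(), actual.end());  // copy loop
      while (adapted.size() < expected) {
        adapted.push_back("undefined");  // fill loop: factory->undefined_value()
      }
      return adapted;
    }
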
void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
CpuFeatures::TryForceFeatureScope scope(SSE2);
- if (!CpuFeatures::IsSupported(SSE2)) {
+ if (!CpuFeatures::IsSupported(SSE2) && FLAG_debug_code) {
__ Abort("Unreachable code: Cannot optimize without SSE2 support.");
return;
}
@@ -1616,15 +1638,16 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
// Pass the function to optimize as the argument to the on-stack
// replacement runtime function.
- __ EnterInternalFrame();
- __ push(eax);
- __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
- __ LeaveInternalFrame();
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ push(eax);
+ __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
+ }
// If the result was -1 it means that we couldn't optimize the
// function. Just return and continue in the unoptimized version.
Label skip;
- __ cmp(Operand(eax), Immediate(Smi::FromInt(-1)));
+ __ cmp(eax, Immediate(Smi::FromInt(-1)));
__ j(not_equal, &skip, Label::kNear);
__ ret(0);
@@ -1638,7 +1661,9 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
__ j(above_equal, &ok, Label::kNear);
StackCheckStub stub;
__ TailCallStub(&stub);
- __ Abort("Unreachable code: returned from tail call.");
+ if (FLAG_debug_code) {
+ __ Abort("Unreachable code: returned from tail call.");
+ }
__ bind(&ok);
__ ret(0);
diff --git a/deps/v8/src/ia32/code-stubs-ia32.cc b/deps/v8/src/ia32/code-stubs-ia32.cc
index 1009aaf57..68eebd3a0 100644
--- a/deps/v8/src/ia32/code-stubs-ia32.cc
+++ b/deps/v8/src/ia32/code-stubs-ia32.cc
@@ -34,6 +34,8 @@
#include "isolate.h"
#include "jsregexp.h"
#include "regexp-macro-assembler.h"
+#include "stub-cache.h"
+#include "codegen.h"
namespace v8 {
namespace internal {
@@ -49,7 +51,7 @@ void ToNumberStub::Generate(MacroAssembler* masm) {
__ bind(&check_heap_number);
__ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
Factory* factory = masm->isolate()->factory();
- __ cmp(Operand(ebx), Immediate(factory->heap_number_map()));
+ __ cmp(ebx, Immediate(factory->heap_number_map()));
__ j(not_equal, &call_builtin, Label::kNear);
__ ret(0);
@@ -70,9 +72,9 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) {
// Get the function info from the stack.
__ mov(edx, Operand(esp, 1 * kPointerSize));
- int map_index = strict_mode_ == kStrictMode
- ? Context::STRICT_MODE_FUNCTION_MAP_INDEX
- : Context::FUNCTION_MAP_INDEX;
+ int map_index = (language_mode_ == CLASSIC_MODE)
+ ? Context::FUNCTION_MAP_INDEX
+ : Context::STRICT_MODE_FUNCTION_MAP_INDEX;
// Compute the function map in the current global context and set that
// as the map of the allocated object.
@@ -150,7 +152,7 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
}
// Return and remove the on-stack parameter.
- __ mov(esi, Operand(eax));
+ __ mov(esi, eax);
__ ret(1 * kPointerSize);
// Need to collect. Call into runtime system.
@@ -159,6 +161,139 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
}
+void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
+ // Stack layout on entry:
+ //
+ // [esp + (1 * kPointerSize)]: function
+ // [esp + (2 * kPointerSize)]: serialized scope info
+
+ // Try to allocate the context in new space.
+ Label gc;
+ int length = slots_ + Context::MIN_CONTEXT_SLOTS;
+ __ AllocateInNewSpace(FixedArray::SizeFor(length),
+ eax, ebx, ecx, &gc, TAG_OBJECT);
+
+ // Get the function or sentinel from the stack.
+ __ mov(ecx, Operand(esp, 1 * kPointerSize));
+
+ // Get the serialized scope info from the stack.
+ __ mov(ebx, Operand(esp, 2 * kPointerSize));
+
+ // Set up the object header.
+ Factory* factory = masm->isolate()->factory();
+ __ mov(FieldOperand(eax, HeapObject::kMapOffset),
+ factory->block_context_map());
+ __ mov(FieldOperand(eax, Context::kLengthOffset),
+ Immediate(Smi::FromInt(length)));
+
+ // If this block context is nested in the global context, we get a smi
+ // sentinel instead of a function. The block context should get the
+ // canonical empty function of the global context as its closure, which
+ // we still have to look up.
+ Label after_sentinel;
+ __ JumpIfNotSmi(ecx, &after_sentinel, Label::kNear);
+ if (FLAG_debug_code) {
+ const char* message = "Expected 0 as a Smi sentinel";
+ __ cmp(ecx, 0);
+ __ Assert(equal, message);
+ }
+ __ mov(ecx, GlobalObjectOperand());
+ __ mov(ecx, FieldOperand(ecx, GlobalObject::kGlobalContextOffset));
+ __ mov(ecx, ContextOperand(ecx, Context::CLOSURE_INDEX));
+ __ bind(&after_sentinel);
+
+ // Set up the fixed slots.
+ __ mov(ContextOperand(eax, Context::CLOSURE_INDEX), ecx);
+ __ mov(ContextOperand(eax, Context::PREVIOUS_INDEX), esi);
+ __ mov(ContextOperand(eax, Context::EXTENSION_INDEX), ebx);
+
+ // Copy the global object from the previous context.
+ __ mov(ebx, ContextOperand(esi, Context::GLOBAL_INDEX));
+ __ mov(ContextOperand(eax, Context::GLOBAL_INDEX), ebx);
+
+ // Initialize the rest of the slots to the hole value.
+ if (slots_ == 1) {
+ __ mov(ContextOperand(eax, Context::MIN_CONTEXT_SLOTS),
+ factory->the_hole_value());
+ } else {
+ __ mov(ebx, factory->the_hole_value());
+ for (int i = 0; i < slots_; i++) {
+ __ mov(ContextOperand(eax, i + Context::MIN_CONTEXT_SLOTS), ebx);
+ }
+ }
+
+ // Return and remove the on-stack parameters.
+ __ mov(esi, eax);
+ __ ret(2 * kPointerSize);
+
+ // Need to collect. Call into runtime system.
+ __ bind(&gc);
+ __ TailCallRuntime(Runtime::kPushBlockContext, 2, 1);
+}
+
+
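
As a mental model for what the new stub builds: Context::MIN_CONTEXT_SLOTS fixed slots (closure, previous context, extension, global) followed by slots_ locals pre-filled with the hole. A sketch with invented types:

    #include <vector>

    struct HeapValue;  // opaque stand-in for a tagged value

    struct BlockContext {       // invented model of the fixed slots set up above
      HeapValue* closure;       // CLOSURE_INDEX
      HeapValue* previous;      // PREVIOUS_INDEX
      HeapValue* extension;     // EXTENSION_INDEX: the serialized scope info
      HeapValue* global;        // GLOBAL_INDEX, copied from the previous context
      std::vector<HeapValue*> locals;
    };

    BlockContext MakeBlockContext(int slots, HeapValue* hole, HeapValue* closure,
                                  HeapValue* previous, HeapValue* scope_info,
                                  HeapValue* global) {
      BlockContext ctx;
      ctx.closure = closure;
      ctx.previous = previous;
      ctx.extension = scope_info;
      ctx.global = global;
      ctx.locals.assign(slots, hole);  // "initialize the rest ... to the hole"
      return ctx;
    }

The slots_ == 1 special case in the stub exists only to skip staging the hole in ebx when there is a single store to make.
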
+static void GenerateFastCloneShallowArrayCommon(
+ MacroAssembler* masm,
+ int length,
+ FastCloneShallowArrayStub::Mode mode,
+ Label* fail) {
+ // Registers on entry:
+ //
+ // ecx: boilerplate literal array.
+ ASSERT(mode != FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS);
+
+ // All sizes here are multiples of kPointerSize.
+ int elements_size = 0;
+ if (length > 0) {
+ elements_size = mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
+ ? FixedDoubleArray::SizeFor(length)
+ : FixedArray::SizeFor(length);
+ }
+ int size = JSArray::kSize + elements_size;
+
+ // Allocate both the JS array and the elements array in one big
+ // allocation. This avoids multiple limit checks.
+ __ AllocateInNewSpace(size, eax, ebx, edx, fail, TAG_OBJECT);
+
+ // Copy the JS array part.
+ for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
+ if ((i != JSArray::kElementsOffset) || (length == 0)) {
+ __ mov(ebx, FieldOperand(ecx, i));
+ __ mov(FieldOperand(eax, i), ebx);
+ }
+ }
+
+ if (length > 0) {
+ // Get hold of the elements array of the boilerplate and set up the
+ // elements pointer in the resulting object.
+ __ mov(ecx, FieldOperand(ecx, JSArray::kElementsOffset));
+ __ lea(edx, Operand(eax, JSArray::kSize));
+ __ mov(FieldOperand(eax, JSArray::kElementsOffset), edx);
+
+ // Copy the elements array.
+ if (mode == FastCloneShallowArrayStub::CLONE_ELEMENTS) {
+ for (int i = 0; i < elements_size; i += kPointerSize) {
+ __ mov(ebx, FieldOperand(ecx, i));
+ __ mov(FieldOperand(edx, i), ebx);
+ }
+ } else {
+ ASSERT(mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS);
+ int i;
+ for (i = 0; i < FixedDoubleArray::kHeaderSize; i += kPointerSize) {
+ __ mov(ebx, FieldOperand(ecx, i));
+ __ mov(FieldOperand(edx, i), ebx);
+ }
+ while (i < elements_size) {
+ __ fld_d(FieldOperand(ecx, i));
+ __ fstp_d(FieldOperand(edx, i));
+ i += kDoubleSize;
+ }
+ ASSERT(i == elements_size);
+ }
+ }
+}
+
+
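
The size computation feeding AllocateInNewSpace above, restated in plain C++. The SizeFor calls amount to header plus length entries; the concrete header and JSArray sizes below are illustrative assumptions, not the real ia32 constants:

    const int kPointerSize = 4;                      // ia32
    const int kDoubleSize = 8;
    const int kJSArraySizeGuess = 7 * kPointerSize;  // assumption for illustration
    const int kArrayHeaderSizeGuess = 2 * kPointerSize;

    int CloneAllocationSize(int length, bool double_elements) {
      int elements_size = 0;
      if (length > 0) {  // length == 0 keeps the boilerplate's elements pointer
        elements_size = kArrayHeaderSizeGuess +
            length * (double_elements ? kDoubleSize : kPointerSize);
      }
      return kJSArraySizeGuess + elements_size;  // one allocation, one limit check
    }
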
void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
// Stack layout on entry:
//
@@ -166,13 +301,8 @@ void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
// [esp + (2 * kPointerSize)]: literal index.
// [esp + (3 * kPointerSize)]: literals array.
- // All sizes here are multiples of kPointerSize.
- int elements_size = (length_ > 0) ? FixedArray::SizeFor(length_) : 0;
- int size = JSArray::kSize + elements_size;
-
// Load boilerplate object into ecx and check if we need to create a
// boilerplate.
- Label slow_case;
__ mov(ecx, Operand(esp, 3 * kPointerSize));
__ mov(eax, Operand(esp, 2 * kPointerSize));
STATIC_ASSERT(kPointerSize == 4);
@@ -182,16 +312,43 @@ void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
FixedArray::kHeaderSize));
Factory* factory = masm->isolate()->factory();
__ cmp(ecx, factory->undefined_value());
+ Label slow_case;
__ j(equal, &slow_case);
+ FastCloneShallowArrayStub::Mode mode = mode_;
+ // ecx is boilerplate object.
+ if (mode == CLONE_ANY_ELEMENTS) {
+ Label double_elements, check_fast_elements;
+ __ mov(ebx, FieldOperand(ecx, JSArray::kElementsOffset));
+ __ CheckMap(ebx, factory->fixed_cow_array_map(),
+ &check_fast_elements, DONT_DO_SMI_CHECK);
+ GenerateFastCloneShallowArrayCommon(masm, 0,
+ COPY_ON_WRITE_ELEMENTS, &slow_case);
+ __ ret(3 * kPointerSize);
+
+ __ bind(&check_fast_elements);
+ __ CheckMap(ebx, factory->fixed_array_map(),
+ &double_elements, DONT_DO_SMI_CHECK);
+ GenerateFastCloneShallowArrayCommon(masm, length_,
+ CLONE_ELEMENTS, &slow_case);
+ __ ret(3 * kPointerSize);
+
+ __ bind(&double_elements);
+ mode = CLONE_DOUBLE_ELEMENTS;
+ // Fall through to generate the code to handle double elements.
+ }
+
if (FLAG_debug_code) {
const char* message;
Handle<Map> expected_map;
- if (mode_ == CLONE_ELEMENTS) {
+ if (mode == CLONE_ELEMENTS) {
message = "Expected (writable) fixed array";
expected_map = factory->fixed_array_map();
+ } else if (mode == CLONE_DOUBLE_ELEMENTS) {
+ message = "Expected (writable) fixed double array";
+ expected_map = factory->fixed_double_array_map();
} else {
- ASSERT(mode_ == COPY_ON_WRITE_ELEMENTS);
+ ASSERT(mode == COPY_ON_WRITE_ELEMENTS);
message = "Expected copy-on-write fixed array";
expected_map = factory->fixed_cow_array_map();
}
@@ -202,43 +359,66 @@ void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
__ pop(ecx);
}
- // Allocate both the JS array and the elements array in one big
- // allocation. This avoids multiple limit checks.
- __ AllocateInNewSpace(size, eax, ebx, edx, &slow_case, TAG_OBJECT);
+ GenerateFastCloneShallowArrayCommon(masm, length_, mode, &slow_case);
+ // Return and remove the on-stack parameters.
+ __ ret(3 * kPointerSize);
- // Copy the JS array part.
- for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
- if ((i != JSArray::kElementsOffset) || (length_ == 0)) {
- __ mov(ebx, FieldOperand(ecx, i));
- __ mov(FieldOperand(eax, i), ebx);
- }
- }
+ __ bind(&slow_case);
+ __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1);
+}
- if (length_ > 0) {
- // Get hold of the elements array of the boilerplate and setup the
- // elements pointer in the resulting object.
- __ mov(ecx, FieldOperand(ecx, JSArray::kElementsOffset));
- __ lea(edx, Operand(eax, JSArray::kSize));
- __ mov(FieldOperand(eax, JSArray::kElementsOffset), edx);
- // Copy the elements array.
- for (int i = 0; i < elements_size; i += kPointerSize) {
- __ mov(ebx, FieldOperand(ecx, i));
- __ mov(FieldOperand(edx, i), ebx);
- }
+void FastCloneShallowObjectStub::Generate(MacroAssembler* masm) {
+ // Stack layout on entry:
+ //
+ // [esp + kPointerSize]: object literal flags.
+ // [esp + (2 * kPointerSize)]: constant properties.
+ // [esp + (3 * kPointerSize)]: literal index.
+ // [esp + (4 * kPointerSize)]: literals array.
+
+ // Load boilerplate object into ecx and check if we need to create a
+ // boilerplate.
+ Label slow_case;
+ __ mov(ecx, Operand(esp, 4 * kPointerSize));
+ __ mov(eax, Operand(esp, 3 * kPointerSize));
+ STATIC_ASSERT(kPointerSize == 4);
+ STATIC_ASSERT(kSmiTagSize == 1);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ mov(ecx, FieldOperand(ecx, eax, times_half_pointer_size,
+ FixedArray::kHeaderSize));
+ Factory* factory = masm->isolate()->factory();
+ __ cmp(ecx, factory->undefined_value());
+ __ j(equal, &slow_case);
+
+ // Check that the boilerplate contains only fast properties and we can
+ // statically determine the instance size.
+ int size = JSObject::kHeaderSize + length_ * kPointerSize;
+ __ mov(eax, FieldOperand(ecx, HeapObject::kMapOffset));
+ __ movzx_b(eax, FieldOperand(eax, Map::kInstanceSizeOffset));
+ __ cmp(eax, Immediate(size >> kPointerSizeLog2));
+ __ j(not_equal, &slow_case);
+
+ // Allocate the JS object and copy header together with all in-object
+ // properties from the boilerplate.
+ __ AllocateInNewSpace(size, eax, ebx, edx, &slow_case, TAG_OBJECT);
+ for (int i = 0; i < size; i += kPointerSize) {
+ __ mov(ebx, FieldOperand(ecx, i));
+ __ mov(FieldOperand(eax, i), ebx);
}
// Return and remove the on-stack parameters.
- __ ret(3 * kPointerSize);
+ __ ret(4 * kPointerSize);
__ bind(&slow_case);
- __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1);
+ __ TailCallRuntime(Runtime::kCreateObjectLiteralShallow, 4, 1);
}
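
The guard in this new object-clone stub reads the instance size out of the boilerplate's map (a byte field measured in pointer-size words) and compares it with the statically expected size for length_ in-object properties; only on a match is the word-by-word copy safe. In outline, with the header size an assumption for illustration:

    const int kPointerSize = 4;               // ia32
    const int kPointerSizeLog2 = 2;
    const int kJSObjectHeaderSizeGuess = 12;  // assumption, not the real constant

    bool CanCloneShallowObjectFast(int map_instance_size_in_words, int length) {
      int size = kJSObjectHeaderSizeGuess + length * kPointerSize;
      // cmp(eax, Immediate(size >> kPointerSizeLog2)); j(not_equal, &slow_case);
      return map_instance_size_in_words == (size >> kPointerSizeLog2);
    }
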
// The stub expects its argument on the stack and returns its result in tos_:
// zero for false, and a non-zero value for true.
void ToBooleanStub::Generate(MacroAssembler* masm) {
+ // This stub overrides SometimesSetsUpAFrame() to return false. That means
+ // we cannot call anything that could cause a GC from this stub.
Label patch;
Factory* factory = masm->isolate()->factory();
const Register argument = eax;
@@ -336,6 +516,41 @@ void ToBooleanStub::Generate(MacroAssembler* masm) {
}
+void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
+ // We don't allow a GC during a store buffer overflow so there is no need to
+ // store the registers in any particular way, but we do have to store and
+ // restore them.
+ __ pushad();
+ if (save_doubles_ == kSaveFPRegs) {
+ CpuFeatures::Scope scope(SSE2);
+ __ sub(esp, Immediate(kDoubleSize * XMMRegister::kNumRegisters));
+ for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
+ XMMRegister reg = XMMRegister::from_code(i);
+ __ movdbl(Operand(esp, i * kDoubleSize), reg);
+ }
+ }
+ const int argument_count = 1;
+
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ PrepareCallCFunction(argument_count, ecx);
+ __ mov(Operand(esp, 0 * kPointerSize),
+ Immediate(ExternalReference::isolate_address()));
+ __ CallCFunction(
+ ExternalReference::store_buffer_overflow_function(masm->isolate()),
+ argument_count);
+ if (save_doubles_ == kSaveFPRegs) {
+ CpuFeatures::Scope scope(SSE2);
+ for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
+ XMMRegister reg = XMMRegister::from_code(i);
+ __ movdbl(reg, Operand(esp, i * kDoubleSize));
+ }
+ __ add(esp, Immediate(kDoubleSize * XMMRegister::kNumRegisters));
+ }
+ __ popad();
+ __ ret(0);
+}
+
+
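
StoreBufferOverflowStub is the classic spill-around-a-call pattern: pushad for the GP registers, optionally a stack buffer for every XMM register, the C call, then restores in reverse order. Modeled at the C++ level (the function-pointer signature is invented):

    #include <cstring>

    const int kNumXmmRegisters = 8;  // ia32 SSE2

    void CallWithDoublesSaved(double* xmm /* live register values */,
                              bool save_doubles,
                              void (*overflow_fn)(void* isolate), void* isolate) {
      double saved[kNumXmmRegisters];
      if (save_doubles) {
        std::memcpy(saved, xmm, sizeof(saved));  // movdbl [esp + i*8], xmm_i
      }
      overflow_fn(isolate);                      // CallCFunction(...)
      if (save_doubles) {
        std::memcpy(xmm, saved, sizeof(saved));  // movdbl xmm_i, [esp + i*8]
      }
    }
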
void ToBooleanStub::CheckOddball(MacroAssembler* masm,
Type type,
Heap::RootListIndex value,
@@ -470,27 +685,27 @@ static void IntegerConvert(MacroAssembler* masm,
// Check whether the exponent is too big for a 64 bit signed integer.
static const uint32_t kTooBigExponent =
(HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
- __ cmp(Operand(scratch2), Immediate(kTooBigExponent));
+ __ cmp(scratch2, Immediate(kTooBigExponent));
__ j(greater_equal, conversion_failure);
// Load x87 register with heap number.
__ fld_d(FieldOperand(source, HeapNumber::kValueOffset));
// Reserve space for 64 bit answer.
- __ sub(Operand(esp), Immediate(sizeof(uint64_t))); // Nolint.
+ __ sub(esp, Immediate(sizeof(uint64_t))); // Nolint.
// Do conversion, which cannot fail because we checked the exponent.
__ fisttp_d(Operand(esp, 0));
__ mov(ecx, Operand(esp, 0)); // Load low word of answer into ecx.
- __ add(Operand(esp), Immediate(sizeof(uint64_t))); // Nolint.
+ __ add(esp, Immediate(sizeof(uint64_t))); // Nolint.
} else {
// Load ecx with zero. We use this either for the final shift or
// for the answer.
- __ xor_(ecx, Operand(ecx));
+ __ xor_(ecx, ecx);
// Check whether the exponent matches a 32 bit signed int that cannot be
// represented by a Smi. A non-smi 32 bit integer is 1.xxx * 2^30 so the
// exponent is 30 (biased). This is the exponent that we are fastest at and
// also the highest exponent we can handle here.
const uint32_t non_smi_exponent =
(HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
- __ cmp(Operand(scratch2), Immediate(non_smi_exponent));
+ __ cmp(scratch2, Immediate(non_smi_exponent));
// If we have a match of the int32-but-not-Smi exponent then skip some
// logic.
__ j(equal, &right_exponent, Label::kNear);
@@ -503,7 +718,7 @@ static void IntegerConvert(MacroAssembler* masm,
// >>> operator has a tendency to generate numbers with an exponent of 31.
const uint32_t big_non_smi_exponent =
(HeapNumber::kExponentBias + 31) << HeapNumber::kExponentShift;
- __ cmp(Operand(scratch2), Immediate(big_non_smi_exponent));
+ __ cmp(scratch2, Immediate(big_non_smi_exponent));
__ j(not_equal, conversion_failure);
// We have the big exponent, typically from >>>. This means the number is
// in the range 2^31 to 2^32 - 1. Get the top bits of the mantissa.
@@ -522,9 +737,9 @@ static void IntegerConvert(MacroAssembler* masm,
// Shift down 21 bits to get the most significant 11 bits or the low
// mantissa word.
__ shr(ecx, 32 - big_shift_distance);
- __ or_(ecx, Operand(scratch2));
+ __ or_(ecx, scratch2);
// We have the answer in ecx, but we may need to negate it.
- __ test(scratch, Operand(scratch));
+ __ test(scratch, scratch);
__ j(positive, &done, Label::kNear);
__ neg(ecx);
__ jmp(&done, Label::kNear);
@@ -538,14 +753,14 @@ static void IntegerConvert(MacroAssembler* masm,
// it rounds to zero.
const uint32_t zero_exponent =
(HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift;
- __ sub(Operand(scratch2), Immediate(zero_exponent));
+ __ sub(scratch2, Immediate(zero_exponent));
// ecx already has a Smi zero.
__ j(less, &done, Label::kNear);
// We have a shifted exponent between 0 and 30 in scratch2.
__ shr(scratch2, HeapNumber::kExponentShift);
__ mov(ecx, Immediate(30));
- __ sub(ecx, Operand(scratch2));
+ __ sub(ecx, scratch2);
__ bind(&right_exponent);
// Here ecx is the shift, scratch is the exponent word.
@@ -565,19 +780,19 @@ static void IntegerConvert(MacroAssembler* masm,
// Shift down 22 bits to get the most significant 10 bits or the low
// mantissa word.
__ shr(scratch2, 32 - shift_distance);
- __ or_(scratch2, Operand(scratch));
+ __ or_(scratch2, scratch);
// Move down according to the exponent.
__ shr_cl(scratch2);
// Now the unsigned answer is in scratch2. We need to move it to ecx and
// we may need to fix the sign.
Label negative;
- __ xor_(ecx, Operand(ecx));
+ __ xor_(ecx, ecx);
__ cmp(ecx, FieldOperand(source, HeapNumber::kExponentOffset));
__ j(greater, &negative, Label::kNear);
__ mov(ecx, scratch2);
__ jmp(&done, Label::kNear);
__ bind(&negative);
- __ sub(ecx, Operand(scratch2));
+ __ sub(ecx, scratch2);
__ bind(&done);
}
}
@@ -679,13 +894,13 @@ void UnaryOpStub::GenerateSmiCodeSub(MacroAssembler* masm,
__ JumpIfNotSmi(eax, non_smi, non_smi_near);
// We can't handle -0 with smis, so use a type transition for that case.
- __ test(eax, Operand(eax));
+ __ test(eax, eax);
__ j(zero, slow, slow_near);
// Try optimistic subtraction '0 - value', saving operand in eax for undo.
- __ mov(edx, Operand(eax));
+ __ mov(edx, eax);
__ Set(eax, Immediate(0));
- __ sub(eax, Operand(edx));
+ __ sub(eax, edx);
__ j(overflow, undo, undo_near);
__ ret(0);
}
@@ -706,7 +921,7 @@ void UnaryOpStub::GenerateSmiCodeBitNot(
void UnaryOpStub::GenerateSmiCodeUndo(MacroAssembler* masm) {
- __ mov(eax, Operand(edx));
+ __ mov(eax, edx);
}
@@ -760,7 +975,7 @@ void UnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm,
__ xor_(FieldOperand(eax, HeapNumber::kExponentOffset),
Immediate(HeapNumber::kSignMask)); // Flip sign.
} else {
- __ mov(edx, Operand(eax));
+ __ mov(edx, eax);
// edx: operand
Label slow_allocate_heapnumber, heapnumber_allocated;
@@ -768,11 +983,12 @@ void UnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm,
__ jmp(&heapnumber_allocated, Label::kNear);
__ bind(&slow_allocate_heapnumber);
- __ EnterInternalFrame();
- __ push(edx);
- __ CallRuntime(Runtime::kNumberAlloc, 0);
- __ pop(edx);
- __ LeaveInternalFrame();
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ push(edx);
+ __ CallRuntime(Runtime::kNumberAlloc, 0);
+ __ pop(edx);
+ }
__ bind(&heapnumber_allocated);
// eax: allocated 'empty' number
@@ -815,15 +1031,16 @@ void UnaryOpStub::GenerateHeapNumberCodeBitNot(MacroAssembler* masm,
__ jmp(&heapnumber_allocated);
__ bind(&slow_allocate_heapnumber);
- __ EnterInternalFrame();
- // Push the original HeapNumber on the stack. The integer value can't
- // be stored since it's untagged and not in the smi range (so we can't
- // smi-tag it). We'll recalculate the value after the GC instead.
- __ push(ebx);
- __ CallRuntime(Runtime::kNumberAlloc, 0);
- // New HeapNumber is in eax.
- __ pop(edx);
- __ LeaveInternalFrame();
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ // Push the original HeapNumber on the stack. The integer value can't
+ // be stored since it's untagged and not in the smi range (so we can't
+ // smi-tag it). We'll recalculate the value after the GC instead.
+ __ push(ebx);
+ __ CallRuntime(Runtime::kNumberAlloc, 0);
+ // New HeapNumber is in eax.
+ __ pop(edx);
+ }
// IntegerConvert uses ebx and edi as scratch registers.
// This conversion won't go slow-case.
IntegerConvert(masm, edx, CpuFeatures::IsSupported(SSE3), slow);
@@ -833,7 +1050,7 @@ void UnaryOpStub::GenerateHeapNumberCodeBitNot(MacroAssembler* masm,
}
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope use_sse2(SSE2);
- __ cvtsi2sd(xmm0, Operand(ecx));
+ __ cvtsi2sd(xmm0, ecx);
__ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
} else {
__ push(ecx);
@@ -947,6 +1164,10 @@ void BinaryOpStub::GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm) {
void BinaryOpStub::Generate(MacroAssembler* masm) {
+ // Explicitly allow generation of nested stubs. It is safe here because
+ // generation code does not use any raw pointers.
+ AllowStubCallsScope allow_stub_calls(masm, true);
+
switch (operands_type_) {
case BinaryOpIC::UNINITIALIZED:
GenerateTypeTransition(masm);
@@ -1022,7 +1243,7 @@ void BinaryOpStub::GenerateSmiCode(
// eax in case the result is not a smi.
ASSERT(!left.is(ecx) && !right.is(ecx));
__ mov(ecx, right);
- __ or_(right, Operand(left)); // Bitwise or is commutative.
+ __ or_(right, left); // Bitwise or is commutative.
combined = right;
break;
@@ -1034,7 +1255,7 @@ void BinaryOpStub::GenerateSmiCode(
case Token::DIV:
case Token::MOD:
__ mov(combined, right);
- __ or_(combined, Operand(left));
+ __ or_(combined, left);
break;
case Token::SHL:
@@ -1044,7 +1265,7 @@ void BinaryOpStub::GenerateSmiCode(
// for the smi check register.
ASSERT(!left.is(ecx) && !right.is(ecx));
__ mov(ecx, right);
- __ or_(right, Operand(left));
+ __ or_(right, left);
combined = right;
break;
@@ -1067,12 +1288,12 @@ void BinaryOpStub::GenerateSmiCode(
case Token::BIT_XOR:
ASSERT(right.is(eax));
- __ xor_(right, Operand(left)); // Bitwise xor is commutative.
+ __ xor_(right, left); // Bitwise xor is commutative.
break;
case Token::BIT_AND:
ASSERT(right.is(eax));
- __ and_(right, Operand(left)); // Bitwise and is commutative.
+ __ and_(right, left); // Bitwise and is commutative.
break;
case Token::SHL:
@@ -1121,12 +1342,12 @@ void BinaryOpStub::GenerateSmiCode(
case Token::ADD:
ASSERT(right.is(eax));
- __ add(right, Operand(left)); // Addition is commutative.
+ __ add(right, left); // Addition is commutative.
__ j(overflow, &use_fp_on_smis);
break;
case Token::SUB:
- __ sub(left, Operand(right));
+ __ sub(left, right);
__ j(overflow, &use_fp_on_smis);
__ mov(eax, left);
break;
@@ -1140,7 +1361,7 @@ void BinaryOpStub::GenerateSmiCode(
// Remove tag from one of the operands (but keep sign).
__ SmiUntag(right);
// Do multiplication.
- __ imul(right, Operand(left)); // Multiplication is commutative.
+ __ imul(right, left); // Multiplication is commutative.
__ j(overflow, &use_fp_on_smis);
// Check for negative zero result. Use combined = left | right.
__ NegativeZeroTest(right, combined, &use_fp_on_smis);
@@ -1151,7 +1372,7 @@ void BinaryOpStub::GenerateSmiCode(
// save the left operand.
__ mov(edi, left);
// Check for 0 divisor.
- __ test(right, Operand(right));
+ __ test(right, right);
__ j(zero, &use_fp_on_smis);
// Sign extend left into edx:eax.
ASSERT(left.is(eax));
@@ -1167,7 +1388,7 @@ void BinaryOpStub::GenerateSmiCode(
// Check for negative zero result. Use combined = left | right.
__ NegativeZeroTest(eax, combined, &use_fp_on_smis);
// Check that the remainder is zero.
- __ test(edx, Operand(edx));
+ __ test(edx, edx);
__ j(not_zero, &use_fp_on_smis);
// Tag the result and store it in register eax.
__ SmiTag(eax);
@@ -1175,7 +1396,7 @@ void BinaryOpStub::GenerateSmiCode(
case Token::MOD:
// Check for 0 divisor.
- __ test(right, Operand(right));
+ __ test(right, right);
__ j(zero, &not_smis);
// Sign extend left into edx:eax.
@@ -1226,11 +1447,11 @@ void BinaryOpStub::GenerateSmiCode(
break;
case Token::ADD:
// Revert right = right + left.
- __ sub(right, Operand(left));
+ __ sub(right, left);
break;
case Token::SUB:
// Revert left = left - right.
- __ add(left, Operand(right));
+ __ add(left, right);
break;
case Token::MUL:
// Right was clobbered but a copy is in ebx.
@@ -1268,7 +1489,7 @@ void BinaryOpStub::GenerateSmiCode(
ASSERT_EQ(Token::SHL, op_);
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope use_sse2(SSE2);
- __ cvtsi2sd(xmm0, Operand(left));
+ __ cvtsi2sd(xmm0, left);
__ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
} else {
__ mov(Operand(esp, 1 * kPointerSize), left);
@@ -1290,11 +1511,11 @@ void BinaryOpStub::GenerateSmiCode(
switch (op_) {
case Token::ADD:
// Revert right = right + left.
- __ sub(right, Operand(left));
+ __ sub(right, left);
break;
case Token::SUB:
// Revert left = left - right.
- __ add(left, Operand(right));
+ __ add(left, right);
break;
case Token::MUL:
// Right was clobbered but a copy is in ebx.
@@ -1486,7 +1707,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
// Check result type if it is currently Int32.
if (result_type_ <= BinaryOpIC::INT32) {
__ cvttsd2si(ecx, Operand(xmm0));
- __ cvtsi2sd(xmm2, Operand(ecx));
+ __ cvtsi2sd(xmm2, ecx);
__ ucomisd(xmm0, xmm2);
__ j(not_zero, &not_int32);
__ j(carry, &not_int32);
@@ -1548,9 +1769,9 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
FloatingPointHelper::CheckLoadedIntegersWereInt32(masm, use_sse3_,
&not_int32);
switch (op_) {
- case Token::BIT_OR: __ or_(eax, Operand(ecx)); break;
- case Token::BIT_AND: __ and_(eax, Operand(ecx)); break;
- case Token::BIT_XOR: __ xor_(eax, Operand(ecx)); break;
+ case Token::BIT_OR: __ or_(eax, ecx); break;
+ case Token::BIT_AND: __ and_(eax, ecx); break;
+ case Token::BIT_XOR: __ xor_(eax, ecx); break;
case Token::SAR: __ sar_cl(eax); break;
case Token::SHL: __ shl_cl(eax); break;
case Token::SHR: __ shr_cl(eax); break;
@@ -1574,7 +1795,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
if (op_ != Token::SHR) {
__ bind(&non_smi_result);
// Allocate a heap number if needed.
- __ mov(ebx, Operand(eax)); // ebx: result
+ __ mov(ebx, eax); // ebx: result
Label skip_allocation;
switch (mode_) {
case OVERWRITE_LEFT:
@@ -1594,7 +1815,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
// Store the result in the HeapNumber and return.
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope use_sse2(SSE2);
- __ cvtsi2sd(xmm0, Operand(ebx));
+ __ cvtsi2sd(xmm0, ebx);
__ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
} else {
__ mov(Operand(esp, 1 * kPointerSize), ebx);
@@ -1675,7 +1896,7 @@ void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
__ cmp(edx, factory->undefined_value());
__ j(not_equal, &check, Label::kNear);
if (Token::IsBitOp(op_)) {
- __ xor_(edx, Operand(edx));
+ __ xor_(edx, edx);
} else {
__ mov(edx, Immediate(factory->nan_value()));
}
@@ -1684,7 +1905,7 @@ void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
__ cmp(eax, factory->undefined_value());
__ j(not_equal, &done, Label::kNear);
if (Token::IsBitOp(op_)) {
- __ xor_(eax, Operand(eax));
+ __ xor_(eax, eax);
} else {
__ mov(eax, Immediate(factory->nan_value()));
}
@@ -1762,9 +1983,9 @@ void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
use_sse3_,
&not_floats);
switch (op_) {
- case Token::BIT_OR: __ or_(eax, Operand(ecx)); break;
- case Token::BIT_AND: __ and_(eax, Operand(ecx)); break;
- case Token::BIT_XOR: __ xor_(eax, Operand(ecx)); break;
+ case Token::BIT_OR: __ or_(eax, ecx); break;
+ case Token::BIT_AND: __ and_(eax, ecx); break;
+ case Token::BIT_XOR: __ xor_(eax, ecx); break;
case Token::SAR: __ sar_cl(eax); break;
case Token::SHL: __ shl_cl(eax); break;
case Token::SHR: __ shr_cl(eax); break;
@@ -1788,7 +2009,7 @@ void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
if (op_ != Token::SHR) {
__ bind(&non_smi_result);
// Allocate a heap number if needed.
- __ mov(ebx, Operand(eax)); // ebx: result
+ __ mov(ebx, eax); // ebx: result
Label skip_allocation;
switch (mode_) {
case OVERWRITE_LEFT:
@@ -1808,7 +2029,7 @@ void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
// Store the result in the HeapNumber and return.
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope use_sse2(SSE2);
- __ cvtsi2sd(xmm0, Operand(ebx));
+ __ cvtsi2sd(xmm0, ebx);
__ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
} else {
__ mov(Operand(esp, 1 * kPointerSize), ebx);
@@ -1961,9 +2182,9 @@ void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
use_sse3_,
&call_runtime);
switch (op_) {
- case Token::BIT_OR: __ or_(eax, Operand(ecx)); break;
- case Token::BIT_AND: __ and_(eax, Operand(ecx)); break;
- case Token::BIT_XOR: __ xor_(eax, Operand(ecx)); break;
+ case Token::BIT_OR: __ or_(eax, ecx); break;
+ case Token::BIT_AND: __ and_(eax, ecx); break;
+ case Token::BIT_XOR: __ xor_(eax, ecx); break;
case Token::SAR: __ sar_cl(eax); break;
case Token::SHL: __ shl_cl(eax); break;
case Token::SHR: __ shr_cl(eax); break;
@@ -1987,7 +2208,7 @@ void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
if (op_ != Token::SHR) {
__ bind(&non_smi_result);
// Allocate a heap number if needed.
- __ mov(ebx, Operand(eax)); // ebx: result
+ __ mov(ebx, eax); // ebx: result
Label skip_allocation;
switch (mode_) {
case OVERWRITE_LEFT:
@@ -2007,7 +2228,7 @@ void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
// Store the result in the HeapNumber and return.
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope use_sse2(SSE2);
- __ cvtsi2sd(xmm0, Operand(ebx));
+ __ cvtsi2sd(xmm0, ebx);
__ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
} else {
__ mov(Operand(esp, 1 * kPointerSize), ebx);
@@ -2117,10 +2338,10 @@ void BinaryOpStub::GenerateHeapResultAllocation(
__ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure);
// Now edx can be overwritten, losing one of the arguments, as we are
// done and will not need it any more.
- __ mov(edx, Operand(ebx));
+ __ mov(edx, ebx);
__ bind(&skip_allocation);
// Use object in edx as a result holder
- __ mov(eax, Operand(edx));
+ __ mov(eax, edx);
break;
}
case OVERWRITE_RIGHT:
@@ -2178,7 +2399,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
// Then load the low and high words of the double into ebx, edx.
STATIC_ASSERT(kSmiTagSize == 1);
__ sar(eax, 1);
- __ sub(Operand(esp), Immediate(2 * kPointerSize));
+ __ sub(esp, Immediate(2 * kPointerSize));
__ mov(Operand(esp, 0), eax);
__ fild_s(Operand(esp, 0));
__ fst_d(Operand(esp, 0));
@@ -2189,7 +2410,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
// Check if input is a HeapNumber.
__ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
Factory* factory = masm->isolate()->factory();
- __ cmp(Operand(ebx), Immediate(factory->heap_number_map()));
+ __ cmp(ebx, Immediate(factory->heap_number_map()));
__ j(not_equal, &runtime_call);
// Input is a HeapNumber. Push it on the FPU stack and load its
// low and high words into ebx, edx.
@@ -2201,12 +2422,12 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
} else { // UNTAGGED.
if (CpuFeatures::IsSupported(SSE4_1)) {
CpuFeatures::Scope sse4_scope(SSE4_1);
- __ pextrd(Operand(edx), xmm1, 0x1); // copy xmm1[63..32] to edx.
+ __ pextrd(edx, xmm1, 0x1); // copy xmm1[63..32] to edx.
} else {
__ pshufd(xmm0, xmm1, 0x1);
- __ movd(Operand(edx), xmm0);
+ __ movd(edx, xmm0);
}
- __ movd(Operand(ebx), xmm1);
+ __ movd(ebx, xmm1);
}
// ST[0] or xmm1 == double value
@@ -2215,15 +2436,15 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
// Compute hash (the shifts are arithmetic):
// h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1);
__ mov(ecx, ebx);
- __ xor_(ecx, Operand(edx));
+ __ xor_(ecx, edx);
__ mov(eax, ecx);
__ sar(eax, 16);
- __ xor_(ecx, Operand(eax));
+ __ xor_(ecx, eax);
__ mov(eax, ecx);
__ sar(eax, 8);
- __ xor_(ecx, Operand(eax));
+ __ xor_(ecx, eax);
ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize));
- __ and_(Operand(ecx),
+ __ and_(ecx,
Immediate(TranscendentalCache::SubCache::kCacheSize - 1));
// ST[0] or xmm1 == double value.
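
The hash above in plain C++: xor the two 32-bit halves of the double, fold the high bits down with two shifts (arithmetic, matching __ sar), and mask to the power-of-two cache size:

    #include <cstdint>

    uint32_t TranscendentalCacheHash(uint32_t low, uint32_t high,
                                     uint32_t cache_size /* power of two */) {
      int32_t h = static_cast<int32_t>(low ^ high);
      h ^= h >> 16;  // arithmetic shifts, as the comment above notes
      h ^= h >> 8;
      return static_cast<uint32_t>(h) & (cache_size - 1);
    }
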
@@ -2238,7 +2459,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
__ mov(eax, Operand(eax, cache_array_index));
// Eax points to the cache for the type type_.
// If NULL, the cache hasn't been initialized yet, so go through runtime.
- __ test(eax, Operand(eax));
+ __ test(eax, eax);
__ j(zero, &runtime_call_clear_stack);
#ifdef DEBUG
// Check that the layout of cache elements match expectations.
@@ -2264,6 +2485,8 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
__ cmp(edx, Operand(ecx, kIntSize));
__ j(not_equal, &cache_miss, Label::kNear);
// Cache hit!
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->transcendental_cache_hit(), 1);
__ mov(eax, Operand(ecx, 2 * kIntSize));
if (tagged) {
__ fstp(0);
@@ -2274,6 +2497,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
}
__ bind(&cache_miss);
+ __ IncrementCounter(counters->transcendental_cache_miss(), 1);
// Update cache with new value.
// We are short on registers, so use no_reg as scratch.
// This gives slightly larger code.
@@ -2281,10 +2505,10 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
__ AllocateHeapNumber(eax, edi, no_reg, &runtime_call_clear_stack);
} else { // UNTAGGED.
__ AllocateHeapNumber(eax, edi, no_reg, &skip_cache);
- __ sub(Operand(esp), Immediate(kDoubleSize));
+ __ sub(esp, Immediate(kDoubleSize));
__ movdbl(Operand(esp, 0), xmm1);
__ fld_d(Operand(esp, 0));
- __ add(Operand(esp), Immediate(kDoubleSize));
+ __ add(esp, Immediate(kDoubleSize));
}
GenerateOperation(masm);
__ mov(Operand(ecx, 0), ebx);
@@ -2299,20 +2523,21 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
// Skip cache and return answer directly, only in untagged case.
__ bind(&skip_cache);
- __ sub(Operand(esp), Immediate(kDoubleSize));
+ __ sub(esp, Immediate(kDoubleSize));
__ movdbl(Operand(esp, 0), xmm1);
__ fld_d(Operand(esp, 0));
GenerateOperation(masm);
__ fstp_d(Operand(esp, 0));
__ movdbl(xmm1, Operand(esp, 0));
- __ add(Operand(esp), Immediate(kDoubleSize));
+ __ add(esp, Immediate(kDoubleSize));
// We return the value in xmm1 without adding it to the cache, but
// we cause a scavenging GC so that future allocations will succeed.
- __ EnterInternalFrame();
- // Allocate an unused object bigger than a HeapNumber.
- __ push(Immediate(Smi::FromInt(2 * kDoubleSize)));
- __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
- __ LeaveInternalFrame();
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ // Allocate an unused object bigger than a HeapNumber.
+ __ push(Immediate(Smi::FromInt(2 * kDoubleSize)));
+ __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
+ }
__ Ret();
}
@@ -2329,10 +2554,11 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
__ bind(&runtime_call);
__ AllocateHeapNumber(eax, edi, no_reg, &skip_cache);
__ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm1);
- __ EnterInternalFrame();
- __ push(eax);
- __ CallRuntime(RuntimeFunction(), 1);
- __ LeaveInternalFrame();
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ push(eax);
+ __ CallRuntime(RuntimeFunction(), 1);
+ }
__ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
__ Ret();
}
@@ -2343,6 +2569,7 @@ Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
switch (type_) {
case TranscendentalCache::SIN: return Runtime::kMath_sin;
case TranscendentalCache::COS: return Runtime::kMath_cos;
+ case TranscendentalCache::TAN: return Runtime::kMath_tan;
case TranscendentalCache::LOG: return Runtime::kMath_log;
default:
UNIMPLEMENTED();
@@ -2356,7 +2583,9 @@ void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm) {
// Input value is on FP stack, and also in ebx/edx.
// Input value is possibly in xmm1.
// Address of result (a newly allocated HeapNumber) may be in eax.
- if (type_ == TranscendentalCache::SIN || type_ == TranscendentalCache::COS) {
+ if (type_ == TranscendentalCache::SIN ||
+ type_ == TranscendentalCache::COS ||
+ type_ == TranscendentalCache::TAN) {
// Both fsin and fcos require arguments in the range +/-2^63 and
// return NaN for infinities and NaN. They can share all code except
// the actual fsin/fcos operation.
@@ -2364,13 +2593,13 @@ void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm) {
// If argument is outside the range -2^63..2^63, fsin/cos doesn't
// work. We must reduce it to the appropriate range.
__ mov(edi, edx);
- __ and_(Operand(edi), Immediate(0x7ff00000)); // Exponent only.
+ __ and_(edi, Immediate(0x7ff00000)); // Exponent only.
int supported_exponent_limit =
(63 + HeapNumber::kExponentBias) << HeapNumber::kExponentShift;
- __ cmp(Operand(edi), Immediate(supported_exponent_limit));
+ __ cmp(edi, Immediate(supported_exponent_limit));
__ j(below, &in_range, Label::kNear);
// Check for infinity and NaN. Both return NaN for sin.
- __ cmp(Operand(edi), Immediate(0x7ff00000));
+ __ cmp(edi, Immediate(0x7ff00000));
Label non_nan_result;
__ j(not_equal, &non_nan_result, Label::kNear);
// Input is +/-Infinity or NaN. Result is NaN.
@@ -2379,7 +2608,7 @@ void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm) {
__ push(Immediate(0x7ff80000));
__ push(Immediate(0));
__ fld_d(Operand(esp, 0));
- __ add(Operand(esp), Immediate(2 * kPointerSize));
+ __ add(esp, Immediate(2 * kPointerSize));
__ jmp(&done, Label::kNear);
__ bind(&non_nan_result);
@@ -2395,7 +2624,7 @@ void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm) {
__ fwait();
__ fnstsw_ax();
// Clear if Illegal Operand or Zero Division exceptions are set.
- __ test(Operand(eax), Immediate(5));
+ __ test(eax, Immediate(5));
__ j(zero, &no_exceptions, Label::kNear);
__ fnclex();
__ bind(&no_exceptions);
@@ -2408,7 +2637,7 @@ void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm) {
__ fprem1();
__ fwait();
__ fnstsw_ax();
- __ test(Operand(eax), Immediate(0x400 /* C2 */));
+ __ test(eax, Immediate(0x400 /* C2 */));
// If C2 is set, computation only has partial result. Loop to
// continue computation.
__ j(not_zero, &partial_remainder_loop);
@@ -2427,6 +2656,12 @@ void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm) {
case TranscendentalCache::COS:
__ fcos();
break;
+ case TranscendentalCache::TAN:
+ // FPTAN replaces st(0) with its tangent and pushes 1.0 onto the
+ // FP register stack.
+ __ fptan();
+ __ fstp(0); // Pop FP register stack.
+ break;
default:
UNREACHABLE();
}
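
The new TAN case rides on the same range-reduction path as SIN and COS: arguments outside +/-2^63 are folded back by the fprem1 loop, infinities and NaN produce NaN, and FPTAN leaves the tangent behind once the pushed 1.0 is popped. A library-level sketch of the observable behavior, not the stub's bit-exact x87 sequence:

    #include <cmath>
    #include <limits>

    double StubLikeTan(double x) {
      if (!std::isfinite(x)) {                     // the 0x7ff00000 exponent check
        return std::numeric_limits<double>::quiet_NaN();
      }
      const double kTwoPi = 6.283185307179586;
      double reduced = std::remainder(x, kTwoPi);  // fprem1-style reduction
      return std::tan(reduced);                    // fptan; fstp(0) pops the 1.0
    }
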
@@ -2541,13 +2776,13 @@ void FloatingPointHelper::LoadSSE2Operands(MacroAssembler* masm) {
__ bind(&load_smi_edx);
__ SmiUntag(edx); // Untag smi before converting to float.
- __ cvtsi2sd(xmm0, Operand(edx));
+ __ cvtsi2sd(xmm0, edx);
__ SmiTag(edx); // Retag smi for heap number overwriting test.
__ jmp(&load_eax);
__ bind(&load_smi_eax);
__ SmiUntag(eax); // Untag smi before converting to float.
- __ cvtsi2sd(xmm1, Operand(eax));
+ __ cvtsi2sd(xmm1, eax);
__ SmiTag(eax); // Retag smi for heap number overwriting test.
__ bind(&done);
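// The smi round trip above in plain C++, as a sketch (on 32-bit V8 a smi
// is the integer shifted left one bit with a zero tag bit; helper names
// are illustrative):
#include <cstdint>

static int32_t SmiUntagValue(int32_t smi) { return smi >> 1; }
static int32_t SmiTagValue(int32_t value) { return value << 1; }

static double SmiToDouble(int32_t smi) {
  return static_cast<double>(SmiUntagValue(smi));  // the cvtsi2sd step
}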
@@ -2571,12 +2806,12 @@ void FloatingPointHelper::LoadSSE2Operands(MacroAssembler* masm,
__ jmp(not_numbers); // Argument in eax is not a number.
__ bind(&load_smi_edx);
__ SmiUntag(edx); // Untag smi before converting to float.
- __ cvtsi2sd(xmm0, Operand(edx));
+ __ cvtsi2sd(xmm0, edx);
__ SmiTag(edx); // Retag smi for heap number overwriting test.
__ jmp(&load_eax);
__ bind(&load_smi_eax);
__ SmiUntag(eax); // Untag smi before converting to float.
- __ cvtsi2sd(xmm1, Operand(eax));
+ __ cvtsi2sd(xmm1, eax);
__ SmiTag(eax); // Retag smi for heap number overwriting test.
__ jmp(&done, Label::kNear);
__ bind(&load_float_eax);
@@ -2592,11 +2827,11 @@ void FloatingPointHelper::LoadSSE2Smis(MacroAssembler* masm,
__ mov(scratch, left);
ASSERT(!scratch.is(right)); // We're about to clobber scratch.
__ SmiUntag(scratch);
- __ cvtsi2sd(xmm0, Operand(scratch));
+ __ cvtsi2sd(xmm0, scratch);
__ mov(scratch, right);
__ SmiUntag(scratch);
- __ cvtsi2sd(xmm1, Operand(scratch));
+ __ cvtsi2sd(xmm1, scratch);
}
@@ -2604,12 +2839,12 @@ void FloatingPointHelper::CheckSSE2OperandsAreInt32(MacroAssembler* masm,
Label* non_int32,
Register scratch) {
__ cvttsd2si(scratch, Operand(xmm0));
- __ cvtsi2sd(xmm2, Operand(scratch));
+ __ cvtsi2sd(xmm2, scratch);
__ ucomisd(xmm0, xmm2);
__ j(not_zero, non_int32);
__ j(carry, non_int32);
__ cvttsd2si(scratch, Operand(xmm1));
- __ cvtsi2sd(xmm2, Operand(scratch));
+ __ cvtsi2sd(xmm2, scratch);
__ ucomisd(xmm1, xmm2);
__ j(not_zero, non_int32);
__ j(carry, non_int32);
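// Scalar model of the round-trip test above, as a sketch (strict C++
// leaves NaN/out-of-range conversions undefined, where cvttsd2si instead
// yields 0x80000000 and ucomisd reports unordered via the carry flag):
#include <cmath>
#include <cstdint>

static bool IsExactInt32(double d) {
  int32_t truncated = static_cast<int32_t>(d);         // cvttsd2si
  double round_trip = static_cast<double>(truncated);  // cvtsi2sd
  return !std::isnan(d) && d == round_trip;            // ucomisd + jumps
}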
@@ -2717,7 +2952,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
// Save 1 in xmm3 - we need this several times later on.
__ mov(ecx, Immediate(1));
- __ cvtsi2sd(xmm3, Operand(ecx));
+ __ cvtsi2sd(xmm3, ecx);
Label exponent_nonsmi;
Label base_nonsmi;
@@ -2728,7 +2963,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
// Optimized version when both exponent and base are smis.
Label powi;
__ SmiUntag(edx);
- __ cvtsi2sd(xmm0, Operand(edx));
+ __ cvtsi2sd(xmm0, edx);
__ jmp(&powi);
// exponent is smi and base is a heapnumber.
__ bind(&base_nonsmi);
@@ -2770,11 +3005,11 @@ void MathPowStub::Generate(MacroAssembler* masm) {
// base has the original value of the exponent - if the exponent is
// negative, return 1/result.
- __ test(edx, Operand(edx));
+ __ test(edx, edx);
__ j(positive, &allocate_return);
// Special case if xmm1 has reached infinity.
__ mov(ecx, Immediate(0x7FB00000));
- __ movd(xmm0, Operand(ecx));
+ __ movd(xmm0, ecx);
__ cvtss2sd(xmm0, xmm0);
__ ucomisd(xmm0, xmm1);
__ j(equal, &call_runtime);
@@ -2797,7 +3032,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
Label handle_special_cases;
__ JumpIfNotSmi(edx, &base_not_smi, Label::kNear);
__ SmiUntag(edx);
- __ cvtsi2sd(xmm0, Operand(edx));
+ __ cvtsi2sd(xmm0, edx);
__ jmp(&handle_special_cases, Label::kNear);
__ bind(&base_not_smi);
@@ -2806,7 +3041,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ j(not_equal, &call_runtime);
__ mov(ecx, FieldOperand(edx, HeapNumber::kExponentOffset));
__ and_(ecx, HeapNumber::kExponentMask);
- __ cmp(Operand(ecx), Immediate(HeapNumber::kExponentMask));
+ __ cmp(ecx, Immediate(HeapNumber::kExponentMask));
// base is NaN or +/-Infinity
__ j(greater_equal, &call_runtime);
__ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
@@ -2817,7 +3052,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
// Test for -0.5.
// Load xmm2 with -0.5.
__ mov(ecx, Immediate(0xBF000000));
- __ movd(xmm2, Operand(ecx));
+ __ movd(xmm2, ecx);
__ cvtss2sd(xmm2, xmm2);
// xmm2 now has -0.5.
__ ucomisd(xmm2, xmm1);
@@ -2873,13 +3108,13 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
Label adaptor;
__ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
__ mov(ecx, Operand(ebx, StandardFrameConstants::kContextOffset));
- __ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ cmp(ecx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
__ j(equal, &adaptor, Label::kNear);
// Check index against formal parameters count limit passed in
// through register eax. Use unsigned comparison to get negative
// check for free.
- __ cmp(edx, Operand(eax));
+ __ cmp(edx, eax);
__ j(above_equal, &slow, Label::kNear);
// Read the argument from the stack and return it.
@@ -2895,7 +3130,7 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
// comparison to get negative check for free.
__ bind(&adaptor);
__ mov(ecx, Operand(ebx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ cmp(edx, Operand(ecx));
+ __ cmp(edx, ecx);
__ j(above_equal, &slow, Label::kNear);
// Read the argument from the stack and return it.
@@ -2926,7 +3161,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) {
Label runtime;
__ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
__ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
- __ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ cmp(ecx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
__ j(not_equal, &runtime, Label::kNear);
// Patch the arguments.length and the parameters pointer.
@@ -2957,7 +3192,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
Label adaptor_frame, try_allocate;
__ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
__ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
- __ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ cmp(ecx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
__ j(equal, &adaptor_frame, Label::kNear);
// No adaptor, parameter count = argument count.
@@ -2976,7 +3211,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
// esp[4] = parameter count (tagged)
// esp[8] = address of receiver argument
// Compute the mapped parameter count = min(ebx, ecx) in ebx.
- __ cmp(ebx, Operand(ecx));
+ __ cmp(ebx, ecx);
__ j(less_equal, &try_allocate, Label::kNear);
__ mov(ebx, ecx);
@@ -2990,7 +3225,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
const int kParameterMapHeaderSize =
FixedArray::kHeaderSize + 2 * kPointerSize;
Label no_parameter_map;
- __ test(ebx, Operand(ebx));
+ __ test(ebx, ebx);
__ j(zero, &no_parameter_map, Label::kNear);
__ lea(ebx, Operand(ebx, times_2, kParameterMapHeaderSize));
__ bind(&no_parameter_map);
@@ -2999,7 +3234,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
__ lea(ebx, Operand(ebx, ecx, times_2, FixedArray::kHeaderSize));
// 3. Arguments object.
- __ add(Operand(ebx), Immediate(Heap::kArgumentsObjectSize));
+ __ add(ebx, Immediate(Heap::kArgumentsObjectSize));
// Do the allocation of all three objects in one go.
__ AllocateInNewSpace(ebx, eax, edx, edi, &runtime, TAG_OBJECT);
@@ -3014,7 +3249,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
__ mov(edi, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
__ mov(edi, FieldOperand(edi, GlobalObject::kGlobalContextOffset));
__ mov(ebx, Operand(esp, 0 * kPointerSize));
- __ test(ebx, Operand(ebx));
+ __ test(ebx, ebx);
__ j(not_zero, &has_mapped_parameters, Label::kNear);
__ mov(edi, Operand(edi,
Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX)));
@@ -3069,7 +3304,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
// Initialize parameter map. If there are no mapped arguments, we're done.
Label skip_parameter_map;
- __ test(ebx, Operand(ebx));
+ __ test(ebx, ebx);
__ j(zero, &skip_parameter_map);
__ mov(FieldOperand(edi, FixedArray::kMapOffset),
@@ -3093,7 +3328,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
__ mov(eax, Operand(esp, 2 * kPointerSize));
__ mov(ebx, Immediate(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
__ add(ebx, Operand(esp, 4 * kPointerSize));
- __ sub(ebx, Operand(eax));
+ __ sub(ebx, eax);
__ mov(ecx, FACTORY->the_hole_value());
__ mov(edx, edi);
__ lea(edi, Operand(edi, eax, times_2, kParameterMapHeaderSize));
@@ -3110,12 +3345,12 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
__ jmp(&parameters_test, Label::kNear);
__ bind(&parameters_loop);
- __ sub(Operand(eax), Immediate(Smi::FromInt(1)));
+ __ sub(eax, Immediate(Smi::FromInt(1)));
__ mov(FieldOperand(edx, eax, times_2, kParameterMapHeaderSize), ebx);
__ mov(FieldOperand(edi, eax, times_2, FixedArray::kHeaderSize), ecx);
- __ add(Operand(ebx), Immediate(Smi::FromInt(1)));
+ __ add(ebx, Immediate(Smi::FromInt(1)));
__ bind(&parameters_test);
- __ test(eax, Operand(eax));
+ __ test(eax, eax);
__ j(not_zero, &parameters_loop, Label::kNear);
__ pop(ecx);
@@ -3135,18 +3370,18 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
Label arguments_loop, arguments_test;
__ mov(ebx, Operand(esp, 1 * kPointerSize));
__ mov(edx, Operand(esp, 4 * kPointerSize));
- __ sub(Operand(edx), ebx); // Is there a smarter way to do negative scaling?
- __ sub(Operand(edx), ebx);
+ __ sub(edx, ebx); // Is there a smarter way to do negative scaling?
+ __ sub(edx, ebx);
__ jmp(&arguments_test, Label::kNear);
__ bind(&arguments_loop);
- __ sub(Operand(edx), Immediate(kPointerSize));
+ __ sub(edx, Immediate(kPointerSize));
__ mov(eax, Operand(edx, 0));
__ mov(FieldOperand(edi, ebx, times_2, FixedArray::kHeaderSize), eax);
- __ add(Operand(ebx), Immediate(Smi::FromInt(1)));
+ __ add(ebx, Immediate(Smi::FromInt(1)));
__ bind(&arguments_test);
- __ cmp(ebx, Operand(ecx));
+ __ cmp(ebx, ecx);
__ j(less, &arguments_loop, Label::kNear);
// Restore.
@@ -3174,7 +3409,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
Label adaptor_frame, try_allocate, runtime;
__ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
__ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
- __ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ cmp(ecx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
__ j(equal, &adaptor_frame, Label::kNear);
// Get the length from the frame.
@@ -3193,11 +3428,11 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
// the arguments object and the elements array.
Label add_arguments_object;
__ bind(&try_allocate);
- __ test(ecx, Operand(ecx));
+ __ test(ecx, ecx);
__ j(zero, &add_arguments_object, Label::kNear);
__ lea(ecx, Operand(ecx, times_2, FixedArray::kHeaderSize));
__ bind(&add_arguments_object);
- __ add(Operand(ecx), Immediate(Heap::kArgumentsObjectSizeStrict));
+ __ add(ecx, Immediate(Heap::kArgumentsObjectSizeStrict));
// Do the allocation of both objects in one go.
__ AllocateInNewSpace(ecx, eax, edx, ebx, &runtime, TAG_OBJECT);
@@ -3224,7 +3459,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
// If there are no actual arguments, we're done.
Label done;
- __ test(ecx, Operand(ecx));
+ __ test(ecx, ecx);
__ j(zero, &done, Label::kNear);
// Get the parameters pointer from the stack.
@@ -3246,8 +3481,8 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
__ bind(&loop);
__ mov(ebx, Operand(edx, -1 * kPointerSize)); // Skip receiver.
__ mov(FieldOperand(edi, FixedArray::kHeaderSize), ebx);
- __ add(Operand(edi), Immediate(kPointerSize));
- __ sub(Operand(edx), Immediate(kPointerSize));
+ __ add(edi, Immediate(kPointerSize));
+ __ sub(edx, Immediate(kPointerSize));
__ dec(ecx);
__ j(not_zero, &loop);
@@ -3268,10 +3503,6 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
#ifdef V8_INTERPRETED_REGEXP
__ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
#else // V8_INTERPRETED_REGEXP
- if (!FLAG_regexp_entry_native) {
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
- return;
- }
// Stack frame on entry.
// esp[0]: return address
@@ -3294,7 +3525,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
ExternalReference address_of_regexp_stack_memory_size =
ExternalReference::address_of_regexp_stack_memory_size(masm->isolate());
__ mov(ebx, Operand::StaticVariable(address_of_regexp_stack_memory_size));
- __ test(ebx, Operand(ebx));
+ __ test(ebx, ebx);
__ j(zero, &runtime);
// Check that the first argument is a JSRegExp object.
@@ -3315,7 +3546,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// ecx: RegExp data (FixedArray)
// Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
__ mov(ebx, FieldOperand(ecx, JSRegExp::kDataTagOffset));
- __ cmp(Operand(ebx), Immediate(Smi::FromInt(JSRegExp::IRREGEXP)));
+ __ cmp(ebx, Immediate(Smi::FromInt(JSRegExp::IRREGEXP)));
__ j(not_equal, &runtime);
// ecx: RegExp data (FixedArray)
@@ -3325,7 +3556,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// uses the assumption that smis are 2 * their untagged value.
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
- __ add(Operand(edx), Immediate(2)); // edx was a smi.
+ __ add(edx, Immediate(2)); // edx was a smi.
// Check that the static offsets vector buffer is large enough.
__ cmp(edx, OffsetsVector::kStaticOffsetsVectorSize);
__ j(above, &runtime);
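// Worked example of the smi shortcut above (illustrative helper): a 32-bit
// smi already stores 2 * value, so adding 2 to the untouched smi yields
// (number_of_captures + 1) * 2 with no untag/retag.
#include <cstdint>

static int32_t CaptureRegisterCount(int32_t number_of_captures_smi) {
  return number_of_captures_smi + 2;  // == (captures + 1) * 2, untagged
}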
@@ -3347,7 +3578,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// string length. A negative value will be greater (unsigned comparison).
__ mov(eax, Operand(esp, kPreviousIndexOffset));
__ JumpIfNotSmi(eax, &runtime);
- __ cmp(eax, Operand(ebx));
+ __ cmp(eax, ebx);
__ j(above_equal, &runtime);
// ecx: RegExp data (FixedArray)
@@ -3367,8 +3598,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// additional information.
__ mov(eax, FieldOperand(ebx, FixedArray::kLengthOffset));
__ SmiUntag(eax);
- __ add(Operand(edx), Immediate(RegExpImpl::kLastMatchOverhead));
- __ cmp(edx, Operand(eax));
+ __ add(edx, Immediate(RegExpImpl::kLastMatchOverhead));
+ __ cmp(edx, eax);
__ j(greater, &runtime);
// Reset offset for possibly sliced string.
@@ -3380,27 +3611,40 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
__ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
// First check for flat two byte string.
- __ and_(ebx,
- kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask);
+ __ and_(ebx, kIsNotStringMask |
+ kStringRepresentationMask |
+ kStringEncodingMask |
+ kShortExternalStringMask);
STATIC_ASSERT((kStringTag | kSeqStringTag | kTwoByteStringTag) == 0);
__ j(zero, &seq_two_byte_string, Label::kNear);
- // Any other flat string must be a flat ascii string.
- __ and_(Operand(ebx),
- Immediate(kIsNotStringMask | kStringRepresentationMask));
+ // Any other flat string must be a flat ascii string. None of the following
+ // string type tests will succeed if subject is not a string or a short
+ // external string.
+ __ and_(ebx, Immediate(kIsNotStringMask |
+ kStringRepresentationMask |
+ kShortExternalStringMask));
__ j(zero, &seq_ascii_string, Label::kNear);
+ // ebx: whether the subject is a string and, if so, its representation
// Check for flat cons string or sliced string.
// A flat cons string is a cons string where the second part is the empty
// string. In that case the subject string is just the first part of the cons
// string. Also in this case the first part of the cons string is known to be
// a sequential string or an external string.
// In the case of a sliced string its offset has to be taken into account.
- Label cons_string, check_encoding;
+ Label cons_string, external_string, check_encoding;
STATIC_ASSERT(kConsStringTag < kExternalStringTag);
STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
- __ cmp(Operand(ebx), Immediate(kExternalStringTag));
+ STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
+ STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
+ __ cmp(ebx, Immediate(kExternalStringTag));
__ j(less, &cons_string);
- __ j(equal, &runtime);
+ __ j(equal, &external_string);
+
+ // Catch non-string subject or short external string.
+ STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag != 0);
+ __ test(ebx, Immediate(kIsNotStringMask | kShortExternalStringTag));
+ __ j(not_zero, &runtime);
// String is sliced.
__ mov(edi, FieldOperand(eax, SlicedString::kOffsetOffset));
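// Shape of the three-way dispatch above in plain C++, as a sketch relying
// on the asserted tag ordering (cons < external < sliced); 'bits' is the
// masked instance type:
#include <cstdint>

enum StringRepClass { kConsOrFlat, kExternalOnly, kSlicedOrInvalid };

static StringRepClass ClassifyRep(uint32_t bits, uint32_t external_tag) {
  if (bits < external_tag) return kConsOrFlat;     // j(less, &cons_string)
  if (bits == external_tag) return kExternalOnly;  // j(equal, &external_string)
  return kSlicedOrInvalid;  // still subject to the non-string/short test
}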
@@ -3422,10 +3666,10 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
kStringRepresentationMask | kStringEncodingMask);
STATIC_ASSERT((kSeqStringTag | kTwoByteStringTag) == 0);
__ j(zero, &seq_two_byte_string, Label::kNear);
- // Any other flat string must be ascii.
+ // Any other flat string must be sequential ascii or external.
__ test_b(FieldOperand(ebx, Map::kInstanceTypeOffset),
kStringRepresentationMask);
- __ j(not_zero, &runtime);
+ __ j(not_zero, &external_string);
__ bind(&seq_ascii_string);
// eax: subject string (flat ascii)
@@ -3504,14 +3748,14 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Prepare start and end index of the input.
// Load the length from the original sliced string if that is the case.
__ mov(esi, FieldOperand(esi, String::kLengthOffset));
- __ add(esi, Operand(edi)); // Calculate input end wrt offset.
+ __ add(esi, edi); // Calculate input end wrt offset.
__ SmiUntag(edi);
- __ add(ebx, Operand(edi)); // Calculate input start wrt offset.
+ __ add(ebx, edi); // Calculate input start wrt offset.
// ebx: start index of the input string
// esi: end index of the input string
Label setup_two_byte, setup_rest;
- __ test(ecx, Operand(ecx));
+ __ test(ecx, ecx);
__ j(zero, &setup_two_byte, Label::kNear);
__ SmiUntag(esi);
__ lea(ecx, FieldOperand(eax, esi, times_1, SeqAsciiString::kHeaderSize));
@@ -3531,8 +3775,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ bind(&setup_rest);
// Locate the code entry and call it.
- __ add(Operand(edx), Immediate(Code::kHeaderSize - kHeapObjectTag));
- __ call(Operand(edx));
+ __ add(edx, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ call(edx);
// Drop arguments and come back to JS mode.
__ LeaveApiExitFrame();
@@ -3553,11 +3797,9 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// TODO(592): Rerunning the RegExp to get the stack overflow exception.
ExternalReference pending_exception(Isolate::kPendingExceptionAddress,
masm->isolate());
- __ mov(edx,
- Operand::StaticVariable(ExternalReference::the_hole_value_location(
- masm->isolate())));
+ __ mov(edx, Immediate(masm->isolate()->factory()->the_hole_value()));
__ mov(eax, Operand::StaticVariable(pending_exception));
- __ cmp(edx, Operand(eax));
+ __ cmp(edx, eax);
__ j(equal, &runtime);
// For exception, throw the exception again.
@@ -3578,7 +3820,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ bind(&failure);
// For failure to match, return null.
- __ mov(Operand(eax), factory->null_value());
+ __ mov(eax, factory->null_value());
__ ret(4 * kPointerSize);
// Load RegExp data.
@@ -3589,7 +3831,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Calculate number of capture registers (number_of_captures + 1) * 2.
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
- __ add(Operand(edx), Immediate(2)); // edx was a smi.
+ __ add(edx, Immediate(2)); // edx was a smi.
// edx: Number of capture registers
// Load last_match_info which is still known to be a fast case JSArray.
@@ -3605,12 +3847,18 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Store last subject and last input.
__ mov(eax, Operand(esp, kSubjectOffset));
__ mov(FieldOperand(ebx, RegExpImpl::kLastSubjectOffset), eax);
- __ mov(ecx, ebx);
- __ RecordWrite(ecx, RegExpImpl::kLastSubjectOffset, eax, edi);
+ __ RecordWriteField(ebx,
+ RegExpImpl::kLastSubjectOffset,
+ eax,
+ edi,
+ kDontSaveFPRegs);
__ mov(eax, Operand(esp, kSubjectOffset));
__ mov(FieldOperand(ebx, RegExpImpl::kLastInputOffset), eax);
- __ mov(ecx, ebx);
- __ RecordWrite(ecx, RegExpImpl::kLastInputOffset, eax, edi);
+ __ RecordWriteField(ebx,
+ RegExpImpl::kLastInputOffset,
+ eax,
+ edi,
+ kDontSaveFPRegs);
// Get the static offsets vector filled by the native regexp code.
ExternalReference address_of_static_offsets_vector =
@@ -3624,7 +3872,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Capture register counter starts from number of capture registers and
// counts down until wrapping after zero.
__ bind(&next_capture);
- __ sub(Operand(edx), Immediate(1));
+ __ sub(edx, Immediate(1));
__ j(negative, &done, Label::kNear);
// Read the value from the static offsets vector buffer.
__ mov(edi, Operand(ecx, edx, times_int_size, 0));
@@ -3642,6 +3890,27 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ mov(eax, Operand(esp, kLastMatchInfoOffset));
__ ret(4 * kPointerSize);
+ // External string. Short external strings have already been ruled out.
+ // eax: subject string (expected to be external)
+ // ebx: scratch
+ __ bind(&external_string);
+ __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
+ if (FLAG_debug_code) {
+ // Assert that we do not have a cons or slice (indirect strings) here.
+ // Sequential strings have already been ruled out.
+ __ test_b(ebx, kIsIndirectStringMask);
+ __ Assert(zero, "external string expected, but not found");
+ }
+ __ mov(eax, FieldOperand(eax, ExternalString::kResourceDataOffset));
+ // Move the pointer so that offset-wise, it looks like a sequential string.
+ STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
+ __ sub(eax, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+ STATIC_ASSERT(kTwoByteStringTag == 0);
+ __ test_b(ebx, kStringEncodingMask);
+ __ j(not_zero, &seq_ascii_string);
+ __ jmp(&seq_two_byte_string);
+
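// The pointer trick above, sketched with an illustrative helper: once eax
// holds the raw resource data, subtracting (header size - tag) makes the
// sequential-string addressing used later land on the characters.
static const char* FakeSequentialString(const char* resource_data,
                                        int header_size,
                                        int heap_object_tag) {
  return resource_data - (header_size - heap_object_tag);
}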
// Do the runtime call to execute the regexp.
__ bind(&runtime);
__ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
@@ -3655,7 +3924,7 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
Label done;
__ mov(ebx, Operand(esp, kPointerSize * 3));
__ JumpIfNotSmi(ebx, &slowcase);
- __ cmp(Operand(ebx), Immediate(Smi::FromInt(kMaxInlineLength)));
+ __ cmp(ebx, Immediate(Smi::FromInt(kMaxInlineLength)));
__ j(above, &slowcase);
// Smi-tagging is equivalent to multiplying by 2.
STATIC_ASSERT(kSmiTag == 0);
@@ -3715,10 +3984,10 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
// ebx: Start of elements in FixedArray.
// edx: the hole.
Label loop;
- __ test(ecx, Operand(ecx));
+ __ test(ecx, ecx);
__ bind(&loop);
__ j(less_equal, &done, Label::kNear); // Jump if ecx is negative or zero.
- __ sub(Operand(ecx), Immediate(1));
+ __ sub(ecx, Immediate(1));
__ mov(Operand(ebx, ecx, times_pointer_size, 0), edx);
__ jmp(&loop);
@@ -3743,16 +4012,16 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
Register scratch = scratch2;
// Load the number string cache.
- ExternalReference roots_address =
- ExternalReference::roots_address(masm->isolate());
+ ExternalReference roots_array_start =
+ ExternalReference::roots_array_start(masm->isolate());
__ mov(scratch, Immediate(Heap::kNumberStringCacheRootIndex));
__ mov(number_string_cache,
- Operand::StaticArray(scratch, times_pointer_size, roots_address));
+ Operand::StaticArray(scratch, times_pointer_size, roots_array_start));
// Make the hash mask from the length of the number string cache. It
// contains two elements (number and string) for each cache entry.
__ mov(mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
__ shr(mask, kSmiTagSize + 1); // Untag length and divide it by two.
- __ sub(Operand(mask), Immediate(1)); // Make mask.
+ __ sub(mask, Immediate(1)); // Make mask.
// Calculate the entry in the number string cache. The hash value in the
// number string cache for smis is just the smi value, and the hash for
@@ -3778,7 +4047,7 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
__ mov(scratch, FieldOperand(object, HeapNumber::kValueOffset));
__ xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
// Object is heap number and hash is now in scratch. Calculate cache index.
- __ and_(scratch, Operand(mask));
+ __ and_(scratch, mask);
Register index = scratch;
Register probe = mask;
__ mov(probe,
@@ -3804,7 +4073,7 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
__ bind(&smi_hash_calculated);
// Object is smi and hash is now in scratch. Calculate cache index.
- __ and_(scratch, Operand(mask));
+ __ and_(scratch, mask);
Register index = scratch;
// Check if the entry is the smi we are looking for.
__ cmp(object,
@@ -3856,10 +4125,10 @@ void CompareStub::Generate(MacroAssembler* masm) {
// Compare two smis if required.
if (include_smi_compare_) {
Label non_smi, smi_done;
- __ mov(ecx, Operand(edx));
- __ or_(ecx, Operand(eax));
+ __ mov(ecx, edx);
+ __ or_(ecx, eax);
__ JumpIfNotSmi(ecx, &non_smi, Label::kNear);
- __ sub(edx, Operand(eax)); // Return on the result of the subtraction.
+ __ sub(edx, eax); // Return on the result of the subtraction.
__ j(no_overflow, &smi_done, Label::kNear);
__ not_(edx); // Correct sign in case of overflow. edx is never 0 here.
__ bind(&smi_done);
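// The smi fast path above modeled in C++, as a sketch using the GCC/Clang
// overflow builtin: the tagged difference already orders the operands, and
// since tagged smis are even the difference is even, so on overflow ~diff
// is odd (never zero) and carries the corrected sign bit.
#include <cstdint>

static int32_t SmiCompare(int32_t lhs_smi, int32_t rhs_smi) {
  int32_t diff;
  if (!__builtin_sub_overflow(lhs_smi, rhs_smi, &diff)) return diff;
  return ~diff;  // the not_() step: flips the (wrong) sign bit
}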
@@ -3867,8 +4136,8 @@ void CompareStub::Generate(MacroAssembler* masm) {
__ ret(0);
__ bind(&non_smi);
} else if (FLAG_debug_code) {
- __ mov(ecx, Operand(edx));
- __ or_(ecx, Operand(eax));
+ __ mov(ecx, edx);
+ __ or_(ecx, eax);
__ test(ecx, Immediate(kSmiTagMask));
__ Assert(not_zero, "Unexpected smi operands.");
}
@@ -3880,7 +4149,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
// for NaN and undefined.
{
Label not_identical;
- __ cmp(eax, Operand(edx));
+ __ cmp(eax, edx);
__ j(not_equal, &not_identical);
if (cc_ != equal) {
@@ -3929,7 +4198,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
__ Set(eax, Immediate(0));
// Shift value and mask so kQuietNaNHighBitsMask applies to topmost
// bits.
- __ add(edx, Operand(edx));
+ __ add(edx, edx);
__ cmp(edx, kQuietNaNHighBitsMask << 1);
if (cc_ == equal) {
STATIC_ASSERT(EQUAL != 1);
@@ -3963,19 +4232,19 @@ void CompareStub::Generate(MacroAssembler* masm) {
STATIC_ASSERT(kSmiTag == 0);
ASSERT_EQ(0, Smi::FromInt(0));
__ mov(ecx, Immediate(kSmiTagMask));
- __ and_(ecx, Operand(eax));
- __ test(ecx, Operand(edx));
+ __ and_(ecx, eax);
+ __ test(ecx, edx);
__ j(not_zero, &not_smis, Label::kNear);
// One operand is a smi.
// Check whether the non-smi is a heap number.
STATIC_ASSERT(kSmiTagMask == 1);
// ecx still holds eax & kSmiTagMask, which is either zero or one.
- __ sub(Operand(ecx), Immediate(0x01));
+ __ sub(ecx, Immediate(0x01));
__ mov(ebx, edx);
- __ xor_(ebx, Operand(eax));
- __ and_(ebx, Operand(ecx)); // ebx holds either 0 or eax ^ edx.
- __ xor_(ebx, Operand(eax));
+ __ xor_(ebx, eax);
+ __ and_(ebx, ecx); // ebx holds either 0 or eax ^ edx.
+ __ xor_(ebx, eax);
// if eax was smi, ebx is now edx, else eax.
// Check if the non-smi operand is a heap number.
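// The branchless select above in C++, as a sketch (kSmiTagMask == 1 and a
// smi has tag bit 0): after ecx = (eax & 1) - 1, ecx is all-ones exactly
// when eax is the smi, so xor/and/xor moves the *other* operand into ebx.
#include <cstdint>

static uint32_t SelectNonSmi(uint32_t eax, uint32_t edx) {
  uint32_t ecx = (eax & 1u) - 1u;  // 0xffffffff if eax is a smi, else 0
  uint32_t ebx = edx;
  ebx ^= eax;                      // eax ^ edx
  ebx &= ecx;                      // (eax ^ edx) or 0
  ebx ^= eax;                      // edx if eax was a smi, else eax
  return ebx;
}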
@@ -4037,9 +4306,9 @@ void CompareStub::Generate(MacroAssembler* masm) {
// Return a result of -1, 0, or 1, based on EFLAGS.
__ mov(eax, 0); // equal
__ mov(ecx, Immediate(Smi::FromInt(1)));
- __ cmov(above, eax, Operand(ecx));
+ __ cmov(above, eax, ecx);
__ mov(ecx, Immediate(Smi::FromInt(-1)));
- __ cmov(below, eax, Operand(ecx));
+ __ cmov(below, eax, ecx);
__ ret(0);
} else {
FloatingPointHelper::CheckFloatOperands(
@@ -4198,43 +4467,105 @@ void StackCheckStub::Generate(MacroAssembler* masm) {
}
+void CallFunctionStub::FinishCode(Handle<Code> code) {
+ code->set_has_function_cache(RecordCallTarget());
+}
+
+
+void CallFunctionStub::Clear(Heap* heap, Address address) {
+ ASSERT(Memory::uint8_at(address + kPointerSize) == Assembler::kTestEaxByte);
+ // 1 ~ size of the test eax opcode.
+ Object* cell = Memory::Object_at(address + kPointerSize + 1);
+ // Low-level because clearing happens during GC.
+ reinterpret_cast<JSGlobalPropertyCell*>(cell)->set_value(
+ RawUninitializedSentinel(heap));
+}
+
+
+Object* CallFunctionStub::GetCachedValue(Address address) {
+ ASSERT(Memory::uint8_at(address + kPointerSize) == Assembler::kTestEaxByte);
+ // 1 ~ size of the test eax opcode.
+ Object* cell = Memory::Object_at(address + kPointerSize + 1);
+ return JSGlobalPropertyCell::cast(cell)->value();
+}
+
+
void CallFunctionStub::Generate(MacroAssembler* masm) {
+ // edi : the function to call
+ Isolate* isolate = masm->isolate();
Label slow, non_function;
// The receiver might implicitly be the global object. This is
// indicated by passing the hole as the receiver to the call
// function stub.
if (ReceiverMightBeImplicit()) {
- Label call;
+ Label receiver_ok;
// Get the receiver from the stack.
// +1 ~ return address
__ mov(eax, Operand(esp, (argc_ + 1) * kPointerSize));
// Call as function is indicated with the hole.
- __ cmp(eax, masm->isolate()->factory()->the_hole_value());
- __ j(not_equal, &call, Label::kNear);
+ __ cmp(eax, isolate->factory()->the_hole_value());
+ __ j(not_equal, &receiver_ok, Label::kNear);
// Patch the receiver on the stack with the global receiver object.
__ mov(ebx, GlobalObjectOperand());
__ mov(ebx, FieldOperand(ebx, GlobalObject::kGlobalReceiverOffset));
__ mov(Operand(esp, (argc_ + 1) * kPointerSize), ebx);
- __ bind(&call);
+ __ bind(&receiver_ok);
}
- // Get the function to call from the stack.
- // +2 ~ receiver, return address
- __ mov(edi, Operand(esp, (argc_ + 2) * kPointerSize));
-
// Check that the function really is a JavaScript function.
__ JumpIfSmi(edi, &non_function);
// Goto slow case if we do not have a function.
__ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
__ j(not_equal, &slow);
+ if (RecordCallTarget()) {
+ // Cache the called function in a global property cell in the
+ // instruction stream after the call. Cache states are uninitialized,
+ // monomorphic (indicated by a JSFunction), and megamorphic.
+ Label initialize, call;
+ // Load the cache cell address into ebx and the cache state into ecx.
+ __ mov(ebx, Operand(esp, 0)); // Return address.
+ __ mov(ebx, Operand(ebx, 1)); // 1 ~ sizeof 'test eax' opcode in bytes.
+ __ mov(ecx, FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset));
+
+ // A monomorphic cache hit or an already megamorphic state: invoke the
+ // function without changing the state.
+ __ cmp(ecx, edi);
+ __ j(equal, &call, Label::kNear);
+ __ cmp(ecx, Immediate(MegamorphicSentinel(isolate)));
+ __ j(equal, &call, Label::kNear);
+
+ // A monomorphic miss (i.e., here the cache is not uninitialized) goes
+ // megamorphic.
+ __ cmp(ecx, Immediate(UninitializedSentinel(isolate)));
+ __ j(equal, &initialize, Label::kNear);
+ // MegamorphicSentinel is a root so no write-barrier is needed.
+ __ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset),
+ Immediate(MegamorphicSentinel(isolate)));
+ __ jmp(&call, Label::kNear);
+
+ // An uninitialized cache is patched with the function.
+ __ bind(&initialize);
+ __ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset), edi);
+ __ mov(ecx, edi);
+ __ RecordWriteField(ebx,
+ JSGlobalPropertyCell::kValueOffset,
+ ecx,
+ edx,
+ kDontSaveFPRegs,
+ OMIT_REMEMBERED_SET, // Cells are rescanned.
+ OMIT_SMI_CHECK);
+
+ __ bind(&call);
+ }
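// The cache protocol above as a sketch with illustrative types (the real
// cell is a JSGlobalPropertyCell and the sentinels are heap roots):
static void UpdateCallCache(void** cell, void* callee,
                            void* uninitialized, void* megamorphic) {
  if (*cell == callee || *cell == megamorphic) return;     // hit: unchanged
  if (*cell == uninitialized) { *cell = callee; return; }  // first call
  *cell = megamorphic;  // monomorphic miss degrades the cache for good
}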
+
// Fast-case: Just invoke the function.
ParameterCount actual(argc_);
if (ReceiverMightBeImplicit()) {
Label call_as_function;
- __ cmp(eax, masm->isolate()->factory()->the_hole_value());
+ __ cmp(eax, isolate->factory()->the_hole_value());
__ j(equal, &call_as_function);
__ InvokeFunction(edi,
actual,
@@ -4251,6 +4582,14 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
// Slow-case: Non-function called.
__ bind(&slow);
+ if (RecordCallTarget()) {
+ // If there is a call target cache, mark it megamorphic in the
+ // non-function case.
+ __ mov(ebx, Operand(esp, 0));
+ __ mov(ebx, Operand(ebx, 1));
+ __ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset),
+ Immediate(MegamorphicSentinel(isolate)));
+ }
// Check for function proxy.
__ CmpInstanceType(ecx, JS_FUNCTION_PROXY_TYPE);
__ j(not_equal, &non_function);
@@ -4262,8 +4601,7 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
__ SetCallKind(ecx, CALL_AS_FUNCTION);
__ GetBuiltinEntry(edx, Builtins::CALL_FUNCTION_PROXY);
{
- Handle<Code> adaptor =
- masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
+ Handle<Code> adaptor = isolate->builtins()->ArgumentsAdaptorTrampoline();
__ jmp(adaptor, RelocInfo::CODE_TARGET);
}
@@ -4275,8 +4613,7 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
__ Set(ebx, Immediate(0));
__ SetCallKind(ecx, CALL_AS_METHOD);
__ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION);
- Handle<Code> adaptor =
- masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
+ Handle<Code> adaptor = isolate->builtins()->ArgumentsAdaptorTrampoline();
__ jmp(adaptor, RelocInfo::CODE_TARGET);
}
@@ -4286,6 +4623,35 @@ bool CEntryStub::NeedsImmovableCode() {
}
+bool CEntryStub::IsPregenerated() {
+ return (!save_doubles_ || ISOLATE->fp_stubs_generated()) &&
+ result_size_ == 1;
+}
+
+
+void CodeStub::GenerateStubsAheadOfTime() {
+ CEntryStub::GenerateAheadOfTime();
+ StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime();
+ // It is important that the store buffer overflow stubs are generated first.
+ RecordWriteStub::GenerateFixedRegStubsAheadOfTime();
+}
+
+
+void CodeStub::GenerateFPStubs() {
+ CEntryStub save_doubles(1, kSaveFPRegs);
+ Handle<Code> code = save_doubles.GetCode();
+ code->set_is_pregenerated(true);
+ code->GetIsolate()->set_fp_stubs_generated(true);
+}
+
+
+void CEntryStub::GenerateAheadOfTime() {
+ CEntryStub stub(1, kDontSaveFPRegs);
+ Handle<Code> code = stub.GetCode();
+ code->set_is_pregenerated(true);
+}
+
+
void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
__ Throw(eax);
}
@@ -4332,7 +4698,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
__ mov(Operand(esp, 1 * kPointerSize), esi); // argv.
__ mov(Operand(esp, 2 * kPointerSize),
Immediate(ExternalReference::isolate_address()));
- __ call(Operand(ebx));
+ __ call(ebx);
// Result is in eax or edx:eax - do not destroy these registers!
if (always_allocate_scope) {
@@ -4364,8 +4730,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
// should have returned some failure value.
if (FLAG_debug_code) {
__ push(edx);
- __ mov(edx, Operand::StaticVariable(
- ExternalReference::the_hole_value_location(masm->isolate())));
+ __ mov(edx, Immediate(masm->isolate()->factory()->the_hole_value()));
Label okay;
__ cmp(edx, Operand::StaticVariable(pending_exception_address));
// Cannot use check here as it attempts to generate call into runtime.
@@ -4376,7 +4741,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
}
// Exit the JavaScript to C++ exit frame.
- __ LeaveExitFrame(save_doubles_);
+ __ LeaveExitFrame(save_doubles_ == kSaveFPRegs);
__ ret(0);
// Handling of failure.
@@ -4393,10 +4758,8 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
__ j(equal, throw_out_of_memory_exception);
// Retrieve the pending exception and clear the variable.
- ExternalReference the_hole_location =
- ExternalReference::the_hole_value_location(masm->isolate());
__ mov(eax, Operand::StaticVariable(pending_exception_address));
- __ mov(edx, Operand::StaticVariable(the_hole_location));
+ __ mov(edx, Immediate(masm->isolate()->factory()->the_hole_value()));
__ mov(Operand::StaticVariable(pending_exception_address), edx);
// Special handling of termination exceptions which are uncatchable
@@ -4431,7 +4794,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// a garbage collection and retrying the builtin (twice).
// Enter the exit frame that transitions from JavaScript to C++.
- __ EnterExitFrame(save_doubles_);
+ __ EnterExitFrame(save_doubles_ == kSaveFPRegs);
// eax: result parameter for PerformGC, if any (setup below)
// ebx: pointer to builtin function (C callee-saved)
@@ -4482,12 +4845,12 @@ void CEntryStub::Generate(MacroAssembler* masm) {
void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
- Label invoke, exit;
+ Label invoke, handler_entry, exit;
Label not_outermost_js, not_outermost_js_2;
// Setup frame.
__ push(ebp);
- __ mov(ebp, Operand(esp));
+ __ mov(ebp, esp);
// Push marker in two places.
int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
@@ -4515,38 +4878,38 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
__ push(Immediate(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME)));
__ bind(&cont);
- // Call a faked try-block that does the invoke.
- __ call(&invoke);
-
- // Caught exception: Store result (exception) in the pending
- // exception field in the JSEnv and return a failure sentinel.
+ // Jump to a faked try block that does the invoke, with a faked catch
+ // block that sets the pending exception.
+ __ jmp(&invoke);
+ __ bind(&handler_entry);
+ handler_offset_ = handler_entry.pos();
+ // Caught exception: Store result (exception) in the pending exception
+ // field in the JSEnv and return a failure sentinel.
ExternalReference pending_exception(Isolate::kPendingExceptionAddress,
masm->isolate());
__ mov(Operand::StaticVariable(pending_exception), eax);
__ mov(eax, reinterpret_cast<int32_t>(Failure::Exception()));
__ jmp(&exit);
- // Invoke: Link this frame into the handler chain.
+ // Invoke: Link this frame into the handler chain. There's only one
+ // handler block in this code object, so its index is 0.
__ bind(&invoke);
- __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER);
+ __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER, 0);
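// Conceptual model of the handler chain linked here, with illustrative
// types (the real entries live on the machine stack, threaded through the
// isolate; this stub registers exactly one handler, index 0):
struct FakedTryHandler { FakedTryHandler* next; int index; };
static FakedTryHandler* faked_handler_chain = NULL;

static void PushFakedHandler(FakedTryHandler* h, int index) {
  h->index = index;
  h->next = faked_handler_chain;
  faked_handler_chain = h;
}

static void PopFakedHandler() {
  faked_handler_chain = faked_handler_chain->next;
}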
// Clear any pending exceptions.
- ExternalReference the_hole_location =
- ExternalReference::the_hole_value_location(masm->isolate());
- __ mov(edx, Operand::StaticVariable(the_hole_location));
+ __ mov(edx, Immediate(masm->isolate()->factory()->the_hole_value()));
__ mov(Operand::StaticVariable(pending_exception), edx);
// Fake a receiver (NULL).
__ push(Immediate(0)); // receiver
- // Invoke the function by calling through JS entry trampoline
- // builtin and pop the faked function when we return. Notice that we
- // cannot store a reference to the trampoline code directly in this
- // stub, because the builtin stubs may not have been generated yet.
+ // Invoke the function by calling through JS entry trampoline builtin and
+ // pop the faked function when we return. Notice that we cannot store a
+ // reference to the trampoline code directly in this stub, because the
+ // builtin stubs may not have been generated yet.
if (is_construct) {
- ExternalReference construct_entry(
- Builtins::kJSConstructEntryTrampoline,
- masm->isolate());
+ ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
+ masm->isolate());
__ mov(edx, Immediate(construct_entry));
} else {
ExternalReference entry(Builtins::kJSEntryTrampoline,
@@ -4555,7 +4918,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
}
__ mov(edx, Operand(edx, 0)); // deref address
__ lea(edx, FieldOperand(edx, Code::kHeaderSize));
- __ call(Operand(edx));
+ __ call(edx);
// Unlink this frame from the handler chain.
__ PopTryHandler();
@@ -4563,8 +4926,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
__ bind(&exit);
// Check if the current stack frame is marked as the outermost JS frame.
__ pop(ebx);
- __ cmp(Operand(ebx),
- Immediate(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
+ __ cmp(ebx, Immediate(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
__ j(not_equal, &not_outermost_js_2);
__ mov(Operand::StaticVariable(js_entry_sp), Immediate(0));
__ bind(&not_outermost_js_2);
@@ -4578,7 +4940,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
__ pop(ebx);
__ pop(esi);
__ pop(edi);
- __ add(Operand(esp), Immediate(2 * kPointerSize)); // remove markers
+ __ add(esp, Immediate(2 * kPointerSize)); // remove markers
// Restore frame pointer and return.
__ pop(ebp);
@@ -4621,8 +4983,8 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
static const int8_t kCmpEdiImmediateByte2 = BitCast<int8_t, uint8_t>(0xff);
static const int8_t kMovEaxImmediateByte = BitCast<int8_t, uint8_t>(0xb8);
- ExternalReference roots_address =
- ExternalReference::roots_address(masm->isolate());
+ ExternalReference roots_array_start =
+ ExternalReference::roots_array_start(masm->isolate());
ASSERT_EQ(object.code(), InstanceofStub::left().code());
ASSERT_EQ(function.code(), InstanceofStub::right().code());
@@ -4644,22 +5006,23 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
// Look up the function and the map in the instanceof cache.
Label miss;
__ mov(scratch, Immediate(Heap::kInstanceofCacheFunctionRootIndex));
- __ cmp(function,
- Operand::StaticArray(scratch, times_pointer_size, roots_address));
+ __ cmp(function, Operand::StaticArray(scratch,
+ times_pointer_size,
+ roots_array_start));
__ j(not_equal, &miss, Label::kNear);
__ mov(scratch, Immediate(Heap::kInstanceofCacheMapRootIndex));
__ cmp(map, Operand::StaticArray(
- scratch, times_pointer_size, roots_address));
+ scratch, times_pointer_size, roots_array_start));
__ j(not_equal, &miss, Label::kNear);
__ mov(scratch, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
__ mov(eax, Operand::StaticArray(
- scratch, times_pointer_size, roots_address));
+ scratch, times_pointer_size, roots_array_start));
__ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
__ bind(&miss);
}
// Get the prototype of the function.
- __ TryGetFunctionPrototype(function, prototype, scratch, &slow);
+ __ TryGetFunctionPrototype(function, prototype, scratch, &slow, true);
// Check that the function prototype is a JS object.
__ JumpIfSmi(prototype, &slow);
@@ -4669,9 +5032,10 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
// map and function. The cached answer will be set when it is known below.
if (!HasCallSiteInlineCheck()) {
__ mov(scratch, Immediate(Heap::kInstanceofCacheMapRootIndex));
- __ mov(Operand::StaticArray(scratch, times_pointer_size, roots_address), map);
+ __ mov(Operand::StaticArray(scratch, times_pointer_size, roots_array_start),
+ map);
__ mov(scratch, Immediate(Heap::kInstanceofCacheFunctionRootIndex));
- __ mov(Operand::StaticArray(scratch, times_pointer_size, roots_address),
+ __ mov(Operand::StaticArray(scratch, times_pointer_size, roots_array_start),
function);
} else {
// The constants for the code patching are based on no push instructions
@@ -4694,10 +5058,10 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ mov(scratch, FieldOperand(map, Map::kPrototypeOffset));
Label loop, is_instance, is_not_instance;
__ bind(&loop);
- __ cmp(scratch, Operand(prototype));
+ __ cmp(scratch, prototype);
__ j(equal, &is_instance, Label::kNear);
Factory* factory = masm->isolate()->factory();
- __ cmp(Operand(scratch), Immediate(factory->null_value()));
+ __ cmp(scratch, Immediate(factory->null_value()));
__ j(equal, &is_not_instance, Label::kNear);
__ mov(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
__ mov(scratch, FieldOperand(scratch, Map::kPrototypeOffset));
@@ -4708,7 +5072,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ Set(eax, Immediate(0));
__ mov(scratch, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
__ mov(Operand::StaticArray(scratch,
- times_pointer_size, roots_address), eax);
+ times_pointer_size, roots_array_start), eax);
} else {
// Get return address and delta to inlined map check.
__ mov(eax, factory->true_value());
@@ -4730,7 +5094,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ Set(eax, Immediate(Smi::FromInt(1)));
__ mov(scratch, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
__ mov(Operand::StaticArray(
- scratch, times_pointer_size, roots_address), eax);
+ scratch, times_pointer_size, roots_array_start), eax);
} else {
// Get return address and delta to inlined map check.
__ mov(eax, factory->false_value());
@@ -4788,13 +5152,14 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
} else {
// Call the builtin and convert 0/1 to true/false.
- __ EnterInternalFrame();
- __ push(object);
- __ push(function);
- __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
- __ LeaveInternalFrame();
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ push(object);
+ __ push(function);
+ __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
+ }
Label true_value, done;
- __ test(eax, Operand(eax));
+ __ test(eax, eax);
__ j(zero, &true_value, Label::kNear);
__ mov(eax, factory->false_value());
__ jmp(&done, Label::kNear);
@@ -4854,11 +5219,6 @@ void CompareStub::PrintName(StringStream* stream) {
// StringCharCodeAtGenerator
void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
- Label flat_string;
- Label ascii_string;
- Label got_char_code;
- Label sliced_string;
-
// If the receiver is a smi, trigger the non-string case.
STATIC_ASSERT(kSmiTag == 0);
__ JumpIfSmi(object_, receiver_not_string_);
@@ -4873,85 +5233,26 @@ void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
// If the index is non-smi, trigger the non-smi case.
STATIC_ASSERT(kSmiTag == 0);
__ JumpIfNotSmi(index_, &index_not_smi_);
-
- // Put smi-tagged index into scratch register.
- __ mov(scratch_, index_);
__ bind(&got_smi_index_);
// Check for index out of range.
- __ cmp(scratch_, FieldOperand(object_, String::kLengthOffset));
+ __ cmp(index_, FieldOperand(object_, String::kLengthOffset));
__ j(above_equal, index_out_of_range_);
- // We need special handling for non-flat strings.
- STATIC_ASSERT(kSeqStringTag == 0);
- __ test(result_, Immediate(kStringRepresentationMask));
- __ j(zero, &flat_string);
+ __ SmiUntag(index_);
- // Handle non-flat strings.
- __ and_(result_, kStringRepresentationMask);
- STATIC_ASSERT(kConsStringTag < kExternalStringTag);
- STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
- __ cmp(result_, kExternalStringTag);
- __ j(greater, &sliced_string, Label::kNear);
- __ j(equal, &call_runtime_);
-
- // ConsString.
- // Check whether the right hand side is the empty string (i.e. if
- // this is really a flat string in a cons string). If that is not
- // the case we would rather go to the runtime system now to flatten
- // the string.
- Label assure_seq_string;
- __ cmp(FieldOperand(object_, ConsString::kSecondOffset),
- Immediate(masm->isolate()->factory()->empty_string()));
- __ j(not_equal, &call_runtime_);
- // Get the first of the two strings and load its instance type.
- __ mov(object_, FieldOperand(object_, ConsString::kFirstOffset));
- __ jmp(&assure_seq_string, Label::kNear);
-
- // SlicedString, unpack and add offset.
- __ bind(&sliced_string);
- __ add(scratch_, FieldOperand(object_, SlicedString::kOffsetOffset));
- __ mov(object_, FieldOperand(object_, SlicedString::kParentOffset));
-
- // Assure that we are dealing with a sequential string. Go to runtime if not.
- __ bind(&assure_seq_string);
- __ mov(result_, FieldOperand(object_, HeapObject::kMapOffset));
- __ movzx_b(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
- STATIC_ASSERT(kSeqStringTag == 0);
- __ test(result_, Immediate(kStringRepresentationMask));
- __ j(not_zero, &call_runtime_);
- __ jmp(&flat_string, Label::kNear);
+ Factory* factory = masm->isolate()->factory();
+ StringCharLoadGenerator::Generate(
+ masm, factory, object_, index_, result_, &call_runtime_);
- // Check for 1-byte or 2-byte string.
- __ bind(&flat_string);
- STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
- STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
- __ test(result_, Immediate(kStringEncodingMask));
- __ j(not_zero, &ascii_string, Label::kNear);
-
- // 2-byte string.
- // Load the 2-byte character code into the result register.
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
- __ movzx_w(result_, FieldOperand(object_,
- scratch_, times_1, // Scratch is smi-tagged.
- SeqTwoByteString::kHeaderSize));
- __ jmp(&got_char_code, Label::kNear);
-
- // ASCII string.
- // Load the byte into the result register.
- __ bind(&ascii_string);
- __ SmiUntag(scratch_);
- __ movzx_b(result_, FieldOperand(object_,
- scratch_, times_1,
- SeqAsciiString::kHeaderSize));
- __ bind(&got_char_code);
__ SmiTag(result_);
__ bind(&exit_);
}
void StringCharCodeAtGenerator::GenerateSlow(
- MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+ MacroAssembler* masm,
+ const RuntimeCallHelper& call_helper) {
__ Abort("Unexpected fallthrough to CharCodeAt slow case");
// Index is not a smi.
@@ -4963,7 +5264,6 @@ void StringCharCodeAtGenerator::GenerateSlow(
DONT_DO_SMI_CHECK);
call_helper.BeforeCall(masm);
__ push(object_);
- __ push(index_);
__ push(index_); // Consumed by runtime conversion function.
if (index_flags_ == STRING_INDEX_IS_NUMBER) {
__ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
@@ -4972,12 +5272,11 @@ void StringCharCodeAtGenerator::GenerateSlow(
// NumberToSmi discards numbers that are not exact integers.
__ CallRuntime(Runtime::kNumberToSmi, 1);
}
- if (!scratch_.is(eax)) {
+ if (!index_.is(eax)) {
// Save the conversion result before the pop instructions below
// have a chance to overwrite it.
- __ mov(scratch_, eax);
+ __ mov(index_, eax);
}
- __ pop(index_);
__ pop(object_);
// Reload the instance type.
__ mov(result_, FieldOperand(object_, HeapObject::kMapOffset));
@@ -4985,7 +5284,7 @@ void StringCharCodeAtGenerator::GenerateSlow(
call_helper.AfterCall(masm);
// If index is still not a smi, it must be out of range.
STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfNotSmi(scratch_, index_out_of_range_);
+ __ JumpIfNotSmi(index_, index_out_of_range_);
// Otherwise, return to the fast path.
__ jmp(&got_smi_index_);
@@ -4995,6 +5294,7 @@ void StringCharCodeAtGenerator::GenerateSlow(
__ bind(&call_runtime_);
call_helper.BeforeCall(masm);
__ push(object_);
+ __ SmiTag(index_);
__ push(index_);
__ CallRuntime(Runtime::kStringCharCodeAt, 2);
if (!result_.is(eax)) {
@@ -5036,7 +5336,8 @@ void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
void StringCharFromCodeGenerator::GenerateSlow(
- MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+ MacroAssembler* masm,
+ const RuntimeCallHelper& call_helper) {
__ Abort("Unexpected fallthrough to CharFromCode slow case");
__ bind(&slow_case_);
@@ -5063,7 +5364,8 @@ void StringCharAtGenerator::GenerateFast(MacroAssembler* masm) {
void StringCharAtGenerator::GenerateSlow(
- MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+ MacroAssembler* masm,
+ const RuntimeCallHelper& call_helper) {
char_code_at_generator_.GenerateSlow(masm, call_helper);
char_from_code_generator_.GenerateSlow(masm, call_helper);
}
@@ -5110,7 +5412,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
Label second_not_zero_length, both_not_zero_length;
__ mov(ecx, FieldOperand(edx, String::kLengthOffset));
STATIC_ASSERT(kSmiTag == 0);
- __ test(ecx, Operand(ecx));
+ __ test(ecx, ecx);
__ j(not_zero, &second_not_zero_length, Label::kNear);
// Second string is empty, result is first string which is already in eax.
Counters* counters = masm->isolate()->counters();
@@ -5119,7 +5421,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ bind(&second_not_zero_length);
__ mov(ebx, FieldOperand(eax, String::kLengthOffset));
STATIC_ASSERT(kSmiTag == 0);
- __ test(ebx, Operand(ebx));
+ __ test(ebx, ebx);
__ j(not_zero, &both_not_zero_length, Label::kNear);
// First string is empty, result is second string which is in edx.
__ mov(eax, edx);
@@ -5134,13 +5436,13 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// Look at the length of the result of adding the two strings.
Label string_add_flat_result, longer_than_two;
__ bind(&both_not_zero_length);
- __ add(ebx, Operand(ecx));
+ __ add(ebx, ecx);
STATIC_ASSERT(Smi::kMaxValue == String::kMaxLength);
// Handle exceptionally long strings in the runtime system.
__ j(overflow, &string_add_runtime);
// Use the symbol table when adding two one-character strings, as it
// helps later optimizations to return a symbol here.
- __ cmp(Operand(ebx), Immediate(Smi::FromInt(2)));
+ __ cmp(ebx, Immediate(Smi::FromInt(2)));
__ j(not_equal, &longer_than_two);
// Check that both strings are non-external ascii strings.
@@ -5177,7 +5479,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
&string_add_runtime);
// Pack both characters in ebx.
__ shl(ecx, kBitsPerByte);
- __ or_(ebx, Operand(ecx));
+ __ or_(ebx, ecx);
// Set the characters in the new string.
__ mov_w(FieldOperand(eax, SeqAsciiString::kHeaderSize), ebx);
__ IncrementCounter(counters->string_add_native(), 1);
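// The two-character packing above in plain C++, as a sketch
// (kBitsPerByte == 8, so char 1 lands in byte 0 and char 2 in byte 1 of
// the 16-bit store):
#include <cstdint>

static uint16_t PackTwoAsciiChars(uint8_t c1, uint8_t c2) {
  return static_cast<uint16_t>(c1 | (c2 << 8));  // shl ecx, 8; or_(ebx, ecx)
}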
@@ -5185,7 +5487,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ bind(&longer_than_two);
// Check if resulting string will be flat.
- __ cmp(Operand(ebx), Immediate(Smi::FromInt(String::kMinNonFlatLength)));
+ __ cmp(ebx, Immediate(Smi::FromInt(String::kMinNonFlatLength)));
__ j(below, &string_add_flat_result);
// If the result is not supposed to be flat, allocate a cons string object. If both
@@ -5195,7 +5497,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ movzx_b(ecx, FieldOperand(edi, Map::kInstanceTypeOffset));
__ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
__ movzx_b(edi, FieldOperand(edi, Map::kInstanceTypeOffset));
- __ and_(ecx, Operand(edi));
+ __ and_(ecx, edi);
STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
__ test(ecx, Immediate(kStringEncodingMask));
@@ -5223,7 +5525,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ j(not_zero, &ascii_data);
__ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
__ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
- __ xor_(edi, Operand(ecx));
+ __ xor_(edi, ecx);
STATIC_ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0);
__ and_(edi, kAsciiStringTag | kAsciiDataHintTag);
__ cmp(edi, kAsciiStringTag | kAsciiDataHintTag);
@@ -5271,12 +5573,12 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// eax: result string
__ mov(ecx, eax);
// Locate first character of result.
- __ add(Operand(ecx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ add(ecx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
// Load first argument and locate first character.
__ mov(edx, Operand(esp, 2 * kPointerSize));
__ mov(edi, FieldOperand(edx, String::kLengthOffset));
__ SmiUntag(edi);
- __ add(Operand(edx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ add(edx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
// eax: result string
// ecx: first character of result
// edx: first char of first argument
@@ -5286,7 +5588,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ mov(edx, Operand(esp, 1 * kPointerSize));
__ mov(edi, FieldOperand(edx, String::kLengthOffset));
__ SmiUntag(edi);
- __ add(Operand(edx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ add(edx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
// eax: result string
// ecx: next character of result
// edx: first char of second argument
@@ -5310,13 +5612,13 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// eax: result string
__ mov(ecx, eax);
// Locate first character of result.
- __ add(Operand(ecx),
+ __ add(ecx,
Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
// Load first argument and locate first character.
__ mov(edx, Operand(esp, 2 * kPointerSize));
__ mov(edi, FieldOperand(edx, String::kLengthOffset));
__ SmiUntag(edi);
- __ add(Operand(edx),
+ __ add(edx,
Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
// eax: result string
// ecx: first character of result
@@ -5327,7 +5629,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ mov(edx, Operand(esp, 1 * kPointerSize));
__ mov(edi, FieldOperand(edx, String::kLengthOffset));
__ SmiUntag(edi);
- __ add(Operand(edx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ add(edx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
// eax: result string
// ecx: next character of result
// edx: first char of second argument
@@ -5403,15 +5705,15 @@ void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
if (ascii) {
__ mov_b(scratch, Operand(src, 0));
__ mov_b(Operand(dest, 0), scratch);
- __ add(Operand(src), Immediate(1));
- __ add(Operand(dest), Immediate(1));
+ __ add(src, Immediate(1));
+ __ add(dest, Immediate(1));
} else {
__ mov_w(scratch, Operand(src, 0));
__ mov_w(Operand(dest, 0), scratch);
- __ add(Operand(src), Immediate(2));
- __ add(Operand(dest), Immediate(2));
+ __ add(src, Immediate(2));
+ __ add(dest, Immediate(2));
}
- __ sub(Operand(count), Immediate(1));
+ __ sub(count, Immediate(1));
__ j(not_zero, &loop);
}
@@ -5434,7 +5736,7 @@ void StringHelper::GenerateCopyCharactersREP(MacroAssembler* masm,
// Nothing to do for zero characters.
Label done;
- __ test(count, Operand(count));
+ __ test(count, count);
__ j(zero, &done);
// Make count the number of bytes to copy.
@@ -5459,7 +5761,7 @@ void StringHelper::GenerateCopyCharactersREP(MacroAssembler* masm,
// Check if there are more bytes to copy.
__ bind(&last_bytes);
- __ test(count, Operand(count));
+ __ test(count, count);
__ j(zero, &done);
// Copy remaining characters.
@@ -5467,9 +5769,9 @@ void StringHelper::GenerateCopyCharactersREP(MacroAssembler* masm,
__ bind(&loop);
__ mov_b(scratch, Operand(src, 0));
__ mov_b(Operand(dest, 0), scratch);
- __ add(Operand(src), Immediate(1));
- __ add(Operand(dest), Immediate(1));
- __ sub(Operand(count), Immediate(1));
+ __ add(src, Immediate(1));
+ __ add(dest, Immediate(1));
+ __ sub(count, Immediate(1));
__ j(not_zero, &loop);
__ bind(&done);
@@ -5491,12 +5793,12 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
// different hash algorithm. Don't try to look for these in the symbol table.
Label not_array_index;
__ mov(scratch, c1);
- __ sub(Operand(scratch), Immediate(static_cast<int>('0')));
- __ cmp(Operand(scratch), Immediate(static_cast<int>('9' - '0')));
+ __ sub(scratch, Immediate(static_cast<int>('0')));
+ __ cmp(scratch, Immediate(static_cast<int>('9' - '0')));
__ j(above, &not_array_index, Label::kNear);
__ mov(scratch, c2);
- __ sub(Operand(scratch), Immediate(static_cast<int>('0')));
- __ cmp(Operand(scratch), Immediate(static_cast<int>('9' - '0')));
+ __ sub(scratch, Immediate(static_cast<int>('0')));
+ __ cmp(scratch, Immediate(static_cast<int>('9' - '0')));
__ j(below_equal, not_probed);
__ bind(&not_array_index);
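
The probe loop below follows V8's usual open-addressing scheme over a
power-of-two table. A sketch of the entry calculation, assuming
SymbolTable::GetProbeOffset(i) is the triangular offset (i + i*i)/2 used by
the runtime's hash tables:

  uint32_t ProbeEntry(uint32_t hash, uint32_t capacity, int i) {
    uint32_t mask = capacity - 1;            // capacity is a power of two
    return (hash + (i + i * i) / 2) & mask;  // assumed GetProbeOffset(i)
  }
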
@@ -5509,24 +5811,24 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
// Collect the two characters in a register.
Register chars = c1;
__ shl(c2, kBitsPerByte);
- __ or_(chars, Operand(c2));
+ __ or_(chars, c2);
// chars: two character string, char 1 in byte 0 and char 2 in byte 1.
// hash: hash of two character string.
// Load the symbol table.
Register symbol_table = c2;
- ExternalReference roots_address =
- ExternalReference::roots_address(masm->isolate());
+ ExternalReference roots_array_start =
+ ExternalReference::roots_array_start(masm->isolate());
__ mov(scratch, Immediate(Heap::kSymbolTableRootIndex));
__ mov(symbol_table,
- Operand::StaticArray(scratch, times_pointer_size, roots_address));
+ Operand::StaticArray(scratch, times_pointer_size, roots_array_start));
// Calculate capacity mask from the symbol table capacity.
Register mask = scratch2;
__ mov(mask, FieldOperand(symbol_table, SymbolTable::kCapacityOffset));
__ SmiUntag(mask);
- __ sub(Operand(mask), Immediate(1));
+ __ sub(mask, Immediate(1));
// Registers
// chars: two character string, char 1 in byte 0 and char 2 in byte 1.
@@ -5539,16 +5841,16 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
static const int kProbes = 4;
Label found_in_symbol_table;
Label next_probe[kProbes], next_probe_pop_mask[kProbes];
+ Register candidate = scratch; // Scratch register contains candidate.
for (int i = 0; i < kProbes; i++) {
// Calculate entry in symbol table.
__ mov(scratch, hash);
if (i > 0) {
- __ add(Operand(scratch), Immediate(SymbolTable::GetProbeOffset(i)));
+ __ add(scratch, Immediate(SymbolTable::GetProbeOffset(i)));
}
- __ and_(scratch, Operand(mask));
+ __ and_(scratch, mask);
// Load the entry from the symbol table.
- Register candidate = scratch; // Scratch register contains candidate.
STATIC_ASSERT(SymbolTable::kEntrySize == 1);
__ mov(candidate,
FieldOperand(symbol_table,
@@ -5560,7 +5862,7 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
Factory* factory = masm->isolate()->factory();
__ cmp(candidate, factory->undefined_value());
__ j(equal, not_found);
- __ cmp(candidate, factory->null_value());
+ __ cmp(candidate, factory->the_hole_value());
__ j(equal, &next_probe[i]);
// If length is not 2 the string is not a candidate.
@@ -5582,7 +5884,7 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
// Check if the two characters match.
__ mov(temp, FieldOperand(candidate, SeqAsciiString::kHeaderSize));
__ and_(temp, 0x0000ffff);
- __ cmp(chars, Operand(temp));
+ __ cmp(chars, temp);
__ j(equal, &found_in_symbol_table);
__ bind(&next_probe_pop_mask[i]);
__ pop(mask);
@@ -5593,7 +5895,7 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
__ jmp(not_found);
// Scratch register contains result when we fall through to here.
- Register result = scratch;
+ Register result = candidate;
__ bind(&found_in_symbol_table);
__ pop(mask); // Pop saved mask from the stack.
if (!result.is(eax)) {
@@ -5609,11 +5911,11 @@ void StringHelper::GenerateHashInit(MacroAssembler* masm,
// hash = character + (character << 10);
__ mov(hash, character);
__ shl(hash, 10);
- __ add(hash, Operand(character));
+ __ add(hash, character);
// hash ^= hash >> 6;
__ mov(scratch, hash);
- __ sar(scratch, 6);
- __ xor_(hash, Operand(scratch));
+ __ shr(scratch, 6);
+ __ xor_(hash, scratch);
}
@@ -5622,15 +5924,15 @@ void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
Register character,
Register scratch) {
// hash += character;
- __ add(hash, Operand(character));
+ __ add(hash, character);
// hash += hash << 10;
__ mov(scratch, hash);
__ shl(scratch, 10);
- __ add(hash, Operand(scratch));
+ __ add(hash, scratch);
// hash ^= hash >> 6;
__ mov(scratch, hash);
- __ sar(scratch, 6);
- __ xor_(hash, Operand(scratch));
+ __ shr(scratch, 6);
+ __ xor_(hash, scratch);
}
@@ -5640,19 +5942,22 @@ void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
// hash += hash << 3;
__ mov(scratch, hash);
__ shl(scratch, 3);
- __ add(hash, Operand(scratch));
+ __ add(hash, scratch);
// hash ^= hash >> 11;
__ mov(scratch, hash);
- __ sar(scratch, 11);
- __ xor_(hash, Operand(scratch));
+ __ shr(scratch, 11);
+ __ xor_(hash, scratch);
// hash += hash << 15;
__ mov(scratch, hash);
__ shl(scratch, 15);
- __ add(hash, Operand(scratch));
+ __ add(hash, scratch);
+
+ uint32_t kHashShiftCutOffMask = (1 << (32 - String::kHashShift)) - 1;
+ __ and_(hash, kHashShiftCutOffMask);
// if (hash == 0) hash = 27;
Label hash_not_zero;
- __ test(hash, Operand(hash));
+ __ test(hash, hash);
__ j(not_zero, &hash_not_zero, Label::kNear);
__ mov(hash, Immediate(27));
__ bind(&hash_not_zero);
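
Taken together, GenerateHashInit, GenerateHashAddCharacter and
GenerateHashGetHash implement a Jenkins-style one-at-a-time hash; the
sar-to-shr change above matters because the algorithm calls for logical
(unsigned) right shifts. A C++ sketch of the whole computation, with
kHashShift standing in for String::kHashShift (illustrative, not the
runtime's actual hasher):

  uint32_t StringHash(const unsigned char* chars, int length,
                      int kHashShift) {
    uint32_t hash = 0;
    for (int i = 0; i < length; i++) {
      hash += chars[i];
      hash += hash << 10;
      hash ^= hash >> 6;                    // logical shift: shr, not sar
    }
    hash += hash << 3;
    hash ^= hash >> 11;
    hash += hash << 15;
    hash &= (1u << (32 - kHashShift)) - 1;  // kHashShiftCutOffMask
    return hash == 0 ? 27 : hash;           // if (hash == 0) hash = 27
  }
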
@@ -5684,7 +5989,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ JumpIfNotSmi(ecx, &runtime);
__ mov(edx, Operand(esp, 2 * kPointerSize)); // From index.
__ JumpIfNotSmi(edx, &runtime);
- __ sub(ecx, Operand(edx));
+ __ sub(ecx, edx);
__ cmp(ecx, FieldOperand(eax, String::kLengthOffset));
Label return_eax;
__ j(equal, &return_eax);
@@ -5735,18 +6040,15 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// ebx: instance type
// ecx: sub string length
// edx: from index (smi)
- Label allocate_slice, sliced_string, seq_string;
+ Label allocate_slice, sliced_string, seq_or_external_string;
__ cmp(ecx, SlicedString::kMinLength);
// Short slice. Copy instead of slicing.
__ j(less, &copy_routine);
- STATIC_ASSERT(kSeqStringTag == 0);
- __ test(ebx, Immediate(kStringRepresentationMask));
- __ j(zero, &seq_string, Label::kNear);
+ // If the string is not indirect, it can only be sequential or external.
STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
STATIC_ASSERT(kIsIndirectStringMask != 0);
__ test(ebx, Immediate(kIsIndirectStringMask));
- // External string. Jump to runtime.
- __ j(zero, &runtime);
+ __ j(zero, &seq_or_external_string, Label::kNear);
Factory* factory = masm->isolate()->factory();
__ test(ebx, Immediate(kSlicedNotConsMask));
@@ -5764,8 +6066,8 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ mov(edi, FieldOperand(eax, SlicedString::kParentOffset));
__ jmp(&allocate_slice, Label::kNear);
- __ bind(&seq_string);
- // Sequential string. Just move string to the right register.
+ __ bind(&seq_or_external_string);
+ // Sequential or external string. Just move string to the correct register.
__ mov(edi, eax);
__ bind(&allocate_slice);
@@ -5816,13 +6118,13 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ mov(edx, esi); // esi used by following code.
// Locate first character of result.
__ mov(edi, eax);
- __ add(Operand(edi), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ add(edi, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
// Load string argument and locate character of sub string start.
__ mov(esi, Operand(esp, 3 * kPointerSize));
- __ add(Operand(esi), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ add(esi, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
__ mov(ebx, Operand(esp, 2 * kPointerSize)); // from
__ SmiUntag(ebx);
- __ add(esi, Operand(ebx));
+ __ add(esi, ebx);
// eax: result string
// ecx: result length
@@ -5851,18 +6153,17 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ mov(edx, esi); // esi used by following code.
// Locate first character of result.
__ mov(edi, eax);
- __ add(Operand(edi),
+ __ add(edi,
Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
// Load string argument and locate character of sub string start.
__ mov(esi, Operand(esp, 3 * kPointerSize));
- __ add(Operand(esi),
- Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+ __ add(esi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
__ mov(ebx, Operand(esp, 2 * kPointerSize)); // from
  // As from is a smi, it is 2 times the character index, which matches the
  // size of a two-byte character.
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
- __ add(esi, Operand(ebx));
+ __ add(esi, ebx);
// eax: result string
// ecx: result length
@@ -5902,7 +6203,7 @@ void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
Label compare_chars;
__ bind(&check_zero_length);
STATIC_ASSERT(kSmiTag == 0);
- __ test(length, Operand(length));
+ __ test(length, length);
__ j(not_zero, &compare_chars, Label::kNear);
__ Set(eax, Immediate(Smi::FromInt(EQUAL)));
__ ret(0);
@@ -5937,14 +6238,14 @@ void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
__ j(less_equal, &left_shorter, Label::kNear);
// Right string is shorter. Change scratch1 to be length of right string.
- __ sub(scratch1, Operand(length_delta));
+ __ sub(scratch1, length_delta);
__ bind(&left_shorter);
Register min_length = scratch1;
// If either length is zero, just compare lengths.
Label compare_lengths;
- __ test(min_length, Operand(min_length));
+ __ test(min_length, min_length);
__ j(zero, &compare_lengths, Label::kNear);
// Compare characters.
@@ -5954,7 +6255,7 @@ void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
// Compare lengths - strings up to min-length are equal.
__ bind(&compare_lengths);
- __ test(length_delta, Operand(length_delta));
+ __ test(length_delta, length_delta);
__ j(not_zero, &result_not_equal, Label::kNear);
// Result is EQUAL.
@@ -6003,7 +6304,7 @@ void StringCompareStub::GenerateAsciiCharsCompareLoop(
__ mov_b(scratch, Operand(left, index, times_1, 0));
__ cmpb(scratch, Operand(right, index, times_1, 0));
__ j(not_equal, chars_not_equal, chars_not_equal_near);
- __ add(Operand(index), Immediate(1));
+ __ add(index, Immediate(1));
__ j(not_zero, &loop);
}
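
The compare loop runs its index from -min_length up to zero, so the
increment's zero flag doubles as the loop condition. Roughly, in C++
(pointer names are illustrative; callers have already ruled out
min_length == 0):

  // left_end/right_end point one past the compared prefix of each string.
  bool PrefixEqual(const char* left_end, const char* right_end,
                   int min_length) {
    for (int index = -min_length; index != 0; index++) {  // add(index, 1)
      if (left_end[index] != right_end[index]) return false;
    }
    return true;
  }
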
@@ -6020,7 +6321,7 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
__ mov(eax, Operand(esp, 1 * kPointerSize)); // right
Label not_same;
- __ cmp(edx, Operand(eax));
+ __ cmp(edx, eax);
__ j(not_equal, &not_same, Label::kNear);
STATIC_ASSERT(EQUAL == 0);
STATIC_ASSERT(kSmiTag == 0);
@@ -6036,7 +6337,7 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
// Compare flat ascii strings.
// Drop arguments from the stack.
__ pop(ecx);
- __ add(Operand(esp), Immediate(2 * kPointerSize));
+ __ add(esp, Immediate(2 * kPointerSize));
__ push(ecx);
GenerateCompareFlatAsciiStrings(masm, edx, eax, ecx, ebx, edi);
@@ -6050,16 +6351,16 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
ASSERT(state_ == CompareIC::SMIS);
Label miss;
- __ mov(ecx, Operand(edx));
- __ or_(ecx, Operand(eax));
+ __ mov(ecx, edx);
+ __ or_(ecx, eax);
__ JumpIfNotSmi(ecx, &miss, Label::kNear);
if (GetCondition() == equal) {
// For equality we do not care about the sign of the result.
- __ sub(eax, Operand(edx));
+ __ sub(eax, edx);
} else {
Label done;
- __ sub(edx, Operand(eax));
+ __ sub(edx, eax);
__ j(no_overflow, &done, Label::kNear);
// Correct sign of result in case of overflow.
__ not_(edx);
@@ -6079,8 +6380,8 @@ void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
Label generic_stub;
Label unordered;
Label miss;
- __ mov(ecx, Operand(edx));
- __ and_(ecx, Operand(eax));
+ __ mov(ecx, edx);
+ __ and_(ecx, eax);
__ JumpIfSmi(ecx, &generic_stub, Label::kNear);
__ CmpObjectType(eax, HEAP_NUMBER_TYPE, ecx);
@@ -6108,9 +6409,9 @@ void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
// Performing mov, because xor would destroy the flag register.
__ mov(eax, 0); // equal
__ mov(ecx, Immediate(Smi::FromInt(1)));
- __ cmov(above, eax, Operand(ecx));
+ __ cmov(above, eax, ecx);
__ mov(ecx, Immediate(Smi::FromInt(-1)));
- __ cmov(below, eax, Operand(ecx));
+ __ cmov(below, eax, ecx);
__ ret(0);
__ bind(&unordered);
@@ -6137,9 +6438,9 @@ void ICCompareStub::GenerateSymbols(MacroAssembler* masm) {
// Check that both operands are heap objects.
Label miss;
- __ mov(tmp1, Operand(left));
+ __ mov(tmp1, left);
STATIC_ASSERT(kSmiTag == 0);
- __ and_(tmp1, Operand(right));
+ __ and_(tmp1, right);
__ JumpIfSmi(tmp1, &miss, Label::kNear);
// Check that both operands are symbols.
@@ -6148,13 +6449,13 @@ void ICCompareStub::GenerateSymbols(MacroAssembler* masm) {
__ movzx_b(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
__ movzx_b(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
STATIC_ASSERT(kSymbolTag != 0);
- __ and_(tmp1, Operand(tmp2));
+ __ and_(tmp1, tmp2);
__ test(tmp1, Immediate(kIsSymbolMask));
__ j(zero, &miss, Label::kNear);
// Symbols are compared by identity.
Label done;
- __ cmp(left, Operand(right));
+ __ cmp(left, right);
// Make sure eax is non-zero. At this point input operands are
// guaranteed to be non-zero.
ASSERT(right.is(eax));
@@ -6183,9 +6484,9 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
Register tmp3 = edi;
// Check that both operands are heap objects.
- __ mov(tmp1, Operand(left));
+ __ mov(tmp1, left);
STATIC_ASSERT(kSmiTag == 0);
- __ and_(tmp1, Operand(right));
+ __ and_(tmp1, right);
__ JumpIfSmi(tmp1, &miss);
// Check that both operands are strings. This leaves the instance
@@ -6196,13 +6497,13 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
__ movzx_b(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
__ mov(tmp3, tmp1);
STATIC_ASSERT(kNotStringTag != 0);
- __ or_(tmp3, Operand(tmp2));
+ __ or_(tmp3, tmp2);
__ test(tmp3, Immediate(kIsNotStringMask));
__ j(not_zero, &miss);
// Fast check for identical strings.
Label not_same;
- __ cmp(left, Operand(right));
+ __ cmp(left, right);
__ j(not_equal, &not_same, Label::kNear);
STATIC_ASSERT(EQUAL == 0);
STATIC_ASSERT(kSmiTag == 0);
@@ -6216,7 +6517,7 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
// because we already know they are not identical.
Label do_compare;
STATIC_ASSERT(kSymbolTag != 0);
- __ and_(tmp1, Operand(tmp2));
+ __ and_(tmp1, tmp2);
__ test(tmp1, Immediate(kIsSymbolMask));
__ j(zero, &do_compare, Label::kNear);
// Make sure eax is non-zero. At this point input operands are
@@ -6249,8 +6550,8 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
ASSERT(state_ == CompareIC::OBJECTS);
Label miss;
- __ mov(ecx, Operand(edx));
- __ and_(ecx, Operand(eax));
+ __ mov(ecx, edx);
+ __ and_(ecx, eax);
__ JumpIfSmi(ecx, &miss, Label::kNear);
__ CmpObjectType(eax, JS_OBJECT_TYPE, ecx);
@@ -6259,7 +6560,7 @@ void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
__ j(not_equal, &miss, Label::kNear);
ASSERT(GetCondition() == equal);
- __ sub(eax, Operand(edx));
+ __ sub(eax, edx);
__ ret(0);
__ bind(&miss);
@@ -6274,15 +6575,16 @@ void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
__ push(eax);
__ push(ecx);
- // Call the runtime system in a fresh internal frame.
- ExternalReference miss = ExternalReference(IC_Utility(IC::kCompareIC_Miss),
- masm->isolate());
- __ EnterInternalFrame();
- __ push(edx);
- __ push(eax);
- __ push(Immediate(Smi::FromInt(op_)));
- __ CallExternalReference(miss, 3);
- __ LeaveInternalFrame();
+ {
+ // Call the runtime system in a fresh internal frame.
+ ExternalReference miss = ExternalReference(IC_Utility(IC::kCompareIC_Miss),
+ masm->isolate());
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ push(edx);
+ __ push(eax);
+ __ push(Immediate(Smi::FromInt(op_)));
+ __ CallExternalReference(miss, 3);
+ }
// Compute the entry point of the rewritten stub.
__ lea(edi, FieldOperand(eax, Code::kHeaderSize));
@@ -6294,7 +6596,7 @@ void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
__ push(ecx);
// Do a tail call to the rewritten stub.
- __ jmp(Operand(edi));
+ __ jmp(edi);
}
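
This hunk shows the pattern used throughout the patch: explicit
EnterInternalFrame/LeaveInternalFrame pairs become a block-scoped
FrameScope, so the frame is torn down when the block exits. A minimal
sketch of the RAII shape, assuming the constructor and destructor simply
wrap EnterFrame/LeaveFrame (the real class also tracks has_frame()):

  class FrameScope {
   public:
    FrameScope(MacroAssembler* masm, StackFrame::Type type)
        : masm_(masm), type_(type) {
      masm_->EnterFrame(type_);    // was: EnterInternalFrame()
    }
    ~FrameScope() {
      masm_->LeaveFrame(type_);    // was: LeaveInternalFrame()
    }
   private:
    MacroAssembler* masm_;
    StackFrame::Type type_;
  };
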
@@ -6303,13 +6605,12 @@ void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
// must always call a backup property check that is complete.
// This function is safe to call if the receiver has fast properties.
// Name must be a symbol and receiver must be a heap object.
-MaybeObject* StringDictionaryLookupStub::GenerateNegativeLookup(
- MacroAssembler* masm,
- Label* miss,
- Label* done,
- Register properties,
- String* name,
- Register r0) {
+void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
+ Label* miss,
+ Label* done,
+ Register properties,
+ Handle<String> name,
+ Register r0) {
ASSERT(name->IsSymbol());
// If names of slots in range from 1 to kProbes - 1 for the hash value are
@@ -6323,8 +6624,8 @@ MaybeObject* StringDictionaryLookupStub::GenerateNegativeLookup(
// Capacity is smi 2^n.
__ mov(index, FieldOperand(properties, kCapacityOffset));
__ dec(index);
- __ and_(Operand(index),
- Immediate(Smi::FromInt(name->Hash() +
+ __ and_(index,
+ Immediate(Smi::FromInt(name->Hash() +
StringDictionary::GetProbeOffset(i))));
// Scale the index by multiplying by the entry size.
@@ -6355,12 +6656,10 @@ MaybeObject* StringDictionaryLookupStub::GenerateNegativeLookup(
StringDictionaryLookupStub::NEGATIVE_LOOKUP);
__ push(Immediate(Handle<Object>(name)));
__ push(Immediate(name->Hash()));
- MaybeObject* result = masm->TryCallStub(&stub);
- if (result->IsFailure()) return result;
- __ test(r0, Operand(r0));
+ __ CallStub(&stub);
+ __ test(r0, r0);
__ j(not_zero, miss);
__ jmp(done);
- return result;
}
@@ -6375,6 +6674,11 @@ void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
Register name,
Register r0,
Register r1) {
+ ASSERT(!elements.is(r0));
+ ASSERT(!elements.is(r1));
+ ASSERT(!name.is(r0));
+ ASSERT(!name.is(r1));
+
// Assert that name contains a string.
if (FLAG_debug_code) __ AbortIfNotString(name);
@@ -6390,9 +6694,9 @@ void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
__ mov(r0, FieldOperand(name, String::kHashFieldOffset));
__ shr(r0, String::kHashShift);
if (i > 0) {
- __ add(Operand(r0), Immediate(StringDictionary::GetProbeOffset(i)));
+ __ add(r0, Immediate(StringDictionary::GetProbeOffset(i)));
}
- __ and_(r0, Operand(r1));
+ __ and_(r0, r1);
// Scale the index by multiplying by the entry size.
ASSERT(StringDictionary::kEntrySize == 3);
@@ -6416,13 +6720,15 @@ void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
__ push(r0);
__ CallStub(&stub);
- __ test(r1, Operand(r1));
+ __ test(r1, r1);
__ j(zero, miss);
__ jmp(done);
}
void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
+ // This stub overrides SometimesSetsUpAFrame() to return false. That means
+ // we cannot call anything that could cause a GC from this stub.
// Stack frame on entry:
// esp[0 * kPointerSize]: return address.
// esp[1 * kPointerSize]: key's hash.
@@ -6453,8 +6759,7 @@ void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
// Compute the masked index: (hash + i + i * i) & mask.
__ mov(scratch, Operand(esp, 2 * kPointerSize));
if (i > 0) {
- __ add(Operand(scratch),
- Immediate(StringDictionary::GetProbeOffset(i)));
+ __ add(scratch, Immediate(StringDictionary::GetProbeOffset(i)));
}
__ and_(scratch, Operand(esp, 0));
@@ -6510,6 +6815,364 @@ void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
}
+struct AheadOfTimeWriteBarrierStubList {
+ Register object, value, address;
+ RememberedSetAction action;
+};
+
+
+struct AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
+ // Used in RegExpExecStub.
+ { ebx, eax, edi, EMIT_REMEMBERED_SET },
+ // Used in CompileArrayPushCall.
+ { ebx, ecx, edx, EMIT_REMEMBERED_SET },
+ { ebx, edi, edx, OMIT_REMEMBERED_SET },
+ // Used in CompileStoreGlobal and CallFunctionStub.
+ { ebx, ecx, edx, OMIT_REMEMBERED_SET },
+ // Used in StoreStubCompiler::CompileStoreField and
+ // KeyedStoreStubCompiler::CompileStoreField via GenerateStoreField.
+ { edx, ecx, ebx, EMIT_REMEMBERED_SET },
+ // GenerateStoreField calls the stub with two different permutations of
+ // registers. This is the second.
+ { ebx, ecx, edx, EMIT_REMEMBERED_SET },
+ // StoreIC::GenerateNormal via GenerateDictionaryStore
+ { ebx, edi, edx, EMIT_REMEMBERED_SET },
+ // KeyedStoreIC::GenerateGeneric.
+ { ebx, edx, ecx, EMIT_REMEMBERED_SET},
+ // KeyedStoreStubCompiler::GenerateStoreFastElement.
+ { edi, edx, ecx, EMIT_REMEMBERED_SET},
+ // ElementsTransitionGenerator::GenerateSmiOnlyToObject
+ // and ElementsTransitionGenerator::GenerateSmiOnlyToDouble
+ // and ElementsTransitionGenerator::GenerateDoubleToObject
+ { edx, ebx, edi, EMIT_REMEMBERED_SET},
+ // ElementsTransitionGenerator::GenerateDoubleToObject
+ { eax, edx, esi, EMIT_REMEMBERED_SET},
+ { edx, eax, edi, EMIT_REMEMBERED_SET},
+ // StoreArrayLiteralElementStub::Generate
+ { ebx, eax, ecx, EMIT_REMEMBERED_SET},
+ // Null termination.
+ { no_reg, no_reg, no_reg, EMIT_REMEMBERED_SET}
+};
+
+
+bool RecordWriteStub::IsPregenerated() {
+ for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
+ !entry->object.is(no_reg);
+ entry++) {
+ if (object_.is(entry->object) &&
+ value_.is(entry->value) &&
+ address_.is(entry->address) &&
+ remembered_set_action_ == entry->action &&
+ save_fp_regs_mode_ == kDontSaveFPRegs) {
+ return true;
+ }
+ }
+ return false;
+}
+
+
+void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime() {
+ StoreBufferOverflowStub stub1(kDontSaveFPRegs);
+ stub1.GetCode()->set_is_pregenerated(true);
+
+ CpuFeatures::TryForceFeatureScope scope(SSE2);
+ if (CpuFeatures::IsSupported(SSE2)) {
+ StoreBufferOverflowStub stub2(kSaveFPRegs);
+ stub2.GetCode()->set_is_pregenerated(true);
+ }
+}
+
+
+void RecordWriteStub::GenerateFixedRegStubsAheadOfTime() {
+ for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
+ !entry->object.is(no_reg);
+ entry++) {
+ RecordWriteStub stub(entry->object,
+ entry->value,
+ entry->address,
+ entry->action,
+ kDontSaveFPRegs);
+ stub.GetCode()->set_is_pregenerated(true);
+ }
+}
+
+
+// Takes the input in 3 registers: address_, value_, and object_. A pointer to
+// the value has just been written into the object, now this stub makes sure
+// we keep the GC informed. The word in the object where the value has been
+// written is in the address register.
+void RecordWriteStub::Generate(MacroAssembler* masm) {
+ Label skip_to_incremental_noncompacting;
+ Label skip_to_incremental_compacting;
+
+ // The first two instructions are generated with labels so as to get the
+ // offset fixed up correctly by the bind(Label*) call. We patch it back and
+ // forth between a compare instructions (a nop in this position) and the
+ // real branch when we start and stop incremental heap marking.
+ __ jmp(&skip_to_incremental_noncompacting, Label::kNear);
+ __ jmp(&skip_to_incremental_compacting, Label::kFar);
+
+ if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
+ __ RememberedSetHelper(object_,
+ address_,
+ value_,
+ save_fp_regs_mode_,
+ MacroAssembler::kReturnAtEnd);
+ } else {
+ __ ret(0);
+ }
+
+ __ bind(&skip_to_incremental_noncompacting);
+ GenerateIncremental(masm, INCREMENTAL);
+
+ __ bind(&skip_to_incremental_compacting);
+ GenerateIncremental(masm, INCREMENTAL_COMPACTION);
+
+ // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
+ // Will be checked in IncrementalMarking::ActivateGeneratedStub.
+ masm->set_byte_at(0, kTwoByteNopInstruction);
+ masm->set_byte_at(2, kFiveByteNopInstruction);
+}
+
+
+void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
+ regs_.Save(masm);
+
+ if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
+ Label dont_need_remembered_set;
+
+ __ mov(regs_.scratch0(), Operand(regs_.address(), 0));
+ __ JumpIfNotInNewSpace(regs_.scratch0(), // Value.
+ regs_.scratch0(),
+ &dont_need_remembered_set);
+
+ __ CheckPageFlag(regs_.object(),
+ regs_.scratch0(),
+ 1 << MemoryChunk::SCAN_ON_SCAVENGE,
+ not_zero,
+ &dont_need_remembered_set);
+
+ // First notify the incremental marker if necessary, then update the
+ // remembered set.
+ CheckNeedsToInformIncrementalMarker(
+ masm,
+ kUpdateRememberedSetOnNoNeedToInformIncrementalMarker,
+ mode);
+ InformIncrementalMarker(masm, mode);
+ regs_.Restore(masm);
+ __ RememberedSetHelper(object_,
+ address_,
+ value_,
+ save_fp_regs_mode_,
+ MacroAssembler::kReturnAtEnd);
+
+ __ bind(&dont_need_remembered_set);
+ }
+
+ CheckNeedsToInformIncrementalMarker(
+ masm,
+ kReturnOnNoNeedToInformIncrementalMarker,
+ mode);
+ InformIncrementalMarker(masm, mode);
+ regs_.Restore(masm);
+ __ ret(0);
+}
+
+
+void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
+ regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
+ int argument_count = 3;
+ __ PrepareCallCFunction(argument_count, regs_.scratch0());
+ __ mov(Operand(esp, 0 * kPointerSize), regs_.object());
+ if (mode == INCREMENTAL_COMPACTION) {
+ __ mov(Operand(esp, 1 * kPointerSize), regs_.address()); // Slot.
+ } else {
+ ASSERT(mode == INCREMENTAL);
+ __ mov(regs_.scratch0(), Operand(regs_.address(), 0));
+ __ mov(Operand(esp, 1 * kPointerSize), regs_.scratch0()); // Value.
+ }
+ __ mov(Operand(esp, 2 * kPointerSize),
+ Immediate(ExternalReference::isolate_address()));
+
+ AllowExternalCallThatCantCauseGC scope(masm);
+ if (mode == INCREMENTAL_COMPACTION) {
+ __ CallCFunction(
+ ExternalReference::incremental_evacuation_record_write_function(
+ masm->isolate()),
+ argument_count);
+ } else {
+ ASSERT(mode == INCREMENTAL);
+ __ CallCFunction(
+ ExternalReference::incremental_marking_record_write_function(
+ masm->isolate()),
+ argument_count);
+ }
+ regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
+}
+
+
+void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
+ MacroAssembler* masm,
+ OnNoNeedToInformIncrementalMarker on_no_need,
+ Mode mode) {
+ Label object_is_black, need_incremental, need_incremental_pop_object;
+
+ // Let's look at the color of the object: If it is not black we don't have
+ // to inform the incremental marker.
+ __ JumpIfBlack(regs_.object(),
+ regs_.scratch0(),
+ regs_.scratch1(),
+ &object_is_black,
+ Label::kNear);
+
+ regs_.Restore(masm);
+ if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
+ __ RememberedSetHelper(object_,
+ address_,
+ value_,
+ save_fp_regs_mode_,
+ MacroAssembler::kReturnAtEnd);
+ } else {
+ __ ret(0);
+ }
+
+ __ bind(&object_is_black);
+
+ // Get the value from the slot.
+ __ mov(regs_.scratch0(), Operand(regs_.address(), 0));
+
+ if (mode == INCREMENTAL_COMPACTION) {
+ Label ensure_not_white;
+
+ __ CheckPageFlag(regs_.scratch0(), // Contains value.
+ regs_.scratch1(), // Scratch.
+ MemoryChunk::kEvacuationCandidateMask,
+ zero,
+ &ensure_not_white,
+ Label::kNear);
+
+ __ CheckPageFlag(regs_.object(),
+ regs_.scratch1(), // Scratch.
+ MemoryChunk::kSkipEvacuationSlotsRecordingMask,
+ not_zero,
+ &ensure_not_white,
+ Label::kNear);
+
+ __ jmp(&need_incremental);
+
+ __ bind(&ensure_not_white);
+ }
+
+ // We need an extra register for this, so we push the object register
+ // temporarily.
+ __ push(regs_.object());
+ __ EnsureNotWhite(regs_.scratch0(), // The value.
+ regs_.scratch1(), // Scratch.
+ regs_.object(), // Scratch.
+ &need_incremental_pop_object,
+ Label::kNear);
+ __ pop(regs_.object());
+
+ regs_.Restore(masm);
+ if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
+ __ RememberedSetHelper(object_,
+ address_,
+ value_,
+ save_fp_regs_mode_,
+ MacroAssembler::kReturnAtEnd);
+ } else {
+ __ ret(0);
+ }
+
+ __ bind(&need_incremental_pop_object);
+ __ pop(regs_.object());
+
+ __ bind(&need_incremental);
+
+ // Fall through when we need to inform the incremental marker.
+}
+
+
+void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : element value to store
+ // -- ebx : array literal
+ // -- edi : map of array literal
+ // -- ecx : element index as smi
+ // -- edx : array literal index in function
+ // -- esp[0] : return address
+ // -----------------------------------
+
+ Label element_done;
+ Label double_elements;
+ Label smi_element;
+ Label slow_elements;
+ Label slow_elements_from_double;
+ Label fast_elements;
+
+ __ CheckFastElements(edi, &double_elements);
+
+ // FAST_SMI_ONLY_ELEMENTS or FAST_ELEMENTS
+ __ JumpIfSmi(eax, &smi_element);
+ __ CheckFastSmiOnlyElements(edi, &fast_elements, Label::kNear);
+
+  // Storing into the array literal requires an elements transition. Call
+  // into the runtime.
+
+ __ bind(&slow_elements);
+  __ pop(edi);  // Pop the return address; it is pushed back just before the
+                // tail call.
+ __ push(ebx);
+ __ push(ecx);
+ __ push(eax);
+ __ mov(ebx, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+ __ push(FieldOperand(ebx, JSFunction::kLiteralsOffset));
+ __ push(edx);
+  __ push(edi);  // Push the return address back so that the tail call
+                 // returns to the right place.
+ __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
+
+ __ bind(&slow_elements_from_double);
+ __ pop(edx);
+ __ jmp(&slow_elements);
+
+ // Array literal has ElementsKind of FAST_ELEMENTS and value is an object.
+ __ bind(&fast_elements);
+ __ mov(ebx, FieldOperand(ebx, JSObject::kElementsOffset));
+ __ lea(ecx, FieldOperand(ebx, ecx, times_half_pointer_size,
+ FixedArrayBase::kHeaderSize));
+ __ mov(Operand(ecx, 0), eax);
+ // Update the write barrier for the array store.
+ __ RecordWrite(ebx, ecx, eax,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ __ ret(0);
+
+ // Array literal has ElementsKind of FAST_SMI_ONLY_ELEMENTS or
+ // FAST_ELEMENTS, and value is Smi.
+ __ bind(&smi_element);
+ __ mov(ebx, FieldOperand(ebx, JSObject::kElementsOffset));
+ __ mov(FieldOperand(ebx, ecx, times_half_pointer_size,
+ FixedArrayBase::kHeaderSize), eax);
+ __ ret(0);
+
+ // Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS.
+ __ bind(&double_elements);
+
+ __ push(edx);
+ __ mov(edx, FieldOperand(ebx, JSObject::kElementsOffset));
+ __ StoreNumberToDoubleElements(eax,
+ edx,
+ ecx,
+ edi,
+ xmm0,
+ &slow_elements_from_double,
+ false);
+ __ pop(edx);
+ __ ret(0);
+}
+
#undef __
} } // namespace v8::internal
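
StoreArrayLiteralElementStub::Generate is easiest to read as a decision
tree over the literal's elements kind and the value's type. A hedged
pseudocode rendering in C++ (the helper names are stand-ins, not the
stub's actual API):

  enum ElementsKind { FAST_SMI_ONLY_ELEMENTS, FAST_ELEMENTS,
                      FAST_DOUBLE_ELEMENTS };

  void StoreArrayLiteralElement(ElementsKind kind, bool value_is_smi) {
    if (kind == FAST_DOUBLE_ELEMENTS) {
      StoreNumberToDoubleElements();  // may fall back to the runtime
    } else if (value_is_smi) {
      StoreWithoutWriteBarrier();     // smis are not heap pointers
    } else if (kind == FAST_ELEMENTS) {
      StoreWithWriteBarrier();        // RecordWrite after the store
    } else {  // FAST_SMI_ONLY_ELEMENTS and a heap object value
      TransitionViaRuntime();         // Runtime::kStoreArrayLiteralElement
    }
  }
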
diff --git a/deps/v8/src/ia32/code-stubs-ia32.h b/deps/v8/src/ia32/code-stubs-ia32.h
index fa255da1f..4d23c3a17 100644
--- a/deps/v8/src/ia32/code-stubs-ia32.h
+++ b/deps/v8/src/ia32/code-stubs-ia32.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -60,6 +60,25 @@ class TranscendentalCacheStub: public CodeStub {
};
+class StoreBufferOverflowStub: public CodeStub {
+ public:
+ explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp)
+ : save_doubles_(save_fp) { }
+
+ void Generate(MacroAssembler* masm);
+
+ virtual bool IsPregenerated() { return true; }
+ static void GenerateFixedRegStubsAheadOfTime();
+ virtual bool SometimesSetsUpAFrame() { return false; }
+
+ private:
+ SaveFPRegsMode save_doubles_;
+
+ Major MajorKey() { return StoreBufferOverflow; }
+ int MinorKey() { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; }
+};
+
+
class UnaryOpStub: public CodeStub {
public:
UnaryOpStub(Token::Value op,
@@ -128,7 +147,7 @@ class UnaryOpStub: public CodeStub {
return UnaryOpIC::ToState(operand_type_);
}
- virtual void FinishCode(Code* code) {
+ virtual void FinishCode(Handle<Code> code) {
code->set_unary_op_type(operand_type_);
}
};
@@ -215,7 +234,7 @@ class BinaryOpStub: public CodeStub {
return BinaryOpIC::ToState(operands_type_);
}
- virtual void FinishCode(Code* code) {
+ virtual void FinishCode(Handle<Code> code) {
code->set_binary_op_type(operands_type_);
code->set_binary_op_result_type(result_type_);
}
@@ -402,13 +421,12 @@ class StringDictionaryLookupStub: public CodeStub {
void Generate(MacroAssembler* masm);
- MUST_USE_RESULT static MaybeObject* GenerateNegativeLookup(
- MacroAssembler* masm,
- Label* miss,
- Label* done,
- Register properties,
- String* name,
- Register r0);
+ static void GenerateNegativeLookup(MacroAssembler* masm,
+ Label* miss,
+ Label* done,
+ Register properties,
+ Handle<String> name,
+ Register r0);
static void GeneratePositiveLookup(MacroAssembler* masm,
Label* miss,
@@ -418,6 +436,8 @@ class StringDictionaryLookupStub: public CodeStub {
Register r0,
Register r1);
+ virtual bool SometimesSetsUpAFrame() { return false; }
+
private:
static const int kInlinedProbes = 4;
static const int kTotalProbes = 20;
@@ -430,7 +450,7 @@ class StringDictionaryLookupStub: public CodeStub {
StringDictionary::kHeaderSize +
StringDictionary::kElementsStartIndex * kPointerSize;
- Major MajorKey() { return StringDictionaryNegativeLookup; }
+ Major MajorKey() { return StringDictionaryLookup; }
int MinorKey() {
return DictionaryBits::encode(dictionary_.code()) |
@@ -451,6 +471,265 @@ class StringDictionaryLookupStub: public CodeStub {
};
+class RecordWriteStub: public CodeStub {
+ public:
+ RecordWriteStub(Register object,
+ Register value,
+ Register address,
+ RememberedSetAction remembered_set_action,
+ SaveFPRegsMode fp_mode)
+ : object_(object),
+ value_(value),
+ address_(address),
+ remembered_set_action_(remembered_set_action),
+ save_fp_regs_mode_(fp_mode),
+ regs_(object, // An input reg.
+ address, // An input reg.
+ value) { // One scratch reg.
+ }
+
+ enum Mode {
+ STORE_BUFFER_ONLY,
+ INCREMENTAL,
+ INCREMENTAL_COMPACTION
+ };
+
+ virtual bool IsPregenerated();
+ static void GenerateFixedRegStubsAheadOfTime();
+ virtual bool SometimesSetsUpAFrame() { return false; }
+
+ static const byte kTwoByteNopInstruction = 0x3c; // Cmpb al, #imm8.
+ static const byte kTwoByteJumpInstruction = 0xeb; // Jmp #imm8.
+
+ static const byte kFiveByteNopInstruction = 0x3d; // Cmpl eax, #imm32.
+ static const byte kFiveByteJumpInstruction = 0xe9; // Jmp #imm32.
+
+ static Mode GetMode(Code* stub) {
+ byte first_instruction = stub->instruction_start()[0];
+ byte second_instruction = stub->instruction_start()[2];
+
+ if (first_instruction == kTwoByteJumpInstruction) {
+ return INCREMENTAL;
+ }
+
+ ASSERT(first_instruction == kTwoByteNopInstruction);
+
+ if (second_instruction == kFiveByteJumpInstruction) {
+ return INCREMENTAL_COMPACTION;
+ }
+
+ ASSERT(second_instruction == kFiveByteNopInstruction);
+
+ return STORE_BUFFER_ONLY;
+ }
+
+ static void Patch(Code* stub, Mode mode) {
+ switch (mode) {
+ case STORE_BUFFER_ONLY:
+ ASSERT(GetMode(stub) == INCREMENTAL ||
+ GetMode(stub) == INCREMENTAL_COMPACTION);
+ stub->instruction_start()[0] = kTwoByteNopInstruction;
+ stub->instruction_start()[2] = kFiveByteNopInstruction;
+ break;
+ case INCREMENTAL:
+ ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
+ stub->instruction_start()[0] = kTwoByteJumpInstruction;
+ break;
+ case INCREMENTAL_COMPACTION:
+ ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
+ stub->instruction_start()[0] = kTwoByteNopInstruction;
+ stub->instruction_start()[2] = kFiveByteJumpInstruction;
+ break;
+ }
+ ASSERT(GetMode(stub) == mode);
+ CPU::FlushICache(stub->instruction_start(), 7);
+ }
+
+ private:
+ // This is a helper class for freeing up 3 scratch registers, where the third
+ // is always ecx (needed for shift operations). The input is two registers
+ // that must be preserved and one scratch register provided by the caller.
+ class RegisterAllocation {
+ public:
+ RegisterAllocation(Register object,
+ Register address,
+ Register scratch0)
+ : object_orig_(object),
+ address_orig_(address),
+ scratch0_orig_(scratch0),
+ object_(object),
+ address_(address),
+ scratch0_(scratch0) {
+ ASSERT(!AreAliased(scratch0, object, address, no_reg));
+ scratch1_ = GetRegThatIsNotEcxOr(object_, address_, scratch0_);
+ if (scratch0.is(ecx)) {
+ scratch0_ = GetRegThatIsNotEcxOr(object_, address_, scratch1_);
+ }
+ if (object.is(ecx)) {
+ object_ = GetRegThatIsNotEcxOr(address_, scratch0_, scratch1_);
+ }
+ if (address.is(ecx)) {
+ address_ = GetRegThatIsNotEcxOr(object_, scratch0_, scratch1_);
+ }
+ ASSERT(!AreAliased(scratch0_, object_, address_, ecx));
+ }
+
+ void Save(MacroAssembler* masm) {
+ ASSERT(!address_orig_.is(object_));
+ ASSERT(object_.is(object_orig_) || address_.is(address_orig_));
+ ASSERT(!AreAliased(object_, address_, scratch1_, scratch0_));
+ ASSERT(!AreAliased(object_orig_, address_, scratch1_, scratch0_));
+ ASSERT(!AreAliased(object_, address_orig_, scratch1_, scratch0_));
+ // We don't have to save scratch0_orig_ because it was given to us as
+ // a scratch register. But if we had to switch to a different reg then
+ // we should save the new scratch0_.
+ if (!scratch0_.is(scratch0_orig_)) masm->push(scratch0_);
+ if (!ecx.is(scratch0_orig_) &&
+ !ecx.is(object_orig_) &&
+ !ecx.is(address_orig_)) {
+ masm->push(ecx);
+ }
+ masm->push(scratch1_);
+ if (!address_.is(address_orig_)) {
+ masm->push(address_);
+ masm->mov(address_, address_orig_);
+ }
+ if (!object_.is(object_orig_)) {
+ masm->push(object_);
+ masm->mov(object_, object_orig_);
+ }
+ }
+
+ void Restore(MacroAssembler* masm) {
+ // These will have been preserved the entire time, so we just need to move
+ // them back. Only in one case is the orig_ reg different from the plain
+ // one, since only one of them can alias with ecx.
+ if (!object_.is(object_orig_)) {
+ masm->mov(object_orig_, object_);
+ masm->pop(object_);
+ }
+ if (!address_.is(address_orig_)) {
+ masm->mov(address_orig_, address_);
+ masm->pop(address_);
+ }
+ masm->pop(scratch1_);
+ if (!ecx.is(scratch0_orig_) &&
+ !ecx.is(object_orig_) &&
+ !ecx.is(address_orig_)) {
+ masm->pop(ecx);
+ }
+ if (!scratch0_.is(scratch0_orig_)) masm->pop(scratch0_);
+ }
+
+ // If we have to call into C then we need to save and restore all caller-
+ // saved registers that were not already preserved. The caller saved
+ // registers are eax, ecx and edx. The three scratch registers (incl. ecx)
+ // will be restored by other means so we don't bother pushing them here.
+ void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
+ if (!scratch0_.is(eax) && !scratch1_.is(eax)) masm->push(eax);
+ if (!scratch0_.is(edx) && !scratch1_.is(edx)) masm->push(edx);
+ if (mode == kSaveFPRegs) {
+ CpuFeatures::Scope scope(SSE2);
+ masm->sub(esp,
+ Immediate(kDoubleSize * (XMMRegister::kNumRegisters - 1)));
+ // Save all XMM registers except XMM0.
+ for (int i = XMMRegister::kNumRegisters - 1; i > 0; i--) {
+ XMMRegister reg = XMMRegister::from_code(i);
+ masm->movdbl(Operand(esp, (i - 1) * kDoubleSize), reg);
+ }
+ }
+ }
+
+  inline void RestoreCallerSaveRegisters(MacroAssembler* masm,
+ SaveFPRegsMode mode) {
+ if (mode == kSaveFPRegs) {
+ CpuFeatures::Scope scope(SSE2);
+ // Restore all XMM registers except XMM0.
+ for (int i = XMMRegister::kNumRegisters - 1; i > 0; i--) {
+ XMMRegister reg = XMMRegister::from_code(i);
+ masm->movdbl(reg, Operand(esp, (i - 1) * kDoubleSize));
+ }
+ masm->add(esp,
+ Immediate(kDoubleSize * (XMMRegister::kNumRegisters - 1)));
+ }
+ if (!scratch0_.is(edx) && !scratch1_.is(edx)) masm->pop(edx);
+ if (!scratch0_.is(eax) && !scratch1_.is(eax)) masm->pop(eax);
+ }
+
+ inline Register object() { return object_; }
+ inline Register address() { return address_; }
+ inline Register scratch0() { return scratch0_; }
+ inline Register scratch1() { return scratch1_; }
+
+ private:
+ Register object_orig_;
+ Register address_orig_;
+ Register scratch0_orig_;
+ Register object_;
+ Register address_;
+ Register scratch0_;
+ Register scratch1_;
+ // Third scratch register is always ecx.
+
+ Register GetRegThatIsNotEcxOr(Register r1,
+ Register r2,
+ Register r3) {
+ for (int i = 0; i < Register::kNumAllocatableRegisters; i++) {
+ Register candidate = Register::FromAllocationIndex(i);
+ if (candidate.is(ecx)) continue;
+ if (candidate.is(r1)) continue;
+ if (candidate.is(r2)) continue;
+ if (candidate.is(r3)) continue;
+ return candidate;
+ }
+ UNREACHABLE();
+ return no_reg;
+ }
+ friend class RecordWriteStub;
+ };
+
+ enum OnNoNeedToInformIncrementalMarker {
+ kReturnOnNoNeedToInformIncrementalMarker,
+ kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
+  };
+ void Generate(MacroAssembler* masm);
+ void GenerateIncremental(MacroAssembler* masm, Mode mode);
+ void CheckNeedsToInformIncrementalMarker(
+ MacroAssembler* masm,
+ OnNoNeedToInformIncrementalMarker on_no_need,
+ Mode mode);
+ void InformIncrementalMarker(MacroAssembler* masm, Mode mode);
+
+ Major MajorKey() { return RecordWrite; }
+
+ int MinorKey() {
+ return ObjectBits::encode(object_.code()) |
+ ValueBits::encode(value_.code()) |
+ AddressBits::encode(address_.code()) |
+ RememberedSetActionBits::encode(remembered_set_action_) |
+ SaveFPRegsModeBits::encode(save_fp_regs_mode_);
+ }
+
+ void Activate(Code* code) {
+ code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
+ }
+
+ class ObjectBits: public BitField<int, 0, 3> {};
+ class ValueBits: public BitField<int, 3, 3> {};
+ class AddressBits: public BitField<int, 6, 3> {};
+ class RememberedSetActionBits: public BitField<RememberedSetAction, 9, 1> {};
+ class SaveFPRegsModeBits: public BitField<SaveFPRegsMode, 10, 1> {};
+
+ Register object_;
+ Register value_;
+ Register address_;
+ RememberedSetAction remembered_set_action_;
+ SaveFPRegsMode save_fp_regs_mode_;
+ RegisterAllocation regs_;
+};
+
+
} } // namespace v8::internal
#endif // V8_IA32_CODE_STUBS_IA32_H_
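
The GetMode/Patch pair works because the stub always begins with the same
seven patchable bytes, toggled between nop-like compares and jumps (the
?? operand bytes are whatever bind() fixed up):

  // STORE_BUFFER_ONLY:       3c ??  3d ?? ?? ?? ??  cmp al,imm8 / cmp eax,imm32
  // INCREMENTAL:             eb ??  .. .. .. .. ..  jmp rel8 over the rest
  // INCREMENTAL_COMPACTION:  3c ??  e9 ?? ?? ?? ??  2-byte nop, then jmp rel32

CPU::FlushICache(stub->instruction_start(), 7) covers exactly this window.
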
diff --git a/deps/v8/src/ia32/codegen-ia32.cc b/deps/v8/src/ia32/codegen-ia32.cc
index 3a657bd54..e5ca02c47 100644
--- a/deps/v8/src/ia32/codegen-ia32.cc
+++ b/deps/v8/src/ia32/codegen-ia32.cc
@@ -30,6 +30,7 @@
#if defined(V8_TARGET_ARCH_IA32)
#include "codegen.h"
+#include "macro-assembler.h"
namespace v8 {
namespace internal {
@@ -39,12 +40,16 @@ namespace internal {
// Platform-specific RuntimeCallHelper functions.
void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
- masm->EnterInternalFrame();
+ masm->EnterFrame(StackFrame::INTERNAL);
+ ASSERT(!masm->has_frame());
+ masm->set_has_frame(true);
}
void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
- masm->LeaveInternalFrame();
+ masm->LeaveFrame(StackFrame::INTERNAL);
+ ASSERT(masm->has_frame());
+ masm->set_has_frame(false);
}
@@ -108,14 +113,14 @@ OS::MemCopyFunction CreateMemCopyFunction() {
__ mov(edx, dst);
__ and_(edx, 0xF);
__ neg(edx);
- __ add(Operand(edx), Immediate(16));
- __ add(dst, Operand(edx));
- __ add(src, Operand(edx));
- __ sub(Operand(count), edx);
+ __ add(edx, Immediate(16));
+ __ add(dst, edx);
+ __ add(src, edx);
+ __ sub(count, edx);
// edi is now aligned. Check if esi is also aligned.
Label unaligned_source;
- __ test(Operand(src), Immediate(0x0F));
+ __ test(src, Immediate(0x0F));
__ j(not_zero, &unaligned_source);
{
// Copy loop for aligned source and destination.
@@ -130,11 +135,11 @@ OS::MemCopyFunction CreateMemCopyFunction() {
__ prefetch(Operand(src, 0x20), 1);
__ movdqa(xmm0, Operand(src, 0x00));
__ movdqa(xmm1, Operand(src, 0x10));
- __ add(Operand(src), Immediate(0x20));
+ __ add(src, Immediate(0x20));
__ movdqa(Operand(dst, 0x00), xmm0);
__ movdqa(Operand(dst, 0x10), xmm1);
- __ add(Operand(dst), Immediate(0x20));
+ __ add(dst, Immediate(0x20));
__ dec(loop_count);
__ j(not_zero, &loop);
@@ -142,12 +147,12 @@ OS::MemCopyFunction CreateMemCopyFunction() {
// At most 31 bytes to copy.
Label move_less_16;
- __ test(Operand(count), Immediate(0x10));
+ __ test(count, Immediate(0x10));
__ j(zero, &move_less_16);
__ movdqa(xmm0, Operand(src, 0));
- __ add(Operand(src), Immediate(0x10));
+ __ add(src, Immediate(0x10));
__ movdqa(Operand(dst, 0), xmm0);
- __ add(Operand(dst), Immediate(0x10));
+ __ add(dst, Immediate(0x10));
__ bind(&move_less_16);
// At most 15 bytes to copy. Copy 16 bytes at end of string.
@@ -176,11 +181,11 @@ OS::MemCopyFunction CreateMemCopyFunction() {
__ prefetch(Operand(src, 0x20), 1);
__ movdqu(xmm0, Operand(src, 0x00));
__ movdqu(xmm1, Operand(src, 0x10));
- __ add(Operand(src), Immediate(0x20));
+ __ add(src, Immediate(0x20));
__ movdqa(Operand(dst, 0x00), xmm0);
__ movdqa(Operand(dst, 0x10), xmm1);
- __ add(Operand(dst), Immediate(0x20));
+ __ add(dst, Immediate(0x20));
__ dec(loop_count);
__ j(not_zero, &loop);
@@ -188,12 +193,12 @@ OS::MemCopyFunction CreateMemCopyFunction() {
// At most 31 bytes to copy.
Label move_less_16;
- __ test(Operand(count), Immediate(0x10));
+ __ test(count, Immediate(0x10));
__ j(zero, &move_less_16);
__ movdqu(xmm0, Operand(src, 0));
- __ add(Operand(src), Immediate(0x10));
+ __ add(src, Immediate(0x10));
__ movdqa(Operand(dst, 0), xmm0);
- __ add(Operand(dst), Immediate(0x10));
+ __ add(dst, Immediate(0x10));
__ bind(&move_less_16);
// At most 15 bytes to copy. Copy 16 bytes at end of string.
@@ -228,10 +233,10 @@ OS::MemCopyFunction CreateMemCopyFunction() {
__ mov(edx, dst);
__ and_(edx, 0x03);
__ neg(edx);
- __ add(Operand(edx), Immediate(4)); // edx = 4 - (dst & 3)
- __ add(dst, Operand(edx));
- __ add(src, Operand(edx));
- __ sub(Operand(count), edx);
+ __ add(edx, Immediate(4)); // edx = 4 - (dst & 3)
+ __ add(dst, edx);
+ __ add(src, edx);
+ __ sub(count, edx);
  // edi is now aligned, ecx holds the number of remaining bytes to copy.
__ mov(edx, count);
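
A worked example of the fix-up: edx = -(dst & 3) + 4, so

  dst = 0x1002:  edx = -(0x1002 & 3) + 4 = -2 + 4 = 2   // copy 2 bytes
  dst = 0x1000:  edx = -(0x1000 & 3) + 4 = -0 + 4 = 4   // copy a full word

i.e. an already-aligned destination still copies one full word before the
aligned loop runs, rather than zero bytes.
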
@@ -261,6 +266,370 @@ OS::MemCopyFunction CreateMemCopyFunction() {
#undef __
+// -------------------------------------------------------------------------
+// Code generators
+
+#define __ ACCESS_MASM(masm)
+
+void ElementsTransitionGenerator::GenerateSmiOnlyToObject(
+ MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : value
+ // -- ebx : target map
+ // -- ecx : key
+ // -- edx : receiver
+ // -- esp[0] : return address
+ // -----------------------------------
+ // Set transitioned map.
+ __ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx);
+ __ RecordWriteField(edx,
+ HeapObject::kMapOffset,
+ ebx,
+ edi,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+}
+
+
+void ElementsTransitionGenerator::GenerateSmiOnlyToDouble(
+ MacroAssembler* masm, Label* fail) {
+ // ----------- S t a t e -------------
+ // -- eax : value
+ // -- ebx : target map
+ // -- ecx : key
+ // -- edx : receiver
+ // -- esp[0] : return address
+ // -----------------------------------
+ Label loop, entry, convert_hole, gc_required;
+ __ push(eax);
+ __ push(ebx);
+
+ __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
+ __ mov(edi, FieldOperand(edi, FixedArray::kLengthOffset));
+
+ // Allocate new FixedDoubleArray.
+ // edx: receiver
+ // edi: length of source FixedArray (smi-tagged)
+ __ lea(esi, Operand(edi, times_4, FixedDoubleArray::kHeaderSize));
+ __ AllocateInNewSpace(esi, eax, ebx, no_reg, &gc_required, TAG_OBJECT);
+
+ // eax: destination FixedDoubleArray
+ // edi: number of elements
+ // edx: receiver
+ __ mov(FieldOperand(eax, HeapObject::kMapOffset),
+ Immediate(masm->isolate()->factory()->fixed_double_array_map()));
+ __ mov(FieldOperand(eax, FixedDoubleArray::kLengthOffset), edi);
+ __ mov(esi, FieldOperand(edx, JSObject::kElementsOffset));
+ // Replace receiver's backing store with newly created FixedDoubleArray.
+ __ mov(FieldOperand(edx, JSObject::kElementsOffset), eax);
+ __ mov(ebx, eax);
+ __ RecordWriteField(edx,
+ JSObject::kElementsOffset,
+ ebx,
+ edi,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+
+ __ mov(edi, FieldOperand(esi, FixedArray::kLengthOffset));
+
+ // Prepare for conversion loop.
+ ExternalReference canonical_the_hole_nan_reference =
+ ExternalReference::address_of_the_hole_nan();
+ XMMRegister the_hole_nan = xmm1;
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatures::Scope use_sse2(SSE2);
+ __ movdbl(the_hole_nan,
+ Operand::StaticVariable(canonical_the_hole_nan_reference));
+ }
+ __ jmp(&entry);
+
+ // Call into runtime if GC is required.
+ __ bind(&gc_required);
+ // Restore registers before jumping into runtime.
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ __ pop(ebx);
+ __ pop(eax);
+ __ jmp(fail);
+
+ // Convert and copy elements
+ // esi: source FixedArray
+ __ bind(&loop);
+ __ mov(ebx, FieldOperand(esi, edi, times_2, FixedArray::kHeaderSize));
+ // ebx: current element from source
+ // edi: index of current element
+ __ JumpIfNotSmi(ebx, &convert_hole);
+
+ // Normal smi, convert it to double and store.
+ __ SmiUntag(ebx);
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatures::Scope fscope(SSE2);
+ __ cvtsi2sd(xmm0, ebx);
+ __ movdbl(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize),
+ xmm0);
+ } else {
+ __ push(ebx);
+ __ fild_s(Operand(esp, 0));
+ __ pop(ebx);
+ __ fstp_d(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize));
+ }
+ __ jmp(&entry);
+
+ // Found hole, store hole_nan_as_double instead.
+ __ bind(&convert_hole);
+
+ if (FLAG_debug_code) {
+ __ cmp(ebx, masm->isolate()->factory()->the_hole_value());
+ __ Assert(equal, "object found in smi-only array");
+ }
+
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatures::Scope use_sse2(SSE2);
+ __ movdbl(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize),
+ the_hole_nan);
+ } else {
+ __ fld_d(Operand::StaticVariable(canonical_the_hole_nan_reference));
+ __ fstp_d(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize));
+ }
+
+ __ bind(&entry);
+ __ sub(edi, Immediate(Smi::FromInt(1)));
+ __ j(not_sign, &loop);
+
+ __ pop(ebx);
+ __ pop(eax);
+ // eax: value
+ // ebx: target map
+ // Set transitioned map.
+ __ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx);
+ __ RecordWriteField(edx,
+ HeapObject::kMapOffset,
+ ebx,
+ edi,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ // Restore esi.
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+}
+
+
+void ElementsTransitionGenerator::GenerateDoubleToObject(
+ MacroAssembler* masm, Label* fail) {
+ // ----------- S t a t e -------------
+ // -- eax : value
+ // -- ebx : target map
+ // -- ecx : key
+ // -- edx : receiver
+ // -- esp[0] : return address
+ // -----------------------------------
+ Label loop, entry, convert_hole, gc_required;
+ __ push(eax);
+ __ push(edx);
+ __ push(ebx);
+
+ __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
+ __ mov(ebx, FieldOperand(edi, FixedDoubleArray::kLengthOffset));
+
+ // Allocate new FixedArray.
+ // ebx: length of source FixedDoubleArray (smi-tagged)
+ __ lea(edi, Operand(ebx, times_2, FixedArray::kHeaderSize));
+ __ AllocateInNewSpace(edi, eax, esi, no_reg, &gc_required, TAG_OBJECT);
+
+ // eax: destination FixedArray
+ // ebx: number of elements
+ __ mov(FieldOperand(eax, HeapObject::kMapOffset),
+ Immediate(masm->isolate()->factory()->fixed_array_map()));
+ __ mov(FieldOperand(eax, FixedArray::kLengthOffset), ebx);
+ __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
+
+ __ jmp(&entry);
+
+ // Call into runtime if GC is required.
+ __ bind(&gc_required);
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ __ pop(ebx);
+ __ pop(edx);
+ __ pop(eax);
+ __ jmp(fail);
+
+ // Box doubles into heap numbers.
+ // edi: source FixedDoubleArray
+ // eax: destination FixedArray
+ __ bind(&loop);
+ // ebx: index of current element (smi-tagged)
+ uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
+ __ cmp(FieldOperand(edi, ebx, times_4, offset), Immediate(kHoleNanUpper32));
+ __ j(equal, &convert_hole);
+
+ // Non-hole double, copy value into a heap number.
+ __ AllocateHeapNumber(edx, esi, no_reg, &gc_required);
+ // edx: new heap number
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatures::Scope fscope(SSE2);
+ __ movdbl(xmm0,
+ FieldOperand(edi, ebx, times_4, FixedDoubleArray::kHeaderSize));
+ __ movdbl(FieldOperand(edx, HeapNumber::kValueOffset), xmm0);
+ } else {
+ __ mov(esi, FieldOperand(edi, ebx, times_4, FixedDoubleArray::kHeaderSize));
+ __ mov(FieldOperand(edx, HeapNumber::kValueOffset), esi);
+ __ mov(esi, FieldOperand(edi, ebx, times_4, offset));
+ __ mov(FieldOperand(edx, HeapNumber::kValueOffset + kPointerSize), esi);
+ }
+ __ mov(FieldOperand(eax, ebx, times_2, FixedArray::kHeaderSize), edx);
+ __ mov(esi, ebx);
+ __ RecordWriteArray(eax,
+ edx,
+ esi,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ __ jmp(&entry, Label::kNear);
+
+ // Replace the-hole NaN with the-hole pointer.
+ __ bind(&convert_hole);
+ __ mov(FieldOperand(eax, ebx, times_2, FixedArray::kHeaderSize),
+ masm->isolate()->factory()->the_hole_value());
+
+ __ bind(&entry);
+ __ sub(ebx, Immediate(Smi::FromInt(1)));
+ __ j(not_sign, &loop);
+
+ __ pop(ebx);
+ __ pop(edx);
+ // ebx: target map
+ // edx: receiver
+ // Set transitioned map.
+ __ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx);
+ __ RecordWriteField(edx,
+ HeapObject::kMapOffset,
+ ebx,
+ edi,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ // Replace receiver's backing store with newly created and filled FixedArray.
+ __ mov(FieldOperand(edx, JSObject::kElementsOffset), eax);
+ __ RecordWriteField(edx,
+ JSObject::kElementsOffset,
+ eax,
+ edi,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+
+ // Restore registers.
+ __ pop(eax);
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+}
+
+
+void StringCharLoadGenerator::Generate(MacroAssembler* masm,
+ Factory* factory,
+ Register string,
+ Register index,
+ Register result,
+ Label* call_runtime) {
+ // Fetch the instance type of the receiver into result register.
+ __ mov(result, FieldOperand(string, HeapObject::kMapOffset));
+ __ movzx_b(result, FieldOperand(result, Map::kInstanceTypeOffset));
+
+ // We need special handling for indirect strings.
+ Label check_sequential;
+ __ test(result, Immediate(kIsIndirectStringMask));
+ __ j(zero, &check_sequential, Label::kNear);
+
+ // Dispatch on the indirect string shape: slice or cons.
+ Label cons_string;
+ __ test(result, Immediate(kSlicedNotConsMask));
+ __ j(zero, &cons_string, Label::kNear);
+
+ // Handle slices.
+ Label indirect_string_loaded;
+ __ mov(result, FieldOperand(string, SlicedString::kOffsetOffset));
+ __ SmiUntag(result);
+ __ add(index, result);
+ __ mov(string, FieldOperand(string, SlicedString::kParentOffset));
+ __ jmp(&indirect_string_loaded, Label::kNear);
+
+ // Handle cons strings.
+ // Check whether the right hand side is the empty string (i.e. if
+ // this is really a flat string in a cons string). If that is not
+ // the case we would rather go to the runtime system now to flatten
+ // the string.
+ __ bind(&cons_string);
+ __ cmp(FieldOperand(string, ConsString::kSecondOffset),
+ Immediate(factory->empty_string()));
+ __ j(not_equal, call_runtime);
+ __ mov(string, FieldOperand(string, ConsString::kFirstOffset));
+
+ __ bind(&indirect_string_loaded);
+ __ mov(result, FieldOperand(string, HeapObject::kMapOffset));
+ __ movzx_b(result, FieldOperand(result, Map::kInstanceTypeOffset));
+
+ // Distinguish sequential and external strings. Only these two string
+ // representations can reach here (slices and flat cons strings have been
+ // reduced to the underlying sequential or external string).
+ Label seq_string;
+ __ bind(&check_sequential);
+ STATIC_ASSERT(kSeqStringTag == 0);
+ __ test(result, Immediate(kStringRepresentationMask));
+ __ j(zero, &seq_string, Label::kNear);
+
+ // Handle external strings.
+ Label ascii_external, done;
+ if (FLAG_debug_code) {
+ // Assert that we do not have a cons or slice (indirect strings) here.
+ // Sequential strings have already been ruled out.
+ __ test(result, Immediate(kIsIndirectStringMask));
+ __ Assert(zero, "external string expected, but not found");
+ }
+ // Rule out short external strings.
+ STATIC_CHECK(kShortExternalStringTag != 0);
+ __ test_b(result, kShortExternalStringMask);
+ __ j(not_zero, call_runtime);
+ // Check encoding.
+ STATIC_ASSERT(kTwoByteStringTag == 0);
+ __ test_b(result, kStringEncodingMask);
+ __ mov(result, FieldOperand(string, ExternalString::kResourceDataOffset));
+ __ j(not_equal, &ascii_external, Label::kNear);
+ // Two-byte string.
+ __ movzx_w(result, Operand(result, index, times_2, 0));
+ __ jmp(&done, Label::kNear);
+ __ bind(&ascii_external);
+ // Ascii string.
+ __ movzx_b(result, Operand(result, index, times_1, 0));
+ __ jmp(&done, Label::kNear);
+
+ // Dispatch on the encoding: ASCII or two-byte.
+ Label ascii;
+ __ bind(&seq_string);
+ STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
+ STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
+ __ test(result, Immediate(kStringEncodingMask));
+ __ j(not_zero, &ascii, Label::kNear);
+
+ // Two-byte string.
+ // Load the two-byte character code into the result register.
+ __ movzx_w(result, FieldOperand(string,
+ index,
+ times_2,
+ SeqTwoByteString::kHeaderSize));
+ __ jmp(&done, Label::kNear);
+
+ // Ascii string.
+ // Load the byte into the result register.
+ __ bind(&ascii);
+ __ movzx_b(result, FieldOperand(string,
+ index,
+ times_1,
+ SeqAsciiString::kHeaderSize));
+ __ bind(&done);
+}
+
+#undef __
+
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_IA32
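The generator above walks V8's string-representation lattice: indirect strings (slices and cons strings) are first reduced to their underlying string, then the load dispatches on representation (sequential vs. external) and encoding (two-byte vs. ASCII; per the STATIC_ASSERTs, kTwoByteStringTag == 0, so a set encoding bit means ASCII here). A compilable sketch of that final dispatch, with hypothetical bit values standing in for the real masks in src/objects.h:

  // Sketch only: mask values are illustrative, the real ones live in
  // src/objects.h. Indirect strings are assumed already unwrapped (slice:
  // offset added to index; cons: only flat cons strings, i.e. ones with an
  // empty second part, stay on the fast path).
  const int kStringRepresentationMask = 1 << 0;  // 0 => sequential
  const int kShortExternalStringMask  = 1 << 1;
  const int kStringEncodingMask       = 1 << 2;  // 0 => two-byte

  enum CharLoadPath { kSeqTwoByte, kSeqAscii, kExtTwoByte, kExtAscii, kRuntime };

  CharLoadPath Classify(int instance_type) {
    if (instance_type & kStringRepresentationMask) {             // external
      if (instance_type & kShortExternalStringMask) return kRuntime;
      return (instance_type & kStringEncodingMask) ? kExtAscii : kExtTwoByte;
    }                                                            // sequential
    return (instance_type & kStringEncodingMask) ? kSeqAscii : kSeqTwoByte;
  }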
diff --git a/deps/v8/src/ia32/codegen-ia32.h b/deps/v8/src/ia32/codegen-ia32.h
index c85fa83e9..f4ab0b50f 100644
--- a/deps/v8/src/ia32/codegen-ia32.h
+++ b/deps/v8/src/ia32/codegen-ia32.h
@@ -72,6 +72,22 @@ class CodeGenerator {
};
+class StringCharLoadGenerator : public AllStatic {
+ public:
+ // Generates the code for handling different string types and loading the
+ // indexed character into |result|. We expect |index| as untagged input and
+ // |result| as untagged output.
+ static void Generate(MacroAssembler* masm,
+ Factory* factory,
+ Register string,
+ Register index,
+ Register result,
+ Label* call_runtime);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(StringCharLoadGenerator);
+};
+
} } // namespace v8::internal
#endif // V8_IA32_CODEGEN_IA32_H_
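A hypothetical call site for the new helper (register assignments are illustrative; the contract is the comment above: untagged index in, untagged character code out):

  Label call_runtime;
  __ SmiUntag(ebx);                    // |index| must arrive untagged
  StringCharLoadGenerator::Generate(
      masm, masm->isolate()->factory(),
      edx /* string */, ebx /* index */, eax /* result */, &call_runtime);
  // eax now holds the character code; &call_runtime is taken for short
  // external strings and for cons strings that still need flattening.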
diff --git a/deps/v8/src/ia32/debug-ia32.cc b/deps/v8/src/ia32/debug-ia32.cc
index 238994886..264956078 100644
--- a/deps/v8/src/ia32/debug-ia32.cc
+++ b/deps/v8/src/ia32/debug-ia32.cc
@@ -100,63 +100,64 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
RegList non_object_regs,
bool convert_call_to_jmp) {
// Enter an internal frame.
- __ EnterInternalFrame();
-
- // Store the registers containing live values on the expression stack to
- // make sure that these are correctly updated during GC. Non object values
- // are stored as a smi causing it to be untouched by GC.
- ASSERT((object_regs & ~kJSCallerSaved) == 0);
- ASSERT((non_object_regs & ~kJSCallerSaved) == 0);
- ASSERT((object_regs & non_object_regs) == 0);
- for (int i = 0; i < kNumJSCallerSaved; i++) {
- int r = JSCallerSavedCode(i);
- Register reg = { r };
- if ((object_regs & (1 << r)) != 0) {
- __ push(reg);
- }
- if ((non_object_regs & (1 << r)) != 0) {
- if (FLAG_debug_code) {
- __ test(reg, Immediate(0xc0000000));
- __ Assert(zero, "Unable to encode value as smi");
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Store the registers containing live values on the expression stack to
+ // make sure that these are correctly updated during GC. Non-object values
+ // are stored as smis, causing them to be untouched by GC.

+ ASSERT((object_regs & ~kJSCallerSaved) == 0);
+ ASSERT((non_object_regs & ~kJSCallerSaved) == 0);
+ ASSERT((object_regs & non_object_regs) == 0);
+ for (int i = 0; i < kNumJSCallerSaved; i++) {
+ int r = JSCallerSavedCode(i);
+ Register reg = { r };
+ if ((object_regs & (1 << r)) != 0) {
+ __ push(reg);
+ }
+ if ((non_object_regs & (1 << r)) != 0) {
+ if (FLAG_debug_code) {
+ __ test(reg, Immediate(0xc0000000));
+ __ Assert(zero, "Unable to encode value as smi");
+ }
+ __ SmiTag(reg);
+ __ push(reg);
}
- __ SmiTag(reg);
- __ push(reg);
}
- }
#ifdef DEBUG
- __ RecordComment("// Calling from debug break to runtime - come in - over");
+ __ RecordComment("// Calling from debug break to runtime - come in - over");
#endif
- __ Set(eax, Immediate(0)); // No arguments.
- __ mov(ebx, Immediate(ExternalReference::debug_break(masm->isolate())));
-
- CEntryStub ceb(1);
- __ CallStub(&ceb);
-
- // Restore the register values containing object pointers from the expression
- // stack.
- for (int i = kNumJSCallerSaved; --i >= 0;) {
- int r = JSCallerSavedCode(i);
- Register reg = { r };
- if (FLAG_debug_code) {
- __ Set(reg, Immediate(kDebugZapValue));
- }
- if ((object_regs & (1 << r)) != 0) {
- __ pop(reg);
- }
- if ((non_object_regs & (1 << r)) != 0) {
- __ pop(reg);
- __ SmiUntag(reg);
+ __ Set(eax, Immediate(0)); // No arguments.
+ __ mov(ebx, Immediate(ExternalReference::debug_break(masm->isolate())));
+
+ CEntryStub ceb(1);
+ __ CallStub(&ceb);
+
+ // Restore the register values containing object pointers from the
+ // expression stack.
+ for (int i = kNumJSCallerSaved; --i >= 0;) {
+ int r = JSCallerSavedCode(i);
+ Register reg = { r };
+ if (FLAG_debug_code) {
+ __ Set(reg, Immediate(kDebugZapValue));
+ }
+ if ((object_regs & (1 << r)) != 0) {
+ __ pop(reg);
+ }
+ if ((non_object_regs & (1 << r)) != 0) {
+ __ pop(reg);
+ __ SmiUntag(reg);
+ }
}
- }
- // Get rid of the internal frame.
- __ LeaveInternalFrame();
+ // Get rid of the internal frame.
+ }
// If this call did not replace a call but patched other code then there will
// be an unwanted return address left on the stack. Here we get rid of that.
if (convert_call_to_jmp) {
- __ add(Operand(esp), Immediate(kPointerSize));
+ __ add(esp, Immediate(kPointerSize));
}
// Now that the break point has been handled, resume normal execution by
@@ -243,12 +244,12 @@ void Debug::GenerateReturnDebugBreak(MacroAssembler* masm) {
}
-void Debug::GenerateStubNoRegistersDebugBreak(MacroAssembler* masm) {
+void Debug::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) {
// Register state for stub CallFunction (from CallFunctionStub in ic-ia32.cc).
// ----------- S t a t e -------------
- // No registers used on entry.
+ // -- edi: function
// -----------------------------------
- Generate_DebugBreakCallHelper(masm, 0, 0, false);
+ Generate_DebugBreakCallHelper(masm, edi.bit(), 0, false);
}
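The first hunk's mechanical change is worth spelling out: the matched EnterInternalFrame()/LeaveInternalFrame() pair becomes a block-scoped FrameScope, so frame exit can never be forgotten on an early path, and the assembler's has_frame bit stays accurate for stubs (such as the new write-barrier code) that assert on it. Roughly, the RAII helper has this shape (a simplified sketch, not the verbatim class):

  class FrameScope {
   public:
    FrameScope(MacroAssembler* masm, StackFrame::Type type)
        : masm_(masm), type_(type), old_has_frame_(masm->has_frame()) {
      masm->set_has_frame(true);              // stubs may now assert a frame
      if (type != StackFrame::MANUAL) masm->EnterFrame(type);
    }
    ~FrameScope() {
      if (type_ != StackFrame::MANUAL) masm_->LeaveFrame(type_);
      masm_->set_has_frame(old_has_frame_);
    }
   private:
    MacroAssembler* masm_;
    StackFrame::Type type_;
    bool old_has_frame_;
  };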
@@ -298,7 +299,7 @@ void Debug::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
__ lea(edx, FieldOperand(edx, Code::kHeaderSize));
// Re-run JSFunction, edi is function, esi is context.
- __ jmp(Operand(edx));
+ __ jmp(edx);
}
const bool Debug::kFrameDropperSupported = true;
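Why the debug-break helper smi-tags non-object registers before pushing them: on ia32 a smi is a 31-bit payload shifted left by one with a zero tag bit, so a tagged word is ignored by the GC. The debug-only test against 0xc0000000 checks that the top two bits are clear, which is exactly the condition for the tag/untag round trip to be lossless:

  // Illustration of the smi round trip guarded by the 0xc0000000 assert:
  uint32_t raw    = 0x1234;                // requires (raw & 0xc0000000) == 0
  uint32_t tagged = raw << 1;              // SmiTag: looks like a smi to GC
  uint32_t back   = (uint32_t)((int32_t)tagged >> 1);  // SmiUntag: 0x1234
  // With bit 30 or 31 set, the shift would lose information, hence the assert.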
diff --git a/deps/v8/src/ia32/deoptimizer-ia32.cc b/deps/v8/src/ia32/deoptimizer-ia32.cc
index e23f3e9ef..eeee4f2b7 100644
--- a/deps/v8/src/ia32/deoptimizer-ia32.cc
+++ b/deps/v8/src/ia32/deoptimizer-ia32.cc
@@ -45,16 +45,6 @@ int Deoptimizer::patch_size() {
}
-static void ZapCodeRange(Address start, Address end) {
-#ifdef DEBUG
- ASSERT(start <= end);
- int size = end - start;
- CodePatcher destroyer(start, size);
- while (size-- > 0) destroyer.masm()->int3();
-#endif
-}
-
-
void Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code) {
Isolate* isolate = code->GetIsolate();
HandleScope scope(isolate);
@@ -62,30 +52,23 @@ void Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code) {
// Compute the size of relocation information needed for the code
// patching in Deoptimizer::DeoptimizeFunction.
int min_reloc_size = 0;
- Address prev_reloc_address = code->instruction_start();
- Address code_start_address = code->instruction_start();
- SafepointTable table(*code);
- for (unsigned i = 0; i < table.length(); ++i) {
- Address curr_reloc_address = code_start_address + table.GetPcOffset(i);
- ASSERT_GE(curr_reloc_address, prev_reloc_address);
- SafepointEntry safepoint_entry = table.GetEntry(i);
- int deoptimization_index = safepoint_entry.deoptimization_index();
- if (deoptimization_index != Safepoint::kNoDeoptimizationIndex) {
- // The gap code is needed to get to the state expected at the
- // bailout and we need to skip the call opcode to get to the
- // address that needs reloc.
- curr_reloc_address += safepoint_entry.gap_code_size() + 1;
- int pc_delta = curr_reloc_address - prev_reloc_address;
- // We use RUNTIME_ENTRY reloc info which has a size of 2 bytes
- // if encodable with small pc delta encoding and up to 6 bytes
- // otherwise.
- if (pc_delta <= RelocInfo::kMaxSmallPCDelta) {
- min_reloc_size += 2;
- } else {
- min_reloc_size += 6;
- }
- prev_reloc_address = curr_reloc_address;
+ int prev_pc_offset = 0;
+ DeoptimizationInputData* deopt_data =
+ DeoptimizationInputData::cast(code->deoptimization_data());
+ for (int i = 0; i < deopt_data->DeoptCount(); i++) {
+ int pc_offset = deopt_data->Pc(i)->value();
+ if (pc_offset == -1) continue;
+ ASSERT_GE(pc_offset, prev_pc_offset);
+ int pc_delta = pc_offset - prev_pc_offset;
+ // We use RUNTIME_ENTRY reloc info which has a size of 2 bytes
+ // if encodable with small pc delta encoding and up to 6 bytes
+ // otherwise.
+ if (pc_delta <= RelocInfo::kMaxSmallPCDelta) {
+ min_reloc_size += 2;
+ } else {
+ min_reloc_size += 6;
}
+ prev_pc_offset = pc_offset;
}
// If the relocation information is not big enough we create a new
@@ -116,7 +99,7 @@ void Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code) {
new_reloc->GetDataStartAddress() + padding, 0);
intptr_t comment_string
= reinterpret_cast<intptr_t>(RelocInfo::kFillerCommentString);
- RelocInfo rinfo(0, RelocInfo::COMMENT, comment_string);
+ RelocInfo rinfo(0, RelocInfo::COMMENT, comment_string, NULL);
for (int i = 0; i < additional_comments; ++i) {
#ifdef DEBUG
byte* pos_before = reloc_info_writer.pos();
@@ -150,40 +133,41 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
Address reloc_end_address = reloc_info->address() + reloc_info->Size();
RelocInfoWriter reloc_info_writer(reloc_end_address, code_start_address);
- // For each return after a safepoint insert a call to the corresponding
- // deoptimization entry. Since the call is a relative encoding, write new
+ // For each LLazyBailout instruction insert a call to the corresponding
+ // deoptimization entry.
+
+ // Since the call is a relative encoding, write new
// reloc info. We do not need any of the existing reloc info because the
// existing code will not be used again (we zap it in debug builds).
- SafepointTable table(code);
- Address prev_address = code_start_address;
- for (unsigned i = 0; i < table.length(); ++i) {
- Address curr_address = code_start_address + table.GetPcOffset(i);
- ASSERT_GE(curr_address, prev_address);
- ZapCodeRange(prev_address, curr_address);
-
- SafepointEntry safepoint_entry = table.GetEntry(i);
- int deoptimization_index = safepoint_entry.deoptimization_index();
- if (deoptimization_index != Safepoint::kNoDeoptimizationIndex) {
- // The gap code is needed to get to the state expected at the bailout.
- curr_address += safepoint_entry.gap_code_size();
-
- CodePatcher patcher(curr_address, patch_size());
- Address deopt_entry = GetDeoptimizationEntry(deoptimization_index, LAZY);
- patcher.masm()->call(deopt_entry, RelocInfo::NONE);
-
- // We use RUNTIME_ENTRY for deoptimization bailouts.
- RelocInfo rinfo(curr_address + 1, // 1 after the call opcode.
- RelocInfo::RUNTIME_ENTRY,
- reinterpret_cast<intptr_t>(deopt_entry));
- reloc_info_writer.Write(&rinfo);
- ASSERT_GE(reloc_info_writer.pos(),
- reloc_info->address() + ByteArray::kHeaderSize);
- curr_address += patch_size();
- }
- prev_address = curr_address;
+ //
+ // Emit call to lazy deoptimization at all lazy deopt points.
+ DeoptimizationInputData* deopt_data =
+ DeoptimizationInputData::cast(code->deoptimization_data());
+#ifdef DEBUG
+ Address prev_call_address = NULL;
+#endif
+ for (int i = 0; i < deopt_data->DeoptCount(); i++) {
+ if (deopt_data->Pc(i)->value() == -1) continue;
+ // Patch lazy deoptimization entry.
+ Address call_address = code_start_address + deopt_data->Pc(i)->value();
+ CodePatcher patcher(call_address, patch_size());
+ Address deopt_entry = GetDeoptimizationEntry(i, LAZY);
+ patcher.masm()->call(deopt_entry, RelocInfo::NONE);
+ // We use RUNTIME_ENTRY for deoptimization bailouts.
+ RelocInfo rinfo(call_address + 1, // 1 after the call opcode.
+ RelocInfo::RUNTIME_ENTRY,
+ reinterpret_cast<intptr_t>(deopt_entry),
+ NULL);
+ reloc_info_writer.Write(&rinfo);
+ ASSERT_GE(reloc_info_writer.pos(),
+ reloc_info->address() + ByteArray::kHeaderSize);
+ ASSERT(prev_call_address == NULL ||
+ call_address >= prev_call_address + patch_size());
+ ASSERT(call_address + patch_size() <= code->instruction_end());
+#ifdef DEBUG
+ prev_call_address = call_address;
+#endif
}
- ZapCodeRange(prev_address,
- code_start_address + code->safepoint_table_offset());
// Move the relocation info to the beginning of the byte array.
int new_reloc_size = reloc_end_address - reloc_info_writer.pos();
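Two details of this patching scheme are easy to miss. First, patch_size() on ia32 covers a near call, 5 bytes (opcode 0xE8 followed by a 32-bit relative target), which is why the RUNTIME_ENTRY reloc entry is written at call_address + 1: that is where the rel32 operand starts. Second, EnsureRelocSpaceForLazyDeoptimization above sizes the reloc info from the same deopt table, so the two loops agree by construction:

  // Worked example of the reloc sizing (threshold symbolic, see
  // RelocInfo::kMaxSmallPCDelta): for lazy-deopt pcs whose successive
  // deltas are {12, 28, huge}, the first two fit the small encoding
  // (2 bytes each) and the last needs the long form (6 bytes), so
  //   min_reloc_size = 2 + 2 + 6 = 10 bytes.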
@@ -205,6 +189,11 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
node->set_next(data->deoptimizing_code_list_);
data->deoptimizing_code_list_ = node;
+ // We might be in the middle of incremental marking with compaction.
+ // Tell collector to treat this code object in a special way and
+ // ignore all slots that might have been recorded on it.
+ isolate->heap()->mark_compact_collector()->InvalidateCode(code);
+
// Set the code for the function to non-optimized version.
function->ReplaceCode(function->shared()->code());
@@ -212,16 +201,12 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
PrintF("[forced deoptimization: ");
function->PrintName();
PrintF(" / %x]\n", reinterpret_cast<uint32_t>(function));
-#ifdef DEBUG
- if (FLAG_print_code) {
- code->PrintLn();
- }
-#endif
}
}
-void Deoptimizer::PatchStackCheckCodeAt(Address pc_after,
+void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code,
+ Address pc_after,
Code* check_code,
Code* replacement_code) {
Address call_target_address = pc_after - kIntSize;
@@ -250,10 +235,14 @@ void Deoptimizer::PatchStackCheckCodeAt(Address pc_after,
*(call_target_address - 2) = 0x90; // nop
Assembler::set_target_address_at(call_target_address,
replacement_code->entry());
+
+ unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
+ unoptimized_code, call_target_address, replacement_code);
}
-void Deoptimizer::RevertStackCheckCodeAt(Address pc_after,
+void Deoptimizer::RevertStackCheckCodeAt(Code* unoptimized_code,
+ Address pc_after,
Code* check_code,
Code* replacement_code) {
Address call_target_address = pc_after - kIntSize;
@@ -268,6 +257,9 @@ void Deoptimizer::RevertStackCheckCodeAt(Address pc_after,
*(call_target_address - 2) = 0x07; // offset
Assembler::set_target_address_at(call_target_address,
check_code->entry());
+
+ check_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
+ unoptimized_code, call_target_address, check_code);
}
@@ -415,7 +407,14 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
output_[0]->SetPc(reinterpret_cast<uint32_t>(from_));
} else {
// Setup the frame pointer and the context pointer.
- output_[0]->SetRegister(ebp.code(), input_->GetRegister(ebp.code()));
+ // All OSR stack frames are dynamically aligned to an 8-byte boundary.
+ int frame_pointer = input_->GetRegister(ebp.code());
+ if ((frame_pointer & 0x4) == 0) {
+ // Return address at FP + 4 should be aligned, so FP mod 8 should be 4.
+ frame_pointer -= kPointerSize;
+ has_alignment_padding_ = 1;
+ }
+ output_[0]->SetRegister(ebp.code(), frame_pointer);
output_[0]->SetRegister(esi.code(), input_->GetRegister(esi.code()));
unsigned pc_offset = data->OsrPcOffset()->value();
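The alignment fix is pure arithmetic: on ia32 (kPointerSize == 4) the return address sits at fp + 4 and should land on an 8-byte boundary, so the frame pointer itself must satisfy fp % 8 == 4. A minimal sketch of the adjustment performed above:

  // Sketch of the OSR frame-pointer alignment rule (ia32, kPointerSize == 4).
  uint32_t AlignOsrFp(uint32_t fp, int* has_alignment_padding) {
    if ((fp & 4) == 0) {           // fp % 8 == 0, so fp + 4 is misaligned
      fp -= 4;                     // slide down one slot: now fp % 8 == 4
      *has_alignment_padding = 1;  // the extra word is popped on deopt exit
    }
    return fp;
  }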
@@ -480,9 +479,11 @@ void Deoptimizer::DoComputeFrame(TranslationIterator* iterator,
// top address and the current frame's size.
uint32_t top_address;
if (is_bottommost) {
- // 2 = context and function in the frame.
- top_address =
- input_->GetRegister(ebp.code()) - (2 * kPointerSize) - height_in_bytes;
+ // If the optimized frame had alignment padding, adjust the frame pointer
+ // to point to the new position of the old frame pointer after padding
+ // is removed. Subtract 2 * kPointerSize for the context and function slots.
+ top_address = input_->GetRegister(ebp.code()) - (2 * kPointerSize) -
+ height_in_bytes + has_alignment_padding_ * kPointerSize;
} else {
top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
}
@@ -533,7 +534,9 @@ void Deoptimizer::DoComputeFrame(TranslationIterator* iterator,
}
output_frame->SetFrameSlot(output_offset, value);
intptr_t fp_value = top_address + output_offset;
- ASSERT(!is_bottommost || input_->GetRegister(ebp.code()) == fp_value);
+ ASSERT(!is_bottommost ||
+ input_->GetRegister(ebp.code()) + has_alignment_padding_ * kPointerSize
+ == fp_value);
output_frame->SetFp(fp_value);
if (is_topmost) output_frame->SetRegister(ebp.code(), fp_value);
if (FLAG_trace_deopt) {
@@ -638,7 +641,7 @@ void Deoptimizer::EntryGenerator::Generate() {
const int kDoubleRegsSize = kDoubleSize *
XMMRegister::kNumAllocatableRegisters;
- __ sub(Operand(esp), Immediate(kDoubleRegsSize));
+ __ sub(esp, Immediate(kDoubleRegsSize));
for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
int offset = i * kDoubleSize;
@@ -662,7 +665,7 @@ void Deoptimizer::EntryGenerator::Generate() {
__ mov(ecx, Operand(esp, kSavedRegistersAreaSize + 1 * kPointerSize));
__ lea(edx, Operand(esp, kSavedRegistersAreaSize + 2 * kPointerSize));
}
- __ sub(edx, Operand(ebp));
+ __ sub(edx, ebp);
__ neg(edx);
// Allocate a new deoptimizer object.
@@ -675,7 +678,10 @@ void Deoptimizer::EntryGenerator::Generate() {
__ mov(Operand(esp, 4 * kPointerSize), edx); // Fp-to-sp delta.
__ mov(Operand(esp, 5 * kPointerSize),
Immediate(ExternalReference::isolate_address()));
- __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 6);
+ {
+ AllowExternalCallThatCantCauseGC scope(masm());
+ __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 6);
+ }
// Preserve deoptimizer object in register eax and get the input
// frame descriptor pointer.
@@ -698,15 +704,15 @@ void Deoptimizer::EntryGenerator::Generate() {
// Remove the bailout id and the double registers from the stack.
if (type() == EAGER) {
- __ add(Operand(esp), Immediate(kDoubleRegsSize + kPointerSize));
+ __ add(esp, Immediate(kDoubleRegsSize + kPointerSize));
} else {
- __ add(Operand(esp), Immediate(kDoubleRegsSize + 2 * kPointerSize));
+ __ add(esp, Immediate(kDoubleRegsSize + 2 * kPointerSize));
}
// Compute a pointer to the unwinding limit in register ecx; that is
// the first stack slot not part of the input frame.
__ mov(ecx, Operand(ebx, FrameDescription::frame_size_offset()));
- __ add(ecx, Operand(esp));
+ __ add(ecx, esp);
// Unwind the stack down to - but not including - the unwinding
// limit and copy the contents of the activation frame to the input
@@ -715,18 +721,43 @@ void Deoptimizer::EntryGenerator::Generate() {
Label pop_loop;
__ bind(&pop_loop);
__ pop(Operand(edx, 0));
- __ add(Operand(edx), Immediate(sizeof(uint32_t)));
- __ cmp(ecx, Operand(esp));
+ __ add(edx, Immediate(sizeof(uint32_t)));
+ __ cmp(ecx, esp);
__ j(not_equal, &pop_loop);
+ // If frame was dynamically aligned, pop padding.
+ Label sentinel, sentinel_done;
+ __ pop(ecx);
+ __ cmp(ecx, Operand(eax, Deoptimizer::frame_alignment_marker_offset()));
+ __ j(equal, &sentinel);
+ __ push(ecx);
+ __ jmp(&sentinel_done);
+ __ bind(&sentinel);
+ __ mov(Operand(eax, Deoptimizer::has_alignment_padding_offset()),
+ Immediate(1));
+ __ bind(&sentinel_done);
// Compute the output frame in the deoptimizer.
__ push(eax);
__ PrepareCallCFunction(1, ebx);
__ mov(Operand(esp, 0 * kPointerSize), eax);
- __ CallCFunction(
- ExternalReference::compute_output_frames_function(isolate), 1);
+ {
+ AllowExternalCallThatCantCauseGC scope(masm());
+ __ CallCFunction(
+ ExternalReference::compute_output_frames_function(isolate), 1);
+ }
__ pop(eax);
+ if (type() == OSR) {
+ // If alignment padding is added, push the sentinel.
+ Label no_osr_padding;
+ __ cmp(Operand(eax, Deoptimizer::has_alignment_padding_offset()),
+ Immediate(0));
+ __ j(equal, &no_osr_padding, Label::kNear);
+ __ push(Operand(eax, Deoptimizer::frame_alignment_marker_offset()));
+ __ bind(&no_osr_padding);
+ }
+
+
// Replace the current frame with the output frames.
Label outer_push_loop, inner_push_loop;
// Outer loop state: eax = current FrameDescription**, edx = one past the
@@ -739,12 +770,12 @@ void Deoptimizer::EntryGenerator::Generate() {
__ mov(ebx, Operand(eax, 0));
__ mov(ecx, Operand(ebx, FrameDescription::frame_size_offset()));
__ bind(&inner_push_loop);
- __ sub(Operand(ecx), Immediate(sizeof(uint32_t)));
+ __ sub(ecx, Immediate(sizeof(uint32_t)));
__ push(Operand(ebx, ecx, times_1, FrameDescription::frame_content_offset()));
- __ test(ecx, Operand(ecx));
+ __ test(ecx, ecx);
__ j(not_zero, &inner_push_loop);
- __ add(Operand(eax), Immediate(kPointerSize));
- __ cmp(eax, Operand(edx));
+ __ add(eax, Immediate(kPointerSize));
+ __ cmp(eax, edx);
__ j(below, &outer_push_loop);
// In case of OSR, we have to restore the XMM registers.
diff --git a/deps/v8/src/ia32/disasm-ia32.cc b/deps/v8/src/ia32/disasm-ia32.cc
index a936277b2..da2239011 100644
--- a/deps/v8/src/ia32/disasm-ia32.cc
+++ b/deps/v8/src/ia32/disasm-ia32.cc
@@ -55,6 +55,7 @@ struct ByteMnemonic {
static const ByteMnemonic two_operands_instr[] = {
+ {0x01, "add", OPER_REG_OP_ORDER},
{0x03, "add", REG_OPER_OP_ORDER},
{0x09, "or", OPER_REG_OP_ORDER},
{0x0B, "or", REG_OPER_OP_ORDER},
@@ -117,6 +118,19 @@ static const ByteMnemonic short_immediate_instr[] = {
};
+// Generally we don't want to generate these because they are subject to partial
+// register stalls. They are included for completeness and because the cmp
+// variant is used by the RecordWrite stub. Because cmp does not update the
+// register, it is not subject to partial register stalls.
+static ByteMnemonic byte_immediate_instr[] = {
+ {0x0c, "or", UNSET_OP_ORDER},
+ {0x24, "and", UNSET_OP_ORDER},
+ {0x34, "xor", UNSET_OP_ORDER},
+ {0x3c, "cmp", UNSET_OP_ORDER},
+ {-1, "", UNSET_OP_ORDER}
+};
+
+
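These entries all have the one-byte "op al, imm8" form, so the decoder consumes exactly two bytes. A worked example with the new 0x3c entry:

  // bytes:  3C 2A
  // output: "cmp al, 0x2a"   (data += 2, as in the BYTE_IMMEDIATE_INSTR case)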
static const char* const jump_conditional_mnem[] = {
/*0*/ "jo", "jno", "jc", "jnc",
/*4*/ "jz", "jnz", "jna", "ja",
@@ -149,7 +163,8 @@ enum InstructionType {
REGISTER_INSTR,
MOVE_REG_INSTR,
CALL_JUMP_INSTR,
- SHORT_IMMEDIATE_INSTR
+ SHORT_IMMEDIATE_INSTR,
+ BYTE_IMMEDIATE_INSTR
};
@@ -164,6 +179,10 @@ class InstructionTable {
public:
InstructionTable();
const InstructionDesc& Get(byte x) const { return instructions_[x]; }
+ static InstructionTable* get_instance() {
+ static InstructionTable table;
+ return &table;
+ }
private:
InstructionDesc instructions_[256];
@@ -198,6 +217,7 @@ void InstructionTable::Init() {
CopyTable(zero_operands_instr, ZERO_OPERANDS_INSTR);
CopyTable(call_jump_instr, CALL_JUMP_INSTR);
CopyTable(short_immediate_instr, SHORT_IMMEDIATE_INSTR);
+ CopyTable(byte_immediate_instr, BYTE_IMMEDIATE_INSTR);
AddJumpConditionalShort();
SetTableRange(REGISTER_INSTR, 0x40, 0x47, "inc");
SetTableRange(REGISTER_INSTR, 0x48, 0x4F, "dec");
@@ -243,15 +263,13 @@ void InstructionTable::AddJumpConditionalShort() {
}
-static InstructionTable instruction_table;
-
-
// The IA32 disassembler implementation.
class DisassemblerIA32 {
public:
DisassemblerIA32(const NameConverter& converter,
bool abort_on_unimplemented = true)
: converter_(converter),
+ instruction_table_(InstructionTable::get_instance()),
tmp_buffer_pos_(0),
abort_on_unimplemented_(abort_on_unimplemented) {
tmp_buffer_[0] = '\0';
@@ -265,11 +283,11 @@ class DisassemblerIA32 {
private:
const NameConverter& converter_;
+ InstructionTable* instruction_table_;
v8::internal::EmbeddedVector<char, 128> tmp_buffer_;
unsigned int tmp_buffer_pos_;
bool abort_on_unimplemented_;
-
enum {
eax = 0,
ecx = 1,
@@ -868,7 +886,7 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
}
bool processed = true; // Will be set to false if the current instruction
// is not in 'instructions' table.
- const InstructionDesc& idesc = instruction_table.Get(*data);
+ const InstructionDesc& idesc = instruction_table_->Get(*data);
switch (idesc.type) {
case ZERO_OPERANDS_INSTR:
AppendToBuffer(idesc.mnem);
@@ -912,6 +930,12 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
break;
}
+ case BYTE_IMMEDIATE_INSTR: {
+ AppendToBuffer("%s al, 0x%x", idesc.mnem, data[1]);
+ data += 2;
+ break;
+ }
+
case NO_INSTR:
processed = false;
break;
@@ -1346,11 +1370,6 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
data += 2;
break;
- case 0x2C:
- AppendToBuffer("subb eax,0x%x", *reinterpret_cast<uint8_t*>(data+1));
- data += 2;
- break;
-
case 0xA9:
AppendToBuffer("test eax,0x%x", *reinterpret_cast<int32_t*>(data+1));
data += 5;
diff --git a/deps/v8/src/ia32/frames-ia32.h b/deps/v8/src/ia32/frames-ia32.h
index 2f1b2a96d..45b847aec 100644
--- a/deps/v8/src/ia32/frames-ia32.h
+++ b/deps/v8/src/ia32/frames-ia32.h
@@ -34,37 +34,37 @@ namespace internal {
// Register lists
// Note that the bit values must match those used in actual instruction encoding
-static const int kNumRegs = 8;
+const int kNumRegs = 8;
// Caller-saved registers
-static const RegList kJSCallerSaved =
+const RegList kJSCallerSaved =
1 << 0 | // eax
1 << 1 | // ecx
1 << 2 | // edx
1 << 3 | // ebx - used as a caller-saved register in JavaScript code
1 << 7; // edi - callee function
-static const int kNumJSCallerSaved = 5;
+const int kNumJSCallerSaved = 5;
typedef Object* JSCallerSavedBuffer[kNumJSCallerSaved];
// Number of registers for which space is reserved in safepoints.
-static const int kNumSafepointRegisters = 8;
+const int kNumSafepointRegisters = 8;
// ----------------------------------------------------
class StackHandlerConstants : public AllStatic {
public:
- static const int kNextOffset = 0 * kPointerSize;
- static const int kContextOffset = 1 * kPointerSize;
- static const int kFPOffset = 2 * kPointerSize;
- static const int kStateOffset = 3 * kPointerSize;
- static const int kPCOffset = 4 * kPointerSize;
+ static const int kNextOffset = 0 * kPointerSize;
+ static const int kCodeOffset = 1 * kPointerSize;
+ static const int kStateOffset = 2 * kPointerSize;
+ static const int kContextOffset = 3 * kPointerSize;
+ static const int kFPOffset = 4 * kPointerSize;
- static const int kSize = kPCOffset + kPointerSize;
+ static const int kSize = kFPOffset + kPointerSize;
};
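With kPointerSize == 4 the reordered handler is five words; it now records a code object rather than a raw PC, and the saved frame pointer moves to the top slot:

  // StackHandler layout on ia32 after this change (offsets from the base):
  //   +0   next      (kNextOffset)
  //   +4   code      (kCodeOffset)
  //   +8   state     (kStateOffset)
  //   +12  context   (kContextOffset)
  //   +16  fp        (kFPOffset)
  //   kSize = kFPOffset + kPointerSize = 20 bytes per handler.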
diff --git a/deps/v8/src/ia32/full-codegen-ia32.cc b/deps/v8/src/ia32/full-codegen-ia32.cc
index ca6ce6e31..ef4f0c5f2 100644
--- a/deps/v8/src/ia32/full-codegen-ia32.cc
+++ b/deps/v8/src/ia32/full-codegen-ia32.cc
@@ -44,11 +44,6 @@ namespace internal {
#define __ ACCESS_MASM(masm_)
-static unsigned GetPropertyId(Property* property) {
- return property->id();
-}
-
-
class JumpPatchSite BASE_EMBEDDED {
public:
explicit JumpPatchSite(MacroAssembler* masm) : masm_(masm) {
@@ -122,6 +117,8 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
ASSERT(info_ == NULL);
info_ = info;
scope_ = info->scope();
+ handler_table_ =
+ isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED);
SetFunctionPosition(function());
Comment cmnt(masm_, "[ function compiled by full code generator");
@@ -136,17 +133,26 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
// with undefined when called as functions (without an explicit
// receiver object). ecx is zero for method calls and non-zero for
// function calls.
- if (info->is_strict_mode() || info->is_native()) {
+ if (!info->is_classic_mode() || info->is_native()) {
Label ok;
- __ test(ecx, Operand(ecx));
+ __ test(ecx, ecx);
__ j(zero, &ok, Label::kNear);
// +1 for return address.
int receiver_offset = (info->scope()->num_parameters() + 1) * kPointerSize;
+ __ mov(ecx, Operand(esp, receiver_offset));
+ __ JumpIfSmi(ecx, &ok);
+ __ CmpObjectType(ecx, JS_GLOBAL_PROXY_TYPE, ecx);
+ __ j(not_equal, &ok, Label::kNear);
__ mov(Operand(esp, receiver_offset),
Immediate(isolate()->factory()->undefined_value()));
__ bind(&ok);
}
+ // Open a frame scope to indicate that there is a frame on the stack. The
+ // MANUAL indicates that the scope shouldn't actually generate code to set up
+ // the frame (that is done below).
+ FrameScope frame_scope(masm_, StackFrame::MANUAL);
+
__ push(ebp); // Caller's frame pointer.
__ mov(ebp, esp);
__ push(esi); // Callee's context.
@@ -164,11 +170,6 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
}
}
- set_stack_height(2 + scope()->num_stack_slots());
- if (FLAG_verify_stack_height) {
- verify_stack_height();
- }
-
bool function_in_register = true;
// Possibly allocate a local context.
@@ -200,11 +201,12 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
// Store it in the context.
int context_offset = Context::SlotOffset(var->index());
__ mov(Operand(esi, context_offset), eax);
- // Update the write barrier. This clobbers all involved
- // registers, so we have use a third register to avoid
- // clobbering esi.
- __ mov(ecx, esi);
- __ RecordWrite(ecx, context_offset, eax, ebx);
+ // Update the write barrier. This clobbers eax and ebx.
+ __ RecordWriteContextSlot(esi,
+ context_offset,
+ eax,
+ ebx,
+ kDontSaveFPRegs);
}
}
}
@@ -230,7 +232,7 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
// The stub will rewrite receiver and parameter count if the previous
// stack frame was an arguments adapter frame.
ArgumentsAccessStub::Type type;
- if (is_strict_mode()) {
+ if (!is_classic_mode()) {
type = ArgumentsAccessStub::NEW_STRICT;
} else if (function()->has_duplicate_parameters()) {
type = ArgumentsAccessStub::NEW_NON_STRICT_SLOW;
@@ -260,7 +262,10 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
// constant.
if (scope()->is_function_scope() && scope()->function() != NULL) {
int ignored = 0;
- EmitDeclaration(scope()->function(), Variable::CONST, NULL, &ignored);
+ VariableProxy* proxy = scope()->function();
+ ASSERT(proxy->var()->mode() == CONST ||
+ proxy->var()->mode() == CONST_HARMONY);
+ EmitDeclaration(proxy, proxy->var()->mode(), NULL, &ignored);
}
VisitDeclarations(scope()->declarations());
}
@@ -363,15 +368,6 @@ void FullCodeGenerator::EmitReturnSequence() {
}
-void FullCodeGenerator::verify_stack_height() {
- ASSERT(FLAG_verify_stack_height);
- __ sub(Operand(ebp), Immediate(kPointerSize * stack_height()));
- __ cmp(ebp, Operand(esp));
- __ Assert(equal, "Full codegen stack height not as expected.");
- __ add(Operand(ebp), Immediate(kPointerSize * stack_height()));
-}
-
-
void FullCodeGenerator::EffectContext::Plug(Variable* var) const {
ASSERT(var->IsStackAllocated() || var->IsContextSlot());
}
@@ -388,14 +384,13 @@ void FullCodeGenerator::StackValueContext::Plug(Variable* var) const {
MemOperand operand = codegen()->VarOperand(var, result_register());
// Memory operands can be pushed directly.
__ push(operand);
- codegen()->increment_stack_height();
}
void FullCodeGenerator::TestContext::Plug(Variable* var) const {
// For simplicity we always test the accumulator register.
codegen()->GetVar(result_register(), var);
- codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
+ codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
codegen()->DoTest(this);
}
@@ -442,12 +437,11 @@ void FullCodeGenerator::StackValueContext::Plug(Handle<Object> lit) const {
} else {
__ push(Immediate(lit));
}
- codegen()->increment_stack_height();
}
void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
- codegen()->PrepareForBailoutBeforeSplit(TOS_REG,
+ codegen()->PrepareForBailoutBeforeSplit(condition(),
true,
true_label_,
false_label_);
@@ -480,7 +474,6 @@ void FullCodeGenerator::EffectContext::DropAndPlug(int count,
Register reg) const {
ASSERT(count > 0);
__ Drop(count);
- codegen()->decrement_stack_height(count);
}
@@ -490,7 +483,6 @@ void FullCodeGenerator::AccumulatorValueContext::DropAndPlug(
ASSERT(count > 0);
__ Drop(count);
__ Move(result_register(), reg);
- codegen()->decrement_stack_height(count);
}
@@ -499,7 +491,6 @@ void FullCodeGenerator::StackValueContext::DropAndPlug(int count,
ASSERT(count > 0);
if (count > 1) __ Drop(count - 1);
__ mov(Operand(esp, 0), reg);
- codegen()->decrement_stack_height(count - 1);
}
@@ -509,9 +500,8 @@ void FullCodeGenerator::TestContext::DropAndPlug(int count,
// For simplicity we always test the accumulator register.
__ Drop(count);
__ Move(result_register(), reg);
- codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
+ codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
codegen()->DoTest(this);
- codegen()->decrement_stack_height(count);
}
@@ -545,7 +535,6 @@ void FullCodeGenerator::StackValueContext::Plug(
__ bind(materialize_false);
__ push(Immediate(isolate()->factory()->false_value()));
__ bind(&done);
- codegen()->increment_stack_height();
}
@@ -573,12 +562,11 @@ void FullCodeGenerator::StackValueContext::Plug(bool flag) const {
? isolate()->factory()->true_value()
: isolate()->factory()->false_value();
__ push(Immediate(value));
- codegen()->increment_stack_height();
}
void FullCodeGenerator::TestContext::Plug(bool flag) const {
- codegen()->PrepareForBailoutBeforeSplit(TOS_REG,
+ codegen()->PrepareForBailoutBeforeSplit(condition(),
true,
true_label_,
false_label_);
@@ -597,7 +585,7 @@ void FullCodeGenerator::DoTest(Expression* condition,
ToBooleanStub stub(result_register());
__ push(result_register());
__ CallStub(&stub, condition->test_id());
- __ test(result_register(), Operand(result_register()));
+ __ test(result_register(), result_register());
// The stub returns nonzero for true.
Split(not_zero, if_true, if_false, fall_through);
}
@@ -661,16 +649,17 @@ void FullCodeGenerator::SetVar(Variable* var,
ASSERT(!scratch1.is(src));
MemOperand location = VarOperand(var, scratch0);
__ mov(location, src);
+
// Emit the write barrier code if the location is in the heap.
if (var->IsContextSlot()) {
int offset = Context::SlotOffset(var->index());
ASSERT(!scratch0.is(esi) && !src.is(esi) && !scratch1.is(esi));
- __ RecordWrite(scratch0, offset, src, scratch1);
+ __ RecordWriteContextSlot(scratch0, offset, src, scratch1, kDontSaveFPRegs);
}
}
-void FullCodeGenerator::PrepareForBailoutBeforeSplit(State state,
+void FullCodeGenerator::PrepareForBailoutBeforeSplit(Expression* expr,
bool should_normalize,
Label* if_true,
Label* if_false) {
@@ -681,13 +670,7 @@ void FullCodeGenerator::PrepareForBailoutBeforeSplit(State state,
Label skip;
if (should_normalize) __ jmp(&skip, Label::kNear);
-
- ForwardBailoutStack* current = forward_bailout_stack_;
- while (current != NULL) {
- PrepareForBailout(current->expr(), state);
- current = current->parent();
- }
-
+ PrepareForBailout(expr, TOS_REG);
if (should_normalize) {
__ cmp(eax, isolate()->factory()->true_value());
Split(equal, if_true, if_false, NULL);
@@ -697,13 +680,15 @@ void FullCodeGenerator::PrepareForBailoutBeforeSplit(State state,
void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
- Variable::Mode mode,
+ VariableMode mode,
FunctionLiteral* function,
int* global_count) {
// If it was not possible to allocate the variable at compile time, we
// need to "declare" it at runtime to make sure it actually exists in the
// local context.
Variable* variable = proxy->var();
+ bool binding_needs_init = (function == NULL) &&
+ (mode == CONST || mode == CONST_HARMONY || mode == LET);
switch (variable->location()) {
case Variable::UNALLOCATED:
++(*global_count);
@@ -715,7 +700,7 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
Comment cmnt(masm_, "[ Declaration");
VisitForAccumulatorValue(function);
__ mov(StackOperand(variable), result_register());
- } else if (mode == Variable::CONST || mode == Variable::LET) {
+ } else if (binding_needs_init) {
Comment cmnt(masm_, "[ Declaration");
__ mov(StackOperand(variable),
Immediate(isolate()->factory()->the_hole_value()));
@@ -738,11 +723,16 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
Comment cmnt(masm_, "[ Declaration");
VisitForAccumulatorValue(function);
__ mov(ContextOperand(esi, variable->index()), result_register());
- int offset = Context::SlotOffset(variable->index());
- __ mov(ebx, esi);
- __ RecordWrite(ebx, offset, result_register(), ecx);
+ // We know that we have written a function, which is not a smi.
+ __ RecordWriteContextSlot(esi,
+ Context::SlotOffset(variable->index()),
+ result_register(),
+ ecx,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
- } else if (mode == Variable::CONST || mode == Variable::LET) {
+ } else if (binding_needs_init) {
Comment cmnt(masm_, "[ Declaration");
__ mov(ContextOperand(esi, variable->index()),
Immediate(isolate()->factory()->the_hole_value()));
@@ -755,28 +745,26 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
Comment cmnt(masm_, "[ Declaration");
__ push(esi);
__ push(Immediate(variable->name()));
- // Declaration nodes are always introduced in one of three modes.
- ASSERT(mode == Variable::VAR ||
- mode == Variable::CONST ||
- mode == Variable::LET);
- PropertyAttributes attr = (mode == Variable::CONST) ? READ_ONLY : NONE;
+ // Declaration nodes are always introduced in one of four modes.
+ ASSERT(mode == VAR ||
+ mode == CONST ||
+ mode == CONST_HARMONY ||
+ mode == LET);
+ PropertyAttributes attr = (mode == CONST || mode == CONST_HARMONY)
+ ? READ_ONLY : NONE;
__ push(Immediate(Smi::FromInt(attr)));
// Push initial value, if any.
// Note: For variables we must not push an initial value (such as
// 'undefined') because we may have a (legal) redeclaration and we
// must not destroy the current value.
- increment_stack_height(3);
if (function != NULL) {
VisitForStackValue(function);
- } else if (mode == Variable::CONST || mode == Variable::LET) {
+ } else if (binding_needs_init) {
__ push(Immediate(isolate()->factory()->the_hole_value()));
- increment_stack_height();
} else {
__ push(Immediate(Smi::FromInt(0))); // Indicates no initial value.
- increment_stack_height();
}
__ CallRuntime(Runtime::kDeclareContextSlot, 4);
- decrement_stack_height(4);
break;
}
}
@@ -801,7 +789,6 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
Breakable nested_statement(this, stmt);
SetStatementPosition(stmt);
- int switch_clause_stack_height = stack_height();
// Keep the switch value on the stack until a case matches.
VisitForStackValue(stmt->tag());
PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
@@ -835,10 +822,10 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
if (inline_smi_code) {
Label slow_case;
__ mov(ecx, edx);
- __ or_(ecx, Operand(eax));
+ __ or_(ecx, eax);
patch_site.EmitJumpIfNotSmi(ecx, &slow_case, Label::kNear);
- __ cmp(edx, Operand(eax));
+ __ cmp(edx, eax);
__ j(not_equal, &next_test);
__ Drop(1); // Switch value is no longer needed.
__ jmp(clause->body_target());
@@ -850,7 +837,7 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
Handle<Code> ic = CompareIC::GetUninitialized(Token::EQ_STRICT);
__ call(ic, RelocInfo::CODE_TARGET, clause->CompareId());
patch_site.EmitPatchInfo();
- __ test(eax, Operand(eax));
+ __ test(eax, eax);
__ j(not_equal, &next_test);
__ Drop(1); // Switch value is no longer needed.
__ jmp(clause->body_target());
@@ -866,7 +853,6 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
__ jmp(default_clause->body_target());
}
- set_stack_height(switch_clause_stack_height);
// Compile all the case bodies.
for (int i = 0; i < clauses->length(); i++) {
Comment cmnt(masm_, "[ Case body");
@@ -908,13 +894,18 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
__ bind(&done_convert);
__ push(eax);
- increment_stack_height();
+
+ // Check for proxies.
+ Label call_runtime;
+ STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
+ __ CmpObjectType(eax, LAST_JS_PROXY_TYPE, ecx);
+ __ j(below_equal, &call_runtime);
// Check cache validity in generated code. This is a fast case for
// the JSObject::IsSimpleEnum cache validity checks. If we cannot
// guarantee cache validity, call the runtime system to check cache
// validity or get the property names in a fixed array.
- Label next, call_runtime;
+ Label next;
__ mov(ecx, eax);
__ bind(&next);
@@ -939,7 +930,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// For all objects but the receiver, check that the cache is empty.
Label check_prototype;
- __ cmp(ecx, Operand(eax));
+ __ cmp(ecx, eax);
__ j(equal, &check_prototype, Label::kNear);
__ mov(edx, FieldOperand(edx, DescriptorArray::kEnumCacheBridgeCacheOffset));
__ cmp(edx, isolate()->factory()->empty_fixed_array());
@@ -985,15 +976,21 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ jmp(&loop);
// We got a fixed array in register eax. Iterate through that.
+ Label non_proxy;
__ bind(&fixed_array);
- __ push(Immediate(Smi::FromInt(0))); // Map (0) - force slow check.
- __ push(eax);
+ __ mov(ebx, Immediate(Smi::FromInt(1))); // Smi indicates slow check
+ __ mov(ecx, Operand(esp, 0 * kPointerSize)); // Get enumerated object
+ STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
+ __ CmpObjectType(ecx, LAST_JS_PROXY_TYPE, ecx);
+ __ j(above, &non_proxy);
+ __ mov(ebx, Immediate(Smi::FromInt(0))); // Zero indicates proxy
+ __ bind(&non_proxy);
+ __ push(ebx); // Smi
+ __ push(eax); // Array
__ mov(eax, FieldOperand(eax, FixedArray::kLengthOffset));
__ push(eax); // Fixed array length (as smi).
__ push(Immediate(Smi::FromInt(0))); // Initial index.
- // 1 ~ The object has already been pushed.
- increment_stack_height(ForIn::kElementCount - 1);
// Generate code for doing the condition check.
__ bind(&loop);
__ mov(eax, Operand(esp, 0 * kPointerSize)); // Get the current index.
@@ -1004,26 +1001,32 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ mov(ebx, Operand(esp, 2 * kPointerSize));
__ mov(ebx, FieldOperand(ebx, eax, times_2, FixedArray::kHeaderSize));
- // Get the expected map from the stack or a zero map in the
+ // Get the expected map from the stack or a smi in the
// permanent slow case into register edx.
__ mov(edx, Operand(esp, 3 * kPointerSize));
// Check if the expected map still matches that of the enumerable.
- // If not, we have to filter the key.
+ // If not, we may have to filter the key.
Label update_each;
__ mov(ecx, Operand(esp, 4 * kPointerSize));
__ cmp(edx, FieldOperand(ecx, HeapObject::kMapOffset));
__ j(equal, &update_each, Label::kNear);
+ // For proxies, no filtering is done.
+ // TODO(rossberg): What if only a prototype is a proxy? Not specified yet.
+ ASSERT(Smi::FromInt(0) == 0);
+ __ test(edx, edx);
+ __ j(zero, &update_each);
+
// Convert the entry to a string or null if it isn't a property
// anymore. If the property has been removed while iterating, we
// just skip it.
__ push(ecx); // Enumerable.
__ push(ebx); // Current entry.
__ InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION);
- __ test(eax, Operand(eax));
+ __ test(eax, eax);
__ j(equal, loop_statement.continue_label());
- __ mov(ebx, Operand(eax));
+ __ mov(ebx, eax);
// Update the 'each' property or variable from the possibly filtered
// entry in register ebx.
@@ -1047,9 +1050,8 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Remove the pointers stored on the stack.
__ bind(loop_statement.break_label());
- __ add(Operand(esp), Immediate(5 * kPointerSize));
+ __ add(esp, Immediate(5 * kPointerSize));
- decrement_stack_height(ForIn::kElementCount);
// Exit and decrement the loop depth.
__ bind(&exit);
decrement_loop_depth();
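The loop body above indexes a fixed five-word block that the break label pops in a single add esp, 5 * kPointerSize. Reconstructed from the operands used in this hunk:

  // For-in loop state on the stack (ia32):
  //   [esp + 0]   current index (smi)
  //   [esp + 4]   fixed-array length (smi)
  //   [esp + 8]   fixed array of keys
  //   [esp + 12]  expected map, or Smi(1) = permanent slow check,
  //               Smi(0) = proxy (no key filtering is done)
  //   [esp + 16]  the enumerable object itself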
@@ -1069,7 +1071,7 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
!pretenure &&
scope()->is_function_scope() &&
info->num_literals() == 0) {
- FastNewClosureStub stub(info->strict_mode() ? kStrictMode : kNonStrictMode);
+ FastNewClosureStub stub(info->language_mode());
__ push(Immediate(info));
__ CallStub(&stub);
} else {
@@ -1099,7 +1101,7 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
Scope* s = scope();
while (s != NULL) {
if (s->num_heap_slots() > 0) {
- if (s->calls_eval()) {
+ if (s->calls_non_strict_eval()) {
// Check that extension is NULL.
__ cmp(ContextOperand(context, Context::EXTENSION_INDEX),
Immediate(0));
@@ -1113,7 +1115,7 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
// If no outer scope calls eval, we do not need to check more
// context extensions. If we have reached an eval scope, we check
// all extensions from this point.
- if (!s->outer_scope_calls_eval() || s->is_eval_scope()) break;
+ if (!s->outer_scope_calls_non_strict_eval() || s->is_eval_scope()) break;
s = s->outer_scope();
}
@@ -1158,7 +1160,7 @@ MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) {
if (s->num_heap_slots() > 0) {
- if (s->calls_eval()) {
+ if (s->calls_non_strict_eval()) {
// Check that extension is NULL.
__ cmp(ContextOperand(context, Context::EXTENSION_INDEX),
Immediate(0));
@@ -1189,16 +1191,23 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(Variable* var,
// introducing variables. In those cases, we do not want to
// perform a runtime call for all variables in the scope
// containing the eval.
- if (var->mode() == Variable::DYNAMIC_GLOBAL) {
+ if (var->mode() == DYNAMIC_GLOBAL) {
EmitLoadGlobalCheckExtensions(var, typeof_state, slow);
__ jmp(done);
- } else if (var->mode() == Variable::DYNAMIC_LOCAL) {
+ } else if (var->mode() == DYNAMIC_LOCAL) {
Variable* local = var->local_if_not_shadowed();
__ mov(eax, ContextSlotOperandCheckExtensions(local, slow));
- if (local->mode() == Variable::CONST) {
+ if (local->mode() == CONST ||
+ local->mode() == CONST_HARMONY ||
+ local->mode() == LET) {
__ cmp(eax, isolate()->factory()->the_hole_value());
__ j(not_equal, done);
- __ mov(eax, isolate()->factory()->undefined_value());
+ if (local->mode() == CONST) {
+ __ mov(eax, isolate()->factory()->undefined_value());
+ } else { // LET || CONST_HARMONY
+ __ push(Immediate(var->name()));
+ __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ }
}
__ jmp(done);
}
@@ -1231,23 +1240,63 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
Comment cmnt(masm_, var->IsContextSlot()
? "Context variable"
: "Stack variable");
- if (var->mode() != Variable::LET && var->mode() != Variable::CONST) {
- context()->Plug(var);
- } else {
- // Let and const need a read barrier.
- Label done;
- GetVar(eax, var);
- __ cmp(eax, isolate()->factory()->the_hole_value());
- __ j(not_equal, &done, Label::kNear);
- if (var->mode() == Variable::LET) {
- __ push(Immediate(var->name()));
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
- } else { // Variable::CONST
- __ mov(eax, isolate()->factory()->undefined_value());
+ if (var->binding_needs_init()) {
+ // var->scope() may be NULL when the proxy is located in eval code and
+ // refers to a potential outside binding. Currently those bindings are
+ // always looked up dynamically, i.e. in that case
+ // var->location() == LOOKUP
+ // always holds.
+ ASSERT(var->scope() != NULL);
+
+ // Check if the binding really needs an initialization check. The check
+ // can be skipped in the following situation: we have a LET or CONST
+ // binding in harmony mode, both the Variable and the VariableProxy have
+ // the same declaration scope (i.e. they are both in global code, in the
+ // same function or in the same eval code) and the VariableProxy is in
+ // the source physically located after the initializer of the variable.
+ //
+ // We cannot skip any initialization checks for CONST in non-harmony
+ // mode because const variables may be declared but never initialized:
+ // if (false) { const x; }; var y = x;
+ //
+ // The condition on the declaration scopes is a conservative check for
+ // nested functions that access a binding and are called before the
+ // binding is initialized:
+ // function() { f(); let x = 1; function f() { x = 2; } }
+ //
+ bool skip_init_check;
+ if (var->scope()->DeclarationScope() != scope()->DeclarationScope()) {
+ skip_init_check = false;
+ } else {
+ // Check that we always have valid source position.
+ ASSERT(var->initializer_position() != RelocInfo::kNoPosition);
+ ASSERT(proxy->position() != RelocInfo::kNoPosition);
+ skip_init_check = var->mode() != CONST &&
+ var->initializer_position() < proxy->position();
+ }
+
+ if (!skip_init_check) {
+ // Let and const need a read barrier.
+ Label done;
+ GetVar(eax, var);
+ __ cmp(eax, isolate()->factory()->the_hole_value());
+ __ j(not_equal, &done, Label::kNear);
+ if (var->mode() == LET || var->mode() == CONST_HARMONY) {
+ // Throw a reference error when using an uninitialized let/const
+ // binding in harmony mode.
+ __ push(Immediate(var->name()));
+ __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ } else {
+ // Uninitialized const bindings outside of harmony mode are unholed.
+ ASSERT(var->mode() == CONST);
+ __ mov(eax, isolate()->factory()->undefined_value());
+ }
+ __ bind(&done);
+ context()->Plug(eax);
+ break;
}
- __ bind(&done);
- context()->Plug(eax);
}
+ context()->Plug(var);
break;
}
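Condensed, the elision rule introduced above is the following predicate (same names as in the hunk; a summary, not new logic):

  // A hole check can be dropped only when the use site provably runs after
  // the initializer in the same declaration scope. Legacy (non-harmony)
  // const never qualifies: "if (false) { const x; }" leaves x holed.
  bool skip_init_check =
      var->scope()->DeclarationScope() == scope()->DeclarationScope() &&
      var->mode() != CONST &&
      var->initializer_position() < proxy->position();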
@@ -1325,10 +1374,11 @@ void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Comment cmnt(masm_, "[ ObjectLiteral");
+ Handle<FixedArray> constant_properties = expr->constant_properties();
__ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
__ push(FieldOperand(edi, JSFunction::kLiteralsOffset));
__ push(Immediate(Smi::FromInt(expr->literal_index())));
- __ push(Immediate(expr->constant_properties()));
+ __ push(Immediate(constant_properties));
int flags = expr->fast_elements()
? ObjectLiteral::kFastElements
: ObjectLiteral::kNoFlags;
@@ -1336,10 +1386,15 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
? ObjectLiteral::kHasFunction
: ObjectLiteral::kNoFlags;
__ push(Immediate(Smi::FromInt(flags)));
+ int properties_count = constant_properties->length() / 2;
if (expr->depth() > 1) {
__ CallRuntime(Runtime::kCreateObjectLiteral, 4);
- } else {
+ } else if (flags != ObjectLiteral::kFastElements ||
+ properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
__ CallRuntime(Runtime::kCreateObjectLiteralShallow, 4);
+ } else {
+ FastCloneShallowObjectStub stub(properties_count);
+ __ CallStub(&stub);
}
// If result_saved is true the result is on top of the stack. If
@@ -1360,7 +1415,6 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
if (!result_saved) {
__ push(eax); // Save result on the stack
result_saved = true;
- increment_stack_height();
}
switch (property->kind()) {
case ObjectLiteral::Property::MATERIALIZED_LITERAL:
@@ -1372,9 +1426,9 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
VisitForAccumulatorValue(value);
__ mov(ecx, Immediate(key->handle()));
__ mov(edx, Operand(esp, 0));
- Handle<Code> ic = is_strict_mode()
- ? isolate()->builtins()->StoreIC_Initialize_Strict()
- : isolate()->builtins()->StoreIC_Initialize();
+ Handle<Code> ic = is_classic_mode()
+ ? isolate()->builtins()->StoreIC_Initialize()
+ : isolate()->builtins()->StoreIC_Initialize_Strict();
__ call(ic, RelocInfo::CODE_TARGET, key->id());
PrepareForBailoutForId(key->id(), NO_REGISTERS);
} else {
@@ -1385,7 +1439,6 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
// Fall through.
case ObjectLiteral::Property::PROTOTYPE:
__ push(Operand(esp, 0)); // Duplicate receiver.
- increment_stack_height();
VisitForStackValue(key);
VisitForStackValue(value);
if (property->emit_store()) {
@@ -1394,20 +1447,16 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
} else {
__ Drop(3);
}
- decrement_stack_height(3);
break;
case ObjectLiteral::Property::SETTER:
case ObjectLiteral::Property::GETTER:
__ push(Operand(esp, 0)); // Duplicate receiver.
- increment_stack_height();
VisitForStackValue(key);
__ push(Immediate(property->kind() == ObjectLiteral::Property::SETTER ?
Smi::FromInt(1) :
Smi::FromInt(0)));
- increment_stack_height();
VisitForStackValue(value);
__ CallRuntime(Runtime::kDefineAccessor, 4);
- decrement_stack_height(4);
break;
default: UNREACHABLE();
}
@@ -1432,25 +1481,42 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
ZoneList<Expression*>* subexprs = expr->values();
int length = subexprs->length();
+ Handle<FixedArray> constant_elements = expr->constant_elements();
+ ASSERT_EQ(2, constant_elements->length());
+ ElementsKind constant_elements_kind =
+ static_cast<ElementsKind>(Smi::cast(constant_elements->get(0))->value());
+ bool has_constant_fast_elements = constant_elements_kind == FAST_ELEMENTS;
+ Handle<FixedArrayBase> constant_elements_values(
+ FixedArrayBase::cast(constant_elements->get(1)));
__ mov(ebx, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
__ push(FieldOperand(ebx, JSFunction::kLiteralsOffset));
__ push(Immediate(Smi::FromInt(expr->literal_index())));
- __ push(Immediate(expr->constant_elements()));
- if (expr->constant_elements()->map() ==
- isolate()->heap()->fixed_cow_array_map()) {
- ASSERT(expr->depth() == 1);
+ __ push(Immediate(constant_elements));
+ Heap* heap = isolate()->heap();
+ if (has_constant_fast_elements &&
+ constant_elements_values->map() == heap->fixed_cow_array_map()) {
+ // If the elements are already FAST_ELEMENTS, the boilerplate cannot
+ // change, so it's possible to specialize the stub in advance.
+ __ IncrementCounter(isolate()->counters()->cow_arrays_created_stub(), 1);
FastCloneShallowArrayStub stub(
- FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS, length);
+ FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS,
+ length);
__ CallStub(&stub);
- __ IncrementCounter(isolate()->counters()->cow_arrays_created_stub(), 1);
} else if (expr->depth() > 1) {
__ CallRuntime(Runtime::kCreateArrayLiteral, 3);
} else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
__ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
} else {
- FastCloneShallowArrayStub stub(
- FastCloneShallowArrayStub::CLONE_ELEMENTS, length);
+ ASSERT(constant_elements_kind == FAST_ELEMENTS ||
+ constant_elements_kind == FAST_SMI_ONLY_ELEMENTS ||
+ FLAG_smi_only_arrays);
+ // If the elements are already FAST_ELEMENTS, the boilerplate cannot
+ // change, so it's possible to specialize the stub in advance.
+ FastCloneShallowArrayStub::Mode mode = has_constant_fast_elements
+ ? FastCloneShallowArrayStub::CLONE_ELEMENTS
+ : FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS;
+ FastCloneShallowArrayStub stub(mode, length);
__ CallStub(&stub);
}
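The stub-versus-runtime choice above boils down to a four-way ladder (a summary of the code, not new behavior):

  // COW FAST_ELEMENTS boilerplate  -> FastCloneShallowArrayStub(
  //                                       COPY_ON_WRITE_ELEMENTS)
  // nested literal (depth > 1)     -> Runtime::kCreateArrayLiteral
  // length > kMaximumClonedLength  -> Runtime::kCreateArrayLiteralShallow
  // otherwise                      -> FastCloneShallowArrayStub(
  //                                       boilerplate is FAST_ELEMENTS
  //                                           ? CLONE_ELEMENTS
  //                                           : CLONE_ANY_ELEMENTS)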
@@ -1470,18 +1536,31 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
if (!result_saved) {
__ push(eax);
result_saved = true;
- increment_stack_height();
}
VisitForAccumulatorValue(subexpr);
- // Store the subexpression value in the array's elements.
- __ mov(ebx, Operand(esp, 0)); // Copy of array literal.
- __ mov(ebx, FieldOperand(ebx, JSObject::kElementsOffset));
- int offset = FixedArray::kHeaderSize + (i * kPointerSize);
- __ mov(FieldOperand(ebx, offset), result_register());
-
- // Update the write barrier for the array store.
- __ RecordWrite(ebx, offset, result_register(), ecx);
+ if (constant_elements_kind == FAST_ELEMENTS) {
+ // Fast-case array literal with ElementsKind of FAST_ELEMENTS: such
+ // literals cannot transition, so there is no need to call the runtime stub.
+ int offset = FixedArray::kHeaderSize + (i * kPointerSize);
+ __ mov(ebx, Operand(esp, 0)); // Copy of array literal.
+ __ mov(ebx, FieldOperand(ebx, JSObject::kElementsOffset));
+ // Store the subexpression value in the array's elements.
+ __ mov(FieldOperand(ebx, offset), result_register());
+ // Update the write barrier for the array store.
+ __ RecordWriteField(ebx, offset, result_register(), ecx,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ INLINE_SMI_CHECK);
+ } else {
+ // Store the subexpression value in the array's elements.
+ __ mov(ebx, Operand(esp, 0)); // Copy of array literal.
+ __ mov(edi, FieldOperand(ebx, JSObject::kMapOffset));
+ __ mov(ecx, Immediate(Smi::FromInt(i)));
+ __ mov(edx, Immediate(Smi::FromInt(expr->literal_index())));
+ StoreArrayLiteralElementStub stub;
+ __ CallStub(&stub);
+ }
PrepareForBailoutForId(expr->GetIdForElement(i), NO_REGISTERS);
}
@@ -1499,9 +1578,7 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
// Invalid left-hand sides are rewritten to have a 'throw ReferenceError'
// on the left-hand side.
if (!expr->target()->IsValidLeftHandSide()) {
- ASSERT(expr->target()->AsThrow() != NULL);
- VisitInCurrentContext(expr->target()); // Throw does not plug the context
- context()->Plug(eax);
+ VisitForEffect(expr->target());
return;
}
@@ -1526,7 +1603,6 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
// We need the receiver both on the stack and in the accumulator.
VisitForAccumulatorValue(property->obj());
__ push(result_register());
- increment_stack_height();
} else {
VisitForStackValue(property->obj());
}
@@ -1537,7 +1613,6 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
VisitForAccumulatorValue(property->key());
__ mov(edx, Operand(esp, 0));
__ push(eax);
- increment_stack_height();
} else {
VisitForStackValue(property->obj());
VisitForStackValue(property->key());
@@ -1569,7 +1644,6 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
Token::Value op = expr->binary_op();
__ push(eax); // Left operand goes on the stack.
- increment_stack_height();
VisitForAccumulatorValue(expr->value());
OverwriteMode mode = expr->value()->ResultOverwriteAllowed()
@@ -1619,14 +1693,14 @@ void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
ASSERT(!key->handle()->IsSmi());
__ mov(ecx, Immediate(key->handle()));
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- __ call(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
+ __ call(ic, RelocInfo::CODE_TARGET, prop->id());
}
void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- __ call(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
+ __ call(ic, RelocInfo::CODE_TARGET, prop->id());
}
@@ -1639,9 +1713,8 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
// stack. Right operand is in eax.
Label smi_case, done, stub_call;
__ pop(edx);
- decrement_stack_height();
__ mov(ecx, eax);
- __ or_(eax, Operand(edx));
+ __ or_(eax, edx);
JumpPatchSite patch_site(masm_);
patch_site.EmitJumpIfSmi(eax, &smi_case, Label::kNear);
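// Because the smi tag is zero, or-ing the two operands leaves the low
// bit clear only when both are smis, so a single test on eax suffices.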
@@ -1691,32 +1764,32 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
break;
}
case Token::ADD:
- __ add(eax, Operand(ecx));
+ __ add(eax, ecx);
__ j(overflow, &stub_call);
break;
case Token::SUB:
- __ sub(eax, Operand(ecx));
+ __ sub(eax, ecx);
__ j(overflow, &stub_call);
break;
case Token::MUL: {
__ SmiUntag(eax);
- __ imul(eax, Operand(ecx));
+ __ imul(eax, ecx);
__ j(overflow, &stub_call);
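// Only one operand was untagged, since (2a)*b == 2(a*b) leaves the
// product correctly tagged. A zero product may really be -0 (e.g.
// -2 * 0), which a smi cannot represent, so if either operand was
// negative the stub takes over.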
- __ test(eax, Operand(eax));
+ __ test(eax, eax);
__ j(not_zero, &done, Label::kNear);
__ mov(ebx, edx);
- __ or_(ebx, Operand(ecx));
+ __ or_(ebx, ecx);
__ j(negative, &stub_call);
break;
}
case Token::BIT_OR:
- __ or_(eax, Operand(ecx));
+ __ or_(eax, ecx);
break;
case Token::BIT_AND:
- __ and_(eax, Operand(ecx));
+ __ and_(eax, ecx);
break;
case Token::BIT_XOR:
- __ xor_(eax, Operand(ecx));
+ __ xor_(eax, ecx);
break;
default:
UNREACHABLE();
@@ -1731,7 +1804,6 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
Token::Value op,
OverwriteMode mode) {
__ pop(edx);
- decrement_stack_height();
BinaryOpStub stub(op, mode);
JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
__ call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
@@ -1744,9 +1816,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) {
// Invalid left-hand sides are rewritten to have a 'throw
// ReferenceError' on the left-hand side.
if (!expr->IsValidLeftHandSide()) {
- ASSERT(expr->AsThrow() != NULL);
- VisitInCurrentContext(expr); // Throw does not plug the context
- context()->Plug(eax);
+ VisitForEffect(expr);
return;
}
@@ -1770,31 +1840,26 @@ void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) {
}
case NAMED_PROPERTY: {
__ push(eax); // Preserve value.
- increment_stack_height();
VisitForAccumulatorValue(prop->obj());
__ mov(edx, eax);
__ pop(eax); // Restore value.
- decrement_stack_height();
__ mov(ecx, prop->key()->AsLiteral()->handle());
- Handle<Code> ic = is_strict_mode()
- ? isolate()->builtins()->StoreIC_Initialize_Strict()
- : isolate()->builtins()->StoreIC_Initialize();
+ Handle<Code> ic = is_classic_mode()
+ ? isolate()->builtins()->StoreIC_Initialize()
+ : isolate()->builtins()->StoreIC_Initialize_Strict();
__ call(ic);
break;
}
case KEYED_PROPERTY: {
__ push(eax); // Preserve value.
- increment_stack_height();
VisitForStackValue(prop->obj());
VisitForAccumulatorValue(prop->key());
__ mov(ecx, eax);
__ pop(edx);
- decrement_stack_height();
__ pop(eax); // Restore value.
- decrement_stack_height();
- Handle<Code> ic = is_strict_mode()
- ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
- : isolate()->builtins()->KeyedStoreIC_Initialize();
+ Handle<Code> ic = is_classic_mode()
+ ? isolate()->builtins()->KeyedStoreIC_Initialize()
+ : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
__ call(ic);
break;
}
@@ -1810,9 +1875,9 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
// Global var, const, or let.
__ mov(ecx, var->name());
__ mov(edx, GlobalObjectOperand());
- Handle<Code> ic = is_strict_mode()
- ? isolate()->builtins()->StoreIC_Initialize_Strict()
- : isolate()->builtins()->StoreIC_Initialize();
+ Handle<Code> ic = is_classic_mode()
+ ? isolate()->builtins()->StoreIC_Initialize()
+ : isolate()->builtins()->StoreIC_Initialize_Strict();
__ call(ic, RelocInfo::CODE_TARGET_CONTEXT);
} else if (op == Token::INIT_CONST) {
@@ -1838,13 +1903,13 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
__ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
}
- } else if (var->mode() == Variable::LET && op != Token::INIT_LET) {
+ } else if (var->mode() == LET && op != Token::INIT_LET) {
// Non-initializing assignment to let variable needs a write barrier.
if (var->IsLookupSlot()) {
__ push(eax); // Value.
__ push(esi); // Context.
__ push(Immediate(var->name()));
- __ push(Immediate(Smi::FromInt(strict_mode_flag())));
+ __ push(Immediate(Smi::FromInt(language_mode())));
__ CallRuntime(Runtime::kStoreContextSlot, 4);
} else {
ASSERT(var->IsStackAllocated() || var->IsContextSlot());
@@ -1859,12 +1924,14 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
__ mov(location, eax);
if (var->IsContextSlot()) {
__ mov(edx, eax);
- __ RecordWrite(ecx, Context::SlotOffset(var->index()), edx, ebx);
+ int offset = Context::SlotOffset(var->index());
+ __ RecordWriteContextSlot(ecx, offset, edx, ebx, kDontSaveFPRegs);
}
}
- } else if (var->mode() != Variable::CONST) {
- // Assignment to var or initializing assignment to let.
+ } else if (!var->is_const_mode() || op == Token::INIT_CONST_HARMONY) {
+ // Assignment to var or initializing assignment to let/const
+ // in harmony mode.
if (var->IsStackAllocated() || var->IsContextSlot()) {
MemOperand location = VarOperand(var, ecx);
if (FLAG_debug_code && op == Token::INIT_LET) {
@@ -1877,14 +1944,15 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
__ mov(location, eax);
if (var->IsContextSlot()) {
__ mov(edx, eax);
- __ RecordWrite(ecx, Context::SlotOffset(var->index()), edx, ebx);
+ int offset = Context::SlotOffset(var->index());
+ __ RecordWriteContextSlot(ecx, offset, edx, ebx, kDontSaveFPRegs);
}
} else {
ASSERT(var->IsLookupSlot());
__ push(eax); // Value.
__ push(esi); // Context.
__ push(Immediate(var->name()));
- __ push(Immediate(Smi::FromInt(strict_mode_flag())));
+ __ push(Immediate(Smi::FromInt(language_mode())));
__ CallRuntime(Runtime::kStoreContextSlot, 4);
}
}
@@ -1915,11 +1983,10 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
__ mov(edx, Operand(esp, 0));
} else {
__ pop(edx);
- decrement_stack_height();
}
- Handle<Code> ic = is_strict_mode()
- ? isolate()->builtins()->StoreIC_Initialize_Strict()
- : isolate()->builtins()->StoreIC_Initialize();
+ Handle<Code> ic = is_classic_mode()
+ ? isolate()->builtins()->StoreIC_Initialize()
+ : isolate()->builtins()->StoreIC_Initialize_Strict();
__ call(ic, RelocInfo::CODE_TARGET, expr->id());
// If the assignment ends an initialization block, revert to fast case.
@@ -1929,7 +1996,6 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
__ CallRuntime(Runtime::kToFastProperties, 1);
__ pop(eax);
__ Drop(1);
- decrement_stack_height();
}
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(eax);
@@ -1951,18 +2017,16 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
}
__ pop(ecx);
- decrement_stack_height();
if (expr->ends_initialization_block()) {
__ mov(edx, Operand(esp, 0)); // Leave receiver on the stack for later.
} else {
__ pop(edx);
- decrement_stack_height();
}
// Record source code position before IC call.
SetSourcePosition(expr->position());
- Handle<Code> ic = is_strict_mode()
- ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
- : isolate()->builtins()->KeyedStoreIC_Initialize();
+ Handle<Code> ic = is_classic_mode()
+ ? isolate()->builtins()->KeyedStoreIC_Initialize()
+ : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
__ call(ic, RelocInfo::CODE_TARGET, expr->id());
// If the assignment ends an initialization block, revert to fast case.
@@ -1972,7 +2036,6 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
__ push(edx);
__ CallRuntime(Runtime::kToFastProperties, 1);
__ pop(eax);
- decrement_stack_height();
}
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
@@ -1992,7 +2055,6 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
VisitForStackValue(expr->obj());
VisitForAccumulatorValue(expr->key());
__ pop(edx);
- decrement_stack_height();
EmitKeyedPropertyLoad(expr);
context()->Plug(eax);
}
@@ -2019,7 +2081,6 @@ void FullCodeGenerator::EmitCallWithIC(Call* expr,
RecordJSReturnSite(expr);
// Restore context register.
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- decrement_stack_height(arg_count + 1);
context()->Plug(eax);
}
@@ -2034,7 +2095,6 @@ void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr,
__ pop(ecx);
__ push(eax);
__ push(ecx);
- increment_stack_height();
// Load the arguments.
ZoneList<Expression*>* args = expr->arguments();
@@ -2053,7 +2113,6 @@ void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr,
RecordJSReturnSite(expr);
// Restore context register.
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- decrement_stack_height(arg_count + 1);
context()->DropAndPlug(1, eax); // Drop the key still on the stack.
}
@@ -2069,19 +2128,38 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr, CallFunctionFlags flags) {
}
// Record source position for debugger.
SetSourcePosition(expr->position());
+
+ // Record call targets in unoptimized code, but not in the snapshot.
+ bool record_call_target = !Serializer::enabled();
+ if (record_call_target) {
+ flags = static_cast<CallFunctionFlags>(flags | RECORD_CALL_TARGET);
+ }
CallFunctionStub stub(arg_count, flags);
- __ CallStub(&stub);
+ __ mov(edi, Operand(esp, (arg_count + 1) * kPointerSize));
+ __ CallStub(&stub, expr->id());
+ if (record_call_target) {
+ // There is a one-element cache in the instruction stream.
+#ifdef DEBUG
+ int return_site_offset = masm()->pc_offset();
+#endif
+ Handle<Object> uninitialized =
+ CallFunctionStub::UninitializedSentinel(isolate());
+ Handle<JSGlobalPropertyCell> cell =
+ isolate()->factory()->NewJSGlobalPropertyCell(uninitialized);
+ __ test(eax, Immediate(cell));
+ // Patching code in the stub assumes the opcode is 1 byte and there is
+ // a word for a pointer in the operand.
+ ASSERT(masm()->pc_offset() - return_site_offset >= 1 + kPointerSize);
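+ // (On ia32, "test eax, imm32" assembles to the one-byte opcode 0xA9
+ // followed by the 32-bit cell pointer, which is what the assert above
+ // relies on.)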
+ }
+
RecordJSReturnSite(expr);
// Restore context register.
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
-
- decrement_stack_height(arg_count + 1);
context()->DropAndPlug(1, eax);
}
-void FullCodeGenerator::EmitResolvePossiblyDirectEval(ResolveEvalFlag flag,
- int arg_count) {
+void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
// Push copy of the first argument or undefined if it doesn't exist.
if (arg_count > 0) {
__ push(Operand(esp, arg_count * kPointerSize));
@@ -2091,18 +2169,14 @@ void FullCodeGenerator::EmitResolvePossiblyDirectEval(ResolveEvalFlag flag,
// Push the receiver of the enclosing function.
__ push(Operand(ebp, (2 + info_->scope()->num_parameters()) * kPointerSize));
+ // Push the language mode.
+ __ push(Immediate(Smi::FromInt(language_mode())));
- // Push the strict mode flag. In harmony mode every eval call
- // is a strict mode eval call.
- StrictModeFlag strict_mode = strict_mode_flag();
- if (FLAG_harmony_block_scoping) {
- strict_mode = kStrictMode;
- }
- __ push(Immediate(Smi::FromInt(strict_mode)));
+ // Push the start position of the scope the call resides in.
+ __ push(Immediate(Smi::FromInt(scope()->start_position())));
- __ CallRuntime(flag == SKIP_CONTEXT_LOOKUP
- ? Runtime::kResolvePossiblyDirectEvalNoLookup
- : Runtime::kResolvePossiblyDirectEval, 4);
+ // Do the runtime call.
+ __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 5);
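+ // The five arguments are: the function to resolve (pushed by the
+ // caller), a copy of the first argument (or undefined), the enclosing
+ // receiver, the language mode, and the scope's start position.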
}
@@ -2128,33 +2202,15 @@ void FullCodeGenerator::VisitCall(Call* expr) {
VisitForStackValue(callee);
// Reserved receiver slot.
__ push(Immediate(isolate()->factory()->undefined_value()));
- increment_stack_height();
// Push the arguments.
for (int i = 0; i < arg_count; i++) {
VisitForStackValue(args->at(i));
}
- // If we know that eval can only be shadowed by eval-introduced
- // variables we attempt to load the global eval function directly in
- // generated code. If we succeed, there is no need to perform a
- // context lookup in the runtime system.
- Label done;
- Variable* var = proxy->var();
- if (!var->IsUnallocated() && var->mode() == Variable::DYNAMIC_GLOBAL) {
- Label slow;
- EmitLoadGlobalCheckExtensions(var, NOT_INSIDE_TYPEOF, &slow);
- // Push the function and resolve eval.
- __ push(eax);
- EmitResolvePossiblyDirectEval(SKIP_CONTEXT_LOOKUP, arg_count);
- __ jmp(&done);
- __ bind(&slow);
- }
-
// Push a copy of the function (found below the arguments) and
// resolve eval.
__ push(Operand(esp, (arg_count + 1) * kPointerSize));
- EmitResolvePossiblyDirectEval(PERFORM_CONTEXT_LOOKUP, arg_count);
- __ bind(&done);
+ EmitResolvePossiblyDirectEval(arg_count);
// The runtime call returns a pair of values in eax (function) and
// edx (receiver). Touch up the stack with the right values.
@@ -2164,17 +2220,16 @@ void FullCodeGenerator::VisitCall(Call* expr) {
// Record source position for debugger.
SetSourcePosition(expr->position());
CallFunctionStub stub(arg_count, RECEIVER_MIGHT_BE_IMPLICIT);
+ __ mov(edi, Operand(esp, (arg_count + 1) * kPointerSize));
__ CallStub(&stub);
RecordJSReturnSite(expr);
// Restore context register.
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- decrement_stack_height(arg_count + 1); // Function is left on the stack.
context()->DropAndPlug(1, eax);
} else if (proxy != NULL && proxy->var()->IsUnallocated()) {
// Push global object as receiver for the call IC.
__ push(GlobalObjectOperand());
- increment_stack_height();
EmitCallWithIC(expr, proxy->name(), RelocInfo::CODE_TARGET_CONTEXT);
} else if (proxy != NULL && proxy->var()->IsLookupSlot()) {
@@ -2193,7 +2248,6 @@ void FullCodeGenerator::VisitCall(Call* expr) {
__ CallRuntime(Runtime::kLoadContextSlot, 2);
__ push(eax); // Function.
__ push(edx); // Receiver.
- increment_stack_height(2);
// If fast case code has been generated, emit code to push the function
// and receiver and have the slow path jump around this code.
@@ -2201,8 +2255,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
Label call;
__ jmp(&call, Label::kNear);
__ bind(&done);
- // Push function. Stack height already incremented in slow case
- // above.
+ // Push function.
__ push(eax);
// The receiver is implicitly the global receiver. Indicate this by
// passing the hole to the call function stub.
@@ -2235,7 +2288,6 @@ void FullCodeGenerator::VisitCall(Call* expr) {
// Load global receiver object.
__ mov(ebx, GlobalObjectOperand());
__ push(FieldOperand(ebx, GlobalObject::kGlobalReceiverOffset));
- increment_stack_height();
// Emit function call.
EmitCallWithStub(expr, NO_CALL_FUNCTION_FLAGS);
}
@@ -2276,13 +2328,12 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
Handle<Code> construct_builtin =
isolate()->builtins()->JSConstructCall();
__ call(construct_builtin, RelocInfo::CONSTRUCT_CALL);
-
- decrement_stack_height(arg_count + 1);
context()->Plug(eax);
}
-void FullCodeGenerator::EmitIsSmi(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -2294,7 +2345,7 @@ void FullCodeGenerator::EmitIsSmi(ZoneList<Expression*>* args) {
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
__ test(eax, Immediate(kSmiTagMask));
Split(zero, if_true, if_false, fall_through);
@@ -2302,7 +2353,8 @@ void FullCodeGenerator::EmitIsSmi(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitIsNonNegativeSmi(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsNonNegativeSmi(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -2314,7 +2366,7 @@ void FullCodeGenerator::EmitIsNonNegativeSmi(ZoneList<Expression*>* args) {
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
__ test(eax, Immediate(kSmiTagMask | 0x80000000));
Split(zero, if_true, if_false, fall_through);
@@ -2322,7 +2374,8 @@ void FullCodeGenerator::EmitIsNonNegativeSmi(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitIsObject(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsObject(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -2346,14 +2399,15 @@ void FullCodeGenerator::EmitIsObject(ZoneList<Expression*>* args) {
__ cmp(ecx, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
__ j(below, if_false);
__ cmp(ecx, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(below_equal, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
}
-void FullCodeGenerator::EmitIsSpecObject(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -2367,14 +2421,15 @@ void FullCodeGenerator::EmitIsSpecObject(ZoneList<Expression*>* args) {
__ JumpIfSmi(eax, if_false);
__ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ebx);
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(above_equal, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
}
-void FullCodeGenerator::EmitIsUndetectableObject(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsUndetectableObject(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -2390,7 +2445,7 @@ void FullCodeGenerator::EmitIsUndetectableObject(ZoneList<Expression*>* args) {
__ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
__ movzx_b(ebx, FieldOperand(ebx, Map::kBitFieldOffset));
__ test(ebx, Immediate(1 << Map::kIsUndetectable));
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(not_zero, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
@@ -2398,7 +2453,8 @@ void FullCodeGenerator::EmitIsUndetectableObject(ZoneList<Expression*>* args) {
void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
- ZoneList<Expression*>* args) {
+ CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -2438,9 +2494,9 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
STATIC_ASSERT(kPointerSize == 4);
__ lea(ecx, Operand(ebx, ecx, times_2, FixedArray::kHeaderSize));
// Calculate location of the first key name.
- __ add(Operand(ebx),
- Immediate(FixedArray::kHeaderSize +
- DescriptorArray::kFirstIndex * kPointerSize));
+ __ add(ebx,
+ Immediate(FixedArray::kHeaderSize +
+ DescriptorArray::kFirstIndex * kPointerSize));
// Loop through all the keys in the descriptor array. If one of these is the
// symbol valueOf, the result is false.
Label entry, loop;
@@ -2449,9 +2505,9 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
__ mov(edx, FieldOperand(ebx, 0));
__ cmp(edx, FACTORY->value_of_symbol());
__ j(equal, if_false);
- __ add(Operand(ebx), Immediate(kPointerSize));
+ __ add(ebx, Immediate(kPointerSize));
__ bind(&entry);
- __ cmp(ebx, Operand(ecx));
+ __ cmp(ebx, ecx);
__ j(not_equal, &loop);
// Reload map as register ebx was used as temporary above.
@@ -2475,12 +2531,13 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
Immediate(1 << Map::kStringWrapperSafeForDefaultValueOf));
__ jmp(if_true);
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
context()->Plug(if_true, if_false);
}
-void FullCodeGenerator::EmitIsFunction(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -2494,14 +2551,15 @@ void FullCodeGenerator::EmitIsFunction(ZoneList<Expression*>* args) {
__ JumpIfSmi(eax, if_false);
__ CmpObjectType(eax, JS_FUNCTION_TYPE, ebx);
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(equal, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
}
-void FullCodeGenerator::EmitIsArray(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsArray(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -2515,14 +2573,15 @@ void FullCodeGenerator::EmitIsArray(ZoneList<Expression*>* args) {
__ JumpIfSmi(eax, if_false);
__ CmpObjectType(eax, JS_ARRAY_TYPE, ebx);
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(equal, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
}
-void FullCodeGenerator::EmitIsRegExp(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsRegExp(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -2536,7 +2595,7 @@ void FullCodeGenerator::EmitIsRegExp(ZoneList<Expression*>* args) {
__ JumpIfSmi(eax, if_false);
__ CmpObjectType(eax, JS_REGEXP_TYPE, ebx);
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(equal, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
@@ -2544,8 +2603,8 @@ void FullCodeGenerator::EmitIsRegExp(ZoneList<Expression*>* args) {
-void FullCodeGenerator::EmitIsConstructCall(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 0);
+void FullCodeGenerator::EmitIsConstructCall(CallRuntime* expr) {
+ ASSERT(expr->arguments()->length() == 0);
Label materialize_true, materialize_false;
Label* if_true = NULL;
@@ -2568,14 +2627,15 @@ void FullCodeGenerator::EmitIsConstructCall(ZoneList<Expression*>* args) {
__ bind(&check_frame_marker);
__ cmp(Operand(eax, StandardFrameConstants::kMarkerOffset),
Immediate(Smi::FromInt(StackFrame::CONSTRUCT)));
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(equal, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
}
-void FullCodeGenerator::EmitObjectEquals(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitObjectEquals(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 2);
// Load the two objects into registers and perform the comparison.
@@ -2590,16 +2650,16 @@ void FullCodeGenerator::EmitObjectEquals(ZoneList<Expression*>* args) {
&if_true, &if_false, &fall_through);
__ pop(ebx);
- decrement_stack_height();
- __ cmp(eax, Operand(ebx));
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ __ cmp(eax, ebx);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(equal, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
}
-void FullCodeGenerator::EmitArguments(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitArguments(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
// ArgumentsAccessStub expects the key in edx and the formal
@@ -2613,8 +2673,8 @@ void FullCodeGenerator::EmitArguments(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitArgumentsLength(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 0);
+void FullCodeGenerator::EmitArgumentsLength(CallRuntime* expr) {
+ ASSERT(expr->arguments()->length() == 0);
Label exit;
// Get the number of formal parameters.
@@ -2636,7 +2696,8 @@ void FullCodeGenerator::EmitArgumentsLength(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitClassOf(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
Label done, null, function, non_function_constructor;
@@ -2647,20 +2708,24 @@ void FullCodeGenerator::EmitClassOf(ZoneList<Expression*>* args) {
// Check that the object is a JS object but take special care of JS
// functions to make sure they have 'Function' as their class.
+ // Assume that there are only two callable types, one at either end of
+ // the type range for JS object types. This saves extra comparisons.
+ STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
__ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, eax);
// Map is now in eax.
__ j(below, &null);
-
- // As long as LAST_CALLABLE_SPEC_OBJECT_TYPE is the last instance type, and
- // FIRST_CALLABLE_SPEC_OBJECT_TYPE comes right after
- // LAST_NONCALLABLE_SPEC_OBJECT_TYPE, we can avoid checking for the latter.
- STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
- STATIC_ASSERT(FIRST_CALLABLE_SPEC_OBJECT_TYPE ==
- LAST_NONCALLABLE_SPEC_OBJECT_TYPE + 1);
- __ CmpInstanceType(eax, FIRST_CALLABLE_SPEC_OBJECT_TYPE);
- __ j(above_equal, &function);
-
- // Check if the constructor in the map is a function.
+ STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
+ FIRST_SPEC_OBJECT_TYPE + 1);
+ __ j(equal, &function);
+
+ __ CmpInstanceType(eax, LAST_SPEC_OBJECT_TYPE);
+ STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
+ LAST_SPEC_OBJECT_TYPE - 1);
+ __ j(equal, &function);
+ // Assume that there is no larger type.
+ STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == LAST_TYPE - 1);
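+ // (The two callable types, function proxies and functions, thus sit at
+ // the two ends of the spec-object range, so two equality checks replace
+ // the old range comparison.)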
+
+ // Check if the constructor in the map is a JS function.
__ mov(eax, FieldOperand(eax, Map::kConstructorOffset));
__ CmpObjectType(eax, JS_FUNCTION_TYPE, ebx);
__ j(not_equal, &non_function_constructor);
@@ -2692,7 +2757,7 @@ void FullCodeGenerator::EmitClassOf(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitLog(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitLog(CallRuntime* expr) {
// Conditionally generate a log call.
// Args:
// 0 (literal string): The type of logging (corresponds to the flags).
@@ -2700,12 +2765,12 @@ void FullCodeGenerator::EmitLog(ZoneList<Expression*>* args) {
// 1 (string): Format string. Access the string at argument index 2
// with '%2s' (see Logger::LogRuntime for all the formats).
// 2 (array): Arguments to the format string.
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(args->length(), 3);
if (CodeGenerator::ShouldGenerateLog(args->at(0))) {
VisitForStackValue(args->at(1));
VisitForStackValue(args->at(2));
__ CallRuntime(Runtime::kLog, 2);
- decrement_stack_height(2);
}
// Finally, we're expected to leave a value on the top of the stack.
__ mov(eax, isolate()->factory()->undefined_value());
@@ -2713,8 +2778,8 @@ void FullCodeGenerator::EmitLog(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitRandomHeapNumber(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 0);
+void FullCodeGenerator::EmitRandomHeapNumber(CallRuntime* expr) {
+ ASSERT(expr->arguments()->length() == 0);
Label slow_allocate_heapnumber;
Label heapnumber_allocated;
@@ -2730,9 +2795,10 @@ void FullCodeGenerator::EmitRandomHeapNumber(ZoneList<Expression*>* args) {
__ bind(&heapnumber_allocated);
__ PrepareCallCFunction(1, ebx);
- __ mov(Operand(esp, 0), Immediate(ExternalReference::isolate_address()));
- __ CallCFunction(ExternalReference::random_uint32_function(isolate()),
- 1);
+ __ mov(eax, ContextOperand(context_register(), Context::GLOBAL_INDEX));
+ __ mov(eax, FieldOperand(eax, GlobalObject::kGlobalContextOffset));
+ __ mov(Operand(esp, 0), eax);
+ __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
// Convert 32 random bits in eax to 0.(32 random bits) in a double
// by computing:
@@ -2741,8 +2807,8 @@ void FullCodeGenerator::EmitRandomHeapNumber(ZoneList<Expression*>* args) {
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope fscope(SSE2);
__ mov(ebx, Immediate(0x49800000)); // 1.0 x 2^20 as single.
- __ movd(xmm1, Operand(ebx));
- __ movd(xmm0, Operand(eax));
+ __ movd(xmm1, ebx);
+ __ movd(xmm0, eax);
__ cvtss2sd(xmm1, xmm1);
__ xorps(xmm0, xmm1);
__ subsd(xmm0, xmm1);
@@ -2763,34 +2829,35 @@ void FullCodeGenerator::EmitRandomHeapNumber(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitSubString(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitSubString(CallRuntime* expr) {
// Load the arguments on the stack and call the stub.
SubStringStub stub;
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 3);
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
VisitForStackValue(args->at(2));
__ CallStub(&stub);
- decrement_stack_height(3);
context()->Plug(eax);
}
-void FullCodeGenerator::EmitRegExpExec(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitRegExpExec(CallRuntime* expr) {
// Load the arguments on the stack and call the stub.
RegExpExecStub stub;
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 4);
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
VisitForStackValue(args->at(2));
VisitForStackValue(args->at(3));
__ CallStub(&stub);
- decrement_stack_height(4);
context()->Plug(eax);
}
-void FullCodeGenerator::EmitValueOf(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitValueOf(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0)); // Load the object.
@@ -2808,8 +2875,9 @@ void FullCodeGenerator::EmitValueOf(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitMathPow(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
// Load the arguments on the stack and call the runtime function.
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 2);
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
@@ -2820,18 +2888,17 @@ void FullCodeGenerator::EmitMathPow(ZoneList<Expression*>* args) {
} else {
__ CallRuntime(Runtime::kMath_pow, 2);
}
- decrement_stack_height(2);
context()->Plug(eax);
}
-void FullCodeGenerator::EmitSetValueOf(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitSetValueOf(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 2);
VisitForStackValue(args->at(0)); // Load the object.
VisitForAccumulatorValue(args->at(1)); // Load the value.
__ pop(ebx); // eax = value. ebx = object.
- decrement_stack_height();
Label done;
// If the object is a smi, return the value.
@@ -2843,17 +2910,19 @@ void FullCodeGenerator::EmitSetValueOf(ZoneList<Expression*>* args) {
// Store the value.
__ mov(FieldOperand(ebx, JSValue::kValueOffset), eax);
+
// Update the write barrier. Save the value as it will be
// overwritten by the write barrier code and is needed afterward.
__ mov(edx, eax);
- __ RecordWrite(ebx, JSValue::kValueOffset, edx, ecx);
+ __ RecordWriteField(ebx, JSValue::kValueOffset, edx, ecx, kDontSaveFPRegs);
__ bind(&done);
context()->Plug(eax);
}
-void FullCodeGenerator::EmitNumberToString(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(args->length(), 1);
// Load the argument on the stack and call the stub.
@@ -2861,12 +2930,12 @@ void FullCodeGenerator::EmitNumberToString(ZoneList<Expression*>* args) {
NumberToStringStub stub;
__ CallStub(&stub);
- decrement_stack_height();
context()->Plug(eax);
}
-void FullCodeGenerator::EmitStringCharFromCode(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -2884,7 +2953,8 @@ void FullCodeGenerator::EmitStringCharFromCode(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitStringCharCodeAt(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 2);
VisitForStackValue(args->at(0));
@@ -2892,18 +2962,15 @@ void FullCodeGenerator::EmitStringCharCodeAt(ZoneList<Expression*>* args) {
Register object = ebx;
Register index = eax;
- Register scratch = ecx;
Register result = edx;
__ pop(object);
- decrement_stack_height();
Label need_conversion;
Label index_out_of_range;
Label done;
StringCharCodeAtGenerator generator(object,
index,
- scratch,
result,
&need_conversion,
&need_conversion,
@@ -2932,7 +2999,8 @@ void FullCodeGenerator::EmitStringCharCodeAt(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitStringCharAt(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 2);
VisitForStackValue(args->at(0));
@@ -2940,20 +3008,17 @@ void FullCodeGenerator::EmitStringCharAt(ZoneList<Expression*>* args) {
Register object = ebx;
Register index = eax;
- Register scratch1 = ecx;
- Register scratch2 = edx;
+ Register scratch = edx;
Register result = eax;
__ pop(object);
- decrement_stack_height();
Label need_conversion;
Label index_out_of_range;
Label done;
StringCharAtGenerator generator(object,
index,
- scratch1,
- scratch2,
+ scratch,
result,
&need_conversion,
&need_conversion,
@@ -2982,7 +3047,8 @@ void FullCodeGenerator::EmitStringCharAt(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitStringAdd(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(2, args->length());
VisitForStackValue(args->at(0));
@@ -2990,12 +3056,12 @@ void FullCodeGenerator::EmitStringAdd(ZoneList<Expression*>* args) {
StringAddStub stub(NO_STRING_ADD_FLAGS);
__ CallStub(&stub);
- decrement_stack_height(2);
context()->Plug(eax);
}
-void FullCodeGenerator::EmitStringCompare(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitStringCompare(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(2, args->length());
VisitForStackValue(args->at(0));
@@ -3003,58 +3069,70 @@ void FullCodeGenerator::EmitStringCompare(ZoneList<Expression*>* args) {
StringCompareStub stub;
__ CallStub(&stub);
- decrement_stack_height(2);
context()->Plug(eax);
}
-void FullCodeGenerator::EmitMathSin(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitMathSin(CallRuntime* expr) {
// Load the argument on the stack and call the stub.
TranscendentalCacheStub stub(TranscendentalCache::SIN,
TranscendentalCacheStub::TAGGED);
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForStackValue(args->at(0));
__ CallStub(&stub);
- decrement_stack_height();
context()->Plug(eax);
}
-void FullCodeGenerator::EmitMathCos(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitMathCos(CallRuntime* expr) {
// Load the argument on the stack and call the stub.
TranscendentalCacheStub stub(TranscendentalCache::COS,
TranscendentalCacheStub::TAGGED);
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+ VisitForStackValue(args->at(0));
+ __ CallStub(&stub);
+ context()->Plug(eax);
+}
+
+
+void FullCodeGenerator::EmitMathTan(CallRuntime* expr) {
+ // Load the argument on the stack and call the stub.
+ TranscendentalCacheStub stub(TranscendentalCache::TAN,
+ TranscendentalCacheStub::TAGGED);
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForStackValue(args->at(0));
__ CallStub(&stub);
- decrement_stack_height();
context()->Plug(eax);
}
-void FullCodeGenerator::EmitMathLog(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitMathLog(CallRuntime* expr) {
// Load the argument on the stack and call the stub.
TranscendentalCacheStub stub(TranscendentalCache::LOG,
TranscendentalCacheStub::TAGGED);
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForStackValue(args->at(0));
__ CallStub(&stub);
- decrement_stack_height();
context()->Plug(eax);
}
-void FullCodeGenerator::EmitMathSqrt(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitMathSqrt(CallRuntime* expr) {
// Load the argument on the stack and call the runtime function.
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForStackValue(args->at(0));
__ CallRuntime(Runtime::kMath_sqrt, 1);
- decrement_stack_height();
context()->Plug(eax);
}
-void FullCodeGenerator::EmitCallFunction(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() >= 2);
int arg_count = args->length() - 2; // 2 ~ receiver and function.
@@ -3063,31 +3141,43 @@ void FullCodeGenerator::EmitCallFunction(ZoneList<Expression*>* args) {
}
VisitForAccumulatorValue(args->last()); // Function.
+ // Check for a function proxy.
+ Label proxy, done;
+ __ CmpObjectType(eax, JS_FUNCTION_PROXY_TYPE, ebx);
+ __ j(equal, &proxy);
+
// InvokeFunction requires the function in edi. Move it in there.
__ mov(edi, result_register());
ParameterCount count(arg_count);
__ InvokeFunction(edi, count, CALL_FUNCTION,
NullCallWrapper(), CALL_AS_METHOD);
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- decrement_stack_height(arg_count + 1);
+ __ jmp(&done);
+
+ __ bind(&proxy);
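+ // Function proxies cannot be entered through InvokeFunction; push the
+ // proxy as the final argument and let Runtime::kCall dispatch it.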
+ __ push(eax);
+ __ CallRuntime(Runtime::kCall, args->length());
+ __ bind(&done);
+
context()->Plug(eax);
}
-void FullCodeGenerator::EmitRegExpConstructResult(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
// Load the arguments on the stack and call the stub.
RegExpConstructResultStub stub;
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 3);
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
VisitForStackValue(args->at(2));
__ CallStub(&stub);
- decrement_stack_height(3);
context()->Plug(eax);
}
-void FullCodeGenerator::EmitSwapElements(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitSwapElements(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 3);
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
@@ -3119,14 +3209,14 @@ void FullCodeGenerator::EmitSwapElements(ZoneList<Expression*>* args) {
__ mov(index_1, Operand(esp, 1 * kPointerSize));
__ mov(index_2, Operand(esp, 0));
__ mov(temp, index_1);
- __ or_(temp, Operand(index_2));
+ __ or_(temp, index_2);
__ JumpIfNotSmi(temp, &slow_case);
// Check that both indices are valid.
__ mov(temp, FieldOperand(object, JSArray::kLengthOffset));
- __ cmp(temp, Operand(index_1));
+ __ cmp(temp, index_1);
__ j(below_equal, &slow_case);
- __ cmp(temp, Operand(index_2));
+ __ cmp(temp, index_2);
__ j(below_equal, &slow_case);
// Bring addresses into index1 and index2.
@@ -3139,16 +3229,35 @@ void FullCodeGenerator::EmitSwapElements(ZoneList<Expression*>* args) {
__ mov(Operand(index_2, 0), object);
__ mov(Operand(index_1, 0), temp);
- Label new_space;
- __ InNewSpace(elements, temp, equal, &new_space);
+ Label no_remembered_set;
+ __ CheckPageFlag(elements,
+ temp,
+ 1 << MemoryChunk::SCAN_ON_SCAVENGE,
+ not_zero,
+ &no_remembered_set,
+ Label::kNear);
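+ // If the page is already flagged to be scanned in its entirety on the
+ // next scavenge, recording individual slots would be redundant.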
+ // Possible optimization: check that both values are smis by or-ing
+ // them together and testing the result against the smi tag mask.
+
+ // We are swapping two objects in an array and the incremental marker never
+ // pauses in the middle of scanning a single object. Therefore the
+ // incremental marker is not disturbed, so we don't need to call the
+ // RecordWrite stub that notifies the incremental marker.
+ __ RememberedSetHelper(elements,
+ index_1,
+ temp,
+ kDontSaveFPRegs,
+ MacroAssembler::kFallThroughAtEnd);
+ __ RememberedSetHelper(elements,
+ index_2,
+ temp,
+ kDontSaveFPRegs,
+ MacroAssembler::kFallThroughAtEnd);
+
+ __ bind(&no_remembered_set);
- __ mov(object, elements);
- __ RecordWriteHelper(object, index_1, temp);
- __ RecordWriteHelper(elements, index_2, temp);
-
- __ bind(&new_space);
// We are done. Drop elements from the stack, and return undefined.
- __ add(Operand(esp), Immediate(3 * kPointerSize));
+ __ add(esp, Immediate(3 * kPointerSize));
__ mov(eax, isolate()->factory()->undefined_value());
__ jmp(&done);
@@ -3156,12 +3265,12 @@ void FullCodeGenerator::EmitSwapElements(ZoneList<Expression*>* args) {
__ CallRuntime(Runtime::kSwapElements, 3);
__ bind(&done);
- decrement_stack_height(3);
context()->Plug(eax);
}
-void FullCodeGenerator::EmitGetFromCache(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(2, args->length());
ASSERT_NE(NULL, args->at(0)->AsLiteral());
@@ -3209,7 +3318,8 @@ void FullCodeGenerator::EmitGetFromCache(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitIsRegExpEquivalent(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsRegExpEquivalent(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(2, args->length());
Register right = eax;
@@ -3221,11 +3331,11 @@ void FullCodeGenerator::EmitIsRegExpEquivalent(ZoneList<Expression*>* args) {
__ pop(left);
Label done, fail, ok;
- __ cmp(left, Operand(right));
+ __ cmp(left, right);
__ j(equal, &ok);
// Fail if either is a non-HeapObject.
__ mov(tmp, left);
- __ and_(Operand(tmp), right);
+ __ and_(tmp, right);
__ JumpIfSmi(tmp, &fail);
__ mov(tmp, FieldOperand(left, HeapObject::kMapOffset));
__ CmpInstanceType(tmp, JS_REGEXP_TYPE);
@@ -3242,12 +3352,12 @@ void FullCodeGenerator::EmitIsRegExpEquivalent(ZoneList<Expression*>* args) {
__ mov(eax, Immediate(isolate()->factory()->true_value()));
__ bind(&done);
- decrement_stack_height();
context()->Plug(eax);
}
-void FullCodeGenerator::EmitHasCachedArrayIndex(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -3265,14 +3375,15 @@ void FullCodeGenerator::EmitHasCachedArrayIndex(ZoneList<Expression*>* args) {
__ test(FieldOperand(eax, String::kHashFieldOffset),
Immediate(String::kContainsCachedArrayIndexMask));
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(zero, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
}
-void FullCodeGenerator::EmitGetCachedArrayIndex(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -3287,11 +3398,12 @@ void FullCodeGenerator::EmitGetCachedArrayIndex(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
Label bailout, done, one_char_separator, long_separator,
non_trivial_array, not_size_one_array, loop,
loop_1, loop_1_condition, loop_2, loop_2_entry, loop_3, loop_3_entry;
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 2);
// We will leave the separator on the stack until the end of the function.
VisitForStackValue(args->at(1));
@@ -3316,7 +3428,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) {
Operand separator_operand = Operand(esp, 2 * kPointerSize);
Operand result_operand = Operand(esp, 1 * kPointerSize);
Operand array_length_operand = Operand(esp, 0);
- __ sub(Operand(esp), Immediate(2 * kPointerSize));
+ __ sub(esp, Immediate(2 * kPointerSize));
__ cld();
// Check that the array is a JSArray
__ JumpIfSmi(array, &bailout);
@@ -3352,7 +3464,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) {
// Live loop registers: index, array_length, string,
// scratch, string_length, elements.
if (FLAG_debug_code) {
- __ cmp(index, Operand(array_length));
+ __ cmp(index, array_length);
__ Assert(less, "No empty arrays here in EmitFastAsciiArrayJoin");
}
__ bind(&loop);
@@ -3370,8 +3482,8 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) {
__ add(string_length,
FieldOperand(string, SeqAsciiString::kLengthOffset));
__ j(overflow, &bailout);
- __ add(Operand(index), Immediate(1));
- __ cmp(index, Operand(array_length));
+ __ add(index, Immediate(1));
+ __ cmp(index, array_length);
__ j(less, &loop);
// If array_length is 1, return elements[0], a string.
@@ -3405,10 +3517,10 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) {
// to string_length.
__ mov(scratch, separator_operand);
__ mov(scratch, FieldOperand(scratch, SeqAsciiString::kLengthOffset));
- __ sub(string_length, Operand(scratch)); // May be negative, temporarily.
+ __ sub(string_length, scratch); // May be negative, temporarily.
__ imul(scratch, array_length_operand);
__ j(overflow, &bailout);
- __ add(string_length, Operand(scratch));
+ __ add(string_length, scratch);
__ j(overflow, &bailout);
__ shr(string_length, 1);
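// string_length now holds the untagged total: the sum of the element
// lengths plus (array_length - 1) copies of the separator.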
@@ -3449,7 +3561,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) {
__ lea(string,
FieldOperand(string, SeqAsciiString::kHeaderSize));
__ CopyBytes(string, result_pos, string_length, scratch);
- __ add(Operand(index), Immediate(1));
+ __ add(index, Immediate(1));
__ bind(&loop_1_condition);
__ cmp(index, array_length_operand);
__ j(less, &loop_1); // End while (index < length).
@@ -3490,7 +3602,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) {
__ lea(string,
FieldOperand(string, SeqAsciiString::kHeaderSize));
__ CopyBytes(string, result_pos, string_length, scratch);
- __ add(Operand(index), Immediate(1));
+ __ add(index, Immediate(1));
__ cmp(index, array_length_operand);
__ j(less, &loop_2); // End while (index < length).
@@ -3531,7 +3643,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) {
__ lea(string,
FieldOperand(string, SeqAsciiString::kHeaderSize));
__ CopyBytes(string, result_pos, string_length, scratch);
- __ add(Operand(index), Immediate(1));
+ __ add(index, Immediate(1));
__ cmp(index, array_length_operand);
__ j(less, &loop_3); // End while (index < length).
@@ -3543,10 +3655,9 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) {
__ bind(&done);
__ mov(eax, result_operand);
// Drop temp values from the stack, and restore context register.
- __ add(Operand(esp), Immediate(3 * kPointerSize));
+ __ add(esp, Immediate(3 * kPointerSize));
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- decrement_stack_height();
context()->Plug(eax);
}
@@ -3566,7 +3677,6 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
// Prepare for calling JS runtime function.
__ mov(eax, GlobalObjectOperand());
__ push(FieldOperand(eax, GlobalObject::kBuiltinsOffset));
- increment_stack_height();
}
// Push the arguments ("left-to-right").
@@ -3588,11 +3698,6 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
// Call the C runtime function.
__ CallRuntime(expr->function(), arg_count);
}
- decrement_stack_height(arg_count);
- if (expr->is_jsruntime()) {
- decrement_stack_height();
- }
-
context()->Plug(eax);
}
@@ -3607,15 +3712,16 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
if (property != NULL) {
VisitForStackValue(property->obj());
VisitForStackValue(property->key());
- __ push(Immediate(Smi::FromInt(strict_mode_flag())));
+ StrictModeFlag strict_mode_flag = (language_mode() == CLASSIC_MODE)
+ ? kNonStrictMode : kStrictMode;
+ __ push(Immediate(Smi::FromInt(strict_mode_flag)));
__ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
- decrement_stack_height(2);
context()->Plug(eax);
} else if (proxy != NULL) {
Variable* var = proxy->var();
// Delete of an unqualified identifier is disallowed in strict mode
// but "delete this" is allowed.
- ASSERT(strict_mode_flag() == kNonStrictMode || var->is_this());
+ ASSERT(language_mode() == CLASSIC_MODE || var->is_this());
if (var->IsUnallocated()) {
__ push(GlobalObjectOperand());
__ push(Immediate(var->name()));
@@ -3657,18 +3763,41 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
// Unary NOT has no side effects so it's only necessary to visit the
// subexpression. Match the optimizing compiler by not branching.
VisitForEffect(expr->expression());
+ } else if (context()->IsTest()) {
+ const TestContext* test = TestContext::cast(context());
+ // The labels are swapped for the recursive call.
+ VisitForControl(expr->expression(),
+ test->false_label(),
+ test->true_label(),
+ test->fall_through());
+ context()->Plug(test->true_label(), test->false_label());
} else {
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
-
- // Notice that the labels are swapped.
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_false, &if_true, &fall_through);
- if (context()->IsTest()) ForwardBailoutToChild(expr);
- VisitForControl(expr->expression(), if_true, if_false, fall_through);
- context()->Plug(if_false, if_true); // Labels swapped.
+ // We handle value contexts explicitly rather than simply visiting
+ // for control and plugging the control flow into the context,
+ // because we need to prepare a pair of extra administrative AST ids
+ // for the optimizing compiler.
+ ASSERT(context()->IsAccumulatorValue() || context()->IsStackValue());
+ Label materialize_true, materialize_false, done;
+ VisitForControl(expr->expression(),
+ &materialize_false,
+ &materialize_true,
+ &materialize_true);
+ __ bind(&materialize_true);
+ PrepareForBailoutForId(expr->MaterializeTrueId(), NO_REGISTERS);
+ if (context()->IsAccumulatorValue()) {
+ __ mov(eax, isolate()->factory()->true_value());
+ } else {
+ __ push(isolate()->factory()->true_value());
+ }
+ __ jmp(&done, Label::kNear);
+ __ bind(&materialize_false);
+ PrepareForBailoutForId(expr->MaterializeFalseId(), NO_REGISTERS);
+ if (context()->IsAccumulatorValue()) {
+ __ mov(eax, isolate()->factory()->false_value());
+ } else {
+ __ push(isolate()->factory()->false_value());
+ }
+ __ bind(&done);
}
break;
}
@@ -3679,7 +3808,6 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
VisitForTypeofValue(expr->expression());
}
__ CallRuntime(Runtime::kTypeof, 1);
- decrement_stack_height();
context()->Plug(eax);
break;
}
@@ -3733,10 +3861,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
// Invalid left-hand sides are rewritten to have a 'throw ReferenceError'
// as the left-hand side.
if (!expr->expression()->IsValidLeftHandSide()) {
- ASSERT(expr->expression()->AsThrow() != NULL);
- VisitInCurrentContext(expr->expression());
- // Visiting Throw does not plug the context.
- context()->Plug(eax);
+ VisitForEffect(expr->expression());
return;
}
@@ -3761,20 +3886,17 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
// Reserve space for result of postfix operation.
if (expr->is_postfix() && !context()->IsEffect()) {
__ push(Immediate(Smi::FromInt(0)));
- increment_stack_height();
}
if (assign_type == NAMED_PROPERTY) {
// Put the object both on the stack and in the accumulator.
VisitForAccumulatorValue(prop->obj());
__ push(eax);
- increment_stack_height();
EmitNamedPropertyLoad(prop);
} else {
VisitForStackValue(prop->obj());
VisitForAccumulatorValue(prop->key());
__ mov(edx, Operand(esp, 0));
__ push(eax);
- increment_stack_height();
EmitKeyedPropertyLoad(prop);
}
}
@@ -3805,7 +3927,6 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
switch (assign_type) {
case VARIABLE:
__ push(eax);
- increment_stack_height();
break;
case NAMED_PROPERTY:
__ mov(Operand(esp, kPointerSize), eax);
@@ -3823,9 +3944,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
if (ShouldInlineSmiCase(expr->op())) {
if (expr->op() == Token::INC) {
- __ add(Operand(eax), Immediate(Smi::FromInt(1)));
+ __ add(eax, Immediate(Smi::FromInt(1)));
} else {
- __ sub(Operand(eax), Immediate(Smi::FromInt(1)));
+ __ sub(eax, Immediate(Smi::FromInt(1)));
}
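// The tagged constant works directly: a smi is the integer shifted left
// by one, so Smi::FromInt(1) is the machine word 2 and overflow on the
// tagged add detects exactly a result outside the smi range.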
__ j(overflow, &stub_call, Label::kNear);
// We could eliminate this smi check if we split the code at
@@ -3835,9 +3956,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ bind(&stub_call);
// Call stub. Undo operation first.
if (expr->op() == Token::INC) {
- __ sub(Operand(eax), Immediate(Smi::FromInt(1)));
+ __ sub(eax, Immediate(Smi::FromInt(1)));
} else {
- __ add(Operand(eax), Immediate(Smi::FromInt(1)));
+ __ add(eax, Immediate(Smi::FromInt(1)));
}
}
@@ -3879,10 +4000,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
case NAMED_PROPERTY: {
__ mov(ecx, prop->key()->AsLiteral()->handle());
__ pop(edx);
- decrement_stack_height();
- Handle<Code> ic = is_strict_mode()
- ? isolate()->builtins()->StoreIC_Initialize_Strict()
- : isolate()->builtins()->StoreIC_Initialize();
+ Handle<Code> ic = is_classic_mode()
+ ? isolate()->builtins()->StoreIC_Initialize()
+ : isolate()->builtins()->StoreIC_Initialize_Strict();
__ call(ic, RelocInfo::CODE_TARGET, expr->id());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
@@ -3897,11 +4017,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
case KEYED_PROPERTY: {
__ pop(ecx);
__ pop(edx);
- decrement_stack_height();
- decrement_stack_height();
- Handle<Code> ic = is_strict_mode()
- ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
- : isolate()->builtins()->KeyedStoreIC_Initialize();
+ Handle<Code> ic = is_classic_mode()
+ ? isolate()->builtins()->KeyedStoreIC_Initialize()
+ : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
__ call(ic, RelocInfo::CODE_TARGET, expr->id());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
@@ -3950,20 +4068,25 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
context()->Plug(eax);
} else {
// This expression cannot throw a reference error at the top level.
- VisitInCurrentContext(expr);
+ VisitInDuplicateContext(expr);
}
}
void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
- Handle<String> check,
- Label* if_true,
- Label* if_false,
- Label* fall_through) {
+ Expression* sub_expr,
+ Handle<String> check) {
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
{ AccumulatorValueContext context(this);
- VisitForTypeofValue(expr);
+ VisitForTypeofValue(sub_expr);
}
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
if (check->Equals(isolate()->heap()->number_symbol())) {
__ JumpIfSmi(eax, if_true);
@@ -3998,8 +4121,11 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
Split(not_zero, if_true, if_false, fall_through);
} else if (check->Equals(isolate()->heap()->function_symbol())) {
__ JumpIfSmi(eax, if_false);
- __ CmpObjectType(eax, FIRST_CALLABLE_SPEC_OBJECT_TYPE, edx);
- Split(above_equal, if_true, if_false, fall_through);
+ STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
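+ // typeof must report "function" for function proxies as well, so the
+ // two callable types are checked for equality individually.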
+ __ CmpObjectType(eax, JS_FUNCTION_TYPE, edx);
+ __ j(equal, if_true);
+ __ CmpInstanceType(edx, JS_FUNCTION_PROXY_TYPE);
+ Split(equal, if_true, if_false, fall_through);
} else if (check->Equals(isolate()->heap()->object_symbol())) {
__ JumpIfSmi(eax, if_false);
if (!FLAG_harmony_typeof) {
@@ -4017,18 +4143,7 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
} else {
if (if_false != fall_through) __ jmp(if_false);
}
-}
-
-
-void FullCodeGenerator::EmitLiteralCompareUndefined(Expression* expr,
- Label* if_true,
- Label* if_false,
- Label* fall_through) {
- VisitForAccumulatorValue(expr);
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
-
- __ cmp(eax, isolate()->factory()->undefined_value());
- Split(equal, if_true, if_false, fall_through);
+ context()->Plug(if_true, if_false);
}
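
The typeof "function" test changes because the callable types no longer form a trailing range in the instance-type enum; with exactly two callable types (per the STATIC_ASSERT), two equality checks replace the old above_equal range test. A self-contained model, with illustrative type values:

    // Only the two-way equality (rather than a single range compare) matters
    // here; the numeric values are stand-ins.
    enum InstanceType { JS_FUNCTION_TYPE = 1, JS_FUNCTION_PROXY_TYPE = 2, OTHER = 3 };

    bool TypeofReportsFunction(InstanceType t) {
      return t == JS_FUNCTION_TYPE || t == JS_FUNCTION_PROXY_TYPE;
    }
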
@@ -4036,9 +4151,12 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
Comment cmnt(masm_, "[ CompareOperation");
SetSourcePosition(expr->position());
+ // First we try a fast inlined version of the compare when one of
+ // the operands is a literal.
+ if (TryLiteralCompare(expr)) return;
+
// Always perform the comparison for its control flow. Pack the result
// into the expression's context after the comparison is performed.
-
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
@@ -4046,21 +4164,13 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
- // First we try a fast inlined version of the compare when one of
- // the operands is a literal.
- if (TryLiteralCompare(expr, if_true, if_false, fall_through)) {
- context()->Plug(if_true, if_false);
- return;
- }
-
Token::Value op = expr->op();
VisitForStackValue(expr->left());
- switch (expr->op()) {
+ switch (op) {
case Token::IN:
VisitForStackValue(expr->right());
__ InvokeBuiltin(Builtins::IN, CALL_FUNCTION);
- decrement_stack_height(2);
- PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
+ PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
__ cmp(eax, isolate()->factory()->true_value());
Split(equal, if_true, if_false, fall_through);
break;
@@ -4069,9 +4179,8 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
VisitForStackValue(expr->right());
InstanceofStub stub(InstanceofStub::kNoFlags);
__ CallStub(&stub);
- decrement_stack_height(2);
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
- __ test(eax, Operand(eax));
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ __ test(eax, eax);
// The stub returns 0 for true.
Split(zero, if_true, if_false, fall_through);
break;
@@ -4084,43 +4193,34 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
case Token::EQ_STRICT:
case Token::EQ:
cc = equal;
- __ pop(edx);
break;
case Token::LT:
cc = less;
- __ pop(edx);
break;
case Token::GT:
-      // Reverse left and right sides to obtain ECMA-262 conversion order.
- cc = less;
- __ mov(edx, result_register());
- __ pop(eax);
+ cc = greater;
break;
case Token::LTE:
-      // Reverse left and right sides to obtain ECMA-262 conversion order.
- cc = greater_equal;
- __ mov(edx, result_register());
- __ pop(eax);
+ cc = less_equal;
break;
case Token::GTE:
cc = greater_equal;
- __ pop(edx);
break;
case Token::IN:
case Token::INSTANCEOF:
default:
UNREACHABLE();
}
- decrement_stack_height();
+ __ pop(edx);
bool inline_smi_code = ShouldInlineSmiCase(op);
JumpPatchSite patch_site(masm_);
if (inline_smi_code) {
Label slow_case;
- __ mov(ecx, Operand(edx));
- __ or_(ecx, Operand(eax));
+ __ mov(ecx, edx);
+ __ or_(ecx, eax);
patch_site.EmitJumpIfNotSmi(ecx, &slow_case, Label::kNear);
- __ cmp(edx, Operand(eax));
+ __ cmp(edx, eax);
Split(cc, if_true, if_false, NULL);
__ bind(&slow_case);
}
@@ -4131,8 +4231,8 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
__ call(ic, RelocInfo::CODE_TARGET, expr->id());
patch_site.EmitPatchInfo();
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
- __ test(eax, Operand(eax));
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ __ test(eax, eax);
Split(cc, if_true, if_false, fall_through);
}
}
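
Here and in CompareIC::ComputeCondition later in the diff, the operand-swapping trick for GT and LTE is retired: left is now always popped into edx with right in eax, and the token maps straight onto greater / less_equal. The two encodings agree, which a few lines of plain C++ can check:

    #include <cassert>

    // Both encodings of 'left > right' agree; the new code simply skips the
    // register shuffle. (Pure illustration, not V8 code.)
    bool GreaterDirect(int left, int right) { return left > right; }
    bool GreaterViaSwap(int left, int right) { return right < left; }

    int main() {
      for (int l = -2; l <= 2; ++l)
        for (int r = -2; r <= 2; ++r)
          assert(GreaterDirect(l, r) == GreaterViaSwap(l, r));
      return 0;
    }
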
@@ -4143,7 +4243,9 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
}
-void FullCodeGenerator::VisitCompareToNull(CompareToNull* expr) {
+void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
+ Expression* sub_expr,
+ NilValue nil) {
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
@@ -4151,15 +4253,20 @@ void FullCodeGenerator::VisitCompareToNull(CompareToNull* expr) {
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
- VisitForAccumulatorValue(expr->expression());
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
-
- __ cmp(eax, isolate()->factory()->null_value());
- if (expr->is_strict()) {
+ VisitForAccumulatorValue(sub_expr);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Handle<Object> nil_value = nil == kNullValue ?
+ isolate()->factory()->null_value() :
+ isolate()->factory()->undefined_value();
+ __ cmp(eax, nil_value);
+ if (expr->op() == Token::EQ_STRICT) {
Split(equal, if_true, if_false, fall_through);
} else {
+ Handle<Object> other_nil_value = nil == kNullValue ?
+ isolate()->factory()->undefined_value() :
+ isolate()->factory()->null_value();
__ j(equal, if_true);
- __ cmp(eax, isolate()->factory()->undefined_value());
+ __ cmp(eax, other_nil_value);
__ j(equal, if_true);
__ JumpIfSmi(eax, if_false);
// It can be an undetectable object.
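
EmitLiteralCompareNil folds x == null and x == undefined into one helper parameterized by NilValue. A model of the semantics the emitted code implements (the Value struct is an illustrative stand-in, not V8 API):

    // Strict equality matches only the named nil; non-strict equality also
    // accepts the other nil and undetectable objects, which are never smis
    // (hence the JumpIfSmi above).
    enum NilValue { kNullValue, kUndefinedValue };
    struct Value { bool is_null, is_undefined, is_smi, is_undetectable; };

    bool CompareToNil(const Value& v, NilValue nil, bool strict) {
      if (strict) return nil == kNullValue ? v.is_null : v.is_undefined;
      return v.is_null || v.is_undefined || (!v.is_smi && v.is_undetectable);
    }
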
@@ -4226,7 +4333,7 @@ void FullCodeGenerator::EnterFinallyBlock() {
// Cook return address on top of stack (smi encoded Code* delta)
ASSERT(!result_register().is(edx));
__ pop(edx);
- __ sub(Operand(edx), Immediate(masm_->CodeObject()));
+ __ sub(edx, Immediate(masm_->CodeObject()));
STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
STATIC_ASSERT(kSmiTag == 0);
__ SmiTag(edx);
@@ -4242,8 +4349,8 @@ void FullCodeGenerator::ExitFinallyBlock() {
// Uncook return address.
__ pop(edx);
__ SmiUntag(edx);
- __ add(Operand(edx), Immediate(masm_->CodeObject()));
- __ jmp(Operand(edx));
+ __ add(edx, Immediate(masm_->CodeObject()));
+ __ jmp(edx);
}
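
Cooking converts the saved return address into a smi-encoded offset from the code object, so a moving GC can relocate the code without fixing up the finally block's stack slot. The arithmetic, modeled with plain integers (shift by one per the STATIC_ASSERTs above):

    #include <cstdint>

    // The return address is kept as a smi offset from the code object:
    // shifted left by one with tag bit zero.
    intptr_t CookReturnAddress(intptr_t ret, intptr_t code_start) {
      return (ret - code_start) << 1;      // SmiTag
    }
    intptr_t UncookReturnAddress(intptr_t cooked, intptr_t code_start) {
      return (cooked >> 1) + code_start;   // SmiUntag, then re-add the base
    }
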
diff --git a/deps/v8/src/ia32/ic-ia32.cc b/deps/v8/src/ia32/ic-ia32.cc
index 9b5cc5640..e93353e5d 100644
--- a/deps/v8/src/ia32/ic-ia32.cc
+++ b/deps/v8/src/ia32/ic-ia32.cc
@@ -212,7 +212,7 @@ static void GenerateDictionaryStore(MacroAssembler* masm,
// Update write barrier. Make sure not to clobber the value.
__ mov(r1, value);
- __ RecordWrite(elements, r0, r1);
+ __ RecordWrite(elements, r0, r1, kDontSaveFPRegs);
}
@@ -326,7 +326,7 @@ static void GenerateFastArrayLoad(MacroAssembler* masm,
// Fast case: Do the load.
STATIC_ASSERT((kPointerSize == 4) && (kSmiTagSize == 1) && (kSmiTag == 0));
__ mov(scratch, FieldOperand(scratch, key, times_2, FixedArray::kHeaderSize));
- __ cmp(Operand(scratch), Immediate(FACTORY->the_hole_value()));
+ __ cmp(scratch, Immediate(FACTORY->the_hole_value()));
// In case the loaded value is the_hole we have to consult GetProperty
// to ensure the prototype chain is searched.
__ j(equal, out_of_range);
@@ -394,8 +394,8 @@ static Operand GenerateMappedArgumentsLookup(MacroAssembler* masm,
// Check if element is in the range of mapped arguments. If not, jump
// to the unmapped lookup with the parameter map in scratch1.
__ mov(scratch2, FieldOperand(scratch1, FixedArray::kLengthOffset));
- __ sub(Operand(scratch2), Immediate(Smi::FromInt(2)));
- __ cmp(key, Operand(scratch2));
+ __ sub(scratch2, Immediate(Smi::FromInt(2)));
+ __ cmp(key, scratch2);
__ j(greater_equal, unmapped_case);
// Load element index and check whether it is the hole.
@@ -432,7 +432,7 @@ static Operand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map());
__ CheckMap(backing_store, fixed_array_map, slow_case, DONT_DO_SMI_CHECK);
__ mov(scratch, FieldOperand(backing_store, FixedArray::kLengthOffset));
- __ cmp(key, Operand(scratch));
+ __ cmp(key, scratch);
__ j(greater_equal, slow_case);
return FieldOperand(backing_store,
key,
@@ -534,7 +534,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ shr(ecx, KeyedLookupCache::kMapHashShift);
__ mov(edi, FieldOperand(eax, String::kHashFieldOffset));
__ shr(edi, String::kHashShift);
- __ xor_(ecx, Operand(edi));
+ __ xor_(ecx, edi);
__ and_(ecx, KeyedLookupCache::kCapacityMask);
// Load the key (consisting of map and symbol) from the cache and
@@ -545,7 +545,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ shl(edi, kPointerSizeLog2 + 1);
__ cmp(ebx, Operand::StaticArray(edi, times_1, cache_keys));
__ j(not_equal, &slow);
- __ add(Operand(edi), Immediate(kPointerSize));
+ __ add(edi, Immediate(kPointerSize));
__ cmp(eax, Operand::StaticArray(edi, times_1, cache_keys));
__ j(not_equal, &slow);
@@ -559,12 +559,12 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ mov(edi,
Operand::StaticArray(ecx, times_pointer_size, cache_field_offsets));
__ movzx_b(ecx, FieldOperand(ebx, Map::kInObjectPropertiesOffset));
- __ sub(edi, Operand(ecx));
+ __ sub(edi, ecx);
__ j(above_equal, &property_array_property);
// Load in-object property.
__ movzx_b(ecx, FieldOperand(ebx, Map::kInstanceSizeOffset));
- __ add(ecx, Operand(edi));
+ __ add(ecx, edi);
__ mov(eax, FieldOperand(edx, ecx, times_pointer_size, 0));
__ IncrementCounter(counters->keyed_load_generic_lookup_cache(), 1);
__ ret(0);
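
The generic keyed load probes a (map, symbol) to field-offset cache whose index mixes the map word and the symbol's hash field, as in the shr/xor/and sequence above. Modeled:

    #include <cstdint>

    // The shift amounts and capacity mask stand in for KeyedLookupCache's
    // real constants; only the mixing shape is the point.
    uint32_t CacheIndex(uint32_t map_word, uint32_t hash_field,
                        uint32_t map_shift, uint32_t hash_shift, uint32_t mask) {
      return ((map_word >> map_shift) ^ (hash_field >> hash_shift)) & mask;
    }
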
@@ -606,14 +606,12 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
Register receiver = edx;
Register index = eax;
- Register scratch1 = ebx;
- Register scratch2 = ecx;
+ Register scratch = ecx;
Register result = eax;
StringCharAtGenerator char_at_generator(receiver,
index,
- scratch1,
- scratch2,
+ scratch,
result,
&miss, // When not a string.
&miss, // When not a number.
@@ -651,8 +649,8 @@ void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
// Check that it has indexed interceptor and access checks
// are not enabled for this object.
__ movzx_b(ecx, FieldOperand(ecx, Map::kBitFieldOffset));
- __ and_(Operand(ecx), Immediate(kSlowCaseBitFieldMask));
- __ cmp(Operand(ecx), Immediate(1 << Map::kHasIndexedInterceptor));
+ __ and_(ecx, Immediate(kSlowCaseBitFieldMask));
+ __ cmp(ecx, Immediate(1 << Map::kHasIndexedInterceptor));
__ j(not_zero, &slow);
// Everything is fine, call runtime.
@@ -710,7 +708,7 @@ void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
__ mov(mapped_location, eax);
__ lea(ecx, mapped_location);
__ mov(edx, eax);
- __ RecordWrite(ebx, ecx, edx);
+ __ RecordWrite(ebx, ecx, edx, kDontSaveFPRegs);
__ Ret();
__ bind(&notin);
// The unmapped lookup expects that the parameter map is in ebx.
@@ -719,7 +717,7 @@ void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
__ mov(unmapped_location, eax);
__ lea(edi, unmapped_location);
__ mov(edx, eax);
- __ RecordWrite(ebx, edi, edx);
+ __ RecordWrite(ebx, edi, edx, kDontSaveFPRegs);
__ Ret();
__ bind(&slow);
GenerateMiss(masm, false);
@@ -734,7 +732,9 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
- Label slow, fast, array, extra;
+ Label slow, fast_object_with_map_check, fast_object_without_map_check;
+ Label fast_double_with_map_check, fast_double_without_map_check;
+ Label check_if_double_array, array, extra;
// Check that the object isn't a smi.
__ JumpIfSmi(edx, &slow);
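
The new label structure exists because 3.7 adds smi-only arrays: a generic keyed store must now pick between three fast elements kinds, escaping to the runtime when the store would force a transition. A hedged model of the dispatch (the ElementsKind names mirror ones used elsewhere in this diff; everything else is illustrative):

    enum ElementsKind { FAST_SMI_ONLY_ELEMENTS, FAST_ELEMENTS, FAST_DOUBLE_ELEMENTS };
    enum Outcome { STORE_SMI, STORE_WITH_WRITE_BARRIER, STORE_AS_DOUBLE, GO_SLOW };

    Outcome ClassifyStore(ElementsKind kind, bool is_smi, bool is_number) {
      switch (kind) {
        case FAST_SMI_ONLY_ELEMENTS:
          return is_smi ? STORE_SMI : GO_SLOW;  // non-smi forces a transition
        case FAST_ELEMENTS:
          return is_smi ? STORE_SMI : STORE_WITH_WRITE_BARRIER;
        case FAST_DOUBLE_ELEMENTS:
          return is_number ? STORE_AS_DOUBLE : GO_SLOW;  // smis count as numbers
      }
      return GO_SLOW;
    }
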
@@ -750,22 +750,18 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
__ CmpInstanceType(edi, JS_ARRAY_TYPE);
__ j(equal, &array);
// Check that the object is some kind of JSObject.
- __ CmpInstanceType(edi, FIRST_JS_RECEIVER_TYPE);
+ __ CmpInstanceType(edi, FIRST_JS_OBJECT_TYPE);
__ j(below, &slow);
- __ CmpInstanceType(edi, JS_PROXY_TYPE);
- __ j(equal, &slow);
- __ CmpInstanceType(edi, JS_FUNCTION_PROXY_TYPE);
- __ j(equal, &slow);
// Object case: Check key against length in the elements array.
// eax: value
// edx: JSObject
// ecx: key (a smi)
- __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
- // Check that the object is in fast mode and writable.
- __ CheckMap(edi, FACTORY->fixed_array_map(), &slow, DONT_DO_SMI_CHECK);
- __ cmp(ecx, FieldOperand(edi, FixedArray::kLengthOffset));
- __ j(below, &fast);
+ // edi: receiver map
+ __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
+ // Check array bounds. Both the key and the length of FixedArray are smis.
+ __ cmp(ecx, FieldOperand(ebx, FixedArray::kLengthOffset));
+ __ j(below, &fast_object_with_map_check);
// Slow case: call runtime.
__ bind(&slow);
@@ -778,16 +774,28 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
// eax: value
// edx: receiver, a JSArray
// ecx: key, a smi.
- // edi: receiver->elements, a FixedArray
+ // ebx: receiver->elements, a FixedArray
+ // edi: receiver map
// flags: compare (ecx, edx.length())
// do not leave holes in the array:
__ j(not_equal, &slow);
- __ cmp(ecx, FieldOperand(edi, FixedArray::kLengthOffset));
+ __ cmp(ecx, FieldOperand(ebx, FixedArray::kLengthOffset));
__ j(above_equal, &slow);
- // Add 1 to receiver->length, and go to fast array write.
+ __ mov(edi, FieldOperand(ebx, HeapObject::kMapOffset));
+ __ cmp(edi, masm->isolate()->factory()->fixed_array_map());
+ __ j(not_equal, &check_if_double_array);
+ // Add 1 to receiver->length, and go to common element store code for Objects.
+ __ add(FieldOperand(edx, JSArray::kLengthOffset),
+ Immediate(Smi::FromInt(1)));
+ __ jmp(&fast_object_without_map_check);
+
+ __ bind(&check_if_double_array);
+ __ cmp(edi, masm->isolate()->factory()->fixed_double_array_map());
+ __ j(not_equal, &slow);
+ // Add 1 to receiver->length, and go to common element store code for doubles.
__ add(FieldOperand(edx, JSArray::kLengthOffset),
Immediate(Smi::FromInt(1)));
- __ jmp(&fast);
+ __ jmp(&fast_double_without_map_check);
// Array case: Get the length and the elements array from the JS
// array. Check that the array is in fast mode (and writable); if it
@@ -796,34 +804,64 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
// eax: value
// edx: receiver, a JSArray
// ecx: key, a smi.
- __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
- __ CheckMap(edi, FACTORY->fixed_array_map(), &slow, DONT_DO_SMI_CHECK);
+ // edi: receiver map
+ __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
- // Check the key against the length in the array, compute the
- // address to store into and fall through to fast case.
+ // Check the key against the length in the array and fall through to the
+ // common store code.
__ cmp(ecx, FieldOperand(edx, JSArray::kLengthOffset)); // Compare smis.
__ j(above_equal, &extra);
- // Fast case: Do the store.
- __ bind(&fast);
+  // Fast case: Do the store, could be either Object or double.
+ __ bind(&fast_object_with_map_check);
// eax: value
// ecx: key (a smi)
// edx: receiver
- // edi: FixedArray receiver->elements
- __ mov(CodeGenerator::FixedArrayElementOperand(edi, ecx), eax);
+ // ebx: FixedArray receiver->elements
+ // edi: receiver map
+ __ mov(edi, FieldOperand(ebx, HeapObject::kMapOffset));
+ __ cmp(edi, masm->isolate()->factory()->fixed_array_map());
+ __ j(not_equal, &fast_double_with_map_check);
+ __ bind(&fast_object_without_map_check);
+ // Smi stores don't require further checks.
+ Label non_smi_value;
+ __ JumpIfNotSmi(eax, &non_smi_value);
+  // It's irrelevant whether the array is smi-only or not when writing a smi.
+ __ mov(CodeGenerator::FixedArrayElementOperand(ebx, ecx), eax);
+ __ ret(0);
+
+ __ bind(&non_smi_value);
+ // Escape to slow case when writing non-smi into smi-only array.
+ __ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
+ __ CheckFastObjectElements(edi, &slow, Label::kNear);
+
+ // Fast elements array, store the value to the elements backing store.
+ __ mov(CodeGenerator::FixedArrayElementOperand(ebx, ecx), eax);
// Update write barrier for the elements array address.
- __ mov(edx, Operand(eax));
- __ RecordWrite(edi, 0, edx, ecx);
+ __ mov(edx, eax); // Preserve the value which is returned.
+ __ RecordWriteArray(
+ ebx, edx, ecx, kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+ __ ret(0);
+
+ __ bind(&fast_double_with_map_check);
+ // Check for fast double array case. If this fails, call through to the
+ // runtime.
+ __ cmp(edi, masm->isolate()->factory()->fixed_double_array_map());
+ __ j(not_equal, &slow);
+ __ bind(&fast_double_without_map_check);
+ // If the value is a number, store it as a double in the FastDoubleElements
+ // array.
+ __ StoreNumberToDoubleElements(eax, ebx, ecx, edx, xmm0, &slow, false);
__ ret(0);
}
// The generated code does not accept smi keys.
// The generated code falls through if both probes miss.
-static void GenerateMonomorphicCacheProbe(MacroAssembler* masm,
- int argc,
- Code::Kind kind,
- Code::ExtraICState extra_ic_state) {
+void CallICBase::GenerateMonomorphicCacheProbe(MacroAssembler* masm,
+ int argc,
+ Code::Kind kind,
+ Code::ExtraICState extra_state) {
// ----------- S t a t e -------------
// -- ecx : name
// -- edx : receiver
@@ -833,11 +871,11 @@ static void GenerateMonomorphicCacheProbe(MacroAssembler* masm,
// Probe the stub cache.
Code::Flags flags = Code::ComputeFlags(kind,
MONOMORPHIC,
- extra_ic_state,
+ extra_state,
NORMAL,
argc);
- Isolate::Current()->stub_cache()->GenerateProbe(masm, flags, edx, ecx, ebx,
- eax);
+ Isolate* isolate = masm->isolate();
+ isolate->stub_cache()->GenerateProbe(masm, flags, edx, ecx, ebx, eax);
// If the stub cache probing failed, the receiver might be a value.
// For value objects, we use the map of the prototype objects for
@@ -863,9 +901,9 @@ static void GenerateMonomorphicCacheProbe(MacroAssembler* masm,
// Check for boolean.
__ bind(&non_string);
- __ cmp(edx, FACTORY->true_value());
+ __ cmp(edx, isolate->factory()->true_value());
__ j(equal, &boolean);
- __ cmp(edx, FACTORY->false_value());
+ __ cmp(edx, isolate->factory()->false_value());
__ j(not_equal, &miss);
__ bind(&boolean);
StubCompiler::GenerateLoadGlobalFunctionPrototype(
@@ -873,8 +911,7 @@ static void GenerateMonomorphicCacheProbe(MacroAssembler* masm,
// Probe the stub cache for the value object.
__ bind(&probe);
- Isolate::Current()->stub_cache()->GenerateProbe(masm, flags, edx, ecx, ebx,
- no_reg);
+ isolate->stub_cache()->GenerateProbe(masm, flags, edx, ecx, ebx, no_reg);
__ bind(&miss);
}
@@ -904,8 +941,9 @@ static void GenerateFunctionTailCall(MacroAssembler* masm,
NullCallWrapper(), CALL_AS_METHOD);
}
+
// The generated code falls through if the call should be handled by runtime.
-static void GenerateCallNormal(MacroAssembler* masm, int argc) {
+void CallICBase::GenerateNormal(MacroAssembler* masm, int argc) {
// ----------- S t a t e -------------
// -- ecx : name
// -- esp[0] : return address
@@ -929,10 +967,10 @@ static void GenerateCallNormal(MacroAssembler* masm, int argc) {
}
-static void GenerateCallMiss(MacroAssembler* masm,
- int argc,
- IC::UtilityId id,
- Code::ExtraICState extra_ic_state) {
+void CallICBase::GenerateMiss(MacroAssembler* masm,
+ int argc,
+ IC::UtilityId id,
+ Code::ExtraICState extra_state) {
// ----------- S t a t e -------------
// -- ecx : name
// -- esp[0] : return address
@@ -951,22 +989,22 @@ static void GenerateCallMiss(MacroAssembler* masm,
// Get the receiver of the function from the stack; 1 ~ return address.
__ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
- // Enter an internal frame.
- __ EnterInternalFrame();
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
- // Push the receiver and the name of the function.
- __ push(edx);
- __ push(ecx);
+ // Push the receiver and the name of the function.
+ __ push(edx);
+ __ push(ecx);
- // Call the entry.
- CEntryStub stub(1);
- __ mov(eax, Immediate(2));
- __ mov(ebx, Immediate(ExternalReference(IC_Utility(id), masm->isolate())));
- __ CallStub(&stub);
+ // Call the entry.
+ CEntryStub stub(1);
+ __ mov(eax, Immediate(2));
+ __ mov(ebx, Immediate(ExternalReference(IC_Utility(id), masm->isolate())));
+ __ CallStub(&stub);
- // Move result to edi and exit the internal frame.
- __ mov(edi, eax);
- __ LeaveInternalFrame();
+ // Move result to edi and exit the internal frame.
+ __ mov(edi, eax);
+ }
// Check if the receiver is a global object of some sort.
// This can happen only for regular CallIC but not KeyedCallIC.
@@ -989,7 +1027,7 @@ static void GenerateCallMiss(MacroAssembler* masm,
}
// Invoke the function.
- CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state)
+ CallKind call_kind = CallICBase::Contextual::decode(extra_state)
? CALL_AS_FUNCTION
: CALL_AS_METHOD;
ParameterCount actual(argc);
@@ -1003,7 +1041,7 @@ static void GenerateCallMiss(MacroAssembler* masm,
void CallIC::GenerateMegamorphic(MacroAssembler* masm,
int argc,
- Code::ExtraICState extra_ic_state) {
+ Code::ExtraICState extra_state) {
// ----------- S t a t e -------------
// -- ecx : name
// -- esp[0] : return address
@@ -1014,38 +1052,10 @@ void CallIC::GenerateMegamorphic(MacroAssembler* masm,
// Get the receiver of the function from the stack; 1 ~ return address.
__ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
- GenerateMonomorphicCacheProbe(masm, argc, Code::CALL_IC, extra_ic_state);
-
- GenerateMiss(masm, argc, extra_ic_state);
-}
-
-
-void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- ...
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
-
- GenerateCallNormal(masm, argc);
- GenerateMiss(masm, argc, Code::kNoExtraICState);
-}
-
-
-void CallIC::GenerateMiss(MacroAssembler* masm,
- int argc,
- Code::ExtraICState extra_ic_state) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- ...
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
+ CallICBase::GenerateMonomorphicCacheProbe(masm, argc, Code::CALL_IC,
+ extra_state);
- GenerateCallMiss(masm, argc, IC::kCallIC_Miss, extra_ic_state);
+ GenerateMiss(masm, argc, extra_state);
}
@@ -1111,13 +1121,17 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
// This branch is taken when calling KeyedCallIC_Miss is neither required
// nor beneficial.
__ IncrementCounter(counters->keyed_call_generic_slow_load(), 1);
- __ EnterInternalFrame();
- __ push(ecx); // save the key
- __ push(edx); // pass the receiver
- __ push(ecx); // pass the key
- __ CallRuntime(Runtime::kKeyedGetProperty, 2);
- __ pop(ecx); // restore the key
- __ LeaveInternalFrame();
+
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ push(ecx); // save the key
+ __ push(edx); // pass the receiver
+ __ push(ecx); // pass the key
+ __ CallRuntime(Runtime::kKeyedGetProperty, 2);
+ __ pop(ecx); // restore the key
+ // Leave the internal frame.
+ }
+
__ mov(edi, eax);
__ jmp(&do_call);
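
EnterInternalFrame/LeaveInternalFrame pairs are converted throughout this diff to FrameScope, which ties frame teardown to C++ scope exit so an early return cannot leak a frame. An illustrative RAII shape (not V8's exact class; EnterFrame/LeaveFrame are real MacroAssembler methods):

    // StackFrame::MANUAL, used later in this diff, asks the scope not to emit
    // any frame code itself.
    class FrameScope {
     public:
      FrameScope(MacroAssembler* masm, StackFrame::Type type)
          : masm_(masm), type_(type) {
        if (type_ != StackFrame::MANUAL) masm_->EnterFrame(type_);
      }
      ~FrameScope() {
        if (type_ != StackFrame::MANUAL) masm_->LeaveFrame(type_);
      }
     private:
      MacroAssembler* masm_;
      StackFrame::Type type_;
    };
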
@@ -1143,10 +1157,8 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
__ bind(&lookup_monomorphic_cache);
__ IncrementCounter(counters->keyed_call_generic_lookup_cache(), 1);
- GenerateMonomorphicCacheProbe(masm,
- argc,
- Code::KEYED_CALL_IC,
- Code::kNoExtraICState);
+ CallICBase::GenerateMonomorphicCacheProbe(masm, argc, Code::KEYED_CALL_IC,
+ Code::kNoExtraICState);
// Fall through on miss.
__ bind(&slow_call);
@@ -1209,25 +1221,12 @@ void KeyedCallIC::GenerateNormal(MacroAssembler* masm, int argc) {
__ JumpIfSmi(ecx, &miss);
Condition cond = masm->IsObjectStringType(ecx, eax, eax);
__ j(NegateCondition(cond), &miss);
- GenerateCallNormal(masm, argc);
+ CallICBase::GenerateNormal(masm, argc);
__ bind(&miss);
GenerateMiss(masm, argc);
}
-void KeyedCallIC::GenerateMiss(MacroAssembler* masm, int argc) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- ...
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
-
- GenerateCallMiss(masm, argc, IC::kKeyedCallIC_Miss, Code::kNoExtraICState);
-}
-
-
void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : receiver
@@ -1536,6 +1535,51 @@ void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
}
+void KeyedStoreIC::GenerateTransitionElementsSmiToDouble(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- ebx : target map
+ // -- edx : receiver
+ // -- esp[0] : return address
+ // -----------------------------------
+ // Must return the modified receiver in eax.
+ if (!FLAG_trace_elements_transitions) {
+ Label fail;
+ ElementsTransitionGenerator::GenerateSmiOnlyToDouble(masm, &fail);
+ __ mov(eax, edx);
+ __ Ret();
+ __ bind(&fail);
+ }
+
+ __ pop(ebx);
+ __ push(edx);
+ __ push(ebx); // return address
+ __ TailCallRuntime(Runtime::kTransitionElementsSmiToDouble, 1, 1);
+}
+
+
+void KeyedStoreIC::GenerateTransitionElementsDoubleToObject(
+ MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- ebx : target map
+ // -- edx : receiver
+ // -- esp[0] : return address
+ // -----------------------------------
+ // Must return the modified receiver in eax.
+ if (!FLAG_trace_elements_transitions) {
+ Label fail;
+ ElementsTransitionGenerator::GenerateDoubleToObject(masm, &fail);
+ __ mov(eax, edx);
+ __ Ret();
+ __ bind(&fail);
+ }
+
+ __ pop(ebx);
+ __ push(edx);
+ __ push(ebx); // return address
+ __ TailCallRuntime(Runtime::kTransitionElementsDoubleToObject, 1, 1);
+}
+
+
#undef __
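
Both transition stubs above end with the same ia32 tail-call idiom: pop the return address, push the receiver as the runtime argument, and push the return address back on top. Modeled with an explicit stack (the back of the vector plays the stack top; purely illustrative):

    #include <cstdint>
    #include <vector>

    // After this runs, the tail-called runtime function sees one argument and
    // still returns directly to the original caller.
    void InsertArgUnderReturnAddress(std::vector<intptr_t>* stack,
                                     intptr_t receiver) {
      intptr_t ret_addr = stack->back();   // pop ebx
      stack->pop_back();
      stack->push_back(receiver);          // push edx
      stack->push_back(ret_addr);          // push ebx (return address on top)
    }
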
@@ -1547,11 +1591,9 @@ Condition CompareIC::ComputeCondition(Token::Value op) {
case Token::LT:
return less;
case Token::GT:
- // Reverse left and right operands to obtain ECMA-262 conversion order.
- return less;
+ return greater;
case Token::LTE:
- // Reverse left and right operands to obtain ECMA-262 conversion order.
- return greater_equal;
+ return less_equal;
case Token::GTE:
return greater_equal;
default:
diff --git a/deps/v8/src/ia32/lithium-codegen-ia32.cc b/deps/v8/src/ia32/lithium-codegen-ia32.cc
index 4e3ea9816..d5ef4d95a 100644
--- a/deps/v8/src/ia32/lithium-codegen-ia32.cc
+++ b/deps/v8/src/ia32/lithium-codegen-ia32.cc
@@ -33,6 +33,7 @@
#include "code-stubs.h"
#include "deoptimizer.h"
#include "stub-cache.h"
+#include "codegen.h"
namespace v8 {
namespace internal {
@@ -44,22 +45,22 @@ class SafepointGenerator : public CallWrapper {
public:
SafepointGenerator(LCodeGen* codegen,
LPointerMap* pointers,
- int deoptimization_index)
+ Safepoint::DeoptMode mode)
: codegen_(codegen),
pointers_(pointers),
- deoptimization_index_(deoptimization_index) {}
+ deopt_mode_(mode) {}
virtual ~SafepointGenerator() { }
virtual void BeforeCall(int call_size) const {}
virtual void AfterCall() const {
- codegen_->RecordSafepoint(pointers_, deoptimization_index_);
+ codegen_->RecordSafepoint(pointers_, deopt_mode_);
}
private:
LCodeGen* codegen_;
LPointerMap* pointers_;
- int deoptimization_index_;
+ Safepoint::DeoptMode deopt_mode_;
};
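
Call sites now state a Safepoint::DeoptMode instead of threading a concrete deoptimization index through every safepoint; for kLazyDeopt the pc offset is captured when the environment is registered. The mode-to-pc mapping, modeled:

    // Mirrors '(mode == Safepoint::kLazyDeopt) ? pc_offset : -1' in the
    // RegisterEnvironmentForDeoptimization hunk below; -1 marks environments
    // that no lazy deopt can target.
    enum DeoptMode { kNoLazyDeopt, kLazyDeopt };

    int RecordedPcOffset(DeoptMode mode, int current_pc_offset) {
      return mode == kLazyDeopt ? current_pc_offset : -1;
    }
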
@@ -70,6 +71,17 @@ bool LCodeGen::GenerateCode() {
ASSERT(is_unused());
status_ = GENERATING;
CpuFeatures::Scope scope(SSE2);
+
+ CodeStub::GenerateFPStubs();
+
+ // Open a frame scope to indicate that there is a frame on the stack. The
+ // MANUAL indicates that the scope shouldn't actually generate code to set up
+ // the frame (that is done in GeneratePrologue).
+ FrameScope frame_scope(masm_, StackFrame::MANUAL);
+
+ dynamic_frame_alignment_ = chunk()->num_double_slots() > 2 ||
+ info()->osr_ast_id() != AstNode::kNoNumber;
+
return GeneratePrologue() &&
GenerateBody() &&
GenerateDeferredCode() &&
@@ -133,7 +145,7 @@ bool LCodeGen::GeneratePrologue() {
// with undefined when called as functions (without an explicit
// receiver object). ecx is zero for method calls and non-zero for
// function calls.
- if (info_->is_strict_mode() || info_->is_native()) {
+ if (!info_->is_classic_mode() || info_->is_native()) {
Label ok;
__ test(ecx, Operand(ecx));
__ j(zero, &ok, Label::kNear);
@@ -144,6 +156,29 @@ bool LCodeGen::GeneratePrologue() {
__ bind(&ok);
}
+ if (dynamic_frame_alignment_) {
+ Label do_not_pad, align_loop;
+ STATIC_ASSERT(kDoubleSize == 2 * kPointerSize);
+ // Align esp to a multiple of 2 * kPointerSize.
+ __ test(esp, Immediate(kPointerSize));
+ __ j(zero, &do_not_pad, Label::kNear);
+ __ push(Immediate(0));
+ __ mov(ebx, esp);
+ // Copy arguments, receiver, and return address.
+ __ mov(ecx, Immediate(scope()->num_parameters() + 2));
+
+ __ bind(&align_loop);
+ __ mov(eax, Operand(ebx, 1 * kPointerSize));
+ __ mov(Operand(ebx, 0), eax);
+ __ add(Operand(ebx), Immediate(kPointerSize));
+ __ dec(ecx);
+ __ j(not_zero, &align_loop, Label::kNear);
+ __ mov(Operand(ebx, 0),
+ Immediate(isolate()->factory()->frame_alignment_marker()));
+
+ __ bind(&do_not_pad);
+ }
+
__ push(ebp); // Caller's frame pointer.
__ mov(ebp, esp);
__ push(esi); // Callee's context.
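
Dynamic frame alignment pushes at most one padding word so that esp becomes a multiple of 2 * kPointerSize before the frame is built, then slides the arguments, receiver, and return address down one slot and plants a marker in the vacated slot (checked again in DoReturn below). The parity test, modeled:

    #include <cstdint>

    // Matches 'test esp, Immediate(kPointerSize)' above: one padding word is
    // enough because kDoubleSize == 2 * kPointerSize (asserted in the hunk).
    bool NeedsAlignmentPadding(uintptr_t esp, uintptr_t pointer_size) {
      return (esp & pointer_size) != 0;
    }
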
@@ -187,7 +222,7 @@ bool LCodeGen::GeneratePrologue() {
} else {
__ CallRuntime(Runtime::kNewFunctionContext, 1);
}
- RecordSafepoint(Safepoint::kNoDeoptimizationIndex);
+ RecordSafepoint(Safepoint::kNoLazyDeopt);
// Context is returned in both eax and esi. It replaces the context
// passed to us. It's saved in the stack and kept live in esi.
__ mov(Operand(ebp, StandardFrameConstants::kContextOffset), esi);
@@ -204,11 +239,12 @@ bool LCodeGen::GeneratePrologue() {
// Store it in the context.
int context_offset = Context::SlotOffset(var->index());
__ mov(Operand(esi, context_offset), eax);
- // Update the write barrier. This clobbers all involved
- // registers, so we have to use a third register to avoid
- // clobbering esi.
- __ mov(ecx, esi);
- __ RecordWrite(ecx, context_offset, eax, ebx);
+ // Update the write barrier. This clobbers eax and ebx.
+ __ RecordWriteContextSlot(esi,
+ context_offset,
+ eax,
+ ebx,
+ kDontSaveFPRegs);
}
}
Comment(";;; End allocate local context");
@@ -241,35 +277,23 @@ bool LCodeGen::GenerateBody() {
instr->CompileToNative(this);
}
}
+ EnsureSpaceForLazyDeopt();
return !is_aborted();
}
-LInstruction* LCodeGen::GetNextInstruction() {
- if (current_instruction_ < instructions_->length() - 1) {
- return instructions_->at(current_instruction_ + 1);
- } else {
- return NULL;
- }
-}
-
-
bool LCodeGen::GenerateDeferredCode() {
ASSERT(is_generating());
if (deferred_.length() > 0) {
for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
LDeferredCode* code = deferred_[i];
__ bind(code->entry());
+ Comment(";;; Deferred code @%d: %s.",
+ code->instruction_index(),
+ code->instr()->Mnemonic());
code->Generate();
__ jmp(code->exit());
}
-
-    // Pad code to ensure that the last piece of deferred code has
-    // room for lazy bailout.
- while ((masm()->pc_offset() - LastSafepointEnd())
- < Deoptimizer::patch_size()) {
- __ nop();
- }
}
// Deferred code is the last part of the instruction sequence. Mark
@@ -317,6 +341,12 @@ int LCodeGen::ToInteger32(LConstantOperand* op) const {
}
+double LCodeGen::ToDouble(LConstantOperand* op) const {
+ Handle<Object> value = chunk_->LookupLiteral(op);
+ return value->Number();
+}
+
+
Immediate LCodeGen::ToImmediate(LOperand* op) {
LConstantOperand* const_op = LConstantOperand::cast(op);
Handle<Object> literal = chunk_->LookupLiteral(const_op);
@@ -442,10 +472,8 @@ void LCodeGen::CallCodeGeneric(Handle<Code> code,
ASSERT(instr != NULL);
LPointerMap* pointers = instr->pointer_map();
RecordPosition(pointers->position());
-
__ call(code, mode);
-
- RegisterLazyDeoptimization(instr, safepoint_mode);
+ RecordSafepointWithLazyDeopt(instr, safepoint_mode);
// Signal that we don't inline smi code before these stubs in the
// optimizing code generator.
@@ -473,7 +501,7 @@ void LCodeGen::CallRuntime(const Runtime::Function* fun,
__ CallRuntime(fun, argc);
- RegisterLazyDeoptimization(instr, RECORD_SIMPLE_SAFEPOINT);
+ RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
}
@@ -481,49 +509,28 @@ void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
int argc,
LInstruction* instr,
LOperand* context) {
- ASSERT(context->IsRegister() || context->IsStackSlot());
if (context->IsRegister()) {
if (!ToRegister(context).is(esi)) {
__ mov(esi, ToRegister(context));
}
- } else {
- // Context is stack slot.
+ } else if (context->IsStackSlot()) {
__ mov(esi, ToOperand(context));
+ } else if (context->IsConstantOperand()) {
+ Handle<Object> literal =
+ chunk_->LookupLiteral(LConstantOperand::cast(context));
+ LoadHeapObject(esi, Handle<Context>::cast(literal));
+ } else {
+ UNREACHABLE();
}
__ CallRuntimeSaveDoubles(id);
RecordSafepointWithRegisters(
- instr->pointer_map(), argc, Safepoint::kNoDeoptimizationIndex);
+ instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
}
-void LCodeGen::RegisterLazyDeoptimization(LInstruction* instr,
- SafepointMode safepoint_mode) {
- // Create the environment to bailout to. If the call has side effects
- // execution has to continue after the call otherwise execution can continue
- // from a previous bailout point repeating the call.
- LEnvironment* deoptimization_environment;
- if (instr->HasDeoptimizationEnvironment()) {
- deoptimization_environment = instr->deoptimization_environment();
- } else {
- deoptimization_environment = instr->environment();
- }
-
- RegisterEnvironmentForDeoptimization(deoptimization_environment);
- if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
- RecordSafepoint(instr->pointer_map(),
- deoptimization_environment->deoptimization_index());
- } else {
- ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
- RecordSafepointWithRegisters(
- instr->pointer_map(),
- 0,
- deoptimization_environment->deoptimization_index());
- }
-}
-
-
-void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment) {
+void LCodeGen::RegisterEnvironmentForDeoptimization(
+ LEnvironment* environment, Safepoint::DeoptMode mode) {
if (!environment->HasBeenRegistered()) {
// Physical stack frame layout:
// -x ............. -4 0 ..................................... y
@@ -545,14 +552,17 @@ void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment) {
Translation translation(&translations_, frame_count);
WriteTranslation(environment, &translation);
int deoptimization_index = deoptimizations_.length();
- environment->Register(deoptimization_index, translation.index());
+ int pc_offset = masm()->pc_offset();
+ environment->Register(deoptimization_index,
+ translation.index(),
+ (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
deoptimizations_.Add(environment);
}
}
void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
- RegisterEnvironmentForDeoptimization(environment);
+ RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
ASSERT(environment->HasBeenRegistered());
int id = environment->deoptimization_index();
Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER);
@@ -632,6 +642,7 @@ void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
data->SetArgumentsStackHeight(i,
Smi::FromInt(env->arguments_stack_height()));
+ data->SetPc(i, Smi::FromInt(env->pc_offset()));
}
code->set_deoptimization_data(*data);
}
@@ -663,15 +674,27 @@ void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
}
+void LCodeGen::RecordSafepointWithLazyDeopt(
+ LInstruction* instr, SafepointMode safepoint_mode) {
+ if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
+ RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
+ } else {
+ ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 0, Safepoint::kLazyDeopt);
+ }
+}
+
+
void LCodeGen::RecordSafepoint(
LPointerMap* pointers,
Safepoint::Kind kind,
int arguments,
- int deoptimization_index) {
+ Safepoint::DeoptMode deopt_mode) {
ASSERT(kind == expected_safepoint_kind_);
- const ZoneList<LOperand*>* operands = pointers->operands();
- Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
- kind, arguments, deoptimization_index);
+ const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
+ Safepoint safepoint =
+ safepoints_.DefineSafepoint(masm(), kind, arguments, deopt_mode);
for (int i = 0; i < operands->length(); i++) {
LOperand* pointer = operands->at(i);
if (pointer->IsStackSlot()) {
@@ -684,22 +707,21 @@ void LCodeGen::RecordSafepoint(
void LCodeGen::RecordSafepoint(LPointerMap* pointers,
- int deoptimization_index) {
- RecordSafepoint(pointers, Safepoint::kSimple, 0, deoptimization_index);
+ Safepoint::DeoptMode mode) {
+ RecordSafepoint(pointers, Safepoint::kSimple, 0, mode);
}
-void LCodeGen::RecordSafepoint(int deoptimization_index) {
+void LCodeGen::RecordSafepoint(Safepoint::DeoptMode mode) {
LPointerMap empty_pointers(RelocInfo::kNoPosition);
- RecordSafepoint(&empty_pointers, deoptimization_index);
+ RecordSafepoint(&empty_pointers, mode);
}
void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
int arguments,
- int deoptimization_index) {
- RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments,
- deoptimization_index);
+ Safepoint::DeoptMode mode) {
+ RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, mode);
}
@@ -734,12 +756,6 @@ void LCodeGen::DoGap(LGap* gap) {
LParallelMove* move = gap->GetParallelMove(inner_pos);
if (move != NULL) DoParallelMove(move);
}
-
- LInstruction* next = GetNextInstruction();
- if (next != NULL && next->IsLazyBailout()) {
- int pc = masm()->pc_offset();
- safepoints_.SetPcAfterGap(pc);
- }
}
@@ -1200,8 +1216,13 @@ void LCodeGen::DoConstantD(LConstantD* instr) {
void LCodeGen::DoConstantT(LConstantT* instr) {
- ASSERT(instr->result()->IsRegister());
- __ Set(ToRegister(instr->result()), Immediate(instr->value()));
+ Register reg = ToRegister(instr->result());
+ Handle<Object> handle = instr->value();
+ if (handle->IsHeapObject()) {
+ LoadHeapObject(reg, Handle<HeapObject>::cast(handle));
+ } else {
+ __ Set(reg, Immediate(handle));
+ }
}
@@ -1527,32 +1548,40 @@ Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
}
-void LCodeGen::EmitCmpI(LOperand* left, LOperand* right) {
- if (right->IsConstantOperand()) {
- __ cmp(ToOperand(left), ToImmediate(right));
- } else {
- __ cmp(ToRegister(left), ToOperand(right));
- }
-}
-
-
void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
LOperand* left = instr->InputAt(0);
LOperand* right = instr->InputAt(1);
int false_block = chunk_->LookupDestination(instr->false_block_id());
int true_block = chunk_->LookupDestination(instr->true_block_id());
+ Condition cc = TokenToCondition(instr->op(), instr->is_double());
- if (instr->is_double()) {
- // Don't base result on EFLAGS when a NaN is involved. Instead
- // jump to the false block.
- __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
- __ j(parity_even, chunk_->GetAssemblyLabel(false_block));
+ if (left->IsConstantOperand() && right->IsConstantOperand()) {
+ // We can statically evaluate the comparison.
+ double left_val = ToDouble(LConstantOperand::cast(left));
+ double right_val = ToDouble(LConstantOperand::cast(right));
+ int next_block =
+ EvalComparison(instr->op(), left_val, right_val) ? true_block
+ : false_block;
+ EmitGoto(next_block);
} else {
- EmitCmpI(left, right);
+ if (instr->is_double()) {
+ // Don't base result on EFLAGS when a NaN is involved. Instead
+ // jump to the false block.
+ __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
+ __ j(parity_even, chunk_->GetAssemblyLabel(false_block));
+ } else {
+ if (right->IsConstantOperand()) {
+ __ cmp(ToRegister(left), ToImmediate(right));
+ } else if (left->IsConstantOperand()) {
+ __ cmp(ToOperand(right), ToImmediate(left));
+ // We transposed the operands. Reverse the condition.
+ cc = ReverseCondition(cc);
+ } else {
+ __ cmp(ToRegister(left), ToOperand(right));
+ }
+ }
+ EmitBranch(true_block, false_block, cc);
}
-
- Condition cc = TokenToCondition(instr->op(), instr->is_double());
- EmitBranch(true_block, false_block, cc);
}
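
When both operands are constants, the compare now folds away entirely: EvalComparison decides the branch at code-generation time and EmitGoto jumps straight to the surviving block. EvalComparison itself is not shown in this diff; a plausible shape:

    // With both operands constant, the branch is decided here and the runtime
    // compare is never emitted.
    enum CompareOp { OP_EQ, OP_LT, OP_GT, OP_LTE, OP_GTE };

    bool EvalComparison(CompareOp op, double left, double right) {
      switch (op) {
        case OP_EQ:  return left == right;
        case OP_LT:  return left < right;
        case OP_GT:  return left > right;
        case OP_LTE: return left <= right;
        case OP_GTE: return left >= right;
      }
      return false;
    }
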
@@ -1577,23 +1606,33 @@ void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) {
}
-void LCodeGen::DoIsNullAndBranch(LIsNullAndBranch* instr) {
+void LCodeGen::DoIsNilAndBranch(LIsNilAndBranch* instr) {
Register reg = ToRegister(instr->InputAt(0));
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
- // TODO(fsc): If the expression is known to be a smi, then it's
- // definitely not null. Jump to the false block.
+ // If the expression is known to be untagged or a smi, then it's definitely
+  // not null, and it can't be an undetectable object.
+ if (instr->hydrogen()->representation().IsSpecialization() ||
+ instr->hydrogen()->type().IsSmi()) {
+ EmitGoto(false_block);
+ return;
+ }
int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- __ cmp(reg, factory()->null_value());
- if (instr->is_strict()) {
+ Handle<Object> nil_value = instr->nil() == kNullValue ?
+ factory()->null_value() :
+ factory()->undefined_value();
+ __ cmp(reg, nil_value);
+ if (instr->kind() == kStrictEquality) {
EmitBranch(true_block, false_block, equal);
} else {
+ Handle<Object> other_nil_value = instr->nil() == kNullValue ?
+ factory()->undefined_value() :
+ factory()->null_value();
Label* true_label = chunk_->GetAssemblyLabel(true_block);
Label* false_label = chunk_->GetAssemblyLabel(false_block);
__ j(equal, true_label);
- __ cmp(reg, factory()->undefined_value());
+ __ cmp(reg, other_nil_value);
__ j(equal, true_label);
__ JumpIfSmi(reg, false_label);
// Check for undetectable objects by looking in the bit field in
@@ -1645,6 +1684,31 @@ void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
}
+Condition LCodeGen::EmitIsString(Register input,
+ Register temp1,
+ Label* is_not_string) {
+ __ JumpIfSmi(input, is_not_string);
+
+ Condition cond = masm_->IsObjectStringType(input, temp1, temp1);
+
+ return cond;
+}
+
+
+void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
+ Register reg = ToRegister(instr->InputAt(0));
+ Register temp = ToRegister(instr->TempAt(0));
+
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
+ Label* false_label = chunk_->GetAssemblyLabel(false_block);
+
+ Condition true_cond = EmitIsString(reg, temp, false_label);
+
+ EmitBranch(true_block, false_block, true_cond);
+}
+
+
void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
Operand input = ToOperand(instr->InputAt(0));
@@ -1672,6 +1736,41 @@ void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
}
+static Condition ComputeCompareCondition(Token::Value op) {
+ switch (op) {
+ case Token::EQ_STRICT:
+ case Token::EQ:
+ return equal;
+ case Token::LT:
+ return less;
+ case Token::GT:
+ return greater;
+ case Token::LTE:
+ return less_equal;
+ case Token::GTE:
+ return greater_equal;
+ default:
+ UNREACHABLE();
+ return no_condition;
+ }
+}
+
+
+void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
+ Token::Value op = instr->op();
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+ Handle<Code> ic = CompareIC::GetUninitialized(op);
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+
+ Condition condition = ComputeCompareCondition(op);
+ __ test(eax, Operand(eax));
+
+ EmitBranch(true_block, false_block, condition);
+}
+
+
static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
InstanceType from = instr->from();
InstanceType to = instr->to();
@@ -1745,28 +1844,36 @@ void LCodeGen::EmitClassOfTest(Label* is_true,
ASSERT(!input.is(temp));
ASSERT(!temp.is(temp2)); // But input and temp2 may be the same register.
__ JumpIfSmi(input, is_false);
- __ CmpObjectType(input, FIRST_SPEC_OBJECT_TYPE, temp);
- __ j(below, is_false);
- // Map is now in temp.
- // Functions have class 'Function'.
- __ CmpInstanceType(temp, FIRST_CALLABLE_SPEC_OBJECT_TYPE);
if (class_name->IsEqualTo(CStrVector("Function"))) {
- __ j(above_equal, is_true);
+ // Assuming the following assertions, we can use the same compares to test
+ // for both being a function type and being in the object type range.
+ STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
+ STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
+ FIRST_SPEC_OBJECT_TYPE + 1);
+ STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
+ LAST_SPEC_OBJECT_TYPE - 1);
+ STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+ __ CmpObjectType(input, FIRST_SPEC_OBJECT_TYPE, temp);
+ __ j(below, is_false);
+ __ j(equal, is_true);
+ __ CmpInstanceType(temp, LAST_SPEC_OBJECT_TYPE);
+ __ j(equal, is_true);
} else {
- __ j(above_equal, is_false);
+    // Faster code path to avoid two compares: subtract lower bound from the
+    // actual type and do an unsigned compare with the width of the type range.
+ __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
+ __ mov(temp2, FieldOperand(temp, Map::kInstanceTypeOffset));
+ __ sub(Operand(temp2), Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+ __ cmpb(Operand(temp2),
+ static_cast<int8_t>(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
+ FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+ __ j(above, is_false);
}
+ // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
// Check if the constructor in the map is a function.
__ mov(temp, FieldOperand(temp, Map::kConstructorOffset));
-
- // As long as LAST_CALLABLE_SPEC_OBJECT_TYPE is the last instance type, and
- // FIRST_CALLABLE_SPEC_OBJECT_TYPE comes right after
- // LAST_NONCALLABLE_SPEC_OBJECT_TYPE, we can avoid checking for the latter.
- STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
- STATIC_ASSERT(FIRST_CALLABLE_SPEC_OBJECT_TYPE ==
- LAST_NONCALLABLE_SPEC_OBJECT_TYPE + 1);
-
// Objects with a non-function constructor have class 'Object'.
__ CmpObjectType(temp, JS_FUNCTION_TYPE, temp2);
if (class_name->IsEqualTo(CStrVector("Object"))) {
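
The subtract-then-compare idiom above is the standard single-compare range check: subtracting the lower bound wraps any type below the range around to a large unsigned value, so one unsigned 'above' test rejects both ends at once. Equivalent C++:

    #include <cstdint>

    // One unsigned compare replaces the two-sided range test.
    bool InRange(uint8_t type, uint8_t first, uint8_t last) {
      return static_cast<uint8_t>(type - first) <=
             static_cast<uint8_t>(last - first);
    }
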
@@ -1849,11 +1956,10 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
LInstanceOfKnownGlobal* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() {
- codegen()->DoDeferredLInstanceOfKnownGlobal(instr_, &map_check_);
+ codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
}
-
+ virtual LInstruction* instr() { return instr_; }
Label* map_check() { return &map_check_; }
-
private:
LInstanceOfKnownGlobal* instr_;
Label map_check_;
@@ -1905,8 +2011,8 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
}
-void LCodeGen::DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
- Label* map_check) {
+void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
+ Label* map_check) {
PushSafepointRegistersScope scope(this);
InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
@@ -1933,31 +2039,15 @@ void LCodeGen::DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
RelocInfo::CODE_TARGET,
instr,
RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+ ASSERT(instr->HasDeoptimizationEnvironment());
+ LEnvironment* env = instr->deoptimization_environment();
+ safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
+
// Put the result value into the eax slot and restore all registers.
__ StoreToSafepointRegisterSlot(eax, eax);
}
-static Condition ComputeCompareCondition(Token::Value op) {
- switch (op) {
- case Token::EQ_STRICT:
- case Token::EQ:
- return equal;
- case Token::LT:
- return less;
- case Token::GT:
- return greater;
- case Token::LTE:
- return less_equal;
- case Token::GTE:
- return greater_equal;
- default:
- UNREACHABLE();
- return no_condition;
- }
-}
-
-
void LCodeGen::DoCmpT(LCmpT* instr) {
Token::Value op = instr->op();
@@ -1965,9 +2055,6 @@ void LCodeGen::DoCmpT(LCmpT* instr) {
CallCode(ic, RelocInfo::CODE_TARGET, instr);
Condition condition = ComputeCompareCondition(op);
- if (op == Token::GT || op == Token::LTE) {
- condition = ReverseCondition(condition);
- }
Label true_value, done;
__ test(eax, Operand(eax));
__ j(condition, &true_value, Label::kNear);
@@ -1991,6 +2078,17 @@ void LCodeGen::DoReturn(LReturn* instr) {
}
__ mov(esp, ebp);
__ pop(ebp);
+ if (dynamic_frame_alignment_) {
+ Label aligned;
+ // Frame alignment marker (padding) is below arguments,
+    // The frame alignment marker (padding) is below the arguments and the
+    // receiver, so its return-address-relative offset is
+    // (num_arguments + 2) words.
+ Immediate(factory()->frame_alignment_marker()));
+ __ j(not_equal, &aligned);
+ __ Ret((GetParameterCount() + 2) * kPointerSize, ecx);
+ __ bind(&aligned);
+ }
__ Ret((GetParameterCount() + 1) * kPointerSize, ecx);
}
@@ -1998,7 +2096,7 @@ void LCodeGen::DoReturn(LReturn* instr) {
void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
Register result = ToRegister(instr->result());
__ mov(result, Operand::Cell(instr->hydrogen()->cell()));
- if (instr->hydrogen()->check_hole_value()) {
+ if (instr->hydrogen()->RequiresHoleCheck()) {
__ cmp(result, factory()->the_hole_value());
DeoptimizeIf(equal, instr->environment());
}
@@ -2019,20 +2117,40 @@ void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
+ Register object = ToRegister(instr->TempAt(0));
+ Register address = ToRegister(instr->TempAt(1));
Register value = ToRegister(instr->InputAt(0));
- Operand cell_operand = Operand::Cell(instr->hydrogen()->cell());
+ ASSERT(!value.is(object));
+ Handle<JSGlobalPropertyCell> cell_handle(instr->hydrogen()->cell());
+
+ int offset = JSGlobalPropertyCell::kValueOffset;
+ __ mov(object, Immediate(cell_handle));
// If the cell we are storing to contains the hole it could have
// been deleted from the property dictionary. In that case, we need
// to update the property details in the property dictionary to mark
// it as no longer deleted. We deoptimize in that case.
- if (instr->hydrogen()->check_hole_value()) {
- __ cmp(cell_operand, factory()->the_hole_value());
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ __ cmp(FieldOperand(object, offset), factory()->the_hole_value());
DeoptimizeIf(equal, instr->environment());
}
// Store the value.
- __ mov(cell_operand, value);
+ __ mov(FieldOperand(object, offset), value);
+
+ // Cells are always in the remembered set.
+ if (instr->hydrogen()->NeedsWriteBarrier()) {
+ HType type = instr->hydrogen()->value()->type();
+ SmiCheck check_needed =
+ type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+ __ RecordWriteField(object,
+ offset,
+ value,
+ address,
+ kSaveFPRegs,
+ OMIT_REMEMBERED_SET,
+ check_needed);
+ }
}
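
Every RecordWrite variant in this diff now takes three extra decisions that the incremental-marking write barrier needs spelled out per call site: whether XMM registers must survive the barrier's runtime call, whether the slot still needs adding to the remembered set (global property cells never do, as the comment above notes), and whether the value might be a smi that can skip the barrier. The enum names mirror those in the diff; the helper is an illustrative model of the smi shortcut, not V8 code:

    enum SaveFPRegsMode { kDontSaveFPRegs, kSaveFPRegs };
    enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
    enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };

    bool BarrierCanBeSkipped(bool value_is_smi, SmiCheck check) {
      // Smis are immediates, never heap pointers, so no barrier is needed;
      // call sites that know the value is a heap object pass OMIT_SMI_CHECK.
      return check == INLINE_SMI_CHECK && value_is_smi;
    }
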
@@ -2042,7 +2160,7 @@ void LCodeGen::DoStoreGlobalGeneric(LStoreGlobalGeneric* instr) {
ASSERT(ToRegister(instr->value()).is(eax));
__ mov(ecx, instr->name());
- Handle<Code> ic = instr->strict_mode()
+ Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
? isolate()->builtins()->StoreIC_Initialize_Strict()
: isolate()->builtins()->StoreIC_Initialize();
CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
@@ -2060,10 +2178,19 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
Register context = ToRegister(instr->context());
Register value = ToRegister(instr->value());
__ mov(ContextOperand(context, instr->slot_index()), value);
- if (instr->needs_write_barrier()) {
+ if (instr->hydrogen()->NeedsWriteBarrier()) {
+ HType type = instr->hydrogen()->value()->type();
+ SmiCheck check_needed =
+ type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
Register temp = ToRegister(instr->TempAt(0));
int offset = Context::SlotOffset(instr->slot_index());
- __ RecordWrite(context, offset, value, temp);
+ __ RecordWriteContextSlot(context,
+ offset,
+ value,
+ temp,
+ kSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ check_needed);
}
}
@@ -2084,7 +2211,7 @@ void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
Register object,
Handle<Map> type,
Handle<String> name) {
- LookupResult lookup;
+ LookupResult lookup(isolate());
type->LookupInDescriptors(NULL, *name, &lookup);
ASSERT(lookup.IsProperty() &&
(lookup.type() == FIELD || lookup.type() == CONSTANT_FUNCTION));
@@ -2280,16 +2407,14 @@ void LCodeGen::DoLoadKeyedFastDoubleElement(
LLoadKeyedFastDoubleElement* instr) {
XMMRegister result = ToDoubleRegister(instr->result());
- if (instr->hydrogen()->RequiresHoleCheck()) {
- int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag +
- sizeof(kHoleNanLower32);
- Operand hole_check_operand = BuildFastArrayOperand(
- instr->elements(), instr->key(),
- FAST_DOUBLE_ELEMENTS,
- offset);
- __ cmp(hole_check_operand, Immediate(kHoleNanUpper32));
- DeoptimizeIf(equal, instr->environment());
- }
+ int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag +
+ sizeof(kHoleNanLower32);
+ Operand hole_check_operand = BuildFastArrayOperand(
+ instr->elements(), instr->key(),
+ FAST_DOUBLE_ELEMENTS,
+ offset);
+ __ cmp(hole_check_operand, Immediate(kHoleNanUpper32));
+ DeoptimizeIf(equal, instr->environment());
Operand double_load_operand = BuildFastArrayOperand(
instr->elements(), instr->key(), FAST_DOUBLE_ELEMENTS,
@@ -2359,6 +2484,7 @@ void LCodeGen::DoLoadKeyedSpecializedArrayElement(
break;
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
+ case FAST_SMI_ONLY_ELEMENTS:
case FAST_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
@@ -2502,12 +2628,9 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
__ bind(&invoke);
ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
LPointerMap* pointers = instr->pointer_map();
- LEnvironment* env = instr->deoptimization_environment();
RecordPosition(pointers->position());
- RegisterEnvironmentForDeoptimization(env);
- SafepointGenerator safepoint_generator(this,
- pointers,
- env->deoptimization_index());
+ SafepointGenerator safepoint_generator(
+ this, pointers, Safepoint::kLazyDeopt);
ParameterCount actual(eax);
__ InvokeFunction(function, actual, CALL_FUNCTION,
safepoint_generator, CALL_AS_METHOD);
@@ -2526,7 +2649,7 @@ void LCodeGen::DoPushArgument(LPushArgument* instr) {
void LCodeGen::DoThisFunction(LThisFunction* instr) {
Register result = ToRegister(instr->result());
- __ mov(result, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+ LoadHeapObject(result, instr->hydrogen()->closure());
}
@@ -2590,8 +2713,7 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
__ call(FieldOperand(edi, JSFunction::kCodeEntryOffset));
}
- // Setup deoptimization.
- RegisterLazyDeoptimization(instr, RECORD_SIMPLE_SAFEPOINT);
+ RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
}
@@ -2680,6 +2802,7 @@ void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
virtual void Generate() {
codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
}
+ virtual LInstruction* instr() { return instr_; }
private:
LUnaryMathOperation* instr_;
};
@@ -2911,6 +3034,14 @@ void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
}
+void LCodeGen::DoMathTan(LUnaryMathOperation* instr) {
+ ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
+ TranscendentalCacheStub stub(TranscendentalCache::TAN,
+ TranscendentalCacheStub::UNTAGGED);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+}
+
+
void LCodeGen::DoMathCos(LUnaryMathOperation* instr) {
ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
TranscendentalCacheStub stub(TranscendentalCache::COS,
@@ -2950,6 +3081,9 @@ void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) {
case kMathSin:
DoMathSin(instr);
break;
+ case kMathTan:
+ DoMathTan(instr);
+ break;
case kMathLog:
DoMathLog(instr);
break;
@@ -2966,10 +3100,9 @@ void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
ASSERT(instr->HasPointerMap());
ASSERT(instr->HasDeoptimizationEnvironment());
LPointerMap* pointers = instr->pointer_map();
- LEnvironment* env = instr->deoptimization_environment();
RecordPosition(pointers->position());
- RegisterEnvironmentForDeoptimization(env);
- SafepointGenerator generator(this, pointers, env->deoptimization_index());
+ SafepointGenerator generator(
+ this, pointers, Safepoint::kLazyDeopt);
ParameterCount count(instr->arity());
__ InvokeFunction(edi, count, CALL_FUNCTION, generator, CALL_AS_METHOD);
}
@@ -3002,12 +3135,12 @@ void LCodeGen::DoCallNamed(LCallNamed* instr) {
void LCodeGen::DoCallFunction(LCallFunction* instr) {
ASSERT(ToRegister(instr->context()).is(esi));
+ ASSERT(ToRegister(instr->function()).is(edi));
ASSERT(ToRegister(instr->result()).is(eax));
int arity = instr->arity();
- CallFunctionStub stub(arity, RECEIVER_MIGHT_BE_IMPLICIT);
+ CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- __ Drop(1);
}
@@ -3057,21 +3190,36 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
}
// Do the store.
+ HType type = instr->hydrogen()->value()->type();
+ SmiCheck check_needed =
+ type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
if (instr->is_in_object()) {
__ mov(FieldOperand(object, offset), value);
- if (instr->needs_write_barrier()) {
+ if (instr->hydrogen()->NeedsWriteBarrier()) {
Register temp = ToRegister(instr->TempAt(0));
// Update the write barrier for the object for in-object properties.
- __ RecordWrite(object, offset, value, temp);
+ __ RecordWriteField(object,
+ offset,
+ value,
+ temp,
+ kSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ check_needed);
}
} else {
Register temp = ToRegister(instr->TempAt(0));
__ mov(temp, FieldOperand(object, JSObject::kPropertiesOffset));
__ mov(FieldOperand(temp, offset), value);
- if (instr->needs_write_barrier()) {
+ if (instr->hydrogen()->NeedsWriteBarrier()) {
// Update the write barrier for the properties array.
// object is used as a scratch register.
- __ RecordWrite(temp, offset, value, object);
+ __ RecordWriteField(temp,
+ offset,
+ value,
+ object,
+ kSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ check_needed);
}
}
}
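// The check_needed selection above, restated as a host-side sketch (the
// helper name is ours, not V8 API):
static SmiCheck BarrierSmiCheck(bool value_is_known_heap_object) {
  // Smis are never recorded by the write barrier, so a possibly-smi value
  // must be filtered at run time; a value statically known to be a heap
  // object can skip that filter entirely.
  return value_is_known_heap_object ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
}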
@@ -3083,7 +3231,7 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
ASSERT(ToRegister(instr->value()).is(eax));
__ mov(ecx, instr->name());
- Handle<Code> ic = instr->strict_mode()
+ Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
? isolate()->builtins()->StoreIC_Initialize_Strict()
: isolate()->builtins()->StoreIC_Initialize();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
@@ -3130,6 +3278,7 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement(
break;
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
+ case FAST_SMI_ONLY_ELEMENTS:
case FAST_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
@@ -3146,6 +3295,13 @@ void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
Register elements = ToRegister(instr->object());
Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
+ // This instruction cannot handle the FAST_SMI_ONLY_ELEMENTS -> FAST_ELEMENTS
+ // conversion, so it deopts in that case.
+ if (instr->hydrogen()->ValueNeedsSmiCheck()) {
+ __ test(value, Immediate(kSmiTagMask));
+ DeoptimizeIf(not_zero, instr->environment());
+ }
+
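// The smi filter above relies on ia32 value tagging; a host-side sketch
// (the constant matches kSmiTag == 0, kSmiTagSize == 1, asserted
// elsewhere in this file):
static inline bool IsSmiEncoded(intptr_t word) {
  const intptr_t kMask = 1;    // kSmiTagMask: the low bit of every word
  return (word & kMask) == 0;  // 0 -> smi, 1 -> heap-object pointer
}
// So "test value, kSmiTagMask" sets the zero flag for smis, and
// DeoptimizeIf(not_zero, ...) bails out exactly for heap objects.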
// Do the store.
if (instr->key()->IsConstantOperand()) {
ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
@@ -3162,13 +3318,21 @@ void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
}
if (instr->hydrogen()->NeedsWriteBarrier()) {
+ HType type = instr->hydrogen()->value()->type();
+ SmiCheck check_needed =
+ type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
// Compute address of modified element and store it into key register.
__ lea(key,
FieldOperand(elements,
key,
times_pointer_size,
FixedArray::kHeaderSize));
- __ RecordWrite(elements, key, value);
+ __ RecordWrite(elements,
+ key,
+ value,
+ kSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ check_needed);
}
}
@@ -3199,99 +3363,75 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
ASSERT(ToRegister(instr->key()).is(ecx));
ASSERT(ToRegister(instr->value()).is(eax));
- Handle<Code> ic = instr->strict_mode()
+ Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
: isolate()->builtins()->KeyedStoreIC_Initialize();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
+void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
+ Register object_reg = ToRegister(instr->object());
+ Register new_map_reg = ToRegister(instr->new_map_reg());
+
+ Handle<Map> from_map = instr->original_map();
+ Handle<Map> to_map = instr->transitioned_map();
+ ElementsKind from_kind = from_map->elements_kind();
+ ElementsKind to_kind = to_map->elements_kind();
+
+ Label not_applicable;
+ __ cmp(FieldOperand(object_reg, HeapObject::kMapOffset), from_map);
+ __ j(not_equal, &not_applicable);
+ __ mov(new_map_reg, to_map);
+ if (from_kind == FAST_SMI_ONLY_ELEMENTS && to_kind == FAST_ELEMENTS) {
+ __ mov(FieldOperand(object_reg, HeapObject::kMapOffset), new_map_reg);
+ // Write barrier.
+ ASSERT_NE(instr->temp_reg(), NULL);
+ __ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
+ ToRegister(instr->temp_reg()), kDontSaveFPRegs);
+ } else if (from_kind == FAST_SMI_ONLY_ELEMENTS &&
+ to_kind == FAST_DOUBLE_ELEMENTS) {
+ Register fixed_object_reg = ToRegister(instr->temp_reg());
+ ASSERT(fixed_object_reg.is(edx));
+ ASSERT(new_map_reg.is(ebx));
+ __ mov(fixed_object_reg, object_reg);
+ CallCode(isolate()->builtins()->TransitionElementsSmiToDouble(),
+ RelocInfo::CODE_TARGET, instr);
+ } else if (from_kind == FAST_DOUBLE_ELEMENTS && to_kind == FAST_ELEMENTS) {
+ Register fixed_object_reg = ToRegister(instr->temp_reg());
+ ASSERT(fixed_object_reg.is(edx));
+ ASSERT(new_map_reg.is(ebx));
+ __ mov(fixed_object_reg, object_reg);
+ CallCode(isolate()->builtins()->TransitionElementsDoubleToObject(),
+ RelocInfo::CODE_TARGET, instr);
+ } else {
+ UNREACHABLE();
+ }
+ __ bind(&not_applicable);
+}
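// The dispatch above, condensed into a host-side table (sketch only; the
// enum and helper are illustrative, not V8 API):
enum TransitionStrategy { kSwapMap, kSmiToDoubleStub, kDoubleToObjectStub };
static TransitionStrategy PickStrategy(ElementsKind from, ElementsKind to) {
  if (from == FAST_SMI_ONLY_ELEMENTS && to == FAST_ELEMENTS)
    return kSwapMap;             // only the map word changes, in place
  if (from == FAST_SMI_ONLY_ELEMENTS && to == FAST_DOUBLE_ELEMENTS)
    return kSmiToDoubleStub;     // elements must be re-boxed as doubles
  if (from == FAST_DOUBLE_ELEMENTS && to == FAST_ELEMENTS)
    return kDoubleToObjectStub;  // doubles become tagged heap numbers
  UNREACHABLE();
  return kSwapMap;
}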
+
+
void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
class DeferredStringCharCodeAt: public LDeferredCode {
public:
DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
+ virtual LInstruction* instr() { return instr_; }
private:
LStringCharCodeAt* instr_;
};
- Register string = ToRegister(instr->string());
- Register index = ToRegister(instr->index());
- Register result = ToRegister(instr->result());
-
DeferredStringCharCodeAt* deferred =
new DeferredStringCharCodeAt(this, instr);
- // Fetch the instance type of the receiver into result register.
- __ mov(result, FieldOperand(string, HeapObject::kMapOffset));
- __ movzx_b(result, FieldOperand(result, Map::kInstanceTypeOffset));
-
- // We need special handling for indirect strings.
- Label check_sequential;
- __ test(result, Immediate(kIsIndirectStringMask));
- __ j(zero, &check_sequential, Label::kNear);
-
- // Dispatch on the indirect string shape: slice or cons.
- Label cons_string;
- __ test(result, Immediate(kSlicedNotConsMask));
- __ j(zero, &cons_string, Label::kNear);
-
- // Handle slices.
- Label indirect_string_loaded;
- __ mov(result, FieldOperand(string, SlicedString::kOffsetOffset));
- __ SmiUntag(result);
- __ add(index, Operand(result));
- __ mov(string, FieldOperand(string, SlicedString::kParentOffset));
- __ jmp(&indirect_string_loaded, Label::kNear);
-
- // Handle conses.
- // Check whether the right hand side is the empty string (i.e. if
- // this is really a flat string in a cons string). If that is not
- // the case we would rather go to the runtime system now to flatten
- // the string.
- __ bind(&cons_string);
- __ cmp(FieldOperand(string, ConsString::kSecondOffset),
- Immediate(factory()->empty_string()));
- __ j(not_equal, deferred->entry());
- __ mov(string, FieldOperand(string, ConsString::kFirstOffset));
-
- __ bind(&indirect_string_loaded);
- __ mov(result, FieldOperand(string, HeapObject::kMapOffset));
- __ movzx_b(result, FieldOperand(result, Map::kInstanceTypeOffset));
-
- // Check whether the string is sequential. The only non-sequential
- // shapes we support have just been unwrapped above.
- __ bind(&check_sequential);
- STATIC_ASSERT(kSeqStringTag == 0);
- __ test(result, Immediate(kStringRepresentationMask));
- __ j(not_zero, deferred->entry());
-
- // Dispatch on the encoding: ASCII or two-byte.
- Label ascii_string;
- STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
- STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
- __ test(result, Immediate(kStringEncodingMask));
- __ j(not_zero, &ascii_string, Label::kNear);
-
- // Two-byte string.
- // Load the two-byte character code into the result register.
- Label done;
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
- __ movzx_w(result, FieldOperand(string,
- index,
- times_2,
- SeqTwoByteString::kHeaderSize));
- __ jmp(&done, Label::kNear);
-
- // ASCII string.
- // Load the byte into the result register.
- __ bind(&ascii_string);
- __ movzx_b(result, FieldOperand(string,
- index,
- times_1,
- SeqAsciiString::kHeaderSize));
- __ bind(&done);
+ StringCharLoadGenerator::Generate(masm(),
+ factory(),
+ ToRegister(instr->string()),
+ ToRegister(instr->index()),
+ ToRegister(instr->result()),
+ deferred->entry());
__ bind(deferred->exit());
}
@@ -3334,6 +3474,7 @@ void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); }
+ virtual LInstruction* instr() { return instr_; }
private:
LStringCharFromCode* instr_;
};
@@ -3413,6 +3554,7 @@ void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() { codegen()->DoDeferredNumberTagI(instr_); }
+ virtual LInstruction* instr() { return instr_; }
private:
LNumberTagI* instr_;
};
@@ -3463,7 +3605,7 @@ void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) {
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
__ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
RecordSafepointWithRegisters(
- instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
+ instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
if (!reg.is(eax)) __ mov(reg, eax);
// Done. Put the value in xmm0 into the value of the allocated heap
@@ -3480,6 +3622,7 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
+ virtual LInstruction* instr() { return instr_; }
private:
LNumberTagD* instr_;
};
@@ -3514,8 +3657,8 @@ void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
// not have easy access to the local context.
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
__ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
- RecordSafepointWithRegisters(instr->pointer_map(), 0,
- Safepoint::kNoDeoptimizationIndex);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
__ StoreToSafepointRegisterSlot(reg, eax);
}
@@ -3581,16 +3724,6 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
}
-class DeferredTaggedToI: public LDeferredCode {
- public:
- DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
- private:
- LTaggedToI* instr_;
-};
-
-
void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
Label done, heap_number;
Register input_reg = ToRegister(instr->InputAt(0));
@@ -3623,8 +3756,7 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
__ cmp(Operand(input_reg), Immediate(kTooBigExponent));
__ j(less, &convert, Label::kNear);
// Pop FPU stack before deoptimizing.
- __ ffree(0);
- __ fincstp();
+ __ fstp(0);
DeoptimizeIf(no_condition, instr->environment());
// Reserve space for the 64-bit answer.
@@ -3672,6 +3804,16 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
+ class DeferredTaggedToI: public LDeferredCode {
+ public:
+ DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
+ virtual LInstruction* instr() { return instr_; }
+ private:
+ LTaggedToI* instr_;
+ };
+
LOperand* input = instr->InputAt(0);
ASSERT(input->IsRegister());
ASSERT(input->Equals(instr->result()));
@@ -3882,9 +4024,16 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
- ASSERT(instr->InputAt(0)->IsRegister());
- Operand operand = ToOperand(instr->InputAt(0));
- __ cmp(operand, instr->hydrogen()->target());
+ Handle<JSFunction> target = instr->hydrogen()->target();
+ if (isolate()->heap()->InNewSpace(*target)) {
+ Register reg = ToRegister(instr->value());
+ Handle<JSGlobalPropertyCell> cell =
+ isolate()->factory()->NewJSGlobalPropertyCell(target);
+ __ cmp(reg, Operand::Cell(cell));
+ } else {
+ Operand operand = ToOperand(instr->value());
+ __ cmp(operand, instr->hydrogen()->target());
+ }
DeoptimizeIf(not_equal, instr->environment());
}
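// Rationale for the split above (a reading of the code): a target
// function living in new space can move during a scavenge, and constants
// embedded in code are not rewritten then, so the pointer is held in an
// old-space JSGlobalPropertyCell and compared through the cell instead
// of being embedded directly in the compare instruction.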
@@ -3987,11 +4136,17 @@ void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
ASSERT(ToRegister(instr->context()).is(esi));
+
+ Handle<FixedArray> constant_elements = instr->hydrogen()->constant_elements();
+ ASSERT_EQ(2, constant_elements->length());
+ ElementsKind constant_elements_kind =
+ static_cast<ElementsKind>(Smi::cast(constant_elements->get(0))->value());
+
// Set up the parameters to the stub/runtime call.
__ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
__ push(FieldOperand(eax, JSFunction::kLiteralsOffset));
__ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
- __ push(Immediate(instr->hydrogen()->constant_elements()));
+ __ push(Immediate(constant_elements));
// Pick the right runtime function or stub to call.
int length = instr->hydrogen()->length();
@@ -4007,20 +4162,97 @@ void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr);
} else {
FastCloneShallowArrayStub::Mode mode =
- FastCloneShallowArrayStub::CLONE_ELEMENTS;
+ constant_elements_kind == FAST_DOUBLE_ELEMENTS
+ ? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
+ : FastCloneShallowArrayStub::CLONE_ELEMENTS;
FastCloneShallowArrayStub stub(mode, length);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
}
-void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
+void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
+ Register result,
+ Register source,
+ int* offset) {
+ ASSERT(!source.is(ecx));
+ ASSERT(!result.is(ecx));
+
+ if (FLAG_debug_code) {
+ LoadHeapObject(ecx, object);
+ __ cmp(source, ecx);
+ __ Assert(equal, "Unexpected object literal boilerplate");
+ }
+
+ // Increase the offset so that subsequent objects end up right after
+ // this one.
+ int current_offset = *offset;
+ int size = object->map()->instance_size();
+ *offset += size;
+
+ // Copy object header.
+ ASSERT(object->properties()->length() == 0);
+ ASSERT(object->elements()->length() == 0 ||
+ object->elements()->map() == isolate()->heap()->fixed_cow_array_map());
+ int inobject_properties = object->map()->inobject_properties();
+ int header_size = size - inobject_properties * kPointerSize;
+ for (int i = 0; i < header_size; i += kPointerSize) {
+ __ mov(ecx, FieldOperand(source, i));
+ __ mov(FieldOperand(result, current_offset + i), ecx);
+ }
+
+ // Copy in-object properties.
+ for (int i = 0; i < inobject_properties; i++) {
+ int total_offset = current_offset + object->GetInObjectPropertyOffset(i);
+ Handle<Object> value = Handle<Object>(object->InObjectPropertyAt(i));
+ if (value->IsJSObject()) {
+ Handle<JSObject> value_object = Handle<JSObject>::cast(value);
+ __ lea(ecx, Operand(result, *offset));
+ __ mov(FieldOperand(result, total_offset), ecx);
+ LoadHeapObject(source, value_object);
+ EmitDeepCopy(value_object, result, source, offset);
+ } else if (value->IsHeapObject()) {
+ LoadHeapObject(ecx, Handle<HeapObject>::cast(value));
+ __ mov(FieldOperand(result, total_offset), ecx);
+ } else {
+ __ mov(FieldOperand(result, total_offset), Immediate(value));
+ }
+ }
+}
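// Offset bookkeeping in EmitDeepCopy, as a worked example: for a
// boilerplate like {a: 1, b: {c: 2}} with outer size s0 and inner size
// s1, *offset advances to s0 before the field walk, so the 'b' slot is
// written as result + s0 (via the lea) and the recursive call then
// copies the inner object into [s0, s0 + s1). The ASSERT_EQ(size, offset)
// in the caller checks that every reserved byte was filled.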
+
+
+void LCodeGen::DoObjectLiteralFast(LObjectLiteralFast* instr) {
ASSERT(ToRegister(instr->context()).is(esi));
+ int size = instr->hydrogen()->total_size();
+
+ // Allocate all objects that are part of the literal in one big
+ // allocation. This avoids multiple limit checks.
+ Label allocated, runtime_allocate;
+ __ AllocateInNewSpace(size, eax, ecx, edx, &runtime_allocate, TAG_OBJECT);
+ __ jmp(&allocated);
+
+ __ bind(&runtime_allocate);
+ __ push(Immediate(Smi::FromInt(size)));
+ CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
+
+ __ bind(&allocated);
+ int offset = 0;
+ LoadHeapObject(ebx, instr->hydrogen()->boilerplate());
+ EmitDeepCopy(instr->hydrogen()->boilerplate(), eax, ebx, &offset);
+ ASSERT_EQ(size, offset);
+}
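// The allocation pattern above, in outline (sketch): try a bump-pointer
// AllocateInNewSpace covering the whole literal graph at once; on
// failure fall through to Runtime::kAllocateInNewSpace with the size as
// a smi, then continue at the shared "allocated" label with eax holding
// the object either way.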
+
+
+void LCodeGen::DoObjectLiteralGeneric(LObjectLiteralGeneric* instr) {
+ ASSERT(ToRegister(instr->context()).is(esi));
+ Handle<FixedArray> constant_properties =
+ instr->hydrogen()->constant_properties();
+
// Set up the parameters to the stub/runtime call.
__ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
__ push(FieldOperand(eax, JSFunction::kLiteralsOffset));
__ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
- __ push(Immediate(instr->hydrogen()->constant_properties()));
+ __ push(Immediate(constant_properties));
int flags = instr->hydrogen()->fast_elements()
? ObjectLiteral::kFastElements
: ObjectLiteral::kNoFlags;
@@ -4029,11 +4261,16 @@ void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
: ObjectLiteral::kNoFlags;
__ push(Immediate(Smi::FromInt(flags)));
- // Pick the right runtime function to call.
+ // Pick the right runtime function or stub to call.
+ int properties_count = constant_properties->length() / 2;
if (instr->hydrogen()->depth() > 1) {
CallRuntime(Runtime::kCreateObjectLiteral, 4, instr);
- } else {
+ } else if (flags != ObjectLiteral::kFastElements ||
+ properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
CallRuntime(Runtime::kCreateObjectLiteralShallow, 4, instr);
+ } else {
+ FastCloneShallowObjectStub stub(properties_count);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
}
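// The stub-versus-runtime selection above as a host-side predicate (the
// helper is illustrative, not V8 API):
static bool CanUseFastCloneStub(int depth, int flags, int properties_count) {
  return depth <= 1 &&
         flags == ObjectLiteral::kFastElements &&
         properties_count <=
             FastCloneShallowObjectStub::kMaximumClonedProperties;
}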
@@ -4106,8 +4343,7 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
Handle<SharedFunctionInfo> shared_info = instr->shared_info();
bool pretenure = instr->hydrogen()->pretenure();
if (!pretenure && shared_info->num_literals() == 0) {
- FastNewClosureStub stub(
- shared_info->strict_mode() ? kStrictMode : kNonStrictMode);
+ FastNewClosureStub stub(shared_info->language_mode());
__ push(Immediate(shared_info));
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
} else {
@@ -4139,12 +4375,11 @@ void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
Label* true_label = chunk_->GetAssemblyLabel(true_block);
Label* false_label = chunk_->GetAssemblyLabel(false_block);
- Condition final_branch_condition = EmitTypeofIs(true_label,
- false_label,
- input,
- instr->type_literal());
-
- EmitBranch(true_block, false_block, final_branch_condition);
+ Condition final_branch_condition =
+ EmitTypeofIs(true_label, false_label, input, instr->type_literal());
+ if (final_branch_condition != no_condition) {
+ EmitBranch(true_block, false_block, final_branch_condition);
+ }
}
@@ -4188,10 +4423,12 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
final_branch_condition = not_zero;
} else if (type_name->Equals(heap()->function_symbol())) {
- STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
+ STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
__ JumpIfSmi(input, false_label);
- __ CmpObjectType(input, FIRST_CALLABLE_SPEC_OBJECT_TYPE, input);
- final_branch_condition = above_equal;
+ __ CmpObjectType(input, JS_FUNCTION_TYPE, input);
+ __ j(equal, true_label);
+ __ CmpInstanceType(input, JS_FUNCTION_PROXY_TYPE);
+ final_branch_condition = equal;
} else if (type_name->Equals(heap()->object_symbol())) {
__ JumpIfSmi(input, false_label);
@@ -4209,11 +4446,8 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
final_branch_condition = zero;
} else {
- final_branch_condition = not_equal;
__ jmp(false_label);
- // A dead branch instruction will be generated after this point.
}
-
return final_branch_condition;
}
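// The new "function" case above, restated (sketch; it rests on the
// asserted NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2):
static bool TypeofSaysFunction(InstanceType type) {
  // Exactly two instance types answer typeof with "function" now, so two
  // equality checks replace the old >= FIRST_CALLABLE_SPEC_OBJECT_TYPE
  // range test.
  return type == JS_FUNCTION_TYPE || type == JS_FUNCTION_PROXY_TYPE;
}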
@@ -4246,9 +4480,27 @@ void LCodeGen::EmitIsConstructCall(Register temp) {
}
+void LCodeGen::EnsureSpaceForLazyDeopt() {
+ // Ensure that we have enough space after the previous lazy-bailout
+ // instruction for patching the code here.
+ int current_pc = masm()->pc_offset();
+ int patch_size = Deoptimizer::patch_size();
+ if (current_pc < last_lazy_deopt_pc_ + patch_size) {
+ int padding_size = last_lazy_deopt_pc_ + patch_size - current_pc;
+ while (padding_size-- > 0) {
+ __ nop();
+ }
+ }
+ last_lazy_deopt_pc_ = masm()->pc_offset();
+}
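// The padding arithmetic above as a host-side sketch; the 5-byte figure
// in the comment is an assumption (a rel32 call on ia32), not a constant
// quoted from Deoptimizer::patch_size():
static int LazyDeoptPadding(int current_pc, int last_pc, int patch_size) {
  // e.g. current_pc = 102, last_pc = 100, patch_size = 5 -> 3 nop bytes,
  // so patching a full call at last_pc cannot overwrite this site.
  int gap = current_pc - last_pc;
  return gap < patch_size ? patch_size - gap : 0;
}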
+
+
void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
- // No code for lazy bailout instruction. Used to capture environment after a
- // call for populating the safepoint data with deoptimization data.
+ EnsureSpaceForLazyDeopt();
+ ASSERT(instr->HasEnvironment());
+ LEnvironment* env = instr->environment();
+ RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
+ safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}
@@ -4268,32 +4520,26 @@ void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) {
}
ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
LPointerMap* pointers = instr->pointer_map();
- LEnvironment* env = instr->deoptimization_environment();
RecordPosition(pointers->position());
- RegisterEnvironmentForDeoptimization(env);
// Create safepoint generator that will also ensure enough space in the
// reloc info for patching in deoptimization (since this is invoking a
// builtin)
- SafepointGenerator safepoint_generator(this,
- pointers,
- env->deoptimization_index());
+ SafepointGenerator safepoint_generator(
+ this, pointers, Safepoint::kLazyDeopt);
__ push(Immediate(Smi::FromInt(strict_mode_flag())));
__ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, safepoint_generator);
}
void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
- {
- PushSafepointRegistersScope scope(this);
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
- RegisterLazyDeoptimization(
- instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
- }
-
- // The gap code includes the restoring of the safepoint registers.
- int pc = masm()->pc_offset();
- safepoints_.SetPcAfterGap(pc);
+ PushSafepointRegistersScope scope(this);
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
+ RecordSafepointWithLazyDeopt(
+ instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+ ASSERT(instr->HasEnvironment());
+ LEnvironment* env = instr->environment();
+ safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}
@@ -4303,10 +4549,15 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
+ virtual LInstruction* instr() { return instr_; }
private:
LStackCheck* instr_;
};
+ ASSERT(instr->HasEnvironment());
+ LEnvironment* env = instr->environment();
+ // There is no LLazyBailout instruction for stack-checks. We have to
+ // prepare for lazy deoptimization explicitly here.
if (instr->hydrogen()->is_function_entry()) {
// Perform stack overflow check.
Label done;
@@ -4319,7 +4570,10 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
ASSERT(ToRegister(instr->context()).is(esi));
StackCheckStub stub;
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ EnsureSpaceForLazyDeopt();
__ bind(&done);
+ RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
+ safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
} else {
ASSERT(instr->hydrogen()->is_backwards_branch());
// Perform stack overflow check if this goto needs it before jumping.
@@ -4329,8 +4583,13 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
ExternalReference::address_of_stack_limit(isolate());
__ cmp(esp, Operand::StaticVariable(stack_limit));
__ j(below, deferred_stack_check->entry());
+ EnsureSpaceForLazyDeopt();
__ bind(instr->done_label());
deferred_stack_check->SetExit(instr->done_label());
+ RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
+ // Don't record a deoptimization index for the safepoint here.
+ // This will be done explicitly when emitting the call and the safepoint
+ // in the deferred code.
}
}
@@ -4346,7 +4605,7 @@ void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
// If the environment were already registered, we would have no way of
// backpatching it with the spill slot operands.
ASSERT(!environment->HasBeenRegistered());
- RegisterEnvironmentForDeoptimization(environment);
+ RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
ASSERT(osr_pc_offset_ == -1);
osr_pc_offset_ = masm()->pc_offset();
}
@@ -4367,15 +4626,9 @@ void LCodeGen::DoIn(LIn* instr) {
}
ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
LPointerMap* pointers = instr->pointer_map();
- LEnvironment* env = instr->deoptimization_environment();
RecordPosition(pointers->position());
- RegisterEnvironmentForDeoptimization(env);
- // Create safepoint generator that will also ensure enough space in the
- // reloc info for patching in deoptimization (since this is invoking a
- // builtin)
- SafepointGenerator safepoint_generator(this,
- pointers,
- env->deoptimization_index());
+ SafepointGenerator safepoint_generator(
+ this, pointers, Safepoint::kLazyDeopt);
__ InvokeBuiltin(Builtins::IN, CALL_FUNCTION, safepoint_generator);
}
diff --git a/deps/v8/src/ia32/lithium-codegen-ia32.h b/deps/v8/src/ia32/lithium-codegen-ia32.h
index 615632742..9d1a4f78d 100644
--- a/deps/v8/src/ia32/lithium-codegen-ia32.h
+++ b/deps/v8/src/ia32/lithium-codegen-ia32.h
@@ -58,9 +58,10 @@ class LCodeGen BASE_EMBEDDED {
inlined_function_count_(0),
scope_(info->scope()),
status_(UNUSED),
+ dynamic_frame_alignment_(false),
deferred_(8),
osr_pc_offset_(-1),
- deoptimization_reloc_size(),
+ last_lazy_deopt_pc_(0),
resolver_(this),
expected_safepoint_kind_(Safepoint::kSimple) {
PopulateDeoptimizationLiteralsWithInlinedFunctions();
@@ -100,8 +101,8 @@ class LCodeGen BASE_EMBEDDED {
void DoDeferredStackCheck(LStackCheck* instr);
void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
- void DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
- Label* map_check);
+ void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
+ Label* map_check);
// Parallel move support.
void DoParallelMove(LParallelMove* move);
@@ -130,8 +131,12 @@ class LCodeGen BASE_EMBEDDED {
bool is_done() const { return status_ == DONE; }
bool is_aborted() const { return status_ == ABORTED; }
- int strict_mode_flag() const {
- return info()->is_strict_mode() ? kStrictMode : kNonStrictMode;
+ StrictModeFlag strict_mode_flag() const {
+ return info()->is_classic_mode() ? kNonStrictMode : kStrictMode;
+ }
+ bool dynamic_frame_alignment() const { return dynamic_frame_alignment_; }
+ void set_dynamic_frame_alignment(bool value) {
+ dynamic_frame_alignment_ = value;
}
LChunk* chunk() const { return chunk_; }
@@ -139,7 +144,6 @@ class LCodeGen BASE_EMBEDDED {
HGraph* graph() const { return chunk_->graph(); }
int GetNextEmittedBlock(int block);
- LInstruction* GetNextInstruction();
void EmitClassOfTest(Label* if_true,
Label* if_false,
@@ -205,10 +209,11 @@ class LCodeGen BASE_EMBEDDED {
void LoadHeapObject(Register result, Handle<HeapObject> object);
- void RegisterLazyDeoptimization(LInstruction* instr,
- SafepointMode safepoint_mode);
+ void RecordSafepointWithLazyDeopt(LInstruction* instr,
+ SafepointMode safepoint_mode);
- void RegisterEnvironmentForDeoptimization(LEnvironment* environment);
+ void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
+ Safepoint::DeoptMode mode);
void DeoptimizeIf(Condition cc, LEnvironment* environment);
void AddToTranslation(Translation* translation,
@@ -222,6 +227,7 @@ class LCodeGen BASE_EMBEDDED {
Register ToRegister(int index) const;
XMMRegister ToDoubleRegister(int index) const;
int ToInteger32(LConstantOperand* op) const;
+ double ToDouble(LConstantOperand* op) const;
Operand BuildFastArrayOperand(LOperand* elements_pointer,
LOperand* key,
ElementsKind elements_kind,
@@ -235,6 +241,7 @@ class LCodeGen BASE_EMBEDDED {
void DoMathSqrt(LUnaryMathOperation* instr);
void DoMathPowHalf(LUnaryMathOperation* instr);
void DoMathLog(LUnaryMathOperation* instr);
+ void DoMathTan(LUnaryMathOperation* instr);
void DoMathCos(LUnaryMathOperation* instr);
void DoMathSin(LUnaryMathOperation* instr);
@@ -242,21 +249,17 @@ class LCodeGen BASE_EMBEDDED {
void RecordSafepoint(LPointerMap* pointers,
Safepoint::Kind kind,
int arguments,
- int deoptimization_index);
- void RecordSafepoint(LPointerMap* pointers, int deoptimization_index);
- void RecordSafepoint(int deoptimization_index);
+ Safepoint::DeoptMode mode);
+ void RecordSafepoint(LPointerMap* pointers, Safepoint::DeoptMode mode);
+ void RecordSafepoint(Safepoint::DeoptMode mode);
void RecordSafepointWithRegisters(LPointerMap* pointers,
int arguments,
- int deoptimization_index);
+ Safepoint::DeoptMode mode);
void RecordPosition(int position);
- int LastSafepointEnd() {
- return static_cast<int>(safepoints_.GetPcAfterGap());
- }
static Condition TokenToCondition(Token::Value op, bool is_unsigned);
void EmitGoto(int block);
void EmitBranch(int left_block, int right_block, Condition cc);
- void EmitCmpI(LOperand* left, LOperand* right);
void EmitNumberUntagD(Register input,
XMMRegister result,
bool deoptimize_on_undefined,
@@ -265,8 +268,10 @@ class LCodeGen BASE_EMBEDDED {
// Emits optimized code for typeof x == "y". Modifies input register.
// Returns the condition on which a final split to
// true and false label should be made, to optimize fallthrough.
- Condition EmitTypeofIs(Label* true_label, Label* false_label,
- Register input, Handle<String> type_name);
+ Condition EmitTypeofIs(Label* true_label,
+ Label* false_label,
+ Register input,
+ Handle<String> type_name);
// Emits optimized code for %_IsObject(x). Preserves input register.
// Returns the condition on which a final split to
@@ -276,6 +281,13 @@ class LCodeGen BASE_EMBEDDED {
Label* is_not_object,
Label* is_object);
+ // Emits optimized code for %_IsString(x). Preserves input register.
+ // Returns the condition on which a final split to
+ // true and false label should be made, to optimize fallthrough.
+ Condition EmitIsString(Register input,
+ Register temp1,
+ Label* is_not_string);
+
// Emits optimized code for %_IsConstructCall().
// Caller should branch on equal condition.
void EmitIsConstructCall(Register temp);
@@ -285,6 +297,15 @@ class LCodeGen BASE_EMBEDDED {
Handle<Map> type,
Handle<String> name);
+ // Emits optimized code to deep-copy the contents of statically known
+ // object graphs (e.g. object literal boilerplate).
+ void EmitDeepCopy(Handle<JSObject> object,
+ Register result,
+ Register source,
+ int* offset);
+
+ void EnsureSpaceForLazyDeopt();
+
LChunk* const chunk_;
MacroAssembler* const masm_;
CompilationInfo* const info_;
@@ -297,16 +318,11 @@ class LCodeGen BASE_EMBEDDED {
int inlined_function_count_;
Scope* const scope_;
Status status_;
+ bool dynamic_frame_alignment_;
TranslationBuffer translations_;
ZoneList<LDeferredCode*> deferred_;
int osr_pc_offset_;
-
- struct DeoptimizationRelocSize {
- int min_size;
- int last_pc_offset;
- };
-
- DeoptimizationRelocSize deoptimization_reloc_size;
+ int last_lazy_deopt_pc_;
// Builder that keeps track of safepoints in the code. The table
// itself is emitted at the end of the generated code.
@@ -346,16 +362,20 @@ class LCodeGen BASE_EMBEDDED {
class LDeferredCode: public ZoneObject {
public:
explicit LDeferredCode(LCodeGen* codegen)
- : codegen_(codegen), external_exit_(NULL) {
+ : codegen_(codegen),
+ external_exit_(NULL),
+ instruction_index_(codegen->current_instruction_) {
codegen->AddDeferredCode(this);
}
virtual ~LDeferredCode() { }
virtual void Generate() = 0;
+ virtual LInstruction* instr() = 0;
void SetExit(Label *exit) { external_exit_ = exit; }
Label* entry() { return &entry_; }
Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }
+ int instruction_index() const { return instruction_index_; }
protected:
LCodeGen* codegen() const { return codegen_; }
@@ -366,6 +386,7 @@ class LDeferredCode: public ZoneObject {
Label entry_;
Label exit_;
Label* external_exit_;
+ int instruction_index_;
};
} } // namespace v8::internal
diff --git a/deps/v8/src/ia32/lithium-ia32.cc b/deps/v8/src/ia32/lithium-ia32.cc
index 3dc220d3d..4e5f27854 100644
--- a/deps/v8/src/ia32/lithium-ia32.cc
+++ b/deps/v8/src/ia32/lithium-ia32.cc
@@ -110,22 +110,17 @@ void LInstruction::PrintTo(StringStream* stream) {
}
-template<int R, int I, int T>
-void LTemplateInstruction<R, I, T>::PrintDataTo(StringStream* stream) {
+void LInstruction::PrintDataTo(StringStream* stream) {
stream->Add("= ");
- for (int i = 0; i < inputs_.length(); i++) {
+ for (int i = 0; i < InputCount(); i++) {
if (i > 0) stream->Add(" ");
- inputs_[i]->PrintTo(stream);
+ InputAt(i)->PrintTo(stream);
}
}
-template<int R, int I, int T>
-void LTemplateInstruction<R, I, T>::PrintOutputOperandTo(StringStream* stream) {
- for (int i = 0; i < results_.length(); i++) {
- if (i > 0) stream->Add(" ");
- results_[i]->PrintTo(stream);
- }
+void LInstruction::PrintOutputOperandTo(StringStream* stream) {
+ if (HasResult()) result()->PrintTo(stream);
}
@@ -214,10 +209,11 @@ void LCmpIDAndBranch::PrintDataTo(StringStream* stream) {
}
-void LIsNullAndBranch::PrintDataTo(StringStream* stream) {
+void LIsNilAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if ");
InputAt(0)->PrintTo(stream);
- stream->Add(is_strict() ? " === null" : " == null");
+ stream->Add(kind() == kStrictEquality ? " === " : " == ");
+ stream->Add(nil() == kNullValue ? "null" : "undefined");
stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
}
@@ -229,6 +225,13 @@ void LIsObjectAndBranch::PrintDataTo(StringStream* stream) {
}
+void LIsStringAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if is_string(");
+ InputAt(0)->PrintTo(stream);
+ stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
void LIsSmiAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if is_smi(");
InputAt(0)->PrintTo(stream);
@@ -243,6 +246,14 @@ void LIsUndetectableAndBranch::PrintDataTo(StringStream* stream) {
}
+void LStringCompareAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if string_compare(");
+ InputAt(1)->PrintTo(stream);
+ InputAt(2)->PrintTo(stream);
+ stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if has_instance_type(");
InputAt(0)->PrintTo(stream);
@@ -351,7 +362,11 @@ void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
int LChunk::GetNextSpillIndex(bool is_double) {
// Skip a slot if needed for a double-width slot.
- if (is_double) spill_slot_count_++;
+ if (is_double) {
+ spill_slot_count_ |= 1; // Make it odd, so incrementing makes it even.
+ spill_slot_count_++;
+ num_double_slots_++;
+ }
return spill_slot_count_++;
}
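// Worked example for the alignment trick above: starting from
// spill_slot_count_ == 4, "|= 1" gives 5 and "++" gives 6, so the
// returned double-slot index (6, with the count left at 7) is always
// even; starting from 5, the "|= 1" is a no-op and the result is the
// same, so at most one single slot is wasted for alignment.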
@@ -447,8 +462,14 @@ void LStoreKeyedGeneric::PrintDataTo(StringStream* stream) {
}
+void LTransitionElementsKind::PrintDataTo(StringStream* stream) {
+ object()->PrintTo(stream);
+ stream->Add(" %p -> %p", *original_map(), *transitioned_map());
+}
+
+
void LChunk::AddInstruction(LInstruction* instr, HBasicBlock* block) {
- LInstructionGap* gap = new LInstructionGap(block);
+ LInstructionGap* gap = new(graph_->zone()) LInstructionGap(block);
int index = -1;
if (instr->IsControl()) {
instructions_.Add(gap);
@@ -523,7 +544,7 @@ Representation LChunk::LookupLiteralRepresentation(
LChunk* LChunkBuilder::Build() {
ASSERT(is_unused());
- chunk_ = new LChunk(info(), graph());
+ chunk_ = new(zone()) LChunk(info(), graph());
HPhase phase("Building chunk", chunk_);
status_ = BUILDING;
const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
@@ -559,14 +580,14 @@ LRegister* LChunkBuilder::ToOperand(Register reg) {
LUnallocated* LChunkBuilder::ToUnallocated(Register reg) {
- return new LUnallocated(LUnallocated::FIXED_REGISTER,
- Register::ToAllocationIndex(reg));
+ return new(zone()) LUnallocated(LUnallocated::FIXED_REGISTER,
+ Register::ToAllocationIndex(reg));
}
LUnallocated* LChunkBuilder::ToUnallocated(XMMRegister reg) {
- return new LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER,
- XMMRegister::ToAllocationIndex(reg));
+ return new(zone()) LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER,
+ XMMRegister::ToAllocationIndex(reg));
}
@@ -581,30 +602,30 @@ LOperand* LChunkBuilder::UseFixedDouble(HValue* value, XMMRegister reg) {
LOperand* LChunkBuilder::UseRegister(HValue* value) {
- return Use(value, new LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
+ return Use(value, new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
}
LOperand* LChunkBuilder::UseRegisterAtStart(HValue* value) {
return Use(value,
- new LUnallocated(LUnallocated::MUST_HAVE_REGISTER,
- LUnallocated::USED_AT_START));
+ new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER,
+ LUnallocated::USED_AT_START));
}
LOperand* LChunkBuilder::UseTempRegister(HValue* value) {
- return Use(value, new LUnallocated(LUnallocated::WRITABLE_REGISTER));
+ return Use(value, new(zone()) LUnallocated(LUnallocated::WRITABLE_REGISTER));
}
LOperand* LChunkBuilder::Use(HValue* value) {
- return Use(value, new LUnallocated(LUnallocated::NONE));
+ return Use(value, new(zone()) LUnallocated(LUnallocated::NONE));
}
LOperand* LChunkBuilder::UseAtStart(HValue* value) {
- return Use(value, new LUnallocated(LUnallocated::NONE,
- LUnallocated::USED_AT_START));
+ return Use(value, new(zone()) LUnallocated(LUnallocated::NONE,
+ LUnallocated::USED_AT_START));
}
@@ -639,7 +660,7 @@ LOperand* LChunkBuilder::UseRegisterOrConstantAtStart(HValue* value) {
LOperand* LChunkBuilder::UseAny(HValue* value) {
return value->IsConstant()
? chunk_->DefineConstantOperand(HConstant::cast(value))
- : Use(value, new LUnallocated(LUnallocated::ANY));
+ : Use(value, new(zone()) LUnallocated(LUnallocated::ANY));
}
@@ -664,14 +685,15 @@ LInstruction* LChunkBuilder::Define(LTemplateInstruction<1, I, T>* instr,
template<int I, int T>
LInstruction* LChunkBuilder::Define(LTemplateInstruction<1, I, T>* instr) {
- return Define(instr, new LUnallocated(LUnallocated::NONE));
+ return Define(instr, new(zone()) LUnallocated(LUnallocated::NONE));
}
template<int I, int T>
LInstruction* LChunkBuilder::DefineAsRegister(
LTemplateInstruction<1, I, T>* instr) {
- return Define(instr, new LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
+ return Define(instr,
+ new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
}
@@ -679,14 +701,16 @@ template<int I, int T>
LInstruction* LChunkBuilder::DefineAsSpilled(
LTemplateInstruction<1, I, T>* instr,
int index) {
- return Define(instr, new LUnallocated(LUnallocated::FIXED_SLOT, index));
+ return Define(instr,
+ new(zone()) LUnallocated(LUnallocated::FIXED_SLOT, index));
}
template<int I, int T>
LInstruction* LChunkBuilder::DefineSameAsFirst(
LTemplateInstruction<1, I, T>* instr) {
- return Define(instr, new LUnallocated(LUnallocated::SAME_AS_FIRST_INPUT));
+ return Define(instr,
+ new(zone()) LUnallocated(LUnallocated::SAME_AS_FIRST_INPUT));
}
@@ -707,7 +731,9 @@ LInstruction* LChunkBuilder::DefineFixedDouble(
LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
HEnvironment* hydrogen_env = current_block_->last_environment();
- instr->set_environment(CreateEnvironment(hydrogen_env));
+ int argument_index_accumulator = 0;
+ instr->set_environment(CreateEnvironment(hydrogen_env,
+ &argument_index_accumulator));
return instr;
}
@@ -737,7 +763,7 @@ LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
instr->MarkAsCall();
instr = AssignPointerMap(instr);
- if (hinstr->HasSideEffects()) {
+ if (hinstr->HasObservableSideEffects()) {
ASSERT(hinstr->next()->IsSimulate());
HSimulate* sim = HSimulate::cast(hinstr->next());
instr = SetInstructionPendingDeoptimizationEnvironment(
@@ -749,7 +775,8 @@ LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
// Thus we still need to attach an environment to this call even if
// the call sequence cannot deoptimize eagerly.
bool needs_environment =
- (can_deoptimize == CAN_DEOPTIMIZE_EAGERLY) || !hinstr->HasSideEffects();
+ (can_deoptimize == CAN_DEOPTIMIZE_EAGERLY) ||
+ !hinstr->HasObservableSideEffects();
if (needs_environment && !instr->HasEnvironment()) {
instr = AssignEnvironment(instr);
}
@@ -766,13 +793,14 @@ LInstruction* LChunkBuilder::MarkAsSaveDoubles(LInstruction* instr) {
LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
ASSERT(!instr->HasPointerMap());
- instr->set_pointer_map(new LPointerMap(position_));
+ instr->set_pointer_map(new(zone()) LPointerMap(position_));
return instr;
}
LUnallocated* LChunkBuilder::TempRegister() {
- LUnallocated* operand = new LUnallocated(LUnallocated::MUST_HAVE_REGISTER);
+ LUnallocated* operand =
+ new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER);
allocator_->RecordTemporary(operand);
return operand;
}
@@ -793,40 +821,17 @@ LOperand* LChunkBuilder::FixedTemp(XMMRegister reg) {
LInstruction* LChunkBuilder::DoBlockEntry(HBlockEntry* instr) {
- return new LLabel(instr->block());
+ return new(zone()) LLabel(instr->block());
}
LInstruction* LChunkBuilder::DoSoftDeoptimize(HSoftDeoptimize* instr) {
- return AssignEnvironment(new LDeoptimize);
+ return AssignEnvironment(new(zone()) LDeoptimize);
}
LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
- return AssignEnvironment(new LDeoptimize);
-}
-
-
-LInstruction* LChunkBuilder::DoBit(Token::Value op,
- HBitwiseBinaryOperation* instr) {
- if (instr->representation().IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
-
- LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
- LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
- return DefineSameAsFirst(new LBitI(op, left, right));
- } else {
- ASSERT(instr->representation().IsTagged());
- ASSERT(instr->left()->representation().IsTagged());
- ASSERT(instr->right()->representation().IsTagged());
-
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* left = UseFixed(instr->left(), edx);
- LOperand* right = UseFixed(instr->right(), eax);
- LArithmeticT* result = new LArithmeticT(op, context, left, right);
- return MarkAsCall(DefineFixed(result, eax), instr);
- }
+ return AssignEnvironment(new(zone()) LDeoptimize);
}
@@ -839,7 +844,7 @@ LInstruction* LChunkBuilder::DoShift(Token::Value op,
LOperand* context = UseFixed(instr->context(), esi);
LOperand* left = UseFixed(instr->left(), edx);
LOperand* right = UseFixed(instr->right(), eax);
- LArithmeticT* result = new LArithmeticT(op, context, left, right);
+ LArithmeticT* result = new(zone()) LArithmeticT(op, context, left, right);
return MarkAsCall(DefineFixed(result, eax), instr);
}
@@ -873,7 +878,7 @@ LInstruction* LChunkBuilder::DoShift(Token::Value op,
}
LInstruction* result =
- DefineSameAsFirst(new LShiftI(op, left, right, does_deopt));
+ DefineSameAsFirst(new(zone()) LShiftI(op, left, right, does_deopt));
return does_deopt ? AssignEnvironment(result) : result;
}
@@ -886,7 +891,7 @@ LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op,
ASSERT(op != Token::MOD);
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseRegisterAtStart(instr->right());
- LArithmeticD* result = new LArithmeticD(op, left, right);
+ LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
return DefineSameAsFirst(result);
}
@@ -906,7 +911,7 @@ LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op,
LOperand* left_operand = UseFixed(left, edx);
LOperand* right_operand = UseFixed(right, eax);
LArithmeticT* result =
- new LArithmeticT(op, context, left_operand, right_operand);
+ new(zone()) LArithmeticT(op, context, left_operand, right_operand);
return MarkAsCall(DefineFixed(result, eax), instr);
}
@@ -994,20 +999,23 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) {
}
-LEnvironment* LChunkBuilder::CreateEnvironment(HEnvironment* hydrogen_env) {
+LEnvironment* LChunkBuilder::CreateEnvironment(
+ HEnvironment* hydrogen_env,
+ int* argument_index_accumulator) {
if (hydrogen_env == NULL) return NULL;
- LEnvironment* outer = CreateEnvironment(hydrogen_env->outer());
+ LEnvironment* outer =
+ CreateEnvironment(hydrogen_env->outer(), argument_index_accumulator);
int ast_id = hydrogen_env->ast_id();
ASSERT(ast_id != AstNode::kNoNumber);
int value_count = hydrogen_env->length();
- LEnvironment* result = new LEnvironment(hydrogen_env->closure(),
- ast_id,
- hydrogen_env->parameter_count(),
- argument_count_,
- value_count,
- outer);
- int argument_index = 0;
+ LEnvironment* result =
+ new(zone()) LEnvironment(hydrogen_env->closure(),
+ ast_id,
+ hydrogen_env->parameter_count(),
+ argument_count_,
+ value_count,
+ outer);
for (int i = 0; i < value_count; ++i) {
if (hydrogen_env->is_special_index(i)) continue;
@@ -1016,7 +1024,7 @@ LEnvironment* LChunkBuilder::CreateEnvironment(HEnvironment* hydrogen_env) {
if (value->IsArgumentsObject()) {
op = NULL;
} else if (value->IsPushArgument()) {
- op = new LArgument(argument_index++);
+ op = new(zone()) LArgument((*argument_index_accumulator)++);
} else {
op = UseAny(value);
}
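// Why the accumulator now threads through the recursion (a reading of
// the change): CreateEnvironment calls itself for each outer (inlined)
// environment, and pushed arguments from all frames share one index
// space, so the old per-call "int argument_index = 0" would have
// restarted the numbering for every frame.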
@@ -1028,7 +1036,7 @@ LEnvironment* LChunkBuilder::CreateEnvironment(HEnvironment* hydrogen_env) {
LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
- return new LGoto(instr->FirstSuccessor()->block_id());
+ return new(zone()) LGoto(instr->FirstSuccessor()->block_id());
}
@@ -1040,7 +1048,7 @@ LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
HBasicBlock* successor = HConstant::cast(v)->ToBoolean()
? instr->FirstSuccessor()
: instr->SecondSuccessor();
- return new LGoto(successor->block_id());
+ return new(zone()) LGoto(successor->block_id());
}
ToBooleanStub::Types expected = instr->expected_input_types();
// We need a temporary register when we have to access the map *or* we have
@@ -1048,24 +1056,24 @@ LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
// involving maps).
bool needs_temp = expected.NeedsMap() || expected.IsEmpty();
LOperand* temp = needs_temp ? TempRegister() : NULL;
- return AssignEnvironment(new LBranch(UseRegister(v), temp));
+ return AssignEnvironment(new(zone()) LBranch(UseRegister(v), temp));
}
LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) {
ASSERT(instr->value()->representation().IsTagged());
LOperand* value = UseRegisterAtStart(instr->value());
- return new LCmpMapAndBranch(value);
+ return new(zone()) LCmpMapAndBranch(value);
}
LInstruction* LChunkBuilder::DoArgumentsLength(HArgumentsLength* length) {
- return DefineAsRegister(new LArgumentsLength(Use(length->value())));
+ return DefineAsRegister(new(zone()) LArgumentsLength(Use(length->value())));
}
LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) {
- return DefineAsRegister(new LArgumentsElements);
+ return DefineAsRegister(new(zone()) LArgumentsElements);
}
@@ -1073,7 +1081,7 @@ LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
LOperand* left = UseFixed(instr->left(), InstanceofStub::left());
LOperand* right = UseFixed(instr->right(), InstanceofStub::right());
LOperand* context = UseFixed(instr->context(), esi);
- LInstanceOf* result = new LInstanceOf(context, left, right);
+ LInstanceOf* result = new(zone()) LInstanceOf(context, left, right);
return MarkAsCall(DefineFixed(result, eax), instr);
}
@@ -1081,7 +1089,7 @@ LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
HInstanceOfKnownGlobal* instr) {
LInstanceOfKnownGlobal* result =
- new LInstanceOfKnownGlobal(
+ new(zone()) LInstanceOfKnownGlobal(
UseFixed(instr->context(), esi),
UseFixed(instr->left(), InstanceofStub::left()),
FixedTemp(edi));
@@ -1095,11 +1103,11 @@ LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) {
LOperand* length = UseFixed(instr->length(), ebx);
LOperand* elements = UseFixed(instr->elements(), ecx);
LOperand* temp = FixedTemp(edx);
- LApplyArguments* result = new LApplyArguments(function,
- receiver,
- length,
- elements,
- temp);
+ LApplyArguments* result = new(zone()) LApplyArguments(function,
+ receiver,
+ length,
+ elements,
+ temp);
return MarkAsCall(DefineFixed(result, eax), instr, CAN_DEOPTIMIZE_EAGERLY);
}
@@ -1107,42 +1115,44 @@ LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) {
LInstruction* LChunkBuilder::DoPushArgument(HPushArgument* instr) {
++argument_count_;
LOperand* argument = UseAny(instr->argument());
- return new LPushArgument(argument);
+ return new(zone()) LPushArgument(argument);
}
LInstruction* LChunkBuilder::DoThisFunction(HThisFunction* instr) {
- return instr->HasNoUses() ? NULL : DefineAsRegister(new LThisFunction);
+ return instr->HasNoUses()
+ ? NULL
+ : DefineAsRegister(new(zone()) LThisFunction);
}
LInstruction* LChunkBuilder::DoContext(HContext* instr) {
- return instr->HasNoUses() ? NULL : DefineAsRegister(new LContext);
+ return instr->HasNoUses() ? NULL : DefineAsRegister(new(zone()) LContext);
}
LInstruction* LChunkBuilder::DoOuterContext(HOuterContext* instr) {
LOperand* context = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new LOuterContext(context));
+ return DefineAsRegister(new(zone()) LOuterContext(context));
}
LInstruction* LChunkBuilder::DoGlobalObject(HGlobalObject* instr) {
LOperand* context = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new LGlobalObject(context));
+ return DefineAsRegister(new(zone()) LGlobalObject(context));
}
LInstruction* LChunkBuilder::DoGlobalReceiver(HGlobalReceiver* instr) {
LOperand* global_object = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new LGlobalReceiver(global_object));
+ return DefineAsRegister(new(zone()) LGlobalReceiver(global_object));
}
LInstruction* LChunkBuilder::DoCallConstantFunction(
HCallConstantFunction* instr) {
argument_count_ -= instr->argument_count();
- return MarkAsCall(DefineFixed(new LCallConstantFunction, eax), instr);
+ return MarkAsCall(DefineFixed(new(zone()) LCallConstantFunction, eax), instr);
}
@@ -1150,7 +1160,7 @@ LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
LOperand* context = UseFixed(instr->context(), esi);
LOperand* function = UseFixed(instr->function(), edi);
argument_count_ -= instr->argument_count();
- LInvokeFunction* result = new LInvokeFunction(context, function);
+ LInvokeFunction* result = new(zone()) LInvokeFunction(context, function);
return MarkAsCall(DefineFixed(result, eax), instr, CANNOT_DEOPTIMIZE_EAGERLY);
}
@@ -1162,17 +1172,20 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
ASSERT(instr->value()->representation().IsDouble());
LOperand* context = UseAny(instr->context()); // Not actually used.
LOperand* input = UseRegisterAtStart(instr->value());
- LUnaryMathOperation* result = new LUnaryMathOperation(context, input);
+ LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(context,
+ input);
return DefineSameAsFirst(result);
} else if (op == kMathSin || op == kMathCos) {
LOperand* context = UseFixed(instr->context(), esi);
LOperand* input = UseFixedDouble(instr->value(), xmm1);
- LUnaryMathOperation* result = new LUnaryMathOperation(context, input);
+ LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(context,
+ input);
return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
} else {
LOperand* input = UseRegisterAtStart(instr->value());
LOperand* context = UseAny(instr->context()); // Deferred use by MathAbs.
- LUnaryMathOperation* result = new LUnaryMathOperation(context, input);
+ LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(context,
+ input);
switch (op) {
case kMathAbs:
return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
@@ -1197,7 +1210,7 @@ LInstruction* LChunkBuilder::DoCallKeyed(HCallKeyed* instr) {
LOperand* context = UseFixed(instr->context(), esi);
LOperand* key = UseFixed(instr->key(), ecx);
argument_count_ -= instr->argument_count();
- LCallKeyed* result = new LCallKeyed(context, key);
+ LCallKeyed* result = new(zone()) LCallKeyed(context, key);
return MarkAsCall(DefineFixed(result, eax), instr);
}
@@ -1205,7 +1218,7 @@ LInstruction* LChunkBuilder::DoCallKeyed(HCallKeyed* instr) {
LInstruction* LChunkBuilder::DoCallNamed(HCallNamed* instr) {
LOperand* context = UseFixed(instr->context(), esi);
argument_count_ -= instr->argument_count();
- LCallNamed* result = new LCallNamed(context);
+ LCallNamed* result = new(zone()) LCallNamed(context);
return MarkAsCall(DefineFixed(result, eax), instr);
}
@@ -1213,14 +1226,14 @@ LInstruction* LChunkBuilder::DoCallNamed(HCallNamed* instr) {
LInstruction* LChunkBuilder::DoCallGlobal(HCallGlobal* instr) {
LOperand* context = UseFixed(instr->context(), esi);
argument_count_ -= instr->argument_count();
- LCallGlobal* result = new LCallGlobal(context);
+ LCallGlobal* result = new(zone()) LCallGlobal(context);
return MarkAsCall(DefineFixed(result, eax), instr);
}
LInstruction* LChunkBuilder::DoCallKnownGlobal(HCallKnownGlobal* instr) {
argument_count_ -= instr->argument_count();
- return MarkAsCall(DefineFixed(new LCallKnownGlobal, eax), instr);
+ return MarkAsCall(DefineFixed(new(zone()) LCallKnownGlobal, eax), instr);
}
@@ -1228,15 +1241,16 @@ LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
LOperand* context = UseFixed(instr->context(), esi);
LOperand* constructor = UseFixed(instr->constructor(), edi);
argument_count_ -= instr->argument_count();
- LCallNew* result = new LCallNew(context, constructor);
+ LCallNew* result = new(zone()) LCallNew(context, constructor);
return MarkAsCall(DefineFixed(result, eax), instr);
}
LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
LOperand* context = UseFixed(instr->context(), esi);
+ LOperand* function = UseFixed(instr->function(), edi);
argument_count_ -= instr->argument_count();
- LCallFunction* result = new LCallFunction(context);
+ LCallFunction* result = new(zone()) LCallFunction(context, function);
return MarkAsCall(DefineFixed(result, eax), instr);
}
@@ -1244,7 +1258,7 @@ LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
argument_count_ -= instr->argument_count();
LOperand* context = UseFixed(instr->context(), esi);
- return MarkAsCall(DefineFixed(new LCallRuntime(context), eax), instr);
+ return MarkAsCall(DefineFixed(new(zone()) LCallRuntime(context), eax), instr);
}
@@ -1263,8 +1277,26 @@ LInstruction* LChunkBuilder::DoShl(HShl* instr) {
}
-LInstruction* LChunkBuilder::DoBitAnd(HBitAnd* instr) {
- return DoBit(Token::BIT_AND, instr);
+LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
+ if (instr->representation().IsInteger32()) {
+ ASSERT(instr->left()->representation().IsInteger32());
+ ASSERT(instr->right()->representation().IsInteger32());
+
+ LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
+ LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
+ return DefineSameAsFirst(new(zone()) LBitI(left, right));
+ } else {
+ ASSERT(instr->representation().IsTagged());
+ ASSERT(instr->left()->representation().IsTagged());
+ ASSERT(instr->right()->representation().IsTagged());
+
+ LOperand* context = UseFixed(instr->context(), esi);
+ LOperand* left = UseFixed(instr->left(), edx);
+ LOperand* right = UseFixed(instr->right(), eax);
+ LArithmeticT* result =
+ new(zone()) LArithmeticT(instr->op(), context, left, right);
+ return MarkAsCall(DefineFixed(result, eax), instr);
+ }
}
@@ -1272,21 +1304,11 @@ LInstruction* LChunkBuilder::DoBitNot(HBitNot* instr) {
ASSERT(instr->value()->representation().IsInteger32());
ASSERT(instr->representation().IsInteger32());
LOperand* input = UseRegisterAtStart(instr->value());
- LBitNotI* result = new LBitNotI(input);
+ LBitNotI* result = new(zone()) LBitNotI(input);
return DefineSameAsFirst(result);
}
-LInstruction* LChunkBuilder::DoBitOr(HBitOr* instr) {
- return DoBit(Token::BIT_OR, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoBitXor(HBitXor* instr) {
- return DoBit(Token::BIT_XOR, instr);
-}
-
-
LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::DIV, instr);
@@ -1296,7 +1318,7 @@ LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
LOperand* temp = FixedTemp(edx);
LOperand* dividend = UseFixed(instr->left(), eax);
LOperand* divisor = UseRegister(instr->right());
- LDivI* result = new LDivI(dividend, divisor, temp);
+ LDivI* result = new(zone()) LDivI(dividend, divisor, temp);
return AssignEnvironment(DefineFixed(result, eax));
} else {
ASSERT(instr->representation().IsTagged());
@@ -1314,7 +1336,8 @@ LInstruction* LChunkBuilder::DoMod(HMod* instr) {
if (instr->HasPowerOf2Divisor()) {
ASSERT(!instr->CheckFlag(HValue::kCanBeDivByZero));
LOperand* value = UseRegisterAtStart(instr->left());
- LModI* mod = new LModI(value, UseOrConstant(instr->right()), NULL);
+ LModI* mod =
+ new(zone()) LModI(value, UseOrConstant(instr->right()), NULL);
result = DefineSameAsFirst(mod);
} else {
// The temporary operand is necessary to ensure that right is
@@ -1322,7 +1345,7 @@ LInstruction* LChunkBuilder::DoMod(HMod* instr) {
LOperand* temp = FixedTemp(edx);
LOperand* value = UseFixed(instr->left(), eax);
LOperand* divisor = UseRegister(instr->right());
- LModI* mod = new LModI(value, divisor, temp);
+ LModI* mod = new(zone()) LModI(value, divisor, temp);
result = DefineFixed(mod, edx);
}
@@ -1339,7 +1362,7 @@ LInstruction* LChunkBuilder::DoMod(HMod* instr) {
// TODO(fschneider): Allow any register as input registers.
LOperand* left = UseFixedDouble(instr->left(), xmm2);
LOperand* right = UseFixedDouble(instr->right(), xmm1);
- LArithmeticD* result = new LArithmeticD(Token::MOD, left, right);
+ LArithmeticD* result = new(zone()) LArithmeticD(Token::MOD, left, right);
return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
}
}
@@ -1355,7 +1378,7 @@ LInstruction* LChunkBuilder::DoMul(HMul* instr) {
if (instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
temp = TempRegister();
}
- LMulI* mul = new LMulI(left, right, temp);
+ LMulI* mul = new(zone()) LMulI(left, right, temp);
return AssignEnvironment(DefineSameAsFirst(mul));
} else if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::MUL, instr);
@@ -1372,7 +1395,7 @@ LInstruction* LChunkBuilder::DoSub(HSub* instr) {
ASSERT(instr->right()->representation().IsInteger32());
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseOrConstantAtStart(instr->right());
- LSubI* sub = new LSubI(left, right);
+ LSubI* sub = new(zone()) LSubI(left, right);
LInstruction* result = DefineSameAsFirst(sub);
if (instr->CheckFlag(HValue::kCanOverflow)) {
result = AssignEnvironment(result);
@@ -1393,7 +1416,7 @@ LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
ASSERT(instr->right()->representation().IsInteger32());
LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
- LAddI* add = new LAddI(left, right);
+ LAddI* add = new(zone()) LAddI(left, right);
LInstruction* result = DefineSameAsFirst(add);
if (instr->CheckFlag(HValue::kCanOverflow)) {
result = AssignEnvironment(result);
@@ -1418,21 +1441,19 @@ LInstruction* LChunkBuilder::DoPower(HPower* instr) {
LOperand* right = exponent_type.IsDouble() ?
UseFixedDouble(instr->right(), xmm2) :
UseFixed(instr->right(), eax);
- LPower* result = new LPower(left, right);
+ LPower* result = new(zone()) LPower(left, right);
return MarkAsCall(DefineFixedDouble(result, xmm3), instr,
CAN_DEOPTIMIZE_EAGERLY);
}
LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
- Token::Value op = instr->token();
ASSERT(instr->left()->representation().IsTagged());
ASSERT(instr->right()->representation().IsTagged());
- bool reversed = (op == Token::GT || op == Token::LTE);
LOperand* context = UseFixed(instr->context(), esi);
- LOperand* left = UseFixed(instr->left(), reversed ? eax : edx);
- LOperand* right = UseFixed(instr->right(), reversed ? edx : eax);
- LCmpT* result = new LCmpT(context, left, right);
+ LOperand* left = UseFixed(instr->left(), edx);
+ LOperand* right = UseFixed(instr->right(), eax);
+ LCmpT* result = new(zone()) LCmpT(context, left, right);
return MarkAsCall(DefineFixed(result, eax), instr);
}
@@ -1443,16 +1464,23 @@ LInstruction* LChunkBuilder::DoCompareIDAndBranch(
if (r.IsInteger32()) {
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
- LOperand* left = UseRegisterAtStart(instr->left());
+ LOperand* left = UseRegisterOrConstantAtStart(instr->left());
LOperand* right = UseOrConstantAtStart(instr->right());
- return new LCmpIDAndBranch(left, right);
+ return new(zone()) LCmpIDAndBranch(left, right);
} else {
ASSERT(r.IsDouble());
ASSERT(instr->left()->representation().IsDouble());
ASSERT(instr->right()->representation().IsDouble());
- LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseRegisterAtStart(instr->right());
- return new LCmpIDAndBranch(left, right);
+ LOperand* left;
+ LOperand* right;
+ if (instr->left()->IsConstant() && instr->right()->IsConstant()) {
+ left = UseRegisterOrConstantAtStart(instr->left());
+ right = UseRegisterOrConstantAtStart(instr->right());
+ } else {
+ left = UseRegisterAtStart(instr->left());
+ right = UseRegisterAtStart(instr->right());
+ }
+ return new(zone()) LCmpIDAndBranch(left, right);
}
}
@@ -1461,49 +1489,73 @@ LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch(
HCompareObjectEqAndBranch* instr) {
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseAtStart(instr->right());
- return new LCmpObjectEqAndBranch(left, right);
+ return new(zone()) LCmpObjectEqAndBranch(left, right);
}
LInstruction* LChunkBuilder::DoCompareConstantEqAndBranch(
HCompareConstantEqAndBranch* instr) {
- return new LCmpConstantEqAndBranch(UseRegisterAtStart(instr->value()));
+ return new(zone()) LCmpConstantEqAndBranch(
+ UseRegisterAtStart(instr->value()));
}
-LInstruction* LChunkBuilder::DoIsNullAndBranch(HIsNullAndBranch* instr) {
+LInstruction* LChunkBuilder::DoIsNilAndBranch(HIsNilAndBranch* instr) {
// We only need a temp register for non-strict compare.
- LOperand* temp = instr->is_strict() ? NULL : TempRegister();
- return new LIsNullAndBranch(UseRegisterAtStart(instr->value()), temp);
+ LOperand* temp = instr->kind() == kStrictEquality ? NULL : TempRegister();
+ return new(zone()) LIsNilAndBranch(UseRegisterAtStart(instr->value()), temp);
}
LInstruction* LChunkBuilder::DoIsObjectAndBranch(HIsObjectAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
LOperand* temp = TempRegister();
- return new LIsObjectAndBranch(UseRegister(instr->value()), temp);
+ return new(zone()) LIsObjectAndBranch(UseRegister(instr->value()), temp);
+}
+
+
+LInstruction* LChunkBuilder::DoIsStringAndBranch(HIsStringAndBranch* instr) {
+ ASSERT(instr->value()->representation().IsTagged());
+ LOperand* temp = TempRegister();
+  return new(zone()) LIsStringAndBranch(UseRegister(instr->value()), temp);
}
LInstruction* LChunkBuilder::DoIsSmiAndBranch(HIsSmiAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
- return new LIsSmiAndBranch(Use(instr->value()));
+ return new(zone()) LIsSmiAndBranch(Use(instr->value()));
}
LInstruction* LChunkBuilder::DoIsUndetectableAndBranch(
HIsUndetectableAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
- return new LIsUndetectableAndBranch(UseRegisterAtStart(instr->value()),
- TempRegister());
+ return new(zone()) LIsUndetectableAndBranch(
+ UseRegisterAtStart(instr->value()), TempRegister());
+}
+
+
+LInstruction* LChunkBuilder::DoStringCompareAndBranch(
+ HStringCompareAndBranch* instr) {
+ ASSERT(instr->left()->representation().IsTagged());
+ ASSERT(instr->right()->representation().IsTagged());
+ LOperand* context = UseFixed(instr->context(), esi);
+ LOperand* left = UseFixed(instr->left(), edx);
+ LOperand* right = UseFixed(instr->right(), eax);
+
+  LStringCompareAndBranch* result =
+      new(zone()) LStringCompareAndBranch(context, left, right);
+
+ return MarkAsCall(result, instr);
}
LInstruction* LChunkBuilder::DoHasInstanceTypeAndBranch(
HHasInstanceTypeAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
- return new LHasInstanceTypeAndBranch(UseRegisterAtStart(instr->value()),
- TempRegister());
+ return new(zone()) LHasInstanceTypeAndBranch(
+ UseRegisterAtStart(instr->value()),
+ TempRegister());
}
@@ -1512,14 +1564,14 @@ LInstruction* LChunkBuilder::DoGetCachedArrayIndex(
ASSERT(instr->value()->representation().IsTagged());
LOperand* value = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new LGetCachedArrayIndex(value));
+ return DefineAsRegister(new(zone()) LGetCachedArrayIndex(value));
}
LInstruction* LChunkBuilder::DoHasCachedArrayIndexAndBranch(
HHasCachedArrayIndexAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
- return new LHasCachedArrayIndexAndBranch(
+ return new(zone()) LHasCachedArrayIndexAndBranch(
UseRegisterAtStart(instr->value()));
}
@@ -1527,7 +1579,7 @@ LInstruction* LChunkBuilder::DoHasCachedArrayIndexAndBranch(
LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
HClassOfTestAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
- return new LClassOfTestAndBranch(UseTempRegister(instr->value()),
+ return new(zone()) LClassOfTestAndBranch(UseTempRegister(instr->value()),
TempRegister(),
TempRegister());
}
@@ -1535,32 +1587,32 @@ LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
LInstruction* LChunkBuilder::DoJSArrayLength(HJSArrayLength* instr) {
LOperand* array = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new LJSArrayLength(array));
+ return DefineAsRegister(new(zone()) LJSArrayLength(array));
}
LInstruction* LChunkBuilder::DoFixedArrayBaseLength(
HFixedArrayBaseLength* instr) {
LOperand* array = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new LFixedArrayBaseLength(array));
+ return DefineAsRegister(new(zone()) LFixedArrayBaseLength(array));
}
LInstruction* LChunkBuilder::DoElementsKind(HElementsKind* instr) {
LOperand* object = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new LElementsKind(object));
+ return DefineAsRegister(new(zone()) LElementsKind(object));
}
LInstruction* LChunkBuilder::DoValueOf(HValueOf* instr) {
LOperand* object = UseRegister(instr->value());
- LValueOf* result = new LValueOf(object, TempRegister());
+ LValueOf* result = new(zone()) LValueOf(object, TempRegister());
return AssignEnvironment(DefineSameAsFirst(result));
}
LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
- return AssignEnvironment(new LBoundsCheck(
+ return AssignEnvironment(new(zone()) LBoundsCheck(
UseRegisterOrConstantAtStart(instr->index()),
UseAtStart(instr->length())));
}
@@ -1576,7 +1628,7 @@ LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) {
LInstruction* LChunkBuilder::DoThrow(HThrow* instr) {
LOperand* context = UseFixed(instr->context(), esi);
LOperand* value = UseFixed(instr->value(), eax);
- return MarkAsCall(new LThrow(context, value), instr);
+ return MarkAsCall(new(zone()) LThrow(context, value), instr);
}
@@ -1599,7 +1651,7 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
if (from.IsTagged()) {
if (to.IsDouble()) {
LOperand* value = UseRegister(instr->value());
- LNumberUntagD* res = new LNumberUntagD(value);
+ LNumberUntagD* res = new(zone()) LNumberUntagD(value);
return AssignEnvironment(DefineAsRegister(res));
} else {
ASSERT(to.IsInteger32());
@@ -1611,10 +1663,10 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
(truncating && CpuFeatures::IsSupported(SSE3))
? NULL
: FixedTemp(xmm1);
- LTaggedToI* res = new LTaggedToI(value, xmm_temp);
+ LTaggedToI* res = new(zone()) LTaggedToI(value, xmm_temp);
return AssignEnvironment(DefineSameAsFirst(res));
} else {
- return DefineSameAsFirst(new LSmiUntag(value, needs_check));
+ return DefineSameAsFirst(new(zone()) LSmiUntag(value, needs_check));
}
}
} else if (from.IsDouble()) {
@@ -1624,7 +1676,7 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
// Make sure that temp and result_temp are different registers.
LUnallocated* result_temp = TempRegister();
- LNumberTagD* result = new LNumberTagD(value, temp);
+ LNumberTagD* result = new(zone()) LNumberTagD(value, temp);
return AssignPointerMap(Define(result, result_temp));
} else {
ASSERT(to.IsInteger32());
@@ -1633,21 +1685,23 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
LOperand* value = needs_temp ?
UseTempRegister(instr->value()) : UseRegister(instr->value());
LOperand* temp = needs_temp ? TempRegister() : NULL;
- return AssignEnvironment(DefineAsRegister(new LDoubleToI(value, temp)));
+ return AssignEnvironment(
+ DefineAsRegister(new(zone()) LDoubleToI(value, temp)));
}
} else if (from.IsInteger32()) {
if (to.IsTagged()) {
HValue* val = instr->value();
LOperand* value = UseRegister(val);
if (val->HasRange() && val->range()->IsInSmiRange()) {
- return DefineSameAsFirst(new LSmiTag(value));
+ return DefineSameAsFirst(new(zone()) LSmiTag(value));
} else {
- LNumberTagI* result = new LNumberTagI(value);
+ LNumberTagI* result = new(zone()) LNumberTagI(value);
return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
}
} else {
ASSERT(to.IsDouble());
- return DefineAsRegister(new LInteger32ToDouble(Use(instr->value())));
+ return DefineAsRegister(
+ new(zone()) LInteger32ToDouble(Use(instr->value())));
}
}
UNREACHABLE();
@@ -1657,40 +1711,46 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
LInstruction* LChunkBuilder::DoCheckNonSmi(HCheckNonSmi* instr) {
LOperand* value = UseAtStart(instr->value());
- return AssignEnvironment(new LCheckNonSmi(value));
+ return AssignEnvironment(new(zone()) LCheckNonSmi(value));
}
LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
LOperand* value = UseRegisterAtStart(instr->value());
LOperand* temp = TempRegister();
- LCheckInstanceType* result = new LCheckInstanceType(value, temp);
+ LCheckInstanceType* result = new(zone()) LCheckInstanceType(value, temp);
return AssignEnvironment(result);
}
LInstruction* LChunkBuilder::DoCheckPrototypeMaps(HCheckPrototypeMaps* instr) {
LOperand* temp = TempRegister();
- LCheckPrototypeMaps* result = new LCheckPrototypeMaps(temp);
+ LCheckPrototypeMaps* result = new(zone()) LCheckPrototypeMaps(temp);
return AssignEnvironment(result);
}
LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
LOperand* value = UseAtStart(instr->value());
- return AssignEnvironment(new LCheckSmi(value));
+ return AssignEnvironment(new(zone()) LCheckSmi(value));
}
LInstruction* LChunkBuilder::DoCheckFunction(HCheckFunction* instr) {
- LOperand* value = UseAtStart(instr->value());
- return AssignEnvironment(new LCheckFunction(value));
+ // If the target is in new space, we'll emit a global cell compare and so
+ // want the value in a register. If the target gets promoted before we
+ // emit code, we will still get the register but will do an immediate
+ // compare instead of the cell compare. This is safe.
+ LOperand* value = Isolate::Current()->heap()->InNewSpace(*instr->target())
+ ? UseRegisterAtStart(instr->value())
+ : UseAtStart(instr->value());
+ return AssignEnvironment(new(zone()) LCheckFunction(value));
}
LInstruction* LChunkBuilder::DoCheckMap(HCheckMap* instr) {
LOperand* value = UseRegisterAtStart(instr->value());
- LCheckMap* result = new LCheckMap(value);
+ LCheckMap* result = new(zone()) LCheckMap(value);
return AssignEnvironment(result);
}
@@ -1700,17 +1760,17 @@ LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
Representation input_rep = value->representation();
if (input_rep.IsDouble()) {
LOperand* reg = UseRegister(value);
- return DefineAsRegister(new LClampDToUint8(reg));
+ return DefineAsRegister(new(zone()) LClampDToUint8(reg));
} else if (input_rep.IsInteger32()) {
LOperand* reg = UseFixed(value, eax);
- return DefineFixed(new LClampIToUint8(reg), eax);
+ return DefineFixed(new(zone()) LClampIToUint8(reg), eax);
} else {
ASSERT(input_rep.IsTagged());
LOperand* reg = UseFixed(value, eax);
// Register allocator doesn't (yet) support allocation of double
// temps. Reserve xmm1 explicitly.
LOperand* temp = FixedTemp(xmm1);
- LClampTToUint8* result = new LClampTToUint8(reg, temp);
+ LClampTToUint8* result = new(zone()) LClampTToUint8(reg, temp);
return AssignEnvironment(DefineFixed(result, eax));
}
}
@@ -1725,7 +1785,7 @@ LInstruction* LChunkBuilder::DoToInt32(HToInt32* instr) {
LOperand* reg = UseRegister(value);
LOperand* temp_reg =
CpuFeatures::IsSupported(SSE3) ? NULL : TempRegister();
- result = DefineAsRegister(new LDoubleToI(reg, temp_reg));
+ result = DefineAsRegister(new(zone()) LDoubleToI(reg, temp_reg));
} else if (input_rep.IsInteger32()) {
// Canonicalization should already have removed the hydrogen instruction in
// this case, since it is a noop.
@@ -1738,29 +1798,29 @@ LInstruction* LChunkBuilder::DoToInt32(HToInt32* instr) {
// temps. Reserve xmm1 explicitly.
LOperand* xmm_temp =
CpuFeatures::IsSupported(SSE3) ? NULL : FixedTemp(xmm1);
- result = DefineSameAsFirst(new LTaggedToI(reg, xmm_temp));
+ result = DefineSameAsFirst(new(zone()) LTaggedToI(reg, xmm_temp));
}
return AssignEnvironment(result);
}
LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
- return new LReturn(UseFixed(instr->value(), eax));
+ return new(zone()) LReturn(UseFixed(instr->value(), eax));
}
LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
Representation r = instr->representation();
if (r.IsInteger32()) {
- return DefineAsRegister(new LConstantI);
+ return DefineAsRegister(new(zone()) LConstantI);
} else if (r.IsDouble()) {
double value = instr->DoubleValue();
LOperand* temp = (BitCast<uint64_t, double>(value) != 0)
? TempRegister()
: NULL;
- return DefineAsRegister(new LConstantD(temp));
+ return DefineAsRegister(new(zone()) LConstantD(temp));
} else if (r.IsTagged()) {
- return DefineAsRegister(new LConstantT);
+ return DefineAsRegister(new(zone()) LConstantT);
} else {
UNREACHABLE();
return NULL;
@@ -1769,8 +1829,8 @@ LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) {
- LLoadGlobalCell* result = new LLoadGlobalCell;
- return instr->check_hole_value()
+ LLoadGlobalCell* result = new(zone()) LLoadGlobalCell;
+ return instr->RequiresHoleCheck()
? AssignEnvironment(DefineAsRegister(result))
: DefineAsRegister(result);
}
@@ -1779,15 +1839,18 @@ LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) {
LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
LOperand* context = UseFixed(instr->context(), esi);
LOperand* global_object = UseFixed(instr->global_object(), eax);
- LLoadGlobalGeneric* result = new LLoadGlobalGeneric(context, global_object);
+ LLoadGlobalGeneric* result =
+ new(zone()) LLoadGlobalGeneric(context, global_object);
return MarkAsCall(DefineFixed(result, eax), instr);
}
LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) {
LStoreGlobalCell* result =
- new LStoreGlobalCell(UseRegisterAtStart(instr->value()));
- return instr->check_hole_value() ? AssignEnvironment(result) : result;
+ new(zone()) LStoreGlobalCell(UseTempRegister(instr->value()),
+ TempRegister(),
+ TempRegister());
+ return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
}
@@ -1796,38 +1859,36 @@ LInstruction* LChunkBuilder::DoStoreGlobalGeneric(HStoreGlobalGeneric* instr) {
LOperand* global_object = UseFixed(instr->global_object(), edx);
LOperand* value = UseFixed(instr->value(), eax);
LStoreGlobalGeneric* result =
- new LStoreGlobalGeneric(context, global_object, value);
+ new(zone()) LStoreGlobalGeneric(context, global_object, value);
return MarkAsCall(result, instr);
}
LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
LOperand* context = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new LLoadContextSlot(context));
+ return DefineAsRegister(new(zone()) LLoadContextSlot(context));
}
LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) {
- LOperand* context;
LOperand* value;
LOperand* temp;
+ LOperand* context = UseRegister(instr->context());
if (instr->NeedsWriteBarrier()) {
- context = UseTempRegister(instr->context());
value = UseTempRegister(instr->value());
temp = TempRegister();
} else {
- context = UseRegister(instr->context());
value = UseRegister(instr->value());
temp = NULL;
}
- return new LStoreContextSlot(context, value, temp);
+ return new(zone()) LStoreContextSlot(context, value, temp);
}
LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
ASSERT(instr->representation().IsTagged());
LOperand* obj = UseRegisterAtStart(instr->object());
- return DefineAsRegister(new LLoadNamedField(obj));
+ return DefineAsRegister(new(zone()) LLoadNamedField(obj));
}
@@ -1838,12 +1899,12 @@ LInstruction* LChunkBuilder::DoLoadNamedFieldPolymorphic(
if (instr->need_generic()) {
LOperand* obj = UseFixed(instr->object(), eax);
LLoadNamedFieldPolymorphic* result =
- new LLoadNamedFieldPolymorphic(context, obj);
+ new(zone()) LLoadNamedFieldPolymorphic(context, obj);
return MarkAsCall(DefineFixed(result, eax), instr);
} else {
LOperand* obj = UseRegisterAtStart(instr->object());
LLoadNamedFieldPolymorphic* result =
- new LLoadNamedFieldPolymorphic(context, obj);
+ new(zone()) LLoadNamedFieldPolymorphic(context, obj);
return AssignEnvironment(DefineAsRegister(result));
}
}
@@ -1852,7 +1913,7 @@ LInstruction* LChunkBuilder::DoLoadNamedFieldPolymorphic(
LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
LOperand* context = UseFixed(instr->context(), esi);
LOperand* object = UseFixed(instr->object(), eax);
- LLoadNamedGeneric* result = new LLoadNamedGeneric(context, object);
+ LLoadNamedGeneric* result = new(zone()) LLoadNamedGeneric(context, object);
return MarkAsCall(DefineFixed(result, eax), instr);
}
@@ -1860,21 +1921,21 @@ LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
LInstruction* LChunkBuilder::DoLoadFunctionPrototype(
HLoadFunctionPrototype* instr) {
return AssignEnvironment(DefineAsRegister(
- new LLoadFunctionPrototype(UseRegister(instr->function()),
- TempRegister())));
+ new(zone()) LLoadFunctionPrototype(UseRegister(instr->function()),
+ TempRegister())));
}
LInstruction* LChunkBuilder::DoLoadElements(HLoadElements* instr) {
LOperand* input = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new LLoadElements(input));
+ return DefineAsRegister(new(zone()) LLoadElements(input));
}
LInstruction* LChunkBuilder::DoLoadExternalArrayPointer(
HLoadExternalArrayPointer* instr) {
LOperand* input = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new LLoadExternalArrayPointer(input));
+ return DefineAsRegister(new(zone()) LLoadExternalArrayPointer(input));
}
@@ -1884,7 +1945,7 @@ LInstruction* LChunkBuilder::DoLoadKeyedFastElement(
ASSERT(instr->key()->representation().IsInteger32());
LOperand* obj = UseRegisterAtStart(instr->object());
LOperand* key = UseRegisterOrConstantAtStart(instr->key());
- LLoadKeyedFastElement* result = new LLoadKeyedFastElement(obj, key);
+ LLoadKeyedFastElement* result = new(zone()) LLoadKeyedFastElement(obj, key);
return AssignEnvironment(DefineAsRegister(result));
}
@@ -1896,7 +1957,7 @@ LInstruction* LChunkBuilder::DoLoadKeyedFastDoubleElement(
LOperand* elements = UseRegisterAtStart(instr->elements());
LOperand* key = UseRegisterOrConstantAtStart(instr->key());
LLoadKeyedFastDoubleElement* result =
- new LLoadKeyedFastDoubleElement(elements, key);
+ new(zone()) LLoadKeyedFastDoubleElement(elements, key);
return AssignEnvironment(DefineAsRegister(result));
}
@@ -1916,7 +1977,7 @@ LInstruction* LChunkBuilder::DoLoadKeyedSpecializedArrayElement(
LOperand* external_pointer = UseRegister(instr->external_pointer());
LOperand* key = UseRegisterOrConstant(instr->key());
LLoadKeyedSpecializedArrayElement* result =
- new LLoadKeyedSpecializedArrayElement(external_pointer,
+ new(zone()) LLoadKeyedSpecializedArrayElement(external_pointer,
key);
LInstruction* load_instr = DefineAsRegister(result);
// An unsigned int array load might overflow and cause a deopt, make sure it
@@ -1932,7 +1993,8 @@ LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
LOperand* object = UseFixed(instr->object(), edx);
LOperand* key = UseFixed(instr->key(), eax);
- LLoadKeyedGeneric* result = new LLoadKeyedGeneric(context, object, key);
+ LLoadKeyedGeneric* result =
+ new(zone()) LLoadKeyedGeneric(context, object, key);
return MarkAsCall(DefineFixed(result, eax), instr);
}
@@ -1944,7 +2006,7 @@ LInstruction* LChunkBuilder::DoStoreKeyedFastElement(
ASSERT(instr->object()->representation().IsTagged());
ASSERT(instr->key()->representation().IsInteger32());
- LOperand* obj = UseTempRegister(instr->object());
+ LOperand* obj = UseRegister(instr->object());
LOperand* val = needs_write_barrier
? UseTempRegister(instr->value())
: UseRegisterAtStart(instr->value());
@@ -1952,7 +2014,7 @@ LInstruction* LChunkBuilder::DoStoreKeyedFastElement(
? UseTempRegister(instr->key())
: UseRegisterOrConstantAtStart(instr->key());
- return AssignEnvironment(new LStoreKeyedFastElement(obj, key, val));
+ return AssignEnvironment(new(zone()) LStoreKeyedFastElement(obj, key, val));
}
@@ -1966,7 +2028,7 @@ LInstruction* LChunkBuilder::DoStoreKeyedFastDoubleElement(
LOperand* val = UseTempRegister(instr->value());
LOperand* key = UseRegisterOrConstantAtStart(instr->key());
- return new LStoreKeyedFastDoubleElement(elements, key, val);
+ return new(zone()) LStoreKeyedFastDoubleElement(elements, key, val);
}
@@ -1996,9 +2058,9 @@ LInstruction* LChunkBuilder::DoStoreKeyedSpecializedArrayElement(
val = UseRegister(instr->value());
}
- return new LStoreKeyedSpecializedArrayElement(external_pointer,
- key,
- val);
+ return new(zone()) LStoreKeyedSpecializedArrayElement(external_pointer,
+ key,
+ val);
}
@@ -2013,17 +2075,45 @@ LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
ASSERT(instr->value()->representation().IsTagged());
LStoreKeyedGeneric* result =
- new LStoreKeyedGeneric(context, object, key, value);
+ new(zone()) LStoreKeyedGeneric(context, object, key, value);
return MarkAsCall(result, instr);
}
+LInstruction* LChunkBuilder::DoTransitionElementsKind(
+ HTransitionElementsKind* instr) {
+ if (instr->original_map()->elements_kind() == FAST_SMI_ONLY_ELEMENTS &&
+ instr->transitioned_map()->elements_kind() == FAST_ELEMENTS) {
+ LOperand* object = UseRegister(instr->object());
+ LOperand* new_map_reg = TempRegister();
+ LOperand* temp_reg = TempRegister();
+ LTransitionElementsKind* result =
+ new(zone()) LTransitionElementsKind(object, new_map_reg, temp_reg);
+ return DefineSameAsFirst(result);
+ } else {
+ LOperand* object = UseFixed(instr->object(), eax);
+ LOperand* fixed_object_reg = FixedTemp(edx);
+ LOperand* new_map_reg = FixedTemp(ebx);
+ LTransitionElementsKind* result =
+ new(zone()) LTransitionElementsKind(object,
+ new_map_reg,
+ fixed_object_reg);
+ return MarkAsCall(DefineFixed(result, eax), instr);
+ }
+}
+
+
LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
bool needs_write_barrier = instr->NeedsWriteBarrier();
- LOperand* obj = needs_write_barrier
- ? UseTempRegister(instr->object())
- : UseRegisterAtStart(instr->object());
+ LOperand* obj;
+ if (needs_write_barrier) {
+ obj = instr->is_in_object()
+ ? UseRegister(instr->object())
+ : UseTempRegister(instr->object());
+ } else {
+ obj = UseRegisterAtStart(instr->object());
+ }
LOperand* val = needs_write_barrier
? UseTempRegister(instr->value())
@@ -2035,7 +2125,7 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
? TempRegister()
: NULL;
- return new LStoreNamedField(obj, val, temp);
+ return new(zone()) LStoreNamedField(obj, val, temp);
}
@@ -2044,7 +2134,8 @@ LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
LOperand* object = UseFixed(instr->object(), edx);
LOperand* value = UseFixed(instr->value(), eax);
- LStoreNamedGeneric* result = new LStoreNamedGeneric(context, object, value);
+ LStoreNamedGeneric* result =
+ new(zone()) LStoreNamedGeneric(context, object, value);
return MarkAsCall(result, instr);
}
@@ -2053,7 +2144,7 @@ LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
LOperand* context = UseFixed(instr->context(), esi);
LOperand* left = UseOrConstantAtStart(instr->left());
LOperand* right = UseOrConstantAtStart(instr->right());
- LStringAdd* string_add = new LStringAdd(context, left, right);
+ LStringAdd* string_add = new(zone()) LStringAdd(context, left, right);
return MarkAsCall(DefineFixed(string_add, eax), instr);
}
@@ -2062,7 +2153,8 @@ LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
LOperand* string = UseTempRegister(instr->string());
LOperand* index = UseTempRegister(instr->index());
LOperand* context = UseAny(instr->context());
- LStringCharCodeAt* result = new LStringCharCodeAt(context, string, index);
+ LStringCharCodeAt* result =
+ new(zone()) LStringCharCodeAt(context, string, index);
return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
}
@@ -2070,38 +2162,51 @@ LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
LInstruction* LChunkBuilder::DoStringCharFromCode(HStringCharFromCode* instr) {
LOperand* char_code = UseRegister(instr->value());
LOperand* context = UseAny(instr->context());
- LStringCharFromCode* result = new LStringCharFromCode(context, char_code);
+ LStringCharFromCode* result =
+ new(zone()) LStringCharFromCode(context, char_code);
return AssignPointerMap(DefineAsRegister(result));
}
LInstruction* LChunkBuilder::DoStringLength(HStringLength* instr) {
LOperand* string = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new LStringLength(string));
+ return DefineAsRegister(new(zone()) LStringLength(string));
}
LInstruction* LChunkBuilder::DoArrayLiteral(HArrayLiteral* instr) {
LOperand* context = UseFixed(instr->context(), esi);
- return MarkAsCall(DefineFixed(new LArrayLiteral(context), eax), instr);
+ return MarkAsCall(
+ DefineFixed(new(zone()) LArrayLiteral(context), eax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoObjectLiteralFast(HObjectLiteralFast* instr) {
+ LOperand* context = UseFixed(instr->context(), esi);
+ return MarkAsCall(
+ DefineFixed(new(zone()) LObjectLiteralFast(context), eax), instr);
}
-LInstruction* LChunkBuilder::DoObjectLiteral(HObjectLiteral* instr) {
+LInstruction* LChunkBuilder::DoObjectLiteralGeneric(
+ HObjectLiteralGeneric* instr) {
LOperand* context = UseFixed(instr->context(), esi);
- return MarkAsCall(DefineFixed(new LObjectLiteral(context), eax), instr);
+ return MarkAsCall(
+ DefineFixed(new(zone()) LObjectLiteralGeneric(context), eax), instr);
}
LInstruction* LChunkBuilder::DoRegExpLiteral(HRegExpLiteral* instr) {
LOperand* context = UseFixed(instr->context(), esi);
- return MarkAsCall(DefineFixed(new LRegExpLiteral(context), eax), instr);
+ return MarkAsCall(
+ DefineFixed(new(zone()) LRegExpLiteral(context), eax), instr);
}
LInstruction* LChunkBuilder::DoFunctionLiteral(HFunctionLiteral* instr) {
LOperand* context = UseFixed(instr->context(), esi);
- return MarkAsCall(DefineFixed(new LFunctionLiteral(context), eax), instr);
+ return MarkAsCall(
+ DefineFixed(new(zone()) LFunctionLiteral(context), eax), instr);
}
@@ -2109,7 +2214,7 @@ LInstruction* LChunkBuilder::DoDeleteProperty(HDeleteProperty* instr) {
LOperand* context = UseFixed(instr->context(), esi);
LOperand* object = UseAtStart(instr->object());
LOperand* key = UseOrConstantAtStart(instr->key());
- LDeleteProperty* result = new LDeleteProperty(context, object, key);
+ LDeleteProperty* result = new(zone()) LDeleteProperty(context, object, key);
return MarkAsCall(DefineFixed(result, eax), instr);
}
@@ -2117,13 +2222,13 @@ LInstruction* LChunkBuilder::DoDeleteProperty(HDeleteProperty* instr) {
LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
allocator_->MarkAsOsrEntry();
current_block_->last_environment()->set_ast_id(instr->ast_id());
- return AssignEnvironment(new LOsrEntry);
+ return AssignEnvironment(new(zone()) LOsrEntry);
}
LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
int spill_index = chunk()->GetParameterStackSlot(instr->index());
- return DefineAsSpilled(new LParameter, spill_index);
+ return DefineAsSpilled(new(zone()) LParameter, spill_index);
}
@@ -2133,14 +2238,14 @@ LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
Abort("Too many spill slots needed for OSR");
spill_index = 0;
}
- return DefineAsSpilled(new LUnknownOSRValue, spill_index);
+ return DefineAsSpilled(new(zone()) LUnknownOSRValue, spill_index);
}
LInstruction* LChunkBuilder::DoCallStub(HCallStub* instr) {
LOperand* context = UseFixed(instr->context(), esi);
argument_count_ -= instr->argument_count();
- LCallStub* result = new LCallStub(context);
+ LCallStub* result = new(zone()) LCallStub(context);
return MarkAsCall(DefineFixed(result, eax), instr);
}
@@ -2158,14 +2263,15 @@ LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
LOperand* arguments = UseRegister(instr->arguments());
LOperand* length = UseTempRegister(instr->length());
LOperand* index = Use(instr->index());
- LAccessArgumentsAt* result = new LAccessArgumentsAt(arguments, length, index);
+ LAccessArgumentsAt* result =
+ new(zone()) LAccessArgumentsAt(arguments, length, index);
return AssignEnvironment(DefineAsRegister(result));
}
LInstruction* LChunkBuilder::DoToFastProperties(HToFastProperties* instr) {
LOperand* object = UseFixed(instr->value(), eax);
- LToFastProperties* result = new LToFastProperties(object);
+ LToFastProperties* result = new(zone()) LToFastProperties(object);
return MarkAsCall(DefineFixed(result, eax), instr);
}
@@ -2173,19 +2279,19 @@ LInstruction* LChunkBuilder::DoToFastProperties(HToFastProperties* instr) {
LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
LOperand* context = UseFixed(instr->context(), esi);
LOperand* value = UseAtStart(instr->value());
- LTypeof* result = new LTypeof(context, value);
+ LTypeof* result = new(zone()) LTypeof(context, value);
return MarkAsCall(DefineFixed(result, eax), instr);
}
LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) {
- return new LTypeofIsAndBranch(UseTempRegister(instr->value()));
+ return new(zone()) LTypeofIsAndBranch(UseTempRegister(instr->value()));
}
LInstruction* LChunkBuilder::DoIsConstructCallAndBranch(
HIsConstructCallAndBranch* instr) {
- return new LIsConstructCallAndBranch(TempRegister());
+ return new(zone()) LIsConstructCallAndBranch(TempRegister());
}
@@ -2209,7 +2315,7 @@ LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
// lazy bailout instruction to capture the environment.
if (pending_deoptimization_ast_id_ != AstNode::kNoNumber) {
ASSERT(pending_deoptimization_ast_id_ == instr->ast_id());
- LLazyBailout* lazy_bailout = new LLazyBailout;
+ LLazyBailout* lazy_bailout = new(zone()) LLazyBailout;
LInstruction* result = AssignEnvironment(lazy_bailout);
instruction_pending_deoptimization_environment_->
set_deoptimization_environment(result->environment());
@@ -2224,11 +2330,12 @@ LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
if (instr->is_function_entry()) {
LOperand* context = UseFixed(instr->context(), esi);
- return MarkAsCall(new LStackCheck(context), instr);
+ return MarkAsCall(new(zone()) LStackCheck(context), instr);
} else {
ASSERT(instr->is_backwards_branch());
LOperand* context = UseAny(instr->context());
- return AssignEnvironment(AssignPointerMap(new LStackCheck(context)));
+ return AssignEnvironment(
+ AssignPointerMap(new(zone()) LStackCheck(context)));
}
}
@@ -2257,7 +2364,7 @@ LInstruction* LChunkBuilder::DoIn(HIn* instr) {
LOperand* context = UseFixed(instr->context(), esi);
LOperand* key = UseOrConstantAtStart(instr->key());
LOperand* object = UseOrConstantAtStart(instr->object());
- LIn* result = new LIn(context, key, object);
+ LIn* result = new(zone()) LIn(context, key, object);
return MarkAsCall(DefineFixed(result, eax), instr);
}
diff --git a/deps/v8/src/ia32/lithium-ia32.h b/deps/v8/src/ia32/lithium-ia32.h
index 038049ca0..517064722 100644
--- a/deps/v8/src/ia32/lithium-ia32.h
+++ b/deps/v8/src/ia32/lithium-ia32.h
@@ -101,10 +101,12 @@ class LCodeGen;
V(Integer32ToDouble) \
V(InvokeFunction) \
V(IsConstructCallAndBranch) \
- V(IsNullAndBranch) \
+ V(IsNilAndBranch) \
V(IsObjectAndBranch) \
+ V(IsStringAndBranch) \
V(IsSmiAndBranch) \
V(IsUndetectableAndBranch) \
+ V(StringCompareAndBranch) \
V(JSArrayLength) \
V(Label) \
V(LazyBailout) \
@@ -126,7 +128,8 @@ class LCodeGen;
V(NumberTagD) \
V(NumberTagI) \
V(NumberUntagD) \
- V(ObjectLiteral) \
+ V(ObjectLiteralFast) \
+ V(ObjectLiteralGeneric) \
V(OsrEntry) \
V(OuterContext) \
V(Parameter) \
@@ -156,6 +159,7 @@ class LCodeGen;
V(ThisFunction) \
V(Throw) \
V(ToFastProperties) \
+ V(TransitionElementsKind) \
V(Typeof) \
V(TypeofIsAndBranch) \
V(UnaryMathOperation) \
@@ -191,8 +195,8 @@ class LInstruction: public ZoneObject {
virtual void CompileToNative(LCodeGen* generator) = 0;
virtual const char* Mnemonic() const = 0;
virtual void PrintTo(StringStream* stream);
- virtual void PrintDataTo(StringStream* stream) = 0;
- virtual void PrintOutputOperandTo(StringStream* stream) = 0;
+ virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintOutputOperandTo(StringStream* stream);
enum Opcode {
// Declare a unique enum value for each instruction.
@@ -288,9 +292,6 @@ class LTemplateInstruction: public LInstruction {
int TempCount() { return T; }
LOperand* TempAt(int i) { return temps_[i]; }
- virtual void PrintDataTo(StringStream* stream);
- virtual void PrintOutputOperandTo(StringStream* stream);
-
protected:
EmbeddedContainer<LOperand*, R> results_;
EmbeddedContainer<LOperand*, I> inputs_;
@@ -368,17 +369,7 @@ class LGoto: public LTemplateInstruction<0, 0, 0> {
class LLazyBailout: public LTemplateInstruction<0, 0, 0> {
public:
- LLazyBailout() : gap_instructions_size_(0) { }
-
DECLARE_CONCRETE_INSTRUCTION(LazyBailout, "lazy-bailout")
-
- void set_gap_instructions_size(int gap_instructions_size) {
- gap_instructions_size_ = gap_instructions_size;
- }
- int gap_instructions_size() { return gap_instructions_size_; }
-
- private:
- int gap_instructions_size_;
};
@@ -615,17 +606,18 @@ class LCmpConstantEqAndBranch: public LControlInstruction<1, 0> {
};
-class LIsNullAndBranch: public LControlInstruction<1, 1> {
+class LIsNilAndBranch: public LControlInstruction<1, 1> {
public:
- LIsNullAndBranch(LOperand* value, LOperand* temp) {
+ LIsNilAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
temps_[0] = temp;
}
- DECLARE_CONCRETE_INSTRUCTION(IsNullAndBranch, "is-null-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(IsNullAndBranch)
+ DECLARE_CONCRETE_INSTRUCTION(IsNilAndBranch, "is-nil-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(IsNilAndBranch)
- bool is_strict() const { return hydrogen()->is_strict(); }
+ EqualityKind kind() const { return hydrogen()->kind(); }
+ NilValue nil() const { return hydrogen()->nil(); }
virtual void PrintDataTo(StringStream* stream);
};
@@ -644,6 +636,19 @@ class LIsObjectAndBranch: public LControlInstruction<1, 1> {
};
+class LIsStringAndBranch: public LControlInstruction<1, 1> {
+ public:
+ LIsStringAndBranch(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch, "is-string-and-branch")
+
+ virtual void PrintDataTo(StringStream* stream);
+};
+
+
class LIsSmiAndBranch: public LControlInstruction<1, 0> {
public:
explicit LIsSmiAndBranch(LOperand* value) {
@@ -671,6 +676,24 @@ class LIsUndetectableAndBranch: public LControlInstruction<1, 1> {
};
+class LStringCompareAndBranch: public LControlInstruction<3, 0> {
+ public:
+ LStringCompareAndBranch(LOperand* context, LOperand* left, LOperand* right) {
+ inputs_[0] = context;
+ inputs_[1] = left;
+ inputs_[2] = right;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(StringCompareAndBranch,
+ "string-compare-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(StringCompareAndBranch)
+
+ virtual void PrintDataTo(StringStream* stream);
+
+ Token::Value op() const { return hydrogen()->token(); }
+};
+
+
class LHasInstanceTypeAndBranch: public LControlInstruction<1, 1> {
public:
LHasInstanceTypeAndBranch(LOperand* value, LOperand* temp) {
@@ -797,18 +820,15 @@ class LBoundsCheck: public LTemplateInstruction<0, 2, 0> {
class LBitI: public LTemplateInstruction<1, 2, 0> {
public:
- LBitI(Token::Value op, LOperand* left, LOperand* right)
- : op_(op) {
+ LBitI(LOperand* left, LOperand* right) {
inputs_[0] = left;
inputs_[1] = right;
}
- Token::Value op() const { return op_; }
+ Token::Value op() const { return hydrogen()->op(); }
DECLARE_CONCRETE_INSTRUCTION(BitI, "bit-i")
-
- private:
- Token::Value op_;
+ DECLARE_HYDROGEN_ACCESSOR(Bitwise)
};
@@ -1230,10 +1250,12 @@ class LLoadGlobalGeneric: public LTemplateInstruction<1, 2, 0> {
};
-class LStoreGlobalCell: public LTemplateInstruction<0, 1, 0> {
+class LStoreGlobalCell: public LTemplateInstruction<0, 1, 2> {
public:
- explicit LStoreGlobalCell(LOperand* value) {
+  LStoreGlobalCell(LOperand* value, LOperand* temp1, LOperand* temp2) {
inputs_[0] = value;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
}
DECLARE_CONCRETE_INSTRUCTION(StoreGlobalCell, "store-global-cell")
@@ -1258,7 +1280,7 @@ class LStoreGlobalGeneric: public LTemplateInstruction<0, 3, 0> {
LOperand* global_object() { return InputAt(1); }
Handle<Object> name() const { return hydrogen()->name(); }
LOperand* value() { return InputAt(2); }
- bool strict_mode() { return hydrogen()->strict_mode(); }
+ StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
};
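
A reading aid for the template arguments changing in this hunk: the three parameters of LTemplateInstruction fix how many results, inputs, and temps an instruction carries (see the results_/inputs_/temps_ containers above), so LStoreGlobalCell's move from <0, 1, 0> to <0, 1, 2> keeps the single value input and adds the two temp registers for the hole check. A toy version of the shape:

// Toy containers; like V8's EmbeddedContainer, the N == 0 case is
// specialized so a zero-length array never appears.
template <class T, int N> struct Container { T elems[N]; };
template <class T> struct Container<T, 0> {};

template <int R, int I, int T>
struct LTemplateInstructionSketch {
  Container<void*, R> results_;  // R is 0 or 1 in practice
  Container<void*, I> inputs_;
  Container<void*, T> temps_;
};

// The new shape of LStoreGlobalCell: no result, one input, two temps.
typedef LTemplateInstructionSketch<0, 1, 2> StoreGlobalCellShape;
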
@@ -1292,7 +1314,6 @@ class LStoreContextSlot: public LTemplateInstruction<0, 2, 1> {
LOperand* context() { return InputAt(0); }
LOperand* value() { return InputAt(1); }
int slot_index() { return hydrogen()->slot_index(); }
- int needs_write_barrier() { return hydrogen()->NeedsWriteBarrier(); }
virtual void PrintDataTo(StringStream* stream);
};
@@ -1309,7 +1330,9 @@ class LPushArgument: public LTemplateInstruction<0, 1, 0> {
class LThisFunction: public LTemplateInstruction<1, 0, 0> {
+ public:
DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function")
+ DECLARE_HYDROGEN_ACCESSOR(ThisFunction)
};
@@ -1422,17 +1445,19 @@ class LCallNamed: public LTemplateInstruction<1, 1, 0> {
};
-class LCallFunction: public LTemplateInstruction<1, 1, 0> {
+class LCallFunction: public LTemplateInstruction<1, 2, 0> {
public:
- explicit LCallFunction(LOperand* context) {
+ explicit LCallFunction(LOperand* context, LOperand* function) {
inputs_[0] = context;
+ inputs_[1] = function;
}
DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function")
DECLARE_HYDROGEN_ACCESSOR(CallFunction)
LOperand* context() { return inputs_[0]; }
- int arity() const { return hydrogen()->argument_count() - 2; }
+ LOperand* function() { return inputs_[1]; }
+ int arity() const { return hydrogen()->argument_count() - 1; }
};
@@ -1614,7 +1639,6 @@ class LStoreNamedField: public LTemplateInstruction<0, 2, 1> {
Handle<Object> name() const { return hydrogen()->name(); }
bool is_in_object() { return hydrogen()->is_in_object(); }
int offset() { return hydrogen()->offset(); }
- bool needs_write_barrier() { return hydrogen()->NeedsWriteBarrier(); }
Handle<Map> transition() const { return hydrogen()->transition(); }
};
@@ -1636,7 +1660,7 @@ class LStoreNamedGeneric: public LTemplateInstruction<0, 3, 0> {
LOperand* object() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
Handle<Object> name() const { return hydrogen()->name(); }
- bool strict_mode() { return hydrogen()->strict_mode(); }
+ StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
};
@@ -1726,7 +1750,31 @@ class LStoreKeyedGeneric: public LTemplateInstruction<0, 4, 0> {
LOperand* object() { return inputs_[1]; }
LOperand* key() { return inputs_[2]; }
LOperand* value() { return inputs_[3]; }
- bool strict_mode() { return hydrogen()->strict_mode(); }
+ StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
+};
+
+
+class LTransitionElementsKind: public LTemplateInstruction<1, 1, 2> {
+ public:
+ LTransitionElementsKind(LOperand* object,
+ LOperand* new_map_temp,
+ LOperand* temp_reg) {
+ inputs_[0] = object;
+ temps_[0] = new_map_temp;
+ temps_[1] = temp_reg;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(TransitionElementsKind,
+ "transition-elements-kind")
+ DECLARE_HYDROGEN_ACCESSOR(TransitionElementsKind)
+
+ virtual void PrintDataTo(StringStream* stream);
+
+ LOperand* object() { return inputs_[0]; }
+ LOperand* new_map_reg() { return temps_[0]; }
+ LOperand* temp_reg() { return temps_[1]; }
+ Handle<Map> original_map() { return hydrogen()->original_map(); }
+ Handle<Map> transitioned_map() { return hydrogen()->transitioned_map(); }
};
@@ -1798,6 +1846,8 @@ class LCheckFunction: public LTemplateInstruction<0, 1, 0> {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(CheckFunction, "check-function")
DECLARE_HYDROGEN_ACCESSOR(CheckFunction)
};
@@ -1910,16 +1960,29 @@ class LArrayLiteral: public LTemplateInstruction<1, 1, 0> {
};
-class LObjectLiteral: public LTemplateInstruction<1, 1, 0> {
+class LObjectLiteralFast: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LObjectLiteralFast(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ObjectLiteralFast, "object-literal-fast")
+ DECLARE_HYDROGEN_ACCESSOR(ObjectLiteralFast)
+};
+
+
+class LObjectLiteralGeneric: public LTemplateInstruction<1, 1, 0> {
public:
- explicit LObjectLiteral(LOperand* context) {
+ explicit LObjectLiteralGeneric(LOperand* context) {
inputs_[0] = context;
}
LOperand* context() { return inputs_[0]; }
- DECLARE_CONCRETE_INSTRUCTION(ObjectLiteral, "object-literal")
- DECLARE_HYDROGEN_ACCESSOR(ObjectLiteral)
+ DECLARE_CONCRETE_INSTRUCTION(ObjectLiteralGeneric, "object-literal-generic")
+ DECLARE_HYDROGEN_ACCESSOR(ObjectLiteralGeneric)
};
@@ -2070,6 +2133,7 @@ class LChunk: public ZoneObject {
graph_(graph),
instructions_(32),
pointer_maps_(8),
+ num_double_slots_(0),
inlined_closures_(1) { }
void AddInstruction(LInstruction* instruction, HBasicBlock* block);
@@ -2083,6 +2147,8 @@ class LChunk: public ZoneObject {
int ParameterAt(int index);
int GetParameterStackSlot(int index) const;
int spill_slot_count() const { return spill_slot_count_; }
+ int num_double_slots() const { return num_double_slots_; }
+
CompilationInfo* info() const { return info_; }
HGraph* graph() const { return graph_; }
const ZoneList<LInstruction*>* instructions() const { return &instructions_; }
@@ -2124,6 +2190,7 @@ class LChunk: public ZoneObject {
HGraph* const graph_;
ZoneList<LInstruction*> instructions_;
ZoneList<LPointerMap*> pointer_maps_;
+ int num_double_slots_;
ZoneList<Handle<JSFunction> > inlined_closures_;
};
@@ -2134,6 +2201,7 @@ class LChunkBuilder BASE_EMBEDDED {
: chunk_(NULL),
info_(info),
graph_(graph),
+ isolate_(graph->isolate()),
status_(UNUSED),
current_instruction_(NULL),
current_block_(NULL),
@@ -2163,6 +2231,7 @@ class LChunkBuilder BASE_EMBEDDED {
LChunk* chunk() const { return chunk_; }
CompilationInfo* info() const { return info_; }
HGraph* graph() const { return graph_; }
+ Zone* zone() { return isolate_->zone(); }
bool is_unused() const { return status_ == UNUSED; }
bool is_building() const { return status_ == BUILDING; }
@@ -2259,12 +2328,12 @@ class LChunkBuilder BASE_EMBEDDED {
LInstruction* instr, int ast_id);
void ClearInstructionPendingDeoptimizationEnvironment();
- LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env);
+ LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env,
+ int* argument_index_accumulator);
void VisitInstruction(HInstruction* current);
void DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block);
- LInstruction* DoBit(Token::Value op, HBitwiseBinaryOperation* instr);
LInstruction* DoShift(Token::Value op, HBitwiseBinaryOperation* instr);
LInstruction* DoArithmeticD(Token::Value op,
HArithmeticBinaryOperation* instr);
@@ -2274,6 +2343,7 @@ class LChunkBuilder BASE_EMBEDDED {
LChunk* chunk_;
CompilationInfo* info_;
HGraph* const graph_;
+ Isolate* isolate_;
Status status_;
HInstruction* current_instruction_;
HBasicBlock* current_block_;
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.cc b/deps/v8/src/ia32/macro-assembler-ia32.cc
index 837112a55..fcae7a2fc 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.cc
+++ b/deps/v8/src/ia32/macro-assembler-ia32.cc
@@ -44,7 +44,8 @@ namespace internal {
MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
: Assembler(arg_isolate, buffer, size),
generating_stub_(false),
- allow_stub_calls_(true) {
+ allow_stub_calls_(true),
+ has_frame_(false) {
if (isolate() != NULL) {
code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
isolate());
@@ -52,33 +53,75 @@ MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
}
-void MacroAssembler::RecordWriteHelper(Register object,
- Register addr,
- Register scratch) {
- if (emit_debug_code()) {
- // Check that the object is not in new space.
- Label not_in_new_space;
- InNewSpace(object, scratch, not_equal, &not_in_new_space);
- Abort("new-space object passed to RecordWriteHelper");
- bind(&not_in_new_space);
+void MacroAssembler::InNewSpace(
+ Register object,
+ Register scratch,
+ Condition cc,
+ Label* condition_met,
+ Label::Distance condition_met_distance) {
+ ASSERT(cc == equal || cc == not_equal);
+ if (scratch.is(object)) {
+ and_(scratch, Immediate(~Page::kPageAlignmentMask));
+ } else {
+ mov(scratch, Immediate(~Page::kPageAlignmentMask));
+ and_(scratch, object);
}
+ // Check that we can use a test_b.
+ ASSERT(MemoryChunk::IN_FROM_SPACE < 8);
+ ASSERT(MemoryChunk::IN_TO_SPACE < 8);
+ int mask = (1 << MemoryChunk::IN_FROM_SPACE)
+ | (1 << MemoryChunk::IN_TO_SPACE);
+ // If non-zero, the page belongs to new-space.
+ test_b(Operand(scratch, MemoryChunk::kFlagsOffset),
+ static_cast<uint8_t>(mask));
+ j(cc, condition_met, condition_met_distance);
+}
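
The rewritten InNewSpace no longer compares against new-space start addresses; it masks the pointer down to its page header and tests two flag bits. A plain-C++ paraphrase of the emitted sequence (the page size, header layout, and bit positions are assumptions for illustration, not V8's real definitions):

#include <cstdint>

const uintptr_t kPageAlignmentMask = (1u << 20) - 1;  // assumed 1MB pages
enum { IN_FROM_SPACE = 3, IN_TO_SPACE = 4 };          // assumed bit indices, < 8
struct MemoryChunkSketch { uintptr_t flags; };        // flags word at offset 0

bool InNewSpace(const void* object) {
  // and_(scratch, ~Page::kPageAlignmentMask): back up to the page start.
  uintptr_t page = reinterpret_cast<uintptr_t>(object) & ~kPageAlignmentMask;
  const MemoryChunkSketch* chunk =
      reinterpret_cast<const MemoryChunkSketch*>(page);
  // test_b(...): both bits sit in the low byte, so one byte-wide test works.
  uintptr_t mask = (1u << IN_FROM_SPACE) | (1u << IN_TO_SPACE);
  return (chunk->flags & mask) != 0;  // non-zero: from- or to-space page
}
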
- // Compute the page start address from the heap object pointer, and reuse
- // the 'object' register for it.
- and_(object, ~Page::kPageAlignmentMask);
-
- // Compute number of region covering addr. See Page::GetRegionNumberForAddress
- // method for more details.
- shr(addr, Page::kRegionSizeLog2);
- and_(addr, Page::kPageAlignmentMask >> Page::kRegionSizeLog2);
- // Set dirty mark for region.
- // Bit tests with a memory operand should be avoided on Intel processors,
- // as they usually have long latency and multiple uops. We load the bit base
- // operand to a register at first and store it back after bit set.
- mov(scratch, Operand(object, Page::kDirtyFlagOffset));
- bts(Operand(scratch), addr);
- mov(Operand(object, Page::kDirtyFlagOffset), scratch);
+void MacroAssembler::RememberedSetHelper(
+ Register object, // Only used for debug checks.
+ Register addr,
+ Register scratch,
+ SaveFPRegsMode save_fp,
+ MacroAssembler::RememberedSetFinalAction and_then) {
+ Label done;
+ if (FLAG_debug_code) {
+ Label ok;
+ JumpIfNotInNewSpace(object, scratch, &ok, Label::kNear);
+ int3();
+ bind(&ok);
+ }
+ // Load store buffer top.
+ ExternalReference store_buffer =
+ ExternalReference::store_buffer_top(isolate());
+ mov(scratch, Operand::StaticVariable(store_buffer));
+ // Store pointer to buffer.
+ mov(Operand(scratch, 0), addr);
+ // Increment buffer top.
+ add(scratch, Immediate(kPointerSize));
+ // Write back new top of buffer.
+ mov(Operand::StaticVariable(store_buffer), scratch);
+  // Check for the end of the buffer; the overflow stub is called below
+  // when it has been reached.
+ test(scratch, Immediate(StoreBuffer::kStoreBufferOverflowBit));
+ if (and_then == kReturnAtEnd) {
+ Label buffer_overflowed;
+ j(not_equal, &buffer_overflowed, Label::kNear);
+ ret(0);
+ bind(&buffer_overflowed);
+ } else {
+ ASSERT(and_then == kFallThroughAtEnd);
+ j(equal, &done, Label::kNear);
+ }
+ StoreBufferOverflowStub store_buffer_overflow =
+ StoreBufferOverflowStub(save_fp);
+ CallStub(&store_buffer_overflow);
+ if (and_then == kReturnAtEnd) {
+ ret(0);
+ } else {
+ ASSERT(and_then == kFallThroughAtEnd);
+ bind(&done);
+ }
}
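
What RememberedSetHelper emits, restated as C++: push the slot address onto the store buffer, bump the top pointer, and hand off to the overflow stub once the top crosses the overflow bit. The names and values below are stand-ins chosen to mirror the assembler, not real V8 declarations; the real buffer is aligned so the bit flips exactly at its end.

#include <cstdint>

const uintptr_t kStoreBufferOverflowBit = 1u << 16;  // assumed, as in test()
uintptr_t store_buffer[1 << 14];                     // toy backing store
uintptr_t* store_buffer_top = store_buffer;          // the ExternalReference above

void HandleStoreBufferOverflow() { /* drain; stands in for the stub */ }

void RememberedSetInsert(void* slot_address) {
  *store_buffer_top = reinterpret_cast<uintptr_t>(slot_address);  // store pointer
  ++store_buffer_top;                                             // increment top
  if (reinterpret_cast<uintptr_t>(store_buffer_top) & kStoreBufferOverflowBit) {
    HandleStoreBufferOverflow();  // StoreBufferOverflowStub in the real code
  }
}
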
@@ -112,100 +155,144 @@ void MacroAssembler::ClampUint8(Register reg) {
}
-void MacroAssembler::InNewSpace(Register object,
- Register scratch,
- Condition cc,
- Label* branch,
- Label::Distance branch_near) {
- ASSERT(cc == equal || cc == not_equal);
- if (Serializer::enabled()) {
- // Can't do arithmetic on external references if it might get serialized.
- mov(scratch, Operand(object));
- // The mask isn't really an address. We load it as an external reference in
- // case the size of the new space is different between the snapshot maker
- // and the running system.
- and_(Operand(scratch),
- Immediate(ExternalReference::new_space_mask(isolate())));
- cmp(Operand(scratch),
- Immediate(ExternalReference::new_space_start(isolate())));
- j(cc, branch, branch_near);
- } else {
- int32_t new_space_start = reinterpret_cast<int32_t>(
- ExternalReference::new_space_start(isolate()).address());
- lea(scratch, Operand(object, -new_space_start));
- and_(scratch, isolate()->heap()->NewSpaceMask());
- j(cc, branch, branch_near);
+void MacroAssembler::RecordWriteArray(Register object,
+ Register value,
+ Register index,
+ SaveFPRegsMode save_fp,
+ RememberedSetAction remembered_set_action,
+ SmiCheck smi_check) {
+ // First, check if a write barrier is even needed. The tests below
+ // catch stores of Smis.
+ Label done;
+
+ // Skip barrier if writing a smi.
+ if (smi_check == INLINE_SMI_CHECK) {
+ ASSERT_EQ(0, kSmiTag);
+ test(value, Immediate(kSmiTagMask));
+ j(zero, &done);
+ }
+
+ // Array access: calculate the destination address in the same manner as
+ // KeyedStoreIC::GenerateGeneric. Multiply a smi by 2 to get an offset
+ // into an array of words.
+ Register dst = index;
+ lea(dst, Operand(object, index, times_half_pointer_size,
+ FixedArray::kHeaderSize - kHeapObjectTag));
+
+ RecordWrite(
+ object, dst, value, save_fp, remembered_set_action, OMIT_SMI_CHECK);
+
+ bind(&done);
+
+ // Clobber clobbered input registers when running with the debug-code flag
+ // turned on to provoke errors.
+ if (emit_debug_code()) {
+ mov(value, Immediate(BitCast<int32_t>(kZapValue)));
+ mov(index, Immediate(BitCast<int32_t>(kZapValue)));
}
}
-void MacroAssembler::RecordWrite(Register object,
- int offset,
- Register value,
- Register scratch) {
+void MacroAssembler::RecordWriteField(
+ Register object,
+ int offset,
+ Register value,
+ Register dst,
+ SaveFPRegsMode save_fp,
+ RememberedSetAction remembered_set_action,
+ SmiCheck smi_check) {
// First, check if a write barrier is even needed. The tests below
- // catch stores of Smis and stores into young gen.
+ // catch stores of Smis.
Label done;
// Skip barrier if writing a smi.
- STATIC_ASSERT(kSmiTag == 0);
- JumpIfSmi(value, &done, Label::kNear);
-
- InNewSpace(object, value, equal, &done, Label::kNear);
+ if (smi_check == INLINE_SMI_CHECK) {
+ JumpIfSmi(value, &done, Label::kNear);
+ }
- // The offset is relative to a tagged or untagged HeapObject pointer,
- // so either offset or offset + kHeapObjectTag must be a
- // multiple of kPointerSize.
- ASSERT(IsAligned(offset, kPointerSize) ||
- IsAligned(offset + kHeapObjectTag, kPointerSize));
+  // Although the object register is tagged, the offset is relative to the
+  // start of the object, so the offset must be a multiple of kPointerSize.
+ ASSERT(IsAligned(offset, kPointerSize));
- Register dst = scratch;
- if (offset != 0) {
- lea(dst, Operand(object, offset));
- } else {
- // Array access: calculate the destination address in the same manner as
- // KeyedStoreIC::GenerateGeneric. Multiply a smi by 2 to get an offset
- // into an array of words.
- STATIC_ASSERT(kSmiTagSize == 1);
- STATIC_ASSERT(kSmiTag == 0);
- lea(dst, Operand(object, dst, times_half_pointer_size,
- FixedArray::kHeaderSize - kHeapObjectTag));
+ lea(dst, FieldOperand(object, offset));
+ if (emit_debug_code()) {
+ Label ok;
+ test_b(dst, (1 << kPointerSizeLog2) - 1);
+ j(zero, &ok, Label::kNear);
+ int3();
+ bind(&ok);
}
- RecordWriteHelper(object, dst, value);
+
+ RecordWrite(
+ object, dst, value, save_fp, remembered_set_action, OMIT_SMI_CHECK);
bind(&done);
- // Clobber all input registers when running with the debug-code flag
+ // Clobber clobbered input registers when running with the debug-code flag
// turned on to provoke errors.
if (emit_debug_code()) {
- mov(object, Immediate(BitCast<int32_t>(kZapValue)));
mov(value, Immediate(BitCast<int32_t>(kZapValue)));
- mov(scratch, Immediate(BitCast<int32_t>(kZapValue)));
+ mov(dst, Immediate(BitCast<int32_t>(kZapValue)));
}
}
void MacroAssembler::RecordWrite(Register object,
Register address,
- Register value) {
+ Register value,
+ SaveFPRegsMode fp_mode,
+ RememberedSetAction remembered_set_action,
+ SmiCheck smi_check) {
+ ASSERT(!object.is(value));
+ ASSERT(!object.is(address));
+ ASSERT(!value.is(address));
+ if (emit_debug_code()) {
+ AbortIfSmi(object);
+ }
+
+ if (remembered_set_action == OMIT_REMEMBERED_SET &&
+ !FLAG_incremental_marking) {
+ return;
+ }
+
+ if (FLAG_debug_code) {
+ Label ok;
+ cmp(value, Operand(address, 0));
+ j(equal, &ok, Label::kNear);
+ int3();
+ bind(&ok);
+ }
+
// First, check if a write barrier is even needed. The tests below
// catch stores of Smis and stores into young gen.
Label done;
- // Skip barrier if writing a smi.
- STATIC_ASSERT(kSmiTag == 0);
- JumpIfSmi(value, &done, Label::kNear);
-
- InNewSpace(object, value, equal, &done);
-
- RecordWriteHelper(object, address, value);
+ if (smi_check == INLINE_SMI_CHECK) {
+ // Skip barrier if writing a smi.
+ JumpIfSmi(value, &done, Label::kNear);
+ }
+
+ CheckPageFlag(value,
+ value, // Used as scratch.
+ MemoryChunk::kPointersToHereAreInterestingMask,
+ zero,
+ &done,
+ Label::kNear);
+ CheckPageFlag(object,
+ value, // Used as scratch.
+ MemoryChunk::kPointersFromHereAreInterestingMask,
+ zero,
+ &done,
+ Label::kNear);
+
+ RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode);
+ CallStub(&stub);
bind(&done);
- // Clobber all input registers when running with the debug-code flag
+  // Clobber the clobbered registers when running with the debug-code flag
// turned on to provoke errors.
if (emit_debug_code()) {
- mov(object, Immediate(BitCast<int32_t>(kZapValue)));
mov(address, Immediate(BitCast<int32_t>(kZapValue)));
mov(value, Immediate(BitCast<int32_t>(kZapValue)));
}
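
The two CheckPageFlag tests act as the write-barrier filter: the stub is only reached when the value's page is interesting to point to and the object's page is interesting to point from. As a boolean sketch (flag masks as named above, signatures mine):

    // Sketch: both page-flag tests must pass before RecordWriteStub is called.
    bool NeedsWriteBarrier(unsigned value_page_flags, unsigned object_page_flags,
                           unsigned to_here_mask, unsigned from_here_mask) {
      return (value_page_flags & to_here_mask) != 0 &&
             (object_page_flags & from_here_mask) != 0;
    }
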
@@ -224,7 +311,7 @@ void MacroAssembler::DebugBreak() {
void MacroAssembler::Set(Register dst, const Immediate& x) {
if (x.is_zero()) {
- xor_(dst, Operand(dst)); // Shorter than mov.
+ xor_(dst, dst); // Shorter than mov.
} else {
mov(dst, x);
}
@@ -265,7 +352,7 @@ void MacroAssembler::SafePush(const Immediate& x) {
void MacroAssembler::CompareRoot(Register with, Heap::RootListIndex index) {
// see ROOT_ACCESSOR macro in factory.h
- Handle<Object> value(&isolate()->heap()->roots_address()[index]);
+ Handle<Object> value(&isolate()->heap()->roots_array_start()[index]);
cmp(with, value);
}
@@ -287,13 +374,111 @@ void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
void MacroAssembler::CheckFastElements(Register map,
Label* fail,
Label::Distance distance) {
- STATIC_ASSERT(FAST_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_ELEMENTS == 1);
cmpb(FieldOperand(map, Map::kBitField2Offset),
Map::kMaximumBitField2FastElementValue);
j(above, fail, distance);
}
+void MacroAssembler::CheckFastObjectElements(Register map,
+ Label* fail,
+ Label::Distance distance) {
+ STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_ELEMENTS == 1);
+ cmpb(FieldOperand(map, Map::kBitField2Offset),
+ Map::kMaximumBitField2FastSmiOnlyElementValue);
+ j(below_equal, fail, distance);
+ cmpb(FieldOperand(map, Map::kBitField2Offset),
+ Map::kMaximumBitField2FastElementValue);
+ j(above, fail, distance);
+}
+
+
+void MacroAssembler::CheckFastSmiOnlyElements(Register map,
+ Label* fail,
+ Label::Distance distance) {
+ STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
+ cmpb(FieldOperand(map, Map::kBitField2Offset),
+ Map::kMaximumBitField2FastSmiOnlyElementValue);
+ j(above, fail, distance);
+}
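
All three helpers are range checks over the elements kind packed into bit_field2; the real code compares against maximum bit_field2 values because other bits share the byte. Abstracted to the kinds themselves (only the first two enumerators come from the STATIC_ASSERTs; the ordering of slower kinds is assumed):

    enum ElementsKind {
      FAST_SMI_ONLY_ELEMENTS = 0,  // from the STATIC_ASSERTs above
      FAST_ELEMENTS = 1            // slower kinds follow with larger values
    };

    bool PassesFastElements(int kind)        { return kind <= FAST_ELEMENTS; }
    bool PassesFastObjectElements(int kind)  { return kind == FAST_ELEMENTS; }
    bool PassesFastSmiOnlyElements(int kind) { return kind == FAST_SMI_ONLY_ELEMENTS; }
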
+
+
+void MacroAssembler::StoreNumberToDoubleElements(
+ Register maybe_number,
+ Register elements,
+ Register key,
+ Register scratch1,
+ XMMRegister scratch2,
+ Label* fail,
+ bool specialize_for_processor) {
+ Label smi_value, done, maybe_nan, not_nan, is_nan, have_double_value;
+ JumpIfSmi(maybe_number, &smi_value, Label::kNear);
+
+ CheckMap(maybe_number,
+ isolate()->factory()->heap_number_map(),
+ fail,
+ DONT_DO_SMI_CHECK);
+
+ // Double value, canonicalize NaN.
+ uint32_t offset = HeapNumber::kValueOffset + sizeof(kHoleNanLower32);
+ cmp(FieldOperand(maybe_number, offset),
+ Immediate(kNaNOrInfinityLowerBoundUpper32));
+ j(greater_equal, &maybe_nan, Label::kNear);
+
+ bind(&not_nan);
+ ExternalReference canonical_nan_reference =
+ ExternalReference::address_of_canonical_non_hole_nan();
+ if (CpuFeatures::IsSupported(SSE2) && specialize_for_processor) {
+ CpuFeatures::Scope use_sse2(SSE2);
+ movdbl(scratch2, FieldOperand(maybe_number, HeapNumber::kValueOffset));
+ bind(&have_double_value);
+ movdbl(FieldOperand(elements, key, times_4, FixedDoubleArray::kHeaderSize),
+ scratch2);
+ } else {
+ fld_d(FieldOperand(maybe_number, HeapNumber::kValueOffset));
+ bind(&have_double_value);
+ fstp_d(FieldOperand(elements, key, times_4, FixedDoubleArray::kHeaderSize));
+ }
+ jmp(&done);
+
+ bind(&maybe_nan);
+ // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
+ // it's an Infinity, and the non-NaN code path applies.
+ j(greater, &is_nan, Label::kNear);
+ cmp(FieldOperand(maybe_number, HeapNumber::kValueOffset), Immediate(0));
+ j(zero, &not_nan);
+ bind(&is_nan);
+ if (CpuFeatures::IsSupported(SSE2) && specialize_for_processor) {
+ CpuFeatures::Scope use_sse2(SSE2);
+ movdbl(scratch2, Operand::StaticVariable(canonical_nan_reference));
+ } else {
+ fld_d(Operand::StaticVariable(canonical_nan_reference));
+ }
+ jmp(&have_double_value, Label::kNear);
+
+ bind(&smi_value);
+ // Value is a smi. Convert to a double and store.
+ // Preserve original value.
+ mov(scratch1, maybe_number);
+ SmiUntag(scratch1);
+ if (CpuFeatures::IsSupported(SSE2) && specialize_for_processor) {
+ CpuFeatures::Scope fscope(SSE2);
+ cvtsi2sd(scratch2, scratch1);
+ movdbl(FieldOperand(elements, key, times_4, FixedDoubleArray::kHeaderSize),
+ scratch2);
+ } else {
+ push(scratch1);
+ fild_s(Operand(esp, 0));
+ pop(scratch1);
+ fstp_d(FieldOperand(elements, key, times_4, FixedDoubleArray::kHeaderSize));
+ }
+ bind(&done);
+}
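
The maybe_nan path classifies the double by its upper 32 bits with a signed compare: strictly greater than the Infinity upper word means fraction bits are set (a NaN); equal means Infinity only if the lower word is zero. Roughly, in C++ (assuming kNaNOrInfinityLowerBoundUpper32 == 0x7FF00000, the IEEE-754 exponent mask with the sign bit clear):

    #include <cstdint>
    #include <cstring>

    // Sketch: would this double be replaced by the canonical NaN?
    bool NeedsCanonicalization(double d) {
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof bits);
      int32_t upper = static_cast<int32_t>(bits >> 32);  // signed, as in the cmp
      uint32_t lower = static_cast<uint32_t>(bits);
      if (upper < 0x7FF00000) return false;  // ordinary value (or sign bit set)
      if (upper > 0x7FF00000) return true;   // fraction bits set: a NaN
      return lower != 0;                     // 0x7FF00000:00000000 is +Infinity
    }
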
+
+
void MacroAssembler::CheckMap(Register obj,
Handle<Map> map,
Label* fail,
@@ -345,7 +530,7 @@ void MacroAssembler::IsInstanceJSObjectType(Register map,
Register scratch,
Label* fail) {
movzx_b(scratch, FieldOperand(map, Map::kInstanceTypeOffset));
- sub(Operand(scratch), Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+ sub(scratch, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
cmp(scratch,
LAST_NONCALLABLE_SPEC_OBJECT_TYPE - FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
j(above, fail);
@@ -355,8 +540,7 @@ void MacroAssembler::IsInstanceJSObjectType(Register map,
void MacroAssembler::FCmp() {
if (CpuFeatures::IsSupported(CMOV)) {
fucomip();
- ffree(0);
- fincstp();
+ fstp(0);
} else {
fucompp();
push(eax);
@@ -402,7 +586,7 @@ void MacroAssembler::AbortIfSmi(Register object) {
void MacroAssembler::EnterFrame(StackFrame::Type type) {
push(ebp);
- mov(ebp, Operand(esp));
+ mov(ebp, esp);
push(esi);
push(Immediate(Smi::FromInt(type)));
push(Immediate(CodeObject()));
@@ -429,7 +613,7 @@ void MacroAssembler::EnterExitFramePrologue() {
ASSERT(ExitFrameConstants::kCallerPCOffset == +1 * kPointerSize);
ASSERT(ExitFrameConstants::kCallerFPOffset == 0 * kPointerSize);
push(ebp);
- mov(ebp, Operand(esp));
+ mov(ebp, esp);
// Reserve room for entry stack pointer and push the code object.
ASSERT(ExitFrameConstants::kSPOffset == -1 * kPointerSize);
@@ -451,14 +635,14 @@ void MacroAssembler::EnterExitFrameEpilogue(int argc, bool save_doubles) {
if (save_doubles) {
CpuFeatures::Scope scope(SSE2);
int space = XMMRegister::kNumRegisters * kDoubleSize + argc * kPointerSize;
- sub(Operand(esp), Immediate(space));
+ sub(esp, Immediate(space));
const int offset = -2 * kPointerSize;
for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
XMMRegister reg = XMMRegister::from_code(i);
movdbl(Operand(ebp, offset - ((i + 1) * kDoubleSize)), reg);
}
} else {
- sub(Operand(esp), Immediate(argc * kPointerSize));
+ sub(esp, Immediate(argc * kPointerSize));
}
// Get the required frame alignment for the OS.
@@ -478,7 +662,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles) {
  // Set up argc and argv in callee-saved registers.
int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
- mov(edi, Operand(eax));
+ mov(edi, eax);
lea(esi, Operand(ebp, eax, times_4, offset));
// Reserve space for argc, argv and isolate.
@@ -532,7 +716,7 @@ void MacroAssembler::LeaveExitFrameEpilogue() {
void MacroAssembler::LeaveApiExitFrame() {
- mov(esp, Operand(ebp));
+ mov(esp, ebp);
pop(ebp);
LeaveExitFrameEpilogue();
@@ -540,47 +724,65 @@ void MacroAssembler::LeaveApiExitFrame() {
void MacroAssembler::PushTryHandler(CodeLocation try_location,
- HandlerType type) {
+ HandlerType type,
+ int handler_index) {
// Adjust this code if not the case.
STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
- STATIC_ASSERT(StackHandlerConstants::kContextOffset == 1 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kStateOffset == 3 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kPCOffset == 4 * kPointerSize);
- // The pc (return address) is already on TOS.
+ STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
+
+ // We will build up the handler from the bottom by pushing on the stack.
+ // First compute the state and push the frame pointer and context.
+ unsigned state = StackHandler::OffsetField::encode(handler_index);
if (try_location == IN_JAVASCRIPT) {
- if (type == TRY_CATCH_HANDLER) {
- push(Immediate(StackHandler::TRY_CATCH));
- } else {
- push(Immediate(StackHandler::TRY_FINALLY));
- }
push(ebp);
push(esi);
+ state |= (type == TRY_CATCH_HANDLER)
+ ? StackHandler::KindField::encode(StackHandler::TRY_CATCH)
+ : StackHandler::KindField::encode(StackHandler::TRY_FINALLY);
} else {
ASSERT(try_location == IN_JS_ENTRY);
- // The frame pointer does not point to a JS frame so we save NULL
- // for ebp. We expect the code throwing an exception to check ebp
- // before dereferencing it to restore the context.
- push(Immediate(StackHandler::ENTRY));
+ // The frame pointer does not point to a JS frame so we save NULL for
+ // ebp. We expect the code throwing an exception to check ebp before
+ // dereferencing it to restore the context.
push(Immediate(0)); // NULL frame pointer.
push(Immediate(Smi::FromInt(0))); // No context.
+ state |= StackHandler::KindField::encode(StackHandler::ENTRY);
}
- // Save the current handler as the next handler.
- push(Operand::StaticVariable(ExternalReference(Isolate::kHandlerAddress,
- isolate())));
- // Link this handler as the new current one.
- mov(Operand::StaticVariable(ExternalReference(Isolate::kHandlerAddress,
- isolate())),
- esp);
+
+ // Push the state and the code object.
+ push(Immediate(state));
+ push(CodeObject());
+
+ // Link the current handler as the next handler.
+ ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
+ push(Operand::StaticVariable(handler_address));
+ // Set this new handler as the current one.
+ mov(Operand::StaticVariable(handler_address), esp);
}
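
After this change a handler occupies five words in the layout asserted above. For orientation, as a plain struct (ia32, 4-byte words; field names are illustrative):

    // Layout sketch of one stack handler, matching the STATIC_ASSERTs.
    struct StackHandlerLayout {
      void*    next;     // kNextOffset    ==  0: previous handler in the chain
      void*    code;     // kCodeOffset    ==  4: code object holding the table
      uint32_t state;    // kStateOffset   ==  8: handler kind + table index
      void*    context;  // kContextOffset == 12
      void*    fp;       // kFPOffset      == 16: NULL for the JS entry handler
    };
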
void MacroAssembler::PopTryHandler() {
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
- pop(Operand::StaticVariable(ExternalReference(Isolate::kHandlerAddress,
- isolate())));
- add(Operand(esp), Immediate(StackHandlerConstants::kSize - kPointerSize));
+ ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
+ pop(Operand::StaticVariable(handler_address));
+ add(esp, Immediate(StackHandlerConstants::kSize - kPointerSize));
+}
+
+
+void MacroAssembler::JumpToHandlerEntry() {
+ // Compute the handler entry address and jump to it. The handler table is
+ // a fixed array of (smi-tagged) code offsets.
+ // eax = exception, edi = code object, edx = state.
+ mov(ebx, FieldOperand(edi, Code::kHandlerTableOffset));
+ shr(edx, StackHandler::kKindWidth);
+ mov(edx, FieldOperand(ebx, edx, times_4, FixedArray::kHeaderSize));
+ SmiUntag(edx);
+ lea(edi, FieldOperand(edi, edx, times_1, Code::kHeaderSize));
+ jmp(edi);
}
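
JumpToHandlerEntry decodes the state word it is handed: the low kKindWidth bits hold the handler kind, the remaining bits index a fixed array of smi-tagged code offsets. A C sketch of the lookup, ignoring the FixedArray header and assuming kSmiTagSize == 1:

    // Sketch: recover the handler's code offset from the state word.
    int HandlerCodeOffset(const int32_t* handler_table,  // smi-tagged entries
                          uint32_t state, int kind_width) {
      uint32_t index = state >> kind_width;  // drop the kind bits
      return handler_table[index] >> 1;      // untag the smi
    }
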
@@ -588,36 +790,39 @@ void MacroAssembler::Throw(Register value) {
// Adjust this code if not the case.
STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
- STATIC_ASSERT(StackHandlerConstants::kContextOffset == 1 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kStateOffset == 3 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kPCOffset == 4 * kPointerSize);
- // eax must hold the exception.
+ STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
+
+ // The exception is expected in eax.
if (!value.is(eax)) {
mov(eax, value);
}
-
- // Drop the sp to the top of the handler.
- ExternalReference handler_address(Isolate::kHandlerAddress,
- isolate());
+ // Drop the stack pointer to the top of the top handler.
+ ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
mov(esp, Operand::StaticVariable(handler_address));
-
- // Restore next handler, context, and frame pointer; discard handler state.
+ // Restore the next handler.
pop(Operand::StaticVariable(handler_address));
+
+ // Remove the code object and state, compute the handler address in edi.
+ pop(edi); // Code object.
+ pop(edx); // Index and state.
+
+ // Restore the context and frame pointer.
pop(esi); // Context.
pop(ebp); // Frame pointer.
- pop(edx); // State.
// If the handler is a JS frame, restore the context to the frame.
- // (edx == ENTRY) == (ebp == 0) == (esi == 0), so we could test any
- // of them.
+ // (kind == ENTRY) == (ebp == 0) == (esi == 0), so we could test either
+ // ebp or esi.
Label skip;
- cmp(Operand(edx), Immediate(StackHandler::ENTRY));
- j(equal, &skip, Label::kNear);
+ test(esi, esi);
+ j(zero, &skip, Label::kNear);
mov(Operand(ebp, StandardFrameConstants::kContextOffset), esi);
bind(&skip);
- ret(0);
+ JumpToHandlerEntry();
}
@@ -626,61 +831,55 @@ void MacroAssembler::ThrowUncatchable(UncatchableExceptionType type,
// Adjust this code if not the case.
STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
- STATIC_ASSERT(StackHandlerConstants::kContextOffset == 1 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kStateOffset == 3 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kPCOffset == 4 * kPointerSize);
-
- // eax must hold the exception.
- if (!value.is(eax)) {
- mov(eax, value);
- }
-
- // Drop sp to the top stack handler.
- ExternalReference handler_address(Isolate::kHandlerAddress,
- isolate());
- mov(esp, Operand::StaticVariable(handler_address));
-
- // Unwind the handlers until the ENTRY handler is found.
- Label loop, done;
- bind(&loop);
- // Load the type of the current stack handler.
- const int kStateOffset = StackHandlerConstants::kStateOffset;
- cmp(Operand(esp, kStateOffset), Immediate(StackHandler::ENTRY));
- j(equal, &done, Label::kNear);
- // Fetch the next handler in the list.
- const int kNextOffset = StackHandlerConstants::kNextOffset;
- mov(esp, Operand(esp, kNextOffset));
- jmp(&loop);
- bind(&done);
-
- // Set the top handler address to next handler past the current ENTRY handler.
- pop(Operand::StaticVariable(handler_address));
+ STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
+ // The exception is expected in eax.
if (type == OUT_OF_MEMORY) {
// Set external caught exception to false.
- ExternalReference external_caught(
- Isolate::kExternalCaughtExceptionAddress,
- isolate());
- mov(eax, false);
- mov(Operand::StaticVariable(external_caught), eax);
+ ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress,
+ isolate());
+ mov(Operand::StaticVariable(external_caught), Immediate(false));
// Set pending exception and eax to out of memory exception.
ExternalReference pending_exception(Isolate::kPendingExceptionAddress,
isolate());
mov(eax, reinterpret_cast<int32_t>(Failure::OutOfMemoryException()));
mov(Operand::StaticVariable(pending_exception), eax);
+ } else if (!value.is(eax)) {
+ mov(eax, value);
}
- // Discard the context saved in the handler and clear the context pointer.
- pop(edx);
- Set(esi, Immediate(0));
+ // Drop the stack pointer to the top of the top stack handler.
+ ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
+ mov(esp, Operand::StaticVariable(handler_address));
- // Restore fp from handler and discard handler state.
+ // Unwind the handlers until the top ENTRY handler is found.
+ Label fetch_next, check_kind;
+ jmp(&check_kind, Label::kNear);
+ bind(&fetch_next);
+ mov(esp, Operand(esp, StackHandlerConstants::kNextOffset));
+
+ bind(&check_kind);
+ STATIC_ASSERT(StackHandler::ENTRY == 0);
+ test(Operand(esp, StackHandlerConstants::kStateOffset),
+ Immediate(StackHandler::KindField::kMask));
+ j(not_zero, &fetch_next);
+
+ // Set the top handler address to next handler past the top ENTRY handler.
+ pop(Operand::StaticVariable(handler_address));
+
+ // Remove the code object and state, compute the handler address in edi.
+ pop(edi); // Code object.
+ pop(edx); // Index and state.
+
+ // Clear the context pointer and frame pointer (0 was saved in the handler).
+ pop(esi);
pop(ebp);
- pop(edx); // State.
- ret(0);
+ JumpToHandlerEntry();
}
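
The fetch_next/check_kind loop walks the chain through the saved next pointers until the kind bits are zero, which identifies an ENTRY handler because STATIC_ASSERT(StackHandler::ENTRY == 0) holds. In C, using the layout struct sketched earlier:

    // Sketch of the unwind loop above.
    StackHandlerLayout* UnwindToEntry(StackHandlerLayout* top,
                                      uint32_t kind_mask) {  // KindField::kMask
      while ((top->state & kind_mask) != 0) {
        top = static_cast<StackHandlerLayout*>(top->next);
      }
      return top;
    }
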
@@ -696,7 +895,7 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
// When generating debug code, make sure the lexical context is set.
if (emit_debug_code()) {
- cmp(Operand(scratch), Immediate(0));
+ cmp(scratch, Immediate(0));
Check(not_equal, "we should not have an empty lexical context");
}
// Load the global context of the current context.
@@ -784,23 +983,23 @@ void MacroAssembler::LoadFromNumberDictionary(Label* miss,
mov(r1, r0);
not_(r0);
shl(r1, 15);
- add(r0, Operand(r1));
+ add(r0, r1);
// hash = hash ^ (hash >> 12);
mov(r1, r0);
shr(r1, 12);
- xor_(r0, Operand(r1));
+ xor_(r0, r1);
// hash = hash + (hash << 2);
lea(r0, Operand(r0, r0, times_4, 0));
// hash = hash ^ (hash >> 4);
mov(r1, r0);
shr(r1, 4);
- xor_(r0, Operand(r1));
+ xor_(r0, r1);
// hash = hash * 2057;
imul(r0, r0, 2057);
// hash = hash ^ (hash >> 16);
mov(r1, r0);
shr(r1, 16);
- xor_(r0, Operand(r1));
+ xor_(r0, r1);
// Compute capacity mask.
mov(r1, FieldOperand(elements, NumberDictionary::kCapacityOffset));
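
The hunk above only respells operands (Operand(r1) becomes r1), but the sequence it touches reads better as a whole: it is the shift-and-xor integer mix the inline comments spell out, applied before the dictionary probe loop. Step for step in C++ (32-bit wraparound throughout; the function name is mine):

    // The hash computed by the instruction sequence above.
    uint32_t IntegerHash(uint32_t hash) {
      hash = ~hash + (hash << 15);  // the mov/not/shl/add prologue
      hash = hash ^ (hash >> 12);
      hash = hash + (hash << 2);    // lea(r0, Operand(r0, r0, times_4, 0))
      hash = hash ^ (hash >> 4);
      hash = hash * 2057;
      hash = hash ^ (hash >> 16);
      return hash;
    }
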
@@ -814,9 +1013,9 @@ void MacroAssembler::LoadFromNumberDictionary(Label* miss,
mov(r2, r0);
// Compute the masked index: (hash + i + i * i) & mask.
if (i > 0) {
- add(Operand(r2), Immediate(NumberDictionary::GetProbeOffset(i)));
+ add(r2, Immediate(NumberDictionary::GetProbeOffset(i)));
}
- and_(r2, Operand(r1));
+ and_(r2, r1);
// Scale the index by multiplying by the entry size.
ASSERT(NumberDictionary::kEntrySize == 3);
@@ -872,7 +1071,7 @@ void MacroAssembler::LoadAllocationTopHelper(Register result,
if (scratch.is(no_reg)) {
mov(result, Operand::StaticVariable(new_space_allocation_top));
} else {
- mov(Operand(scratch), Immediate(new_space_allocation_top));
+ mov(scratch, Immediate(new_space_allocation_top));
mov(result, Operand(scratch, 0));
}
}
@@ -931,7 +1130,7 @@ void MacroAssembler::AllocateInNewSpace(int object_size,
if (!top_reg.is(result)) {
mov(top_reg, result);
}
- add(Operand(top_reg), Immediate(object_size));
+ add(top_reg, Immediate(object_size));
j(carry, gc_required);
cmp(top_reg, Operand::StaticVariable(new_space_allocation_limit));
j(above, gc_required);
@@ -942,12 +1141,12 @@ void MacroAssembler::AllocateInNewSpace(int object_size,
// Tag result if requested.
if (top_reg.is(result)) {
if ((flags & TAG_OBJECT) != 0) {
- sub(Operand(result), Immediate(object_size - kHeapObjectTag));
+ sub(result, Immediate(object_size - kHeapObjectTag));
} else {
- sub(Operand(result), Immediate(object_size));
+ sub(result, Immediate(object_size));
}
} else if ((flags & TAG_OBJECT) != 0) {
- add(Operand(result), Immediate(kHeapObjectTag));
+ add(result, Immediate(kHeapObjectTag));
}
}
@@ -985,7 +1184,7 @@ void MacroAssembler::AllocateInNewSpace(int header_size,
// We assume that element_count*element_size + header_size does not
// overflow.
lea(result_end, Operand(element_count, element_size, header_size));
- add(result_end, Operand(result));
+ add(result_end, result);
j(carry, gc_required);
cmp(result_end, Operand::StaticVariable(new_space_allocation_limit));
j(above, gc_required);
@@ -1030,7 +1229,7 @@ void MacroAssembler::AllocateInNewSpace(Register object_size,
if (!object_size.is(result_end)) {
mov(result_end, object_size);
}
- add(result_end, Operand(result));
+ add(result_end, result);
j(carry, gc_required);
cmp(result_end, Operand::StaticVariable(new_space_allocation_limit));
j(above, gc_required);
@@ -1050,7 +1249,7 @@ void MacroAssembler::UndoAllocationInNewSpace(Register object) {
ExternalReference::new_space_allocation_top_address(isolate());
// Make sure the object has no tag before resetting top.
- and_(Operand(object), Immediate(~kHeapObjectTagMask));
+ and_(object, Immediate(~kHeapObjectTagMask));
#ifdef DEBUG
cmp(object, Operand::StaticVariable(new_space_allocation_top));
Check(below, "Undo allocation of non allocated memory");
@@ -1089,7 +1288,7 @@ void MacroAssembler::AllocateTwoByteString(Register result,
ASSERT(kShortSize == 2);
// scratch1 = length * 2 + kObjectAlignmentMask.
lea(scratch1, Operand(length, length, times_1, kObjectAlignmentMask));
- and_(Operand(scratch1), Immediate(~kObjectAlignmentMask));
+ and_(scratch1, Immediate(~kObjectAlignmentMask));
// Allocate two byte string in new space.
AllocateInNewSpace(SeqTwoByteString::kHeaderSize,
@@ -1123,8 +1322,8 @@ void MacroAssembler::AllocateAsciiString(Register result,
ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0);
mov(scratch1, length);
ASSERT(kCharSize == 1);
- add(Operand(scratch1), Immediate(kObjectAlignmentMask));
- and_(Operand(scratch1), Immediate(~kObjectAlignmentMask));
+ add(scratch1, Immediate(kObjectAlignmentMask));
+ and_(scratch1, Immediate(~kObjectAlignmentMask));
// Allocate ascii string in new space.
AllocateInNewSpace(SeqAsciiString::kHeaderSize,
@@ -1258,7 +1457,7 @@ void MacroAssembler::CopyBytes(Register source,
Register scratch) {
Label loop, done, short_string, short_loop;
// Experimentation shows that the short string loop is faster if length < 10.
- cmp(Operand(length), Immediate(10));
+ cmp(length, Immediate(10));
j(less_equal, &short_string);
ASSERT(source.is(esi));
@@ -1273,12 +1472,12 @@ void MacroAssembler::CopyBytes(Register source,
mov(scratch, ecx);
shr(ecx, 2);
rep_movs();
- and_(Operand(scratch), Immediate(0x3));
- add(destination, Operand(scratch));
+ and_(scratch, Immediate(0x3));
+ add(destination, scratch);
jmp(&done);
bind(&short_string);
- test(length, Operand(length));
+ test(length, length);
j(zero, &done);
bind(&short_loop);
@@ -1293,13 +1492,40 @@ void MacroAssembler::CopyBytes(Register source,
}
+void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
+ Register end_offset,
+ Register filler) {
+ Label loop, entry;
+ jmp(&entry);
+ bind(&loop);
+ mov(Operand(start_offset, 0), filler);
+ add(start_offset, Immediate(kPointerSize));
+ bind(&entry);
+ cmp(start_offset, end_offset);
+ j(less, &loop);
+}
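
InitializeFieldsWithFiller is a straight pointer loop; its C equivalent, with the offsets taken as real pointers, is:

    // Sketch: start advances to end, storing filler into each word.
    void InitializeFieldsWithFiller(void** start, void** end, void* filler) {
      while (start < end) *start++ = filler;
    }
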
+
+
+void MacroAssembler::BooleanBitTest(Register object,
+ int field_offset,
+ int bit_index) {
+ bit_index += kSmiTagSize + kSmiShiftSize;
+ ASSERT(IsPowerOf2(kBitsPerByte));
+ int byte_index = bit_index / kBitsPerByte;
+ int byte_bit_index = bit_index & (kBitsPerByte - 1);
+ test_b(FieldOperand(object, field_offset + byte_index),
+ static_cast<byte>(1 << byte_bit_index));
+}
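
BooleanBitTest probes a single byte of a smi-tagged bit field, so it first shifts the logical bit index past the tag bits. The split, as a sketch (kSmiTagSize == 1 and kSmiShiftSize == 0 assumed for ia32):

    // Sketch: which byte, and which bit within it, test_b examines.
    void SplitBitIndex(int bit_index, int* byte_index, int* bit_in_byte) {
      bit_index += 1 + 0;           // kSmiTagSize + kSmiShiftSize (assumed)
      *byte_index = bit_index / 8;  // kBitsPerByte == 8
      *bit_in_byte = bit_index & 7;
    }
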
+
+
+
void MacroAssembler::NegativeZeroTest(Register result,
Register op,
Label* then_label) {
Label ok;
- test(result, Operand(result));
+ test(result, result);
j(not_zero, &ok);
- test(op, Operand(op));
+ test(op, op);
j(sign, then_label);
bind(&ok);
}
@@ -1311,10 +1537,10 @@ void MacroAssembler::NegativeZeroTest(Register result,
Register scratch,
Label* then_label) {
Label ok;
- test(result, Operand(result));
+ test(result, result);
j(not_zero, &ok);
- mov(scratch, Operand(op1));
- or_(scratch, Operand(op2));
+ mov(scratch, op1);
+ or_(scratch, op2);
j(sign, then_label);
bind(&ok);
}
@@ -1323,7 +1549,8 @@ void MacroAssembler::NegativeZeroTest(Register result,
void MacroAssembler::TryGetFunctionPrototype(Register function,
Register result,
Register scratch,
- Label* miss) {
+ Label* miss,
+ bool miss_on_bound_function) {
// Check that the receiver isn't a smi.
JumpIfSmi(function, miss);
@@ -1331,6 +1558,15 @@ void MacroAssembler::TryGetFunctionPrototype(Register function,
CmpObjectType(function, JS_FUNCTION_TYPE, result);
j(not_equal, miss);
+ if (miss_on_bound_function) {
+ // If a bound function, go to miss label.
+ mov(scratch,
+ FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
+ BooleanBitTest(scratch, SharedFunctionInfo::kCompilerHintsOffset,
+ SharedFunctionInfo::kBoundFunction);
+ j(not_zero, miss);
+ }
+
// Make sure that the function has an instance prototype.
Label non_instance;
movzx_b(scratch, FieldOperand(result, Map::kBitFieldOffset));
@@ -1344,7 +1580,7 @@ void MacroAssembler::TryGetFunctionPrototype(Register function,
// If the prototype or initial map is the hole, don't return it and
// simply miss the cache instead. This will allow us to allocate a
// prototype object on-demand in the runtime system.
- cmp(Operand(result), Immediate(isolate()->factory()->the_hole_value()));
+ cmp(result, Immediate(isolate()->factory()->the_hole_value()));
j(equal, miss);
// If the function does not have an initial map, we're done.
@@ -1367,48 +1603,32 @@ void MacroAssembler::TryGetFunctionPrototype(Register function,
void MacroAssembler::CallStub(CodeStub* stub, unsigned ast_id) {
- ASSERT(allow_stub_calls()); // Calls are not allowed in some stubs.
+ ASSERT(AllowThisStubCall(stub)); // Calls are not allowed in some stubs.
call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id);
}
-MaybeObject* MacroAssembler::TryCallStub(CodeStub* stub) {
- ASSERT(allow_stub_calls()); // Calls are not allowed in some stubs.
- Object* result;
- { MaybeObject* maybe_result = stub->TryGetCode();
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- call(Handle<Code>(Code::cast(result)), RelocInfo::CODE_TARGET);
- return result;
-}
-
-
void MacroAssembler::TailCallStub(CodeStub* stub) {
- ASSERT(allow_stub_calls()); // Calls are not allowed in some stubs.
+ ASSERT(allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe());
jmp(stub->GetCode(), RelocInfo::CODE_TARGET);
}
-MaybeObject* MacroAssembler::TryTailCallStub(CodeStub* stub) {
- ASSERT(allow_stub_calls()); // Calls are not allowed in some stubs.
- Object* result;
- { MaybeObject* maybe_result = stub->TryGetCode();
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- jmp(Handle<Code>(Code::cast(result)), RelocInfo::CODE_TARGET);
- return result;
-}
-
-
void MacroAssembler::StubReturn(int argc) {
ASSERT(argc >= 1 && generating_stub());
ret((argc - 1) * kPointerSize);
}
+bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
+ if (!has_frame_ && stub->SometimesSetsUpAFrame()) return false;
+ return allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe();
+}
+
+
void MacroAssembler::IllegalOperation(int num_arguments) {
if (num_arguments > 0) {
- add(Operand(esp), Immediate(num_arguments * kPointerSize));
+ add(esp, Immediate(num_arguments * kPointerSize));
}
mov(eax, Immediate(isolate()->factory()->undefined_value()));
}
@@ -1442,18 +1662,11 @@ void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
const Runtime::Function* function = Runtime::FunctionForId(id);
Set(eax, Immediate(function->nargs));
mov(ebx, Immediate(ExternalReference(function, isolate())));
- CEntryStub ces(1);
- ces.SaveDoubles();
+ CEntryStub ces(1, kSaveFPRegs);
CallStub(&ces);
}
-MaybeObject* MacroAssembler::TryCallRuntime(Runtime::FunctionId id,
- int num_arguments) {
- return TryCallRuntime(Runtime::FunctionForId(id), num_arguments);
-}
-
-
void MacroAssembler::CallRuntime(const Runtime::Function* f,
int num_arguments) {
// If the expected number of arguments of the runtime function is
@@ -1475,26 +1688,6 @@ void MacroAssembler::CallRuntime(const Runtime::Function* f,
}
-MaybeObject* MacroAssembler::TryCallRuntime(const Runtime::Function* f,
- int num_arguments) {
- if (f->nargs >= 0 && f->nargs != num_arguments) {
- IllegalOperation(num_arguments);
- // Since we did not call the stub, there was no allocation failure.
- // Return some non-failure object.
- return isolate()->heap()->undefined_value();
- }
-
- // TODO(1236192): Most runtime routines don't need the number of
- // arguments passed in because it is constant. At some point we
- // should remove this need and make the runtime routine entry code
- // smarter.
- Set(eax, Immediate(num_arguments));
- mov(ebx, Immediate(ExternalReference(f, isolate())));
- CEntryStub ces(1);
- return TryCallStub(&ces);
-}
-
-
void MacroAssembler::CallExternalReference(ExternalReference ref,
int num_arguments) {
mov(eax, Immediate(num_arguments));
@@ -1517,17 +1710,6 @@ void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
}
-MaybeObject* MacroAssembler::TryTailCallExternalReference(
- const ExternalReference& ext, int num_arguments, int result_size) {
- // TODO(1236192): Most runtime routines don't need the number of
- // arguments passed in because it is constant. At some point we
- // should remove this need and make the runtime routine entry code
- // smarter.
- Set(eax, Immediate(num_arguments));
- return TryJumpToExternalReference(ext);
-}
-
-
void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
int num_arguments,
int result_size) {
@@ -1537,14 +1719,6 @@ void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
}
-MaybeObject* MacroAssembler::TryTailCallRuntime(Runtime::FunctionId fid,
- int num_arguments,
- int result_size) {
- return TryTailCallExternalReference(
- ExternalReference(fid, isolate()), num_arguments, result_size);
-}
-
-
// If true, a Handle<T> returned by value from a function with cdecl calling
// convention will be returned directly as the value of the location_ field
// in register eax.
@@ -1593,8 +1767,8 @@ void MacroAssembler::PrepareCallApiFunction(int argc) {
}
-MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn(ApiFunction* function,
- int stack_space) {
+void MacroAssembler::CallApiFunctionAndReturn(Address function_address,
+ int stack_space) {
ExternalReference next_address =
ExternalReference::handle_scope_next_address();
ExternalReference limit_address =
@@ -1607,8 +1781,8 @@ MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn(ApiFunction* function,
mov(edi, Operand::StaticVariable(limit_address));
add(Operand::StaticVariable(level_address), Immediate(1));
- // Call the api function!
- call(function->address(), RelocInfo::RUNTIME_ENTRY);
+ // Call the api function.
+ call(function_address, RelocInfo::RUNTIME_ENTRY);
if (!kReturnHandlesDirectly) {
// PrepareCallApiFunction saved pointer to the output slot into
@@ -1623,7 +1797,7 @@ MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn(ApiFunction* function,
Label leave_exit_frame;
// Check if the result handle holds 0.
- test(eax, Operand(eax));
+ test(eax, eax);
j(zero, &empty_handle);
// It was non-zero. Dereference to get the result value.
mov(eax, Operand(eax, 0));
@@ -1646,11 +1820,8 @@ MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn(ApiFunction* function,
LeaveApiExitFrame();
ret(stack_space * kPointerSize);
bind(&promote_scheduled_exception);
- MaybeObject* result =
- TryTailCallRuntime(Runtime::kPromoteScheduledException, 0, 1);
- if (result->IsFailure()) {
- return result;
- }
+ TailCallRuntime(Runtime::kPromoteScheduledException, 0, 1);
+
bind(&empty_handle);
// It was zero; the result is undefined.
mov(eax, isolate()->factory()->undefined_value());
@@ -1664,11 +1835,9 @@ MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn(ApiFunction* function,
mov(edi, eax);
mov(Operand(esp, 0), Immediate(ExternalReference::isolate_address()));
mov(eax, Immediate(delete_extensions));
- call(Operand(eax));
+ call(eax);
mov(eax, edi);
jmp(&leave_exit_frame);
-
- return result;
}
@@ -1680,15 +1849,6 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& ext) {
}
-MaybeObject* MacroAssembler::TryJumpToExternalReference(
- const ExternalReference& ext) {
- // Set the entry point and jump to the C entry runtime stub.
- mov(ebx, Immediate(ext));
- CEntryStub ces(1);
- return TryTailCallStub(&ces);
-}
-
-
void MacroAssembler::SetCallKind(Register dst, CallKind call_kind) {
// This macro takes the dst register to make the code more readable
// at the call sites. However, the dst register has to be ecx to
@@ -1698,10 +1858,10 @@ void MacroAssembler::SetCallKind(Register dst, CallKind call_kind) {
if (call_kind == CALL_AS_FUNCTION) {
// Set to some non-zero smi by updating the least significant
// byte.
- mov_b(Operand(dst), 1 << kSmiTagSize);
+ mov_b(dst, 1 << kSmiTagSize);
} else {
// Set to smi zero by clearing the register.
- xor_(dst, Operand(dst));
+ xor_(dst, dst);
}
}
@@ -1746,7 +1906,7 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
} else if (!expected.reg().is(actual.reg())) {
// Both expected and actual are in (different) registers. This
// is the case when we invoke functions using call and apply.
- cmp(expected.reg(), Operand(actual.reg()));
+ cmp(expected.reg(), actual.reg());
j(equal, &invoke);
ASSERT(actual.reg().is(eax));
ASSERT(expected.reg().is(ebx));
@@ -1758,7 +1918,7 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
isolate()->builtins()->ArgumentsAdaptorTrampoline();
if (!code_constant.is_null()) {
mov(edx, Immediate(code_constant));
- add(Operand(edx), Immediate(Code::kHeaderSize - kHeapObjectTag));
+ add(edx, Immediate(Code::kHeaderSize - kHeapObjectTag));
} else if (!code_operand.is_reg(edx)) {
mov(edx, code_operand);
}
@@ -1784,6 +1944,9 @@ void MacroAssembler::InvokeCode(const Operand& code,
InvokeFlag flag,
const CallWrapper& call_wrapper,
CallKind call_kind) {
+ // You can't call a function without a valid frame.
+ ASSERT(flag == JUMP_FUNCTION || has_frame());
+
Label done;
InvokePrologue(expected, actual, Handle<Code>::null(), code,
&done, flag, Label::kNear, call_wrapper,
@@ -1809,8 +1972,11 @@ void MacroAssembler::InvokeCode(Handle<Code> code,
InvokeFlag flag,
const CallWrapper& call_wrapper,
CallKind call_kind) {
+ // You can't call a function without a valid frame.
+ ASSERT(flag == JUMP_FUNCTION || has_frame());
+
Label done;
- Operand dummy(eax);
+ Operand dummy(eax, 0);
InvokePrologue(expected, actual, code, dummy, &done, flag, Label::kNear,
call_wrapper, call_kind);
if (flag == CALL_FUNCTION) {
@@ -1832,6 +1998,9 @@ void MacroAssembler::InvokeFunction(Register fun,
InvokeFlag flag,
const CallWrapper& call_wrapper,
CallKind call_kind) {
+ // You can't call a function without a valid frame.
+ ASSERT(flag == JUMP_FUNCTION || has_frame());
+
ASSERT(fun.is(edi));
mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
@@ -1844,36 +2013,32 @@ void MacroAssembler::InvokeFunction(Register fun,
}
-void MacroAssembler::InvokeFunction(JSFunction* function,
+void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
const ParameterCount& actual,
InvokeFlag flag,
const CallWrapper& call_wrapper,
CallKind call_kind) {
- ASSERT(function->is_compiled());
+ // You can't call a function without a valid frame.
+ ASSERT(flag == JUMP_FUNCTION || has_frame());
+
// Get the function and setup the context.
- mov(edi, Immediate(Handle<JSFunction>(function)));
+ mov(edi, Immediate(function));
mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
ParameterCount expected(function->shared()->formal_parameter_count());
- if (V8::UseCrankshaft()) {
- // TODO(kasperl): For now, we always call indirectly through the
- // code field in the function to allow recompilation to take effect
- // without changing any of the call sites.
- InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
- expected, actual, flag, call_wrapper, call_kind);
- } else {
- Handle<Code> code(function->code());
- InvokeCode(code, expected, actual, RelocInfo::CODE_TARGET,
- flag, call_wrapper, call_kind);
- }
+ // We call indirectly through the code field in the function to
+ // allow recompilation to take effect without changing any of the
+ // call sites.
+ InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
+ expected, actual, flag, call_wrapper, call_kind);
}
void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
InvokeFlag flag,
const CallWrapper& call_wrapper) {
- // Calls are not allowed in some stubs.
- ASSERT(flag == JUMP_FUNCTION || allow_stub_calls());
+ // You can't call a builtin without a valid frame.
+ ASSERT(flag == JUMP_FUNCTION || has_frame());
// Rely on the assertion to check that the number of provided
// arguments match the expected number of arguments. Fake a
@@ -1884,6 +2049,7 @@ void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
expected, expected, flag, call_wrapper, CALL_AS_METHOD);
}
+
void MacroAssembler::GetBuiltinFunction(Register target,
Builtins::JavaScript id) {
// Load the JavaScript builtin function from the builtins object.
@@ -1893,6 +2059,7 @@ void MacroAssembler::GetBuiltinFunction(Register target,
JSBuiltinsObject::OffsetOfFunctionWithId(id)));
}
+
void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
ASSERT(!target.is(edi));
// Load the JavaScript builtin function from the builtins object.
@@ -1994,18 +2161,16 @@ void MacroAssembler::Ret(int bytes_dropped, Register scratch) {
ret(bytes_dropped);
} else {
pop(scratch);
- add(Operand(esp), Immediate(bytes_dropped));
+ add(esp, Immediate(bytes_dropped));
push(scratch);
ret(0);
}
}
-
-
void MacroAssembler::Drop(int stack_elements) {
if (stack_elements > 0) {
- add(Operand(esp), Immediate(stack_elements * kPointerSize));
+ add(esp, Immediate(stack_elements * kPointerSize));
}
}
@@ -2148,13 +2313,19 @@ void MacroAssembler::Abort(const char* msg) {
RecordComment(msg);
}
#endif
- // Disable stub call restrictions to always allow calls to abort.
- AllowStubCallsScope allow_scope(this, true);
push(eax);
push(Immediate(p0));
push(Immediate(reinterpret_cast<intptr_t>(Smi::FromInt(p1 - p0))));
- CallRuntime(Runtime::kAbort, 2);
+ // Disable stub call restrictions to always allow calls to abort.
+ if (!has_frame_) {
+ // We don't actually want to generate a pile of code for this, so just
+ // claim there is a stack frame, without generating one.
+ FrameScope scope(this, StackFrame::NONE);
+ CallRuntime(Runtime::kAbort, 2);
+ } else {
+ CallRuntime(Runtime::kAbort, 2);
+ }
// will not return here
int3();
}
@@ -2177,7 +2348,7 @@ void MacroAssembler::LoadPowerOf2(XMMRegister dst,
ASSERT(is_uintn(power + HeapNumber::kExponentBias,
HeapNumber::kExponentBits));
mov(scratch, Immediate(power + HeapNumber::kExponentBias));
- movd(dst, Operand(scratch));
+ movd(dst, scratch);
psllq(dst, HeapNumber::kMantissaBits);
}
@@ -2203,8 +2374,8 @@ void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register object1,
Label* failure) {
// Check that both objects are not smis.
STATIC_ASSERT(kSmiTag == 0);
- mov(scratch1, Operand(object1));
- and_(scratch1, Operand(object2));
+ mov(scratch1, object1);
+ and_(scratch1, object2);
JumpIfSmi(scratch1, failure);
// Load instance type for both strings.
@@ -2233,12 +2404,12 @@ void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
// Make stack end at alignment and make room for num_arguments words
// and the original value of esp.
mov(scratch, esp);
- sub(Operand(esp), Immediate((num_arguments + 1) * kPointerSize));
+ sub(esp, Immediate((num_arguments + 1) * kPointerSize));
ASSERT(IsPowerOf2(frame_alignment));
and_(esp, -frame_alignment);
mov(Operand(esp, num_arguments * kPointerSize), scratch);
} else {
- sub(Operand(esp), Immediate(num_arguments * kPointerSize));
+ sub(esp, Immediate(num_arguments * kPointerSize));
}
}
@@ -2246,27 +2417,39 @@ void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
void MacroAssembler::CallCFunction(ExternalReference function,
int num_arguments) {
// Trashing eax is ok as it will be the return value.
- mov(Operand(eax), Immediate(function));
+ mov(eax, Immediate(function));
CallCFunction(eax, num_arguments);
}
void MacroAssembler::CallCFunction(Register function,
int num_arguments) {
+ ASSERT(has_frame());
// Check stack alignment.
if (emit_debug_code()) {
CheckStackAlignment();
}
- call(Operand(function));
+ call(function);
if (OS::ActivationFrameAlignment() != 0) {
mov(esp, Operand(esp, num_arguments * kPointerSize));
} else {
- add(Operand(esp), Immediate(num_arguments * kPointerSize));
+ add(esp, Immediate(num_arguments * kPointerSize));
}
}
+bool AreAliased(Register r1, Register r2, Register r3, Register r4) {
+ if (r1.is(r2)) return true;
+ if (r1.is(r3)) return true;
+ if (r1.is(r4)) return true;
+ if (r2.is(r3)) return true;
+ if (r2.is(r4)) return true;
+ if (r3.is(r4)) return true;
+ return false;
+}
+
+
CodePatcher::CodePatcher(byte* address, int size)
: address_(address),
size_(size),
@@ -2288,6 +2471,198 @@ CodePatcher::~CodePatcher() {
}
+void MacroAssembler::CheckPageFlag(
+ Register object,
+ Register scratch,
+ int mask,
+ Condition cc,
+ Label* condition_met,
+ Label::Distance condition_met_distance) {
+ ASSERT(cc == zero || cc == not_zero);
+ if (scratch.is(object)) {
+ and_(scratch, Immediate(~Page::kPageAlignmentMask));
+ } else {
+ mov(scratch, Immediate(~Page::kPageAlignmentMask));
+ and_(scratch, object);
+ }
+ if (mask < (1 << kBitsPerByte)) {
+ test_b(Operand(scratch, MemoryChunk::kFlagsOffset),
+ static_cast<uint8_t>(mask));
+ } else {
+ test(Operand(scratch, MemoryChunk::kFlagsOffset), Immediate(mask));
+ }
+ j(cc, condition_met, condition_met_distance);
+}
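
CheckPageFlag works because MemoryChunk headers sit at page-aligned addresses: masking off the low alignment bits of any object pointer recovers its chunk. As arithmetic:

    // Sketch: pages are power-of-two aligned, so clearing the low bits of an
    // object address yields the address of its MemoryChunk header.
    uintptr_t ChunkContaining(uintptr_t object, uintptr_t page_alignment_mask) {
      return object & ~page_alignment_mask;
    }
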
+
+
+void MacroAssembler::JumpIfBlack(Register object,
+ Register scratch0,
+ Register scratch1,
+ Label* on_black,
+ Label::Distance on_black_near) {
+ HasColor(object, scratch0, scratch1,
+ on_black, on_black_near,
+ 1, 0); // kBlackBitPattern.
+ ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
+}
+
+
+void MacroAssembler::HasColor(Register object,
+ Register bitmap_scratch,
+ Register mask_scratch,
+ Label* has_color,
+ Label::Distance has_color_distance,
+ int first_bit,
+ int second_bit) {
+ ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, ecx));
+
+ GetMarkBits(object, bitmap_scratch, mask_scratch);
+
+ Label other_color, word_boundary;
+ test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
+ j(first_bit == 1 ? zero : not_zero, &other_color, Label::kNear);
+ add(mask_scratch, mask_scratch); // Shift left 1 by adding.
+ j(zero, &word_boundary, Label::kNear);
+ test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
+ j(second_bit == 1 ? not_zero : zero, has_color, has_color_distance);
+ jmp(&other_color, Label::kNear);
+
+ bind(&word_boundary);
+ test_b(Operand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize), 1);
+
+ j(second_bit == 1 ? not_zero : zero, has_color, has_color_distance);
+ bind(&other_color);
+}
+
+
+void MacroAssembler::GetMarkBits(Register addr_reg,
+ Register bitmap_reg,
+ Register mask_reg) {
+ ASSERT(!AreAliased(addr_reg, mask_reg, bitmap_reg, ecx));
+ mov(bitmap_reg, Immediate(~Page::kPageAlignmentMask));
+ and_(bitmap_reg, addr_reg);
+ mov(ecx, addr_reg);
+ int shift =
+ Bitmap::kBitsPerCellLog2 + kPointerSizeLog2 - Bitmap::kBytesPerCellLog2;
+ shr(ecx, shift);
+ and_(ecx,
+ (Page::kPageAlignmentMask >> shift) & ~(Bitmap::kBytesPerCell - 1));
+
+ add(bitmap_reg, ecx);
+ mov(ecx, addr_reg);
+ shr(ecx, kPointerSizeLog2);
+ and_(ecx, (1 << Bitmap::kBitsPerCellLog2) - 1);
+ mov(mask_reg, Immediate(1));
+ shl_cl(mask_reg);
+}
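
GetMarkBits converts an address into a bitmap cell plus a one-bit mask: one mark bit per pointer-sized word, 32 bits per cell, which is what the shift constants encode. The same arithmetic as a sketch (ia32 values assumed):

    // Sketch of the mark-bitmap addressing (4-byte words, 32-bit cells).
    void GetMarkBitsSketch(uintptr_t addr, uintptr_t page_mask,
                           uintptr_t* cell_index, uint32_t* mask) {
      uintptr_t offset = addr & page_mask;  // byte offset within the page
      uintptr_t bit = offset >> 2;          // one mark bit per 4-byte word
      *cell_index = bit >> 5;               // 32 mark bits per bitmap cell
      *mask = 1u << (bit & 31);
    }
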
+
+
+void MacroAssembler::EnsureNotWhite(
+ Register value,
+ Register bitmap_scratch,
+ Register mask_scratch,
+ Label* value_is_white_and_not_data,
+ Label::Distance distance) {
+ ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, ecx));
+ GetMarkBits(value, bitmap_scratch, mask_scratch);
+
+ // If the value is black or grey we don't need to do anything.
+ ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
+ ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
+ ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
+ ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
+
+ Label done;
+
+ // Since both black and grey have a 1 in the first position and white does
+ // not have a 1 there we only need to check one bit.
+ test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
+ j(not_zero, &done, Label::kNear);
+
+ if (FLAG_debug_code) {
+ // Check for impossible bit pattern.
+ Label ok;
+ push(mask_scratch);
+ // shl. May overflow making the check conservative.
+ add(mask_scratch, mask_scratch);
+ test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
+ j(zero, &ok, Label::kNear);
+ int3();
+ bind(&ok);
+ pop(mask_scratch);
+ }
+
+ // Value is white. We check whether it is data that doesn't need scanning.
+ // Currently only checks for HeapNumber and non-cons strings.
+ Register map = ecx; // Holds map while checking type.
+ Register length = ecx; // Holds length of object after checking type.
+ Label not_heap_number;
+ Label is_data_object;
+
+ // Check for heap-number
+ mov(map, FieldOperand(value, HeapObject::kMapOffset));
+ cmp(map, FACTORY->heap_number_map());
+ j(not_equal, &not_heap_number, Label::kNear);
+ mov(length, Immediate(HeapNumber::kSize));
+ jmp(&is_data_object, Label::kNear);
+
+ bind(&not_heap_number);
+ // Check for strings.
+ ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
+ ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
+ // If it's a string and it's not a cons string then it's an object containing
+ // no GC pointers.
+ Register instance_type = ecx;
+ movzx_b(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
+ test_b(instance_type, kIsIndirectStringMask | kIsNotStringMask);
+ j(not_zero, value_is_white_and_not_data);
+ // It's a non-indirect (non-cons and non-slice) string.
+ // If it's external, the length is just ExternalString::kSize.
+ // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
+ Label not_external;
+ // External strings are the only ones with the kExternalStringTag bit
+ // set.
+ ASSERT_EQ(0, kSeqStringTag & kExternalStringTag);
+ ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
+ test_b(instance_type, kExternalStringTag);
+ j(zero, &not_external, Label::kNear);
+ mov(length, Immediate(ExternalString::kSize));
+ jmp(&is_data_object, Label::kNear);
+
+ bind(&not_external);
+ // Sequential string, either ASCII or UC16.
+ ASSERT(kAsciiStringTag == 0x04);
+ and_(length, Immediate(kStringEncodingMask));
+ xor_(length, Immediate(kStringEncodingMask));
+ add(length, Immediate(0x04));
+ // Value now either 4 (if ASCII) or 8 (if UC16), i.e., char-size shifted
+ // by 2. If we multiply the string length as smi by this, it still
+ // won't overflow a 32-bit value.
+ ASSERT_EQ(SeqAsciiString::kMaxSize, SeqTwoByteString::kMaxSize);
+ ASSERT(SeqAsciiString::kMaxSize <=
+ static_cast<int>(0xffffffffu >> (2 + kSmiTagSize)));
+ imul(length, FieldOperand(value, String::kLengthOffset));
+ shr(length, 2 + kSmiTagSize + kSmiShiftSize);
+ add(length, Immediate(SeqString::kHeaderSize + kObjectAlignmentMask));
+ and_(length, Immediate(~kObjectAlignmentMask));
+
+ bind(&is_data_object);
+ // Value is a data object, and it is white. Mark it black. Since we know
+ // that the object is white we can make it black by flipping one bit.
+ or_(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
+
+ and_(bitmap_scratch, Immediate(~Page::kPageAlignmentMask));
+ add(Operand(bitmap_scratch, MemoryChunk::kLiveBytesOffset),
+ length);
+ if (FLAG_debug_code) {
+ mov(length, Operand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
+ cmp(length, Operand(bitmap_scratch, MemoryChunk::kSizeOffset));
+ Check(less_equal, "Live Bytes Count overflow chunk size");
+ }
+
+ bind(&done);
+}
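
The tail of EnsureNotWhite sizes a sequential string without branching: the encoding bit becomes a char size of 4 or 8 (pre-shifted by two), which is multiplied by the smi-tagged length, shifted back down, then aligned. Expanded as a sketch (assumes kStringEncodingMask == 0x04 with the bit set for ASCII, kSmiTagSize == 1, and 4-byte object alignment):

    // Sketch of the branch-free size computation above.
    int SeqStringSize(bool is_ascii, int length, int header_size) {
      int scaled_char_size = is_ascii ? 4 : 8;  // char size (1 or 2) << 2
      int smi_length = length << 1;             // the smi-tagged length field
      int data_bytes = (scaled_char_size * smi_length) >> 3;  // len * char size
      return (data_bytes + header_size + 3) & ~3;  // round up to alignment
    }
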
+
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_IA32
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.h b/deps/v8/src/ia32/macro-assembler-ia32.h
index 1906644c3..03ec28a8a 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.h
+++ b/deps/v8/src/ia32/macro-assembler-ia32.h
@@ -29,6 +29,7 @@
#define V8_IA32_MACRO_ASSEMBLER_IA32_H_
#include "assembler.h"
+#include "frames.h"
#include "v8globals.h"
namespace v8 {
@@ -50,6 +51,13 @@ enum AllocationFlags {
// distinguish memory operands from other operands on ia32.
typedef Operand MemOperand;
+enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
+enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
+
+
+bool AreAliased(Register r1, Register r2, Register r3, Register r4);
+
+
// MacroAssembler implements a collection of frequently used macros.
class MacroAssembler: public Assembler {
public:
@@ -61,42 +69,130 @@ class MacroAssembler: public Assembler {
// ---------------------------------------------------------------------------
// GC Support
+ enum RememberedSetFinalAction {
+ kReturnAtEnd,
+ kFallThroughAtEnd
+ };
+
+ // Record in the remembered set the fact that we have a pointer to new space
+ // at the address pointed to by the addr register. Only works if addr is not
+ // in new space.
+ void RememberedSetHelper(Register object, // Used for debug code.
+ Register addr,
+ Register scratch,
+ SaveFPRegsMode save_fp,
+ RememberedSetFinalAction and_then);
+
+ void CheckPageFlag(Register object,
+ Register scratch,
+ int mask,
+ Condition cc,
+ Label* condition_met,
+ Label::Distance condition_met_distance = Label::kFar);
+
+ // Check if object is in new space. Jumps if the object is not in new space.
+ // The register scratch can be object itself, but scratch will be clobbered.
+ void JumpIfNotInNewSpace(Register object,
+ Register scratch,
+ Label* branch,
+ Label::Distance distance = Label::kFar) {
+ InNewSpace(object, scratch, zero, branch, distance);
+ }
- // For page containing |object| mark region covering |addr| dirty.
- // RecordWriteHelper only works if the object is not in new
- // space.
- void RecordWriteHelper(Register object,
- Register addr,
- Register scratch);
+ // Check if object is in new space. Jumps if the object is in new space.
+ // The register scratch can be object itself, but it will be clobbered.
+ void JumpIfInNewSpace(Register object,
+ Register scratch,
+ Label* branch,
+ Label::Distance distance = Label::kFar) {
+ InNewSpace(object, scratch, not_zero, branch, distance);
+ }
- // Check if object is in new space.
- // scratch can be object itself, but it will be clobbered.
- void InNewSpace(Register object,
- Register scratch,
- Condition cc, // equal for new space, not_equal otherwise.
- Label* branch,
- Label::Distance branch_near = Label::kFar);
+ // Check if an object has a given incremental marking color. Also uses ecx!
+ void HasColor(Register object,
+ Register scratch0,
+ Register scratch1,
+ Label* has_color,
+ Label::Distance has_color_distance,
+ int first_bit,
+ int second_bit);
+
+ void JumpIfBlack(Register object,
+ Register scratch0,
+ Register scratch1,
+ Label* on_black,
+ Label::Distance on_black_distance = Label::kFar);
+
+ // Checks the color of an object. If the object is already grey or black
+ // then we just fall through, since it is already live. If it is white and
+ // we can determine that it doesn't need to be scanned, then we just mark it
+ // black and fall through. For the rest we jump to the label so the
+ // incremental marker can fix its assumptions.
+ void EnsureNotWhite(Register object,
+ Register scratch1,
+ Register scratch2,
+ Label* object_is_white_and_not_data,
+ Label::Distance distance);
+
+ // Notify the garbage collector that we wrote a pointer into an object.
+ // |object| is the object being stored into, |value| is the object being
+  // stored. The value and scratch registers are clobbered by the operation.
+ // The offset is the offset from the start of the object, not the offset from
+ // the tagged HeapObject pointer. For use with FieldOperand(reg, off).
+ void RecordWriteField(
+ Register object,
+ int offset,
+ Register value,
+ Register scratch,
+ SaveFPRegsMode save_fp,
+ RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
+ SmiCheck smi_check = INLINE_SMI_CHECK);
+
+ // As above, but the offset has the tag presubtracted. For use with
+ // Operand(reg, off).
+ void RecordWriteContextSlot(
+ Register context,
+ int offset,
+ Register value,
+ Register scratch,
+ SaveFPRegsMode save_fp,
+ RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
+ SmiCheck smi_check = INLINE_SMI_CHECK) {
+ RecordWriteField(context,
+ offset + kHeapObjectTag,
+ value,
+ scratch,
+ save_fp,
+ remembered_set_action,
+ smi_check);
+ }
- // For page containing |object| mark region covering [object+offset]
- // dirty. |object| is the object being stored into, |value| is the
- // object being stored. If offset is zero, then the scratch register
- // contains the array index into the elements array represented as a
- // Smi. All registers are clobbered by the operation. RecordWrite
+ // Notify the garbage collector that we wrote a pointer into a fixed array.
+ // |array| is the array being stored into, |value| is the
+ // object being stored. |index| is the array index represented as a
+  // Smi. All registers are clobbered by the operation. RecordWriteArray
// filters out smis so it does not update the write barrier if the
// value is a smi.
- void RecordWrite(Register object,
- int offset,
- Register value,
- Register scratch);
+ void RecordWriteArray(
+ Register array,
+ Register value,
+ Register index,
+ SaveFPRegsMode save_fp,
+ RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
+ SmiCheck smi_check = INLINE_SMI_CHECK);
// For page containing |object| mark region covering |address|
// dirty. |object| is the object being stored into, |value| is the
- // object being stored. All registers are clobbered by the
+ // object being stored. The address and value registers are clobbered by the
// operation. RecordWrite filters out smis so it does not update the
// write barrier if the value is a smi.
- void RecordWrite(Register object,
- Register address,
- Register value);
+ void RecordWrite(
+ Register object,
+ Register address,
+ Register value,
+ SaveFPRegsMode save_fp,
+ RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
+ SmiCheck smi_check = INLINE_SMI_CHECK);
#ifdef ENABLE_DEBUGGER_SUPPORT
// ---------------------------------------------------------------------------
@@ -105,15 +201,6 @@ class MacroAssembler: public Assembler {
void DebugBreak();
#endif
- // ---------------------------------------------------------------------------
- // Activation frames
-
- void EnterInternalFrame() { EnterFrame(StackFrame::INTERNAL); }
- void LeaveInternalFrame() { LeaveFrame(StackFrame::INTERNAL); }
-
- void EnterConstructFrame() { EnterFrame(StackFrame::CONSTRUCT); }
- void LeaveConstructFrame() { LeaveFrame(StackFrame::CONSTRUCT); }
-
// Enter specific kind of exit frame. Expects the number of
// arguments in register eax and sets up the number of arguments in
// register edi and the pointer to the first argument in register
@@ -159,6 +246,15 @@ class MacroAssembler: public Assembler {
void SetCallKind(Register dst, CallKind kind);
// Invoke the JavaScript function code by either calling or jumping.
+ void InvokeCode(Register code,
+ const ParameterCount& expected,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper,
+ CallKind call_kind) {
+ InvokeCode(Operand(code), expected, actual, flag, call_wrapper, call_kind);
+ }
+
void InvokeCode(const Operand& code,
const ParameterCount& expected,
const ParameterCount& actual,
@@ -182,7 +278,7 @@ class MacroAssembler: public Assembler {
const CallWrapper& call_wrapper,
CallKind call_kind);
- void InvokeFunction(JSFunction* function,
+ void InvokeFunction(Handle<JSFunction> function,
const ParameterCount& actual,
InvokeFlag flag,
const CallWrapper& call_wrapper,
@@ -225,6 +321,29 @@ class MacroAssembler: public Assembler {
Label* fail,
Label::Distance distance = Label::kFar);
+ // Check if a map for a JSObject indicates that the object can have both smi
+ // and HeapObject elements. Jump to the specified label if it does not.
+ void CheckFastObjectElements(Register map,
+ Label* fail,
+ Label::Distance distance = Label::kFar);
+
+ // Check if a map for a JSObject indicates that the object has fast smi only
+ // elements. Jump to the specified label if it does not.
+ void CheckFastSmiOnlyElements(Register map,
+ Label* fail,
+ Label::Distance distance = Label::kFar);
+
+  // Check whether maybe_number can be stored as a double in
+  // FastDoubleElements. If it can, store it at the index specified by |key|
+  // in the FastDoubleElements array |elements|; otherwise jump to |fail|.
+ void StoreNumberToDoubleElements(Register maybe_number,
+ Register elements,
+ Register key,
+ Register scratch1,
+ XMMRegister scratch2,
+ Label* fail,
+ bool specialize_for_processor);
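+  // When |specialize_for_processor| is set, the emitted code may use SSE2
+  // if the CPU supports it; otherwise an x87 fallback is generated.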
+
// Check if the map of an object is equal to a specified map and branch to
// label if not. Skip the smi check if not required (object is known to be a
// heap object)
@@ -277,7 +396,7 @@ class MacroAssembler: public Assembler {
void SmiTag(Register reg) {
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize == 1);
- add(reg, Operand(reg));
+ add(reg, reg);
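+    // Adding the register to itself shifts it left by one bit, which is the
+    // ia32 smi encoding (value << kSmiTagSize with a zero tag).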
}
void SmiUntag(Register reg) {
sar(reg, kSmiTagSize);
@@ -332,9 +451,10 @@ class MacroAssembler: public Assembler {
// ---------------------------------------------------------------------------
// Exception handling
- // Push a new try handler and link into try handler chain. The return
- // address must be pushed before calling this helper.
- void PushTryHandler(CodeLocation try_location, HandlerType type);
+  // Push a new try handler and link it into the try handler chain.
+ void PushTryHandler(CodeLocation try_location,
+ HandlerType type,
+ int handler_index);
// Unlink the stack handler on top of the stack from the try handler chain.
void PopTryHandler();
@@ -465,9 +585,19 @@ class MacroAssembler: public Assembler {
Register length,
Register scratch);
+  // Initialize fields with filler values. Fields starting at |start_offset|
+  // up to but not including |end_offset| are overwritten with the value in
+  // |filler|. At the end of the loop, |start_offset| holds the value of
+  // |end_offset|.
+ void InitializeFieldsWithFiller(Register start_offset,
+ Register end_offset,
+ Register filler);
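+
+  // The emitted loop is roughly the following (a sketch of the expected
+  // codegen, not a guaranteed instruction sequence):
+  //
+  //   Label loop, entry;
+  //   __ jmp(&entry);
+  //   __ bind(&loop);
+  //   __ mov(Operand(start_offset, 0), filler);
+  //   __ add(start_offset, Immediate(kPointerSize));
+  //   __ bind(&entry);
+  //   __ cmp(start_offset, end_offset);
+  //   __ j(less, &loop);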
+
// ---------------------------------------------------------------------------
// Support functions.
+ // Check a boolean-bit of a Smi field.
+ void BooleanBitTest(Register object, int field_offset, int bit_index);
+
// Check if result is zero and op is negative.
void NegativeZeroTest(Register result, Register op, Label* then_label);
@@ -484,7 +614,8 @@ class MacroAssembler: public Assembler {
void TryGetFunctionPrototype(Register function,
Register result,
Register scratch,
- Label* miss);
+ Label* miss,
+ bool miss_on_bound_function = false);
// Generates code for reporting that an illegal operation has
// occurred.
@@ -502,19 +633,9 @@ class MacroAssembler: public Assembler {
// Call a code stub. Generate the code if necessary.
void CallStub(CodeStub* stub, unsigned ast_id = kNoASTId);
- // Call a code stub and return the code object called. Try to generate
- // the code if necessary. Do not perform a GC but instead return a retry
- // after GC failure.
- MUST_USE_RESULT MaybeObject* TryCallStub(CodeStub* stub);
-
// Tail call a code stub (jump). Generate the code if necessary.
void TailCallStub(CodeStub* stub);
- // Tail call a code stub (jump) and return the code object called. Try to
- // generate the code if necessary. Do not perform a GC but instead return
- // a retry after GC failure.
- MUST_USE_RESULT MaybeObject* TryTailCallStub(CodeStub* stub);
-
// Return from a code stub after popping its arguments.
void StubReturn(int argc);
@@ -522,19 +643,9 @@ class MacroAssembler: public Assembler {
void CallRuntime(const Runtime::Function* f, int num_arguments);
void CallRuntimeSaveDoubles(Runtime::FunctionId id);
- // Call a runtime function, returning the CodeStub object called.
- // Try to generate the stub code if necessary. Do not perform a GC
- // but instead return a retry after GC failure.
- MUST_USE_RESULT MaybeObject* TryCallRuntime(const Runtime::Function* f,
- int num_arguments);
-
// Convenience function: Same as above, but takes the fid instead.
void CallRuntime(Runtime::FunctionId id, int num_arguments);
- // Convenience function: Same as above, but takes the fid instead.
- MUST_USE_RESULT MaybeObject* TryCallRuntime(Runtime::FunctionId id,
- int num_arguments);
-
// Convenience function: call an external reference.
void CallExternalReference(ExternalReference ref, int num_arguments);
@@ -545,23 +656,11 @@ class MacroAssembler: public Assembler {
int num_arguments,
int result_size);
- // Tail call of a runtime routine (jump). Try to generate the code if
- // necessary. Do not perform a GC but instead return a retry after GC failure.
- MUST_USE_RESULT MaybeObject* TryTailCallExternalReference(
- const ExternalReference& ext, int num_arguments, int result_size);
-
// Convenience function: tail call a runtime routine (jump).
void TailCallRuntime(Runtime::FunctionId fid,
int num_arguments,
int result_size);
- // Convenience function: tail call a runtime routine (jump). Try to generate
- // the code if necessary. Do not perform a GC but instead return a retry after
- // GC failure.
- MUST_USE_RESULT MaybeObject* TryTailCallRuntime(Runtime::FunctionId fid,
- int num_arguments,
- int result_size);
-
// Before calling a C-function from generated code, align arguments on stack.
// After aligning the frame, arguments must be stored in esp[0], esp[4],
// etc., not pushed. The argument count assumes all arguments are word sized.
@@ -586,19 +685,15 @@ class MacroAssembler: public Assembler {
// stores the pointer to the reserved slot into esi.
void PrepareCallApiFunction(int argc);
- // Calls an API function. Allocates HandleScope, extracts
- // returned value from handle and propagates exceptions.
- // Clobbers ebx, edi and caller-save registers. Restores context.
- // On return removes stack_space * kPointerSize (GCed).
- MaybeObject* TryCallApiFunctionAndReturn(ApiFunction* function,
- int stack_space);
+ // Calls an API function. Allocates HandleScope, extracts returned value
+ // from handle and propagates exceptions. Clobbers ebx, edi and
+ // caller-save registers. Restores context. On return removes
+ // stack_space * kPointerSize (GCed).
+ void CallApiFunctionAndReturn(Address function_address, int stack_space);
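+
+  // Illustrative call site, mirroring the stub-cache changes later in this
+  // patch (|getter_address| and |kStackSpace| are placeholders):
+  //
+  //   Address getter_address = v8::ToCData<Address>(callback->getter());
+  //   __ CallApiFunctionAndReturn(getter_address, kStackSpace);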
// Jump to a runtime routine.
void JumpToExternalReference(const ExternalReference& ext);
- MaybeObject* TryJumpToExternalReference(const ExternalReference& ext);
-
-
// ---------------------------------------------------------------------------
// Utilities
@@ -667,6 +762,9 @@ class MacroAssembler: public Assembler {
bool generating_stub() { return generating_stub_; }
void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
bool allow_stub_calls() { return allow_stub_calls_; }
+ void set_has_frame(bool value) { has_frame_ = value; }
+ bool has_frame() { return has_frame_; }
+ inline bool AllowThisStubCall(CodeStub* stub);
// ---------------------------------------------------------------------------
// String utilities.
@@ -690,9 +788,14 @@ class MacroAssembler: public Assembler {
return SafepointRegisterStackIndex(reg.code());
}
+ // Activation support.
+ void EnterFrame(StackFrame::Type type);
+ void LeaveFrame(StackFrame::Type type);
+
private:
bool generating_stub_;
bool allow_stub_calls_;
+ bool has_frame_;
// This handle will be patched with the code object on installation.
Handle<Object> code_object_;
@@ -703,14 +806,10 @@ class MacroAssembler: public Assembler {
const Operand& code_operand,
Label* done,
InvokeFlag flag,
- Label::Distance done_near = Label::kFar,
+ Label::Distance done_distance,
const CallWrapper& call_wrapper = NullCallWrapper(),
CallKind call_kind = CALL_AS_METHOD);
- // Activation support.
- void EnterFrame(StackFrame::Type type);
- void LeaveFrame(StackFrame::Type type);
-
void EnterExitFramePrologue();
void EnterExitFrameEpilogue(int argc, bool save_doubles);
@@ -729,6 +828,24 @@ class MacroAssembler: public Assembler {
Register scratch,
bool gc_allowed);
+ // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
+ void InNewSpace(Register object,
+ Register scratch,
+ Condition cc,
+ Label* condition_met,
+ Label::Distance condition_met_distance = Label::kFar);
+
+  // Helper for finding the mark bits for an address. Afterwards, the
+  // bitmap register points at the word with the mark bits and the mask
+  // register holds the position of the first bit. Uses ecx as scratch and
+  // leaves addr_reg unchanged.
+ inline void GetMarkBits(Register addr_reg,
+ Register bitmap_reg,
+ Register mask_reg);
+
+ // Helper for throwing exceptions. Compute a handler address and jump to
+ // it. See the implementation for register usage.
+ void JumpToHandlerEntry();
// Compute memory operands for safepoint stack slots.
Operand SafepointRegisterSlot(Register reg);
@@ -764,26 +881,26 @@ class CodePatcher {
// Static helper functions.
// Generate an Operand for loading a field from an object.
-static inline Operand FieldOperand(Register object, int offset) {
+inline Operand FieldOperand(Register object, int offset) {
return Operand(object, offset - kHeapObjectTag);
}
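
// For example, loading an object's map, a pattern used throughout this patch:
//
//   __ mov(scratch, FieldOperand(receiver, HeapObject::kMapOffset));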
// Generate an Operand for loading an indexed field from an object.
-static inline Operand FieldOperand(Register object,
- Register index,
- ScaleFactor scale,
- int offset) {
+inline Operand FieldOperand(Register object,
+ Register index,
+ ScaleFactor scale,
+ int offset) {
return Operand(object, index, scale, offset - kHeapObjectTag);
}
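
// For example, loading an element at an untagged integer |index|:
//
//   __ mov(value, FieldOperand(elements, index, times_pointer_size,
//                              FixedArray::kHeaderSize));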
-static inline Operand ContextOperand(Register context, int index) {
+inline Operand ContextOperand(Register context, int index) {
return Operand(context, Context::SlotOffset(index));
}
-static inline Operand GlobalObjectOperand() {
+inline Operand GlobalObjectOperand() {
return ContextOperand(esi, Context::GLOBAL_INDEX);
}
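
// For example, __ mov(eax, GlobalObjectOperand()) loads the global object of
// the current context (held in esi).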
diff --git a/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc b/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc
index d175d9e03..dbf01abff 100644
--- a/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc
+++ b/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc
@@ -1,4 +1,4 @@
-// Copyright 2008-2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -134,7 +134,7 @@ int RegExpMacroAssemblerIA32::stack_limit_slack() {
void RegExpMacroAssemblerIA32::AdvanceCurrentPosition(int by) {
if (by != 0) {
- __ add(Operand(edi), Immediate(by * char_size()));
+ __ add(edi, Immediate(by * char_size()));
}
}
@@ -152,8 +152,8 @@ void RegExpMacroAssemblerIA32::Backtrack() {
CheckPreemption();
// Pop Code* offset from backtrack stack, add Code* and jump to location.
Pop(ebx);
- __ add(Operand(ebx), Immediate(masm_->CodeObject()));
- __ jmp(Operand(ebx));
+ __ add(ebx, Immediate(masm_->CodeObject()));
+ __ jmp(ebx);
}
@@ -219,7 +219,7 @@ void RegExpMacroAssemblerIA32::CheckCharacters(Vector<const uc16> str,
int byte_offset = cp_offset * char_size();
if (check_end_of_string) {
// Check that there are at least str.length() characters left in the input.
- __ cmp(Operand(edi), Immediate(-(byte_offset + byte_length)));
+ __ cmp(edi, Immediate(-(byte_offset + byte_length)));
BranchOrBacktrack(greater, on_failure);
}
@@ -288,7 +288,7 @@ void RegExpMacroAssemblerIA32::CheckGreedyLoop(Label* on_equal) {
Label fallthrough;
__ cmp(edi, Operand(backtrack_stackpointer(), 0));
__ j(not_equal, &fallthrough);
- __ add(Operand(backtrack_stackpointer()), Immediate(kPointerSize)); // Pop.
+ __ add(backtrack_stackpointer(), Immediate(kPointerSize)); // Pop.
BranchOrBacktrack(no_condition, on_equal);
__ bind(&fallthrough);
}
@@ -300,7 +300,7 @@ void RegExpMacroAssemblerIA32::CheckNotBackReferenceIgnoreCase(
Label fallthrough;
__ mov(edx, register_location(start_reg)); // Index of start of capture
__ mov(ebx, register_location(start_reg + 1)); // Index of end of capture
- __ sub(ebx, Operand(edx)); // Length of capture.
+ __ sub(ebx, edx); // Length of capture.
// The length of a capture should not be negative. This can only happen
// if the end of the capture is unrecorded, or at a point earlier than
@@ -320,9 +320,9 @@ void RegExpMacroAssemblerIA32::CheckNotBackReferenceIgnoreCase(
__ push(backtrack_stackpointer());
// After this, the eax, ecx, and edi registers are available.
- __ add(edx, Operand(esi)); // Start of capture
- __ add(edi, Operand(esi)); // Start of text to match against capture.
- __ add(ebx, Operand(edi)); // End of text to match against capture.
+ __ add(edx, esi); // Start of capture
+ __ add(edi, esi); // Start of text to match against capture.
+ __ add(ebx, edi); // End of text to match against capture.
Label loop;
__ bind(&loop);
@@ -339,15 +339,15 @@ void RegExpMacroAssemblerIA32::CheckNotBackReferenceIgnoreCase(
__ movzx_b(ecx, Operand(edx, 0));
__ or_(ecx, 0x20);
- __ cmp(eax, Operand(ecx));
+ __ cmp(eax, ecx);
__ j(not_equal, &fail);
__ bind(&loop_increment);
// Increment pointers into match and capture strings.
- __ add(Operand(edx), Immediate(1));
- __ add(Operand(edi), Immediate(1));
+ __ add(edx, Immediate(1));
+ __ add(edi, Immediate(1));
// Compare to end of match, and loop if not done.
- __ cmp(edi, Operand(ebx));
+ __ cmp(edi, ebx);
__ j(below, &loop);
__ jmp(&success);
@@ -361,9 +361,9 @@ void RegExpMacroAssemblerIA32::CheckNotBackReferenceIgnoreCase(
// Restore original value before continuing.
__ pop(backtrack_stackpointer());
// Drop original value of character position.
- __ add(Operand(esp), Immediate(kPointerSize));
+ __ add(esp, Immediate(kPointerSize));
// Compute new value of character position after the matched part.
- __ sub(edi, Operand(esi));
+ __ sub(edi, esi);
} else {
ASSERT(mode_ == UC16);
// Save registers before calling C function.
@@ -389,16 +389,19 @@ void RegExpMacroAssemblerIA32::CheckNotBackReferenceIgnoreCase(
// Set byte_offset2.
// Found by adding negative string-end offset of current position (edi)
// to end of string.
- __ add(edi, Operand(esi));
+ __ add(edi, esi);
__ mov(Operand(esp, 1 * kPointerSize), edi);
// Set byte_offset1.
// Start of capture, where edx already holds string-end negative offset.
- __ add(edx, Operand(esi));
+ __ add(edx, esi);
__ mov(Operand(esp, 0 * kPointerSize), edx);
- ExternalReference compare =
- ExternalReference::re_case_insensitive_compare_uc16(masm_->isolate());
- __ CallCFunction(compare, argument_count);
+ {
+ AllowExternalCallThatCantCauseGC scope(masm_);
+ ExternalReference compare =
+ ExternalReference::re_case_insensitive_compare_uc16(masm_->isolate());
+ __ CallCFunction(compare, argument_count);
+ }
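+    // The scope above asserts that the compare helper cannot cause a GC, so
+    // it is safe to call it without setting up a frame.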
// Pop original values before reacting on result value.
__ pop(ebx);
__ pop(backtrack_stackpointer());
@@ -406,10 +409,10 @@ void RegExpMacroAssemblerIA32::CheckNotBackReferenceIgnoreCase(
__ pop(esi);
// Check if function returned non-zero for success or zero for failure.
- __ or_(eax, Operand(eax));
+ __ or_(eax, eax);
BranchOrBacktrack(zero, on_no_match);
// On success, increment position by length of capture.
- __ add(edi, Operand(ebx));
+ __ add(edi, ebx);
}
__ bind(&fallthrough);
}
@@ -425,7 +428,7 @@ void RegExpMacroAssemblerIA32::CheckNotBackReference(
// Find length of back-referenced capture.
__ mov(edx, register_location(start_reg));
__ mov(eax, register_location(start_reg + 1));
- __ sub(eax, Operand(edx)); // Length to check.
+ __ sub(eax, edx); // Length to check.
// Fail on partial or illegal capture (start of capture after end of capture).
BranchOrBacktrack(less, on_no_match);
// Succeed on empty capture (including no capture)
@@ -433,7 +436,7 @@ void RegExpMacroAssemblerIA32::CheckNotBackReference(
// Check that there are sufficient characters left in the input.
__ mov(ebx, edi);
- __ add(ebx, Operand(eax));
+ __ add(ebx, eax);
BranchOrBacktrack(greater, on_no_match);
// Save register to make it available below.
@@ -441,7 +444,7 @@ void RegExpMacroAssemblerIA32::CheckNotBackReference(
// Compute pointers to match string and capture string
__ lea(ebx, Operand(esi, edi, times_1, 0)); // Start of match.
- __ add(edx, Operand(esi)); // Start of capture.
+ __ add(edx, esi); // Start of capture.
__ lea(ecx, Operand(eax, ebx, times_1, 0)); // End of match
Label loop;
@@ -456,10 +459,10 @@ void RegExpMacroAssemblerIA32::CheckNotBackReference(
}
__ j(not_equal, &fail);
// Increment pointers into capture and match string.
- __ add(Operand(edx), Immediate(char_size()));
- __ add(Operand(ebx), Immediate(char_size()));
+ __ add(edx, Immediate(char_size()));
+ __ add(ebx, Immediate(char_size()));
// Check if we have reached end of match area.
- __ cmp(ebx, Operand(ecx));
+ __ cmp(ebx, ecx);
__ j(below, &loop);
__ jmp(&success);
@@ -471,7 +474,7 @@ void RegExpMacroAssemblerIA32::CheckNotBackReference(
__ bind(&success);
// Move current character position to position after match.
__ mov(edi, ecx);
- __ sub(Operand(edi), esi);
+ __ sub(edi, esi);
// Restore backtrack stackpointer.
__ pop(backtrack_stackpointer());
@@ -574,17 +577,17 @@ bool RegExpMacroAssemblerIA32::CheckSpecialCharacterClass(uc16 type,
return true;
case '.': {
// Match non-newlines (not 0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029)
- __ mov(Operand(eax), current_character());
- __ xor_(Operand(eax), Immediate(0x01));
+ __ mov(eax, current_character());
+ __ xor_(eax, Immediate(0x01));
// See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c
- __ sub(Operand(eax), Immediate(0x0b));
+ __ sub(eax, Immediate(0x0b));
__ cmp(eax, 0x0c - 0x0b);
BranchOrBacktrack(below_equal, on_no_match);
if (mode_ == UC16) {
// Compare original value to 0x2028 and 0x2029, using the already
// computed (current_char ^ 0x01 - 0x0b). I.e., check for
// 0x201d (0x2028 - 0x0b) or 0x201e.
- __ sub(Operand(eax), Immediate(0x2028 - 0x0b));
+ __ sub(eax, Immediate(0x2028 - 0x0b));
__ cmp(eax, 0x2029 - 0x2028);
BranchOrBacktrack(below_equal, on_no_match);
}
@@ -593,7 +596,7 @@ bool RegExpMacroAssemblerIA32::CheckSpecialCharacterClass(uc16 type,
case 'w': {
if (mode_ != ASCII) {
// Table is 128 entries, so all ASCII characters can be tested.
- __ cmp(Operand(current_character()), Immediate('z'));
+ __ cmp(current_character(), Immediate('z'));
BranchOrBacktrack(above, on_no_match);
}
ASSERT_EQ(0, word_character_map[0]); // Character '\0' is not a word char.
@@ -607,7 +610,7 @@ bool RegExpMacroAssemblerIA32::CheckSpecialCharacterClass(uc16 type,
Label done;
if (mode_ != ASCII) {
// Table is 128 entries, so all ASCII characters can be tested.
- __ cmp(Operand(current_character()), Immediate('z'));
+ __ cmp(current_character(), Immediate('z'));
__ j(above, &done);
}
ASSERT_EQ(0, word_character_map[0]); // Character '\0' is not a word char.
@@ -627,10 +630,10 @@ bool RegExpMacroAssemblerIA32::CheckSpecialCharacterClass(uc16 type,
case 'n': {
// Match newlines (0x0a('\n'), 0x0d('\r'), 0x2028 or 0x2029).
// The opposite of '.'.
- __ mov(Operand(eax), current_character());
- __ xor_(Operand(eax), Immediate(0x01));
+ __ mov(eax, current_character());
+ __ xor_(eax, Immediate(0x01));
// See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c
- __ sub(Operand(eax), Immediate(0x0b));
+ __ sub(eax, Immediate(0x0b));
__ cmp(eax, 0x0c - 0x0b);
if (mode_ == ASCII) {
BranchOrBacktrack(above, on_no_match);
@@ -641,7 +644,7 @@ bool RegExpMacroAssemblerIA32::CheckSpecialCharacterClass(uc16 type,
// Compare original value to 0x2028 and 0x2029, using the already
// computed (current_char ^ 0x01 - 0x0b). I.e., check for
// 0x201d (0x2028 - 0x0b) or 0x201e.
- __ sub(Operand(eax), Immediate(0x2028 - 0x0b));
+ __ sub(eax, Immediate(0x2028 - 0x0b));
__ cmp(eax, 1);
BranchOrBacktrack(above, on_no_match);
__ bind(&done);
@@ -668,7 +671,12 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
// Entry code:
__ bind(&entry_label_);
- // Start new stack frame.
+
+ // Tell the system that we have a stack frame. Because the type is MANUAL, no
+ // code is generated.
+ FrameScope scope(masm_, StackFrame::MANUAL);
+
+ // Actually emit code to start a new stack frame.
__ push(ebp);
__ mov(ebp, esp);
// Save callee-save registers. Order here should correspond to order of
@@ -699,7 +707,7 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
__ bind(&stack_limit_hit);
CallCheckStackGuardState(ebx);
- __ or_(eax, Operand(eax));
+ __ or_(eax, eax);
// If returned value is non-zero, we exit with the returned value as result.
__ j(not_zero, &exit_label_);
@@ -708,13 +716,13 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
__ mov(ebx, Operand(ebp, kStartIndex));
// Allocate space on stack for registers.
- __ sub(Operand(esp), Immediate(num_registers_ * kPointerSize));
+ __ sub(esp, Immediate(num_registers_ * kPointerSize));
// Load string length.
__ mov(esi, Operand(ebp, kInputEnd));
// Load input position.
__ mov(edi, Operand(ebp, kInputStart));
// Set up edi to be negative offset from string end.
- __ sub(edi, Operand(esi));
+ __ sub(edi, esi);
// Set eax to address of char before start of the string.
// (effectively string position -1).
@@ -736,7 +744,7 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
Label init_loop;
__ bind(&init_loop);
__ mov(Operand(ebp, ecx, times_1, +0), eax);
- __ sub(Operand(ecx), Immediate(kPointerSize));
+ __ sub(ecx, Immediate(kPointerSize));
__ cmp(ecx, kRegisterZero - num_saved_registers_ * kPointerSize);
__ j(greater, &init_loop);
}
@@ -777,12 +785,12 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
if (mode_ == UC16) {
__ lea(ecx, Operand(ecx, edx, times_2, 0));
} else {
- __ add(ecx, Operand(edx));
+ __ add(ecx, edx);
}
for (int i = 0; i < num_saved_registers_; i++) {
__ mov(eax, register_location(i));
// Convert to index from start of string, not end.
- __ add(eax, Operand(ecx));
+ __ add(eax, ecx);
if (mode_ == UC16) {
__ sar(eax, 1); // Convert byte index to character index.
}
@@ -819,7 +827,7 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
__ push(edi);
CallCheckStackGuardState(ebx);
- __ or_(eax, Operand(eax));
+ __ or_(eax, eax);
// If returning non-zero, we should end execution with the given
// result as return value.
__ j(not_zero, &exit_label_);
@@ -854,7 +862,7 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
__ CallCFunction(grow_stack, num_arguments);
  // If it returned NULL, we have failed to grow the stack, and
// must exit with a stack-overflow exception.
- __ or_(eax, Operand(eax));
+ __ or_(eax, eax);
__ j(equal, &exit_with_exception);
// Otherwise use return value as new stack pointer.
__ mov(backtrack_stackpointer(), eax);
@@ -1133,6 +1141,11 @@ int RegExpMacroAssemblerIA32::CheckStackGuardState(Address* return_address,
frame_entry<const String*>(re_frame, kInputString) = *subject;
frame_entry<const byte*>(re_frame, kInputStart) = new_address;
frame_entry<const byte*>(re_frame, kInputEnd) = new_address + byte_length;
+ } else if (frame_entry<const String*>(re_frame, kInputString) != *subject) {
+    // The subject string might have been a ConsString that underwent
+    // short-circuiting during GC. That will not change start_address, but
+    // it will change the pointer inside the subject handle.
+ frame_entry<const String*>(re_frame, kInputString) = *subject;
}
return 0;
@@ -1183,8 +1196,8 @@ void RegExpMacroAssemblerIA32::SafeCall(Label* to) {
void RegExpMacroAssemblerIA32::SafeReturn() {
__ pop(ebx);
- __ add(Operand(ebx), Immediate(masm_->CodeObject()));
- __ jmp(Operand(ebx));
+ __ add(ebx, Immediate(masm_->CodeObject()));
+ __ jmp(ebx);
}
@@ -1196,14 +1209,14 @@ void RegExpMacroAssemblerIA32::SafeCallTarget(Label* name) {
void RegExpMacroAssemblerIA32::Push(Register source) {
ASSERT(!source.is(backtrack_stackpointer()));
// Notice: This updates flags, unlike normal Push.
- __ sub(Operand(backtrack_stackpointer()), Immediate(kPointerSize));
+ __ sub(backtrack_stackpointer(), Immediate(kPointerSize));
__ mov(Operand(backtrack_stackpointer(), 0), source);
}
void RegExpMacroAssemblerIA32::Push(Immediate value) {
// Notice: This updates flags, unlike normal Push.
- __ sub(Operand(backtrack_stackpointer()), Immediate(kPointerSize));
+ __ sub(backtrack_stackpointer(), Immediate(kPointerSize));
__ mov(Operand(backtrack_stackpointer(), 0), value);
}
@@ -1212,7 +1225,7 @@ void RegExpMacroAssemblerIA32::Pop(Register target) {
ASSERT(!target.is(backtrack_stackpointer()));
__ mov(target, Operand(backtrack_stackpointer(), 0));
// Notice: This updates flags, unlike normal Pop.
- __ add(Operand(backtrack_stackpointer()), Immediate(kPointerSize));
+ __ add(backtrack_stackpointer(), Immediate(kPointerSize));
}
diff --git a/deps/v8/src/ia32/stub-cache-ia32.cc b/deps/v8/src/ia32/stub-cache-ia32.cc
index ab62764e6..aa8f47a88 100644
--- a/deps/v8/src/ia32/stub-cache-ia32.cc
+++ b/deps/v8/src/ia32/stub-cache-ia32.cc
@@ -66,8 +66,8 @@ static void ProbeTable(Isolate* isolate,
__ j(not_equal, &miss);
// Jump to the first instruction in the code stub.
- __ add(Operand(extra), Immediate(Code::kHeaderSize - kHeapObjectTag));
- __ jmp(Operand(extra));
+ __ add(extra, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ jmp(extra);
__ bind(&miss);
} else {
@@ -92,8 +92,8 @@ static void ProbeTable(Isolate* isolate,
__ mov(offset, Operand::StaticArray(offset, times_2, value_offset));
// Jump to the first instruction in the code stub.
- __ add(Operand(offset), Immediate(Code::kHeaderSize - kHeapObjectTag));
- __ jmp(Operand(offset));
+ __ add(offset, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ jmp(offset);
// Pop at miss.
__ bind(&miss);
@@ -107,12 +107,12 @@ static void ProbeTable(Isolate* isolate,
// must always call a backup property check that is complete.
// This function is safe to call if the receiver has fast properties.
// Name must be a symbol and receiver must be a heap object.
-static MaybeObject* GenerateDictionaryNegativeLookup(MacroAssembler* masm,
- Label* miss_label,
- Register receiver,
- String* name,
- Register r0,
- Register r1) {
+static void GenerateDictionaryNegativeLookup(MacroAssembler* masm,
+ Label* miss_label,
+ Register receiver,
+ Handle<String> name,
+ Register r0,
+ Register r1) {
ASSERT(name->IsSymbol());
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->negative_lookups(), 1);
@@ -142,19 +142,14 @@ static MaybeObject* GenerateDictionaryNegativeLookup(MacroAssembler* masm,
__ j(not_equal, miss_label);
Label done;
- MaybeObject* result =
- StringDictionaryLookupStub::GenerateNegativeLookup(masm,
- miss_label,
- &done,
- properties,
- name,
- r1);
- if (result->IsFailure()) return result;
-
+ StringDictionaryLookupStub::GenerateNegativeLookup(masm,
+ miss_label,
+ &done,
+ properties,
+ name,
+ r1);
__ bind(&done);
__ DecrementCounter(counters->negative_lookups_miss(), 1);
-
- return result;
}
@@ -165,25 +160,23 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
Register scratch,
Register extra,
Register extra2) {
- Isolate* isolate = Isolate::Current();
Label miss;
- USE(extra2); // The register extra2 is not used on the ia32 platform.
- // Make sure that code is valid. The shifting code relies on the
- // entry size being 8.
+ // Assert that code is valid. The shifting code relies on the entry size
+ // being 8.
ASSERT(sizeof(Entry) == 8);
- // Make sure the flags does not name a specific type.
+ // Assert the flags do not name a specific type.
ASSERT(Code::ExtractTypeFromFlags(flags) == 0);
- // Make sure that there are no register conflicts.
+ // Assert that there are no register conflicts.
ASSERT(!scratch.is(receiver));
ASSERT(!scratch.is(name));
ASSERT(!extra.is(receiver));
ASSERT(!extra.is(name));
ASSERT(!extra.is(scratch));
- // Check scratch and extra registers are valid, and extra2 is unused.
+ // Assert scratch and extra registers are valid, and extra2 is unused.
ASSERT(!scratch.is(no_reg));
ASSERT(extra2.is(no_reg));
@@ -197,19 +190,19 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
__ and_(scratch, (kPrimaryTableSize - 1) << kHeapObjectTagSize);
// Probe the primary table.
- ProbeTable(isolate, masm, flags, kPrimary, name, scratch, extra);
+ ProbeTable(isolate(), masm, flags, kPrimary, name, scratch, extra);
// Primary miss: Compute hash for secondary probe.
__ mov(scratch, FieldOperand(name, String::kHashFieldOffset));
__ add(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
__ xor_(scratch, flags);
__ and_(scratch, (kPrimaryTableSize - 1) << kHeapObjectTagSize);
- __ sub(scratch, Operand(name));
- __ add(Operand(scratch), Immediate(flags));
+ __ sub(scratch, name);
+ __ add(scratch, Immediate(flags));
__ and_(scratch, (kSecondaryTableSize - 1) << kHeapObjectTagSize);
// Probe the secondary table.
- ProbeTable(isolate, masm, flags, kSecondary, name, scratch, extra);
+ ProbeTable(isolate(), masm, flags, kSecondary, name, scratch, extra);
// Cache miss: Fall-through and let caller handle the miss by
// entering the runtime system.
@@ -228,14 +221,17 @@ void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
- MacroAssembler* masm, int index, Register prototype, Label* miss) {
+ MacroAssembler* masm,
+ int index,
+ Register prototype,
+ Label* miss) {
// Check we're still in the same context.
__ cmp(Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)),
masm->isolate()->global());
__ j(not_equal, miss);
// Get the global function with the given index.
- JSFunction* function =
- JSFunction::cast(masm->isolate()->global_context()->get(index));
+ Handle<JSFunction> function(
+ JSFunction::cast(masm->isolate()->global_context()->get(index)));
// Load its initial map. The global functions all have initial maps.
__ Set(prototype, Immediate(Handle<Map>(function->initial_map())));
// Load the prototype from the initial map.
@@ -318,7 +314,7 @@ void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm,
Register scratch2,
Label* miss_label) {
__ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label);
- __ mov(eax, Operand(scratch1));
+ __ mov(eax, scratch1);
__ ret(0);
}
@@ -327,8 +323,10 @@ void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm,
// are loaded directly; otherwise the property is loaded from the properties
// fixed array.
void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
- Register dst, Register src,
- JSObject* holder, int index) {
+ Register dst,
+ Register src,
+ Handle<JSObject> holder,
+ int index) {
// Adjust for the number of properties stored in the holder.
index -= holder->map()->inobject_properties();
if (index < 0) {
@@ -348,12 +346,12 @@ static void PushInterceptorArguments(MacroAssembler* masm,
Register receiver,
Register holder,
Register name,
- JSObject* holder_obj) {
+ Handle<JSObject> holder_obj) {
__ push(name);
- InterceptorInfo* interceptor = holder_obj->GetNamedInterceptor();
- ASSERT(!masm->isolate()->heap()->InNewSpace(interceptor));
+ Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor());
+ ASSERT(!masm->isolate()->heap()->InNewSpace(*interceptor));
Register scratch = name;
- __ mov(scratch, Immediate(Handle<Object>(interceptor)));
+ __ mov(scratch, Immediate(interceptor));
__ push(scratch);
__ push(receiver);
__ push(holder);
@@ -361,11 +359,12 @@ static void PushInterceptorArguments(MacroAssembler* masm,
}
-static void CompileCallLoadPropertyWithInterceptor(MacroAssembler* masm,
- Register receiver,
- Register holder,
- Register name,
- JSObject* holder_obj) {
+static void CompileCallLoadPropertyWithInterceptor(
+ MacroAssembler* masm,
+ Register receiver,
+ Register holder,
+ Register name,
+ Handle<JSObject> holder_obj) {
PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
__ CallExternalReference(
ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorOnly),
@@ -406,15 +405,15 @@ static void FreeSpaceForFastApiCall(MacroAssembler* masm, Register scratch) {
// frame.
// -----------------------------------
__ pop(scratch);
- __ add(Operand(esp), Immediate(kPointerSize * kFastApiCallArguments));
+ __ add(esp, Immediate(kPointerSize * kFastApiCallArguments));
__ push(scratch);
}
// Generates call to API function.
-static MaybeObject* GenerateFastApiCall(MacroAssembler* masm,
- const CallOptimization& optimization,
- int argc) {
+static void GenerateFastApiCall(MacroAssembler* masm,
+ const CallOptimization& optimization,
+ int argc) {
// ----------- S t a t e -------------
// -- esp[0] : return address
// -- esp[4] : object passing the type check
@@ -429,30 +428,25 @@ static MaybeObject* GenerateFastApiCall(MacroAssembler* masm,
// -- esp[(argc + 4) * 4] : receiver
// -----------------------------------
// Get the function and setup the context.
- JSFunction* function = optimization.constant_function();
- __ mov(edi, Immediate(Handle<JSFunction>(function)));
+ Handle<JSFunction> function = optimization.constant_function();
+ __ mov(edi, Immediate(function));
__ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
// Pass the additional arguments.
__ mov(Operand(esp, 2 * kPointerSize), edi);
- Object* call_data = optimization.api_call_info()->data();
- Handle<CallHandlerInfo> api_call_info_handle(optimization.api_call_info());
- if (masm->isolate()->heap()->InNewSpace(call_data)) {
- __ mov(ecx, api_call_info_handle);
+ Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
+ Handle<Object> call_data(api_call_info->data());
+ if (masm->isolate()->heap()->InNewSpace(*call_data)) {
+ __ mov(ecx, api_call_info);
__ mov(ebx, FieldOperand(ecx, CallHandlerInfo::kDataOffset));
__ mov(Operand(esp, 3 * kPointerSize), ebx);
} else {
- __ mov(Operand(esp, 3 * kPointerSize),
- Immediate(Handle<Object>(call_data)));
+ __ mov(Operand(esp, 3 * kPointerSize), Immediate(call_data));
}
// Prepare arguments.
__ lea(eax, Operand(esp, 3 * kPointerSize));
- Object* callback = optimization.api_call_info()->callback();
- Address api_function_address = v8::ToCData<Address>(callback);
- ApiFunction fun(api_function_address);
-
const int kApiArgc = 1; // API function gets reference to the v8::Arguments.
// Allocate the v8::Arguments structure in the arguments' space since
@@ -462,7 +456,7 @@ static MaybeObject* GenerateFastApiCall(MacroAssembler* masm,
__ PrepareCallApiFunction(kApiArgc + kApiStackSpace);
__ mov(ApiParameterOperand(1), eax); // v8::Arguments::implicit_args_.
- __ add(Operand(eax), Immediate(argc * kPointerSize));
+ __ add(eax, Immediate(argc * kPointerSize));
__ mov(ApiParameterOperand(2), eax); // v8::Arguments::values_.
__ Set(ApiParameterOperand(3), Immediate(argc)); // v8::Arguments::length_.
// v8::Arguments::is_construct_call_.
@@ -472,12 +466,10 @@ static MaybeObject* GenerateFastApiCall(MacroAssembler* masm,
__ lea(eax, ApiParameterOperand(1));
__ mov(ApiParameterOperand(0), eax);
- // Emitting a stub call may try to allocate (if the code is not
- // already generated). Do not allow the assembler to perform a
- // garbage collection but instead return the allocation failure
- // object.
- return masm->TryCallApiFunctionAndReturn(&fun,
- argc + kFastApiCallArguments + 1);
+ // Function address is a foreign pointer outside V8's heap.
+ Address function_address = v8::ToCData<Address>(api_call_info->callback());
+ __ CallApiFunctionAndReturn(function_address,
+ argc + kFastApiCallArguments + 1);
}
@@ -486,22 +478,22 @@ class CallInterceptorCompiler BASE_EMBEDDED {
CallInterceptorCompiler(StubCompiler* stub_compiler,
const ParameterCount& arguments,
Register name,
- Code::ExtraICState extra_ic_state)
+ Code::ExtraICState extra_state)
: stub_compiler_(stub_compiler),
arguments_(arguments),
name_(name),
- extra_ic_state_(extra_ic_state) {}
-
- MaybeObject* Compile(MacroAssembler* masm,
- JSObject* object,
- JSObject* holder,
- String* name,
- LookupResult* lookup,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* miss) {
+ extra_state_(extra_state) {}
+
+ void Compile(MacroAssembler* masm,
+ Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<String> name,
+ LookupResult* lookup,
+ Register receiver,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* miss) {
ASSERT(holder->HasNamedInterceptor());
ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
@@ -509,45 +501,27 @@ class CallInterceptorCompiler BASE_EMBEDDED {
__ JumpIfSmi(receiver, miss);
CallOptimization optimization(lookup);
-
if (optimization.is_constant_call()) {
- return CompileCacheable(masm,
- object,
- receiver,
- scratch1,
- scratch2,
- scratch3,
- holder,
- lookup,
- name,
- optimization,
- miss);
+ CompileCacheable(masm, object, receiver, scratch1, scratch2, scratch3,
+ holder, lookup, name, optimization, miss);
} else {
- CompileRegular(masm,
- object,
- receiver,
- scratch1,
- scratch2,
- scratch3,
- name,
- holder,
- miss);
- return masm->isolate()->heap()->undefined_value(); // Success.
+ CompileRegular(masm, object, receiver, scratch1, scratch2, scratch3,
+ name, holder, miss);
}
}
private:
- MaybeObject* CompileCacheable(MacroAssembler* masm,
- JSObject* object,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- JSObject* interceptor_holder,
- LookupResult* lookup,
- String* name,
- const CallOptimization& optimization,
- Label* miss_label) {
+ void CompileCacheable(MacroAssembler* masm,
+ Handle<JSObject> object,
+ Register receiver,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Handle<JSObject> interceptor_holder,
+ LookupResult* lookup,
+ Handle<String> name,
+ const CallOptimization& optimization,
+ Label* miss_label) {
ASSERT(optimization.is_constant_call());
ASSERT(!lookup->holder()->IsGlobalObject());
@@ -556,16 +530,14 @@ class CallInterceptorCompiler BASE_EMBEDDED {
bool can_do_fast_api_call = false;
if (optimization.is_simple_api_call() &&
!lookup->holder()->IsGlobalObject()) {
- depth1 =
- optimization.GetPrototypeDepthOfExpectedType(object,
- interceptor_holder);
+ depth1 = optimization.GetPrototypeDepthOfExpectedType(
+ object, interceptor_holder);
if (depth1 == kInvalidProtoDepth) {
- depth2 =
- optimization.GetPrototypeDepthOfExpectedType(interceptor_holder,
- lookup->holder());
+ depth2 = optimization.GetPrototypeDepthOfExpectedType(
+ interceptor_holder, Handle<JSObject>(lookup->holder()));
}
- can_do_fast_api_call = (depth1 != kInvalidProtoDepth) ||
- (depth2 != kInvalidProtoDepth);
+ can_do_fast_api_call =
+ depth1 != kInvalidProtoDepth || depth2 != kInvalidProtoDepth;
}
Counters* counters = masm->isolate()->counters();
@@ -581,9 +553,9 @@ class CallInterceptorCompiler BASE_EMBEDDED {
Label miss_cleanup;
Label* miss = can_do_fast_api_call ? &miss_cleanup : miss_label;
Register holder =
- stub_compiler_->CheckPrototypes(object, receiver,
- interceptor_holder, scratch1,
- scratch2, scratch3, name, depth1, miss);
+ stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
+ scratch1, scratch2, scratch3,
+ name, depth1, miss);
// Invoke an interceptor and if it provides a value,
// branch to |regular_invoke|.
@@ -596,10 +568,11 @@ class CallInterceptorCompiler BASE_EMBEDDED {
// Check that the maps from interceptor's holder to constant function's
// holder haven't changed and thus we can use cached constant function.
- if (interceptor_holder != lookup->holder()) {
+ if (*interceptor_holder != lookup->holder()) {
stub_compiler_->CheckPrototypes(interceptor_holder, receiver,
- lookup->holder(), scratch1,
- scratch2, scratch3, name, depth2, miss);
+ Handle<JSObject>(lookup->holder()),
+ scratch1, scratch2, scratch3,
+ name, depth2, miss);
} else {
// CheckPrototypes has a side effect of fetching a 'holder'
// for API (object which is instanceof for the signature). It's
@@ -610,11 +583,9 @@ class CallInterceptorCompiler BASE_EMBEDDED {
// Invoke function.
if (can_do_fast_api_call) {
- MaybeObject* result =
- GenerateFastApiCall(masm, optimization, arguments_.immediate());
- if (result->IsFailure()) return result;
+ GenerateFastApiCall(masm, optimization, arguments_.immediate());
} else {
- CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+ CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
? CALL_AS_FUNCTION
: CALL_AS_METHOD;
__ InvokeFunction(optimization.constant_function(), arguments_,
@@ -633,33 +604,27 @@ class CallInterceptorCompiler BASE_EMBEDDED {
if (can_do_fast_api_call) {
FreeSpaceForFastApiCall(masm, scratch1);
}
-
- return masm->isolate()->heap()->undefined_value(); // Success.
}
void CompileRegular(MacroAssembler* masm,
- JSObject* object,
+ Handle<JSObject> object,
Register receiver,
Register scratch1,
Register scratch2,
Register scratch3,
- String* name,
- JSObject* interceptor_holder,
+ Handle<String> name,
+ Handle<JSObject> interceptor_holder,
Label* miss_label) {
Register holder =
stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
- scratch1, scratch2, scratch3, name,
- miss_label);
+ scratch1, scratch2, scratch3,
+ name, miss_label);
- __ EnterInternalFrame();
+ FrameScope scope(masm, StackFrame::INTERNAL);
// Save the name_ register across the call.
__ push(name_);
- PushInterceptorArguments(masm,
- receiver,
- holder,
- name_,
- interceptor_holder);
+ PushInterceptorArguments(masm, receiver, holder, name_, interceptor_holder);
__ CallExternalReference(
ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForCall),
@@ -668,27 +633,30 @@ class CallInterceptorCompiler BASE_EMBEDDED {
// Restore the name_ register.
__ pop(name_);
- __ LeaveInternalFrame();
+
+ // Leave the internal frame.
}
void LoadWithInterceptor(MacroAssembler* masm,
Register receiver,
Register holder,
- JSObject* holder_obj,
+ Handle<JSObject> holder_obj,
Label* interceptor_succeeded) {
- __ EnterInternalFrame();
- __ push(holder); // Save the holder.
- __ push(name_); // Save the name.
-
- CompileCallLoadPropertyWithInterceptor(masm,
- receiver,
- holder,
- name_,
- holder_obj);
-
- __ pop(name_); // Restore the name.
- __ pop(receiver); // Restore the holder.
- __ LeaveInternalFrame();
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ push(holder); // Save the holder.
+ __ push(name_); // Save the name.
+
+ CompileCallLoadPropertyWithInterceptor(masm,
+ receiver,
+ holder,
+ name_,
+ holder_obj);
+
+ __ pop(name_); // Restore the name.
+ __ pop(receiver); // Restore the holder.
+ // Leave the internal frame.
+ }
__ cmp(eax, masm->isolate()->factory()->no_interceptor_result_sentinel());
__ j(not_equal, interceptor_succeeded);
@@ -697,38 +665,32 @@ class CallInterceptorCompiler BASE_EMBEDDED {
StubCompiler* stub_compiler_;
const ParameterCount& arguments_;
Register name_;
- Code::ExtraICState extra_ic_state_;
+ Code::ExtraICState extra_state_;
};
void StubCompiler::GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind) {
ASSERT(kind == Code::LOAD_IC || kind == Code::KEYED_LOAD_IC);
- Code* code = NULL;
- if (kind == Code::LOAD_IC) {
- code = masm->isolate()->builtins()->builtin(Builtins::kLoadIC_Miss);
- } else {
- code = masm->isolate()->builtins()->builtin(Builtins::kKeyedLoadIC_Miss);
- }
-
- Handle<Code> ic(code);
- __ jmp(ic, RelocInfo::CODE_TARGET);
+ Handle<Code> code = (kind == Code::LOAD_IC)
+ ? masm->isolate()->builtins()->LoadIC_Miss()
+ : masm->isolate()->builtins()->KeyedLoadIC_Miss();
+ __ jmp(code, RelocInfo::CODE_TARGET);
}
void StubCompiler::GenerateKeyedLoadMissForceGeneric(MacroAssembler* masm) {
- Code* code = masm->isolate()->builtins()->builtin(
- Builtins::kKeyedLoadIC_MissForceGeneric);
- Handle<Code> ic(code);
- __ jmp(ic, RelocInfo::CODE_TARGET);
+ Handle<Code> code =
+ masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
+ __ jmp(code, RelocInfo::CODE_TARGET);
}
// Both name_reg and receiver_reg are preserved on jumps to miss_label,
// but may be destroyed if store is successful.
void StubCompiler::GenerateStoreField(MacroAssembler* masm,
- JSObject* object,
+ Handle<JSObject> object,
int index,
- Map* transition,
+ Handle<Map> transition,
Register receiver_reg,
Register name_reg,
Register scratch,
@@ -751,12 +713,12 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
// Perform map transition for the receiver if necessary.
- if ((transition != NULL) && (object->map()->unused_property_fields() == 0)) {
+ if (!transition.is_null() && (object->map()->unused_property_fields() == 0)) {
// The properties must be extended before we can store the value.
// We jump to a runtime call that extends the properties array.
__ pop(scratch); // Return address.
__ push(receiver_reg);
- __ push(Immediate(Handle<Map>(transition)));
+ __ push(Immediate(transition));
__ push(eax);
__ push(scratch);
__ TailCallExternalReference(
@@ -767,11 +729,11 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
return;
}
- if (transition != NULL) {
+ if (!transition.is_null()) {
// Update the map of the object; no write barrier updating is
// needed because the map is never in new space.
__ mov(FieldOperand(receiver_reg, HeapObject::kMapOffset),
- Immediate(Handle<Map>(transition)));
+ Immediate(transition));
}
// Adjust for the number of properties stored in the object. Even in the
@@ -786,8 +748,12 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
// Update the write barrier for the array address.
// Pass the value being stored in the now unused name_reg.
- __ mov(name_reg, Operand(eax));
- __ RecordWrite(receiver_reg, offset, name_reg, scratch);
+ __ mov(name_reg, eax);
+ __ RecordWriteField(receiver_reg,
+ offset,
+ name_reg,
+ scratch,
+ kDontSaveFPRegs);
} else {
// Write to the properties array.
int offset = index * kPointerSize + FixedArray::kHeaderSize;
@@ -797,8 +763,12 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
// Update the write barrier for the array address.
// Pass the value being stored in the now unused name_reg.
- __ mov(name_reg, Operand(eax));
- __ RecordWrite(scratch, offset, name_reg, receiver_reg);
+ __ mov(name_reg, eax);
+ __ RecordWriteField(scratch,
+ offset,
+ name_reg,
+ receiver_reg,
+ kDontSaveFPRegs);
}
// Return the value (register eax).
@@ -809,70 +779,58 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
// Generate code to check that a global property cell is empty. Create
// the property cell at compilation time if no cell exists for the
// property.
-MUST_USE_RESULT static MaybeObject* GenerateCheckPropertyCell(
- MacroAssembler* masm,
- GlobalObject* global,
- String* name,
- Register scratch,
- Label* miss) {
- Object* probe;
- { MaybeObject* maybe_probe = global->EnsurePropertyCell(name);
- if (!maybe_probe->ToObject(&probe)) return maybe_probe;
- }
- JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(probe);
+static void GenerateCheckPropertyCell(MacroAssembler* masm,
+ Handle<GlobalObject> global,
+ Handle<String> name,
+ Register scratch,
+ Label* miss) {
+ Handle<JSGlobalPropertyCell> cell =
+ GlobalObject::EnsurePropertyCell(global, name);
ASSERT(cell->value()->IsTheHole());
+ Handle<Oddball> the_hole = masm->isolate()->factory()->the_hole_value();
if (Serializer::enabled()) {
- __ mov(scratch, Immediate(Handle<Object>(cell)));
+ __ mov(scratch, Immediate(cell));
__ cmp(FieldOperand(scratch, JSGlobalPropertyCell::kValueOffset),
- Immediate(masm->isolate()->factory()->the_hole_value()));
+ Immediate(the_hole));
} else {
- __ cmp(Operand::Cell(Handle<JSGlobalPropertyCell>(cell)),
- Immediate(masm->isolate()->factory()->the_hole_value()));
+ __ cmp(Operand::Cell(cell), Immediate(the_hole));
}
__ j(not_equal, miss);
- return cell;
}
// Calls GenerateCheckPropertyCell for each global object in the prototype chain
// from object to (but not including) holder.
-MUST_USE_RESULT static MaybeObject* GenerateCheckPropertyCells(
- MacroAssembler* masm,
- JSObject* object,
- JSObject* holder,
- String* name,
- Register scratch,
- Label* miss) {
- JSObject* current = object;
- while (current != holder) {
+static void GenerateCheckPropertyCells(MacroAssembler* masm,
+ Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<String> name,
+ Register scratch,
+ Label* miss) {
+ Handle<JSObject> current = object;
+ while (!current.is_identical_to(holder)) {
if (current->IsGlobalObject()) {
- // Returns a cell or a failure.
- MaybeObject* result = GenerateCheckPropertyCell(
- masm,
- GlobalObject::cast(current),
- name,
- scratch,
- miss);
- if (result->IsFailure()) return result;
+ GenerateCheckPropertyCell(masm,
+ Handle<GlobalObject>::cast(current),
+ name,
+ scratch,
+ miss);
}
- ASSERT(current->IsJSObject());
- current = JSObject::cast(current->GetPrototype());
+ current = Handle<JSObject>(JSObject::cast(current->GetPrototype()));
}
- return NULL;
}
-
#undef __
#define __ ACCESS_MASM(masm())
-Register StubCompiler::CheckPrototypes(JSObject* object,
+Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
Register object_reg,
- JSObject* holder,
+ Handle<JSObject> holder,
Register holder_reg,
Register scratch1,
Register scratch2,
- String* name,
+ Handle<String> name,
int save_at_depth,
Label* miss) {
// Make sure there's no overlap between holder and object registers.
@@ -882,7 +840,7 @@ Register StubCompiler::CheckPrototypes(JSObject* object,
// Keep track of the current object in register reg.
Register reg = object_reg;
- JSObject* current = object;
+ Handle<JSObject> current = object;
int depth = 0;
if (save_at_depth == depth) {
@@ -891,79 +849,58 @@ Register StubCompiler::CheckPrototypes(JSObject* object,
  // Traverse the prototype chain, checking the maps for fast and global
  // objects and doing negative lookups for normal objects.
- while (current != holder) {
- depth++;
+ while (!current.is_identical_to(holder)) {
+ ++depth;
// Only global objects and objects that do not require access
// checks are allowed in stubs.
ASSERT(current->IsJSGlobalProxy() || !current->IsAccessCheckNeeded());
- ASSERT(current->GetPrototype()->IsJSObject());
- JSObject* prototype = JSObject::cast(current->GetPrototype());
+ Handle<JSObject> prototype(JSObject::cast(current->GetPrototype()));
if (!current->HasFastProperties() &&
!current->IsJSGlobalObject() &&
!current->IsJSGlobalProxy()) {
if (!name->IsSymbol()) {
- MaybeObject* maybe_lookup_result = heap()->LookupSymbol(name);
- Object* lookup_result = NULL; // Initialization to please compiler.
- if (!maybe_lookup_result->ToObject(&lookup_result)) {
- set_failure(Failure::cast(maybe_lookup_result));
- return reg;
- }
- name = String::cast(lookup_result);
+ name = factory()->LookupSymbol(name);
}
- ASSERT(current->property_dictionary()->FindEntry(name) ==
+ ASSERT(current->property_dictionary()->FindEntry(*name) ==
StringDictionary::kNotFound);
- MaybeObject* negative_lookup = GenerateDictionaryNegativeLookup(masm(),
- miss,
- reg,
- name,
- scratch1,
- scratch2);
- if (negative_lookup->IsFailure()) {
- set_failure(Failure::cast(negative_lookup));
- return reg;
- }
+ GenerateDictionaryNegativeLookup(masm(), miss, reg, name,
+ scratch1, scratch2);
__ mov(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
- reg = holder_reg; // from now the object is in holder_reg
+ reg = holder_reg; // From now on the object will be in holder_reg.
__ mov(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
- } else if (heap()->InNewSpace(prototype)) {
- // Get the map of the current object.
- __ mov(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
- __ cmp(Operand(scratch1), Immediate(Handle<Map>(current->map())));
- // Branch on the result of the map check.
- __ j(not_equal, miss);
- // Check access rights to the global object. This has to happen
- // after the map check so that we know that the object is
- // actually a global object.
- if (current->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(reg, scratch1, miss);
-
- // Restore scratch register to be the map of the object.
- // We load the prototype from the map in the scratch register.
+ } else {
+ bool in_new_space = heap()->InNewSpace(*prototype);
+ Handle<Map> current_map(current->map());
+ if (in_new_space) {
+ // Save the map in scratch1 for later.
__ mov(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
+ __ cmp(scratch1, Immediate(current_map));
+ } else {
+ __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
+ Immediate(current_map));
}
- // The prototype is in new space; we cannot store a reference
- // to it in the code. Load it from the map.
- reg = holder_reg; // from now the object is in holder_reg
- __ mov(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
- } else {
- // Check the map of the current object.
- __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
- Immediate(Handle<Map>(current->map())));
// Branch on the result of the map check.
__ j(not_equal, miss);
- // Check access rights to the global object. This has to happen
- // after the map check so that we know that the object is
- // actually a global object.
+ // Check access rights to the global object. This has to happen after
+ // the map check so that we know that the object is actually a global
+ // object.
if (current->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(reg, scratch1, miss);
+ __ CheckAccessGlobalProxy(reg, scratch2, miss);
+ }
+ reg = holder_reg; // From now on the object will be in holder_reg.
+
+ if (in_new_space) {
+ // The prototype is in new space; we cannot store a reference to it
+ // in the code. Load it from the map.
+ __ mov(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
+ } else {
+ // The prototype is in old space; load it directly.
+ __ mov(reg, prototype);
}
- // The prototype is in old space; load it directly.
- reg = holder_reg; // from now the object is in holder_reg
- __ mov(reg, Handle<JSObject>(prototype));
}
if (save_at_depth == depth) {
@@ -973,7 +910,7 @@ Register StubCompiler::CheckPrototypes(JSObject* object,
// Go to the next object in the prototype chain.
current = prototype;
}
- ASSERT(current == holder);
+ ASSERT(current.is_identical_to(holder));
// Log the check depth.
LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
@@ -987,40 +924,33 @@ Register StubCompiler::CheckPrototypes(JSObject* object,
ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
if (holder->IsJSGlobalProxy()) {
__ CheckAccessGlobalProxy(reg, scratch1, miss);
- };
-
- // If we've skipped any global objects, it's not enough to verify
- // that their maps haven't changed. We also need to check that the
- // property cell for the property is still empty.
- MaybeObject* result = GenerateCheckPropertyCells(masm(),
- object,
- holder,
- name,
- scratch1,
- miss);
- if (result->IsFailure()) set_failure(Failure::cast(result));
+ }
+
+ // If we've skipped any global objects, it's not enough to verify that
+ // their maps haven't changed. We also need to check that the property
+ // cell for the property is still empty.
+ GenerateCheckPropertyCells(masm(), object, holder, name, scratch1, miss);
// Return the register containing the holder.
return reg;
}
-void StubCompiler::GenerateLoadField(JSObject* object,
- JSObject* holder,
+void StubCompiler::GenerateLoadField(Handle<JSObject> object,
+ Handle<JSObject> holder,
Register receiver,
Register scratch1,
Register scratch2,
Register scratch3,
int index,
- String* name,
+ Handle<String> name,
Label* miss) {
// Check that the receiver isn't a smi.
__ JumpIfSmi(receiver, miss);
// Check the prototype chain.
- Register reg =
- CheckPrototypes(object, receiver, holder,
- scratch1, scratch2, scratch3, name, miss);
+ Register reg = CheckPrototypes(
+ object, receiver, holder, scratch1, scratch2, scratch3, name, miss);
// Get the value from the properties.
GenerateFastPropertyLoad(masm(), eax, reg, holder, index);
@@ -1028,40 +958,37 @@ void StubCompiler::GenerateLoadField(JSObject* object,
}
-MaybeObject* StubCompiler::GenerateLoadCallback(JSObject* object,
- JSObject* holder,
- Register receiver,
- Register name_reg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- AccessorInfo* callback,
- String* name,
- Label* miss) {
+void StubCompiler::GenerateLoadCallback(Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Register receiver,
+ Register name_reg,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Handle<AccessorInfo> callback,
+ Handle<String> name,
+ Label* miss) {
// Check that the receiver isn't a smi.
__ JumpIfSmi(receiver, miss);
// Check that the maps haven't changed.
- Register reg =
- CheckPrototypes(object, receiver, holder, scratch1,
- scratch2, scratch3, name, miss);
-
- Handle<AccessorInfo> callback_handle(callback);
+ Register reg = CheckPrototypes(object, receiver, holder, scratch1,
+ scratch2, scratch3, name, miss);
// Insert additional parameters into the stack frame above return address.
ASSERT(!scratch3.is(reg));
__ pop(scratch3); // Get return address to place it below.
__ push(receiver); // receiver
- __ mov(scratch2, Operand(esp));
+ __ mov(scratch2, esp);
ASSERT(!scratch2.is(reg));
__ push(reg); // holder
// Push data from AccessorInfo.
- if (isolate()->heap()->InNewSpace(callback_handle->data())) {
- __ mov(scratch1, Immediate(callback_handle));
+ if (isolate()->heap()->InNewSpace(callback->data())) {
+ __ mov(scratch1, Immediate(callback));
__ push(FieldOperand(scratch1, AccessorInfo::kDataOffset));
} else {
- __ push(Immediate(Handle<Object>(callback_handle->data())));
+ __ push(Immediate(Handle<Object>(callback->data())));
}
// Save a pointer to where we pushed the arguments pointer.
@@ -1073,10 +1000,6 @@ MaybeObject* StubCompiler::GenerateLoadCallback(JSObject* object,
__ push(scratch3); // Restore return address.
- // Do call through the api.
- Address getter_address = v8::ToCData<Address>(callback->getter());
- ApiFunction fun(getter_address);
-
 // 3 elements array for v8::Arguments::values_, handler for name and pointer
 // to the values (it is considered a smi by the GC).
const int kStackSpace = 5;
@@ -1084,48 +1007,49 @@ MaybeObject* StubCompiler::GenerateLoadCallback(JSObject* object,
__ PrepareCallApiFunction(kApiArgc);
__ mov(ApiParameterOperand(0), ebx); // name.
- __ add(Operand(ebx), Immediate(kPointerSize));
+ __ add(ebx, Immediate(kPointerSize));
__ mov(ApiParameterOperand(1), ebx); // arguments pointer.
// Emitting a stub call may try to allocate (if the code is not
// already generated). Do not allow the assembler to perform a
// garbage collection but instead return the allocation failure
// object.
- return masm()->TryCallApiFunctionAndReturn(&fun, kStackSpace);
+ Address getter_address = v8::ToCData<Address>(callback->getter());
+ __ CallApiFunctionAndReturn(getter_address, kStackSpace);
}
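
Note that TryCallApiFunctionAndReturn, which returned a MaybeObject* so that
an allocation failure during code generation could be reported to the
caller, becomes CallApiFunctionAndReturn with no return value: once the stub
compiler runs on handles, it may allocate (and trigger GC) freely, so the
manual failure plumbing disappears. A rough sketch of the retired pattern,
with a toy MaybeResult standing in for V8's MaybeObject*:

    #include <cstdio>

    // Toy stand-in for MaybeObject*: either a value or a failure marker
    // meaning "retry after GC".
    struct MaybeResult {
      bool failed;
      int value;
      static MaybeResult Failure() { return MaybeResult{true, 0}; }
      static MaybeResult Of(int v) { return MaybeResult{false, v}; }
    };

    MaybeResult TryAllocate(bool out_of_memory) {
      return out_of_memory ? MaybeResult::Failure() : MaybeResult::Of(7);
    }

    // Old style: every step checks for failure and bubbles it up by hand,
    // the if (result->IsFailure()) pattern removed throughout this file.
    MaybeResult CompileStub(bool out_of_memory) {
      MaybeResult code = TryAllocate(out_of_memory);
      if (code.failed) return code;  // propagate to the caller
      return MaybeResult::Of(code.value + 1);
    }

    int main() {
      MaybeResult r = CompileStub(false);
      std::printf(r.failed ? "failure\n" : "code %d\n", r.value);
      return 0;
    }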
-void StubCompiler::GenerateLoadConstant(JSObject* object,
- JSObject* holder,
+void StubCompiler::GenerateLoadConstant(Handle<JSObject> object,
+ Handle<JSObject> holder,
Register receiver,
Register scratch1,
Register scratch2,
Register scratch3,
- Object* value,
- String* name,
+ Handle<Object> value,
+ Handle<String> name,
Label* miss) {
// Check that the receiver isn't a smi.
__ JumpIfSmi(receiver, miss);
// Check that the maps haven't changed.
- CheckPrototypes(object, receiver, holder,
- scratch1, scratch2, scratch3, name, miss);
+ CheckPrototypes(
+ object, receiver, holder, scratch1, scratch2, scratch3, name, miss);
// Return the constant value.
- __ mov(eax, Handle<Object>(value));
+ __ mov(eax, value);
__ ret(0);
}
-void StubCompiler::GenerateLoadInterceptor(JSObject* object,
- JSObject* interceptor_holder,
+void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
+ Handle<JSObject> interceptor_holder,
LookupResult* lookup,
Register receiver,
Register name_reg,
Register scratch1,
Register scratch2,
Register scratch3,
- String* name,
+ Handle<String> name,
Label* miss) {
ASSERT(interceptor_holder->HasNamedInterceptor());
ASSERT(!interceptor_holder->GetNamedInterceptor()->getter()->IsUndefined());
@@ -1141,9 +1065,9 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object,
if (lookup->type() == FIELD) {
compile_followup_inline = true;
} else if (lookup->type() == CALLBACKS &&
- lookup->GetCallbackObject()->IsAccessorInfo() &&
- AccessorInfo::cast(lookup->GetCallbackObject())->getter() != NULL) {
- compile_followup_inline = true;
+ lookup->GetCallbackObject()->IsAccessorInfo()) {
+ compile_followup_inline =
+ AccessorInfo::cast(lookup->GetCallbackObject())->getter() != NULL;
}
}
@@ -1158,47 +1082,49 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object,
// Save necessary data before invoking an interceptor.
// Requires a frame to make GC aware of pushed pointers.
- __ EnterInternalFrame();
+ {
+ FrameScope frame_scope(masm(), StackFrame::INTERNAL);
- if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
- // CALLBACKS case needs a receiver to be passed into C++ callback.
- __ push(receiver);
- }
- __ push(holder_reg);
- __ push(name_reg);
-
- // Invoke an interceptor. Note: map checks from receiver to
- // interceptor's holder has been compiled before (see a caller
- // of this method.)
- CompileCallLoadPropertyWithInterceptor(masm(),
- receiver,
- holder_reg,
- name_reg,
- interceptor_holder);
-
- // Check if interceptor provided a value for property. If it's
- // the case, return immediately.
- Label interceptor_failed;
- __ cmp(eax, factory()->no_interceptor_result_sentinel());
- __ j(equal, &interceptor_failed);
- __ LeaveInternalFrame();
- __ ret(0);
+ if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
+ // CALLBACKS case needs a receiver to be passed into C++ callback.
+ __ push(receiver);
+ }
+ __ push(holder_reg);
+ __ push(name_reg);
- __ bind(&interceptor_failed);
- __ pop(name_reg);
- __ pop(holder_reg);
- if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
- __ pop(receiver);
- }
+ // Invoke the interceptor. Note: the map checks from the receiver to the
+ // interceptor's holder have already been compiled (see the caller of
+ // this method).
+ CompileCallLoadPropertyWithInterceptor(masm(),
+ receiver,
+ holder_reg,
+ name_reg,
+ interceptor_holder);
+
+ // Check if the interceptor provided a value for the property. If so,
+ // return immediately.
+ Label interceptor_failed;
+ __ cmp(eax, factory()->no_interceptor_result_sentinel());
+ __ j(equal, &interceptor_failed);
+ frame_scope.GenerateLeaveFrame();
+ __ ret(0);
- __ LeaveInternalFrame();
+ __ bind(&interceptor_failed);
+ __ pop(name_reg);
+ __ pop(holder_reg);
+ if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
+ __ pop(receiver);
+ }
+
+ // Leave the internal frame.
+ }
// Check that the maps from interceptor's holder to lookup's holder
// haven't changed. And load lookup's holder into holder_reg.
- if (interceptor_holder != lookup->holder()) {
+ if (*interceptor_holder != lookup->holder()) {
holder_reg = CheckPrototypes(interceptor_holder,
holder_reg,
- lookup->holder(),
+ Handle<JSObject>(lookup->holder()),
scratch1,
scratch2,
scratch3,
@@ -1210,15 +1136,15 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object,
 // We found a FIELD property in the prototype chain of the interceptor's
 // holder. Retrieve the field from that holder.
GenerateFastPropertyLoad(masm(), eax, holder_reg,
- lookup->holder(), lookup->GetFieldIndex());
+ Handle<JSObject>(lookup->holder()),
+ lookup->GetFieldIndex());
__ ret(0);
} else {
 // We found a CALLBACKS property in the prototype chain of the
 // interceptor's holder.
ASSERT(lookup->type() == CALLBACKS);
- ASSERT(lookup->GetCallbackObject()->IsAccessorInfo());
- AccessorInfo* callback = AccessorInfo::cast(lookup->GetCallbackObject());
- ASSERT(callback != NULL);
+ Handle<AccessorInfo> callback(
+ AccessorInfo::cast(lookup->GetCallbackObject()));
ASSERT(callback->getter() != NULL);
// Tail call to runtime.
@@ -1227,7 +1153,7 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object,
__ pop(scratch2); // return address
__ push(receiver);
__ push(holder_reg);
- __ mov(holder_reg, Immediate(Handle<AccessorInfo>(callback)));
+ __ mov(holder_reg, Immediate(callback));
__ push(FieldOperand(holder_reg, AccessorInfo::kDataOffset));
__ push(holder_reg);
__ push(name_reg);
@@ -1257,17 +1183,17 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object,
}
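
The interceptor path above trades the paired EnterInternalFrame() /
LeaveInternalFrame() calls for a block-scoped FrameScope, tying the frame
teardown to a C++ scope; the early-return fast path still emits its own
teardown through GenerateLeaveFrame(). A minimal RAII sketch of that shape
(the names mirror the diff, but the class is illustrative, not the real
macro-assembler API):

    #include <cstdio>

    // Stand-in for the assembler: logs the instructions it would emit.
    struct Masm {
      void Emit(const char* what) { std::printf("%s\n", what); }
    };

    // RAII frame scope: construction emits the frame setup, destruction
    // the teardown, mirroring FrameScope(masm(), StackFrame::INTERNAL).
    class FrameScope {
     public:
      explicit FrameScope(Masm* masm) : masm_(masm) {
        masm_->Emit("enter internal frame");
      }
      // A return path that leaves the frame before the scope closes emits
      // its own teardown, as the interceptor fast path does above.
      void GenerateLeaveFrame() {
        masm_->Emit("leave internal frame (early)");
      }
      ~FrameScope() { masm_->Emit("leave internal frame"); }
     private:
      Masm* masm_;
    };

    int main() {
      Masm masm;
      {
        FrameScope scope(&masm);
        masm.Emit("call interceptor");
        scope.GenerateLeaveFrame();  // fast path: ret inside the scope
        masm.Emit("ret");
      }  // slow path falls through; the destructor emits the teardown
      return 0;
    }

The win over the explicit pair is that every exit from the C++ block is
forced to account for the frame, instead of relying on each path to call
LeaveInternalFrame() by hand.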
-void CallStubCompiler::GenerateNameCheck(String* name, Label* miss) {
+void CallStubCompiler::GenerateNameCheck(Handle<String> name, Label* miss) {
if (kind_ == Code::KEYED_CALL_IC) {
- __ cmp(Operand(ecx), Immediate(Handle<String>(name)));
+ __ cmp(ecx, Immediate(name));
__ j(not_equal, miss);
}
}
-void CallStubCompiler::GenerateGlobalReceiverCheck(JSObject* object,
- JSObject* holder,
- String* name,
+void CallStubCompiler::GenerateGlobalReceiverCheck(Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<String> name,
Label* miss) {
ASSERT(holder->IsGlobalObject());
@@ -1280,7 +1206,7 @@ void CallStubCompiler::GenerateGlobalReceiverCheck(JSObject* object,
// If the object is the holder then we know that it's a global
// object which can only happen for contextual calls. In this case,
// the receiver cannot be a smi.
- if (object != holder) {
+ if (!object.is_identical_to(holder)) {
__ JumpIfSmi(edx, miss);
}
@@ -1289,19 +1215,20 @@ void CallStubCompiler::GenerateGlobalReceiverCheck(JSObject* object,
}
-void CallStubCompiler::GenerateLoadFunctionFromCell(JSGlobalPropertyCell* cell,
- JSFunction* function,
- Label* miss) {
+void CallStubCompiler::GenerateLoadFunctionFromCell(
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<JSFunction> function,
+ Label* miss) {
// Get the value from the cell.
if (Serializer::enabled()) {
- __ mov(edi, Immediate(Handle<JSGlobalPropertyCell>(cell)));
+ __ mov(edi, Immediate(cell));
__ mov(edi, FieldOperand(edi, JSGlobalPropertyCell::kValueOffset));
} else {
- __ mov(edi, Operand::Cell(Handle<JSGlobalPropertyCell>(cell)));
+ __ mov(edi, Operand::Cell(cell));
}
// Check that the cell contains the same function.
- if (isolate()->heap()->InNewSpace(function)) {
+ if (isolate()->heap()->InNewSpace(*function)) {
// We can't embed a pointer to a function in new space so we have
// to verify that the shared function info is unchanged. This has
// the nice side effect that multiple closures based on the same
@@ -1314,31 +1241,26 @@ void CallStubCompiler::GenerateLoadFunctionFromCell(JSGlobalPropertyCell* cell,
// Check the shared function info. Make sure it hasn't changed.
__ cmp(FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset),
Immediate(Handle<SharedFunctionInfo>(function->shared())));
- __ j(not_equal, miss);
} else {
- __ cmp(Operand(edi), Immediate(Handle<JSFunction>(function)));
- __ j(not_equal, miss);
+ __ cmp(edi, Immediate(function));
}
+ __ j(not_equal, miss);
}
-MaybeObject* CallStubCompiler::GenerateMissBranch() {
- MaybeObject* maybe_obj =
+void CallStubCompiler::GenerateMissBranch() {
+ Handle<Code> code =
isolate()->stub_cache()->ComputeCallMiss(arguments().immediate(),
kind_,
- extra_ic_state_);
- Object* obj;
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- __ jmp(Handle<Code>(Code::cast(obj)), RelocInfo::CODE_TARGET);
- return obj;
+ extra_state_);
+ __ jmp(code, RelocInfo::CODE_TARGET);
}
-MUST_USE_RESULT MaybeObject* CallStubCompiler::CompileCallField(
- JSObject* object,
- JSObject* holder,
- int index,
- String* name) {
+Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object,
+ Handle<JSObject> holder,
+ int index,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- ecx : name
// -- esp[0] : return address
@@ -1376,7 +1298,7 @@ MUST_USE_RESULT MaybeObject* CallStubCompiler::CompileCallField(
}
// Invoke the function.
- CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+ CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
? CALL_AS_FUNCTION
: CALL_AS_METHOD;
__ InvokeFunction(edi, arguments(), JUMP_FUNCTION,
@@ -1384,19 +1306,19 @@ MUST_USE_RESULT MaybeObject* CallStubCompiler::CompileCallField(
// Handle call cache miss.
__ bind(&miss);
- MaybeObject* maybe_result = GenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
+ GenerateMissBranch();
// Return the generated code.
return GetCode(FIELD, name);
}
-MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
- JSObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
+Handle<Code> CallStubCompiler::CompileArrayPushCall(
+ Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<JSFunction> function,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- ecx : name
// -- esp[0] : return address
@@ -1406,8 +1328,8 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
// -----------------------------------
// If object is not an array, bail out to regular call.
- if (!object->IsJSArray() || cell != NULL) {
- return isolate()->heap()->undefined_value();
+ if (!object->IsJSArray() || !cell.is_null()) {
+ return Handle<Code>::null();
}
Label miss;
@@ -1421,9 +1343,8 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
// Check that the receiver isn't a smi.
__ JumpIfSmi(edx, &miss);
- CheckPrototypes(JSObject::cast(object), edx,
- holder, ebx,
- eax, edi, name, &miss);
+ CheckPrototypes(Handle<JSObject>::cast(object), edx, holder, ebx, eax, edi,
+ name, &miss);
if (argc == 0) {
// Noop, return the length.
@@ -1441,21 +1362,25 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
__ j(not_equal, &call_builtin);
if (argc == 1) { // Otherwise fall through to call builtin.
- Label exit, with_write_barrier, attempt_to_grow_elements;
+ Label attempt_to_grow_elements, with_write_barrier;
// Get the array's length into eax and calculate new length.
__ mov(eax, FieldOperand(edx, JSArray::kLengthOffset));
STATIC_ASSERT(kSmiTagSize == 1);
STATIC_ASSERT(kSmiTag == 0);
- __ add(Operand(eax), Immediate(Smi::FromInt(argc)));
+ __ add(eax, Immediate(Smi::FromInt(argc)));
// Get the element's length into ecx.
__ mov(ecx, FieldOperand(ebx, FixedArray::kLengthOffset));
// Check if we could survive without allocation.
- __ cmp(eax, Operand(ecx));
+ __ cmp(eax, ecx);
__ j(greater, &attempt_to_grow_elements);
+ // Check if value is a smi.
+ __ mov(ecx, Operand(esp, argc * kPointerSize));
+ __ JumpIfNotSmi(ecx, &with_write_barrier);
+
// Save new length.
__ mov(FieldOperand(edx, JSArray::kLengthOffset), eax);
@@ -1463,20 +1388,27 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
__ lea(edx, FieldOperand(ebx,
eax, times_half_pointer_size,
FixedArray::kHeaderSize - argc * kPointerSize));
- __ mov(ecx, Operand(esp, argc * kPointerSize));
__ mov(Operand(edx, 0), ecx);
- // Check if value is a smi.
- __ JumpIfNotSmi(ecx, &with_write_barrier);
-
- __ bind(&exit);
__ ret((argc + 1) * kPointerSize);
__ bind(&with_write_barrier);
- __ InNewSpace(ebx, ecx, equal, &exit);
+ __ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
+ __ CheckFastObjectElements(edi, &call_builtin);
+
+ // Save new length.
+ __ mov(FieldOperand(edx, JSArray::kLengthOffset), eax);
+
+ // Push the element.
+ __ lea(edx, FieldOperand(ebx,
+ eax, times_half_pointer_size,
+ FixedArray::kHeaderSize - argc * kPointerSize));
+ __ mov(Operand(edx, 0), ecx);
+
+ __ RecordWrite(ebx, edx, ecx, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
- __ RecordWriteHelper(ebx, edx, ecx);
__ ret((argc + 1) * kPointerSize);
__ bind(&attempt_to_grow_elements);
@@ -1484,6 +1416,19 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
__ jmp(&call_builtin);
}
+ __ mov(edi, Operand(esp, argc * kPointerSize));
+ // Growing elements that are SMI-only requires special handling in case
+ // the new element is non-Smi. For now, delegate to the builtin.
+ Label no_fast_elements_check;
+ __ JumpIfSmi(edi, &no_fast_elements_check);
+ __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
+ __ CheckFastObjectElements(ecx, &call_builtin, Label::kFar);
+ __ bind(&no_fast_elements_check);
+
+ // We could be lucky and the elements array could be at the top of
+ // new-space. In this case we can just grow it in place by moving the
+ // allocation pointer up.
+
ExternalReference new_space_allocation_top =
ExternalReference::new_space_allocation_top_address(isolate());
ExternalReference new_space_allocation_limit =
@@ -1497,33 +1442,43 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
__ lea(edx, FieldOperand(ebx,
eax, times_half_pointer_size,
FixedArray::kHeaderSize - argc * kPointerSize));
- __ cmp(edx, Operand(ecx));
+ __ cmp(edx, ecx);
__ j(not_equal, &call_builtin);
- __ add(Operand(ecx), Immediate(kAllocationDelta * kPointerSize));
+ __ add(ecx, Immediate(kAllocationDelta * kPointerSize));
__ cmp(ecx, Operand::StaticVariable(new_space_allocation_limit));
__ j(above, &call_builtin);
// We fit and could grow elements.
__ mov(Operand::StaticVariable(new_space_allocation_top), ecx);
- __ mov(ecx, Operand(esp, argc * kPointerSize));
// Push the argument...
- __ mov(Operand(edx, 0), ecx);
+ __ mov(Operand(edx, 0), edi);
// ... and fill the rest with holes.
for (int i = 1; i < kAllocationDelta; i++) {
__ mov(Operand(edx, i * kPointerSize),
Immediate(factory()->the_hole_value()));
}
+ // We know the elements array is in new space so we don't need the
+ // remembered set, but we just pushed a value onto it so we may have to
+ // tell the incremental marker to rescan the object that we just grew. We
+ // don't need to worry about the holes because they are in old space and
+ // already marked black.
+ __ RecordWrite(ebx, edx, edi, kDontSaveFPRegs, OMIT_REMEMBERED_SET);
+
// Restore receiver to edx as finish sequence assumes it's here.
__ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
// Increment element's and array's sizes.
__ add(FieldOperand(ebx, FixedArray::kLengthOffset),
Immediate(Smi::FromInt(kAllocationDelta)));
+
+ // NOTE: This only happens in new-space, where we don't care about the
+ // black-byte-count on pages. Otherwise we would also have to update that
+ // count if the object is black.
+
__ mov(FieldOperand(edx, JSArray::kLengthOffset), eax);
- // Elements are in new space, so write barrier is not required.
__ ret((argc + 1) * kPointerSize);
}
@@ -1535,19 +1490,19 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
}
__ bind(&miss);
- MaybeObject* maybe_result = GenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
+ GenerateMissBranch();
// Return the generated code.
return GetCode(function);
}
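
The push fast path now ends in the full RecordWrite barrier rather than
RecordWriteHelper, with explicit remembered-set and smi-check modes, because
this collector is both generational and incrementally marking: storing a
heap pointer may require remembering the slot (an old-to-new edge) and may
require re-greying the host object if the marker already finished it. A
schematic sketch of those two halves, with invented predicates standing in
for the real heap queries:

    #include <cstdio>
    #include <unordered_set>

    // Hypothetical heap model: objects live in old or new space, and the
    // incremental marker tracks objects it has fully scanned ("black").
    struct HeapObject {
      bool in_new_space;
      bool marked_black;
    };

    std::unordered_set<HeapObject**> remembered_set;   // slots to rescan
    std::unordered_set<HeapObject*> marking_worklist;  // objects to re-grey

    // Schematic barrier, run after storing `value` into `*slot` of `host`.
    void RecordWrite(HeapObject* host, HeapObject** slot, HeapObject* value) {
      // Generational half: an old-space object now points into new space,
      // so the slot must be revisited by the next scavenge.
      if (!host->in_new_space && value->in_new_space) {
        remembered_set.insert(slot);
      }
      // Incremental-marking half: a black object gained a new outgoing
      // edge, so the marker has to look at it again.
      if (host->marked_black) {
        marking_worklist.insert(host);
      }
    }

    int main() {
      HeapObject elements{false, true};  // old-space, already marked black
      HeapObject pushed{true, false};    // freshly allocated value
      HeapObject* backing[1] = {nullptr};

      backing[0] = &pushed;                          // the store...
      RecordWrite(&elements, &backing[0], &pushed);  // ...and its barrier

      std::printf("remembered slots: %zu, re-greyed: %zu\n",
                  remembered_set.size(), marking_worklist.size());
      return 0;
    }

That split is also why the in-place growth path can pass
OMIT_REMEMBERED_SET: the elements array is known to be in new space, so
only the incremental-marking half can apply.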
-MaybeObject* CallStubCompiler::CompileArrayPopCall(Object* object,
- JSObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
+Handle<Code> CallStubCompiler::CompileArrayPopCall(
+ Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<JSFunction> function,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- ecx : name
// -- esp[0] : return address
@@ -1557,8 +1512,8 @@ MaybeObject* CallStubCompiler::CompileArrayPopCall(Object* object,
// -----------------------------------
// If object is not an array, bail out to regular call.
- if (!object->IsJSArray() || cell != NULL) {
- return heap()->undefined_value();
+ if (!object->IsJSArray() || !cell.is_null()) {
+ return Handle<Code>::null();
}
Label miss, return_undefined, call_builtin;
@@ -1571,9 +1526,8 @@ MaybeObject* CallStubCompiler::CompileArrayPopCall(Object* object,
// Check that the receiver isn't a smi.
__ JumpIfSmi(edx, &miss);
- CheckPrototypes(JSObject::cast(object), edx,
- holder, ebx,
- eax, edi, name, &miss);
+ CheckPrototypes(Handle<JSObject>::cast(object), edx, holder, ebx, eax, edi,
+ name, &miss);
// Get the elements array of the object.
__ mov(ebx, FieldOperand(edx, JSArray::kElementsOffset));
@@ -1585,7 +1539,7 @@ MaybeObject* CallStubCompiler::CompileArrayPopCall(Object* object,
// Get the array's length into ecx and calculate new length.
__ mov(ecx, FieldOperand(edx, JSArray::kLengthOffset));
- __ sub(Operand(ecx), Immediate(Smi::FromInt(1)));
+ __ sub(ecx, Immediate(Smi::FromInt(1)));
__ j(negative, &return_undefined);
// Get the last element.
@@ -1594,7 +1548,7 @@ MaybeObject* CallStubCompiler::CompileArrayPopCall(Object* object,
__ mov(eax, FieldOperand(ebx,
ecx, times_half_pointer_size,
FixedArray::kHeaderSize));
- __ cmp(Operand(eax), Immediate(factory()->the_hole_value()));
+ __ cmp(eax, Immediate(factory()->the_hole_value()));
__ j(equal, &call_builtin);
// Set the array's length.
@@ -1618,20 +1572,19 @@ MaybeObject* CallStubCompiler::CompileArrayPopCall(Object* object,
1);
__ bind(&miss);
- MaybeObject* maybe_result = GenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
+ GenerateMissBranch();
// Return the generated code.
return GetCode(function);
}
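
Both custom-call compilers above change their bail-out value from
heap()->undefined_value() to Handle<Code>::null(): an empty handle now means
"no specialized stub here, fall back to the regular call path". A small
sketch of that sentinel convention, again with a toy handle type:

    #include <cstdio>

    // Toy nullable handle mirroring Handle<T>::null() / is_null().
    template <typename T>
    class Handle {
     public:
      Handle() : ptr_(nullptr) {}
      explicit Handle(T* ptr) : ptr_(ptr) {}
      static Handle null() { return Handle(); }
      bool is_null() const { return ptr_ == nullptr; }
      T* get() const { return ptr_; }
     private:
      T* ptr_;
    };

    struct Code { const char* name; };

    Code array_pop_stub{"specialized array-pop stub"};

    // Returns a specialized stub, or a null handle to request the generic
    // path, like CompileArrayPopCall above.
    Handle<Code> CompileArrayPopCall(bool receiver_is_array) {
      if (!receiver_is_array) return Handle<Code>::null();  // bail out
      return Handle<Code>(&array_pop_stub);
    }

    int main() {
      Handle<Code> code = CompileArrayPopCall(false);
      if (code.is_null()) {
        std::printf("fall back to the regular call stub\n");
      } else {
        std::printf("use %s\n", code.get()->name);
      }
      return 0;
    }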
-MaybeObject* CallStubCompiler::CompileStringCharCodeAtCall(
- Object* object,
- JSObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
+Handle<Code> CallStubCompiler::CompileStringCharCodeAtCall(
+ Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<JSFunction> function,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- ecx : function name
// -- esp[0] : return address
@@ -1641,8 +1594,8 @@ MaybeObject* CallStubCompiler::CompileStringCharCodeAtCall(
// -----------------------------------
// If object is not a string, bail out to regular call.
- if (!object->IsString() || cell != NULL) {
- return isolate()->heap()->undefined_value();
+ if (!object->IsString() || !cell.is_null()) {
+ return Handle<Code>::null();
}
const int argc = arguments().immediate();
@@ -1653,7 +1606,7 @@ MaybeObject* CallStubCompiler::CompileStringCharCodeAtCall(
Label* index_out_of_range_label = &index_out_of_range;
if (kind_ == Code::CALL_IC &&
- (CallICBase::StringStubState::decode(extra_ic_state_) ==
+ (CallICBase::StringStubState::decode(extra_state_) ==
DEFAULT_STRING_STUB)) {
index_out_of_range_label = &miss;
}
@@ -1665,13 +1618,12 @@ MaybeObject* CallStubCompiler::CompileStringCharCodeAtCall(
Context::STRING_FUNCTION_INDEX,
eax,
&miss);
- ASSERT(object != holder);
- CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
- ebx, edx, edi, name, &miss);
+ ASSERT(!object.is_identical_to(holder));
+ CheckPrototypes(Handle<JSObject>(JSObject::cast(object->GetPrototype())),
+ eax, holder, ebx, edx, edi, name, &miss);
Register receiver = ebx;
Register index = edi;
- Register scratch = edx;
Register result = eax;
__ mov(receiver, Operand(esp, (argc + 1) * kPointerSize));
if (argc > 0) {
@@ -1680,19 +1632,18 @@ MaybeObject* CallStubCompiler::CompileStringCharCodeAtCall(
__ Set(index, Immediate(factory()->undefined_value()));
}
- StringCharCodeAtGenerator char_code_at_generator(receiver,
- index,
- scratch,
- result,
- &miss, // When not a string.
- &miss, // When not a number.
- index_out_of_range_label,
- STRING_INDEX_IS_NUMBER);
- char_code_at_generator.GenerateFast(masm());
+ StringCharCodeAtGenerator generator(receiver,
+ index,
+ result,
+ &miss, // When not a string.
+ &miss, // When not a number.
+ index_out_of_range_label,
+ STRING_INDEX_IS_NUMBER);
+ generator.GenerateFast(masm());
__ ret((argc + 1) * kPointerSize);
StubRuntimeCallHelper call_helper;
- char_code_at_generator.GenerateSlow(masm(), call_helper);
+ generator.GenerateSlow(masm(), call_helper);
if (index_out_of_range.is_linked()) {
__ bind(&index_out_of_range);
@@ -1702,22 +1653,21 @@ MaybeObject* CallStubCompiler::CompileStringCharCodeAtCall(
__ bind(&miss);
// Restore function name in ecx.
- __ Set(ecx, Immediate(Handle<String>(name)));
+ __ Set(ecx, Immediate(name));
__ bind(&name_miss);
- MaybeObject* maybe_result = GenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
+ GenerateMissBranch();
// Return the generated code.
return GetCode(function);
}
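
The charCodeAt stub shows the generator pattern used by all of these
built-in specializations: GenerateFast() emits the inline happy path, and
GenerateSlow() emits out-of-line fallback code reached only in rare cases
such as a non-flat string, with out-of-range indices branching to their own
label. A shape-only sketch of that split (the flatness flag and sentinel
are stand-ins, not the real string machinery):

    #include <cstdio>

    const int kOutOfRange = -1;

    // Out-of-line fallback, standing in for GenerateSlow()'s runtime call;
    // here it just recomputes the answer.
    int SlowCharCodeAt(const char* s, int i) {
      std::printf("slow path\n");
      return static_cast<unsigned char>(s[i]);
    }

    // Shape of the generated stub: inline range check and fast load, with
    // the rare cases deferred out of line.
    int CharCodeAt(const char* s, int length, int index, bool is_flat) {
      if (index < 0 || index >= length) return kOutOfRange;  // bailout
      if (is_flat) {
        return static_cast<unsigned char>(s[index]);  // GenerateFast
      }
      return SlowCharCodeAt(s, index);                // GenerateSlow
    }

    int main() {
      std::printf("%d\n", CharCodeAt("abc", 3, 1, true));   // fast: 98
      std::printf("%d\n", CharCodeAt("abc", 3, 1, false));  // slow: 98
      std::printf("%d\n", CharCodeAt("abc", 3, 9, true));   // -1
      return 0;
    }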
-MaybeObject* CallStubCompiler::CompileStringCharAtCall(
- Object* object,
- JSObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
+Handle<Code> CallStubCompiler::CompileStringCharAtCall(
+ Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<JSFunction> function,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- ecx : function name
// -- esp[0] : return address
@@ -1727,8 +1677,8 @@ MaybeObject* CallStubCompiler::CompileStringCharAtCall(
// -----------------------------------
// If object is not a string, bail out to regular call.
- if (!object->IsString() || cell != NULL) {
- return heap()->undefined_value();
+ if (!object->IsString() || !cell.is_null()) {
+ return Handle<Code>::null();
}
const int argc = arguments().immediate();
@@ -1739,7 +1689,7 @@ MaybeObject* CallStubCompiler::CompileStringCharAtCall(
Label* index_out_of_range_label = &index_out_of_range;
if (kind_ == Code::CALL_IC &&
- (CallICBase::StringStubState::decode(extra_ic_state_) ==
+ (CallICBase::StringStubState::decode(extra_state_) ==
DEFAULT_STRING_STUB)) {
index_out_of_range_label = &miss;
}
@@ -1751,14 +1701,13 @@ MaybeObject* CallStubCompiler::CompileStringCharAtCall(
Context::STRING_FUNCTION_INDEX,
eax,
&miss);
- ASSERT(object != holder);
- CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
- ebx, edx, edi, name, &miss);
+ ASSERT(!object.is_identical_to(holder));
+ CheckPrototypes(Handle<JSObject>(JSObject::cast(object->GetPrototype())),
+ eax, holder, ebx, edx, edi, name, &miss);
Register receiver = eax;
Register index = edi;
- Register scratch1 = ebx;
- Register scratch2 = edx;
+ Register scratch = edx;
Register result = eax;
__ mov(receiver, Operand(esp, (argc + 1) * kPointerSize));
if (argc > 0) {
@@ -1767,20 +1716,19 @@ MaybeObject* CallStubCompiler::CompileStringCharAtCall(
__ Set(index, Immediate(factory()->undefined_value()));
}
- StringCharAtGenerator char_at_generator(receiver,
- index,
- scratch1,
- scratch2,
- result,
- &miss, // When not a string.
- &miss, // When not a number.
- index_out_of_range_label,
- STRING_INDEX_IS_NUMBER);
- char_at_generator.GenerateFast(masm());
+ StringCharAtGenerator generator(receiver,
+ index,
+ scratch,
+ result,
+ &miss, // When not a string.
+ &miss, // When not a number.
+ index_out_of_range_label,
+ STRING_INDEX_IS_NUMBER);
+ generator.GenerateFast(masm());
__ ret((argc + 1) * kPointerSize);
StubRuntimeCallHelper call_helper;
- char_at_generator.GenerateSlow(masm(), call_helper);
+ generator.GenerateSlow(masm(), call_helper);
if (index_out_of_range.is_linked()) {
__ bind(&index_out_of_range);
@@ -1790,22 +1738,21 @@ MaybeObject* CallStubCompiler::CompileStringCharAtCall(
__ bind(&miss);
// Restore function name in ecx.
- __ Set(ecx, Immediate(Handle<String>(name)));
+ __ Set(ecx, Immediate(name));
__ bind(&name_miss);
- MaybeObject* maybe_result = GenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
+ GenerateMissBranch();
// Return the generated code.
return GetCode(function);
}
-MaybeObject* CallStubCompiler::CompileStringFromCharCodeCall(
- Object* object,
- JSObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
+Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
+ Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<JSFunction> function,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- ecx : function name
// -- esp[0] : return address
@@ -1819,23 +1766,22 @@ MaybeObject* CallStubCompiler::CompileStringFromCharCodeCall(
// If the object is not a JSObject or we got an unexpected number of
// arguments, bail out to the regular call.
if (!object->IsJSObject() || argc != 1) {
- return isolate()->heap()->undefined_value();
+ return Handle<Code>::null();
}
Label miss;
GenerateNameCheck(name, &miss);
- if (cell == NULL) {
+ if (cell.is_null()) {
__ mov(edx, Operand(esp, 2 * kPointerSize));
-
STATIC_ASSERT(kSmiTag == 0);
__ JumpIfSmi(edx, &miss);
-
- CheckPrototypes(JSObject::cast(object), edx, holder, ebx, eax, edi, name,
- &miss);
+ CheckPrototypes(Handle<JSObject>::cast(object), edx, holder, ebx, eax, edi,
+ name, &miss);
} else {
- ASSERT(cell->value() == function);
- GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss);
+ ASSERT(cell->value() == *function);
+ GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
+ &miss);
GenerateLoadFunctionFromCell(cell, function, &miss);
}
@@ -1851,17 +1797,17 @@ MaybeObject* CallStubCompiler::CompileStringFromCharCodeCall(
// Convert the smi code to uint16.
__ and_(code, Immediate(Smi::FromInt(0xffff)));
- StringCharFromCodeGenerator char_from_code_generator(code, eax);
- char_from_code_generator.GenerateFast(masm());
+ StringCharFromCodeGenerator generator(code, eax);
+ generator.GenerateFast(masm());
__ ret(2 * kPointerSize);
StubRuntimeCallHelper call_helper;
- char_from_code_generator.GenerateSlow(masm(), call_helper);
+ generator.GenerateSlow(masm(), call_helper);
// Tail call the full function. We do not have to patch the receiver
// because the function makes no use of it.
__ bind(&slow);
- CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+ CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
? CALL_AS_FUNCTION
: CALL_AS_METHOD;
__ InvokeFunction(function, arguments(), JUMP_FUNCTION,
@@ -1869,19 +1815,19 @@ MaybeObject* CallStubCompiler::CompileStringFromCharCodeCall(
__ bind(&miss);
// ecx: function name.
- MaybeObject* maybe_result = GenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
+ GenerateMissBranch();
// Return the generated code.
- return (cell == NULL) ? GetCode(function) : GetCode(NORMAL, name);
+ return cell.is_null() ? GetCode(function) : GetCode(NORMAL, name);
}
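
The uint16 conversion above, and_(code, Immediate(Smi::FromInt(0xffff))),
operates directly on the tagged value: with the ia32 smi encoding (payload
shifted left one bit, tag bit zero), ANDing two tagged smis masks the
payload while the result remains a valid smi, so no untag/retag is needed.
A worked sketch under that assumed representation:

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    // ia32 smi representation assumed here: value << 1, low tag bit == 0.
    const int kSmiTagSize = 1;

    int32_t SmiFromInt(int32_t value) { return value << kSmiTagSize; }
    int32_t SmiToInt(int32_t smi) { return smi >> kSmiTagSize; }

    int main() {
      int32_t code = SmiFromInt(0x12345);      // tagged char code
      // Tagged AND: masks the payload to 16 bits and leaves the tag bit
      // zero, so the result is still a valid smi.
      int32_t masked = code & SmiFromInt(0xffff);
      assert(SmiToInt(masked) == (0x12345 & 0xffff));
      std::printf("0x%x\n", SmiToInt(masked));  // prints 0x2345
      return 0;
    }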
-MaybeObject* CallStubCompiler::CompileMathFloorCall(Object* object,
- JSObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
+Handle<Code> CallStubCompiler::CompileMathFloorCall(
+ Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<JSFunction> function,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- ecx : name
// -- esp[0] : return address
@@ -1891,7 +1837,7 @@ MaybeObject* CallStubCompiler::CompileMathFloorCall(Object* object,
// -----------------------------------
if (!CpuFeatures::IsSupported(SSE2)) {
- return isolate()->heap()->undefined_value();
+ return Handle<Code>::null();
}
CpuFeatures::Scope use_sse2(SSE2);
@@ -1901,23 +1847,24 @@ MaybeObject* CallStubCompiler::CompileMathFloorCall(Object* object,
// If the object is not a JSObject or we got an unexpected number of
// arguments, bail out to the regular call.
if (!object->IsJSObject() || argc != 1) {
- return isolate()->heap()->undefined_value();
+ return Handle<Code>::null();
}
Label miss;
GenerateNameCheck(name, &miss);
- if (cell == NULL) {
+ if (cell.is_null()) {
__ mov(edx, Operand(esp, 2 * kPointerSize));
STATIC_ASSERT(kSmiTag == 0);
__ JumpIfSmi(edx, &miss);
- CheckPrototypes(JSObject::cast(object), edx, holder, ebx, eax, edi, name,
- &miss);
+ CheckPrototypes(Handle<JSObject>::cast(object), edx, holder, ebx, eax, edi,
+ name, &miss);
} else {
- ASSERT(cell->value() == function);
- GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss);
+ ASSERT(cell->value() == *function);
+ GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
+ &miss);
GenerateLoadFunctionFromCell(cell, function, &miss);
}
@@ -1998,19 +1945,19 @@ MaybeObject* CallStubCompiler::CompileMathFloorCall(Object* object,
__ bind(&miss);
// ecx: function name.
- MaybeObject* maybe_result = GenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
+ GenerateMissBranch();
// Return the generated code.
- return (cell == NULL) ? GetCode(function) : GetCode(NORMAL, name);
+ return cell.is_null() ? GetCode(function) : GetCode(NORMAL, name);
}
-MaybeObject* CallStubCompiler::CompileMathAbsCall(Object* object,
- JSObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
+Handle<Code> CallStubCompiler::CompileMathAbsCall(
+ Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<JSFunction> function,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- ecx : name
// -- esp[0] : return address
@@ -2024,23 +1971,24 @@ MaybeObject* CallStubCompiler::CompileMathAbsCall(Object* object,
// If the object is not a JSObject or we got an unexpected number of
// arguments, bail out to the regular call.
if (!object->IsJSObject() || argc != 1) {
- return isolate()->heap()->undefined_value();
+ return Handle<Code>::null();
}
Label miss;
GenerateNameCheck(name, &miss);
- if (cell == NULL) {
+ if (cell.is_null()) {
__ mov(edx, Operand(esp, 2 * kPointerSize));
STATIC_ASSERT(kSmiTag == 0);
__ JumpIfSmi(edx, &miss);
- CheckPrototypes(JSObject::cast(object), edx, holder, ebx, eax, edi, name,
- &miss);
+ CheckPrototypes(Handle<JSObject>::cast(object), edx, holder, ebx, eax, edi,
+ name, &miss);
} else {
- ASSERT(cell->value() == function);
- GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss);
+ ASSERT(cell->value() == *function);
+ GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
+ &miss);
GenerateLoadFunctionFromCell(cell, function, &miss);
}
@@ -2058,10 +2006,10 @@ MaybeObject* CallStubCompiler::CompileMathAbsCall(Object* object,
__ sar(ebx, kBitsPerInt - 1);
// Do bitwise not or do nothing depending on ebx.
- __ xor_(eax, Operand(ebx));
+ __ xor_(eax, ebx);
// Add 1 or do nothing depending on ebx.
- __ sub(eax, Operand(ebx));
+ __ sub(eax, ebx);
// If the result is still negative, go to the slow case.
// This only happens for the most negative smi.
@@ -2102,30 +2050,29 @@ MaybeObject* CallStubCompiler::CompileMathAbsCall(Object* object,
__ bind(&miss);
// ecx: function name.
- MaybeObject* maybe_result = GenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
+ GenerateMissBranch();
// Return the generated code.
- return (cell == NULL) ? GetCode(function) : GetCode(NORMAL, name);
+ return cell.is_null() ? GetCode(function) : GetCode(NORMAL, name);
}
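
The Math.abs fast path keeps the classic branch-free two's-complement
sequence, now just with the tidier register-operand syntax: sar copies the
sign bit into a mask, xor conditionally complements, sub conditionally adds
one. A C++ transliteration of the three instructions (assuming an
arithmetic right shift, with the intermediate math done unsigned to avoid
signed overflow):

    #include <cassert>
    #include <cstdint>

    // Mirrors: sar ebx, 31 ; xor eax, ebx ; sub eax, ebx.
    int32_t BranchlessAbs(int32_t x) {
      uint32_t ux = static_cast<uint32_t>(x);
      uint32_t mask = static_cast<uint32_t>(x >> 31);   // 0 or 0xffffffff
      return static_cast<int32_t>((ux ^ mask) - mask);  // x, or ~x + 1
    }

    int main() {
      assert(BranchlessAbs(5) == 5);
      assert(BranchlessAbs(-5) == 5);
      // The failure mode the stub also guards against: the most negative
      // value has no positive counterpart, so the result stays negative
      // and the generated code falls through to the slow case.
      assert(BranchlessAbs(INT32_MIN) == INT32_MIN);
      return 0;
    }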
-MaybeObject* CallStubCompiler::CompileFastApiCall(
+Handle<Code> CallStubCompiler::CompileFastApiCall(
const CallOptimization& optimization,
- Object* object,
- JSObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
+ Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<JSFunction> function,
+ Handle<String> name) {
ASSERT(optimization.is_simple_api_call());
 // Bail out if the object is a global object, as we don't want to
 // repatch it to the global receiver.
- if (object->IsGlobalObject()) return heap()->undefined_value();
- if (cell != NULL) return heap()->undefined_value();
- if (!object->IsJSObject()) return heap()->undefined_value();
+ if (object->IsGlobalObject()) return Handle<Code>::null();
+ if (!cell.is_null()) return Handle<Code>::null();
+ if (!object->IsJSObject()) return Handle<Code>::null();
int depth = optimization.GetPrototypeDepthOfExpectedType(
- JSObject::cast(object), holder);
- if (depth == kInvalidProtoDepth) return heap()->undefined_value();
+ Handle<JSObject>::cast(object), holder);
+ if (depth == kInvalidProtoDepth) return Handle<Code>::null();
Label miss, miss_before_stack_reserved;
@@ -2144,11 +2091,11 @@ MaybeObject* CallStubCompiler::CompileFastApiCall(
// Allocate space for v8::Arguments implicit values. Must be initialized
// before calling any runtime function.
- __ sub(Operand(esp), Immediate(kFastApiCallArguments * kPointerSize));
+ __ sub(esp, Immediate(kFastApiCallArguments * kPointerSize));
// Check that the maps haven't changed and find a Holder as a side effect.
- CheckPrototypes(JSObject::cast(object), edx, holder,
- ebx, eax, edi, name, depth, &miss);
+ CheckPrototypes(Handle<JSObject>::cast(object), edx, holder, ebx, eax, edi,
+ name, depth, &miss);
// Move the return address on top of the stack.
__ mov(eax, Operand(esp, 3 * kPointerSize));
@@ -2156,27 +2103,24 @@ MaybeObject* CallStubCompiler::CompileFastApiCall(
// esp[2 * kPointerSize] is uninitialized, esp[3 * kPointerSize] contains
// duplicate of return address and will be overwritten.
- MaybeObject* result = GenerateFastApiCall(masm(), optimization, argc);
- if (result->IsFailure()) return result;
+ GenerateFastApiCall(masm(), optimization, argc);
__ bind(&miss);
- __ add(Operand(esp), Immediate(kFastApiCallArguments * kPointerSize));
+ __ add(esp, Immediate(kFastApiCallArguments * kPointerSize));
__ bind(&miss_before_stack_reserved);
- MaybeObject* maybe_result = GenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
+ GenerateMissBranch();
// Return the generated code.
return GetCode(function);
}
-MaybeObject* CallStubCompiler::CompileCallConstant(
- Object* object,
- JSObject* holder,
- JSFunction* function,
- String* name,
- CheckType check) {
+Handle<Code> CallStubCompiler::CompileCallConstant(Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<JSFunction> function,
+ Handle<String> name,
+ CheckType check) {
// ----------- S t a t e -------------
// -- ecx : name
// -- esp[0] : return address
@@ -2186,16 +2130,14 @@ MaybeObject* CallStubCompiler::CompileCallConstant(
// -----------------------------------
if (HasCustomCallGenerator(function)) {
- MaybeObject* maybe_result = CompileCustomCall(
- object, holder, NULL, function, name);
- Object* result;
- if (!maybe_result->ToObject(&result)) return maybe_result;
- // undefined means bail out to regular compiler.
- if (!result->IsUndefined()) return result;
+ Handle<Code> code = CompileCustomCall(object, holder,
+ Handle<JSGlobalPropertyCell>::null(),
+ function, name);
+ // A null handle means bail out to the regular compiler code below.
+ if (!code.is_null()) return code;
}
Label miss;
-
GenerateNameCheck(name, &miss);
// Get the receiver from the stack.
@@ -2210,15 +2152,13 @@ MaybeObject* CallStubCompiler::CompileCallConstant(
// Make sure that it's okay not to patch the on stack receiver
// unless we're doing a receiver map check.
ASSERT(!object->IsGlobalObject() || check == RECEIVER_MAP_CHECK);
-
- SharedFunctionInfo* function_info = function->shared();
switch (check) {
case RECEIVER_MAP_CHECK:
__ IncrementCounter(isolate()->counters()->call_const(), 1);
// Check that the maps haven't changed.
- CheckPrototypes(JSObject::cast(object), edx, holder,
- ebx, eax, edi, name, &miss);
+ CheckPrototypes(Handle<JSObject>::cast(object), edx, holder, ebx, eax,
+ edi, name, &miss);
// Patch the receiver on the stack with the global proxy if
// necessary.
@@ -2229,28 +2169,25 @@ MaybeObject* CallStubCompiler::CompileCallConstant(
break;
case STRING_CHECK:
- if (!function->IsBuiltin() && !function_info->strict_mode()) {
- // Calling non-strict non-builtins with a value as the receiver
- // requires boxing.
- __ jmp(&miss);
- } else {
+ if (function->IsBuiltin() || !function->shared()->is_classic_mode()) {
// Check that the object is a string or a symbol.
__ CmpObjectType(edx, FIRST_NONSTRING_TYPE, eax);
__ j(above_equal, &miss);
// Check that the maps starting from the prototype haven't changed.
GenerateDirectLoadGlobalFunctionPrototype(
masm(), Context::STRING_FUNCTION_INDEX, eax, &miss);
- CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
- ebx, edx, edi, name, &miss);
- }
- break;
-
- case NUMBER_CHECK: {
- if (!function->IsBuiltin() && !function_info->strict_mode()) {
+ CheckPrototypes(
+ Handle<JSObject>(JSObject::cast(object->GetPrototype())),
+ eax, holder, ebx, edx, edi, name, &miss);
+ } else {
// Calling non-strict non-builtins with a value as the receiver
// requires boxing.
__ jmp(&miss);
- } else {
+ }
+ break;
+
+ case NUMBER_CHECK:
+ if (function->IsBuiltin() || !function->shared()->is_classic_mode()) {
Label fast;
// Check that the object is a smi or a heap number.
__ JumpIfSmi(edx, &fast);
@@ -2260,18 +2197,18 @@ MaybeObject* CallStubCompiler::CompileCallConstant(
// Check that the maps starting from the prototype haven't changed.
GenerateDirectLoadGlobalFunctionPrototype(
masm(), Context::NUMBER_FUNCTION_INDEX, eax, &miss);
- CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
- ebx, edx, edi, name, &miss);
- }
- break;
- }
-
- case BOOLEAN_CHECK: {
- if (!function->IsBuiltin() && !function_info->strict_mode()) {
+ CheckPrototypes(
+ Handle<JSObject>(JSObject::cast(object->GetPrototype())),
+ eax, holder, ebx, edx, edi, name, &miss);
+ } else {
// Calling non-strict non-builtins with a value as the receiver
// requires boxing.
__ jmp(&miss);
- } else {
+ }
+ break;
+
+ case BOOLEAN_CHECK:
+ if (function->IsBuiltin() || !function->shared()->is_classic_mode()) {
Label fast;
// Check that the object is a boolean.
__ cmp(edx, factory()->true_value());
@@ -2282,17 +2219,18 @@ MaybeObject* CallStubCompiler::CompileCallConstant(
// Check that the maps starting from the prototype haven't changed.
GenerateDirectLoadGlobalFunctionPrototype(
masm(), Context::BOOLEAN_FUNCTION_INDEX, eax, &miss);
- CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
- ebx, edx, edi, name, &miss);
+ CheckPrototypes(
+ Handle<JSObject>(JSObject::cast(object->GetPrototype())),
+ eax, holder, ebx, edx, edi, name, &miss);
+ } else {
+ // Calling non-strict non-builtins with a value as the receiver
+ // requires boxing.
+ __ jmp(&miss);
}
break;
- }
-
- default:
- UNREACHABLE();
}
- CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+ CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
? CALL_AS_FUNCTION
: CALL_AS_METHOD;
__ InvokeFunction(function, arguments(), JUMP_FUNCTION,
@@ -2300,17 +2238,16 @@ MaybeObject* CallStubCompiler::CompileCallConstant(
// Handle call cache miss.
__ bind(&miss);
- MaybeObject* maybe_result = GenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
+ GenerateMissBranch();
// Return the generated code.
return GetCode(function);
}
-MaybeObject* CallStubCompiler::CompileCallInterceptor(JSObject* object,
- JSObject* holder,
- String* name) {
+Handle<Code> CallStubCompiler::CompileCallInterceptor(Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- ecx : name
// -- esp[0] : return address
@@ -2325,24 +2262,15 @@ MaybeObject* CallStubCompiler::CompileCallInterceptor(JSObject* object,
// Get the number of arguments.
const int argc = arguments().immediate();
- LookupResult lookup;
+ LookupResult lookup(isolate());
LookupPostInterceptor(holder, name, &lookup);
// Get the receiver from the stack.
__ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
- CallInterceptorCompiler compiler(this, arguments(), ecx, extra_ic_state_);
- MaybeObject* result = compiler.Compile(masm(),
- object,
- holder,
- name,
- &lookup,
- edx,
- ebx,
- edi,
- eax,
- &miss);
- if (result->IsFailure()) return result;
+ CallInterceptorCompiler compiler(this, arguments(), ecx, extra_state_);
+ compiler.Compile(masm(), object, holder, name, &lookup, edx, ebx, edi, eax,
+ &miss);
// Restore receiver.
__ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
@@ -2361,7 +2289,7 @@ MaybeObject* CallStubCompiler::CompileCallInterceptor(JSObject* object,
// Invoke the function.
__ mov(edi, eax);
- CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+ CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
? CALL_AS_FUNCTION
: CALL_AS_METHOD;
__ InvokeFunction(edi, arguments(), JUMP_FUNCTION,
@@ -2369,20 +2297,19 @@ MaybeObject* CallStubCompiler::CompileCallInterceptor(JSObject* object,
// Handle load cache miss.
__ bind(&miss);
- MaybeObject* maybe_result = GenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
+ GenerateMissBranch();
// Return the generated code.
return GetCode(INTERCEPTOR, name);
}
-MaybeObject* CallStubCompiler::CompileCallGlobal(
- JSObject* object,
- GlobalObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
+Handle<Code> CallStubCompiler::CompileCallGlobal(
+ Handle<JSObject> object,
+ Handle<GlobalObject> holder,
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<JSFunction> function,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- ecx : name
// -- esp[0] : return address
@@ -2392,23 +2319,17 @@ MaybeObject* CallStubCompiler::CompileCallGlobal(
// -----------------------------------
if (HasCustomCallGenerator(function)) {
- MaybeObject* maybe_result = CompileCustomCall(
- object, holder, cell, function, name);
- Object* result;
- if (!maybe_result->ToObject(&result)) return maybe_result;
- // undefined means bail out to regular compiler.
- if (!result->IsUndefined()) return result;
+ Handle<Code> code = CompileCustomCall(object, holder, cell, function, name);
+ // A null handle means bail out to the regular compiler code below.
+ if (!code.is_null()) return code;
}
Label miss;
-
GenerateNameCheck(name, &miss);
// Get the number of arguments.
const int argc = arguments().immediate();
-
GenerateGlobalReceiverCheck(object, holder, name, &miss);
-
GenerateLoadFunctionFromCell(cell, function, &miss);
// Patch the receiver on the stack with the global proxy.
@@ -2423,40 +2344,31 @@ MaybeObject* CallStubCompiler::CompileCallGlobal(
// Jump to the cached code (tail call).
Counters* counters = isolate()->counters();
__ IncrementCounter(counters->call_global_inline(), 1);
- ASSERT(function->is_compiled());
ParameterCount expected(function->shared()->formal_parameter_count());
- CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+ CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
? CALL_AS_FUNCTION
: CALL_AS_METHOD;
- if (V8::UseCrankshaft()) {
- // TODO(kasperl): For now, we always call indirectly through the
- // code field in the function to allow recompilation to take effect
- // without changing any of the call sites.
- __ InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
- expected, arguments(), JUMP_FUNCTION,
- NullCallWrapper(), call_kind);
- } else {
- Handle<Code> code(function->code());
- __ InvokeCode(code, expected, arguments(),
- RelocInfo::CODE_TARGET, JUMP_FUNCTION,
- NullCallWrapper(), call_kind);
- }
+ // We call indirectly through the code field in the function to
+ // allow recompilation to take effect without changing any of the
+ // call sites.
+ __ InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
+ expected, arguments(), JUMP_FUNCTION,
+ NullCallWrapper(), call_kind);
// Handle call cache miss.
__ bind(&miss);
__ IncrementCounter(counters->call_global_inline_miss(), 1);
- MaybeObject* maybe_result = GenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
+ GenerateMissBranch();
// Return the generated code.
return GetCode(NORMAL, name);
}
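
The Crankshaft conditional disappears here: calls now always load the entry
point from the function's code field, so recompilation (optimizing or
deoptimizing) takes effect without patching any call sites. A minimal
sketch of that indirection with a toy JSFunction:

    #include <cstdio>

    using CodeEntry = void (*)();

    void unoptimized() { std::printf("unoptimized code\n"); }
    void optimized() { std::printf("optimized code\n"); }

    // Toy JSFunction: the call site loads the entry point from the
    // function object on every call instead of embedding a direct target.
    struct JSFunction {
      CodeEntry code_entry;
    };

    void InvokeCode(JSFunction* fn) {
      fn->code_entry();  // indirect call through the code field
    }

    int main() {
      JSFunction fn{unoptimized};
      InvokeCode(&fn);            // runs the current code
      fn.code_entry = optimized;  // "recompilation" swaps the entry...
      InvokeCode(&fn);            // ...and existing call sites pick it up
      return 0;
    }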
-MaybeObject* StoreStubCompiler::CompileStoreField(JSObject* object,
+Handle<Code> StoreStubCompiler::CompileStoreField(Handle<JSObject> object,
int index,
- Map* transition,
- String* name) {
+ Handle<Map> transition,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- eax : value
// -- ecx : name
@@ -2466,27 +2378,23 @@ MaybeObject* StoreStubCompiler::CompileStoreField(JSObject* object,
Label miss;
// Generate store field code. Trashes the name register.
- GenerateStoreField(masm(),
- object,
- index,
- transition,
- edx, ecx, ebx,
- &miss);
+ GenerateStoreField(masm(), object, index, transition, edx, ecx, ebx, &miss);
// Handle store cache miss.
__ bind(&miss);
- __ mov(ecx, Immediate(Handle<String>(name))); // restore name
+ __ mov(ecx, Immediate(name)); // restore name
Handle<Code> ic = isolate()->builtins()->StoreIC_Miss();
__ jmp(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
- return GetCode(transition == NULL ? FIELD : MAP_TRANSITION, name);
+ return GetCode(transition.is_null() ? FIELD : MAP_TRANSITION, name);
}
-MaybeObject* StoreStubCompiler::CompileStoreCallback(JSObject* object,
- AccessorInfo* callback,
- String* name) {
+Handle<Code> StoreStubCompiler::CompileStoreCallback(
+ Handle<JSObject> object,
+ Handle<AccessorInfo> callback,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- eax : value
// -- ecx : name
@@ -2514,7 +2422,7 @@ MaybeObject* StoreStubCompiler::CompileStoreCallback(JSObject* object,
__ pop(ebx); // remove the return address
__ push(edx); // receiver
- __ push(Immediate(Handle<AccessorInfo>(callback))); // callback info
+ __ push(Immediate(callback)); // callback info
__ push(ecx); // name
__ push(eax); // value
__ push(ebx); // restore return address
@@ -2534,8 +2442,9 @@ MaybeObject* StoreStubCompiler::CompileStoreCallback(JSObject* object,
}
-MaybeObject* StoreStubCompiler::CompileStoreInterceptor(JSObject* receiver,
- String* name) {
+Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
+ Handle<JSObject> receiver,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- eax : value
// -- ecx : name
@@ -2583,9 +2492,10 @@ MaybeObject* StoreStubCompiler::CompileStoreInterceptor(JSObject* receiver,
}
-MaybeObject* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object,
- JSGlobalPropertyCell* cell,
- String* name) {
+Handle<Code> StoreStubCompiler::CompileStoreGlobal(
+ Handle<GlobalObject> object,
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- eax : value
// -- ecx : name
@@ -2599,13 +2509,9 @@ MaybeObject* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object,
Immediate(Handle<Map>(object->map())));
__ j(not_equal, &miss);
-
// Compute the cell operand to use.
- Operand cell_operand = Operand::Cell(Handle<JSGlobalPropertyCell>(cell));
- if (Serializer::enabled()) {
- __ mov(ebx, Immediate(Handle<JSGlobalPropertyCell>(cell)));
- cell_operand = FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset);
- }
+ __ mov(ebx, Immediate(cell));
+ Operand cell_operand = FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset);
// Check that the value in the cell is not the hole. If it is, this
// cell could have been deleted and reintroducing the global needs
@@ -2616,8 +2522,23 @@ MaybeObject* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object,
// Store the value in the cell.
__ mov(cell_operand, eax);
+ Label done;
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(zero, &done);
+
+ __ mov(ecx, eax);
+ __ lea(edx, cell_operand);
+ // Cells are always in the remembered set.
+ __ RecordWrite(ebx, // Object.
+ edx, // Address.
+ ecx, // Value.
+ kDontSaveFPRegs,
+ OMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
// Return the value (register eax).
+ __ bind(&done);
+
Counters* counters = isolate()->counters();
__ IncrementCounter(counters->named_store_global_inline(), 1);
__ ret(0);
@@ -2633,10 +2554,10 @@ MaybeObject* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object,
}
-MaybeObject* KeyedStoreStubCompiler::CompileStoreField(JSObject* object,
+Handle<Code> KeyedStoreStubCompiler::CompileStoreField(Handle<JSObject> object,
int index,
- Map* transition,
- String* name) {
+ Handle<Map> transition,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- eax : value
// -- ecx : key
@@ -2649,16 +2570,11 @@ MaybeObject* KeyedStoreStubCompiler::CompileStoreField(JSObject* object,
__ IncrementCounter(counters->keyed_store_field(), 1);
// Check that the name has not changed.
- __ cmp(Operand(ecx), Immediate(Handle<String>(name)));
+ __ cmp(ecx, Immediate(name));
__ j(not_equal, &miss);
// Generate store field code. Trashes the name register.
- GenerateStoreField(masm(),
- object,
- index,
- transition,
- edx, ecx, ebx,
- &miss);
+ GenerateStoreField(masm(), object, index, transition, edx, ecx, ebx, &miss);
// Handle store cache miss.
__ bind(&miss);
@@ -2667,39 +2583,37 @@ MaybeObject* KeyedStoreStubCompiler::CompileStoreField(JSObject* object,
__ jmp(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
- return GetCode(transition == NULL ? FIELD : MAP_TRANSITION, name);
+ return GetCode(transition.is_null() ? FIELD : MAP_TRANSITION, name);
}
-MaybeObject* KeyedStoreStubCompiler::CompileStoreElement(Map* receiver_map) {
+Handle<Code> KeyedStoreStubCompiler::CompileStoreElement(
+ Handle<Map> receiver_map) {
// ----------- S t a t e -------------
// -- eax : value
// -- ecx : key
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
- Code* stub;
ElementsKind elements_kind = receiver_map->elements_kind();
bool is_jsarray = receiver_map->instance_type() == JS_ARRAY_TYPE;
- MaybeObject* maybe_stub =
- KeyedStoreElementStub(is_jsarray, elements_kind).TryGetCode();
- if (!maybe_stub->To(&stub)) return maybe_stub;
- __ DispatchMap(edx,
- Handle<Map>(receiver_map),
- Handle<Code>(stub),
- DO_SMI_CHECK);
+ Handle<Code> stub =
+ KeyedStoreElementStub(is_jsarray, elements_kind).GetCode();
+
+ __ DispatchMap(edx, receiver_map, stub, DO_SMI_CHECK);
Handle<Code> ic = isolate()->builtins()->KeyedStoreIC_Miss();
__ jmp(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
- return GetCode(NORMAL, NULL);
+ return GetCode(NORMAL, factory()->empty_string());
}
-MaybeObject* KeyedStoreStubCompiler::CompileStoreMegamorphic(
- MapList* receiver_maps,
- CodeList* handler_ics) {
+Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic(
+ MapHandleList* receiver_maps,
+ CodeHandleList* handler_stubs,
+ MapHandleList* transitioned_maps) {
// ----------- S t a t e -------------
// -- eax : value
// -- ecx : key
@@ -2707,28 +2621,33 @@ MaybeObject* KeyedStoreStubCompiler::CompileStoreMegamorphic(
// -- esp[0] : return address
// -----------------------------------
Label miss;
- __ JumpIfSmi(edx, &miss);
-
- Register map_reg = ebx;
- __ mov(map_reg, FieldOperand(edx, HeapObject::kMapOffset));
- int receiver_count = receiver_maps->length();
- for (int current = 0; current < receiver_count; ++current) {
- Handle<Map> map(receiver_maps->at(current));
- __ cmp(map_reg, map);
- __ j(equal, Handle<Code>(handler_ics->at(current)));
+ __ JumpIfSmi(edx, &miss, Label::kNear);
+ __ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
+ // edi: receiver->map().
+ for (int i = 0; i < receiver_maps->length(); ++i) {
+ __ cmp(edi, receiver_maps->at(i));
+ if (transitioned_maps->at(i).is_null()) {
+ __ j(equal, handler_stubs->at(i));
+ } else {
+ Label next_map;
+ __ j(not_equal, &next_map, Label::kNear);
+ __ mov(ebx, Immediate(transitioned_maps->at(i)));
+ __ jmp(handler_stubs->at(i), RelocInfo::CODE_TARGET);
+ __ bind(&next_map);
+ }
}
__ bind(&miss);
Handle<Code> miss_ic = isolate()->builtins()->KeyedStoreIC_Miss();
__ jmp(miss_ic, RelocInfo::CODE_TARGET);
// Return the generated code.
- return GetCode(NORMAL, NULL, MEGAMORPHIC);
+ return GetCode(NORMAL, factory()->empty_string(), MEGAMORPHIC);
}
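
CompileStoreMegamorphic becomes CompileStorePolymorphic and gains a parallel
list of transitioned maps: on a map match the stub either jumps straight to
the handler or first materializes the target map (an elements-kind
transition) in ebx and then jumps. A table-driven sketch of that dispatch
with toy types; the map names follow the fast-elements kinds of this V8,
but the structure is illustrative only:

    #include <cstdio>
    #include <vector>

    struct Map { const char* name; };

    // One polymorphic case: a receiver map, its handler stub, and
    // optionally the map the receiver must transition to first.
    struct StoreCase {
      const Map* receiver_map;
      void (*handler)(const Map* transition);
      const Map* transitioned_map;  // null == no elements-kind transition
    };

    void FastStore(const Map* transition) {
      if (transition != nullptr) {
        std::printf("transition to %s, then store\n", transition->name);
      } else {
        std::printf("store in place\n");
      }
    }

    // Mirrors the emitted compare-and-jump chain: the first matching map
    // wins; anything else falls through to the generic miss stub.
    void Dispatch(const Map* map, const std::vector<StoreCase>& cases) {
      for (const StoreCase& c : cases) {
        if (map == c.receiver_map) {
          c.handler(c.transitioned_map);
          return;
        }
      }
      std::printf("miss: generic KeyedStoreIC\n");
    }

    int main() {
      Map smi_only{"FAST_SMI_ONLY_ELEMENTS"};
      Map fast{"FAST_ELEMENTS"};
      std::vector<StoreCase> cases = {
          {&smi_only, FastStore, &fast},  // non-smi store: transition first
          {&fast, FastStore, nullptr},    // already general enough
      };
      Dispatch(&smi_only, cases);
      Dispatch(&fast, cases);
      Dispatch(nullptr, cases);  // unknown map -> miss
      return 0;
    }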
-MaybeObject* LoadStubCompiler::CompileLoadNonexistent(String* name,
- JSObject* object,
- JSObject* last) {
+Handle<Code> LoadStubCompiler::CompileLoadNonexistent(Handle<String> name,
+ Handle<JSObject> object,
+ Handle<JSObject> last) {
// ----------- S t a t e -------------
// -- eax : receiver
// -- ecx : name
@@ -2749,15 +2668,8 @@ MaybeObject* LoadStubCompiler::CompileLoadNonexistent(String* name,
// If the last object in the prototype chain is a global object,
// check that the global property cell is empty.
if (last->IsGlobalObject()) {
- MaybeObject* cell = GenerateCheckPropertyCell(masm(),
- GlobalObject::cast(last),
- name,
- edx,
- &miss);
- if (cell->IsFailure()) {
- miss.Unuse();
- return cell;
- }
+ GenerateCheckPropertyCell(
+ masm(), Handle<GlobalObject>::cast(last), name, edx, &miss);
}
// Return undefined if maps of the full prototype chain are still the
@@ -2769,14 +2681,14 @@ MaybeObject* LoadStubCompiler::CompileLoadNonexistent(String* name,
GenerateLoadMiss(masm(), Code::LOAD_IC);
// Return the generated code.
- return GetCode(NONEXISTENT, isolate()->heap()->empty_string());
+ return GetCode(NONEXISTENT, factory()->empty_string());
}
-MaybeObject* LoadStubCompiler::CompileLoadField(JSObject* object,
- JSObject* holder,
+Handle<Code> LoadStubCompiler::CompileLoadField(Handle<JSObject> object,
+ Handle<JSObject> holder,
int index,
- String* name) {
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- eax : receiver
// -- ecx : name
@@ -2793,10 +2705,11 @@ MaybeObject* LoadStubCompiler::CompileLoadField(JSObject* object,
}
-MaybeObject* LoadStubCompiler::CompileLoadCallback(String* name,
- JSObject* object,
- JSObject* holder,
- AccessorInfo* callback) {
+Handle<Code> LoadStubCompiler::CompileLoadCallback(
+ Handle<String> name,
+ Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<AccessorInfo> callback) {
// ----------- S t a t e -------------
// -- eax : receiver
// -- ecx : name
@@ -2804,13 +2717,8 @@ MaybeObject* LoadStubCompiler::CompileLoadCallback(String* name,
// -----------------------------------
Label miss;
- MaybeObject* result = GenerateLoadCallback(object, holder, eax, ecx, ebx, edx,
- edi, callback, name, &miss);
- if (result->IsFailure()) {
- miss.Unuse();
- return result;
- }
-
+ GenerateLoadCallback(object, holder, eax, ecx, ebx, edx, edi, callback,
+ name, &miss);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::LOAD_IC);
@@ -2819,10 +2727,10 @@ MaybeObject* LoadStubCompiler::CompileLoadCallback(String* name,
}
-MaybeObject* LoadStubCompiler::CompileLoadConstant(JSObject* object,
- JSObject* holder,
- Object* value,
- String* name) {
+Handle<Code> LoadStubCompiler::CompileLoadConstant(Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<Object> value,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- eax : receiver
// -- ecx : name
@@ -2839,9 +2747,9 @@ MaybeObject* LoadStubCompiler::CompileLoadConstant(JSObject* object,
}
-MaybeObject* LoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
- JSObject* holder,
- String* name) {
+Handle<Code> LoadStubCompiler::CompileLoadInterceptor(Handle<JSObject> receiver,
+ Handle<JSObject> holder,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- eax : receiver
// -- ecx : name
@@ -2849,21 +2757,13 @@ MaybeObject* LoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
// -----------------------------------
Label miss;
- LookupResult lookup;
+ LookupResult lookup(isolate());
LookupPostInterceptor(holder, name, &lookup);
// TODO(368): Compile in the whole chain: all the interceptors in
// prototypes and ultimate answer.
- GenerateLoadInterceptor(receiver,
- holder,
- &lookup,
- eax,
- ecx,
- edx,
- ebx,
- edi,
- name,
- &miss);
+ GenerateLoadInterceptor(receiver, holder, &lookup, eax, ecx, edx, ebx, edi,
+ name, &miss);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::LOAD_IC);
@@ -2873,11 +2773,12 @@ MaybeObject* LoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
}
-MaybeObject* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
- GlobalObject* holder,
- JSGlobalPropertyCell* cell,
- String* name,
- bool is_dont_delete) {
+Handle<Code> LoadStubCompiler::CompileLoadGlobal(
+ Handle<JSObject> object,
+ Handle<GlobalObject> holder,
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<String> name,
+ bool is_dont_delete) {
// ----------- S t a t e -------------
// -- eax : receiver
// -- ecx : name
@@ -2888,7 +2789,7 @@ MaybeObject* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
  // If the object is the holder then we know that it's a global
  // object, which can only happen for contextual loads. In this case,
  // the receiver cannot be a smi.
- if (object != holder) {
+ if (!object.is_identical_to(holder)) {
__ JumpIfSmi(eax, &miss);
}
@@ -2897,10 +2798,10 @@ MaybeObject* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
// Get the value from the cell.
if (Serializer::enabled()) {
- __ mov(ebx, Immediate(Handle<JSGlobalPropertyCell>(cell)));
+ __ mov(ebx, Immediate(cell));
__ mov(ebx, FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset));
} else {
- __ mov(ebx, Operand::Cell(Handle<JSGlobalPropertyCell>(cell)));
+ __ mov(ebx, Operand::Cell(cell));
}
// Check for deleted property if property can actually be deleted.
@@ -2926,9 +2827,9 @@ MaybeObject* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
}
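
The hole check above is what makes deletable global properties safe to cache: the stub reads the value straight out of the property cell, and only when the property could be deleted does it compare against the hole and miss. A compact model under those assumptions; the sentinel and types are made up for illustration:

// Illustrative model of CompileLoadGlobal's cell check, not real V8 code.
struct Cell { const void* value; };

namespace {
int the_hole_storage;                          // stand-in for the heap's hole
const void* const kTheHole = &the_hole_storage;
}

// Returns the cached value, or null to signal a miss (property was deleted).
const void* LoadGlobalCell(const Cell* cell, bool is_dont_delete) {
  const void* value = cell->value;             // mov ebx, Operand::Cell(cell)
  if (!is_dont_delete && value == kTheHole) return nullptr;  // -> &miss
  return value;
}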
-MaybeObject* KeyedLoadStubCompiler::CompileLoadField(String* name,
- JSObject* receiver,
- JSObject* holder,
+Handle<Code> KeyedLoadStubCompiler::CompileLoadField(Handle<String> name,
+ Handle<JSObject> receiver,
+ Handle<JSObject> holder,
int index) {
// ----------- S t a t e -------------
// -- eax : key
@@ -2941,7 +2842,7 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadField(String* name,
__ IncrementCounter(counters->keyed_load_field(), 1);
// Check that the name has not changed.
- __ cmp(Operand(eax), Immediate(Handle<String>(name)));
+ __ cmp(eax, Immediate(name));
__ j(not_equal, &miss);
GenerateLoadField(receiver, holder, edx, ebx, ecx, edi, index, name, &miss);
@@ -2955,11 +2856,11 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadField(String* name,
}
-MaybeObject* KeyedLoadStubCompiler::CompileLoadCallback(
- String* name,
- JSObject* receiver,
- JSObject* holder,
- AccessorInfo* callback) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadCallback(
+ Handle<String> name,
+ Handle<JSObject> receiver,
+ Handle<JSObject> holder,
+ Handle<AccessorInfo> callback) {
// ----------- S t a t e -------------
// -- eax : key
// -- edx : receiver
@@ -2971,18 +2872,13 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadCallback(
__ IncrementCounter(counters->keyed_load_callback(), 1);
// Check that the name has not changed.
- __ cmp(Operand(eax), Immediate(Handle<String>(name)));
+ __ cmp(eax, Immediate(name));
__ j(not_equal, &miss);
- MaybeObject* result = GenerateLoadCallback(receiver, holder, edx, eax, ebx,
- ecx, edi, callback, name, &miss);
- if (result->IsFailure()) {
- miss.Unuse();
- return result;
- }
+ GenerateLoadCallback(receiver, holder, edx, eax, ebx, ecx, edi, callback,
+ name, &miss);
__ bind(&miss);
-
__ DecrementCounter(counters->keyed_load_callback(), 1);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
@@ -2991,10 +2887,11 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadCallback(
}
-MaybeObject* KeyedLoadStubCompiler::CompileLoadConstant(String* name,
- JSObject* receiver,
- JSObject* holder,
- Object* value) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadConstant(
+ Handle<String> name,
+ Handle<JSObject> receiver,
+ Handle<JSObject> holder,
+ Handle<Object> value) {
// ----------- S t a t e -------------
// -- eax : key
// -- edx : receiver
@@ -3006,11 +2903,11 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadConstant(String* name,
__ IncrementCounter(counters->keyed_load_constant_function(), 1);
// Check that the name has not changed.
- __ cmp(Operand(eax), Immediate(Handle<String>(name)));
+ __ cmp(eax, Immediate(name));
__ j(not_equal, &miss);
- GenerateLoadConstant(receiver, holder, edx, ebx, ecx, edi,
- value, name, &miss);
+ GenerateLoadConstant(
+ receiver, holder, edx, ebx, ecx, edi, value, name, &miss);
__ bind(&miss);
__ DecrementCounter(counters->keyed_load_constant_function(), 1);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
@@ -3020,9 +2917,10 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadConstant(String* name,
}
-MaybeObject* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
- JSObject* holder,
- String* name) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadInterceptor(
+ Handle<JSObject> receiver,
+ Handle<JSObject> holder,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- eax : key
// -- edx : receiver
@@ -3034,21 +2932,13 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
__ IncrementCounter(counters->keyed_load_interceptor(), 1);
// Check that the name has not changed.
- __ cmp(Operand(eax), Immediate(Handle<String>(name)));
+ __ cmp(eax, Immediate(name));
__ j(not_equal, &miss);
- LookupResult lookup;
+ LookupResult lookup(isolate());
LookupPostInterceptor(holder, name, &lookup);
- GenerateLoadInterceptor(receiver,
- holder,
- &lookup,
- edx,
- eax,
- ecx,
- ebx,
- edi,
- name,
- &miss);
+ GenerateLoadInterceptor(receiver, holder, &lookup, edx, eax, ecx, ebx, edi,
+ name, &miss);
__ bind(&miss);
__ DecrementCounter(counters->keyed_load_interceptor(), 1);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
@@ -3058,7 +2948,8 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
}
-MaybeObject* KeyedLoadStubCompiler::CompileLoadArrayLength(String* name) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadArrayLength(
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- eax : key
// -- edx : receiver
@@ -3070,7 +2961,7 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadArrayLength(String* name) {
__ IncrementCounter(counters->keyed_load_array_length(), 1);
// Check that the name has not changed.
- __ cmp(Operand(eax), Immediate(Handle<String>(name)));
+ __ cmp(eax, Immediate(name));
__ j(not_equal, &miss);
GenerateLoadArrayLength(masm(), edx, ecx, &miss);
@@ -3083,7 +2974,8 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadArrayLength(String* name) {
}
-MaybeObject* KeyedLoadStubCompiler::CompileLoadStringLength(String* name) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadStringLength(
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- eax : key
// -- edx : receiver
@@ -3095,7 +2987,7 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadStringLength(String* name) {
__ IncrementCounter(counters->keyed_load_string_length(), 1);
// Check that the name has not changed.
- __ cmp(Operand(eax), Immediate(Handle<String>(name)));
+ __ cmp(eax, Immediate(name));
__ j(not_equal, &miss);
GenerateLoadStringLength(masm(), edx, ecx, ebx, &miss, true);
@@ -3108,7 +3000,8 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadStringLength(String* name) {
}
-MaybeObject* KeyedLoadStubCompiler::CompileLoadFunctionPrototype(String* name) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadFunctionPrototype(
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- eax : key
// -- edx : receiver
@@ -3120,7 +3013,7 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadFunctionPrototype(String* name) {
__ IncrementCounter(counters->keyed_load_function_prototype(), 1);
// Check that the name has not changed.
- __ cmp(Operand(eax), Immediate(Handle<String>(name)));
+ __ cmp(eax, Immediate(name));
__ j(not_equal, &miss);
GenerateLoadFunctionPrototype(masm(), edx, ecx, ebx, &miss);
@@ -3133,31 +3026,29 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadFunctionPrototype(String* name) {
}
-MaybeObject* KeyedLoadStubCompiler::CompileLoadElement(Map* receiver_map) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadElement(
+ Handle<Map> receiver_map) {
// ----------- S t a t e -------------
// -- eax : key
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
- Code* stub;
+
ElementsKind elements_kind = receiver_map->elements_kind();
- MaybeObject* maybe_stub = KeyedLoadElementStub(elements_kind).TryGetCode();
- if (!maybe_stub->To(&stub)) return maybe_stub;
- __ DispatchMap(edx,
- Handle<Map>(receiver_map),
- Handle<Code>(stub),
- DO_SMI_CHECK);
+ Handle<Code> stub = KeyedLoadElementStub(elements_kind).GetCode();
+
+ __ DispatchMap(edx, receiver_map, stub, DO_SMI_CHECK);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
// Return the generated code.
- return GetCode(NORMAL, NULL);
+ return GetCode(NORMAL, factory()->empty_string());
}
-MaybeObject* KeyedLoadStubCompiler::CompileLoadMegamorphic(
- MapList* receiver_maps,
- CodeList* handler_ics) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadPolymorphic(
+ MapHandleList* receiver_maps,
+ CodeHandleList* handler_ics) {
// ----------- S t a t e -------------
// -- eax : key
// -- edx : receiver
@@ -3170,22 +3061,22 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadMegamorphic(
__ mov(map_reg, FieldOperand(edx, HeapObject::kMapOffset));
int receiver_count = receiver_maps->length();
for (int current = 0; current < receiver_count; ++current) {
- Handle<Map> map(receiver_maps->at(current));
- __ cmp(map_reg, map);
- __ j(equal, Handle<Code>(handler_ics->at(current)));
+ __ cmp(map_reg, receiver_maps->at(current));
+ __ j(equal, handler_ics->at(current));
}
__ bind(&miss);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
// Return the generated code.
- return GetCode(NORMAL, NULL, MEGAMORPHIC);
+ return GetCode(NORMAL, factory()->empty_string(), MEGAMORPHIC);
}
// Specialized stub for constructing objects from functions which only have
// simple assignments of the form this.x = ...; in their body.
-MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) {
+Handle<Code> ConstructStubCompiler::CompileConstructStub(
+ Handle<JSFunction> function) {
// ----------- S t a t e -------------
// -- eax : argc
// -- edi : constructor
@@ -3224,12 +3115,8 @@ MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) {
// ebx: initial map
__ movzx_b(ecx, FieldOperand(ebx, Map::kInstanceSizeOffset));
__ shl(ecx, kPointerSizeLog2);
- __ AllocateInNewSpace(ecx,
- edx,
- ecx,
- no_reg,
- &generic_stub_call,
- NO_ALLOCATION_FLAGS);
+ __ AllocateInNewSpace(ecx, edx, ecx, no_reg,
+ &generic_stub_call, NO_ALLOCATION_FLAGS);
// Allocated the JSObject, now initialize the fields and add the heap tag.
// ebx: initial map
@@ -3260,7 +3147,7 @@ MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) {
// edi: undefined
// Fill the initialized properties with a constant value or a passed argument
// depending on the this.x = ...; assignment in the function.
- SharedFunctionInfo* shared = function->shared();
+ Handle<SharedFunctionInfo> shared(function->shared());
for (int i = 0; i < shared->this_property_assignments_count(); i++) {
if (shared->IsThisPropertyAssignmentArgument(i)) {
// Check if the argument assigned to the property is actually passed.
@@ -3298,7 +3185,7 @@ MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) {
// Move argc to ebx and retrieve and tag the JSObject to return.
__ mov(ebx, eax);
__ pop(eax);
- __ or_(Operand(eax), Immediate(kHeapObjectTag));
+ __ or_(eax, Immediate(kHeapObjectTag));
// Remove caller arguments and receiver from the stack and return.
__ pop(ecx);
@@ -3312,9 +3199,8 @@ MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) {
// Jump to the generic stub in case the specialized code cannot handle the
// construction.
__ bind(&generic_stub_call);
- Handle<Code> generic_construct_stub =
- isolate()->builtins()->JSConstructStubGeneric();
- __ jmp(generic_construct_stub, RelocInfo::CODE_TARGET);
+ Handle<Code> code = isolate()->builtins()->JSConstructStubGeneric();
+ __ jmp(code, RelocInfo::CODE_TARGET);
// Return the generated code.
return GetCode();
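
The loop over shared->this_property_assignments_count() is the heart of this specialized construct stub: every this.x = ...; slot in the new object is filled either from a constructor argument (undefined when the caller passed too few) or from a constant. A plain C++ rendering of that policy, with hypothetical types:

// Hypothetical model of the property-fill loop in the construct stub above.
struct Assignment { bool from_argument; int arg_index; int constant; };

void FillProperties(int* fields, const Assignment* a, int count,
                    const int* args, int argc, int undefined) {
  for (int i = 0; i < count; ++i) {
    fields[i] = a[i].from_argument
        // Argument assignment: use the passed value, or undefined if the
        // caller supplied fewer than arg_index + 1 arguments.
        ? (a[i].arg_index < argc ? args[a[i].arg_index] : undefined)
        // Otherwise the slot is initialized from a known constant.
        : a[i].constant;
  }
}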
@@ -3506,8 +3392,7 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray(
// If we fail allocation of the HeapNumber, we still have a value on
// top of the FPU stack. Remove it.
__ bind(&failed_allocation);
- __ ffree();
- __ fincstp();
+ __ fstp(0);
// Fall through to slow case.
// Slow case: Jump to runtime.
@@ -3679,10 +3564,10 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
// If the value is NaN or +/-infinity, the result is 0x80000000,
// which is automatically zero when taken mod 2^n, n < 32.
__ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
- __ sub(Operand(esp), Immediate(2 * kPointerSize));
+ __ sub(esp, Immediate(2 * kPointerSize));
__ fisttp_d(Operand(esp, 0));
__ pop(ebx);
- __ add(Operand(esp), Immediate(kPointerSize));
+ __ add(esp, Immediate(kPointerSize));
} else {
ASSERT(CpuFeatures::IsSupported(SSE2));
CpuFeatures::Scope scope(SSE2);
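
The comment about 0x80000000 holds because the x86 "integer indefinite" result has only bit 31 set, so its value mod 2^n is zero for every n below 32, which is exactly what stores into small external arrays need. A self-contained check of that arithmetic (ordinary C++, unrelated to the V8 sources):

#include <cassert>
#include <cstdint>

int main() {
  const uint32_t kIndefinite = 0x80000000u;  // fisttp result for NaN/Inf
  for (int n = 1; n < 32; ++n) {
    // Taking the value mod 2^n keeps the low n bits; bit 31 is never among
    // them, so the element actually stored is 0.
    assert((kIndefinite & ((uint32_t(1) << n) - 1)) == 0);
  }
  return 0;
}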
@@ -3824,8 +3709,7 @@ void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(
// A value was pushed on the floating point stack before the allocation, if
// the allocation fails it needs to be removed.
if (!CpuFeatures::IsSupported(SSE2)) {
- __ ffree();
- __ fincstp();
+ __ fstp(0);
}
Handle<Code> slow_ic =
masm->isolate()->builtins()->KeyedLoadIC_Slow();
@@ -3838,15 +3722,17 @@ void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(
}
-void KeyedStoreStubCompiler::GenerateStoreFastElement(MacroAssembler* masm,
- bool is_js_array) {
+void KeyedStoreStubCompiler::GenerateStoreFastElement(
+ MacroAssembler* masm,
+ bool is_js_array,
+ ElementsKind elements_kind) {
// ----------- S t a t e -------------
// -- eax : value
// -- ecx : key
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
- Label miss_force_generic;
+ Label miss_force_generic, transition_elements_kind;
  // This stub is meant to be tail-jumped to; the receiver must already
  // have been verified by the caller to not be a smi.
@@ -3870,11 +3756,28 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(MacroAssembler* masm,
__ j(above_equal, &miss_force_generic);
}
- // Do the store and update the write barrier. Make sure to preserve
- // the value in register eax.
- __ mov(edx, Operand(eax));
- __ mov(FieldOperand(edi, ecx, times_2, FixedArray::kHeaderSize), eax);
- __ RecordWrite(edi, 0, edx, ecx);
+ if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
+ __ JumpIfNotSmi(eax, &transition_elements_kind);
+ // ecx is a smi, use times_half_pointer_size instead of
+ // times_pointer_size
+ __ mov(FieldOperand(edi,
+ ecx,
+ times_half_pointer_size,
+ FixedArray::kHeaderSize), eax);
+ } else {
+ ASSERT(elements_kind == FAST_ELEMENTS);
+ // Do the store and update the write barrier.
+ // ecx is a smi, use times_half_pointer_size instead of
+ // times_pointer_size
+ __ lea(ecx, FieldOperand(edi,
+ ecx,
+ times_half_pointer_size,
+ FixedArray::kHeaderSize));
+ __ mov(Operand(ecx, 0), eax);
+ // Make sure to preserve the value in register eax.
+ __ mov(edx, eax);
+ __ RecordWrite(edi, ecx, edx, kDontSaveFPRegs);
+ }
// Done.
__ ret(0);
@@ -3884,6 +3787,11 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(MacroAssembler* masm,
Handle<Code> ic_force_generic =
masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
__ jmp(ic_force_generic, RelocInfo::CODE_TARGET);
+
+ // Handle transition to other elements kinds without using the generic stub.
+ __ bind(&transition_elements_kind);
+ Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss();
+ __ jmp(ic_miss, RelocInfo::CODE_TARGET);
}
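
The times_half_pointer_size comments are a consequence of smi tagging: on ia32 a smi carries its value shifted left by one, so scaling the tagged key by 2 lands on the same byte offset as scaling the raw index by kPointerSize (4). A quick sanity check under those assumptions:

#include <cassert>

int main() {
  const int kPointerSize = 4;   // ia32
  const int kSmiTagSize = 1;    // smi = value << 1
  for (int index = 0; index < 1000; ++index) {
    int smi = index << kSmiTagSize;
    // times_half_pointer_size applied to the tagged key...
    int offset_via_smi = smi * (kPointerSize / 2);
    // ...equals times_pointer_size applied to the untagged index.
    assert(offset_via_smi == index * kPointerSize);
  }
  return 0;
}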
@@ -3896,8 +3804,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
- Label miss_force_generic, smi_value, is_nan, maybe_nan;
- Label have_double_value, not_nan;
+ Label miss_force_generic, transition_elements_kind;
  // This stub is meant to be tail-jumped to; the receiver must already
  // have been verified by the caller to not be a smi.
@@ -3918,59 +3825,13 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
}
__ j(above_equal, &miss_force_generic);
- __ JumpIfSmi(eax, &smi_value, Label::kNear);
-
- __ CheckMap(eax,
- masm->isolate()->factory()->heap_number_map(),
- &miss_force_generic,
- DONT_DO_SMI_CHECK);
-
- // Double value, canonicalize NaN.
- uint32_t offset = HeapNumber::kValueOffset + sizeof(kHoleNanLower32);
- __ cmp(FieldOperand(eax, offset), Immediate(kNaNOrInfinityLowerBoundUpper32));
- __ j(greater_equal, &maybe_nan, Label::kNear);
-
- __ bind(&not_nan);
- ExternalReference canonical_nan_reference =
- ExternalReference::address_of_canonical_non_hole_nan();
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope use_sse2(SSE2);
- __ movdbl(xmm0, FieldOperand(eax, HeapNumber::kValueOffset));
- __ bind(&have_double_value);
- __ movdbl(FieldOperand(edi, ecx, times_4, FixedDoubleArray::kHeaderSize),
- xmm0);
- __ ret(0);
- } else {
- __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
- __ bind(&have_double_value);
- __ fstp_d(FieldOperand(edi, ecx, times_4, FixedDoubleArray::kHeaderSize));
- __ ret(0);
- }
-
- __ bind(&maybe_nan);
- // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
- // it's an Infinity, and the non-NaN code path applies.
- __ j(greater, &is_nan, Label::kNear);
- __ cmp(FieldOperand(eax, HeapNumber::kValueOffset), Immediate(0));
- __ j(zero, &not_nan);
- __ bind(&is_nan);
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope use_sse2(SSE2);
- __ movdbl(xmm0, Operand::StaticVariable(canonical_nan_reference));
- } else {
- __ fld_d(Operand::StaticVariable(canonical_nan_reference));
- }
- __ jmp(&have_double_value, Label::kNear);
-
- __ bind(&smi_value);
- // Value is a smi. convert to a double and store.
- // Preserve original value.
- __ mov(edx, eax);
- __ SmiUntag(edx);
- __ push(edx);
- __ fild_s(Operand(esp, 0));
- __ pop(edx);
- __ fstp_d(FieldOperand(edi, ecx, times_4, FixedDoubleArray::kHeaderSize));
+ __ StoreNumberToDoubleElements(eax,
+ edi,
+ ecx,
+ edx,
+ xmm0,
+ &transition_elements_kind,
+ true);
__ ret(0);
// Handle store cache miss, replacing the ic with the generic stub.
@@ -3978,6 +3839,11 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
Handle<Code> ic_force_generic =
masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
__ jmp(ic_force_generic, RelocInfo::CODE_TARGET);
+
+ // Handle transition to other elements kinds without using the generic stub.
+ __ bind(&transition_elements_kind);
+ Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss();
+ __ jmp(ic_miss, RelocInfo::CODE_TARGET);
}
diff --git a/deps/v8/src/ic-inl.h b/deps/v8/src/ic-inl.h
index b4f789cb4..498cf3af3 100644
--- a/deps/v8/src/ic-inl.h
+++ b/deps/v8/src/ic-inl.h
@@ -87,6 +87,8 @@ void IC::SetTargetAtAddress(Address address, Code* target) {
}
#endif
Assembler::set_target_address_at(address, target->instruction_start());
+ target->GetHeap()->incremental_marking()->RecordCodeTargetPatch(address,
+ target);
}
diff --git a/deps/v8/src/ic.cc b/deps/v8/src/ic.cc
index 0f76a9a06..2c6d55b93 100644
--- a/deps/v8/src/ic.cc
+++ b/deps/v8/src/ic.cc
@@ -100,7 +100,11 @@ void IC::TraceIC(const char* type,
PrintF("]\n");
}
}
-#endif
+#endif // DEBUG
+
+
+#define TRACE_IC(type, name, old_state, new_target) \
+ ASSERT((TraceIC(type, name, old_state, new_target), true))
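
The new TRACE_IC macro piggybacks on ASSERT via the comma operator: the expression calls TraceIC for its side effect and then yields true, so the assertion can never fire, and because ASSERT expands to nothing in release builds the tracing call vanishes there as well. The same trick in standalone form, using <cassert> in place of V8's ASSERT:

#include <cassert>
#include <cstdio>

void Trace(const char* msg) { std::printf("trace: %s\n", msg); }

// Runs Trace(msg) only when assertions are enabled (NDEBUG not defined);
// the comma expression always yields true, so the assert cannot fail.
#define TRACE(msg) assert((Trace(msg), true))

int main() {
  TRACE("only visible in debug builds");
  return 0;
}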
IC::IC(FrameDepth depth, Isolate* isolate) : isolate_(isolate) {
@@ -167,7 +171,7 @@ static bool HasNormalObjectsInPrototypeChain(Isolate* isolate,
LookupResult* lookup,
Object* receiver) {
Object* end = lookup->IsProperty()
- ? lookup->holder() : isolate->heap()->null_value();
+ ? lookup->holder() : Object::cast(isolate->heap()->null_value());
for (Object* current = receiver;
current != end;
current = current->GetPrototype()) {
@@ -368,15 +372,13 @@ static bool HasInterceptorGetter(JSObject* object) {
}
-static void LookupForRead(Object* object,
- String* name,
+static void LookupForRead(Handle<Object> object,
+ Handle<String> name,
LookupResult* lookup) {
- AssertNoAllocation no_gc; // pointers must stay valid
-
  // Skip all the objects with named interceptors, but
  // without an actual getter.
while (true) {
- object->Lookup(name, lookup);
+ object->Lookup(*name, lookup);
// Besides normal conditions (property not found or it's not
// an interceptor), bail out if lookup is not cacheable: we won't
// be able to IC it anyway and regular lookup should work fine.
@@ -386,18 +388,18 @@ static void LookupForRead(Object* object,
return;
}
- JSObject* holder = lookup->holder();
- if (HasInterceptorGetter(holder)) {
+ Handle<JSObject> holder(lookup->holder());
+ if (HasInterceptorGetter(*holder)) {
return;
}
- holder->LocalLookupRealNamedProperty(name, lookup);
+ holder->LocalLookupRealNamedProperty(*name, lookup);
if (lookup->IsProperty()) {
ASSERT(lookup->type() != INTERCEPTOR);
return;
}
- Object* proto = holder->GetPrototype();
+ Handle<Object> proto(holder->GetPrototype());
if (proto->IsNull()) {
lookup->NotFound();
return;
@@ -408,31 +410,32 @@ static void LookupForRead(Object* object,
}
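
LookupForRead's loop gives interceptors that lack a real getter no say in the lookup: the chain walk simply continues past them. A simplified model of that walk, with hypothetical fields collapsing V8's two-step Lookup/LocalLookupRealNamedProperty into flags:

// Hypothetical model of LookupForRead: an object with a named interceptor
// but no actual getter is treated as transparent and the walk continues.
struct Obj {
  Obj* prototype;
  bool has_real_property;       // found by LocalLookupRealNamedProperty
  bool has_interceptor;
  bool interceptor_has_getter;  // HasInterceptorGetter
};

// Returns the holder whose property should back the IC, or null if absent.
Obj* FindReadHolder(Obj* object) {
  for (Obj* o = object; o != nullptr; o = o->prototype) {
    if (o->has_interceptor && o->interceptor_has_getter) return o;
    if (o->has_real_property) return o;
    // Interceptor without a getter, or no property at all: keep walking.
  }
  return nullptr;  // lookup->NotFound()
}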
-Object* CallICBase::TryCallAsFunction(Object* object) {
- HandleScope scope(isolate());
- Handle<Object> target(object, isolate());
- Handle<Object> delegate = Execution::GetFunctionDelegate(target);
+Handle<Object> CallICBase::TryCallAsFunction(Handle<Object> object) {
+ Handle<Object> delegate = Execution::GetFunctionDelegate(object);
- if (delegate->IsJSFunction()) {
+ if (delegate->IsJSFunction() && !object->IsJSFunctionProxy()) {
// Patch the receiver and use the delegate as the function to
- // invoke. This is used for invoking objects as if they were
- // functions.
- const int argc = this->target()->arguments_count();
+ // invoke. This is used for invoking objects as if they were functions.
+ const int argc = target()->arguments_count();
StackFrameLocator locator;
JavaScriptFrame* frame = locator.FindJavaScriptFrame(0);
int index = frame->ComputeExpressionsCount() - (argc + 1);
- frame->SetExpression(index, *target);
+ frame->SetExpression(index, *object);
}
- return *delegate;
+ return delegate;
}
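
TryCallAsFunction is how V8 invokes a non-function value: fetch its call delegate and, if that delegate is a function, keep the original object around as the receiver so the delegate knows what was "called". A rough, fully hypothetical model of that resolution:

// Hypothetical model of delegate resolution; Value and its fields stand in
// for real heap objects and Execution::GetFunctionDelegate.
struct Value {
  bool is_function;
  Value* delegate;  // call delegate, or null if the value is not callable
};

// Returns what to invoke, recording the original object as the receiver;
// null means the caller should raise "property_not_function".
Value* ResolveCallable(Value* object, Value** receiver_out) {
  if (object->is_function) return object;
  Value* delegate = object->delegate;
  if (delegate != nullptr && delegate->is_function) {
    *receiver_out = object;  // patched into the frame's expression slot
    return delegate;
  }
  return nullptr;
}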
void CallICBase::ReceiverToObjectIfRequired(Handle<Object> callee,
Handle<Object> object) {
+ while (callee->IsJSFunctionProxy()) {
+ callee = Handle<Object>(JSFunctionProxy::cast(*callee)->call_trap());
+ }
+
if (callee->IsJSFunction()) {
Handle<JSFunction> function = Handle<JSFunction>::cast(callee);
- if (function->shared()->strict_mode() || function->IsBuiltin()) {
+ if (!function->shared()->is_classic_mode() || function->IsBuiltin()) {
// Do not wrap receiver for strict mode functions or for builtins.
return;
}
@@ -464,31 +467,27 @@ MaybeObject* CallICBase::LoadFunction(State state,
// the element if so.
uint32_t index;
if (name->AsArrayIndex(&index)) {
- Object* result;
- { MaybeObject* maybe_result = object->GetElement(index);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
-
- if (result->IsJSFunction()) return result;
+ Handle<Object> result = Object::GetElement(object, index);
+ RETURN_IF_EMPTY_HANDLE(isolate(), result);
+ if (result->IsJSFunction()) return *result;
// Try to find a suitable function delegate for the object at hand.
result = TryCallAsFunction(result);
- if (result->IsJSFunction()) return result;
+ if (result->IsJSFunction()) return *result;
// Otherwise, it will fail in the lookup step.
}
// Lookup the property in the object.
- LookupResult lookup;
- LookupForRead(*object, *name, &lookup);
+ LookupResult lookup(isolate());
+ LookupForRead(object, name, &lookup);
if (!lookup.IsProperty()) {
// If the object does not have the requested property, check which
// exception we need to throw.
- if (IsContextual(object)) {
- return ReferenceError("not_defined", name);
- }
- return TypeError("undefined_method", object, name);
+ return IsContextual(object)
+ ? ReferenceError("not_defined", name)
+ : TypeError("undefined_method", object, name);
}
// Lookup is valid: Update inline cache and stub cache.
@@ -498,53 +497,42 @@ MaybeObject* CallICBase::LoadFunction(State state,
// Get the property.
PropertyAttributes attr;
- Object* result;
- { MaybeObject* maybe_result =
- object->GetProperty(*object, &lookup, *name, &attr);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
+ Handle<Object> result =
+ Object::GetProperty(object, object, &lookup, name, &attr);
+ RETURN_IF_EMPTY_HANDLE(isolate(), result);
- if (lookup.type() == INTERCEPTOR) {
+ if (lookup.type() == INTERCEPTOR && attr == ABSENT) {
// If the object does not have the requested property, check which
// exception we need to throw.
- if (attr == ABSENT) {
- if (IsContextual(object)) {
- return ReferenceError("not_defined", name);
- }
- return TypeError("undefined_method", object, name);
- }
+ return IsContextual(object)
+ ? ReferenceError("not_defined", name)
+ : TypeError("undefined_method", object, name);
}
ASSERT(!result->IsTheHole());
- HandleScope scope(isolate());
- // Wrap result in a handle because ReceiverToObjectIfRequired may allocate
- // new object and cause GC.
- Handle<Object> result_handle(result);
  // Make receiver an object if the callee requires it. Strict mode or builtin
  // functions do not wrap the receiver; non-strict functions and objects
  // called as functions do.
- ReceiverToObjectIfRequired(result_handle, object);
+ ReceiverToObjectIfRequired(result, object);
- if (result_handle->IsJSFunction()) {
+ if (result->IsJSFunction()) {
+ Handle<JSFunction> function = Handle<JSFunction>::cast(result);
#ifdef ENABLE_DEBUGGER_SUPPORT
// Handle stepping into a function if step into is active.
Debug* debug = isolate()->debug();
if (debug->StepInActive()) {
// Protect the result in a handle as the debugger can allocate and might
// cause GC.
- Handle<JSFunction> function(JSFunction::cast(*result_handle), isolate());
debug->HandleStepIn(function, object, fp(), false);
- return *function;
}
#endif
-
- return *result_handle;
+ return *function;
}
// Try to find a suitable function delegate for the object at hand.
- result_handle = Handle<Object>(TryCallAsFunction(*result_handle));
- if (result_handle->IsJSFunction()) return *result_handle;
+ result = TryCallAsFunction(result);
+ if (result->IsJSFunction()) return *result;
return TypeError("property_not_function", object, name);
}
@@ -594,89 +582,57 @@ bool CallICBase::TryUpdateExtraICState(LookupResult* lookup,
}
-MaybeObject* CallICBase::ComputeMonomorphicStub(
- LookupResult* lookup,
- State state,
- Code::ExtraICState extra_ic_state,
- Handle<Object> object,
- Handle<String> name) {
+Handle<Code> CallICBase::ComputeMonomorphicStub(LookupResult* lookup,
+ State state,
+ Code::ExtraICState extra_state,
+ Handle<Object> object,
+ Handle<String> name) {
int argc = target()->arguments_count();
- MaybeObject* maybe_code = NULL;
+ Handle<JSObject> holder(lookup->holder());
switch (lookup->type()) {
case FIELD: {
int index = lookup->GetFieldIndex();
- maybe_code = isolate()->stub_cache()->ComputeCallField(argc,
- kind_,
- extra_ic_state,
- *name,
- *object,
- lookup->holder(),
- index);
- break;
+ return isolate()->stub_cache()->ComputeCallField(
+ argc, kind_, extra_state, name, object, holder, index);
}
case CONSTANT_FUNCTION: {
// Get the constant function and compute the code stub for this
// call; used for rewriting to monomorphic state and making sure
// that the code stub is in the stub cache.
- JSFunction* function = lookup->GetConstantFunction();
- maybe_code =
- isolate()->stub_cache()->ComputeCallConstant(argc,
- kind_,
- extra_ic_state,
- *name,
- *object,
- lookup->holder(),
- function);
- break;
+ Handle<JSFunction> function(lookup->GetConstantFunction());
+ return isolate()->stub_cache()->ComputeCallConstant(
+ argc, kind_, extra_state, name, object, holder, function);
}
case NORMAL: {
- if (!object->IsJSObject()) return NULL;
+ // If we return a null handle, the IC will not be patched.
+ if (!object->IsJSObject()) return Handle<Code>::null();
Handle<JSObject> receiver = Handle<JSObject>::cast(object);
- if (lookup->holder()->IsGlobalObject()) {
- GlobalObject* global = GlobalObject::cast(lookup->holder());
- JSGlobalPropertyCell* cell =
- JSGlobalPropertyCell::cast(global->GetPropertyCell(lookup));
- if (!cell->value()->IsJSFunction()) return NULL;
- JSFunction* function = JSFunction::cast(cell->value());
- maybe_code = isolate()->stub_cache()->ComputeCallGlobal(argc,
- kind_,
- extra_ic_state,
- *name,
- *receiver,
- global,
- cell,
- function);
+ if (holder->IsGlobalObject()) {
+ Handle<GlobalObject> global = Handle<GlobalObject>::cast(holder);
+ Handle<JSGlobalPropertyCell> cell(global->GetPropertyCell(lookup));
+ if (!cell->value()->IsJSFunction()) return Handle<Code>::null();
+ Handle<JSFunction> function(JSFunction::cast(cell->value()));
+ return isolate()->stub_cache()->ComputeCallGlobal(
+ argc, kind_, extra_state, name, receiver, global, cell, function);
} else {
// There is only one shared stub for calling normalized
// properties. It does not traverse the prototype chain, so the
// property must be found in the receiver for the stub to be
// applicable.
- if (lookup->holder() != *receiver) return NULL;
- maybe_code = isolate()->stub_cache()->ComputeCallNormal(argc,
- kind_,
- extra_ic_state,
- *name,
- *receiver);
+ if (!holder.is_identical_to(receiver)) return Handle<Code>::null();
+ return isolate()->stub_cache()->ComputeCallNormal(
+ argc, kind_, extra_state);
}
break;
}
- case INTERCEPTOR: {
- ASSERT(HasInterceptorGetter(lookup->holder()));
- maybe_code = isolate()->stub_cache()->ComputeCallInterceptor(
- argc,
- kind_,
- extra_ic_state,
- *name,
- *object,
- lookup->holder());
- break;
- }
+ case INTERCEPTOR:
+ ASSERT(HasInterceptorGetter(*holder));
+ return isolate()->stub_cache()->ComputeCallInterceptor(
+ argc, kind_, extra_state, name, object, holder);
default:
- maybe_code = NULL;
- break;
+ return Handle<Code>::null();
}
- return maybe_code;
}
@@ -698,75 +654,57 @@ void CallICBase::UpdateCaches(LookupResult* lookup,
// Compute the number of arguments.
int argc = target()->arguments_count();
- MaybeObject* maybe_code = NULL;
bool had_proto_failure = false;
+ Handle<Code> code;
if (state == UNINITIALIZED) {
// This is the first time we execute this inline cache.
// Set the target to the pre monomorphic stub to delay
// setting the monomorphic state.
- maybe_code =
- isolate()->stub_cache()->ComputeCallPreMonomorphic(argc,
- kind_,
- extra_ic_state);
+ code = isolate()->stub_cache()->ComputeCallPreMonomorphic(
+ argc, kind_, extra_ic_state);
} else if (state == MONOMORPHIC) {
if (kind_ == Code::CALL_IC &&
TryUpdateExtraICState(lookup, object, &extra_ic_state)) {
- maybe_code = ComputeMonomorphicStub(lookup,
- state,
- extra_ic_state,
- object,
- name);
+ code = ComputeMonomorphicStub(lookup, state, extra_ic_state,
+ object, name);
} else if (kind_ == Code::CALL_IC &&
TryRemoveInvalidPrototypeDependentStub(target(),
*object,
*name)) {
had_proto_failure = true;
- maybe_code = ComputeMonomorphicStub(lookup,
- state,
- extra_ic_state,
- object,
- name);
+ code = ComputeMonomorphicStub(lookup, state, extra_ic_state,
+ object, name);
} else {
- maybe_code =
- isolate()->stub_cache()->ComputeCallMegamorphic(argc,
- kind_,
- extra_ic_state);
+ code = isolate()->stub_cache()->ComputeCallMegamorphic(
+ argc, kind_, extra_ic_state);
}
} else {
- maybe_code = ComputeMonomorphicStub(lookup,
- state,
- extra_ic_state,
- object,
- name);
+ code = ComputeMonomorphicStub(lookup, state, extra_ic_state,
+ object, name);
}
- // If we're unable to compute the stub (not enough memory left), we
- // simply avoid updating the caches.
- Object* code;
- if (maybe_code == NULL || !maybe_code->ToObject(&code)) return;
+ // If there's no appropriate stub we simply avoid updating the caches.
+ if (code.is_null()) return;
// Patch the call site depending on the state of the cache.
if (state == UNINITIALIZED ||
state == PREMONOMORPHIC ||
state == MONOMORPHIC ||
state == MONOMORPHIC_PROTOTYPE_FAILURE) {
- set_target(Code::cast(code));
+ set_target(*code);
} else if (state == MEGAMORPHIC) {
// Cache code holding map should be consistent with
// GenerateMonomorphicCacheProbe. It is not the map which holds the stub.
- Map* map = JSObject::cast(object->IsJSObject() ? *object :
- object->GetPrototype())->map();
-
+ Handle<JSObject> cache_object = object->IsJSObject()
+ ? Handle<JSObject>::cast(object)
+ : Handle<JSObject>(JSObject::cast(object->GetPrototype()));
// Update the stub cache.
- isolate()->stub_cache()->Set(*name, map, Code::cast(code));
+ isolate()->stub_cache()->Set(*name, cache_object->map(), *code);
}
- USE(had_proto_failure);
-#ifdef DEBUG
if (had_proto_failure) state = MONOMORPHIC_PROTOTYPE_FAILURE;
- TraceIC(kind_ == Code::CALL_IC ? "CallIC" : "KeyedCallIC",
- name, state, target());
-#endif
+ TRACE_IC(kind_ == Code::CALL_IC ? "CallIC" : "KeyedCallIC",
+ name, state, target());
}
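
Stripped of the stub compilation, UpdateCaches is a small state machine: a fresh site first gets the pre-monomorphic stub to delay specialization, a monomorphic site is either re-specialized or degraded to the megamorphic stub, and megamorphic sites leave the call site alone and only feed the stub cache. A simplified, hypothetical distillation that ignores the extra-IC-state and prototype-failure refinements:

// Simplified model of the IC patching policy in UpdateCaches above.
enum State { UNINITIALIZED, PREMONOMORPHIC, MONOMORPHIC, MEGAMORPHIC };

struct Action { bool patch_site; bool update_stub_cache; };

Action OnCacheUpdate(State state, bool have_stub) {
  if (!have_stub) return { false, false };  // null handle: leave site as-is
  switch (state) {
    case UNINITIALIZED:
    case PREMONOMORPHIC:
    case MONOMORPHIC:   // re-specialize, or move to the megamorphic stub
      return { true, false };
    case MEGAMORPHIC:   // site is stable; only the stub cache learns
      return { false, true };
  }
  return { false, false };
}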
@@ -786,34 +724,22 @@ MaybeObject* KeyedCallIC::LoadFunction(State state,
if (FLAG_use_ic && state != MEGAMORPHIC && object->IsHeapObject()) {
int argc = target()->arguments_count();
- Heap* heap = Handle<HeapObject>::cast(object)->GetHeap();
- Map* map = heap->non_strict_arguments_elements_map();
+ Handle<Map> map =
+ isolate()->factory()->non_strict_arguments_elements_map();
if (object->IsJSObject() &&
- Handle<JSObject>::cast(object)->elements()->map() == map) {
- MaybeObject* maybe_code = isolate()->stub_cache()->ComputeCallArguments(
+ Handle<JSObject>::cast(object)->elements()->map() == *map) {
+ Handle<Code> code = isolate()->stub_cache()->ComputeCallArguments(
argc, Code::KEYED_CALL_IC);
- Object* code;
- if (maybe_code->ToObject(&code)) {
- set_target(Code::cast(code));
-#ifdef DEBUG
- TraceIC("KeyedCallIC", key, state, target());
-#endif
- }
- } else if (FLAG_use_ic && state != MEGAMORPHIC &&
- !object->IsAccessCheckNeeded()) {
- MaybeObject* maybe_code = isolate()->stub_cache()->ComputeCallMegamorphic(
+ set_target(*code);
+ TRACE_IC("KeyedCallIC", key, state, target());
+ } else if (!object->IsAccessCheckNeeded()) {
+ Handle<Code> code = isolate()->stub_cache()->ComputeCallMegamorphic(
argc, Code::KEYED_CALL_IC, Code::kNoExtraICState);
- Object* code;
- if (maybe_code->ToObject(&code)) {
- set_target(Code::cast(code));
-#ifdef DEBUG
- TraceIC("KeyedCallIC", key, state, target());
-#endif
- }
+ set_target(*code);
+ TRACE_IC("KeyedCallIC", key, state, target());
}
}
- HandleScope scope(isolate());
Handle<Object> result = GetProperty(object, key);
RETURN_IF_EMPTY_HANDLE(isolate(), result);
@@ -821,9 +747,9 @@ MaybeObject* KeyedCallIC::LoadFunction(State state,
// functions do not wrap the receiver, non-strict functions and objects
// called as functions do.
ReceiverToObjectIfRequired(result, object);
-
if (result->IsJSFunction()) return *result;
- result = Handle<Object>(TryCallAsFunction(*result));
+
+ result = TryCallAsFunction(result);
if (result->IsJSFunction()) return *result;
return TypeError("property_not_function", object, key);
@@ -846,53 +772,44 @@ MaybeObject* LoadIC::Load(State state,
// the underlying string value. See ECMA-262 15.5.5.1.
if ((object->IsString() || object->IsStringWrapper()) &&
name->Equals(isolate()->heap()->length_symbol())) {
- AssertNoAllocation no_allocation;
- Code* stub = NULL;
+ Handle<Code> stub;
if (state == UNINITIALIZED) {
stub = pre_monomorphic_stub();
} else if (state == PREMONOMORPHIC) {
- if (object->IsString()) {
- stub = isolate()->builtins()->builtin(
- Builtins::kLoadIC_StringLength);
- } else {
- stub = isolate()->builtins()->builtin(
- Builtins::kLoadIC_StringWrapperLength);
- }
+ stub = object->IsString()
+ ? isolate()->builtins()->LoadIC_StringLength()
+ : isolate()->builtins()->LoadIC_StringWrapperLength();
} else if (state == MONOMORPHIC && object->IsStringWrapper()) {
- stub = isolate()->builtins()->builtin(
- Builtins::kLoadIC_StringWrapperLength);
+ stub = isolate()->builtins()->LoadIC_StringWrapperLength();
} else if (state != MEGAMORPHIC) {
stub = megamorphic_stub();
}
- if (stub != NULL) {
- set_target(stub);
+ if (!stub.is_null()) {
+ set_target(*stub);
#ifdef DEBUG
if (FLAG_trace_ic) PrintF("[LoadIC : +#length /string]\n");
#endif
}
// Get the string if we have a string wrapper object.
- if (object->IsJSValue()) {
- return Smi::FromInt(
- String::cast(Handle<JSValue>::cast(object)->value())->length());
- }
- return Smi::FromInt(String::cast(*object)->length());
+ Handle<Object> string = object->IsJSValue()
+ ? Handle<Object>(Handle<JSValue>::cast(object)->value())
+ : object;
+ return Smi::FromInt(String::cast(*string)->length());
}
// Use specialized code for getting the length of arrays.
if (object->IsJSArray() &&
name->Equals(isolate()->heap()->length_symbol())) {
- AssertNoAllocation no_allocation;
- Code* stub = NULL;
+ Handle<Code> stub;
if (state == UNINITIALIZED) {
stub = pre_monomorphic_stub();
} else if (state == PREMONOMORPHIC) {
- stub = isolate()->builtins()->builtin(
- Builtins::kLoadIC_ArrayLength);
+ stub = isolate()->builtins()->LoadIC_ArrayLength();
} else if (state != MEGAMORPHIC) {
stub = megamorphic_stub();
}
- if (stub != NULL) {
- set_target(stub);
+ if (!stub.is_null()) {
+ set_target(*stub);
#ifdef DEBUG
if (FLAG_trace_ic) PrintF("[LoadIC : +#length /array]\n");
#endif
@@ -903,23 +820,20 @@ MaybeObject* LoadIC::Load(State state,
// Use specialized code for getting prototype of functions.
if (object->IsJSFunction() &&
name->Equals(isolate()->heap()->prototype_symbol()) &&
- JSFunction::cast(*object)->should_have_prototype()) {
- { AssertNoAllocation no_allocation;
- Code* stub = NULL;
- if (state == UNINITIALIZED) {
- stub = pre_monomorphic_stub();
- } else if (state == PREMONOMORPHIC) {
- stub = isolate()->builtins()->builtin(
- Builtins::kLoadIC_FunctionPrototype);
- } else if (state != MEGAMORPHIC) {
- stub = megamorphic_stub();
- }
- if (stub != NULL) {
- set_target(stub);
+ Handle<JSFunction>::cast(object)->should_have_prototype()) {
+ Handle<Code> stub;
+ if (state == UNINITIALIZED) {
+ stub = pre_monomorphic_stub();
+ } else if (state == PREMONOMORPHIC) {
+ stub = isolate()->builtins()->LoadIC_FunctionPrototype();
+ } else if (state != MEGAMORPHIC) {
+ stub = megamorphic_stub();
+ }
+ if (!stub.is_null()) {
+ set_target(*stub);
#ifdef DEBUG
- if (FLAG_trace_ic) PrintF("[LoadIC : +#prototype /function]\n");
+ if (FLAG_trace_ic) PrintF("[LoadIC : +#prototype /function]\n");
#endif
- }
}
return Accessors::FunctionGetPrototype(*object, 0);
}
@@ -931,8 +845,8 @@ MaybeObject* LoadIC::Load(State state,
if (name->AsArrayIndex(&index)) return object->GetElement(index);
// Named lookup in the object.
- LookupResult lookup;
- LookupForRead(*object, *name, &lookup);
+ LookupResult lookup(isolate());
+ LookupForRead(object, name, &lookup);
// If we did not find a property, check if we need to throw an exception.
if (!lookup.IsProperty()) {
@@ -951,17 +865,15 @@ MaybeObject* LoadIC::Load(State state,
if (lookup.IsProperty() &&
(lookup.type() == INTERCEPTOR || lookup.type() == HANDLER)) {
// Get the property.
- Object* result;
- { MaybeObject* maybe_result =
- object->GetProperty(*object, &lookup, *name, &attr);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
+ Handle<Object> result =
+ Object::GetProperty(object, object, &lookup, name, &attr);
+ RETURN_IF_EMPTY_HANDLE(isolate(), result);
// If the property is not present, check if we need to throw an
// exception.
if (attr == ABSENT && IsContextual(object)) {
return ReferenceError("not_defined", name);
}
- return result;
+ return *result;
}
// Get the property.
@@ -984,120 +896,105 @@ void LoadIC::UpdateCaches(LookupResult* lookup,
if (HasNormalObjectsInPrototypeChain(isolate(), lookup, *object)) return;
// Compute the code stub for this load.
- MaybeObject* maybe_code = NULL;
- Object* code;
+ Handle<Code> code;
if (state == UNINITIALIZED) {
// This is the first time we execute this inline cache.
// Set the target to the pre monomorphic stub to delay
// setting the monomorphic state.
- maybe_code = pre_monomorphic_stub();
+ code = pre_monomorphic_stub();
} else if (!lookup->IsProperty()) {
// Nonexistent property. The result is undefined.
- maybe_code = isolate()->stub_cache()->ComputeLoadNonexistent(*name,
- *receiver);
+ code = isolate()->stub_cache()->ComputeLoadNonexistent(name, receiver);
} else {
// Compute monomorphic stub.
+ Handle<JSObject> holder(lookup->holder());
switch (lookup->type()) {
- case FIELD: {
- maybe_code = isolate()->stub_cache()->ComputeLoadField(
- *name,
- *receiver,
- lookup->holder(),
- lookup->GetFieldIndex());
+ case FIELD:
+ code = isolate()->stub_cache()->ComputeLoadField(
+ name, receiver, holder, lookup->GetFieldIndex());
break;
- }
case CONSTANT_FUNCTION: {
- Object* constant = lookup->GetConstantFunction();
- maybe_code = isolate()->stub_cache()->ComputeLoadConstant(
- *name, *receiver, lookup->holder(), constant);
+ Handle<Object> constant(lookup->GetConstantFunction());
+ code = isolate()->stub_cache()->ComputeLoadConstant(
+ name, receiver, holder, constant);
break;
}
- case NORMAL: {
- if (lookup->holder()->IsGlobalObject()) {
- GlobalObject* global = GlobalObject::cast(lookup->holder());
- JSGlobalPropertyCell* cell =
- JSGlobalPropertyCell::cast(global->GetPropertyCell(lookup));
- maybe_code = isolate()->stub_cache()->ComputeLoadGlobal(*name,
- *receiver,
- global,
- cell,
- lookup->IsDontDelete());
+ case NORMAL:
+ if (holder->IsGlobalObject()) {
+ Handle<GlobalObject> global = Handle<GlobalObject>::cast(holder);
+ Handle<JSGlobalPropertyCell> cell(global->GetPropertyCell(lookup));
+ code = isolate()->stub_cache()->ComputeLoadGlobal(
+ name, receiver, global, cell, lookup->IsDontDelete());
} else {
// There is only one shared stub for loading normalized
// properties. It does not traverse the prototype chain, so the
// property must be found in the receiver for the stub to be
// applicable.
- if (lookup->holder() != *receiver) return;
- maybe_code = isolate()->stub_cache()->ComputeLoadNormal();
+ if (!holder.is_identical_to(receiver)) return;
+ code = isolate()->stub_cache()->ComputeLoadNormal();
}
break;
- }
case CALLBACKS: {
- if (!lookup->GetCallbackObject()->IsAccessorInfo()) return;
- AccessorInfo* callback =
- AccessorInfo::cast(lookup->GetCallbackObject());
+ Handle<Object> callback_object(lookup->GetCallbackObject());
+ if (!callback_object->IsAccessorInfo()) return;
+ Handle<AccessorInfo> callback =
+ Handle<AccessorInfo>::cast(callback_object);
if (v8::ToCData<Address>(callback->getter()) == 0) return;
- maybe_code = isolate()->stub_cache()->ComputeLoadCallback(
- *name, *receiver, lookup->holder(), callback);
+ code = isolate()->stub_cache()->ComputeLoadCallback(
+ name, receiver, holder, callback);
break;
}
- case INTERCEPTOR: {
- ASSERT(HasInterceptorGetter(lookup->holder()));
- maybe_code = isolate()->stub_cache()->ComputeLoadInterceptor(
- *name, *receiver, lookup->holder());
+ case INTERCEPTOR:
+ ASSERT(HasInterceptorGetter(*holder));
+ code = isolate()->stub_cache()->ComputeLoadInterceptor(
+ name, receiver, holder);
break;
- }
default:
return;
}
}
- // If we're unable to compute the stub (not enough memory left), we
- // simply avoid updating the caches.
- if (maybe_code == NULL || !maybe_code->ToObject(&code)) return;
-
// Patch the call site depending on the state of the cache.
- if (state == UNINITIALIZED || state == PREMONOMORPHIC ||
+ if (state == UNINITIALIZED ||
+ state == PREMONOMORPHIC ||
state == MONOMORPHIC_PROTOTYPE_FAILURE) {
- set_target(Code::cast(code));
+ set_target(*code);
} else if (state == MONOMORPHIC) {
- set_target(megamorphic_stub());
+ set_target(*megamorphic_stub());
} else if (state == MEGAMORPHIC) {
// Cache code holding map should be consistent with
// GenerateMonomorphicCacheProbe.
- Map* map = JSObject::cast(object->IsJSObject() ? *object :
- object->GetPrototype())->map();
-
- isolate()->stub_cache()->Set(*name, map, Code::cast(code));
+ isolate()->stub_cache()->Set(*name, receiver->map(), *code);
}
-#ifdef DEBUG
- TraceIC("LoadIC", name, state, target());
-#endif
+ TRACE_IC("LoadIC", name, state, target());
}
-MaybeObject* KeyedLoadIC::GetElementStubWithoutMapCheck(
+Handle<Code> KeyedLoadIC::GetElementStubWithoutMapCheck(
bool is_js_array,
ElementsKind elements_kind) {
- return KeyedLoadElementStub(elements_kind).TryGetCode();
+ return KeyedLoadElementStub(elements_kind).GetCode();
}
-MaybeObject* KeyedLoadIC::ConstructMegamorphicStub(
- MapList* receiver_maps,
- CodeList* targets,
+Handle<Code> KeyedLoadIC::ComputePolymorphicStub(
+ MapHandleList* receiver_maps,
StrictModeFlag strict_mode) {
- Object* object;
- KeyedLoadStubCompiler compiler;
- MaybeObject* maybe_code = compiler.CompileLoadMegamorphic(receiver_maps,
- targets);
- if (!maybe_code->ToObject(&object)) return maybe_code;
+ CodeHandleList handler_ics(receiver_maps->length());
+ for (int i = 0; i < receiver_maps->length(); ++i) {
+ Handle<Map> receiver_map = receiver_maps->at(i);
+ Handle<Code> cached_stub = ComputeMonomorphicStubWithoutMapCheck(
+ receiver_map, strict_mode);
+ handler_ics.Add(cached_stub);
+ }
+ KeyedLoadStubCompiler compiler(isolate());
+ Handle<Code> code = compiler.CompileLoadPolymorphic(
+ receiver_maps, &handler_ics);
isolate()->counters()->keyed_load_polymorphic_stubs()->Increment();
- PROFILE(isolate(), CodeCreateEvent(
- Logger::KEYED_LOAD_MEGAMORPHIC_IC_TAG,
- Code::cast(object), 0));
- return object;
+ PROFILE(isolate(),
+ CodeCreateEvent(Logger::KEYED_LOAD_MEGAMORPHIC_IC_TAG, *code, 0));
+ return code;
}
@@ -1107,9 +1004,8 @@ MaybeObject* KeyedLoadIC::Load(State state,
bool force_generic_stub) {
// Check for values that can be converted into a symbol.
// TODO(1295): Remove this code.
- HandleScope scope(isolate());
if (key->IsHeapNumber() &&
- isnan(HeapNumber::cast(*key)->value())) {
+ isnan(Handle<HeapNumber>::cast(key)->value())) {
key = isolate()->factory()->nan_symbol();
} else if (key->IsUndefined()) {
key = isolate()->factory()->undefined_symbol();
@@ -1131,16 +1027,11 @@ MaybeObject* KeyedLoadIC::Load(State state,
if (object->IsString() &&
name->Equals(isolate()->heap()->length_symbol())) {
Handle<String> string = Handle<String>::cast(object);
- Object* code = NULL;
- { MaybeObject* maybe_code =
- isolate()->stub_cache()->ComputeKeyedLoadStringLength(*name,
- *string);
- if (!maybe_code->ToObject(&code)) return maybe_code;
- }
- set_target(Code::cast(code));
-#ifdef DEBUG
- TraceIC("KeyedLoadIC", name, state, target());
-#endif // DEBUG
+ Handle<Code> code =
+ isolate()->stub_cache()->ComputeKeyedLoadStringLength(name, string);
+ ASSERT(!code.is_null());
+ set_target(*code);
+ TRACE_IC("KeyedLoadIC", name, state, target());
return Smi::FromInt(string->length());
}
@@ -1148,34 +1039,25 @@ MaybeObject* KeyedLoadIC::Load(State state,
if (object->IsJSArray() &&
name->Equals(isolate()->heap()->length_symbol())) {
Handle<JSArray> array = Handle<JSArray>::cast(object);
- Object* code;
- { MaybeObject* maybe_code =
- isolate()->stub_cache()->ComputeKeyedLoadArrayLength(*name,
- *array);
- if (!maybe_code->ToObject(&code)) return maybe_code;
- }
- set_target(Code::cast(code));
-#ifdef DEBUG
- TraceIC("KeyedLoadIC", name, state, target());
-#endif // DEBUG
- return JSArray::cast(*object)->length();
+ Handle<Code> code =
+ isolate()->stub_cache()->ComputeKeyedLoadArrayLength(name, array);
+ ASSERT(!code.is_null());
+ set_target(*code);
+ TRACE_IC("KeyedLoadIC", name, state, target());
+ return array->length();
}
// Use specialized code for getting prototype of functions.
if (object->IsJSFunction() &&
name->Equals(isolate()->heap()->prototype_symbol()) &&
- JSFunction::cast(*object)->should_have_prototype()) {
+ Handle<JSFunction>::cast(object)->should_have_prototype()) {
Handle<JSFunction> function = Handle<JSFunction>::cast(object);
- Object* code;
- { MaybeObject* maybe_code =
- isolate()->stub_cache()->ComputeKeyedLoadFunctionPrototype(
- *name, *function);
- if (!maybe_code->ToObject(&code)) return maybe_code;
- }
- set_target(Code::cast(code));
-#ifdef DEBUG
- TraceIC("KeyedLoadIC", name, state, target());
-#endif // DEBUG
+ Handle<Code> code =
+ isolate()->stub_cache()->ComputeKeyedLoadFunctionPrototype(
+ name, function);
+ ASSERT(!code.is_null());
+ set_target(*code);
+ TRACE_IC("KeyedLoadIC", name, state, target());
return Accessors::FunctionGetPrototype(*object, 0);
}
}
@@ -1184,15 +1066,14 @@ MaybeObject* KeyedLoadIC::Load(State state,
// the element or char if so.
uint32_t index = 0;
if (name->AsArrayIndex(&index)) {
- HandleScope scope(isolate());
// Rewrite to the generic keyed load stub.
- if (FLAG_use_ic) set_target(generic_stub());
+ if (FLAG_use_ic) set_target(*generic_stub());
return Runtime::GetElementOrCharAt(isolate(), object, index);
}
// Named lookup.
- LookupResult lookup;
- LookupForRead(*object, *name, &lookup);
+ LookupResult lookup(isolate());
+ LookupForRead(object, name, &lookup);
// If we did not find a property, check if we need to throw an exception.
if (!lookup.IsProperty() && IsContextual(object)) {
@@ -1206,17 +1087,15 @@ MaybeObject* KeyedLoadIC::Load(State state,
PropertyAttributes attr;
if (lookup.IsProperty() && lookup.type() == INTERCEPTOR) {
// Get the property.
- Object* result;
- { MaybeObject* maybe_result =
- object->GetProperty(*object, &lookup, *name, &attr);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
+ Handle<Object> result =
+ Object::GetProperty(object, object, &lookup, name, &attr);
+ RETURN_IF_EMPTY_HANDLE(isolate(), result);
// If the property is not present, check if we need to throw an
// exception.
if (attr == ABSENT && IsContextual(object)) {
return ReferenceError("not_defined", name);
}
- return result;
+ return *result;
}
return object->GetProperty(*object, &lookup, *name, &attr);
@@ -1227,44 +1106,38 @@ MaybeObject* KeyedLoadIC::Load(State state,
bool use_ic = FLAG_use_ic && !object->IsAccessCheckNeeded();
if (use_ic) {
- Code* stub = generic_stub();
+ Handle<Code> stub = generic_stub();
if (!force_generic_stub) {
if (object->IsString() && key->IsNumber()) {
if (state == UNINITIALIZED) {
stub = string_stub();
}
} else if (object->IsJSObject()) {
- JSObject* receiver = JSObject::cast(*object);
- Heap* heap = Handle<JSObject>::cast(object)->GetHeap();
- Map* elements_map = Handle<JSObject>::cast(object)->elements()->map();
- if (elements_map == heap->non_strict_arguments_elements_map()) {
+ Handle<JSObject> receiver = Handle<JSObject>::cast(object);
+ if (receiver->elements()->map() ==
+ isolate()->heap()->non_strict_arguments_elements_map()) {
stub = non_strict_arguments_stub();
} else if (receiver->HasIndexedInterceptor()) {
stub = indexed_interceptor_stub();
- } else if (key->IsSmi() && (target() != non_strict_arguments_stub())) {
- MaybeObject* maybe_stub = ComputeStub(receiver,
- false,
- kNonStrictMode,
- stub);
- stub = maybe_stub->IsFailure() ?
- NULL : Code::cast(maybe_stub->ToObjectUnchecked());
+ } else if (key->IsSmi() && (target() != *non_strict_arguments_stub())) {
+ stub = ComputeStub(receiver, LOAD, kNonStrictMode, stub);
}
}
}
- if (stub != NULL) set_target(stub);
+ if (!stub.is_null()) set_target(*stub);
}
-#ifdef DEBUG
- TraceIC("KeyedLoadIC", key, state, target());
-#endif // DEBUG
+ TRACE_IC("KeyedLoadIC", key, state, target());
// Get the property.
return Runtime::GetObjectProperty(isolate(), object, key);
}
-void KeyedLoadIC::UpdateCaches(LookupResult* lookup, State state,
- Handle<Object> object, Handle<String> name) {
+void KeyedLoadIC::UpdateCaches(LookupResult* lookup,
+ State state,
+ Handle<Object> object,
+ Handle<String> name) {
// Bail out if we didn't find a result.
if (!lookup->IsProperty() || !lookup->IsCacheable()) return;
@@ -1274,68 +1147,60 @@ void KeyedLoadIC::UpdateCaches(LookupResult* lookup, State state,
if (HasNormalObjectsInPrototypeChain(isolate(), lookup, *object)) return;
// Compute the code stub for this load.
- MaybeObject* maybe_code = NULL;
- Object* code;
+ Handle<Code> code;
if (state == UNINITIALIZED) {
// This is the first time we execute this inline cache.
// Set the target to the pre monomorphic stub to delay
// setting the monomorphic state.
- maybe_code = pre_monomorphic_stub();
+ code = pre_monomorphic_stub();
} else {
// Compute a monomorphic stub.
+ Handle<JSObject> holder(lookup->holder());
switch (lookup->type()) {
- case FIELD: {
- maybe_code = isolate()->stub_cache()->ComputeKeyedLoadField(
- *name, *receiver, lookup->holder(), lookup->GetFieldIndex());
+ case FIELD:
+ code = isolate()->stub_cache()->ComputeKeyedLoadField(
+ name, receiver, holder, lookup->GetFieldIndex());
break;
- }
case CONSTANT_FUNCTION: {
- Object* constant = lookup->GetConstantFunction();
- maybe_code = isolate()->stub_cache()->ComputeKeyedLoadConstant(
- *name, *receiver, lookup->holder(), constant);
+ Handle<Object> constant(lookup->GetConstantFunction());
+ code = isolate()->stub_cache()->ComputeKeyedLoadConstant(
+ name, receiver, holder, constant);
break;
}
case CALLBACKS: {
- if (!lookup->GetCallbackObject()->IsAccessorInfo()) return;
- AccessorInfo* callback =
- AccessorInfo::cast(lookup->GetCallbackObject());
+ Handle<Object> callback_object(lookup->GetCallbackObject());
+ if (!callback_object->IsAccessorInfo()) return;
+ Handle<AccessorInfo> callback =
+ Handle<AccessorInfo>::cast(callback_object);
if (v8::ToCData<Address>(callback->getter()) == 0) return;
- maybe_code = isolate()->stub_cache()->ComputeKeyedLoadCallback(
- *name, *receiver, lookup->holder(), callback);
+ code = isolate()->stub_cache()->ComputeKeyedLoadCallback(
+ name, receiver, holder, callback);
break;
}
- case INTERCEPTOR: {
+ case INTERCEPTOR:
ASSERT(HasInterceptorGetter(lookup->holder()));
- maybe_code = isolate()->stub_cache()->ComputeKeyedLoadInterceptor(
- *name, *receiver, lookup->holder());
+ code = isolate()->stub_cache()->ComputeKeyedLoadInterceptor(
+ name, receiver, holder);
break;
- }
- default: {
+ default:
// Always rewrite to the generic case so that we do not
// repeatedly try to rewrite.
- maybe_code = generic_stub();
+ code = generic_stub();
break;
- }
}
}
- // If we're unable to compute the stub (not enough memory left), we
- // simply avoid updating the caches.
- if (maybe_code == NULL || !maybe_code->ToObject(&code)) return;
-
// Patch the call site depending on the state of the cache. Make
// sure to always rewrite from monomorphic to megamorphic.
ASSERT(state != MONOMORPHIC_PROTOTYPE_FAILURE);
if (state == UNINITIALIZED || state == PREMONOMORPHIC) {
- set_target(Code::cast(code));
+ set_target(*code);
} else if (state == MONOMORPHIC) {
- set_target(megamorphic_stub());
+ set_target(*megamorphic_stub());
}
-#ifdef DEBUG
- TraceIC("KeyedLoadIC", name, state, target());
-#endif
+ TRACE_IC("KeyedLoadIC", name, state, target());
}
@@ -1351,20 +1216,18 @@ static bool StoreICableLookup(LookupResult* lookup) {
}
-static bool LookupForWrite(JSReceiver* receiver,
- String* name,
+static bool LookupForWrite(Handle<JSObject> receiver,
+ Handle<String> name,
LookupResult* lookup) {
- receiver->LocalLookup(name, lookup);
+ receiver->LocalLookup(*name, lookup);
if (!StoreICableLookup(lookup)) {
return false;
}
- if (lookup->type() == INTERCEPTOR) {
- JSObject* object = JSObject::cast(receiver);
- if (object->GetNamedInterceptor()->setter()->IsUndefined()) {
- object->LocalLookupRealNamedProperty(name, lookup);
- return StoreICableLookup(lookup);
- }
+ if (lookup->type() == INTERCEPTOR &&
+ receiver->GetNamedInterceptor()->setter()->IsUndefined()) {
+ receiver->LocalLookupRealNamedProperty(*name, lookup);
+ return StoreICableLookup(lookup);
}
return true;
@@ -1376,58 +1239,58 @@ MaybeObject* StoreIC::Store(State state,
Handle<Object> object,
Handle<String> name,
Handle<Object> value) {
- // If the object is undefined or null it's illegal to try to set any
- // properties on it; throw a TypeError in that case.
- if (object->IsUndefined() || object->IsNull()) {
- return TypeError("non_object_property_store", object, name);
- }
+ if (!object->IsJSObject()) {
+ // Handle proxies.
+ if (object->IsJSProxy()) {
+ return JSProxy::cast(*object)->
+ SetProperty(*name, *value, NONE, strict_mode);
+ }
+
+ // If the object is undefined or null it's illegal to try to set any
+ // properties on it; throw a TypeError in that case.
+ if (object->IsUndefined() || object->IsNull()) {
+ return TypeError("non_object_property_store", object, name);
+ }
- if (!object->IsJSReceiver()) {
// The length property of string values is read-only. Throw in strict mode.
if (strict_mode == kStrictMode && object->IsString() &&
name->Equals(isolate()->heap()->length_symbol())) {
return TypeError("strict_read_only_property", object, name);
}
- // Ignore stores where the receiver is not a JSObject.
+ // Ignore other stores where the receiver is not a JSObject.
+ // TODO(1475): Must check prototype chains of object wrappers.
return *value;
}
- // Handle proxies.
- if (object->IsJSProxy()) {
- return JSReceiver::cast(*object)->
- SetProperty(*name, *value, NONE, strict_mode);
- }
-
Handle<JSObject> receiver = Handle<JSObject>::cast(object);
// Check if the given name is an array index.
uint32_t index;
if (name->AsArrayIndex(&index)) {
- HandleScope scope(isolate());
Handle<Object> result = SetElement(receiver, index, value, strict_mode);
- if (result.is_null()) return Failure::Exception();
+ RETURN_IF_EMPTY_HANDLE(isolate(), result);
return *value;
}
// Use specialized code for setting the length of arrays.
if (receiver->IsJSArray()
&& name->Equals(isolate()->heap()->length_symbol())
- && JSArray::cast(*receiver)->AllowsSetElementsLength()) {
+ && Handle<JSArray>::cast(receiver)->AllowsSetElementsLength()) {
#ifdef DEBUG
if (FLAG_trace_ic) PrintF("[StoreIC : +#length /array]\n");
#endif
- Builtins::Name target = (strict_mode == kStrictMode)
- ? Builtins::kStoreIC_ArrayLength_Strict
- : Builtins::kStoreIC_ArrayLength;
- set_target(isolate()->builtins()->builtin(target));
+ Handle<Code> stub = (strict_mode == kStrictMode)
+ ? isolate()->builtins()->StoreIC_ArrayLength_Strict()
+ : isolate()->builtins()->StoreIC_ArrayLength();
+ set_target(*stub);
return receiver->SetProperty(*name, *value, NONE, strict_mode);
}
// Lookup the property locally in the receiver.
if (FLAG_use_ic && !receiver->IsJSGlobalProxy()) {
- LookupResult lookup;
+ LookupResult lookup(isolate());
- if (LookupForWrite(*receiver, *name, &lookup)) {
+ if (LookupForWrite(receiver, name, &lookup)) {
// Generate a stub for this store.
UpdateCaches(&lookup, state, strict_mode, receiver, name, value);
} else {
@@ -1444,16 +1307,15 @@ MaybeObject* StoreIC::Store(State state,
}
if (receiver->IsJSGlobalProxy()) {
+ // TODO(ulan): find out why we patch this site even with --no-use-ic
// Generate a generic stub that goes to the runtime when we see a global
// proxy as receiver.
- Code* stub = (strict_mode == kStrictMode)
+ Handle<Code> stub = (strict_mode == kStrictMode)
? global_proxy_stub_strict()
: global_proxy_stub();
- if (target() != stub) {
- set_target(stub);
-#ifdef DEBUG
- TraceIC("StoreIC", name, state, target());
-#endif
+ if (target() != *stub) {
+ set_target(*stub);
+ TRACE_IC("StoreIC", name, state, target());
}
}
@@ -1468,10 +1330,12 @@ void StoreIC::UpdateCaches(LookupResult* lookup,
Handle<JSObject> receiver,
Handle<String> name,
Handle<Object> value) {
- // Skip JSGlobalProxy.
ASSERT(!receiver->IsJSGlobalProxy());
-
ASSERT(StoreICableLookup(lookup));
+ // These are not cacheable, so we never see such LookupResults here.
+ ASSERT(lookup->type() != HANDLER);
+  // We only get called for properties or transitions; see StoreICableLookup.
+ ASSERT(lookup->type() != NULL_DESCRIPTOR);
// If the property has a non-field type allowing map transitions
// where there is extra room in the object, we leave the IC in its
@@ -1481,89 +1345,87 @@ void StoreIC::UpdateCaches(LookupResult* lookup,
// Compute the code stub for this store; used for rewriting to
// monomorphic state and making sure that the code stub is in the
// stub cache.
- MaybeObject* maybe_code = NULL;
- Object* code = NULL;
+ Handle<Code> code;
switch (type) {
- case FIELD: {
- maybe_code = isolate()->stub_cache()->ComputeStoreField(
- *name, *receiver, lookup->GetFieldIndex(), NULL, strict_mode);
+ case FIELD:
+ code = isolate()->stub_cache()->ComputeStoreField(name,
+ receiver,
+ lookup->GetFieldIndex(),
+ Handle<Map>::null(),
+ strict_mode);
break;
- }
case MAP_TRANSITION: {
if (lookup->GetAttributes() != NONE) return;
- HandleScope scope(isolate());
- ASSERT(type == MAP_TRANSITION);
Handle<Map> transition(lookup->GetTransitionMap());
int index = transition->PropertyIndexFor(*name);
- maybe_code = isolate()->stub_cache()->ComputeStoreField(
- *name, *receiver, index, *transition, strict_mode);
+ code = isolate()->stub_cache()->ComputeStoreField(
+ name, receiver, index, transition, strict_mode);
break;
}
- case NORMAL: {
+ case NORMAL:
if (receiver->IsGlobalObject()) {
// The stub generated for the global object picks the value directly
// from the property cell. So the property must be directly on the
// global object.
Handle<GlobalObject> global = Handle<GlobalObject>::cast(receiver);
- JSGlobalPropertyCell* cell =
- JSGlobalPropertyCell::cast(global->GetPropertyCell(lookup));
- maybe_code = isolate()->stub_cache()->ComputeStoreGlobal(
- *name, *global, cell, strict_mode);
+ Handle<JSGlobalPropertyCell> cell(global->GetPropertyCell(lookup));
+ code = isolate()->stub_cache()->ComputeStoreGlobal(
+ name, global, cell, strict_mode);
} else {
if (lookup->holder() != *receiver) return;
- maybe_code = isolate()->stub_cache()->ComputeStoreNormal(strict_mode);
+ code = isolate()->stub_cache()->ComputeStoreNormal(strict_mode);
}
break;
- }
case CALLBACKS: {
- if (!lookup->GetCallbackObject()->IsAccessorInfo()) return;
- AccessorInfo* callback = AccessorInfo::cast(lookup->GetCallbackObject());
+ Handle<Object> callback_object(lookup->GetCallbackObject());
+ if (!callback_object->IsAccessorInfo()) return;
+ Handle<AccessorInfo> callback =
+ Handle<AccessorInfo>::cast(callback_object);
if (v8::ToCData<Address>(callback->setter()) == 0) return;
- maybe_code = isolate()->stub_cache()->ComputeStoreCallback(
- *name, *receiver, callback, strict_mode);
+ code = isolate()->stub_cache()->ComputeStoreCallback(
+ name, receiver, callback, strict_mode);
break;
}
- case INTERCEPTOR: {
+ case INTERCEPTOR:
ASSERT(!receiver->GetNamedInterceptor()->setter()->IsUndefined());
- maybe_code = isolate()->stub_cache()->ComputeStoreInterceptor(
- *name, *receiver, strict_mode);
+ code = isolate()->stub_cache()->ComputeStoreInterceptor(
+ name, receiver, strict_mode);
break;
- }
- default:
+ case CONSTANT_FUNCTION:
+ case CONSTANT_TRANSITION:
+ case ELEMENTS_TRANSITION:
+ return;
+ case HANDLER:
+ case NULL_DESCRIPTOR:
+ UNREACHABLE();
return;
}
- // If we're unable to compute the stub (not enough memory left), we
- // simply avoid updating the caches.
- if (maybe_code == NULL || !maybe_code->ToObject(&code)) return;
-
// Patch the call site depending on the state of the cache.
if (state == UNINITIALIZED || state == MONOMORPHIC_PROTOTYPE_FAILURE) {
- set_target(Code::cast(code));
+ set_target(*code);
} else if (state == MONOMORPHIC) {
// Only move to megamorphic if the target changes.
- if (target() != Code::cast(code)) {
+ if (target() != *code) {
set_target((strict_mode == kStrictMode)
? megamorphic_stub_strict()
: megamorphic_stub());
}
} else if (state == MEGAMORPHIC) {
// Update the stub cache.
- isolate()->stub_cache()->Set(*name,
- receiver->map(),
- Code::cast(code));
+ isolate()->stub_cache()->Set(*name, receiver->map(), *code);
}
-#ifdef DEBUG
- TraceIC("StoreIC", name, state, target());
-#endif
+ TRACE_IC("StoreIC", name, state, target());
}
-static bool AddOneReceiverMapIfMissing(MapList* receiver_maps,
- Map* new_receiver_map) {
+static bool AddOneReceiverMapIfMissing(MapHandleList* receiver_maps,
+ Handle<Map> new_receiver_map) {
+ ASSERT(!new_receiver_map.is_null());
for (int current = 0; current < receiver_maps->length(); ++current) {
- if (receiver_maps->at(current) == new_receiver_map) {
+ if (!receiver_maps->at(current).is_null() &&
+ receiver_maps->at(current).is_identical_to(new_receiver_map)) {
return false;
}
}
@@ -1572,44 +1434,40 @@ static bool AddOneReceiverMapIfMissing(MapList* receiver_maps,
}
-void KeyedIC::GetReceiverMapsForStub(Code* stub, MapList* result) {
+void KeyedIC::GetReceiverMapsForStub(Handle<Code> stub,
+ MapHandleList* result) {
ASSERT(stub->is_inline_cache_stub());
- if (stub == string_stub()) {
- return result->Add(isolate()->heap()->string_map());
+ if (!string_stub().is_null() && stub.is_identical_to(string_stub())) {
+ return result->Add(isolate()->factory()->string_map());
} else if (stub->is_keyed_load_stub() || stub->is_keyed_store_stub()) {
if (stub->ic_state() == MONOMORPHIC) {
- result->Add(Map::cast(stub->FindFirstMap()));
+ result->Add(Handle<Map>(stub->FindFirstMap()));
} else {
ASSERT(stub->ic_state() == MEGAMORPHIC);
AssertNoAllocation no_allocation;
int mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
- for (RelocIterator it(stub, mask); !it.done(); it.next()) {
+ for (RelocIterator it(*stub, mask); !it.done(); it.next()) {
RelocInfo* info = it.rinfo();
- Object* object = info->target_object();
+ Handle<Object> object(info->target_object());
ASSERT(object->IsMap());
- result->Add(Map::cast(object));
+ AddOneReceiverMapIfMissing(result, Handle<Map>::cast(object));
}
}
}
}
-MaybeObject* KeyedIC::ComputeStub(JSObject* receiver,
- bool is_store,
+Handle<Code> KeyedIC::ComputeStub(Handle<JSObject> receiver,
+ StubKind stub_kind,
StrictModeFlag strict_mode,
- Code* generic_stub) {
+ Handle<Code> generic_stub) {
State ic_state = target()->ic_state();
- if (ic_state == UNINITIALIZED || ic_state == PREMONOMORPHIC) {
- Code* monomorphic_stub;
- MaybeObject* maybe_stub = ComputeMonomorphicStub(receiver,
- is_store,
- strict_mode,
- generic_stub);
- if (!maybe_stub->To(&monomorphic_stub)) return maybe_stub;
-
- return monomorphic_stub;
+ if ((ic_state == UNINITIALIZED || ic_state == PREMONOMORPHIC) &&
+ !IsTransitionStubKind(stub_kind)) {
+ return ComputeMonomorphicStub(
+ receiver, stub_kind, strict_mode, generic_stub);
}
- ASSERT(target() != generic_stub);
+ ASSERT(target() != *generic_stub);
// Don't handle megamorphic property accesses for INTERCEPTORS or CALLBACKS
// via megamorphic stubs, since they don't have a map in their relocation info
@@ -1620,10 +1478,21 @@ MaybeObject* KeyedIC::ComputeStub(JSObject* receiver,
// Determine the list of receiver maps that this call site has seen,
// adding the map that was just encountered.
- MapList target_receiver_maps;
- GetReceiverMapsForStub(target(), &target_receiver_maps);
- if (!AddOneReceiverMapIfMissing(&target_receiver_maps, receiver->map())) {
- // If the miss wasn't due to an unseen map, a MEGAMORPHIC stub
+ MapHandleList target_receiver_maps;
+ Handle<Map> receiver_map(receiver->map());
+ if (ic_state == UNINITIALIZED || ic_state == PREMONOMORPHIC) {
+ target_receiver_maps.Add(receiver_map);
+ } else {
+ GetReceiverMapsForStub(Handle<Code>(target()), &target_receiver_maps);
+ }
+ bool map_added =
+ AddOneReceiverMapIfMissing(&target_receiver_maps, receiver_map);
+ if (IsTransitionStubKind(stub_kind)) {
+ Handle<Map> new_map = ComputeTransitionedMap(receiver, stub_kind);
+ map_added |= AddOneReceiverMapIfMissing(&target_receiver_maps, new_map);
+ }
+ if (!map_added) {
+ // If the miss wasn't due to an unseen map, a polymorphic stub
     // won't help; use the generic stub.
return generic_stub;
}
@@ -1634,47 +1503,29 @@ MaybeObject* KeyedIC::ComputeStub(JSObject* receiver,
return generic_stub;
}
- PolymorphicCodeCache* cache = isolate()->heap()->polymorphic_code_cache();
- Code::Flags flags = Code::ComputeFlags(this->kind(),
- MEGAMORPHIC,
- strict_mode);
- Object* maybe_cached_stub = cache->Lookup(&target_receiver_maps, flags);
- // If there is a cached stub, use it.
- if (!maybe_cached_stub->IsUndefined()) {
- ASSERT(maybe_cached_stub->IsCode());
- return Code::cast(maybe_cached_stub);
- }
- // Collect MONOMORPHIC stubs for all target_receiver_maps.
- CodeList handler_ics(target_receiver_maps.length());
- for (int i = 0; i < target_receiver_maps.length(); ++i) {
- Map* receiver_map(target_receiver_maps.at(i));
- MaybeObject* maybe_cached_stub = ComputeMonomorphicStubWithoutMapCheck(
- receiver_map, strict_mode);
- Code* cached_stub;
- if (!maybe_cached_stub->To(&cached_stub)) return maybe_cached_stub;
- handler_ics.Add(cached_stub);
- }
- // Build the MEGAMORPHIC stub.
- Code* stub;
- MaybeObject* maybe_stub = ConstructMegamorphicStub(&target_receiver_maps,
- &handler_ics,
- strict_mode);
- if (!maybe_stub->To(&stub)) return maybe_stub;
- MaybeObject* maybe_update = cache->Update(&target_receiver_maps, flags, stub);
- if (maybe_update->IsFailure()) return maybe_update;
+ Handle<PolymorphicCodeCache> cache =
+ isolate()->factory()->polymorphic_code_cache();
+ Code::Flags flags = Code::ComputeFlags(kind(), MEGAMORPHIC, strict_mode);
+ Handle<Object> probe = cache->Lookup(&target_receiver_maps, flags);
+ if (probe->IsCode()) return Handle<Code>::cast(probe);
+
+ Handle<Code> stub =
+ ComputePolymorphicStub(&target_receiver_maps, strict_mode);
+ PolymorphicCodeCache::Update(cache, &target_receiver_maps, flags, stub);
return stub;
}
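
ComputeStub now works entirely on handles: it collects every receiver map the call site has seen, falls back to the generic stub when no new map was learned or the polymorphic limit is exceeded, and compiles only after missing in the polymorphic code cache. A rough sketch of the probe-then-compile step, assuming a simplified cache keyed on a sorted list of map ids instead of the real (map list, Code::Flags) key:

    #include <algorithm>
    #include <map>
    #include <vector>

    typedef int MapId;   // stand-in for a Handle<Map>
    typedef int StubId;  // stand-in for a Handle<Code>

    static std::map<std::vector<MapId>, StubId> polymorphic_cache;

    StubId ComputeOrLookup(std::vector<MapId> maps, StubId (*compile)()) {
      std::sort(maps.begin(), maps.end());  // canonicalize the cache key
      std::map<std::vector<MapId>, StubId>::iterator it =
          polymorphic_cache.find(maps);
      if (it != polymorphic_cache.end()) return it->second;  // cache hit
      StubId stub = compile();              // compile once for this map set
      polymorphic_cache[maps] = stub;       // share it with other call sites
      return stub;
    }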
-MaybeObject* KeyedIC::ComputeMonomorphicStubWithoutMapCheck(
- Map* receiver_map,
+Handle<Code> KeyedIC::ComputeMonomorphicStubWithoutMapCheck(
+ Handle<Map> receiver_map,
StrictModeFlag strict_mode) {
if ((receiver_map->instance_type() & kNotStringTag) == 0) {
- ASSERT(string_stub() != NULL);
+ ASSERT(!string_stub().is_null());
return string_stub();
} else {
ASSERT(receiver_map->has_dictionary_elements() ||
receiver_map->has_fast_elements() ||
+ receiver_map->has_fast_smi_only_elements() ||
receiver_map->has_fast_double_elements() ||
receiver_map->has_external_array_elements());
bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
@@ -1684,47 +1535,78 @@ MaybeObject* KeyedIC::ComputeMonomorphicStubWithoutMapCheck(
}
-MaybeObject* KeyedIC::ComputeMonomorphicStub(JSObject* receiver,
- bool is_store,
+Handle<Code> KeyedIC::ComputeMonomorphicStub(Handle<JSObject> receiver,
+ StubKind stub_kind,
StrictModeFlag strict_mode,
- Code* generic_stub) {
- Code* result = NULL;
+ Handle<Code> generic_stub) {
if (receiver->HasFastElements() ||
+ receiver->HasFastSmiOnlyElements() ||
receiver->HasExternalArrayElements() ||
receiver->HasFastDoubleElements() ||
receiver->HasDictionaryElements()) {
- MaybeObject* maybe_stub =
- isolate()->stub_cache()->ComputeKeyedLoadOrStoreElement(
- receiver, is_store, strict_mode);
- if (!maybe_stub->To(&result)) return maybe_stub;
+ return isolate()->stub_cache()->ComputeKeyedLoadOrStoreElement(
+ receiver, stub_kind, strict_mode);
} else {
- result = generic_stub;
+ return generic_stub;
+ }
+}
+
+
+Handle<Map> KeyedIC::ComputeTransitionedMap(Handle<JSObject> receiver,
+ StubKind stub_kind) {
+ switch (stub_kind) {
+ case KeyedIC::STORE_TRANSITION_SMI_TO_OBJECT:
+ case KeyedIC::STORE_TRANSITION_DOUBLE_TO_OBJECT:
+ return JSObject::GetElementsTransitionMap(receiver, FAST_ELEMENTS);
+ case KeyedIC::STORE_TRANSITION_SMI_TO_DOUBLE:
+ return JSObject::GetElementsTransitionMap(receiver, FAST_DOUBLE_ELEMENTS);
+ default:
+ UNREACHABLE();
+ return Handle<Map>::null();
}
- return result;
}
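
ComputeTransitionedMap encodes the one-way generalization order of the fast elements kinds: FAST_SMI_ONLY_ELEMENTS may widen to FAST_DOUBLE_ELEMENTS or FAST_ELEMENTS, FAST_DOUBLE_ELEMENTS only to FAST_ELEMENTS, and nothing ever narrows back. A small sketch of that lattice with illustrative enumerators (not the real ElementsKind values):

    enum Kind { SMI_ONLY, DOUBLE, OBJECT };

    // Widen the elements kind just enough to hold the incoming value: smis
    // fit everywhere, heap numbers force at least DOUBLE, and any other heap
    // object forces OBJECT. The result is never narrower than the input.
    Kind WidenFor(Kind from, bool is_smi, bool is_heap_number) {
      if (is_smi) return from;
      if (is_heap_number) return from == OBJECT ? OBJECT : DOUBLE;
      return OBJECT;
    }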
-MaybeObject* KeyedStoreIC::GetElementStubWithoutMapCheck(
+Handle<Code> KeyedStoreIC::GetElementStubWithoutMapCheck(
bool is_js_array,
ElementsKind elements_kind) {
- return KeyedStoreElementStub(is_js_array, elements_kind).TryGetCode();
+ return KeyedStoreElementStub(is_js_array, elements_kind).GetCode();
}
-MaybeObject* KeyedStoreIC::ConstructMegamorphicStub(
- MapList* receiver_maps,
- CodeList* targets,
- StrictModeFlag strict_mode) {
- Object* object;
- KeyedStoreStubCompiler compiler(strict_mode);
- MaybeObject* maybe_code = compiler.CompileStoreMegamorphic(receiver_maps,
- targets);
- if (!maybe_code->ToObject(&object)) return maybe_code;
+Handle<Code> KeyedStoreIC::ComputePolymorphicStub(MapHandleList* receiver_maps,
+ StrictModeFlag strict_mode) {
+ // Collect MONOMORPHIC stubs for all target_receiver_maps.
+ CodeHandleList handler_ics(receiver_maps->length());
+ MapHandleList transitioned_maps(receiver_maps->length());
+ for (int i = 0; i < receiver_maps->length(); ++i) {
+ Handle<Map> receiver_map(receiver_maps->at(i));
+ Handle<Code> cached_stub;
+ Handle<Map> transitioned_map =
+ receiver_map->FindTransitionedMap(receiver_maps);
+ if (!transitioned_map.is_null()) {
+ cached_stub = ElementsTransitionAndStoreStub(
+ receiver_map->elements_kind(), // original elements_kind
+ transitioned_map->elements_kind(),
+ receiver_map->instance_type() == JS_ARRAY_TYPE, // is_js_array
+ strict_mode).GetCode();
+ } else {
+ cached_stub = ComputeMonomorphicStubWithoutMapCheck(receiver_map,
+ strict_mode);
+ }
+ ASSERT(!cached_stub.is_null());
+ handler_ics.Add(cached_stub);
+ transitioned_maps.Add(transitioned_map);
+ }
+ KeyedStoreStubCompiler compiler(isolate(), strict_mode);
+ Handle<Code> code = compiler.CompileStorePolymorphic(
+ receiver_maps, &handler_ics, &transitioned_maps);
isolate()->counters()->keyed_store_polymorphic_stubs()->Increment();
- PROFILE(isolate(), CodeCreateEvent(
- Logger::KEYED_STORE_MEGAMORPHIC_IC_TAG,
- Code::cast(object), 0));
- return object;
+ PROFILE(isolate(),
+ CodeCreateEvent(Logger::KEYED_STORE_MEGAMORPHIC_IC_TAG, *code, 0));
+ return code;
}
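
ComputePolymorphicStub keeps handler_ics and transitioned_maps index-aligned with receiver_maps: entry i holds the stub to run when the receiver has map i, plus the map to migrate to first (or a null handle when no transition is needed). A dispatch sketch of that pairing, with stand-in types for the real Map and Code handles:

    #include <cstddef>

    struct Map {};
    struct Object { Map* map; };
    struct Handler { void Store(Object*) { /* specialized store code */ } };

    // Probe the seen maps in order; on a hit, migrate the receiver if a
    // transition target was recorded for that entry, then run the handler.
    bool Dispatch(Object* r, int n, Map* maps[], Handler* handlers[],
                  Map* transitions[]) {
      for (int i = 0; i < n; i++) {
        if (r->map != maps[i]) continue;
        if (transitions[i] != NULL) r->map = transitions[i];  // migrate first
        handlers[i]->Store(r);
        return true;
      }
      return false;  // unseen map: miss into the runtime again
    }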
@@ -1737,6 +1619,12 @@ MaybeObject* KeyedStoreIC::Store(State state,
if (key->IsSymbol()) {
Handle<String> name = Handle<String>::cast(key);
+ // Handle proxies.
+ if (object->IsJSProxy()) {
+ return JSProxy::cast(*object)->SetProperty(
+ *name, *value, NONE, strict_mode);
+ }
+
// If the object is undefined or null it's illegal to try to set any
// properties on it; throw a TypeError in that case.
if (object->IsUndefined() || object->IsNull()) {
@@ -1750,19 +1638,17 @@ MaybeObject* KeyedStoreIC::Store(State state,
// Check if the given name is an array index.
uint32_t index;
if (name->AsArrayIndex(&index)) {
- HandleScope scope(isolate());
Handle<Object> result = SetElement(receiver, index, value, strict_mode);
- if (result.is_null()) return Failure::Exception();
+ RETURN_IF_EMPTY_HANDLE(isolate(), result);
return *value;
}
- // Lookup the property locally in the receiver.
- LookupResult lookup;
- receiver->LocalLookup(*name, &lookup);
-
// Update inline cache and stub cache.
- if (FLAG_use_ic) {
- UpdateCaches(&lookup, state, strict_mode, receiver, name, value);
+ if (FLAG_use_ic && !receiver->IsJSGlobalProxy()) {
+ LookupResult lookup(isolate());
+ if (LookupForWrite(receiver, name, &lookup)) {
+ UpdateCaches(&lookup, state, strict_mode, receiver, name, value);
+ }
}
// Set the property.
@@ -1775,33 +1661,36 @@ MaybeObject* KeyedStoreIC::Store(State state,
ASSERT(!(use_ic && object->IsJSGlobalProxy()));
if (use_ic) {
- Code* stub = (strict_mode == kStrictMode)
+ Handle<Code> stub = (strict_mode == kStrictMode)
? generic_stub_strict()
: generic_stub();
if (object->IsJSObject()) {
- JSObject* receiver = JSObject::cast(*object);
- Heap* heap = Handle<JSObject>::cast(object)->GetHeap();
- Map* elements_map = Handle<JSObject>::cast(object)->elements()->map();
- if (elements_map == heap->non_strict_arguments_elements_map()) {
+ Handle<JSObject> receiver = Handle<JSObject>::cast(object);
+ if (receiver->elements()->map() ==
+ isolate()->heap()->non_strict_arguments_elements_map()) {
stub = non_strict_arguments_stub();
} else if (!force_generic) {
- if (key->IsSmi() && (target() != non_strict_arguments_stub())) {
- HandleScope scope(isolate());
- MaybeObject* maybe_stub = ComputeStub(receiver,
- true,
- strict_mode,
- stub);
- stub = maybe_stub->IsFailure() ?
- NULL : Code::cast(maybe_stub->ToObjectUnchecked());
+ if (key->IsSmi() && (target() != *non_strict_arguments_stub())) {
+ StubKind stub_kind = STORE_NO_TRANSITION;
+ if (receiver->GetElementsKind() == FAST_SMI_ONLY_ELEMENTS) {
+ if (value->IsHeapNumber()) {
+ stub_kind = STORE_TRANSITION_SMI_TO_DOUBLE;
+ } else if (value->IsHeapObject()) {
+ stub_kind = STORE_TRANSITION_SMI_TO_OBJECT;
+ }
+ } else if (receiver->GetElementsKind() == FAST_DOUBLE_ELEMENTS) {
+ if (!value->IsSmi() && !value->IsHeapNumber()) {
+ stub_kind = STORE_TRANSITION_DOUBLE_TO_OBJECT;
+ }
+ }
+ stub = ComputeStub(receiver, stub_kind, strict_mode, stub);
}
}
}
- if (stub != NULL) set_target(stub);
+ if (!stub.is_null()) set_target(*stub);
}
-#ifdef DEBUG
- TraceIC("KeyedStoreIC", key, state, target());
-#endif
+ TRACE_IC("KeyedStoreIC", key, state, target());
// Set the property.
return Runtime::SetObjectProperty(
@@ -1815,15 +1704,12 @@ void KeyedStoreIC::UpdateCaches(LookupResult* lookup,
Handle<JSObject> receiver,
Handle<String> name,
Handle<Object> value) {
- // Skip JSGlobalProxy.
- if (receiver->IsJSGlobalProxy()) return;
-
- // Bail out if we didn't find a result.
- if (!lookup->IsPropertyOrTransition() || !lookup->IsCacheable()) return;
-
- // If the property is read-only, we leave the IC in its current
- // state.
- if (lookup->IsReadOnly()) return;
+ ASSERT(!receiver->IsJSGlobalProxy());
+ ASSERT(StoreICableLookup(lookup));
+ // These are not cacheable, so we never see such LookupResults here.
+ ASSERT(lookup->type() != HANDLER);
+  // We only get called for properties or transitions; see StoreICableLookup.
+ ASSERT(lookup->type() != NULL_DESCRIPTOR);
// If the property has a non-field type allowing map transitions
// where there is extra room in the object, we leave the IC in its
@@ -1833,75 +1719,68 @@ void KeyedStoreIC::UpdateCaches(LookupResult* lookup,
// Compute the code stub for this store; used for rewriting to
// monomorphic state and making sure that the code stub is in the
// stub cache.
- MaybeObject* maybe_code = NULL;
- Object* code = NULL;
+ Handle<Code> code;
switch (type) {
- case FIELD: {
- maybe_code = isolate()->stub_cache()->ComputeKeyedStoreField(
- *name, *receiver, lookup->GetFieldIndex(), NULL, strict_mode);
+ case FIELD:
+ code = isolate()->stub_cache()->ComputeKeyedStoreField(
+ name, receiver, lookup->GetFieldIndex(),
+ Handle<Map>::null(), strict_mode);
break;
- }
- case MAP_TRANSITION: {
+ case MAP_TRANSITION:
if (lookup->GetAttributes() == NONE) {
- HandleScope scope(isolate());
- ASSERT(type == MAP_TRANSITION);
Handle<Map> transition(lookup->GetTransitionMap());
int index = transition->PropertyIndexFor(*name);
- maybe_code = isolate()->stub_cache()->ComputeKeyedStoreField(
- *name, *receiver, index, *transition, strict_mode);
+ code = isolate()->stub_cache()->ComputeKeyedStoreField(
+ name, receiver, index, transition, strict_mode);
break;
}
// fall through.
- }
- default: {
+ case NORMAL:
+ case CONSTANT_FUNCTION:
+ case CALLBACKS:
+ case INTERCEPTOR:
+ case CONSTANT_TRANSITION:
+ case ELEMENTS_TRANSITION:
// Always rewrite to the generic case so that we do not
// repeatedly try to rewrite.
- maybe_code = (strict_mode == kStrictMode)
+ code = (strict_mode == kStrictMode)
? generic_stub_strict()
: generic_stub();
break;
- }
+ case HANDLER:
+ case NULL_DESCRIPTOR:
+ UNREACHABLE();
+ return;
}
- // If we're unable to compute the stub (not enough memory left), we
- // simply avoid updating the caches.
- if (maybe_code == NULL || !maybe_code->ToObject(&code)) return;
+ ASSERT(!code.is_null());
// Patch the call site depending on the state of the cache. Make
// sure to always rewrite from monomorphic to megamorphic.
ASSERT(state != MONOMORPHIC_PROTOTYPE_FAILURE);
if (state == UNINITIALIZED || state == PREMONOMORPHIC) {
- set_target(Code::cast(code));
+ set_target(*code);
} else if (state == MONOMORPHIC) {
set_target((strict_mode == kStrictMode)
- ? megamorphic_stub_strict()
- : megamorphic_stub());
+ ? *megamorphic_stub_strict()
+ : *megamorphic_stub());
}
-#ifdef DEBUG
- TraceIC("KeyedStoreIC", name, state, target());
-#endif
+ TRACE_IC("KeyedStoreIC", name, state, target());
}
+#undef TRACE_IC
+
+
// ----------------------------------------------------------------------------
// Static IC stub generators.
//
-static JSFunction* CompileFunction(Isolate* isolate,
- JSFunction* function) {
- // Compile now with optimization.
- HandleScope scope(isolate);
- Handle<JSFunction> function_handle(function, isolate);
- CompileLazy(function_handle, CLEAR_EXCEPTION);
- return *function_handle;
-}
-
-
// Used from ic-<arch>.cc.
RUNTIME_FUNCTION(MaybeObject*, CallIC_Miss) {
- NoHandleAllocation na;
+ HandleScope scope(isolate);
ASSERT(args.length() == 2);
CallIC ic(isolate);
IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
@@ -1910,45 +1789,46 @@ RUNTIME_FUNCTION(MaybeObject*, CallIC_Miss) {
extra_ic_state,
args.at<Object>(0),
args.at<String>(1));
- Object* result;
- if (!maybe_result->ToObject(&result)) return maybe_result;
+ // Result could be a function or a failure.
+ JSFunction* raw_function = NULL;
+ if (!maybe_result->To(&raw_function)) return maybe_result;
// The first time the inline cache is updated may be the first time the
- // function it references gets called. If the function was lazily compiled
+ // function it references gets called. If the function is lazily compiled
// then the first call will trigger a compilation. We check for this case
// and we do the compilation immediately, instead of waiting for the stub
- // currently attached to the JSFunction object to trigger compilation. We
- // do this in the case where we know that the inline cache is inside a loop,
- // because then we know that we want to optimize the function.
- if (!result->IsJSFunction() || JSFunction::cast(result)->is_compiled()) {
- return result;
- }
- return CompileFunction(isolate, JSFunction::cast(result));
+ // currently attached to the JSFunction object to trigger compilation.
+ if (raw_function->is_compiled()) return raw_function;
+
+ Handle<JSFunction> function(raw_function);
+ JSFunction::CompileLazy(function, CLEAR_EXCEPTION);
+ return *function;
}
// Used from ic-<arch>.cc.
RUNTIME_FUNCTION(MaybeObject*, KeyedCallIC_Miss) {
- NoHandleAllocation na;
+ HandleScope scope(isolate);
ASSERT(args.length() == 2);
KeyedCallIC ic(isolate);
IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
- Object* result;
- { MaybeObject* maybe_result =
+ MaybeObject* maybe_result =
ic.LoadFunction(state, args.at<Object>(0), args.at<Object>(1));
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
+ // Result could be a function or a failure.
+ JSFunction* raw_function = NULL;
+ if (!maybe_result->To(&raw_function)) return maybe_result;
- if (!result->IsJSFunction() || JSFunction::cast(result)->is_compiled()) {
- return result;
- }
- return CompileFunction(isolate, JSFunction::cast(result));
+ if (raw_function->is_compiled()) return raw_function;
+
+ Handle<JSFunction> function(raw_function);
+ JSFunction::CompileLazy(function, CLEAR_EXCEPTION);
+ return *function;
}
// Used from ic-<arch>.cc.
RUNTIME_FUNCTION(MaybeObject*, LoadIC_Miss) {
- NoHandleAllocation na;
+ HandleScope scope(isolate);
ASSERT(args.length() == 2);
LoadIC ic(isolate);
IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
@@ -1958,7 +1838,7 @@ RUNTIME_FUNCTION(MaybeObject*, LoadIC_Miss) {
 // Used from ic-<arch>.cc.
RUNTIME_FUNCTION(MaybeObject*, KeyedLoadIC_Miss) {
- NoHandleAllocation na;
+ HandleScope scope(isolate);
ASSERT(args.length() == 2);
KeyedLoadIC ic(isolate);
IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
@@ -1967,7 +1847,7 @@ RUNTIME_FUNCTION(MaybeObject*, KeyedLoadIC_Miss) {
RUNTIME_FUNCTION(MaybeObject*, KeyedLoadIC_MissForceGeneric) {
- NoHandleAllocation na;
+ HandleScope scope(isolate);
ASSERT(args.length() == 2);
KeyedLoadIC ic(isolate);
IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
@@ -1977,7 +1857,7 @@ RUNTIME_FUNCTION(MaybeObject*, KeyedLoadIC_MissForceGeneric) {
// Used from ic-<arch>.cc.
RUNTIME_FUNCTION(MaybeObject*, StoreIC_Miss) {
- NoHandleAllocation na;
+  HandleScope scope(isolate);
ASSERT(args.length() == 3);
StoreIC ic(isolate);
IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
@@ -2046,7 +1926,7 @@ RUNTIME_FUNCTION(MaybeObject*, SharedStoreIC_ExtendStorage) {
// Used from ic-<arch>.cc.
RUNTIME_FUNCTION(MaybeObject*, KeyedStoreIC_Miss) {
- NoHandleAllocation na;
+ HandleScope scope(isolate);
ASSERT(args.length() == 3);
KeyedStoreIC ic(isolate);
IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
@@ -2080,7 +1960,7 @@ RUNTIME_FUNCTION(MaybeObject*, KeyedStoreIC_Slow) {
RUNTIME_FUNCTION(MaybeObject*, KeyedStoreIC_MissForceGeneric) {
- NoHandleAllocation na;
+ HandleScope scope(isolate);
ASSERT(args.length() == 3);
KeyedStoreIC ic(isolate);
IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
@@ -2402,7 +2282,7 @@ RUNTIME_FUNCTION(MaybeObject*, BinaryOp_Patch) {
Handle<JSFunction> builtin_function(JSFunction::cast(builtin), isolate);
bool caught_exception;
- Object** builtin_args[] = { right.location() };
+ Handle<Object> builtin_args[] = { right };
Handle<Object> result = Execution::Call(builtin_function,
left,
ARRAY_SIZE(builtin_args),
diff --git a/deps/v8/src/ic.h b/deps/v8/src/ic.h
index ece5be9f0..81aa6b7c2 100644
--- a/deps/v8/src/ic.h
+++ b/deps/v8/src/ic.h
@@ -198,47 +198,60 @@ class CallICBase: public IC {
class Contextual: public BitField<bool, 0, 1> {};
class StringStubState: public BitField<StringStubFeedback, 1, 1> {};
- protected:
- CallICBase(Code::Kind kind, Isolate* isolate)
- : IC(EXTRA_CALL_FRAME, isolate), kind_(kind) {}
-
- public:
+ // Returns a JSFunction or a Failure.
MUST_USE_RESULT MaybeObject* LoadFunction(State state,
Code::ExtraICState extra_ic_state,
Handle<Object> object,
Handle<String> name);
protected:
- Code::Kind kind_;
+ CallICBase(Code::Kind kind, Isolate* isolate)
+ : IC(EXTRA_CALL_FRAME, isolate), kind_(kind) {}
bool TryUpdateExtraICState(LookupResult* lookup,
Handle<Object> object,
Code::ExtraICState* extra_ic_state);
- MUST_USE_RESULT MaybeObject* ComputeMonomorphicStub(
- LookupResult* lookup,
- State state,
- Code::ExtraICState extra_ic_state,
- Handle<Object> object,
- Handle<String> name);
+  // Compute a monomorphic stub if possible; otherwise return a null handle.
+ Handle<Code> ComputeMonomorphicStub(LookupResult* lookup,
+ State state,
+ Code::ExtraICState extra_state,
+ Handle<Object> object,
+ Handle<String> name);
- // Update the inline cache and the global stub cache based on the
- // lookup result.
+ // Update the inline cache and the global stub cache based on the lookup
+ // result.
void UpdateCaches(LookupResult* lookup,
State state,
Code::ExtraICState extra_ic_state,
Handle<Object> object,
Handle<String> name);
- // Returns a JSFunction if the object can be called as a function,
- // and patches the stack to be ready for the call.
- // Otherwise, it returns the undefined value.
- Object* TryCallAsFunction(Object* object);
+ // Returns a JSFunction if the object can be called as a function, and
+ // patches the stack to be ready for the call. Otherwise, it returns the
+ // undefined value.
+ Handle<Object> TryCallAsFunction(Handle<Object> object);
void ReceiverToObjectIfRequired(Handle<Object> callee, Handle<Object> object);
static void Clear(Address address, Code* target);
+ // Platform-specific code generation functions used by both call and
+ // keyed call.
+ static void GenerateMiss(MacroAssembler* masm,
+ int argc,
+ IC::UtilityId id,
+ Code::ExtraICState extra_state);
+
+ static void GenerateNormal(MacroAssembler* masm, int argc);
+
+ static void GenerateMonomorphicCacheProbe(MacroAssembler* masm,
+ int argc,
+ Code::Kind kind,
+ Code::ExtraICState extra_state);
+
+ Code::Kind kind_;
+
friend class IC;
};
@@ -252,16 +265,24 @@ class CallIC: public CallICBase {
// Code generator routines.
static void GenerateInitialize(MacroAssembler* masm,
int argc,
- Code::ExtraICState extra_ic_state) {
- GenerateMiss(masm, argc, extra_ic_state);
+ Code::ExtraICState extra_state) {
+ GenerateMiss(masm, argc, extra_state);
}
+
static void GenerateMiss(MacroAssembler* masm,
int argc,
- Code::ExtraICState extra_ic_state);
+ Code::ExtraICState extra_state) {
+ CallICBase::GenerateMiss(masm, argc, IC::kCallIC_Miss, extra_state);
+ }
+
static void GenerateMegamorphic(MacroAssembler* masm,
int argc,
Code::ExtraICState extra_ic_state);
- static void GenerateNormal(MacroAssembler* masm, int argc);
+
+ static void GenerateNormal(MacroAssembler* masm, int argc) {
+ CallICBase::GenerateNormal(masm, argc);
+ GenerateMiss(masm, argc, Code::kNoExtraICState);
+ }
};
@@ -280,7 +301,12 @@ class KeyedCallIC: public CallICBase {
static void GenerateInitialize(MacroAssembler* masm, int argc) {
GenerateMiss(masm, argc);
}
- static void GenerateMiss(MacroAssembler* masm, int argc);
+
+ static void GenerateMiss(MacroAssembler* masm, int argc) {
+ CallICBase::GenerateMiss(masm, argc, IC::kKeyedCallIC_Miss,
+ Code::kNoExtraICState);
+ }
+
static void GenerateMegamorphic(MacroAssembler* masm, int argc);
static void GenerateNormal(MacroAssembler* masm, int argc);
static void GenerateNonStrictArguments(MacroAssembler* masm, int argc);
@@ -321,17 +347,15 @@ class LoadIC: public IC {
Handle<String> name);
// Stub accessors.
- Code* megamorphic_stub() {
- return isolate()->builtins()->builtin(
- Builtins::kLoadIC_Megamorphic);
+ Handle<Code> megamorphic_stub() {
+ return isolate()->builtins()->LoadIC_Megamorphic();
}
static Code* initialize_stub() {
return Isolate::Current()->builtins()->builtin(
Builtins::kLoadIC_Initialize);
}
- Code* pre_monomorphic_stub() {
- return isolate()->builtins()->builtin(
- Builtins::kLoadIC_PreMonomorphic);
+ Handle<Code> pre_monomorphic_stub() {
+ return isolate()->builtins()->LoadIC_PreMonomorphic();
}
static void Clear(Address address, Code* target);
@@ -342,41 +366,53 @@ class LoadIC: public IC {
class KeyedIC: public IC {
public:
+ enum StubKind {
+ LOAD,
+ STORE_NO_TRANSITION,
+ STORE_TRANSITION_SMI_TO_OBJECT,
+ STORE_TRANSITION_SMI_TO_DOUBLE,
+ STORE_TRANSITION_DOUBLE_TO_OBJECT
+ };
explicit KeyedIC(Isolate* isolate) : IC(NO_EXTRA_FRAME, isolate) {}
virtual ~KeyedIC() {}
- virtual MaybeObject* GetElementStubWithoutMapCheck(
+ virtual Handle<Code> GetElementStubWithoutMapCheck(
bool is_js_array,
ElementsKind elements_kind) = 0;
protected:
- virtual Code* string_stub() {
- return NULL;
+ virtual Handle<Code> string_stub() {
+ return Handle<Code>::null();
}
virtual Code::Kind kind() const = 0;
- MaybeObject* ComputeStub(JSObject* receiver,
- bool is_store,
+ Handle<Code> ComputeStub(Handle<JSObject> receiver,
+ StubKind stub_kind,
StrictModeFlag strict_mode,
- Code* default_stub);
+ Handle<Code> default_stub);
- virtual MaybeObject* ConstructMegamorphicStub(
- MapList* receiver_maps,
- CodeList* targets,
- StrictModeFlag strict_mode) = 0;
-
- private:
- void GetReceiverMapsForStub(Code* stub, MapList* result);
+ virtual Handle<Code> ComputePolymorphicStub(MapHandleList* receiver_maps,
+ StrictModeFlag strict_mode) = 0;
- MaybeObject* ComputeMonomorphicStubWithoutMapCheck(
- Map* receiver_map,
+ Handle<Code> ComputeMonomorphicStubWithoutMapCheck(
+ Handle<Map> receiver_map,
StrictModeFlag strict_mode);
- MaybeObject* ComputeMonomorphicStub(JSObject* receiver,
- bool is_store,
+ private:
+ void GetReceiverMapsForStub(Handle<Code> stub, MapHandleList* result);
+
+ Handle<Code> ComputeMonomorphicStub(Handle<JSObject> receiver,
+ StubKind stub_kind,
StrictModeFlag strict_mode,
- Code* default_stub);
+ Handle<Code> default_stub);
+
+ Handle<Map> ComputeTransitionedMap(Handle<JSObject> receiver,
+ StubKind stub_kind);
+
+ static bool IsTransitionStubKind(StubKind stub_kind) {
+ return stub_kind > STORE_NO_TRANSITION;
+ }
};
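
Note that IsTransitionStubKind relies on declaration order: every enumerator after STORE_NO_TRANSITION is a transitioning store, so new kinds must keep that ordering. KeyedStoreIC::Store derives the kind from the receiver's elements kind and the value being stored; a simplified sketch of that selection, with boolean parameters standing in for the real ElementsKind and Object predicates:

    // Assumes the StubKind enum declared above; the flags correspond to
    // GetElementsKind() == FAST_SMI_ONLY_ELEMENTS / FAST_DOUBLE_ELEMENTS and
    // value->IsSmi() / value->IsHeapNumber().
    KeyedIC::StubKind SelectStoreStubKind(bool smi_only_elements,
                                          bool double_elements,
                                          bool value_is_smi,
                                          bool value_is_heap_number) {
      if (smi_only_elements) {
        if (value_is_heap_number) {
          return KeyedIC::STORE_TRANSITION_SMI_TO_DOUBLE;
        }
        if (!value_is_smi) return KeyedIC::STORE_TRANSITION_SMI_TO_OBJECT;
      } else if (double_elements) {
        if (!value_is_smi && !value_is_heap_number) {
          return KeyedIC::STORE_TRANSITION_DOUBLE_TO_OBJECT;
        }
      }
      return KeyedIC::STORE_NO_TRANSITION;
    }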
@@ -412,21 +448,18 @@ class KeyedLoadIC: public KeyedIC {
static const int kSlowCaseBitFieldMask =
(1 << Map::kIsAccessCheckNeeded) | (1 << Map::kHasIndexedInterceptor);
- virtual MaybeObject* GetElementStubWithoutMapCheck(
+ virtual Handle<Code> GetElementStubWithoutMapCheck(
bool is_js_array,
ElementsKind elements_kind);
protected:
virtual Code::Kind kind() const { return Code::KEYED_LOAD_IC; }
- virtual MaybeObject* ConstructMegamorphicStub(
- MapList* receiver_maps,
- CodeList* targets,
- StrictModeFlag strict_mode);
+ virtual Handle<Code> ComputePolymorphicStub(MapHandleList* receiver_maps,
+ StrictModeFlag strict_mode);
- virtual Code* string_stub() {
- return isolate()->builtins()->builtin(
- Builtins::kKeyedLoadIC_String);
+ virtual Handle<Code> string_stub() {
+ return isolate()->builtins()->KeyedLoadIC_String();
}
private:
@@ -441,25 +474,20 @@ class KeyedLoadIC: public KeyedIC {
return Isolate::Current()->builtins()->builtin(
Builtins::kKeyedLoadIC_Initialize);
}
- Code* megamorphic_stub() {
- return isolate()->builtins()->builtin(
- Builtins::kKeyedLoadIC_Generic);
+ Handle<Code> megamorphic_stub() {
+ return isolate()->builtins()->KeyedLoadIC_Generic();
}
- Code* generic_stub() {
- return isolate()->builtins()->builtin(
- Builtins::kKeyedLoadIC_Generic);
+ Handle<Code> generic_stub() {
+ return isolate()->builtins()->KeyedLoadIC_Generic();
}
- Code* pre_monomorphic_stub() {
- return isolate()->builtins()->builtin(
- Builtins::kKeyedLoadIC_PreMonomorphic);
+ Handle<Code> pre_monomorphic_stub() {
+ return isolate()->builtins()->KeyedLoadIC_PreMonomorphic();
}
- Code* indexed_interceptor_stub() {
- return isolate()->builtins()->builtin(
- Builtins::kKeyedLoadIC_IndexedInterceptor);
+ Handle<Code> indexed_interceptor_stub() {
+ return isolate()->builtins()->KeyedLoadIC_IndexedInterceptor();
}
- Code* non_strict_arguments_stub() {
- return isolate()->builtins()->builtin(
- Builtins::kKeyedLoadIC_NonStrictArguments);
+ Handle<Code> non_strict_arguments_stub() {
+ return isolate()->builtins()->KeyedLoadIC_NonStrictArguments();
}
static void Clear(Address address, Code* target);
@@ -524,13 +552,11 @@ class StoreIC: public IC {
return Isolate::Current()->builtins()->builtin(
Builtins::kStoreIC_Initialize_Strict);
}
- Code* global_proxy_stub() {
- return isolate()->builtins()->builtin(
- Builtins::kStoreIC_GlobalProxy);
+ Handle<Code> global_proxy_stub() {
+ return isolate()->builtins()->StoreIC_GlobalProxy();
}
- Code* global_proxy_stub_strict() {
- return isolate()->builtins()->builtin(
- Builtins::kStoreIC_GlobalProxy_Strict);
+ Handle<Code> global_proxy_stub_strict() {
+ return isolate()->builtins()->StoreIC_GlobalProxy_Strict();
}
static void Clear(Address address, Code* target);
@@ -562,18 +588,18 @@ class KeyedStoreIC: public KeyedIC {
StrictModeFlag strict_mode);
static void GenerateGeneric(MacroAssembler* masm, StrictModeFlag strict_mode);
static void GenerateNonStrictArguments(MacroAssembler* masm);
+ static void GenerateTransitionElementsSmiToDouble(MacroAssembler* masm);
+ static void GenerateTransitionElementsDoubleToObject(MacroAssembler* masm);
- virtual MaybeObject* GetElementStubWithoutMapCheck(
+ virtual Handle<Code> GetElementStubWithoutMapCheck(
bool is_js_array,
ElementsKind elements_kind);
protected:
virtual Code::Kind kind() const { return Code::KEYED_STORE_IC; }
- virtual MaybeObject* ConstructMegamorphicStub(
- MapList* receiver_maps,
- CodeList* targets,
- StrictModeFlag strict_mode);
+ virtual Handle<Code> ComputePolymorphicStub(MapHandleList* receiver_maps,
+ StrictModeFlag strict_mode);
private:
// Update the inline cache.
@@ -596,29 +622,24 @@ class KeyedStoreIC: public KeyedIC {
return Isolate::Current()->builtins()->builtin(
Builtins::kKeyedStoreIC_Initialize);
}
- Code* megamorphic_stub() {
- return isolate()->builtins()->builtin(
- Builtins::kKeyedStoreIC_Generic);
- }
static Code* initialize_stub_strict() {
return Isolate::Current()->builtins()->builtin(
Builtins::kKeyedStoreIC_Initialize_Strict);
}
- Code* megamorphic_stub_strict() {
- return isolate()->builtins()->builtin(
- Builtins::kKeyedStoreIC_Generic_Strict);
+ Handle<Code> megamorphic_stub() {
+ return isolate()->builtins()->KeyedStoreIC_Generic();
}
- Code* generic_stub() {
- return isolate()->builtins()->builtin(
- Builtins::kKeyedStoreIC_Generic);
+ Handle<Code> megamorphic_stub_strict() {
+ return isolate()->builtins()->KeyedStoreIC_Generic_Strict();
}
- Code* generic_stub_strict() {
- return isolate()->builtins()->builtin(
- Builtins::kKeyedStoreIC_Generic_Strict);
+ Handle<Code> generic_stub() {
+ return isolate()->builtins()->KeyedStoreIC_Generic();
}
- Code* non_strict_arguments_stub() {
- return isolate()->builtins()->builtin(
- Builtins::kKeyedStoreIC_NonStrictArguments);
+ Handle<Code> generic_stub_strict() {
+ return isolate()->builtins()->KeyedStoreIC_Generic_Strict();
+ }
+ Handle<Code> non_strict_arguments_stub() {
+ return isolate()->builtins()->KeyedStoreIC_NonStrictArguments();
}
static void Clear(Address address, Code* target);
diff --git a/deps/v8/src/incremental-marking-inl.h b/deps/v8/src/incremental-marking-inl.h
new file mode 100644
index 000000000..7ae2c99a0
--- /dev/null
+++ b/deps/v8/src/incremental-marking-inl.h
@@ -0,0 +1,133 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_INCREMENTAL_MARKING_INL_H_
+#define V8_INCREMENTAL_MARKING_INL_H_
+
+#include "incremental-marking.h"
+
+namespace v8 {
+namespace internal {
+
+
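+// Shared tail of the incremental write-barrier slow paths. The marker keeps
+// a tri-color invariant: a black (fully scanned) object must never point at
+// a white (unmarked) one. When such a store is seen, the black source is
+// reverted to grey and pushed back on the marking deque so it is rescanned.
+// Returns true when the value is already marked (grey or black); the caller
+// may then still have to record the slot for compaction.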
+bool IncrementalMarking::BaseRecordWrite(HeapObject* obj,
+ Object** slot,
+ Object* value) {
+ MarkBit value_bit = Marking::MarkBitFrom(HeapObject::cast(value));
+ if (Marking::IsWhite(value_bit)) {
+ MarkBit obj_bit = Marking::MarkBitFrom(obj);
+ if (Marking::IsBlack(obj_bit)) {
+ BlackToGreyAndUnshift(obj, obj_bit);
+ RestartIfNotMarking();
+ }
+
+    // Object is either grey or white. It will be scanned if it survives.
+ return false;
+ }
+ return true;
+}
+
+
+void IncrementalMarking::RecordWrite(HeapObject* obj,
+ Object** slot,
+ Object* value) {
+ if (IsMarking() && value->NonFailureIsHeapObject()) {
+ RecordWriteSlow(obj, slot, value);
+ }
+}
+
+
+void IncrementalMarking::RecordWriteOfCodeEntry(JSFunction* host,
+ Object** slot,
+ Code* value) {
+ if (IsMarking()) RecordWriteOfCodeEntrySlow(host, slot, value);
+}
+
+
+void IncrementalMarking::RecordWriteIntoCode(HeapObject* obj,
+ RelocInfo* rinfo,
+ Object* value) {
+ if (IsMarking() && value->NonFailureIsHeapObject()) {
+ RecordWriteIntoCodeSlow(obj, rinfo, value);
+ }
+}
+
+
+void IncrementalMarking::RecordWrites(HeapObject* obj) {
+ if (IsMarking()) {
+ MarkBit obj_bit = Marking::MarkBitFrom(obj);
+ if (Marking::IsBlack(obj_bit)) {
+ BlackToGreyAndUnshift(obj, obj_bit);
+ RestartIfNotMarking();
+ }
+ }
+}
+
+
+void IncrementalMarking::BlackToGreyAndUnshift(HeapObject* obj,
+ MarkBit mark_bit) {
+ ASSERT(Marking::MarkBitFrom(obj) == mark_bit);
+  ASSERT(obj->Size() >= 2 * kPointerSize);
+ ASSERT(IsMarking());
+ Marking::BlackToGrey(mark_bit);
+ int obj_size = obj->Size();
+ MemoryChunk::IncrementLiveBytes(obj->address(), -obj_size);
+ bytes_scanned_ -= obj_size;
+ int64_t old_bytes_rescanned = bytes_rescanned_;
+ bytes_rescanned_ = old_bytes_rescanned + obj_size;
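+  // Only run the progress heuristic below when bytes_rescanned_ crosses a
+  // megabyte boundary, which keeps the common case cheap.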
+ if ((bytes_rescanned_ >> 20) != (old_bytes_rescanned >> 20)) {
+ if (bytes_rescanned_ > 2 * heap_->PromotedSpaceSize()) {
+ // If we have queued twice the heap size for rescanning then we are
+ // going around in circles, scanning the same objects again and again
+ // as the program mutates the heap faster than we can incrementally
+ // trace it. In this case we switch to non-incremental marking in
+ // order to finish off this marking phase.
+ if (FLAG_trace_gc) {
+ PrintF("Hurrying incremental marking because of lack of progress\n");
+ }
+ allocation_marking_factor_ = kMaxAllocationMarkingFactor;
+ }
+ }
+
+ marking_deque_.UnshiftGrey(obj);
+}
+
+
+void IncrementalMarking::WhiteToGreyAndPush(HeapObject* obj, MarkBit mark_bit) {
+ WhiteToGrey(obj, mark_bit);
+ marking_deque_.PushGrey(obj);
+}
+
+
+void IncrementalMarking::WhiteToGrey(HeapObject* obj, MarkBit mark_bit) {
+ Marking::WhiteToGrey(mark_bit);
+}
+
+
+} } // namespace v8::internal
+
+#endif // V8_INCREMENTAL_MARKING_INL_H_
diff --git a/deps/v8/src/incremental-marking.cc b/deps/v8/src/incremental-marking.cc
new file mode 100644
index 000000000..dd54c6307
--- /dev/null
+++ b/deps/v8/src/incremental-marking.cc
@@ -0,0 +1,920 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "incremental-marking.h"
+
+#include "code-stubs.h"
+#include "compilation-cache.h"
+#include "v8conversions.h"
+
+namespace v8 {
+namespace internal {
+
+
+IncrementalMarking::IncrementalMarking(Heap* heap)
+ : heap_(heap),
+ state_(STOPPED),
+ marking_deque_memory_(NULL),
+ marking_deque_memory_committed_(false),
+ steps_count_(0),
+ steps_took_(0),
+ longest_step_(0.0),
+ old_generation_space_available_at_start_of_incremental_(0),
+ old_generation_space_used_at_start_of_incremental_(0),
+ steps_count_since_last_gc_(0),
+ steps_took_since_last_gc_(0),
+ should_hurry_(false),
+ allocation_marking_factor_(0),
+ allocated_(0),
+ no_marking_scope_depth_(0) {
+}
+
+
+void IncrementalMarking::TearDown() {
+ delete marking_deque_memory_;
+}
+
+
+void IncrementalMarking::RecordWriteSlow(HeapObject* obj,
+ Object** slot,
+ Object* value) {
+ if (BaseRecordWrite(obj, slot, value) && is_compacting_ && slot != NULL) {
+ MarkBit obj_bit = Marking::MarkBitFrom(obj);
+ if (Marking::IsBlack(obj_bit)) {
+      // Object is not going to be rescanned, so we need to record the slot.
+ heap_->mark_compact_collector()->RecordSlot(
+ HeapObject::RawField(obj, 0), slot, value);
+ }
+ }
+}
+
+
+void IncrementalMarking::RecordWriteFromCode(HeapObject* obj,
+ Object* value,
+ Isolate* isolate) {
+ ASSERT(obj->IsHeapObject());
+
+ // Fast cases should already be covered by RecordWriteStub.
+ ASSERT(value->IsHeapObject());
+ ASSERT(!value->IsHeapNumber());
+ ASSERT(!value->IsString() ||
+ value->IsConsString() ||
+ value->IsSlicedString());
+ ASSERT(Marking::IsWhite(Marking::MarkBitFrom(HeapObject::cast(value))));
+
+ IncrementalMarking* marking = isolate->heap()->incremental_marking();
+ ASSERT(!marking->is_compacting_);
+ marking->RecordWrite(obj, NULL, value);
+}
+
+
+void IncrementalMarking::RecordWriteForEvacuationFromCode(HeapObject* obj,
+ Object** slot,
+ Isolate* isolate) {
+ IncrementalMarking* marking = isolate->heap()->incremental_marking();
+ ASSERT(marking->is_compacting_);
+ marking->RecordWrite(obj, slot, *slot);
+}
+
+
+void IncrementalMarking::RecordCodeTargetPatch(Code* host,
+ Address pc,
+ HeapObject* value) {
+ if (IsMarking()) {
+ RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
+ RecordWriteIntoCode(host, &rinfo, value);
+ }
+}
+
+
+void IncrementalMarking::RecordCodeTargetPatch(Address pc, HeapObject* value) {
+ if (IsMarking()) {
+ Code* host = heap_->isolate()->inner_pointer_to_code_cache()->
+ GcSafeFindCodeForInnerPointer(pc);
+ RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
+ RecordWriteIntoCode(host, &rinfo, value);
+ }
+}
+
+
+void IncrementalMarking::RecordWriteOfCodeEntrySlow(JSFunction* host,
+ Object** slot,
+ Code* value) {
+ if (BaseRecordWrite(host, slot, value) && is_compacting_) {
+ ASSERT(slot != NULL);
+ heap_->mark_compact_collector()->
+ RecordCodeEntrySlot(reinterpret_cast<Address>(slot), value);
+ }
+}
+
+
+void IncrementalMarking::RecordWriteIntoCodeSlow(HeapObject* obj,
+ RelocInfo* rinfo,
+ Object* value) {
+ MarkBit value_bit = Marking::MarkBitFrom(HeapObject::cast(value));
+ if (Marking::IsWhite(value_bit)) {
+ MarkBit obj_bit = Marking::MarkBitFrom(obj);
+ if (Marking::IsBlack(obj_bit)) {
+ BlackToGreyAndUnshift(obj, obj_bit);
+ RestartIfNotMarking();
+ }
+    // Object is either grey or white. It will be scanned if it survives.
+ return;
+ }
+
+ if (is_compacting_) {
+ MarkBit obj_bit = Marking::MarkBitFrom(obj);
+ if (Marking::IsBlack(obj_bit)) {
+ // Object is not going to be rescanned. We need to record the slot.
+ heap_->mark_compact_collector()->RecordRelocSlot(rinfo,
+ Code::cast(value));
+ }
+ }
+}
+
+
+class IncrementalMarkingMarkingVisitor : public ObjectVisitor {
+ public:
+ IncrementalMarkingMarkingVisitor(Heap* heap,
+ IncrementalMarking* incremental_marking)
+ : heap_(heap),
+ incremental_marking_(incremental_marking) {
+ }
+
+ void VisitEmbeddedPointer(RelocInfo* rinfo) {
+ ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
+ Object* target = rinfo->target_object();
+ if (target->NonFailureIsHeapObject()) {
+ heap_->mark_compact_collector()->RecordRelocSlot(rinfo, target);
+ MarkObject(target);
+ }
+ }
+
+ void VisitCodeTarget(RelocInfo* rinfo) {
+ ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
+ Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
+ heap_->mark_compact_collector()->RecordRelocSlot(rinfo, Code::cast(target));
+ MarkObject(target);
+ }
+
+ void VisitDebugTarget(RelocInfo* rinfo) {
+ ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) &&
+ rinfo->IsPatchedReturnSequence()) ||
+ (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
+ rinfo->IsPatchedDebugBreakSlotSequence()));
+ Object* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
+ heap_->mark_compact_collector()->RecordRelocSlot(rinfo, Code::cast(target));
+ MarkObject(target);
+ }
+
+ void VisitCodeEntry(Address entry_address) {
+ Object* target = Code::GetObjectFromEntryAddress(entry_address);
+ heap_->mark_compact_collector()->
+ RecordCodeEntrySlot(entry_address, Code::cast(target));
+ MarkObject(target);
+ }
+
+ void VisitPointer(Object** p) {
+ Object* obj = *p;
+ if (obj->NonFailureIsHeapObject()) {
+ heap_->mark_compact_collector()->RecordSlot(p, p, obj);
+ MarkObject(obj);
+ }
+ }
+
+ void VisitPointers(Object** start, Object** end) {
+ for (Object** p = start; p < end; p++) {
+ Object* obj = *p;
+ if (obj->NonFailureIsHeapObject()) {
+ heap_->mark_compact_collector()->RecordSlot(start, p, obj);
+ MarkObject(obj);
+ }
+ }
+ }
+
+ private:
+ // Mark object pointed to by p.
+ INLINE(void MarkObject(Object* obj)) {
+ HeapObject* heap_object = HeapObject::cast(obj);
+ MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
+ if (mark_bit.data_only()) {
+ if (incremental_marking_->MarkBlackOrKeepGrey(mark_bit)) {
+ MemoryChunk::IncrementLiveBytes(heap_object->address(),
+ heap_object->Size());
+ }
+ } else if (Marking::IsWhite(mark_bit)) {
+ incremental_marking_->WhiteToGreyAndPush(heap_object, mark_bit);
+ }
+ }
+
+ Heap* heap_;
+ IncrementalMarking* incremental_marking_;
+};
+
+
+class IncrementalMarkingRootMarkingVisitor : public ObjectVisitor {
+ public:
+ IncrementalMarkingRootMarkingVisitor(Heap* heap,
+ IncrementalMarking* incremental_marking)
+ : heap_(heap),
+ incremental_marking_(incremental_marking) {
+ }
+
+ void VisitPointer(Object** p) {
+ MarkObjectByPointer(p);
+ }
+
+ void VisitPointers(Object** start, Object** end) {
+ for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
+ }
+
+ private:
+ void MarkObjectByPointer(Object** p) {
+ Object* obj = *p;
+ if (!obj->IsHeapObject()) return;
+
+ HeapObject* heap_object = HeapObject::cast(obj);
+ MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
+ if (mark_bit.data_only()) {
+ if (incremental_marking_->MarkBlackOrKeepGrey(mark_bit)) {
+ MemoryChunk::IncrementLiveBytes(heap_object->address(),
+ heap_object->Size());
+ }
+ } else {
+ if (Marking::IsWhite(mark_bit)) {
+ incremental_marking_->WhiteToGreyAndPush(heap_object, mark_bit);
+ }
+ }
+ }
+
+ Heap* heap_;
+ IncrementalMarking* incremental_marking_;
+};
+
+
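+// Page flag maintenance for the write barriers: while incrementally marking,
+// stores both into and out of these pages are interesting to the barrier.
+// Once marking stops, POINTERS_FROM_HERE_ARE_INTERESTING normally stays set
+// so the store buffer keeps recording old-to-new pointers, while cell-space
+// and scan-on-scavenge pages clear both flags.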
+void IncrementalMarking::SetOldSpacePageFlags(MemoryChunk* chunk,
+ bool is_marking,
+ bool is_compacting) {
+ if (is_marking) {
+ chunk->SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
+ chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
+
+ // It's difficult to filter out slots recorded for large objects.
+ if (chunk->owner()->identity() == LO_SPACE &&
+ chunk->size() > static_cast<size_t>(Page::kPageSize) &&
+ is_compacting) {
+ chunk->SetFlag(MemoryChunk::RESCAN_ON_EVACUATION);
+ }
+ } else if (chunk->owner()->identity() == CELL_SPACE ||
+ chunk->scan_on_scavenge()) {
+ chunk->ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
+ chunk->ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
+ } else {
+ chunk->ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
+ chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
+ }
+}
+
+
+void IncrementalMarking::SetNewSpacePageFlags(NewSpacePage* chunk,
+ bool is_marking) {
+ chunk->SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
+ if (is_marking) {
+ chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
+ } else {
+ chunk->ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
+ }
+ chunk->SetFlag(MemoryChunk::SCAN_ON_SCAVENGE);
+}
+
+
+void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
+ PagedSpace* space) {
+ PageIterator it(space);
+ while (it.has_next()) {
+ Page* p = it.next();
+ SetOldSpacePageFlags(p, false, false);
+ }
+}
+
+
+void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
+ NewSpace* space) {
+ NewSpacePageIterator it(space);
+ while (it.has_next()) {
+ NewSpacePage* p = it.next();
+ SetNewSpacePageFlags(p, false);
+ }
+}
+
+
+void IncrementalMarking::DeactivateIncrementalWriteBarrier() {
+ DeactivateIncrementalWriteBarrierForSpace(heap_->old_pointer_space());
+ DeactivateIncrementalWriteBarrierForSpace(heap_->old_data_space());
+ DeactivateIncrementalWriteBarrierForSpace(heap_->cell_space());
+ DeactivateIncrementalWriteBarrierForSpace(heap_->map_space());
+ DeactivateIncrementalWriteBarrierForSpace(heap_->code_space());
+ DeactivateIncrementalWriteBarrierForSpace(heap_->new_space());
+
+ LargePage* lop = heap_->lo_space()->first_page();
+ while (lop->is_valid()) {
+ SetOldSpacePageFlags(lop, false, false);
+ lop = lop->next_page();
+ }
+}
+
+
+void IncrementalMarking::ActivateIncrementalWriteBarrier(PagedSpace* space) {
+ PageIterator it(space);
+ while (it.has_next()) {
+ Page* p = it.next();
+ SetOldSpacePageFlags(p, true, is_compacting_);
+ }
+}
+
+
+void IncrementalMarking::ActivateIncrementalWriteBarrier(NewSpace* space) {
+ NewSpacePageIterator it(space->ToSpaceStart(), space->ToSpaceEnd());
+ while (it.has_next()) {
+ NewSpacePage* p = it.next();
+ SetNewSpacePageFlags(p, true);
+ }
+}
+
+
+void IncrementalMarking::ActivateIncrementalWriteBarrier() {
+ ActivateIncrementalWriteBarrier(heap_->old_pointer_space());
+ ActivateIncrementalWriteBarrier(heap_->old_data_space());
+ ActivateIncrementalWriteBarrier(heap_->cell_space());
+ ActivateIncrementalWriteBarrier(heap_->map_space());
+ ActivateIncrementalWriteBarrier(heap_->code_space());
+ ActivateIncrementalWriteBarrier(heap_->new_space());
+
+ LargePage* lop = heap_->lo_space()->first_page();
+ while (lop->is_valid()) {
+ SetOldSpacePageFlags(lop, true, is_compacting_);
+ lop = lop->next_page();
+ }
+}
+
+
+bool IncrementalMarking::WorthActivating() {
+#ifndef DEBUG
+ static const intptr_t kActivationThreshold = 8 * MB;
+#else
+ // TODO(gc) consider setting this to some low level so that some
+ // debug tests run with incremental marking and some without.
+ static const intptr_t kActivationThreshold = 0;
+#endif
+
+ return !FLAG_expose_gc &&
+ FLAG_incremental_marking &&
+ !Serializer::enabled() &&
+ heap_->PromotedSpaceSize() > kActivationThreshold;
+}
+
+
+void IncrementalMarking::ActivateGeneratedStub(Code* stub) {
+ ASSERT(RecordWriteStub::GetMode(stub) ==
+ RecordWriteStub::STORE_BUFFER_ONLY);
+
+ if (!IsMarking()) {
+ // The stub is initially generated in STORE_BUFFER_ONLY mode, so
+ // we don't need to do anything if incremental marking is
+ // not active.
+ } else if (IsCompacting()) {
+ RecordWriteStub::Patch(stub, RecordWriteStub::INCREMENTAL_COMPACTION);
+ } else {
+ RecordWriteStub::Patch(stub, RecordWriteStub::INCREMENTAL);
+ }
+}
+
+
+static void PatchIncrementalMarkingRecordWriteStubs(
+ Heap* heap, RecordWriteStub::Mode mode) {
+ NumberDictionary* stubs = heap->code_stubs();
+
+ int capacity = stubs->Capacity();
+ for (int i = 0; i < capacity; i++) {
+ Object* k = stubs->KeyAt(i);
+ if (stubs->IsKey(k)) {
+ uint32_t key = NumberToUint32(k);
+
+ if (CodeStub::MajorKeyFromKey(key) ==
+ CodeStub::RecordWrite) {
+ Object* e = stubs->ValueAt(i);
+ if (e->IsCode()) {
+ RecordWriteStub::Patch(Code::cast(e), mode);
+ }
+ }
+ }
+ }
+}
+
+
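+// The 4 MB backing store for the marking deque is reserved once and
+// committed lazily, so that it can be uncommitted again while marking
+// is stopped.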
+void IncrementalMarking::EnsureMarkingDequeIsCommitted() {
+ if (marking_deque_memory_ == NULL) {
+ marking_deque_memory_ = new VirtualMemory(4 * MB);
+ }
+ if (!marking_deque_memory_committed_) {
+ bool success = marking_deque_memory_->Commit(
+ reinterpret_cast<Address>(marking_deque_memory_->address()),
+ marking_deque_memory_->size(),
+ false); // Not executable.
+ CHECK(success);
+ marking_deque_memory_committed_ = true;
+ }
+}
+
+void IncrementalMarking::UncommitMarkingDeque() {
+ if (state_ == STOPPED && marking_deque_memory_committed_) {
+ bool success = marking_deque_memory_->Uncommit(
+ reinterpret_cast<Address>(marking_deque_memory_->address()),
+ marking_deque_memory_->size());
+ CHECK(success);
+ marking_deque_memory_committed_ = false;
+ }
+}
+
+
+void IncrementalMarking::Start() {
+ if (FLAG_trace_incremental_marking) {
+ PrintF("[IncrementalMarking] Start\n");
+ }
+ ASSERT(FLAG_incremental_marking);
+ ASSERT(state_ == STOPPED);
+
+ ResetStepCounters();
+
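+ // Marking cannot start until the old spaces have been swept; until
+ // then incremental steps advance the sweepers instead.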
+ if (heap_->old_pointer_space()->IsSweepingComplete() &&
+ heap_->old_data_space()->IsSweepingComplete()) {
+ StartMarking(ALLOW_COMPACTION);
+ } else {
+ if (FLAG_trace_incremental_marking) {
+ PrintF("[IncrementalMarking] Start sweeping.\n");
+ }
+ state_ = SWEEPING;
+ }
+
+ heap_->new_space()->LowerInlineAllocationLimit(kAllocatedThreshold);
+}
+
+
+static void MarkObjectGreyDoNotEnqueue(Object* obj) {
+ if (obj->IsHeapObject()) {
+ HeapObject* heap_obj = HeapObject::cast(obj);
+ MarkBit mark_bit = Marking::MarkBitFrom(heap_obj);
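+ // A black object has already been counted as live; demoting it to
+ // grey must undo that accounting.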
+ if (Marking::IsBlack(mark_bit)) {
+ MemoryChunk::IncrementLiveBytes(heap_obj->address(),
+ -heap_obj->Size());
+ }
+ Marking::AnyToGrey(mark_bit);
+ }
+}
+
+
+void IncrementalMarking::StartMarking(CompactionFlag flag) {
+ if (FLAG_trace_incremental_marking) {
+ PrintF("[IncrementalMarking] Start marking\n");
+ }
+
+ is_compacting_ = !FLAG_never_compact && (flag == ALLOW_COMPACTION) &&
+ heap_->mark_compact_collector()->StartCompaction();
+
+ state_ = MARKING;
+
+ RecordWriteStub::Mode mode = is_compacting_ ?
+ RecordWriteStub::INCREMENTAL_COMPACTION : RecordWriteStub::INCREMENTAL;
+
+ PatchIncrementalMarkingRecordWriteStubs(heap_, mode);
+
+ EnsureMarkingDequeIsCommitted();
+
+ // Initialize marking stack.
+ Address addr = static_cast<Address>(marking_deque_memory_->address());
+ size_t size = marking_deque_memory_->size();
+ if (FLAG_force_marking_deque_overflows) size = 64 * kPointerSize;
+ marking_deque_.Initialize(addr, addr + size);
+
+ ActivateIncrementalWriteBarrier();
+
+#ifdef DEBUG
+ // Marking bits are cleared by the sweeper.
+ if (FLAG_verify_heap) {
+ heap_->mark_compact_collector()->VerifyMarkbitsAreClean();
+ }
+#endif
+
+ heap_->CompletelyClearInstanceofCache();
+ heap_->isolate()->compilation_cache()->MarkCompactPrologue();
+
+ if (FLAG_cleanup_code_caches_at_gc) {
+ // We will mark the cache black with a separate pass
+ // when we finish marking.
+ MarkObjectGreyDoNotEnqueue(heap_->polymorphic_code_cache());
+ }
+
+ // Mark strong roots grey.
+ IncrementalMarkingRootMarkingVisitor visitor(heap_, this);
+ heap_->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);
+
+ // Ready to start incremental marking.
+ if (FLAG_trace_incremental_marking) {
+ PrintF("[IncrementalMarking] Running\n");
+ }
+}
+
+
+void IncrementalMarking::PrepareForScavenge() {
+ if (!IsMarking()) return;
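+ // Clear the mark bits of the current from-space pages: after the
+ // semispaces are flipped the scavenge survivors are copied there and
+ // must start out white.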
+ NewSpacePageIterator it(heap_->new_space()->FromSpaceStart(),
+ heap_->new_space()->FromSpaceEnd());
+ while (it.has_next()) {
+ Bitmap::Clear(it.next());
+ }
+}
+
+
+void IncrementalMarking::UpdateMarkingDequeAfterScavenge() {
+ if (!IsMarking()) return;
+
+ int current = marking_deque_.bottom();
+ int mask = marking_deque_.mask();
+ int limit = marking_deque_.top();
+ HeapObject** array = marking_deque_.array();
+ int new_top = current;
+
+ Map* filler_map = heap_->one_pointer_filler_map();
+
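+ // The scavenge may have moved objects that are still on the marking
+ // deque: replace them with their forwarding addresses and drop the
+ // entries for objects that did not survive.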
+ while (current != limit) {
+ HeapObject* obj = array[current];
+ ASSERT(obj->IsHeapObject());
+ current = ((current + 1) & mask);
+ if (heap_->InNewSpace(obj)) {
+ MapWord map_word = obj->map_word();
+ if (map_word.IsForwardingAddress()) {
+ HeapObject* dest = map_word.ToForwardingAddress();
+ array[new_top] = dest;
+ new_top = ((new_top + 1) & mask);
+ ASSERT(new_top != marking_deque_.bottom());
+#ifdef DEBUG
+ MarkBit mark_bit = Marking::MarkBitFrom(obj);
+ ASSERT(Marking::IsGrey(mark_bit) ||
+ (obj->IsFiller() && Marking::IsWhite(mark_bit)));
+#endif
+ }
+ } else if (obj->map() != filler_map) {
+ // Skip one-word filler objects that appear on the marking
+ // stack when we perform an in-place array shift.
+ array[new_top] = obj;
+ new_top = ((new_top + 1) & mask);
+ ASSERT(new_top != marking_deque_.bottom());
+#ifdef DEBUG
+ MarkBit mark_bit = Marking::MarkBitFrom(obj);
+ ASSERT(Marking::IsGrey(mark_bit) ||
+ (obj->IsFiller() && Marking::IsWhite(mark_bit)));
+#endif
+ }
+ }
+ marking_deque_.set_top(new_top);
+
+ steps_took_since_last_gc_ = 0;
+ steps_count_since_last_gc_ = 0;
+ longest_step_ = 0.0;
+}
+
+
+void IncrementalMarking::VisitGlobalContext(Context* ctx, ObjectVisitor* v) {
+ v->VisitPointers(
+ HeapObject::RawField(
+ ctx, Context::MarkCompactBodyDescriptor::kStartOffset),
+ HeapObject::RawField(
+ ctx, Context::MarkCompactBodyDescriptor::kEndOffset));
+
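+ // The body descriptor above covers only the strong part of the context.
+ // The weak slots are not marked, but their locations are recorded so
+ // that the compacting collector can update them.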
+ MarkCompactCollector* collector = heap_->mark_compact_collector();
+ for (int idx = Context::FIRST_WEAK_SLOT;
+ idx < Context::GLOBAL_CONTEXT_SLOTS;
+ ++idx) {
+ Object** slot =
+ HeapObject::RawField(ctx, FixedArray::OffsetOfElementAt(idx));
+ collector->RecordSlot(slot, slot, *slot);
+ }
+}
+
+
+void IncrementalMarking::Hurry() {
+ if (state() == MARKING) {
+ double start = 0.0;
+ if (FLAG_trace_incremental_marking) {
+ PrintF("[IncrementalMarking] Hurry\n");
+ start = OS::TimeCurrentMillis();
+ }
+ // TODO(gc) Hurry can mark objects it encounters black because the
+ // mutator is stopped.
+ Map* filler_map = heap_->one_pointer_filler_map();
+ Map* global_context_map = heap_->global_context_map();
+ IncrementalMarkingMarkingVisitor marking_visitor(heap_, this);
+ while (!marking_deque_.IsEmpty()) {
+ HeapObject* obj = marking_deque_.Pop();
+
+ // Explicitly skip one-word fillers. Incremental markbit patterns are
+ // correct only for objects that occupy at least two words.
+ Map* map = obj->map();
+ if (map == filler_map) {
+ continue;
+ } else if (map == global_context_map) {
+ // Global contexts have weak fields.
+ VisitGlobalContext(Context::cast(obj), &marking_visitor);
+ } else {
+ obj->Iterate(&marking_visitor);
+ }
+
+ MarkBit mark_bit = Marking::MarkBitFrom(obj);
+ ASSERT(!Marking::IsBlack(mark_bit));
+ Marking::MarkBlack(mark_bit);
+ MemoryChunk::IncrementLiveBytes(obj->address(), obj->Size());
+ }
+ state_ = COMPLETE;
+ if (FLAG_trace_incremental_marking) {
+ double end = OS::TimeCurrentMillis();
+ PrintF("[IncrementalMarking] Complete (hurry), spent %d ms.\n",
+ static_cast<int>(end - start));
+ }
+ }
+
+ if (FLAG_cleanup_code_caches_at_gc) {
+ PolymorphicCodeCache* poly_cache = heap_->polymorphic_code_cache();
+ Marking::GreyToBlack(Marking::MarkBitFrom(poly_cache));
+ MemoryChunk::IncrementLiveBytes(poly_cache->address(),
+ PolymorphicCodeCache::kSize);
+ }
+
+ Object* context = heap_->global_contexts_list();
+ while (!context->IsUndefined()) {
+ NormalizedMapCache* cache = Context::cast(context)->normalized_map_cache();
+ MarkBit mark_bit = Marking::MarkBitFrom(cache);
+ if (Marking::IsGrey(mark_bit)) {
+ Marking::GreyToBlack(mark_bit);
+ MemoryChunk::IncrementLiveBytes(cache->address(), cache->Size());
+ }
+ context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
+ }
+}
+
+
+void IncrementalMarking::Abort() {
+ if (IsStopped()) return;
+ if (FLAG_trace_incremental_marking) {
+ PrintF("[IncrementalMarking] Aborting.\n");
+ }
+ heap_->new_space()->LowerInlineAllocationLimit(0);
+ IncrementalMarking::set_should_hurry(false);
+ ResetStepCounters();
+ if (IsMarking()) {
+ PatchIncrementalMarkingRecordWriteStubs(heap_,
+ RecordWriteStub::STORE_BUFFER_ONLY);
+ DeactivateIncrementalWriteBarrier();
+
+ if (is_compacting_) {
+ LargeObjectIterator it(heap_->lo_space());
+ for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
+ Page* p = Page::FromAddress(obj->address());
+ if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
+ p->ClearFlag(Page::RESCAN_ON_EVACUATION);
+ }
+ }
+ }
+ }
+ heap_->isolate()->stack_guard()->Continue(GC_REQUEST);
+ state_ = STOPPED;
+ is_compacting_ = false;
+}
+
+
+void IncrementalMarking::Finalize() {
+ Hurry();
+ state_ = STOPPED;
+ is_compacting_ = false;
+ heap_->new_space()->LowerInlineAllocationLimit(0);
+ IncrementalMarking::set_should_hurry(false);
+ ResetStepCounters();
+ PatchIncrementalMarkingRecordWriteStubs(heap_,
+ RecordWriteStub::STORE_BUFFER_ONLY);
+ DeactivateIncrementalWriteBarrier();
+ ASSERT(marking_deque_.IsEmpty());
+ heap_->isolate()->stack_guard()->Continue(GC_REQUEST);
+}
+
+
+void IncrementalMarking::MarkingComplete() {
+ state_ = COMPLETE;
+ // We will set the stack guard to request a GC now. This means the rest
+ // of the GC gets performed as soon as possible (we can't do a GC here in
+ // a record-write context). If a few things get allocated between now and
+ // then, that shouldn't make us do a scavenge and keep being incremental,
+ // so we set the should-hurry flag to indicate that there can't be much
+ // work left to do.
+ set_should_hurry(true);
+ if (FLAG_trace_incremental_marking) {
+ PrintF("[IncrementalMarking] Complete (normal).\n");
+ }
+ if (!heap_->idle_notification_will_schedule_next_gc()) {
+ heap_->isolate()->stack_guard()->RequestGC();
+ }
+}
+
+
+void IncrementalMarking::Step(intptr_t allocated_bytes) {
+ if (heap_->gc_state() != Heap::NOT_IN_GC ||
+ !FLAG_incremental_marking ||
+ !FLAG_incremental_marking_steps ||
+ (state_ != SWEEPING && state_ != MARKING)) {
+ return;
+ }
+
+ allocated_ += allocated_bytes;
+
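+ // Accumulate allocations and only do a marking step once at least
+ // kAllocatedThreshold bytes have been allocated.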
+ if (allocated_ < kAllocatedThreshold) return;
+
+ if (state_ == MARKING && no_marking_scope_depth_ > 0) return;
+
+ intptr_t bytes_to_process = allocated_ * allocation_marking_factor_;
+ bytes_scanned_ += bytes_to_process;
+
+ double start = 0;
+
+ if (FLAG_trace_incremental_marking || FLAG_trace_gc) {
+ start = OS::TimeCurrentMillis();
+ }
+
+ if (state_ == SWEEPING) {
+ if (heap_->AdvanceSweepers(static_cast<int>(bytes_to_process))) {
+ bytes_scanned_ = 0;
+ StartMarking(PREVENT_COMPACTION);
+ }
+ } else if (state_ == MARKING) {
+ Map* filler_map = heap_->one_pointer_filler_map();
+ Map* global_context_map = heap_->global_context_map();
+ IncrementalMarkingMarkingVisitor marking_visitor(heap_, this);
+ while (!marking_deque_.IsEmpty() && bytes_to_process > 0) {
+ HeapObject* obj = marking_deque_.Pop();
+
+ // Explicitly skip one-word fillers. Incremental markbit patterns are
+ // correct only for objects that occupy at least two words.
+ Map* map = obj->map();
+ if (map == filler_map) continue;
+
+ int size = obj->SizeFromMap(map);
+ bytes_to_process -= size;
+ MarkBit map_mark_bit = Marking::MarkBitFrom(map);
+ if (Marking::IsWhite(map_mark_bit)) {
+ WhiteToGreyAndPush(map, map_mark_bit);
+ }
+
+ // TODO(gc) switch to static visitor instead of normal visitor.
+ if (map == global_context_map) {
+ // Global contexts have weak fields.
+ Context* ctx = Context::cast(obj);
+
+ // We will mark the cache black with a separate pass
+ // when we finish marking.
+ MarkObjectGreyDoNotEnqueue(ctx->normalized_map_cache());
+
+ VisitGlobalContext(ctx, &marking_visitor);
+ } else {
+ obj->IterateBody(map->instance_type(), size, &marking_visitor);
+ }
+
+ MarkBit obj_mark_bit = Marking::MarkBitFrom(obj);
+ SLOW_ASSERT(Marking::IsGrey(obj_mark_bit) ||
+ (obj->IsFiller() && Marking::IsWhite(obj_mark_bit)));
+ Marking::MarkBlack(obj_mark_bit);
+ MemoryChunk::IncrementLiveBytes(obj->address(), size);
+ }
+ if (marking_deque_.IsEmpty()) MarkingComplete();
+ }
+
+ allocated_ = 0;
+
+ steps_count_++;
+ steps_count_since_last_gc_++;
+
+ bool speed_up = false;
+
+ if ((steps_count_ % kAllocationMarkingFactorSpeedupInterval) == 0) {
+ if (FLAG_trace_gc) {
+ PrintF("Speed up marking after %d steps\n",
+ static_cast<int>(kAllocationMarkingFactorSpeedupInterval));
+ }
+ speed_up = true;
+ }
+
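+ // Marking also speeds up when old space is nearly exhausted, when the
+ // heap has grown a lot since marking started, or when the marker is
+ // falling behind the mutator's promotion rate.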
+ bool space_left_is_very_small =
+ (old_generation_space_available_at_start_of_incremental_ < 10 * MB);
+
+ bool only_1_nth_of_space_that_was_available_still_left =
+ (SpaceLeftInOldSpace() * (allocation_marking_factor_ + 1) <
+ old_generation_space_available_at_start_of_incremental_);
+
+ if (space_left_is_very_small ||
+ only_1_nth_of_space_that_was_available_still_left) {
+ if (FLAG_trace_gc) PrintF("Speed up marking because of low space left\n");
+ speed_up = true;
+ }
+
+ bool size_of_old_space_multiplied_by_n_during_marking =
+ (heap_->PromotedTotalSize() >
+ (allocation_marking_factor_ + 1) *
+ old_generation_space_used_at_start_of_incremental_);
+ if (size_of_old_space_multiplied_by_n_during_marking) {
+ speed_up = true;
+ if (FLAG_trace_gc) {
+ PrintF("Speed up marking because of heap size increase\n");
+ }
+ }
+
+ int64_t promoted_during_marking = heap_->PromotedTotalSize()
+ - old_generation_space_used_at_start_of_incremental_;
+ intptr_t delay = allocation_marking_factor_ * MB;
+ intptr_t scavenge_slack = heap_->MaxSemiSpaceSize();
+
+ // We try to scan at least twice as fast as we are allocating.
+ if (promoted_during_marking > bytes_scanned_ / 2 + scavenge_slack + delay) {
+ if (FLAG_trace_gc) {
+ PrintF("Speed up marking because marker was not keeping up\n");
+ }
+ speed_up = true;
+ }
+
+ if (speed_up) {
+ if (state_ != MARKING) {
+ if (FLAG_trace_gc) {
+ PrintF("Postponing speeding up marking until marking starts\n");
+ }
+ } else {
+ allocation_marking_factor_ += kAllocationMarkingFactorSpeedup;
+ allocation_marking_factor_ = static_cast<int>(
+ Min(kMaxAllocationMarkingFactor,
+ static_cast<intptr_t>(allocation_marking_factor_ * 1.3)));
+ if (FLAG_trace_gc) {
+ PrintF("Marking speed increased to %d\n", allocation_marking_factor_);
+ }
+ }
+ }
+
+ if (FLAG_trace_incremental_marking || FLAG_trace_gc) {
+ double end = OS::TimeCurrentMillis();
+ double delta = (end - start);
+ longest_step_ = Max(longest_step_, delta);
+ steps_took_ += delta;
+ steps_took_since_last_gc_ += delta;
+ }
+}
+
+
+void IncrementalMarking::ResetStepCounters() {
+ steps_count_ = 0;
+ steps_took_ = 0;
+ longest_step_ = 0.0;
+ old_generation_space_available_at_start_of_incremental_ =
+ SpaceLeftInOldSpace();
+ old_generation_space_used_at_start_of_incremental_ =
+ heap_->PromotedTotalSize();
+ steps_count_since_last_gc_ = 0;
+ steps_took_since_last_gc_ = 0;
+ bytes_rescanned_ = 0;
+ allocation_marking_factor_ = kInitialAllocationMarkingFactor;
+ bytes_scanned_ = 0;
+}
+
+
+int64_t IncrementalMarking::SpaceLeftInOldSpace() {
+ return heap_->MaxOldGenerationSize() - heap_->PromotedSpaceSize();
+}
+
+} } // namespace v8::internal
diff --git a/deps/v8/src/incremental-marking.h b/deps/v8/src/incremental-marking.h
new file mode 100644
index 000000000..25def8706
--- /dev/null
+++ b/deps/v8/src/incremental-marking.h
@@ -0,0 +1,281 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_INCREMENTAL_MARKING_H_
+#define V8_INCREMENTAL_MARKING_H_
+
+
+#include "execution.h"
+#include "mark-compact.h"
+#include "objects.h"
+
+namespace v8 {
+namespace internal {
+
+
+class IncrementalMarking {
+ public:
+ enum State {
+ STOPPED,
+ SWEEPING,
+ MARKING,
+ COMPLETE
+ };
+
+ explicit IncrementalMarking(Heap* heap);
+
+ void TearDown();
+
+ State state() {
+ ASSERT(state_ == STOPPED || FLAG_incremental_marking);
+ return state_;
+ }
+
+ bool should_hurry() { return should_hurry_; }
+
+ inline bool IsStopped() { return state() == STOPPED; }
+
+ INLINE(bool IsMarking()) { return state() >= MARKING; }
+
+ inline bool IsMarkingIncomplete() { return state() == MARKING; }
+
+ inline bool IsComplete() { return state() == COMPLETE; }
+
+ bool WorthActivating();
+
+ void Start();
+
+ void Stop();
+
+ void PrepareForScavenge();
+
+ void UpdateMarkingDequeAfterScavenge();
+
+ void Hurry();
+
+ void Finalize();
+
+ void Abort();
+
+ void MarkingComplete();
+
+ // It's hard to know how much work the incremental marker should do to make
+ // progress in the face of the mutator creating new work for it. We start
+ // off at a moderate rate of work and gradually increase the speed of the
+ // incremental marker until it completes.
+ // Do some marking every time this much memory has been allocated.
+ static const intptr_t kAllocatedThreshold = 65536;
+ // Start off by marking this many times more memory than has been allocated.
+ static const intptr_t kInitialAllocationMarkingFactor = 1;
+ // But if we are promoting a lot of data we need to mark faster to keep up
+ // with the data that is entering the old space through promotion.
+ static const intptr_t kFastMarking = 3;
+ // After this many steps we increase the marking/allocating factor.
+ static const intptr_t kAllocationMarkingFactorSpeedupInterval = 1024;
+ // This is how much we increase the marking/allocating factor by.
+ static const intptr_t kAllocationMarkingFactorSpeedup = 2;
+ static const intptr_t kMaxAllocationMarkingFactor = 1000;
+
+ void OldSpaceStep(intptr_t allocated) {
+ Step(allocated * kFastMarking / kInitialAllocationMarkingFactor);
+ }
+
+ void Step(intptr_t allocated);
+
+ inline void RestartIfNotMarking() {
+ if (state_ == COMPLETE) {
+ state_ = MARKING;
+ if (FLAG_trace_incremental_marking) {
+ PrintF("[IncrementalMarking] Restarting (new grey objects)\n");
+ }
+ }
+ }
+
+ static void RecordWriteFromCode(HeapObject* obj,
+ Object* value,
+ Isolate* isolate);
+
+ static void RecordWriteForEvacuationFromCode(HeapObject* obj,
+ Object** slot,
+ Isolate* isolate);
+
+ INLINE(bool BaseRecordWrite(HeapObject* obj, Object** slot, Object* value));
+ INLINE(void RecordWrite(HeapObject* obj, Object** slot, Object* value));
+ INLINE(void RecordWriteIntoCode(HeapObject* obj,
+ RelocInfo* rinfo,
+ Object* value));
+ INLINE(void RecordWriteOfCodeEntry(JSFunction* host,
+ Object** slot,
+ Code* value));
+
+
+ void RecordWriteSlow(HeapObject* obj, Object** slot, Object* value);
+ void RecordWriteIntoCodeSlow(HeapObject* obj,
+ RelocInfo* rinfo,
+ Object* value);
+ void RecordWriteOfCodeEntrySlow(JSFunction* host, Object** slot, Code* value);
+ void RecordCodeTargetPatch(Code* host, Address pc, HeapObject* value);
+ void RecordCodeTargetPatch(Address pc, HeapObject* value);
+
+ inline void RecordWrites(HeapObject* obj);
+
+ inline void BlackToGreyAndUnshift(HeapObject* obj, MarkBit mark_bit);
+
+ inline void WhiteToGreyAndPush(HeapObject* obj, MarkBit mark_bit);
+
+ inline void WhiteToGrey(HeapObject* obj, MarkBit mark_bit);
+
+ // Makes a white object black, or keeps a grey or black object's color.
+ // Returns true if converting white to black.
+ inline bool MarkBlackOrKeepGrey(MarkBit mark_bit) {
+ ASSERT(!Marking::IsImpossible(mark_bit));
+ if (mark_bit.Get()) {
+ // Grey or black: Keep the color.
+ return false;
+ }
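+ // White: setting the first mark bit produces the black bit pattern,
+ // which the ASSERT below checks.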
+ mark_bit.Set();
+ ASSERT(Marking::IsBlack(mark_bit));
+ return true;
+ }
+
+ inline int steps_count() {
+ return steps_count_;
+ }
+
+ inline double steps_took() {
+ return steps_took_;
+ }
+
+ inline double longest_step() {
+ return longest_step_;
+ }
+
+ inline int steps_count_since_last_gc() {
+ return steps_count_since_last_gc_;
+ }
+
+ inline double steps_took_since_last_gc() {
+ return steps_took_since_last_gc_;
+ }
+
+ inline void SetOldSpacePageFlags(MemoryChunk* chunk) {
+ SetOldSpacePageFlags(chunk, IsMarking(), IsCompacting());
+ }
+
+ inline void SetNewSpacePageFlags(NewSpacePage* chunk) {
+ SetNewSpacePageFlags(chunk, IsMarking());
+ }
+
+ MarkingDeque* marking_deque() { return &marking_deque_; }
+
+ bool IsCompacting() { return IsMarking() && is_compacting_; }
+
+ void ActivateGeneratedStub(Code* stub);
+
+ void NotifyOfHighPromotionRate() {
+ if (IsMarking()) {
+ if (allocation_marking_factor_ < kFastMarking) {
+ if (FLAG_trace_gc) {
+ PrintF("Increasing marking speed to %d due to high promotion rate\n",
+ static_cast<int>(kFastMarking));
+ }
+ allocation_marking_factor_ = kFastMarking;
+ }
+ }
+ }
+
+ void EnterNoMarkingScope() {
+ no_marking_scope_depth_++;
+ }
+
+ void LeaveNoMarkingScope() {
+ no_marking_scope_depth_--;
+ }
+
+ void UncommitMarkingDeque();
+
+ private:
+ void set_should_hurry(bool val) {
+ should_hurry_ = val;
+ }
+
+ int64_t SpaceLeftInOldSpace();
+
+ void ResetStepCounters();
+
+ enum CompactionFlag { ALLOW_COMPACTION, PREVENT_COMPACTION };
+
+ void StartMarking(CompactionFlag flag);
+
+ void ActivateIncrementalWriteBarrier(PagedSpace* space);
+ static void ActivateIncrementalWriteBarrier(NewSpace* space);
+ void ActivateIncrementalWriteBarrier();
+
+ static void DeactivateIncrementalWriteBarrierForSpace(PagedSpace* space);
+ static void DeactivateIncrementalWriteBarrierForSpace(NewSpace* space);
+ void DeactivateIncrementalWriteBarrier();
+
+ static void SetOldSpacePageFlags(MemoryChunk* chunk,
+ bool is_marking,
+ bool is_compacting);
+
+ static void SetNewSpacePageFlags(NewSpacePage* chunk, bool is_marking);
+
+ void EnsureMarkingDequeIsCommitted();
+
+ void VisitGlobalContext(Context* ctx, ObjectVisitor* v);
+
+ Heap* heap_;
+
+ State state_;
+ bool is_compacting_;
+
+ VirtualMemory* marking_deque_memory_;
+ bool marking_deque_memory_committed_;
+ MarkingDeque marking_deque_;
+
+ int steps_count_;
+ double steps_took_;
+ double longest_step_;
+ int64_t old_generation_space_available_at_start_of_incremental_;
+ int64_t old_generation_space_used_at_start_of_incremental_;
+ int steps_count_since_last_gc_;
+ double steps_took_since_last_gc_;
+ int64_t bytes_rescanned_;
+ bool should_hurry_;
+ int allocation_marking_factor_;
+ intptr_t bytes_scanned_;
+ intptr_t allocated_;
+
+ int no_marking_scope_depth_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(IncrementalMarking);
+};
+
+} } // namespace v8::internal
+
+#endif // V8_INCREMENTAL_MARKING_H_
diff --git a/deps/v8/src/interpreter-irregexp.cc b/deps/v8/src/interpreter-irregexp.cc
index 796a447e2..b337e8845 100644
--- a/deps/v8/src/interpreter-irregexp.cc
+++ b/deps/v8/src/interpreter-irregexp.cc
@@ -33,9 +33,9 @@
#include "utils.h"
#include "ast.h"
#include "bytecodes-irregexp.h"
+#include "jsregexp.h"
#include "interpreter-irregexp.h"
-
namespace v8 {
namespace internal {
@@ -187,12 +187,12 @@ class BacktrackStack {
template <typename Char>
-static bool RawMatch(Isolate* isolate,
- const byte* code_base,
- Vector<const Char> subject,
- int* registers,
- int current,
- uint32_t current_char) {
+static RegExpImpl::IrregexpResult RawMatch(Isolate* isolate,
+ const byte* code_base,
+ Vector<const Char> subject,
+ int* registers,
+ int current,
+ uint32_t current_char) {
const byte* pc = code_base;
// BacktrackStack ensures that the memory allocated for the backtracking stack
// is returned to the system or cached if there is no stack being cached at
@@ -211,24 +211,24 @@ static bool RawMatch(Isolate* isolate,
switch (insn & BYTECODE_MASK) {
BYTECODE(BREAK)
UNREACHABLE();
- return false;
+ return RegExpImpl::RE_FAILURE;
BYTECODE(PUSH_CP)
if (--backtrack_stack_space < 0) {
- return false; // No match on backtrack stack overflow.
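+ // Backtrack-stack overflow is now reported as an exception so the
+ // caller can throw a real stack-overflow error instead of silently
+ // failing the match.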
+ return RegExpImpl::RE_EXCEPTION;
}
*backtrack_sp++ = current;
pc += BC_PUSH_CP_LENGTH;
break;
BYTECODE(PUSH_BT)
if (--backtrack_stack_space < 0) {
- return false; // No match on backtrack stack overflow.
+ return RegExpImpl::RE_EXCEPTION;
}
*backtrack_sp++ = Load32Aligned(pc + 4);
pc += BC_PUSH_BT_LENGTH;
break;
BYTECODE(PUSH_REGISTER)
if (--backtrack_stack_space < 0) {
- return false; // No match on backtrack stack overflow.
+ return RegExpImpl::RE_EXCEPTION;
}
*backtrack_sp++ = registers[insn >> BYTECODE_SHIFT];
pc += BC_PUSH_REGISTER_LENGTH;
@@ -278,9 +278,9 @@ static bool RawMatch(Isolate* isolate,
pc += BC_POP_REGISTER_LENGTH;
break;
BYTECODE(FAIL)
- return false;
+ return RegExpImpl::RE_FAILURE;
BYTECODE(SUCCEED)
- return true;
+ return RegExpImpl::RE_SUCCESS;
BYTECODE(ADVANCE_CP)
current += insn >> BYTECODE_SHIFT;
pc += BC_ADVANCE_CP_LENGTH;
@@ -625,11 +625,12 @@ static bool RawMatch(Isolate* isolate,
}
-bool IrregexpInterpreter::Match(Isolate* isolate,
- Handle<ByteArray> code_array,
- Handle<String> subject,
- int* registers,
- int start_position) {
+RegExpImpl::IrregexpResult IrregexpInterpreter::Match(
+ Isolate* isolate,
+ Handle<ByteArray> code_array,
+ Handle<String> subject,
+ int* registers,
+ int start_position) {
ASSERT(subject->IsFlat());
AssertNoAllocation a;
diff --git a/deps/v8/src/interpreter-irregexp.h b/deps/v8/src/interpreter-irregexp.h
index 076f0c508..0f45d9820 100644
--- a/deps/v8/src/interpreter-irregexp.h
+++ b/deps/v8/src/interpreter-irregexp.h
@@ -1,4 +1,4 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -36,11 +36,11 @@ namespace internal {
class IrregexpInterpreter {
public:
- static bool Match(Isolate* isolate,
- Handle<ByteArray> code,
- Handle<String> subject,
- int* captures,
- int start_position);
+ static RegExpImpl::IrregexpResult Match(Isolate* isolate,
+ Handle<ByteArray> code,
+ Handle<String> subject,
+ int* captures,
+ int start_position);
};
diff --git a/deps/v8/src/isolate-inl.h b/deps/v8/src/isolate-inl.h
index aa6b5372c..0a2c17404 100644
--- a/deps/v8/src/isolate-inl.h
+++ b/deps/v8/src/isolate-inl.h
@@ -36,6 +36,19 @@ namespace v8 {
namespace internal {
+SaveContext::SaveContext(Isolate* isolate) : prev_(isolate->save_context()) {
+ if (isolate->context() != NULL) {
+ context_ = Handle<Context>(isolate->context());
+#if __GNUC_VERSION__ >= 40100 && __GNUC_VERSION__ < 40300
+ dummy_ = Handle<Context>(isolate->context());
+#endif
+ }
+ isolate->set_save_context(this);
+
+ c_entry_fp_ = isolate->c_entry_fp(isolate->thread_local_top());
+}
+
+
bool Isolate::DebuggerHasBreakPoints() {
#ifdef ENABLE_DEBUGGER_SUPPORT
return debug()->has_break_points();
diff --git a/deps/v8/src/isolate.cc b/deps/v8/src/isolate.cc
index fd0f673e7..c235a2343 100644
--- a/deps/v8/src/isolate.cc
+++ b/deps/v8/src/isolate.cc
@@ -98,6 +98,15 @@ void ThreadLocalTop::InitializeInternal() {
failed_access_check_callback_ = NULL;
save_context_ = NULL;
catcher_ = NULL;
+ top_lookup_result_ = NULL;
+
+ // These members are re-initialized later after deserialization
+ // is complete.
+ pending_exception_ = NULL;
+ has_pending_message_ = false;
+ pending_message_obj_ = NULL;
+ pending_message_script_ = NULL;
+ scheduled_exception_ = NULL;
}
@@ -472,6 +481,9 @@ void Isolate::Iterate(ObjectVisitor* v, ThreadLocalTop* thread) {
for (StackFrameIterator it(this, thread); !it.done(); it.Advance()) {
it.frame()->Iterate(v);
}
+
+ // Iterate pointers in live lookup results.
+ thread->top_lookup_result_->Iterate(v);
}
@@ -1060,6 +1072,16 @@ void Isolate::DoThrow(MaybeObject* exception, MessageLocation* location) {
message_obj = MessageHandler::MakeMessageObject("uncaught_exception",
location, HandleVector<Object>(&exception_handle, 1), stack_trace,
stack_trace_object);
+ } else if (location != NULL && !location->script().is_null()) {
+ // We are bootstrapping and caught an error where the location is set
+ // and we have a script for the location.
+ // In this case we could have an extension (or an internal error
+ // somewhere) and we print out the line number at which the error occurred
+ // to the console for easier debugging.
+ int line_number = GetScriptLineNumberSafe(location->script(),
+ location->start_pos());
+ OS::PrintError("Extension or internal compilation error at line %d.\n",
+ line_number);
}
}
@@ -1284,6 +1306,9 @@ char* Isolate::ArchiveThread(char* to) {
memcpy(to, reinterpret_cast<char*>(thread_local_top()),
sizeof(ThreadLocalTop));
InitializeThreadLocal();
+ clear_pending_exception();
+ clear_pending_message();
+ clear_scheduled_exception();
return to + sizeof(ThreadLocalTop);
}
@@ -1403,11 +1428,13 @@ Isolate::Isolate()
in_use_list_(0),
free_list_(0),
preallocated_storage_preallocated_(false),
- pc_to_code_cache_(NULL),
+ inner_pointer_to_code_cache_(NULL),
write_input_buffer_(NULL),
global_handles_(NULL),
context_switcher_(NULL),
thread_manager_(NULL),
+ fp_stubs_generated_(false),
+ has_installed_extensions_(false),
string_tracker_(NULL),
regexp_stack_(NULL),
embedder_data_(NULL) {
@@ -1575,8 +1602,8 @@ Isolate::~Isolate() {
compilation_cache_ = NULL;
delete bootstrapper_;
bootstrapper_ = NULL;
- delete pc_to_code_cache_;
- pc_to_code_cache_ = NULL;
+ delete inner_pointer_to_code_cache_;
+ inner_pointer_to_code_cache_ = NULL;
delete write_input_buffer_;
write_input_buffer_ = NULL;
@@ -1610,9 +1637,6 @@ Isolate::~Isolate() {
void Isolate::InitializeThreadLocal() {
thread_local_top_.isolate_ = this;
thread_local_top_.Initialize();
- clear_pending_exception();
- clear_pending_message();
- clear_scheduled_exception();
}
@@ -1700,7 +1724,7 @@ bool Isolate::Init(Deserializer* des) {
context_slot_cache_ = new ContextSlotCache();
descriptor_lookup_cache_ = new DescriptorLookupCache();
unicode_cache_ = new UnicodeCache();
- pc_to_code_cache_ = new PcToCodeCache(this);
+ inner_pointer_to_code_cache_ = new InnerPointerToCodeCache(this);
write_input_buffer_ = new StringInputBuffer();
global_handles_ = new GlobalHandles(this);
bootstrapper_ = new Bootstrapper();
@@ -1767,9 +1791,14 @@ bool Isolate::Init(Deserializer* des) {
// If we are deserializing, read the state into the now-empty heap.
if (des != NULL) {
des->Deserialize();
- stub_cache_->Clear();
+ stub_cache_->Initialize(true);
}
+ // Finish initialization of ThreadLocal after deserialization is done.
+ clear_pending_exception();
+ clear_pending_message();
+ clear_scheduled_exception();
+
// Deserializing may put strange things in the root array's copy of the
// stack guard.
heap_.SetStackLimits();
diff --git a/deps/v8/src/isolate.h b/deps/v8/src/isolate.h
index 2582da644..2ea9b80b6 100644
--- a/deps/v8/src/isolate.h
+++ b/deps/v8/src/isolate.h
@@ -66,7 +66,7 @@ class HandleScopeImplementer;
class HeapProfiler;
class InlineRuntimeFunctionsTable;
class NoAllocationStringAllocator;
-class PcToCodeCache;
+class InnerPointerToCodeCache;
class PreallocatedMemoryThread;
class RegExpStack;
class SaveContext;
@@ -255,6 +255,9 @@ class ThreadLocalTop BASE_EMBEDDED {
// Call back function to report unsafe JS accesses.
v8::FailedAccessCheckCallback failed_access_check_callback_;
+ // Head of the list of live LookupResults.
+ LookupResult* top_lookup_result_;
+
// Whether out of memory exceptions should be ignored.
bool ignore_out_of_memory_;
@@ -311,7 +314,6 @@ class HashMap;
V(int, bad_char_shift_table, kUC16AlphabetSize) \
V(int, good_suffix_shift_table, (kBMMaxShift + 1)) \
V(int, suffix_table, (kBMMaxShift + 1)) \
- V(uint32_t, random_seed, 2) \
V(uint32_t, private_random_seed, 2) \
ISOLATE_INIT_DEBUG_ARRAY_LIST(V)
@@ -841,7 +843,9 @@ class Isolate {
return unicode_cache_;
}
- PcToCodeCache* pc_to_code_cache() { return pc_to_code_cache_; }
+ InnerPointerToCodeCache* inner_pointer_to_code_cache() {
+ return inner_pointer_to_code_cache_;
+ }
StringInputBuffer* write_input_buffer() { return write_input_buffer_; }
@@ -879,12 +883,24 @@ class Isolate {
RuntimeState* runtime_state() { return &runtime_state_; }
+ void set_fp_stubs_generated(bool value) {
+ fp_stubs_generated_ = value;
+ }
+
+ bool fp_stubs_generated() { return fp_stubs_generated_; }
+
StaticResource<SafeStringInputBuffer>* compiler_safe_string_input_buffer() {
return &compiler_safe_string_input_buffer_;
}
Builtins* builtins() { return &builtins_; }
+ void NotifyExtensionInstalled() {
+ has_installed_extensions_ = true;
+ }
+
+ bool has_installed_extensions() { return has_installed_extensions_; }
+
unibrow::Mapping<unibrow::Ecma262Canonicalize>*
regexp_macro_assembler_canonicalize() {
return &regexp_macro_assembler_canonicalize_;
@@ -987,6 +1003,13 @@ class Isolate {
void SetData(void* data) { embedder_data_ = data; }
void* GetData() { return embedder_data_; }
+ LookupResult* top_lookup_result() {
+ return thread_local_top_.top_lookup_result_;
+ }
+ void SetTopLookupResult(LookupResult* top) {
+ thread_local_top_.top_lookup_result_ = top;
+ }
+
private:
Isolate();
@@ -1130,14 +1153,16 @@ class Isolate {
PreallocatedStorage in_use_list_;
PreallocatedStorage free_list_;
bool preallocated_storage_preallocated_;
- PcToCodeCache* pc_to_code_cache_;
+ InnerPointerToCodeCache* inner_pointer_to_code_cache_;
StringInputBuffer* write_input_buffer_;
GlobalHandles* global_handles_;
ContextSwitcher* context_switcher_;
ThreadManager* thread_manager_;
RuntimeState runtime_state_;
+ bool fp_stubs_generated_;
StaticResource<SafeStringInputBuffer> compiler_safe_string_input_buffer_;
Builtins builtins_;
+ bool has_installed_extensions_;
StringTracker* string_tracker_;
unibrow::Mapping<unibrow::Ecma262UnCanonicalize> jsregexp_uncanonicalize_;
unibrow::Mapping<unibrow::CanonicalizationRange> jsregexp_canonrange_;
@@ -1210,19 +1235,7 @@ class Isolate {
// versions of GCC. See V8 issue 122 for details.
class SaveContext BASE_EMBEDDED {
public:
- explicit SaveContext(Isolate* isolate) : prev_(isolate->save_context()) {
- if (isolate->context() != NULL) {
- context_ = Handle<Context>(isolate->context());
-#if __GNUC_VERSION__ >= 40100 && __GNUC_VERSION__ < 40300
- dummy_ = Handle<Context>(isolate->context());
-#endif
- }
- isolate->set_save_context(this);
-
- // If there is no JS frame under the current C frame, use the value 0.
- JavaScriptFrameIterator it(isolate);
- js_sp_ = it.done() ? 0 : it.frame()->sp();
- }
+ inline explicit SaveContext(Isolate* isolate);
~SaveContext() {
if (context_.is_null()) {
@@ -1240,8 +1253,8 @@ class SaveContext BASE_EMBEDDED {
SaveContext* prev() { return prev_; }
// Returns true if this save context is below a given JavaScript frame.
- bool below(JavaScriptFrame* frame) {
- return (js_sp_ == 0) || (frame->sp() < js_sp_);
+ bool IsBelowFrame(JavaScriptFrame* frame) {
+ return (c_entry_fp_ == 0) || (c_entry_fp_ > frame->sp());
}
private:
@@ -1250,7 +1263,7 @@ class SaveContext BASE_EMBEDDED {
Handle<Context> dummy_;
#endif
SaveContext* prev_;
- Address js_sp_; // The top JS frame's sp when saving context.
+ Address c_entry_fp_;
};
diff --git a/deps/v8/src/json-parser.h b/deps/v8/src/json-parser.h
index 68eab65fd..ca796a699 100644
--- a/deps/v8/src/json-parser.h
+++ b/deps/v8/src/json-parser.h
@@ -165,7 +165,7 @@ class JsonParser BASE_EMBEDDED {
template <bool seq_ascii>
Handle<Object> JsonParser<seq_ascii>::ParseJson(Handle<String> source) {
- isolate_ = source->map()->isolate();
+ isolate_ = source->map()->GetHeap()->isolate();
FlattenString(source);
source_ = source;
source_length_ = source_->length();
diff --git a/deps/v8/src/json.js b/deps/v8/src/json.js
index deba12621..ccef4456d 100644
--- a/deps/v8/src/json.js
+++ b/deps/v8/src/json.js
@@ -345,4 +345,4 @@ function SetUpJSON() {
));
}
-SetUpJSON()
+SetUpJSON();
diff --git a/deps/v8/src/jsregexp.cc b/deps/v8/src/jsregexp.cc
index 3ebfbdfc9..18ff2570e 100644
--- a/deps/v8/src/jsregexp.cc
+++ b/deps/v8/src/jsregexp.cc
@@ -68,9 +68,9 @@ Handle<Object> RegExpImpl::CreateRegExpLiteral(Handle<JSFunction> constructor,
Handle<String> flags,
bool* has_pending_exception) {
// Call the construct code with 2 arguments.
- Object** argv[2] = { Handle<Object>::cast(pattern).location(),
- Handle<Object>::cast(flags).location() };
- return Execution::New(constructor, 2, argv, has_pending_exception);
+ Handle<Object> argv[] = { pattern, flags };
+ return Execution::New(constructor, ARRAY_SIZE(argv), argv,
+ has_pending_exception);
}
@@ -509,14 +509,16 @@ RegExpImpl::IrregexpResult RegExpImpl::IrregexpExecOnce(
}
Handle<ByteArray> byte_codes(IrregexpByteCode(*irregexp, is_ascii), isolate);
- if (IrregexpInterpreter::Match(isolate,
- byte_codes,
- subject,
- register_vector,
- index)) {
- return RE_SUCCESS;
+ IrregexpResult result = IrregexpInterpreter::Match(isolate,
+ byte_codes,
+ subject,
+ register_vector,
+ index);
+ if (result == RE_EXCEPTION) {
+ ASSERT(!isolate->has_pending_exception());
+ isolate->StackOverflow();
}
- return RE_FAILURE;
+ return result;
#endif // V8_INTERPRETED_REGEXP
}
@@ -4723,7 +4725,6 @@ bool OutSet::Get(unsigned value) {
const uc16 DispatchTable::Config::kNoKey = unibrow::Utf8::kBadChar;
-const DispatchTable::Entry DispatchTable::Config::kNoValue;
void DispatchTable::AddRange(CharacterRange full_range, int value) {
diff --git a/deps/v8/src/jsregexp.h b/deps/v8/src/jsregexp.h
index 54297a49a..df110d1c2 100644
--- a/deps/v8/src/jsregexp.h
+++ b/deps/v8/src/jsregexp.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -29,6 +29,7 @@
#define V8_JSREGEXP_H_
#include "allocation.h"
+#include "assembler.h"
#include "zone-inl.h"
namespace v8 {
@@ -388,7 +389,7 @@ class DispatchTable : public ZoneObject {
typedef uc16 Key;
typedef Entry Value;
static const uc16 kNoKey;
- static const Entry kNoValue;
+ static const Entry NoValue() { return Value(); }
static inline int Compare(uc16 a, uc16 b) {
if (a == b)
return 0;
diff --git a/deps/v8/src/list-inl.h b/deps/v8/src/list-inl.h
index 80bccc9bc..e2c358cee 100644
--- a/deps/v8/src/list-inl.h
+++ b/deps/v8/src/list-inl.h
@@ -216,11 +216,11 @@ int SortedListBSearch(
int mid = (low + high) / 2;
T mid_elem = list[mid];
- if (mid_elem > elem) {
+ if (cmp(&mid_elem, &elem) > 0) {
high = mid - 1;
continue;
}
- if (mid_elem < elem) {
+ if (cmp(&mid_elem, &elem) < 0) {
low = mid + 1;
continue;
}
@@ -236,6 +236,7 @@ int SortedListBSearch(const List<T>& list, T elem) {
return SortedListBSearch<T>(list, elem, PointerValueCompare<T>);
}
+
} } // namespace v8::internal
#endif // V8_LIST_INL_H_
diff --git a/deps/v8/src/list.h b/deps/v8/src/list.h
index 055870904..57504e075 100644
--- a/deps/v8/src/list.h
+++ b/deps/v8/src/list.h
@@ -165,8 +165,11 @@ class List {
class Map;
class Code;
+template<typename T> class Handle;
typedef List<Map*> MapList;
typedef List<Code*> CodeList;
+typedef List<Handle<Map> > MapHandleList;
+typedef List<Handle<Code> > CodeHandleList;
// Perform binary search for an element in an already sorted
// list. Returns the index of the element or -1 if it was not found.
@@ -176,6 +179,7 @@ int SortedListBSearch(
template <typename T>
int SortedListBSearch(const List<T>& list, T elem);
+
} } // namespace v8::internal
diff --git a/deps/v8/src/lithium-allocator.cc b/deps/v8/src/lithium-allocator.cc
index 466110678..c4d8b1e5b 100644
--- a/deps/v8/src/lithium-allocator.cc
+++ b/deps/v8/src/lithium-allocator.cc
@@ -152,8 +152,8 @@ bool LiveRange::HasOverlap(UseInterval* target) const {
LiveRange::LiveRange(int id)
: id_(id),
spilled_(false),
+ is_double_(false),
assigned_register_(kInvalidAssignment),
- assigned_register_kind_(NONE),
last_interval_(NULL),
first_interval_(NULL),
first_pos_(NULL),
@@ -169,7 +169,7 @@ LiveRange::LiveRange(int id)
void LiveRange::set_assigned_register(int reg, RegisterKind register_kind) {
ASSERT(!HasRegisterAssigned() && !IsSpilled());
assigned_register_ = reg;
- assigned_register_kind_ = register_kind;
+ is_double_ = (register_kind == DOUBLE_REGISTERS);
ConvertOperands();
}
@@ -234,7 +234,8 @@ bool LiveRange::CanBeSpilled(LifetimePosition pos) {
// at the current or the immediate next position.
UsePosition* use_pos = NextRegisterPosition(pos);
if (use_pos == NULL) return true;
- return use_pos->pos().Value() > pos.NextInstruction().Value();
+ return
+ use_pos->pos().Value() > pos.NextInstruction().InstructionEnd().Value();
}
@@ -555,7 +556,7 @@ LAllocator::LAllocator(int num_values, HGraph* graph)
reusable_slots_(8),
next_virtual_register_(num_values),
first_artificial_register_(num_values),
- mode_(NONE),
+ mode_(GENERAL_REGISTERS),
num_registers_(-1),
graph_(graph),
has_osr_entry_(false) {}
@@ -1043,11 +1044,13 @@ void LAllocator::ResolvePhis(HBasicBlock* block) {
// it into a location different from the operand of a live range
// covering a branch instruction.
// Thus we need to manually record a pointer.
- if (phi->representation().IsTagged()) {
- LInstruction* branch =
- InstructionAt(cur_block->last_instruction_index());
- if (branch->HasPointerMap()) {
+ LInstruction* branch =
+ InstructionAt(cur_block->last_instruction_index());
+ if (branch->HasPointerMap()) {
+ if (phi->representation().IsTagged()) {
branch->pointer_map()->RecordPointer(phi_operand);
+ } else if (!phi->representation().IsDouble()) {
+ branch->pointer_map()->RecordUntagged(phi_operand);
}
}
}
@@ -1142,10 +1145,13 @@ void LAllocator::ResolveControlFlow(LiveRange* range,
// it into a location different from the operand of a live range
// covering a branch instruction.
// Thus we need to manually record a pointer.
- if (HasTaggedValue(range->id())) {
- LInstruction* branch = InstructionAt(pred->last_instruction_index());
- if (branch->HasPointerMap()) {
+ LInstruction* branch = InstructionAt(pred->last_instruction_index());
+ if (branch->HasPointerMap()) {
+ if (HasTaggedValue(range->id())) {
branch->pointer_map()->RecordPointer(cur_op);
+ } else if (!cur_op->IsDoubleStackSlot() &&
+ !cur_op->IsDoubleRegister()) {
+ branch->pointer_map()->RemovePointer(cur_op);
}
}
}
@@ -1461,7 +1467,6 @@ void LAllocator::ProcessOsrEntry() {
void LAllocator::AllocateGeneralRegisters() {
HPhase phase("Allocate general registers", this);
num_registers_ = Register::kNumAllocatableRegisters;
- mode_ = GENERAL_REGISTERS;
AllocateRegisters();
}
@@ -1475,7 +1480,6 @@ void LAllocator::AllocateDoubleRegisters() {
void LAllocator::AllocateRegisters() {
- ASSERT(mode_ != NONE);
ASSERT(unhandled_live_ranges_.is_empty());
for (int i = 0; i < live_ranges_.length(); ++i) {
@@ -1580,7 +1584,6 @@ void LAllocator::AllocateRegisters() {
const char* LAllocator::RegisterName(int allocation_index) {
- ASSERT(mode_ != NONE);
if (mode_ == GENERAL_REGISTERS) {
return Register::AllocationIndexToString(allocation_index);
} else {
diff --git a/deps/v8/src/lithium-allocator.h b/deps/v8/src/lithium-allocator.h
index e4e64974b..610beefc6 100644
--- a/deps/v8/src/lithium-allocator.h
+++ b/deps/v8/src/lithium-allocator.h
@@ -146,7 +146,6 @@ class LifetimePosition {
enum RegisterKind {
- NONE,
GENERAL_REGISTERS,
DOUBLE_REGISTERS
};
@@ -319,7 +318,7 @@ class LiveRange: public ZoneObject {
// live range to the result live range.
void SplitAt(LifetimePosition position, LiveRange* result);
- bool IsDouble() const { return assigned_register_kind_ == DOUBLE_REGISTERS; }
+ bool IsDouble() const { return is_double_; }
bool HasRegisterAssigned() const {
return assigned_register_ != kInvalidAssignment;
}
@@ -377,8 +376,8 @@ class LiveRange: public ZoneObject {
int id_;
bool spilled_;
+ bool is_double_;
int assigned_register_;
- RegisterKind assigned_register_kind_;
UseInterval* last_interval_;
UseInterval* first_interval_;
UsePosition* first_pos_;
diff --git a/deps/v8/src/lithium.cc b/deps/v8/src/lithium.cc
index 5410f6f05..31b16982d 100644
--- a/deps/v8/src/lithium.cc
+++ b/deps/v8/src/lithium.cc
@@ -156,6 +156,27 @@ void LPointerMap::RecordPointer(LOperand* op) {
}
+void LPointerMap::RemovePointer(LOperand* op) {
+ // Do not record arguments as pointers.
+ if (op->IsStackSlot() && op->index() < 0) return;
+ ASSERT(!op->IsDoubleRegister() && !op->IsDoubleStackSlot());
+ for (int i = 0; i < pointer_operands_.length(); ++i) {
+ if (pointer_operands_[i]->Equals(op)) {
+ pointer_operands_.Remove(i);
+ --i;
+ }
+ }
+}
+
+
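+// Untagged operands are only accumulated here; they are removed from
+// the pointer list lazily when GetNormalizedOperands() is called.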
+void LPointerMap::RecordUntagged(LOperand* op) {
+ // Do not record arguments as pointers.
+ if (op->IsStackSlot() && op->index() < 0) return;
+ ASSERT(!op->IsDoubleRegister() && !op->IsDoubleStackSlot());
+ untagged_operands_.Add(op);
+}
+
+
void LPointerMap::PrintTo(StringStream* stream) {
stream->Add("{");
for (int i = 0; i < pointer_operands_.length(); ++i) {
@@ -182,6 +203,7 @@ int ElementsKindToShiftSize(ElementsKind elements_kind) {
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
return 3;
+ case FAST_SMI_ONLY_ELEMENTS:
case FAST_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
diff --git a/deps/v8/src/lithium.h b/deps/v8/src/lithium.h
index 20da21a63..b605eb97b 100644
--- a/deps/v8/src/lithium.h
+++ b/deps/v8/src/lithium.h
@@ -407,9 +407,18 @@ class LParallelMove : public ZoneObject {
class LPointerMap: public ZoneObject {
public:
explicit LPointerMap(int position)
- : pointer_operands_(8), position_(position), lithium_position_(-1) { }
-
- const ZoneList<LOperand*>* operands() const { return &pointer_operands_; }
+ : pointer_operands_(8),
+ untagged_operands_(0),
+ position_(position),
+ lithium_position_(-1) { }
+
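+ // Drops all operands recorded as untagged from the pointer list and
+ // returns the remaining, tagged operands.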
+ const ZoneList<LOperand*>* GetNormalizedOperands() {
+ for (int i = 0; i < untagged_operands_.length(); ++i) {
+ RemovePointer(untagged_operands_[i]);
+ }
+ untagged_operands_.Clear();
+ return &pointer_operands_;
+ }
int position() const { return position_; }
int lithium_position() const { return lithium_position_; }
@@ -419,10 +428,13 @@ class LPointerMap: public ZoneObject {
}
void RecordPointer(LOperand* op);
+ void RemovePointer(LOperand* op);
+ void RecordUntagged(LOperand* op);
void PrintTo(StringStream* stream);
private:
ZoneList<LOperand*> pointer_operands_;
+ ZoneList<LOperand*> untagged_operands_;
int position_;
int lithium_position_;
};
@@ -442,6 +454,7 @@ class LEnvironment: public ZoneObject {
translation_index_(-1),
ast_id_(ast_id),
parameter_count_(parameter_count),
+ pc_offset_(-1),
values_(value_count),
representations_(value_count),
spilled_registers_(NULL),
@@ -455,6 +468,7 @@ class LEnvironment: public ZoneObject {
int translation_index() const { return translation_index_; }
int ast_id() const { return ast_id_; }
int parameter_count() const { return parameter_count_; }
+ int pc_offset() const { return pc_offset_; }
LOperand** spilled_registers() const { return spilled_registers_; }
LOperand** spilled_double_registers() const {
return spilled_double_registers_;
@@ -471,10 +485,13 @@ class LEnvironment: public ZoneObject {
return representations_[index].IsTagged();
}
- void Register(int deoptimization_index, int translation_index) {
+ void Register(int deoptimization_index,
+ int translation_index,
+ int pc_offset) {
ASSERT(!HasBeenRegistered());
deoptimization_index_ = deoptimization_index;
translation_index_ = translation_index;
+ pc_offset_ = pc_offset;
}
bool HasBeenRegistered() const {
return deoptimization_index_ != Safepoint::kNoDeoptimizationIndex;
@@ -495,6 +512,7 @@ class LEnvironment: public ZoneObject {
int translation_index_;
int ast_id_;
int parameter_count_;
+ int pc_offset_;
ZoneList<LOperand*> values_;
ZoneList<Representation> representations_;
diff --git a/deps/v8/src/liveedit-debugger.js b/deps/v8/src/liveedit-debugger.js
index e05c53ce1..c94a3ee60 100644
--- a/deps/v8/src/liveedit-debugger.js
+++ b/deps/v8/src/liveedit-debugger.js
@@ -325,9 +325,10 @@ Debug.LiveEdit = new function() {
if (old_node.children[i].live_shared_function_infos) {
old_node.children[i].live_shared_function_infos.
forEach(function (old_child_info) {
- %LiveEditReplaceRefToNestedFunction(old_info.info,
- corresponding_child_info,
- old_child_info.info);
+ %LiveEditReplaceRefToNestedFunction(
+ old_info.info,
+ corresponding_child_info,
+ old_child_info.info);
});
}
}
@@ -381,7 +382,7 @@ Debug.LiveEdit = new function() {
position: break_point_position,
line: break_point.line(),
column: break_point.column()
- }
+ };
break_point_old_positions.push(old_position_description);
}
@@ -418,7 +419,7 @@ Debug.LiveEdit = new function() {
position: updated_position,
line: new_location.line,
column: new_location.column
- }
+ };
break_point.set(original_script);
@@ -428,7 +429,7 @@ Debug.LiveEdit = new function() {
new_positions: new_position_description
} );
}
- }
+ };
}
@@ -465,7 +466,7 @@ Debug.LiveEdit = new function() {
}
PosTranslator.prototype.GetChunks = function() {
return this.chunks;
- }
+ };
PosTranslator.prototype.Translate = function(pos, inside_chunk_handler) {
var array = this.chunks;
@@ -492,18 +493,18 @@ Debug.LiveEdit = new function() {
inside_chunk_handler = PosTranslator.DefaultInsideChunkHandler;
}
return inside_chunk_handler(pos, chunk);
- }
+ };
PosTranslator.DefaultInsideChunkHandler = function(pos, diff_chunk) {
Assert(false, "Cannot translate position in changed area");
- }
+ };
PosTranslator.ShiftWithTopInsideChunkHandler =
function(pos, diff_chunk) {
// We carelessly do not check whether we stay inside the chunk after
// translation.
return pos - diff_chunk.pos1 + diff_chunk.pos2;
- }
+ };
var FunctionStatus = {
// No change to function or its inner functions; however its positions
@@ -517,7 +518,7 @@ Debug.LiveEdit = new function() {
CHANGED: "changed",
// Function is changed but cannot be patched.
DAMAGED: "damaged"
- }
+ };
function CodeInfoTreeNode(code_info, children, array_index) {
this.info = code_info;
@@ -585,14 +586,14 @@ Debug.LiveEdit = new function() {
var chunk_it = new function() {
var chunk_index = 0;
var pos_diff = 0;
- this.current = function() { return chunks[chunk_index]; }
+ this.current = function() { return chunks[chunk_index]; };
this.next = function() {
var chunk = chunks[chunk_index];
pos_diff = chunk.pos2 + chunk.len2 - (chunk.pos1 + chunk.len1);
chunk_index++;
- }
- this.done = function() { return chunk_index >= chunks.length; }
- this.TranslatePos = function(pos) { return pos + pos_diff; }
+ };
+ this.done = function() { return chunk_index >= chunks.length; };
+ this.TranslatePos = function(pos) { return pos + pos_diff; };
};
// A recursive function that processes internals of a function and all its
@@ -946,16 +947,16 @@ Debug.LiveEdit = new function() {
BLOCKED_ON_OTHER_STACK: 3,
BLOCKED_UNDER_NATIVE_CODE: 4,
REPLACED_ON_ACTIVE_STACK: 5
- }
+ };
FunctionPatchabilityStatus.SymbolName = function(code) {
- var enum = FunctionPatchabilityStatus;
- for (name in enum) {
- if (enum[name] == code) {
+ var enumeration = FunctionPatchabilityStatus;
+ for (name in enumeration) {
+ if (enumeration[name] == code) {
return name;
}
}
- }
+ };
// A logical failure in liveedit process. This means that change_log
@@ -968,7 +969,7 @@ Debug.LiveEdit = new function() {
Failure.prototype.toString = function() {
return "LiveEdit Failure: " + this.message;
- }
+ };
// A testing entry.
function GetPcFromSourcePos(func, source_pos) {
@@ -1078,5 +1079,5 @@ Debug.LiveEdit = new function() {
PosTranslator: PosTranslator,
CompareStrings: CompareStrings,
ApplySingleChunkPatch: ApplySingleChunkPatch
- }
-}
+ };
+};
diff --git a/deps/v8/src/liveedit.cc b/deps/v8/src/liveedit.cc
index d44c2fc1c..eb183dac5 100644
--- a/deps/v8/src/liveedit.cc
+++ b/deps/v8/src/liveedit.cc
@@ -602,7 +602,8 @@ static void CompileScriptForTracker(Isolate* isolate, Handle<Script> script) {
// Build AST.
CompilationInfo info(script);
info.MarkAsGlobal();
- if (ParserApi::Parse(&info)) {
+ // Parse and don't allow skipping lazy functions.
+ if (ParserApi::Parse(&info, kNoParsingFlags)) {
// Compile the code.
LiveEditFunctionTracker tracker(info.isolate(), info.function());
if (Compiler::MakeCodeForLiveEdit(&info)) {
@@ -797,7 +798,7 @@ class FunctionInfoListener {
HandleScope scope;
FunctionInfoWrapper info = FunctionInfoWrapper::Create();
info.SetInitialProperties(fun->name(), fun->start_position(),
- fun->end_position(), fun->num_parameters(),
+ fun->end_position(), fun->parameter_count(),
current_parent_index_);
current_parent_index_ = len_;
SetElementNonStrict(result_, len_, info.GetJSArray());
@@ -855,38 +856,20 @@ class FunctionInfoListener {
return HEAP->undefined_value();
}
do {
- ZoneList<Variable*> list(10);
- outer_scope->CollectUsedVariables(&list);
- int j = 0;
- for (int i = 0; i < list.length(); i++) {
- Variable* var1 = list[i];
- if (var1->IsContextSlot()) {
- if (j != i) {
- list[j] = var1;
- }
- j++;
- }
- }
+ ZoneList<Variable*> stack_list(outer_scope->StackLocalCount());
+ ZoneList<Variable*> context_list(outer_scope->ContextLocalCount());
+ outer_scope->CollectStackAndContextLocals(&stack_list, &context_list);
+ context_list.Sort(&Variable::CompareIndex);
- // Sort it.
- for (int k = 1; k < j; k++) {
- int l = k;
- for (int m = k + 1; m < j; m++) {
- if (list[l]->index() > list[m]->index()) {
- l = m;
- }
- }
- list[k] = list[l];
- }
- for (int i = 0; i < j; i++) {
+ for (int i = 0; i < context_list.length(); i++) {
SetElementNonStrict(scope_info_list,
scope_info_length,
- list[i]->name());
+ context_list[i]->name());
scope_info_length++;
SetElementNonStrict(
scope_info_list,
scope_info_length,
- Handle<Smi>(Smi::FromInt(list[i]->index())));
+ Handle<Smi>(Smi::FromInt(context_list[i]->index())));
scope_info_length++;
}
SetElementNonStrict(scope_info_list,
@@ -1000,6 +983,7 @@ class ReferenceCollectorVisitor : public ObjectVisitor {
static void ReplaceCodeObject(Code* original, Code* substitution) {
ASSERT(!HEAP->InNewSpace(substitution));
+ HeapIterator iterator;
AssertNoAllocation no_allocations_please;
// A zone scope for ReferenceCollectorVisitor.
@@ -1016,7 +1000,6 @@ static void ReplaceCodeObject(Code* original, Code* substitution) {
// Now iterate over all pointers of all objects, including code_target
// implicit pointers.
- HeapIterator iterator;
for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
obj->Iterate(&visitor);
}
@@ -1101,12 +1084,14 @@ MaybeObject* LiveEdit::ReplaceFunctionCode(
Handle<SharedFunctionInfo> shared_info = shared_info_wrapper.GetInfo();
+ HEAP->EnsureHeapIsIterable();
+
if (IsJSFunctionCode(shared_info->code())) {
Handle<Code> code = compile_info_wrapper.GetFunctionCode();
ReplaceCodeObject(shared_info->code(), *code);
Handle<Object> code_scope_info = compile_info_wrapper.GetCodeScopeInfo();
if (code_scope_info->IsFixedArray()) {
- shared_info->set_scope_info(SerializedScopeInfo::cast(*code_scope_info));
+ shared_info->set_scope_info(ScopeInfo::cast(*code_scope_info));
}
}
@@ -1271,7 +1256,8 @@ class RelocInfoBuffer {
 // Patches positions in code (changes the relocation info section) and
 // possibly returns a new instance of the code.
-static Handle<Code> PatchPositionsInCode(Handle<Code> code,
+static Handle<Code> PatchPositionsInCode(
+ Handle<Code> code,
Handle<JSArray> position_change_array) {
RelocInfoBuffer buffer_writer(code->relocation_size(),
@@ -1286,7 +1272,7 @@ static Handle<Code> PatchPositionsInCode(Handle<Code> code,
int new_position = TranslatePosition(position,
position_change_array);
if (position != new_position) {
- RelocInfo info_copy(rinfo->pc(), rinfo->rmode(), new_position);
+ RelocInfo info_copy(rinfo->pc(), rinfo->rmode(), new_position, NULL);
buffer_writer.Write(&info_copy);
continue;
}
@@ -1333,6 +1319,8 @@ MaybeObject* LiveEdit::PatchFunctionPositions(
info->set_end_position(new_function_end);
info->set_function_token_position(new_function_token_pos);
+ HEAP->EnsureHeapIsIterable();
+
if (IsJSFunctionCode(info->code())) {
// Patch relocation info section of the code.
Handle<Code> patched_code = PatchPositionsInCode(Handle<Code>(info->code()),
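A pattern worth noting across the liveedit.cc hunks above: anything that can allocate or trigger a collection (EnsureHeapIsIterable(), the HeapIterator constructor) now runs before the AssertNoAllocation guard. A minimal sketch of the resulting iteration idiom, built only from calls that appear in this diff (the CountCodeObjects helper itself is hypothetical):

    static int CountCodeObjects(Heap* heap) {
      heap->EnsureHeapIsIterable();      // may trigger a GC; allocation is OK here
      HeapIterator iterator;             // construct before banning allocation
      AssertNoAllocation no_allocation;  // the walk itself must not allocate
      int count = 0;
      for (HeapObject* obj = iterator.next(); obj != NULL;
           obj = iterator.next()) {
        if (obj->IsCode()) count++;
      }
      return count;
    }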
diff --git a/deps/v8/src/liveobjectlist.cc b/deps/v8/src/liveobjectlist.cc
index 957c0515d..408e2a316 100644
--- a/deps/v8/src/liveobjectlist.cc
+++ b/deps/v8/src/liveobjectlist.cc
@@ -1085,7 +1085,7 @@ void LiveObjectList::SortAll() {
static int CountHeapObjects() {
int count = 0;
// Iterate over all the heap spaces and count the number of objects.
- HeapIterator iterator(HeapIterator::kFilterFreeListNodes);
+ HeapIterator iterator;
HeapObject* heap_obj = NULL;
while ((heap_obj = iterator.next()) != NULL) {
count++;
@@ -1122,7 +1122,7 @@ MaybeObject* LiveObjectList::Capture() {
 // allocation, and we need to allocate below.
{
// Iterate over all the heap spaces and add the objects.
- HeapIterator iterator(HeapIterator::kFilterFreeListNodes);
+ HeapIterator iterator;
HeapObject* heap_obj = NULL;
bool failed = false;
while (!failed && (heap_obj = iterator.next()) != NULL) {
@@ -1336,7 +1336,9 @@ MaybeObject* LiveObjectList::DumpPrivate(DumpWriter* writer,
// Allocate the JSArray of the elements.
Handle<JSObject> elements = factory->NewJSObject(isolate->array_function());
if (elements->IsFailure()) return Object::cast(*elements);
- Handle<JSArray>::cast(elements)->SetContent(*elements_arr);
+
+ maybe_result = Handle<JSArray>::cast(elements)->SetContent(*elements_arr);
+ if (maybe_result->IsFailure()) return maybe_result;
// Set body.elements.
Handle<String> elements_sym = factory->LookupAsciiSymbol("elements");
@@ -1462,7 +1464,9 @@ MaybeObject* LiveObjectList::SummarizePrivate(SummaryWriter* writer,
Handle<JSObject> summary_obj =
factory->NewJSObject(isolate->array_function());
if (summary_obj->IsFailure()) return Object::cast(*summary_obj);
- Handle<JSArray>::cast(summary_obj)->SetContent(*summary_arr);
+
+ maybe_result = Handle<JSArray>::cast(summary_obj)->SetContent(*summary_arr);
+ if (maybe_result->IsFailure()) return maybe_result;
// Create the body object.
Handle<JSObject> body = factory->NewJSObject(isolate->object_function());
@@ -1589,7 +1593,9 @@ MaybeObject* LiveObjectList::Info(int start_idx, int dump_limit) {
// Return the result as a JS array.
Handle<JSObject> lols = factory->NewJSObject(isolate->array_function());
- Handle<JSArray>::cast(lols)->SetContent(*list);
+
+ maybe_result = Handle<JSArray>::cast(lols)->SetContent(*list);
+ if (maybe_result->IsFailure()) return maybe_result;
Handle<JSObject> result = factory->NewJSObject(isolate->object_function());
if (result->IsFailure()) return Object::cast(*result);
@@ -2507,7 +2513,7 @@ void LiveObjectList::Verify(bool match_heap_exactly) {
OS::Print(" Start verify ...\n");
OS::Print(" Verifying ...");
Flush();
- HeapIterator iterator(HeapIterator::kFilterFreeListNodes);
+ HeapIterator iterator;
HeapObject* heap_obj = NULL;
while ((heap_obj = iterator.next()) != NULL) {
number_of_heap_objects++;
@@ -2613,7 +2619,7 @@ void LiveObjectList::VerifyNotInFromSpace() {
HeapObject* heap_obj = it.Obj();
if (heap->InFromSpace(heap_obj)) {
OS::Print(" ERROR: VerifyNotInFromSpace: [%d] obj %p in From space %p\n",
- i++, heap_obj, heap->new_space()->FromSpaceLow());
+ i++, heap_obj, Heap::new_space()->FromSpaceStart());
}
}
}
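SetContent() now returns a MaybeObject* that callers are expected to check, as the three hunks above show; discarding it would silently swallow an allocation failure. A condensed sketch of the propagation idiom (the wrapper function and its names are illustrative, not part of the patch):

    static MaybeObject* WrapInArray(Factory* factory,
                                    Isolate* isolate,
                                    FixedArray* contents) {
      Handle<JSObject> array = factory->NewJSObject(isolate->array_function());
      if (array->IsFailure()) return Object::cast(*array);
      // Propagate the failure instead of ignoring the result.
      MaybeObject* maybe_result =
          Handle<JSArray>::cast(array)->SetContent(contents);
      if (maybe_result->IsFailure()) return maybe_result;
      return *array;
    }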
diff --git a/deps/v8/src/log.cc b/deps/v8/src/log.cc
index 3d66b5fb1..eab26392e 100644
--- a/deps/v8/src/log.cc
+++ b/deps/v8/src/log.cc
@@ -1356,12 +1356,12 @@ class EnumerateOptimizedFunctionsVisitor: public OptimizedFunctionVisitor {
static int EnumerateCompiledFunctions(Handle<SharedFunctionInfo>* sfis,
Handle<Code>* code_objects) {
+ HeapIterator iterator;
AssertNoAllocation no_alloc;
int compiled_funcs_count = 0;
// Iterate the heap to find shared function info objects and record
// the unoptimized code for them.
- HeapIterator iterator;
for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
if (!obj->IsSharedFunctionInfo()) continue;
SharedFunctionInfo* sfi = SharedFunctionInfo::cast(obj);
@@ -1450,6 +1450,8 @@ void Logger::LogCodeInfo() {
const char arch[] = "x64";
#elif V8_TARGET_ARCH_ARM
const char arch[] = "arm";
+#elif V8_TARGET_ARCH_MIPS
+ const char arch[] = "mips";
#else
const char arch[] = "unknown";
#endif
@@ -1519,8 +1521,9 @@ void Logger::LowLevelLogWriteBytes(const char* bytes, int size) {
void Logger::LogCodeObjects() {
- AssertNoAllocation no_alloc;
+ HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask);
HeapIterator iterator;
+ AssertNoAllocation no_alloc;
for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
if (obj->IsCode()) LogCodeObject(obj);
}
@@ -1573,6 +1576,7 @@ void Logger::LogExistingFunction(Handle<SharedFunctionInfo> shared,
void Logger::LogCompiledFunctions() {
+ HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask);
HandleScope scope;
const int compiled_funcs_count = EnumerateCompiledFunctions(NULL, NULL);
ScopedVector< Handle<SharedFunctionInfo> > sfis(compiled_funcs_count);
@@ -1591,9 +1595,9 @@ void Logger::LogCompiledFunctions() {
void Logger::LogAccessorCallbacks() {
- AssertNoAllocation no_alloc;
+ HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask);
HeapIterator iterator;
- i::Isolate* isolate = ISOLATE;
+ AssertNoAllocation no_alloc;
for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
if (!obj->IsAccessorInfo()) continue;
AccessorInfo* ai = AccessorInfo::cast(obj);
@@ -1601,11 +1605,11 @@ void Logger::LogAccessorCallbacks() {
String* name = String::cast(ai->name());
Address getter_entry = v8::ToCData<Address>(ai->getter());
if (getter_entry != 0) {
- PROFILE(isolate, GetterCallbackEvent(name, getter_entry));
+ PROFILE(ISOLATE, GetterCallbackEvent(name, getter_entry));
}
Address setter_entry = v8::ToCData<Address>(ai->setter());
if (setter_entry != 0) {
- PROFILE(isolate, SetterCallbackEvent(name, setter_entry));
+ PROFILE(ISOLATE, SetterCallbackEvent(name, setter_entry));
}
}
}
diff --git a/deps/v8/src/log.h b/deps/v8/src/log.h
index fe19810a2..677dada03 100644
--- a/deps/v8/src/log.h
+++ b/deps/v8/src/log.h
@@ -29,6 +29,7 @@
#define V8_LOG_H_
#include "allocation.h"
+#include "objects.h"
#include "platform.h"
#include "log-utils.h"
@@ -294,7 +295,13 @@ class Logger {
INLINE(static LogEventsAndTags ToNativeByScript(LogEventsAndTags, Script*));
// Profiler's sampling interval (in milliseconds).
+#if defined(ANDROID)
+  // Phones and tablets have processors that are much slower than those of
+  // the desktop and laptop computers for which the current heuristics are
+  // tuned.
+ static const int kSamplingIntervalMs = 5;
+#else
static const int kSamplingIntervalMs = 1;
+#endif
// Callback from Log, stops profiling in case of insufficient resources.
void LogFailure();
diff --git a/deps/v8/src/macro-assembler.h b/deps/v8/src/macro-assembler.h
index 30838bd76..364fdb627 100644
--- a/deps/v8/src/macro-assembler.h
+++ b/deps/v8/src/macro-assembler.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -93,6 +93,63 @@ const int kInvalidProtoDepth = -1;
namespace v8 {
namespace internal {
+class FrameScope {
+ public:
+ explicit FrameScope(MacroAssembler* masm, StackFrame::Type type)
+ : masm_(masm), type_(type), old_has_frame_(masm->has_frame()) {
+ masm->set_has_frame(true);
+ if (type != StackFrame::MANUAL && type_ != StackFrame::NONE) {
+ masm->EnterFrame(type);
+ }
+ }
+
+ ~FrameScope() {
+ if (type_ != StackFrame::MANUAL && type_ != StackFrame::NONE) {
+ masm_->LeaveFrame(type_);
+ }
+ masm_->set_has_frame(old_has_frame_);
+ }
+
+ // Normally we generate the leave-frame code when this object goes
+ // out of scope. Sometimes we may need to generate the code somewhere else
+ // in addition. Calling this will achieve that, but the object stays in
+ // scope, the MacroAssembler is still marked as being in a frame scope, and
+ // the code will be generated again when it goes out of scope.
+ void GenerateLeaveFrame() {
+ masm_->LeaveFrame(type_);
+ }
+
+ private:
+ MacroAssembler* masm_;
+ StackFrame::Type type_;
+ bool old_has_frame_;
+};
+
+
+class AllowExternalCallThatCantCauseGC: public FrameScope {
+ public:
+ explicit AllowExternalCallThatCantCauseGC(MacroAssembler* masm)
+ : FrameScope(masm, StackFrame::NONE) { }
+};
+
+
+class NoCurrentFrameScope {
+ public:
+ explicit NoCurrentFrameScope(MacroAssembler* masm)
+ : masm_(masm), saved_(masm->has_frame()) {
+ masm->set_has_frame(false);
+ }
+
+ ~NoCurrentFrameScope() {
+ masm_->set_has_frame(saved_);
+ }
+
+ private:
+ MacroAssembler* masm_;
+ bool saved_;
+};
+
+
// Support for "structured" code comments.
#ifdef DEBUG
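FrameScope is an RAII helper: constructing it emits EnterFrame (unless the type is MANUAL or NONE) and marks the MacroAssembler as having a frame; destroying it emits the matching LeaveFrame and restores the flag. A minimal usage sketch, assuming a hypothetical generator function (StackFrame::INTERNAL is an existing frame type):

    void GenerateExample(MacroAssembler* masm) {
      FrameScope scope(masm, StackFrame::INTERNAL);  // emits EnterFrame
      // ... emit code that may call the runtime; masm->has_frame() is true ...
    }  // scope destructor emits LeaveFrame and restores has_frame()

AllowExternalCallThatCantCauseGC is the degenerate case: with StackFrame::NONE it emits no frame code at all, but still marks the assembler as being inside a frame scope for the duration of the external call.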
diff --git a/deps/v8/src/macros.py b/deps/v8/src/macros.py
index 7a493ca70..bf7119fea 100644
--- a/deps/v8/src/macros.py
+++ b/deps/v8/src/macros.py
@@ -82,8 +82,6 @@ const kMinYear = -1000000;
const kMaxYear = 1000000;
const kMinMonth = -10000000;
const kMaxMonth = 10000000;
-const kMinDate = -100000000;
-const kMaxDate = 100000000;
# Native cache ids.
const STRING_TO_REGEXP_CACHE_ID = 0;
@@ -128,6 +126,11 @@ macro IS_SPEC_OBJECT(arg) = (%_IsSpecObject(arg));
# we cannot handle those anyway.
macro IS_SPEC_FUNCTION(arg) = (%_ClassOf(arg) === 'Function');
+# Indices in bound function info retrieved by %BoundFunctionGetBindings(...).
+const kBoundFunctionIndex = 0;
+const kBoundThisIndex = 1;
+const kBoundArgumentsStartIndex = 2;
+
# Inline macros. Use %IS_VAR to make sure arg is evaluated only once.
macro NUMBER_IS_NAN(arg) = (!%_IsSmi(%IS_VAR(arg)) && !(arg == arg));
macro NUMBER_IS_FINITE(arg) = (%_IsSmi(%IS_VAR(arg)) || ((arg == arg) && (arg != 1/0) && (arg != -1/0)));
diff --git a/deps/v8/src/mark-compact-inl.h b/deps/v8/src/mark-compact-inl.h
new file mode 100644
index 000000000..573715e28
--- /dev/null
+++ b/deps/v8/src/mark-compact-inl.h
@@ -0,0 +1,95 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_MARK_COMPACT_INL_H_
+#define V8_MARK_COMPACT_INL_H_
+
+#include "isolate.h"
+#include "memory.h"
+#include "mark-compact.h"
+
+
+namespace v8 {
+namespace internal {
+
+
+MarkBit Marking::MarkBitFrom(Address addr) {
+ MemoryChunk* p = MemoryChunk::FromAddress(addr);
+ return p->markbits()->MarkBitFromIndex(p->AddressToMarkbitIndex(addr),
+ p->ContainsOnlyData());
+}
+
+
+void MarkCompactCollector::SetFlags(int flags) {
+ sweep_precisely_ = ((flags & Heap::kMakeHeapIterableMask) != 0);
+}
+
+
+void MarkCompactCollector::MarkObject(HeapObject* obj, MarkBit mark_bit) {
+ ASSERT(Marking::MarkBitFrom(obj) == mark_bit);
+ if (!mark_bit.Get()) {
+ mark_bit.Set();
+ MemoryChunk::IncrementLiveBytes(obj->address(), obj->Size());
+ ProcessNewlyMarkedObject(obj);
+ }
+}
+
+
+void MarkCompactCollector::SetMark(HeapObject* obj, MarkBit mark_bit) {
+ ASSERT(!mark_bit.Get());
+ ASSERT(Marking::MarkBitFrom(obj) == mark_bit);
+ mark_bit.Set();
+ MemoryChunk::IncrementLiveBytes(obj->address(), obj->Size());
+}
+
+
+bool MarkCompactCollector::IsMarked(Object* obj) {
+ ASSERT(obj->IsHeapObject());
+ HeapObject* heap_object = HeapObject::cast(obj);
+ return Marking::MarkBitFrom(heap_object).Get();
+}
+
+
+void MarkCompactCollector::RecordSlot(Object** anchor_slot,
+ Object** slot,
+ Object* object) {
+ Page* object_page = Page::FromAddress(reinterpret_cast<Address>(object));
+ if (object_page->IsEvacuationCandidate() &&
+ !ShouldSkipEvacuationSlotRecording(anchor_slot)) {
+ if (!SlotsBuffer::AddTo(&slots_buffer_allocator_,
+ object_page->slots_buffer_address(),
+ slot,
+ SlotsBuffer::FAIL_ON_OVERFLOW)) {
+ EvictEvacuationCandidate(object_page);
+ }
+ }
+}
+
+
+} } // namespace v8::internal
+
+#endif // V8_MARK_COMPACT_INL_H_
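These inline helpers establish the calling convention used throughout the rewritten mark-compact code: callers look up an object's mark bit once and pass both to the collector. A sketch of that pattern (MarkReachable is a hypothetical wrapper around the two calls defined above):

    static void MarkReachable(MarkCompactCollector* collector,
                              HeapObject* object) {
      MarkBit mark_bit = Marking::MarkBitFrom(object);
      collector->MarkObject(object, mark_bit);  // no-op if already marked
    }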
diff --git a/deps/v8/src/mark-compact.cc b/deps/v8/src/mark-compact.cc
index 3e4a617b7..fb3ac5697 100644
--- a/deps/v8/src/mark-compact.cc
+++ b/deps/v8/src/mark-compact.cc
@@ -27,20 +27,31 @@
#include "v8.h"
+#include "code-stubs.h"
#include "compilation-cache.h"
+#include "deoptimizer.h"
#include "execution.h"
-#include "heap-profiler.h"
#include "gdb-jit.h"
#include "global-handles.h"
+#include "heap-profiler.h"
#include "ic-inl.h"
+#include "incremental-marking.h"
#include "liveobjectlist-inl.h"
#include "mark-compact.h"
#include "objects-visiting.h"
+#include "objects-visiting-inl.h"
#include "stub-cache.h"
namespace v8 {
namespace internal {
+
+const char* Marking::kWhiteBitPattern = "00";
+const char* Marking::kBlackBitPattern = "10";
+const char* Marking::kGreyBitPattern = "11";
+const char* Marking::kImpossibleBitPattern = "01";
+
+
// -------------------------------------------------------------------------
// MarkCompactCollector
@@ -48,70 +59,462 @@ MarkCompactCollector::MarkCompactCollector() : // NOLINT
#ifdef DEBUG
state_(IDLE),
#endif
- force_compaction_(false),
- compacting_collection_(false),
- compact_on_next_gc_(false),
- previous_marked_count_(0),
+ sweep_precisely_(false),
+ compacting_(false),
+ was_marked_incrementally_(false),
+ collect_maps_(FLAG_collect_maps),
tracer_(NULL),
-#ifdef DEBUG
- live_young_objects_size_(0),
- live_old_pointer_objects_size_(0),
- live_old_data_objects_size_(0),
- live_code_objects_size_(0),
- live_map_objects_size_(0),
- live_cell_objects_size_(0),
- live_lo_objects_size_(0),
- live_bytes_(0),
-#endif
+ migration_slots_buffer_(NULL),
heap_(NULL),
code_flusher_(NULL),
encountered_weak_maps_(NULL) { }
+#ifdef DEBUG
+class VerifyMarkingVisitor: public ObjectVisitor {
+ public:
+ void VisitPointers(Object** start, Object** end) {
+ for (Object** current = start; current < end; current++) {
+ if ((*current)->IsHeapObject()) {
+ HeapObject* object = HeapObject::cast(*current);
+ ASSERT(HEAP->mark_compact_collector()->IsMarked(object));
+ }
+ }
+ }
+};
+
+
+static void VerifyMarking(Address bottom, Address top) {
+ VerifyMarkingVisitor visitor;
+ HeapObject* object;
+ Address next_object_must_be_here_or_later = bottom;
+
+ for (Address current = bottom;
+ current < top;
+ current += kPointerSize) {
+ object = HeapObject::FromAddress(current);
+ if (MarkCompactCollector::IsMarked(object)) {
+ ASSERT(current >= next_object_must_be_here_or_later);
+ object->Iterate(&visitor);
+ next_object_must_be_here_or_later = current + object->Size();
+ }
+ }
+}
+
+
+static void VerifyMarking(NewSpace* space) {
+ Address end = space->top();
+ NewSpacePageIterator it(space->bottom(), end);
+  // The bottom position is at the start of its page, which allows us to
+  // use page->body() as the start of the range on all pages.
+ ASSERT_EQ(space->bottom(),
+ NewSpacePage::FromAddress(space->bottom())->body());
+ while (it.has_next()) {
+ NewSpacePage* page = it.next();
+ Address limit = it.has_next() ? page->body_limit() : end;
+ ASSERT(limit == end || !page->Contains(end));
+ VerifyMarking(page->body(), limit);
+ }
+}
+
+
+static void VerifyMarking(PagedSpace* space) {
+ PageIterator it(space);
+
+ while (it.has_next()) {
+ Page* p = it.next();
+ VerifyMarking(p->ObjectAreaStart(), p->ObjectAreaEnd());
+ }
+}
+
+
+static void VerifyMarking(Heap* heap) {
+ VerifyMarking(heap->old_pointer_space());
+ VerifyMarking(heap->old_data_space());
+ VerifyMarking(heap->code_space());
+ VerifyMarking(heap->cell_space());
+ VerifyMarking(heap->map_space());
+ VerifyMarking(heap->new_space());
+
+ VerifyMarkingVisitor visitor;
+
+ LargeObjectIterator it(heap->lo_space());
+ for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
+ if (MarkCompactCollector::IsMarked(obj)) {
+ obj->Iterate(&visitor);
+ }
+ }
+
+ heap->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);
+}
+
+
+class VerifyEvacuationVisitor: public ObjectVisitor {
+ public:
+ void VisitPointers(Object** start, Object** end) {
+ for (Object** current = start; current < end; current++) {
+ if ((*current)->IsHeapObject()) {
+ HeapObject* object = HeapObject::cast(*current);
+ CHECK(!MarkCompactCollector::IsOnEvacuationCandidate(object));
+ }
+ }
+ }
+};
+
+
+static void VerifyEvacuation(Address bottom, Address top) {
+ VerifyEvacuationVisitor visitor;
+ HeapObject* object;
+ Address next_object_must_be_here_or_later = bottom;
+
+ for (Address current = bottom;
+ current < top;
+ current += kPointerSize) {
+ object = HeapObject::FromAddress(current);
+ if (MarkCompactCollector::IsMarked(object)) {
+ ASSERT(current >= next_object_must_be_here_or_later);
+ object->Iterate(&visitor);
+ next_object_must_be_here_or_later = current + object->Size();
+ }
+ }
+}
+
+
+static void VerifyEvacuation(NewSpace* space) {
+ NewSpacePageIterator it(space->bottom(), space->top());
+ VerifyEvacuationVisitor visitor;
+
+ while (it.has_next()) {
+ NewSpacePage* page = it.next();
+ Address current = page->body();
+ Address limit = it.has_next() ? page->body_limit() : space->top();
+ ASSERT(limit == space->top() || !page->Contains(space->top()));
+ while (current < limit) {
+ HeapObject* object = HeapObject::FromAddress(current);
+ object->Iterate(&visitor);
+ current += object->Size();
+ }
+ }
+}
+
+
+static void VerifyEvacuation(PagedSpace* space) {
+ PageIterator it(space);
+
+ while (it.has_next()) {
+ Page* p = it.next();
+ if (p->IsEvacuationCandidate()) continue;
+ VerifyEvacuation(p->ObjectAreaStart(), p->ObjectAreaEnd());
+ }
+}
+
+
+static void VerifyEvacuation(Heap* heap) {
+ VerifyEvacuation(heap->old_pointer_space());
+ VerifyEvacuation(heap->old_data_space());
+ VerifyEvacuation(heap->code_space());
+ VerifyEvacuation(heap->cell_space());
+ VerifyEvacuation(heap->map_space());
+ VerifyEvacuation(heap->new_space());
+
+ VerifyEvacuationVisitor visitor;
+ heap->IterateStrongRoots(&visitor, VISIT_ALL);
+}
+#endif
+
+
+void MarkCompactCollector::AddEvacuationCandidate(Page* p) {
+ p->MarkEvacuationCandidate();
+ evacuation_candidates_.Add(p);
+}
+
+
+bool MarkCompactCollector::StartCompaction() {
+ if (!compacting_) {
+ ASSERT(evacuation_candidates_.length() == 0);
+
+ CollectEvacuationCandidates(heap()->old_pointer_space());
+ CollectEvacuationCandidates(heap()->old_data_space());
+
+ if (FLAG_compact_code_space) {
+ CollectEvacuationCandidates(heap()->code_space());
+ }
+
+ heap()->old_pointer_space()->EvictEvacuationCandidatesFromFreeLists();
+ heap()->old_data_space()->EvictEvacuationCandidatesFromFreeLists();
+ heap()->code_space()->EvictEvacuationCandidatesFromFreeLists();
+
+ compacting_ = evacuation_candidates_.length() > 0;
+ }
+
+ return compacting_;
+}
+
+
void MarkCompactCollector::CollectGarbage() {
// Make sure that Prepare() has been called. The individual steps below will
// update the state as they proceed.
ASSERT(state_ == PREPARE_GC);
ASSERT(encountered_weak_maps_ == Smi::FromInt(0));
- // Prepare has selected whether to compact the old generation or not.
- // Tell the tracer.
- if (IsCompacting()) tracer_->set_is_compacting();
-
MarkLiveObjects();
+ ASSERT(heap_->incremental_marking()->IsStopped());
- if (FLAG_collect_maps) ClearNonLiveTransitions();
+ if (collect_maps_) ClearNonLiveTransitions();
ClearWeakMaps();
- SweepLargeObjectSpace();
+#ifdef DEBUG
+ if (FLAG_verify_heap) {
+ VerifyMarking(heap_);
+ }
+#endif
- if (IsCompacting()) {
- GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_COMPACT);
- EncodeForwardingAddresses();
+ SweepSpaces();
- heap()->MarkMapPointersAsEncoded(true);
- UpdatePointers();
- heap()->MarkMapPointersAsEncoded(false);
- heap()->isolate()->pc_to_code_cache()->Flush();
+ if (!collect_maps_) ReattachInitialMaps();
- RelocateObjects();
- } else {
- SweepSpaces();
- heap()->isolate()->pc_to_code_cache()->Flush();
- }
+ heap_->isolate()->inner_pointer_to_code_cache()->Flush();
Finish();
- // Save the count of marked objects remaining after the collection and
- // null out the GC tracer.
- previous_marked_count_ = tracer_->marked_count();
- ASSERT(previous_marked_count_ == 0);
tracer_ = NULL;
}
+#ifdef DEBUG
+void MarkCompactCollector::VerifyMarkbitsAreClean(PagedSpace* space) {
+ PageIterator it(space);
+
+ while (it.has_next()) {
+ Page* p = it.next();
+ CHECK(p->markbits()->IsClean());
+ CHECK_EQ(0, p->LiveBytes());
+ }
+}
+
+void MarkCompactCollector::VerifyMarkbitsAreClean(NewSpace* space) {
+ NewSpacePageIterator it(space->bottom(), space->top());
+
+ while (it.has_next()) {
+ NewSpacePage* p = it.next();
+ CHECK(p->markbits()->IsClean());
+ CHECK_EQ(0, p->LiveBytes());
+ }
+}
+
+void MarkCompactCollector::VerifyMarkbitsAreClean() {
+ VerifyMarkbitsAreClean(heap_->old_pointer_space());
+ VerifyMarkbitsAreClean(heap_->old_data_space());
+ VerifyMarkbitsAreClean(heap_->code_space());
+ VerifyMarkbitsAreClean(heap_->cell_space());
+ VerifyMarkbitsAreClean(heap_->map_space());
+ VerifyMarkbitsAreClean(heap_->new_space());
+
+ LargeObjectIterator it(heap_->lo_space());
+ for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
+ MarkBit mark_bit = Marking::MarkBitFrom(obj);
+ ASSERT(Marking::IsWhite(mark_bit));
+ }
+}
+#endif
+
+
+static void ClearMarkbitsInPagedSpace(PagedSpace* space) {
+ PageIterator it(space);
+
+ while (it.has_next()) {
+ Bitmap::Clear(it.next());
+ }
+}
+
+
+static void ClearMarkbitsInNewSpace(NewSpace* space) {
+ NewSpacePageIterator it(space->ToSpaceStart(), space->ToSpaceEnd());
+
+ while (it.has_next()) {
+ Bitmap::Clear(it.next());
+ }
+}
+
+
+void MarkCompactCollector::ClearMarkbits() {
+ ClearMarkbitsInPagedSpace(heap_->code_space());
+ ClearMarkbitsInPagedSpace(heap_->map_space());
+ ClearMarkbitsInPagedSpace(heap_->old_pointer_space());
+ ClearMarkbitsInPagedSpace(heap_->old_data_space());
+ ClearMarkbitsInPagedSpace(heap_->cell_space());
+ ClearMarkbitsInNewSpace(heap_->new_space());
+
+ LargeObjectIterator it(heap_->lo_space());
+ for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
+ MarkBit mark_bit = Marking::MarkBitFrom(obj);
+ mark_bit.Clear();
+ mark_bit.Next().Clear();
+ }
+}
+
+
+bool Marking::TransferMark(Address old_start, Address new_start) {
+ // This is only used when resizing an object.
+ ASSERT(MemoryChunk::FromAddress(old_start) ==
+ MemoryChunk::FromAddress(new_start));
+
+ // If the mark doesn't move, we don't check the color of the object.
+ // It doesn't matter whether the object is black, since it hasn't changed
+ // size, so the adjustment to the live data count will be zero anyway.
+ if (old_start == new_start) return false;
+
+ MarkBit new_mark_bit = MarkBitFrom(new_start);
+ MarkBit old_mark_bit = MarkBitFrom(old_start);
+
+#ifdef DEBUG
+ ObjectColor old_color = Color(old_mark_bit);
+#endif
+
+ if (Marking::IsBlack(old_mark_bit)) {
+ old_mark_bit.Clear();
+ ASSERT(IsWhite(old_mark_bit));
+ Marking::MarkBlack(new_mark_bit);
+ return true;
+ } else if (Marking::IsGrey(old_mark_bit)) {
+ ASSERT(heap_->incremental_marking()->IsMarking());
+ old_mark_bit.Clear();
+ old_mark_bit.Next().Clear();
+ ASSERT(IsWhite(old_mark_bit));
+ heap_->incremental_marking()->WhiteToGreyAndPush(
+ HeapObject::FromAddress(new_start), new_mark_bit);
+ heap_->incremental_marking()->RestartIfNotMarking();
+ }
+
+#ifdef DEBUG
+ ObjectColor new_color = Color(new_mark_bit);
+ ASSERT(new_color == old_color);
+#endif
+
+ return false;
+}
+
+
+const char* AllocationSpaceName(AllocationSpace space) {
+ switch (space) {
+ case NEW_SPACE: return "NEW_SPACE";
+ case OLD_POINTER_SPACE: return "OLD_POINTER_SPACE";
+ case OLD_DATA_SPACE: return "OLD_DATA_SPACE";
+ case CODE_SPACE: return "CODE_SPACE";
+ case MAP_SPACE: return "MAP_SPACE";
+ case CELL_SPACE: return "CELL_SPACE";
+ case LO_SPACE: return "LO_SPACE";
+ default:
+ UNREACHABLE();
+ }
+
+ return NULL;
+}
+
+
+void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
+ ASSERT(space->identity() == OLD_POINTER_SPACE ||
+ space->identity() == OLD_DATA_SPACE ||
+ space->identity() == CODE_SPACE);
+
+ int number_of_pages = space->CountTotalPages();
+
+ PageIterator it(space);
+ const int kMaxMaxEvacuationCandidates = 1000;
+ int max_evacuation_candidates = Min(
+ kMaxMaxEvacuationCandidates,
+ static_cast<int>(sqrt(static_cast<double>(number_of_pages / 2)) + 1));
+
+ if (FLAG_stress_compaction || FLAG_always_compact) {
+ max_evacuation_candidates = kMaxMaxEvacuationCandidates;
+ }
+
+ class Candidate {
+ public:
+ Candidate() : fragmentation_(0), page_(NULL) { }
+ Candidate(int f, Page* p) : fragmentation_(f), page_(p) { }
+
+ int fragmentation() { return fragmentation_; }
+ Page* page() { return page_; }
+
+ private:
+ int fragmentation_;
+ Page* page_;
+ };
+
+ Candidate candidates[kMaxMaxEvacuationCandidates];
+
+ int count = 0;
+ if (it.has_next()) it.next(); // Never compact the first page.
+ int fragmentation = 0;
+ Candidate* least = NULL;
+ while (it.has_next()) {
+ Page* p = it.next();
+ p->ClearEvacuationCandidate();
+ if (FLAG_stress_compaction) {
+ int counter = space->heap()->ms_count();
+ uintptr_t page_number = reinterpret_cast<uintptr_t>(p) >> kPageSizeBits;
+ if ((counter & 1) == (page_number & 1)) fragmentation = 1;
+ } else {
+ fragmentation = space->Fragmentation(p);
+ }
+ if (fragmentation != 0) {
+ if (count < max_evacuation_candidates) {
+ candidates[count++] = Candidate(fragmentation, p);
+ } else {
+ if (least == NULL) {
+ for (int i = 0; i < max_evacuation_candidates; i++) {
+ if (least == NULL ||
+ candidates[i].fragmentation() < least->fragmentation()) {
+ least = candidates + i;
+ }
+ }
+ }
+ if (least->fragmentation() < fragmentation) {
+ *least = Candidate(fragmentation, p);
+ least = NULL;
+ }
+ }
+ }
+ }
+ for (int i = 0; i < count; i++) {
+ AddEvacuationCandidate(candidates[i].page());
+ }
+
+ if (count > 0 && FLAG_trace_fragmentation) {
+ PrintF("Collected %d evacuation candidates for space %s\n",
+ count,
+ AllocationSpaceName(space->identity()));
+ }
+}
+
+
+void MarkCompactCollector::AbortCompaction() {
+ if (compacting_) {
+ int npages = evacuation_candidates_.length();
+ for (int i = 0; i < npages; i++) {
+ Page* p = evacuation_candidates_[i];
+ slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address());
+ p->ClearEvacuationCandidate();
+ p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION);
+ }
+ compacting_ = false;
+ evacuation_candidates_.Rewind(0);
+ invalidated_code_.Rewind(0);
+ }
+ ASSERT_EQ(0, evacuation_candidates_.length());
+}
+
+
void MarkCompactCollector::Prepare(GCTracer* tracer) {
+ was_marked_incrementally_ = heap()->incremental_marking()->IsMarking();
+
+ // Disable collection of maps if incremental marking is enabled.
+  // The map collection algorithm relies on a special map-transition tree
+  // traversal order which is not implemented for incremental marking.
+ collect_maps_ = FLAG_collect_maps && !was_marked_incrementally_;
+
// Rather than passing the tracer around we stash it in a static member
// variable.
tracer_ = tracer;
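CollectEvacuationCandidates above caps the candidate set at roughly sqrt(pages / 2) entries (at most 1000) and, once the array is full, evicts the least fragmented entry whenever a more fragmented page turns up. A condensed, self-contained sketch of that selection policy (standard C++ containers stand in for the fixed Candidate array; every name here is illustrative):

    #include <algorithm>
    #include <cstddef>
    #include <vector>

    struct Candidate { int fragmentation; int page_id; };

    static bool LessFragmented(const Candidate& a, const Candidate& b) {
      return a.fragmentation < b.fragmentation;
    }

    // Keep the max_candidates most fragmented pages seen so far.
    static void Consider(std::vector<Candidate>* best, size_t max_candidates,
                         const Candidate& c) {
      if (c.fragmentation == 0) return;  // page is not worth evacuating
      if (best->size() < max_candidates) {
        best->push_back(c);
        return;
      }
      std::vector<Candidate>::iterator least =
          std::min_element(best->begin(), best->end(), LessFragmented);
      if (least->fragmentation < c.fragmentation) *least = c;  // evict least
    }

(The real code also caches the least fragmented slot between iterations; the sketch recomputes it for brevity.)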
@@ -120,16 +523,10 @@ void MarkCompactCollector::Prepare(GCTracer* tracer) {
ASSERT(state_ == IDLE);
state_ = PREPARE_GC;
#endif
- ASSERT(!FLAG_always_compact || !FLAG_never_compact);
- compacting_collection_ =
- FLAG_always_compact || force_compaction_ || compact_on_next_gc_;
- compact_on_next_gc_ = false;
+ ASSERT(!FLAG_never_compact || !FLAG_always_compact);
- if (FLAG_never_compact) compacting_collection_ = false;
- if (!heap()->map_space()->MapPointersEncodable())
- compacting_collection_ = false;
- if (FLAG_collect_maps) CreateBackPointers();
+ if (collect_maps_) CreateBackPointers();
#ifdef ENABLE_GDB_JIT_INTERFACE
if (FLAG_gdbjit) {
// If GDBJIT interface is active disable compaction.
@@ -137,21 +534,31 @@ void MarkCompactCollector::Prepare(GCTracer* tracer) {
}
#endif
+ // Clear marking bits for precise sweeping to collect all garbage.
+ if (was_marked_incrementally_ && PreciseSweepingRequired()) {
+ heap()->incremental_marking()->Abort();
+ ClearMarkbits();
+ AbortCompaction();
+ was_marked_incrementally_ = false;
+ }
+
+  // Don't start compaction if we are in the middle of an incremental
+  // marking cycle, since no slots were recorded during it.
+ if (!FLAG_never_compact && !was_marked_incrementally_) {
+ StartCompaction();
+ }
+
PagedSpaces spaces;
for (PagedSpace* space = spaces.next();
- space != NULL; space = spaces.next()) {
- space->PrepareForMarkCompact(compacting_collection_);
+ space != NULL;
+ space = spaces.next()) {
+ space->PrepareForMarkCompact();
}
#ifdef DEBUG
- live_bytes_ = 0;
- live_young_objects_size_ = 0;
- live_old_pointer_objects_size_ = 0;
- live_old_data_objects_size_ = 0;
- live_code_objects_size_ = 0;
- live_map_objects_size_ = 0;
- live_cell_objects_size_ = 0;
- live_lo_objects_size_ = 0;
+ if (!was_marked_incrementally_ && FLAG_verify_heap) {
+ VerifyMarkbitsAreClean();
+ }
#endif
}
@@ -168,31 +575,6 @@ void MarkCompactCollector::Finish() {
heap()->isolate()->stub_cache()->Clear();
heap()->external_string_table_.CleanUp();
-
- // If we've just compacted old space there's no reason to check the
- // fragmentation limit. Just return.
- if (HasCompacted()) return;
-
- // We compact the old generation on the next GC if it has gotten too
- // fragmented (ie, we could recover an expected amount of space by
- // reclaiming the waste and free list blocks).
- static const int kFragmentationLimit = 15; // Percent.
- static const int kFragmentationAllowed = 1 * MB; // Absolute.
- intptr_t old_gen_recoverable = 0;
- intptr_t old_gen_used = 0;
-
- OldSpaces spaces;
- for (OldSpace* space = spaces.next(); space != NULL; space = spaces.next()) {
- old_gen_recoverable += space->Waste() + space->AvailableFree();
- old_gen_used += space->Size();
- }
-
- int old_gen_fragmentation =
- static_cast<int>((old_gen_recoverable * 100.0) / old_gen_used);
- if (old_gen_fragmentation > kFragmentationLimit &&
- old_gen_recoverable > kFragmentationAllowed) {
- compact_on_next_gc_ = true;
- }
}
@@ -261,13 +643,21 @@ class CodeFlusher {
SharedFunctionInfo* shared = candidate->unchecked_shared();
Code* code = shared->unchecked_code();
- if (!code->IsMarked()) {
+ MarkBit code_mark = Marking::MarkBitFrom(code);
+ if (!code_mark.Get()) {
shared->set_code(lazy_compile);
candidate->set_code(lazy_compile);
} else {
candidate->set_code(shared->unchecked_code());
}
+ // We are in the middle of a GC cycle so the write barrier in the code
+ // setter did not record the slot update and we have to do that manually.
+ Address slot = candidate->address() + JSFunction::kCodeEntryOffset;
+ Code* target = Code::cast(Code::GetObjectFromEntryAddress(slot));
+ isolate_->heap()->mark_compact_collector()->
+ RecordCodeEntrySlot(slot, target);
+
candidate = next_candidate;
}
@@ -285,7 +675,8 @@ class CodeFlusher {
SetNextCandidate(candidate, NULL);
Code* code = candidate->unchecked_code();
- if (!code->IsMarked()) {
+ MarkBit code_mark = Marking::MarkBitFrom(code);
+ if (!code_mark.Get()) {
candidate->set_code(lazy_compile);
}
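Both CodeFlusher loops above now reduce to the same test: if a function's compiled code did not get its mark bit set during this GC, the function is pointed back at the lazy-compile stub. A sketch of that decision (FlushIfDead is a hypothetical wrapper; the calls inside it appear in the hunks above):

    static void FlushIfDead(JSFunction* candidate, Code* lazy_compile) {
      Code* code = candidate->unchecked_code();
      MarkBit code_mark = Marking::MarkBitFrom(code);
      if (!code_mark.Get()) {
        candidate->set_code(lazy_compile);  // drop the dead compiled code
      }
    }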
@@ -355,14 +746,14 @@ static inline HeapObject* ShortCircuitConsString(Object** p) {
// except the maps for the object and its possible substrings might be
// marked.
HeapObject* object = HeapObject::cast(*p);
- MapWord map_word = object->map_word();
- map_word.ClearMark();
- InstanceType type = map_word.ToMap()->instance_type();
+ if (!FLAG_clever_optimizations) return object;
+ Map* map = object->map();
+ InstanceType type = map->instance_type();
if ((type & kShortcutTypeMask) != kShortcutTypeTag) return object;
Object* second = reinterpret_cast<ConsString*>(object)->unchecked_second();
- Heap* heap = map_word.ToMap()->heap();
- if (second != heap->raw_unchecked_empty_string()) {
+ Heap* heap = map->GetHeap();
+ if (second != heap->empty_string()) {
return object;
}
@@ -404,14 +795,12 @@ class StaticMarkingVisitor : public StaticVisitorBase {
FixedArray::BodyDescriptor,
void>::Visit);
- table_.Register(kVisitFixedDoubleArray, DataObjectVisitor::Visit);
+ table_.Register(kVisitGlobalContext, &VisitGlobalContext);
- table_.Register(kVisitGlobalContext,
- &FixedBodyVisitor<StaticMarkingVisitor,
- Context::MarkCompactBodyDescriptor,
- void>::Visit);
+ table_.Register(kVisitFixedDoubleArray, DataObjectVisitor::Visit);
table_.Register(kVisitByteArray, &DataObjectVisitor::Visit);
+ table_.Register(kVisitFreeSpace, &DataObjectVisitor::Visit);
table_.Register(kVisitSeqAsciiString, &DataObjectVisitor::Visit);
table_.Register(kVisitSeqTwoByteString, &DataObjectVisitor::Visit);
@@ -456,7 +845,7 @@ class StaticMarkingVisitor : public StaticVisitorBase {
}
INLINE(static void VisitPointer(Heap* heap, Object** p)) {
- MarkObjectByPointer(heap, p);
+ MarkObjectByPointer(heap->mark_compact_collector(), p, p);
}
INLINE(static void VisitPointers(Heap* heap, Object** start, Object** end)) {
@@ -466,29 +855,49 @@ class StaticMarkingVisitor : public StaticVisitorBase {
if (VisitUnmarkedObjects(heap, start, end)) return;
// We are close to a stack overflow, so just mark the objects.
}
- for (Object** p = start; p < end; p++) MarkObjectByPointer(heap, p);
+ MarkCompactCollector* collector = heap->mark_compact_collector();
+ for (Object** p = start; p < end; p++) {
+ MarkObjectByPointer(collector, start, p);
+ }
+ }
+
+ static void VisitGlobalPropertyCell(Heap* heap, RelocInfo* rinfo) {
+ ASSERT(rinfo->rmode() == RelocInfo::GLOBAL_PROPERTY_CELL);
+ JSGlobalPropertyCell* cell =
+ JSGlobalPropertyCell::cast(rinfo->target_cell());
+ MarkBit mark = Marking::MarkBitFrom(cell);
+ heap->mark_compact_collector()->MarkObject(cell, mark);
+ }
+
+ static inline void VisitEmbeddedPointer(Heap* heap, RelocInfo* rinfo) {
+ ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
+ // TODO(mstarzinger): We do not short-circuit cons strings here, verify
+ // that there can be no such embedded pointers and add assertion here.
+ HeapObject* object = HeapObject::cast(rinfo->target_object());
+ heap->mark_compact_collector()->RecordRelocSlot(rinfo, object);
+ MarkBit mark = Marking::MarkBitFrom(object);
+ heap->mark_compact_collector()->MarkObject(object, mark);
}
static inline void VisitCodeTarget(Heap* heap, RelocInfo* rinfo) {
ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
- Code* code = Code::GetCodeFromTargetAddress(rinfo->target_address());
- if (FLAG_cleanup_code_caches_at_gc && code->is_inline_cache_stub()) {
+ Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
+ if (FLAG_cleanup_code_caches_at_gc && target->is_inline_cache_stub()) {
IC::Clear(rinfo->pc());
      // Note that targets for cleared inline caches do not have to be
      // marked since they are contained in HEAP->non_monomorphic_cache().
+ target = Code::GetCodeFromTargetAddress(rinfo->target_address());
} else {
- heap->mark_compact_collector()->MarkObject(code);
- }
- }
-
- static void VisitGlobalPropertyCell(Heap* heap, RelocInfo* rinfo) {
- ASSERT(rinfo->rmode() == RelocInfo::GLOBAL_PROPERTY_CELL);
- Object* cell = rinfo->target_cell();
- Object* old_cell = cell;
- VisitPointer(heap, &cell);
- if (cell != old_cell) {
- rinfo->set_target_cell(reinterpret_cast<JSGlobalPropertyCell*>(cell));
+ if (FLAG_cleanup_code_caches_at_gc &&
+ target->kind() == Code::STUB &&
+ target->major_key() == CodeStub::CallFunction &&
+ target->has_function_cache()) {
+ CallFunctionStub::Clear(heap, rinfo->pc());
+ }
+ MarkBit code_mark = Marking::MarkBitFrom(target);
+ heap->mark_compact_collector()->MarkObject(target, code_mark);
}
+ heap->mark_compact_collector()->RecordRelocSlot(rinfo, target);
}
static inline void VisitDebugTarget(Heap* heap, RelocInfo* rinfo) {
@@ -496,17 +905,21 @@ class StaticMarkingVisitor : public StaticVisitorBase {
rinfo->IsPatchedReturnSequence()) ||
(RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
rinfo->IsPatchedDebugBreakSlotSequence()));
- HeapObject* code = Code::GetCodeFromTargetAddress(rinfo->call_address());
- heap->mark_compact_collector()->MarkObject(code);
+ Code* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
+ MarkBit code_mark = Marking::MarkBitFrom(target);
+ heap->mark_compact_collector()->MarkObject(target, code_mark);
+ heap->mark_compact_collector()->RecordRelocSlot(rinfo, target);
}
// Mark object pointed to by p.
- INLINE(static void MarkObjectByPointer(Heap* heap, Object** p)) {
+ INLINE(static void MarkObjectByPointer(MarkCompactCollector* collector,
+ Object** anchor_slot,
+ Object** p)) {
if (!(*p)->IsHeapObject()) return;
HeapObject* object = ShortCircuitConsString(p);
- if (!object->IsMarked()) {
- heap->mark_compact_collector()->MarkUnmarkedObject(object);
- }
+ collector->RecordSlot(anchor_slot, p, object);
+ MarkBit mark = Marking::MarkBitFrom(object);
+ collector->MarkObject(object, mark);
}
@@ -515,12 +928,15 @@ class StaticMarkingVisitor : public StaticVisitorBase {
HeapObject* obj)) {
#ifdef DEBUG
ASSERT(Isolate::Current()->heap()->Contains(obj));
- ASSERT(!obj->IsMarked());
+ ASSERT(!HEAP->mark_compact_collector()->IsMarked(obj));
#endif
Map* map = obj->map();
- collector->SetMark(obj);
+ Heap* heap = obj->GetHeap();
+ MarkBit mark = Marking::MarkBitFrom(obj);
+ heap->mark_compact_collector()->SetMark(obj, mark);
// Mark the map pointer and the body.
- if (!map->IsMarked()) collector->MarkUnmarkedObject(map);
+ MarkBit map_mark = Marking::MarkBitFrom(map);
+ heap->mark_compact_collector()->MarkObject(map, map_mark);
IterateBody(map, obj);
}
@@ -536,15 +952,19 @@ class StaticMarkingVisitor : public StaticVisitorBase {
MarkCompactCollector* collector = heap->mark_compact_collector();
// Visit the unmarked objects.
for (Object** p = start; p < end; p++) {
- if (!(*p)->IsHeapObject()) continue;
- HeapObject* obj = HeapObject::cast(*p);
- if (obj->IsMarked()) continue;
+ Object* o = *p;
+ if (!o->IsHeapObject()) continue;
+ collector->RecordSlot(start, p, o);
+ HeapObject* obj = HeapObject::cast(o);
+ MarkBit mark = Marking::MarkBitFrom(obj);
+ if (mark.Get()) continue;
VisitUnmarkedObject(collector, obj);
}
return true;
}
static inline void VisitExternalReference(Address* p) { }
+ static inline void VisitExternalReference(RelocInfo* rinfo) { }
static inline void VisitRuntimeEntry(RelocInfo* rinfo) { }
private:
@@ -567,7 +987,7 @@ class StaticMarkingVisitor : public StaticVisitorBase {
void> StructObjectVisitor;
static void VisitJSWeakMap(Map* map, HeapObject* object) {
- MarkCompactCollector* collector = map->heap()->mark_compact_collector();
+ MarkCompactCollector* collector = map->GetHeap()->mark_compact_collector();
JSWeakMap* weak_map = reinterpret_cast<JSWeakMap*>(object);
// Enqueue weak map in linked list of encountered weak maps.
@@ -578,25 +998,27 @@ class StaticMarkingVisitor : public StaticVisitorBase {
// Skip visiting the backing hash table containing the mappings.
int object_size = JSWeakMap::BodyDescriptor::SizeOf(map, object);
BodyVisitorBase<StaticMarkingVisitor>::IteratePointers(
- map->heap(),
+ map->GetHeap(),
object,
JSWeakMap::BodyDescriptor::kStartOffset,
JSWeakMap::kTableOffset);
BodyVisitorBase<StaticMarkingVisitor>::IteratePointers(
- map->heap(),
+ map->GetHeap(),
object,
JSWeakMap::kTableOffset + kPointerSize,
object_size);
// Mark the backing hash table without pushing it on the marking stack.
- ASSERT(!weak_map->unchecked_table()->IsMarked());
- ASSERT(weak_map->unchecked_table()->map()->IsMarked());
- collector->SetMark(weak_map->unchecked_table());
+ ObjectHashTable* table = ObjectHashTable::cast(weak_map->table());
+ ASSERT(!MarkCompactCollector::IsMarked(table));
+ collector->SetMark(table, Marking::MarkBitFrom(table));
+ collector->MarkObject(table->map(), Marking::MarkBitFrom(table->map()));
+ ASSERT(MarkCompactCollector::IsMarked(table->map()));
}
static void VisitCode(Map* map, HeapObject* object) {
reinterpret_cast<Code*>(object)->CodeIterateBody<StaticMarkingVisitor>(
- map->heap());
+ map->GetHeap());
}
// Code flushing support.
@@ -608,7 +1030,7 @@ class StaticMarkingVisitor : public StaticVisitorBase {
static const int kRegExpCodeThreshold = 5;
inline static bool HasSourceCode(Heap* heap, SharedFunctionInfo* info) {
- Object* undefined = heap->raw_unchecked_undefined_value();
+ Object* undefined = heap->undefined_value();
return (info->script() != undefined) &&
(reinterpret_cast<Script*>(info->script())->source() != undefined);
}
@@ -629,8 +1051,12 @@ class StaticMarkingVisitor : public StaticVisitorBase {
// Code is either on stack, in compilation cache or referenced
// by optimized version of function.
- if (function->unchecked_code()->IsMarked()) {
- shared_info->set_code_age(0);
+ MarkBit code_mark =
+ Marking::MarkBitFrom(function->unchecked_code());
+ if (code_mark.Get()) {
+ if (!Marking::MarkBitFrom(shared_info).Get()) {
+ shared_info->set_code_age(0);
+ }
return false;
}
@@ -645,8 +1071,9 @@ class StaticMarkingVisitor : public StaticVisitorBase {
inline static bool IsFlushable(Heap* heap, SharedFunctionInfo* shared_info) {
// Code is either on stack, in compilation cache or referenced
// by optimized version of function.
- if (shared_info->unchecked_code()->IsMarked()) {
- shared_info->set_code_age(0);
+ MarkBit code_mark =
+ Marking::MarkBitFrom(shared_info->unchecked_code());
+ if (code_mark.Get()) {
return false;
}
@@ -658,11 +1085,7 @@ class StaticMarkingVisitor : public StaticVisitorBase {
// We never flush code for Api functions.
Object* function_data = shared_info->function_data();
- if (function_data->IsHeapObject() &&
- (SafeMap(function_data)->instance_type() ==
- FUNCTION_TEMPLATE_INFO_TYPE)) {
- return false;
- }
+ if (function_data->IsFunctionTemplateInfo()) return false;
// Only flush code for functions.
if (shared_info->code()->kind() != Code::FUNCTION) return false;
@@ -695,40 +1118,9 @@ class StaticMarkingVisitor : public StaticVisitorBase {
return true;
}
-
- static inline Map* SafeMap(Object* obj) {
- MapWord map_word = HeapObject::cast(obj)->map_word();
- map_word.ClearMark();
- map_word.ClearOverflow();
- return map_word.ToMap();
- }
-
-
- static inline bool IsJSBuiltinsObject(Object* obj) {
- return obj->IsHeapObject() &&
- (SafeMap(obj)->instance_type() == JS_BUILTINS_OBJECT_TYPE);
- }
-
-
static inline bool IsValidNotBuiltinContext(Object* ctx) {
- if (!ctx->IsHeapObject()) return false;
-
- Map* map = SafeMap(ctx);
- Heap* heap = map->heap();
- if (!(map == heap->raw_unchecked_function_context_map() ||
- map == heap->raw_unchecked_catch_context_map() ||
- map == heap->raw_unchecked_with_context_map() ||
- map == heap->raw_unchecked_global_context_map())) {
- return false;
- }
-
- Context* context = reinterpret_cast<Context*>(ctx);
-
- if (IsJSBuiltinsObject(context->global())) {
- return false;
- }
-
- return true;
+ return ctx->IsContext() &&
+ !Context::cast(ctx)->global()->IsJSBuiltinsObject();
}
@@ -748,13 +1140,15 @@ class StaticMarkingVisitor : public StaticVisitorBase {
bool is_ascii) {
// Make sure that the fixed array is in fact initialized on the RegExp.
// We could potentially trigger a GC when initializing the RegExp.
- if (SafeMap(re->data())->instance_type() != FIXED_ARRAY_TYPE) return;
+ if (HeapObject::cast(re->data())->map()->instance_type() !=
+ FIXED_ARRAY_TYPE) return;
// Make sure this is a RegExp that actually contains code.
if (re->TypeTagUnchecked() != JSRegExp::IRREGEXP) return;
Object* code = re->DataAtUnchecked(JSRegExp::code_index(is_ascii));
- if (!code->IsSmi() && SafeMap(code)->instance_type() == CODE_TYPE) {
+ if (!code->IsSmi() &&
+ HeapObject::cast(code)->map()->instance_type() == CODE_TYPE) {
// Save a copy that can be reinstated if we need the code again.
re->SetDataAtUnchecked(JSRegExp::saved_code_index(is_ascii),
code,
@@ -790,7 +1184,7 @@ class StaticMarkingVisitor : public StaticVisitorBase {
// If we did not use the code for kRegExpCodeThreshold mark sweep GCs
// we flush the code.
static void VisitRegExpAndFlushCode(Map* map, HeapObject* object) {
- Heap* heap = map->heap();
+ Heap* heap = map->GetHeap();
MarkCompactCollector* collector = heap->mark_compact_collector();
if (!collector->is_code_flushing_enabled()) {
VisitJSRegExpFields(map, object);
@@ -807,7 +1201,7 @@ class StaticMarkingVisitor : public StaticVisitorBase {
static void VisitSharedFunctionInfoAndFlushCode(Map* map,
HeapObject* object) {
- MarkCompactCollector* collector = map->heap()->mark_compact_collector();
+ MarkCompactCollector* collector = map->GetHeap()->mark_compact_collector();
if (!collector->is_code_flushing_enabled()) {
VisitSharedFunctionInfoGeneric(map, object);
return;
@@ -818,7 +1212,7 @@ class StaticMarkingVisitor : public StaticVisitorBase {
static void VisitSharedFunctionInfoAndFlushCodeGeneric(
Map* map, HeapObject* object, bool known_flush_code_candidate) {
- Heap* heap = map->heap();
+ Heap* heap = map->GetHeap();
SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(object);
if (shared->IsInobjectSlackTrackingInProgress()) shared->DetachInitialMap();
@@ -835,18 +1229,30 @@ class StaticMarkingVisitor : public StaticVisitorBase {
static void VisitCodeEntry(Heap* heap, Address entry_address) {
- Object* code = Code::GetObjectFromEntryAddress(entry_address);
- Object* old_code = code;
- VisitPointer(heap, &code);
- if (code != old_code) {
- Memory::Address_at(entry_address) =
- reinterpret_cast<Code*>(code)->entry();
- }
+ Code* code = Code::cast(Code::GetObjectFromEntryAddress(entry_address));
+ MarkBit mark = Marking::MarkBitFrom(code);
+ heap->mark_compact_collector()->MarkObject(code, mark);
+ heap->mark_compact_collector()->
+ RecordCodeEntrySlot(entry_address, code);
}
+ static void VisitGlobalContext(Map* map, HeapObject* object) {
+ FixedBodyVisitor<StaticMarkingVisitor,
+ Context::MarkCompactBodyDescriptor,
+ void>::Visit(map, object);
+
+ MarkCompactCollector* collector = map->GetHeap()->mark_compact_collector();
+ for (int idx = Context::FIRST_WEAK_SLOT;
+ idx < Context::GLOBAL_CONTEXT_SLOTS;
+ ++idx) {
+ Object** slot =
+ HeapObject::RawField(object, FixedArray::OffsetOfElementAt(idx));
+ collector->RecordSlot(slot, slot, *slot);
+ }
+ }
static void VisitJSFunctionAndFlushCode(Map* map, HeapObject* object) {
- Heap* heap = map->heap();
+ Heap* heap = map->GetHeap();
MarkCompactCollector* collector = heap->mark_compact_collector();
if (!collector->is_code_flushing_enabled()) {
VisitJSFunction(map, object);
@@ -861,7 +1267,9 @@ class StaticMarkingVisitor : public StaticVisitorBase {
}
if (!flush_code_candidate) {
- collector->MarkObject(jsfunction->unchecked_shared()->unchecked_code());
+ Code* code = jsfunction->unchecked_shared()->unchecked_code();
+ MarkBit code_mark = Marking::MarkBitFrom(code);
+ heap->mark_compact_collector()->MarkObject(code, code_mark);
if (jsfunction->unchecked_code()->kind() == Code::OPTIMIZED_FUNCTION) {
// For optimized functions we should retain both non-optimized version
@@ -877,7 +1285,11 @@ class StaticMarkingVisitor : public StaticVisitorBase {
i < count;
i++) {
JSFunction* inlined = reinterpret_cast<JSFunction*>(literals->get(i));
- collector->MarkObject(inlined->unchecked_shared()->unchecked_code());
+ Code* inlined_code = inlined->unchecked_shared()->unchecked_code();
+ MarkBit inlined_code_mark =
+ Marking::MarkBitFrom(inlined_code);
+ heap->mark_compact_collector()->MarkObject(
+ inlined_code, inlined_code_mark);
}
}
}
@@ -902,12 +1314,11 @@ class StaticMarkingVisitor : public StaticVisitorBase {
static inline void VisitJSFunctionFields(Map* map,
JSFunction* object,
bool flush_code_candidate) {
- Heap* heap = map->heap();
- MarkCompactCollector* collector = heap->mark_compact_collector();
+ Heap* heap = map->GetHeap();
VisitPointers(heap,
- SLOT_ADDR(object, JSFunction::kPropertiesOffset),
- SLOT_ADDR(object, JSFunction::kCodeEntryOffset));
+ HeapObject::RawField(object, JSFunction::kPropertiesOffset),
+ HeapObject::RawField(object, JSFunction::kCodeEntryOffset));
if (!flush_code_candidate) {
VisitCodeEntry(heap, object->address() + JSFunction::kCodeEntryOffset);
@@ -917,29 +1328,39 @@ class StaticMarkingVisitor : public StaticVisitorBase {
    // Visit shared function info to avoid double-checking its
    // flushability.
SharedFunctionInfo* shared_info = object->unchecked_shared();
- if (!shared_info->IsMarked()) {
+ MarkBit shared_info_mark = Marking::MarkBitFrom(shared_info);
+ if (!shared_info_mark.Get()) {
Map* shared_info_map = shared_info->map();
- collector->SetMark(shared_info);
- collector->MarkObject(shared_info_map);
+ MarkBit shared_info_map_mark =
+ Marking::MarkBitFrom(shared_info_map);
+ heap->mark_compact_collector()->SetMark(shared_info, shared_info_mark);
+ heap->mark_compact_collector()->MarkObject(shared_info_map,
+ shared_info_map_mark);
VisitSharedFunctionInfoAndFlushCodeGeneric(shared_info_map,
shared_info,
true);
}
}
- VisitPointers(heap,
- SLOT_ADDR(object,
- JSFunction::kCodeEntryOffset + kPointerSize),
- SLOT_ADDR(object, JSFunction::kNonWeakFieldsEndOffset));
+ VisitPointers(
+ heap,
+ HeapObject::RawField(object,
+ JSFunction::kCodeEntryOffset + kPointerSize),
+ HeapObject::RawField(object,
+ JSFunction::kNonWeakFieldsEndOffset));
// Don't visit the next function list field as it is a weak reference.
+ Object** next_function =
+ HeapObject::RawField(object, JSFunction::kNextFunctionLinkOffset);
+ heap->mark_compact_collector()->RecordSlot(
+ next_function, next_function, *next_function);
}
static inline void VisitJSRegExpFields(Map* map,
HeapObject* object) {
int last_property_offset =
JSRegExp::kSize + kPointerSize * map->inobject_properties();
- VisitPointers(map->heap(),
+ VisitPointers(map->GetHeap(),
SLOT_ADDR(object, JSRegExp::kPropertiesOffset),
SLOT_ADDR(object, last_property_offset));
}
@@ -995,7 +1416,9 @@ class CodeMarkingVisitor : public ThreadVisitor {
void VisitThread(Isolate* isolate, ThreadLocalTop* top) {
for (StackFrameIterator it(isolate, top); !it.done(); it.Advance()) {
- collector_->MarkObject(it.frame()->unchecked_code());
+ Code* code = it.frame()->unchecked_code();
+ MarkBit code_bit = Marking::MarkBitFrom(code);
+ collector_->MarkObject(it.frame()->unchecked_code(), code_bit);
}
}
@@ -1017,8 +1440,10 @@ class SharedFunctionInfoMarkingVisitor : public ObjectVisitor {
Object* obj = *slot;
if (obj->IsSharedFunctionInfo()) {
SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(obj);
- collector_->MarkObject(shared->unchecked_code());
- collector_->MarkObject(shared);
+ MarkBit shared_mark = Marking::MarkBitFrom(shared);
+ MarkBit code_mark = Marking::MarkBitFrom(shared->unchecked_code());
+ collector_->MarkObject(shared->unchecked_code(), code_mark);
+ collector_->MarkObject(shared, shared_mark);
}
}
@@ -1030,7 +1455,8 @@ class SharedFunctionInfoMarkingVisitor : public ObjectVisitor {
void MarkCompactCollector::PrepareForCodeFlushing() {
ASSERT(heap() == Isolate::Current()->heap());
- if (!FLAG_flush_code) {
+    // TODO(1609): Currently the incremental marker does not support
+    // code flushing.
+ if (!FLAG_flush_code || was_marked_incrementally_) {
EnableCodeFlushing(false);
return;
}
@@ -1042,16 +1468,21 @@ void MarkCompactCollector::PrepareForCodeFlushing() {
return;
}
#endif
+
EnableCodeFlushing(true);
// Ensure that empty descriptor array is marked. Method MarkDescriptorArray
// relies on it being marked before any other descriptor array.
- MarkObject(heap()->raw_unchecked_empty_descriptor_array());
+ HeapObject* descriptor_array = heap()->empty_descriptor_array();
+ MarkBit descriptor_array_mark = Marking::MarkBitFrom(descriptor_array);
+ MarkObject(descriptor_array, descriptor_array_mark);
// Make sure we are not referencing the code from the stack.
ASSERT(this == heap()->mark_compact_collector());
for (StackFrameIterator it; !it.done(); it.Advance()) {
- MarkObject(it.frame()->unchecked_code());
+ Code* code = it.frame()->unchecked_code();
+ MarkBit code_mark = Marking::MarkBitFrom(code);
+ MarkObject(code, code_mark);
}
// Iterate the archived stacks in all threads to check if
@@ -1064,7 +1495,7 @@ void MarkCompactCollector::PrepareForCodeFlushing() {
heap()->isolate()->compilation_cache()->IterateFunctions(&visitor);
heap()->isolate()->handle_scope_implementer()->Iterate(&visitor);
- ProcessMarkingStack();
+ ProcessMarkingDeque();
}
@@ -1088,19 +1519,21 @@ class RootMarkingVisitor : public ObjectVisitor {
// Replace flat cons strings in place.
HeapObject* object = ShortCircuitConsString(p);
- if (object->IsMarked()) return;
+ MarkBit mark_bit = Marking::MarkBitFrom(object);
+ if (mark_bit.Get()) return;
Map* map = object->map();
// Mark the object.
- collector_->SetMark(object);
+ collector_->SetMark(object, mark_bit);
// Mark the map pointer and body, and push them on the marking stack.
- collector_->MarkObject(map);
+ MarkBit map_mark = Marking::MarkBitFrom(map);
+ collector_->MarkObject(map, map_mark);
StaticMarkingVisitor::IterateBody(map, object);
// Mark all the objects reachable from the map and body. May leave
// overflowed objects in the heap.
- collector_->EmptyMarkingStack();
+ collector_->EmptyMarkingDeque();
}
MarkCompactCollector* collector_;
@@ -1116,17 +1549,19 @@ class SymbolTableCleaner : public ObjectVisitor {
virtual void VisitPointers(Object** start, Object** end) {
// Visit all HeapObject pointers in [start, end).
for (Object** p = start; p < end; p++) {
- if ((*p)->IsHeapObject() && !HeapObject::cast(*p)->IsMarked()) {
+ Object* o = *p;
+ if (o->IsHeapObject() &&
+ !Marking::MarkBitFrom(HeapObject::cast(o)).Get()) {
// Check if the symbol being pruned is an external symbol. We need to
// delete the associated external data as this symbol is going away.
// Since no objects have yet been moved we can safely access the map of
// the object.
- if ((*p)->IsExternalString()) {
+ if (o->IsExternalString()) {
heap_->FinalizeExternalString(String::cast(*p));
}
- // Set the entry to null_value (as deleted).
- *p = heap_->raw_unchecked_null_value();
+ // Set the entry to the_hole_value (as deleted).
+ *p = heap_->the_hole_value();
pointers_removed_++;
}
}
@@ -1147,8 +1582,7 @@ class SymbolTableCleaner : public ObjectVisitor {
class MarkCompactWeakObjectRetainer : public WeakObjectRetainer {
public:
virtual Object* RetainAs(Object* object) {
- MapWord first_word = HeapObject::cast(object)->map_word();
- if (first_word.IsMarked()) {
+ if (Marking::MarkBitFrom(HeapObject::cast(object)).Get()) {
return object;
} else {
return NULL;
@@ -1157,28 +1591,26 @@ class MarkCompactWeakObjectRetainer : public WeakObjectRetainer {
};
-void MarkCompactCollector::MarkUnmarkedObject(HeapObject* object) {
- ASSERT(!object->IsMarked());
+void MarkCompactCollector::ProcessNewlyMarkedObject(HeapObject* object) {
+ ASSERT(IsMarked(object));
ASSERT(HEAP->Contains(object));
if (object->IsMap()) {
Map* map = Map::cast(object);
if (FLAG_cleanup_code_caches_at_gc) {
map->ClearCodeCache(heap());
}
- SetMark(map);
// When map collection is enabled we have to mark through map's transitions
// in a special way to make transition links weak.
// Only maps for subclasses of JSReceiver can have transitions.
STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
- if (FLAG_collect_maps && map->instance_type() >= FIRST_JS_RECEIVER_TYPE) {
+ if (collect_maps_ && map->instance_type() >= FIRST_JS_RECEIVER_TYPE) {
MarkMapContents(map);
} else {
- marking_stack_.Push(map);
+ marking_deque_.PushBlack(map);
}
} else {
- SetMark(object);
- marking_stack_.Push(object);
+ marking_deque_.PushBlack(object);
}
}
@@ -1187,12 +1619,17 @@ void MarkCompactCollector::MarkMapContents(Map* map) {
// Mark prototype transitions array but don't push it into marking stack.
// This will make references from it weak. We will clean dead prototype
// transitions in ClearNonLiveTransitions.
- FixedArray* prototype_transitions = map->unchecked_prototype_transitions();
- if (!prototype_transitions->IsMarked()) SetMark(prototype_transitions);
+ FixedArray* prototype_transitions = map->prototype_transitions();
+ MarkBit mark = Marking::MarkBitFrom(prototype_transitions);
+ if (!mark.Get()) {
+ mark.Set();
+ MemoryChunk::IncrementLiveBytes(prototype_transitions->address(),
+ prototype_transitions->Size());
+ }
- Object* raw_descriptor_array =
- *HeapObject::RawField(map,
- Map::kInstanceDescriptorsOrBitField3Offset);
+ Object** raw_descriptor_array_slot =
+ HeapObject::RawField(map, Map::kInstanceDescriptorsOrBitField3Offset);
+ Object* raw_descriptor_array = *raw_descriptor_array_slot;
if (!raw_descriptor_array->IsSmi()) {
MarkDescriptorArray(
reinterpret_cast<DescriptorArray*>(raw_descriptor_array));
@@ -1206,24 +1643,26 @@ void MarkCompactCollector::MarkMapContents(Map* map) {
Object** end_slot = HeapObject::RawField(map, Map::kPointerFieldsEndOffset);
- StaticMarkingVisitor::VisitPointers(map->heap(), start_slot, end_slot);
+ StaticMarkingVisitor::VisitPointers(map->GetHeap(), start_slot, end_slot);
}
void MarkCompactCollector::MarkDescriptorArray(
DescriptorArray* descriptors) {
- if (descriptors->IsMarked()) return;
+ MarkBit descriptors_mark = Marking::MarkBitFrom(descriptors);
+ if (descriptors_mark.Get()) return;
// Empty descriptor array is marked as a root before any maps are marked.
- ASSERT(descriptors != HEAP->raw_unchecked_empty_descriptor_array());
- SetMark(descriptors);
+ ASSERT(descriptors != heap()->empty_descriptor_array());
+ SetMark(descriptors, descriptors_mark);
FixedArray* contents = reinterpret_cast<FixedArray*>(
descriptors->get(DescriptorArray::kContentArrayIndex));
ASSERT(contents->IsHeapObject());
- ASSERT(!contents->IsMarked());
+ ASSERT(!IsMarked(contents));
ASSERT(contents->IsFixedArray());
ASSERT(contents->length() >= 2);
- SetMark(contents);
+ MarkBit contents_mark = Marking::MarkBitFrom(contents);
+ SetMark(contents, contents_mark);
// Contents contains (value, details) pairs. If the details say that the type
// of descriptor is MAP_TRANSITION, CONSTANT_TRANSITION,
// EXTERNAL_ARRAY_TRANSITION or NULL_DESCRIPTOR, we don't mark the value as
@@ -1233,27 +1672,44 @@ void MarkCompactCollector::MarkDescriptorArray(
// If the pair (value, details) at index i, i+1 is not
// a transition or null descriptor, mark the value.
PropertyDetails details(Smi::cast(contents->get(i + 1)));
- if (details.type() < FIRST_PHANTOM_PROPERTY_TYPE) {
- HeapObject* object = reinterpret_cast<HeapObject*>(contents->get(i));
- if (object->IsHeapObject() && !object->IsMarked()) {
- SetMark(object);
- marking_stack_.Push(object);
+
+ Object** slot = contents->data_start() + i;
+ Object* value = *slot;
+ if (!value->IsHeapObject()) continue;
+
+ RecordSlot(slot, slot, *slot);
+
+ if (details.IsProperty()) {
+ HeapObject* object = HeapObject::cast(value);
+ MarkBit mark = Marking::MarkBitFrom(HeapObject::cast(object));
+ if (!mark.Get()) {
+ SetMark(HeapObject::cast(object), mark);
+ marking_deque_.PushBlack(object);
+ }
+ } else if (details.type() == ELEMENTS_TRANSITION && value->IsFixedArray()) {
+ // For maps with multiple elements transitions, the transition maps are
+ // stored in a FixedArray. Keep the fixed array alive but not the maps
+ // that it refers to.
+ HeapObject* object = HeapObject::cast(value);
+ MarkBit mark = Marking::MarkBitFrom(HeapObject::cast(object));
+ if (!mark.Get()) {
+ SetMark(HeapObject::cast(object), mark);
}
}
}
// The DescriptorArray descriptors contains a pointer to its contents array,
// but the contents array is already marked.
- marking_stack_.Push(descriptors);
+ marking_deque_.PushBlack(descriptors);
}
void MarkCompactCollector::CreateBackPointers() {
HeapObjectIterator iterator(heap()->map_space());
- for (HeapObject* next_object = iterator.next();
- next_object != NULL; next_object = iterator.next()) {
- if (next_object->IsMap()) { // Could also be ByteArray on free list.
+ for (HeapObject* next_object = iterator.Next();
+ next_object != NULL; next_object = iterator.Next()) {
+ if (next_object->IsMap()) { // Could also be FreeSpace object on free list.
Map* map = Map::cast(next_object);
- STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
+ STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
if (map->instance_type() >= FIRST_JS_RECEIVER_TYPE) {
map->CreateBackPointers();
} else {
@@ -1264,54 +1720,123 @@ void MarkCompactCollector::CreateBackPointers() {
}
-static int OverflowObjectSize(HeapObject* obj) {
- // Recover the normal map pointer, it might be marked as live and
- // overflowed.
- MapWord map_word = obj->map_word();
- map_word.ClearMark();
- map_word.ClearOverflow();
- return obj->SizeFromMap(map_word.ToMap());
+// Fill the marking stack with overflowed objects returned by the given
+// iterator. Stop when the marking stack is filled or the end of the space
+// is reached, whichever comes first.
+template<class T>
+static void DiscoverGreyObjectsWithIterator(Heap* heap,
+ MarkingDeque* marking_deque,
+ T* it) {
+ // The caller should ensure that the marking stack is initially not full,
+ // so that we don't waste effort pointlessly scanning for objects.
+ ASSERT(!marking_deque->IsFull());
+
+ Map* filler_map = heap->one_pointer_filler_map();
+ for (HeapObject* object = it->Next();
+ object != NULL;
+ object = it->Next()) {
+ MarkBit markbit = Marking::MarkBitFrom(object);
+ if ((object->map() != filler_map) && Marking::IsGrey(markbit)) {
+ Marking::GreyToBlack(markbit);
+ MemoryChunk::IncrementLiveBytes(object->address(), object->Size());
+ marking_deque->PushBlack(object);
+ if (marking_deque->IsFull()) return;
+ }
+ }
}
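
DiscoverGreyObjectsWithIterator implements one side of the tri-colour contract: grey objects (seen, but body not yet scanned) are blackened as they enter the bounded worklist, and discovery stops early when the worklist fills. A self-contained sketch of that contract, using hypothetical types in place of V8's heap iterators:

    #include <cstddef>
    #include <deque>
    #include <vector>

    enum Colour { WHITE, GREY, BLACK };

    // Blacken every grey object and push it for body scanning; stop as
    // soon as the bounded worklist is full, like the helper above.
    void DiscoverGrey(std::vector<Colour>* heap,
                      std::deque<size_t>* worklist, size_t capacity) {
      for (size_t i = 0; i < heap->size(); ++i) {
        if ((*heap)[i] == GREY) {
          (*heap)[i] = BLACK;
          worklist->push_back(i);
          if (worklist->size() >= capacity) return;  // worklist full
        }
      }
    }
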
-class OverflowedObjectsScanner : public AllStatic {
- public:
- // Fill the marking stack with overflowed objects returned by the given
- // iterator. Stop when the marking stack is filled or the end of the space
- // is reached, whichever comes first.
- template<class T>
- static inline void ScanOverflowedObjects(MarkCompactCollector* collector,
- T* it) {
- // The caller should ensure that the marking stack is initially not full,
- // so that we don't waste effort pointlessly scanning for objects.
- ASSERT(!collector->marking_stack_.is_full());
-
- for (HeapObject* object = it->next(); object != NULL; object = it->next()) {
- if (object->IsOverflowed()) {
- object->ClearOverflow();
- ASSERT(object->IsMarked());
- ASSERT(HEAP->Contains(object));
- collector->marking_stack_.Push(object);
- if (collector->marking_stack_.is_full()) return;
- }
+static inline int MarkWordToObjectStarts(uint32_t mark_bits, int* starts);
+
+
+static void DiscoverGreyObjectsOnPage(MarkingDeque* marking_deque, Page* p) {
+ ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
+ ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
+ ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
+ ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
+
+ MarkBit::CellType* cells = p->markbits()->cells();
+
+ int last_cell_index =
+ Bitmap::IndexToCell(
+ Bitmap::CellAlignIndex(
+ p->AddressToMarkbitIndex(p->ObjectAreaEnd())));
+
+ int cell_index = Page::kFirstUsedCell;
+ Address cell_base = p->ObjectAreaStart();
+
+ for (cell_index = Page::kFirstUsedCell;
+ cell_index < last_cell_index;
+ cell_index++, cell_base += 32 * kPointerSize) {
+ ASSERT((unsigned)cell_index ==
+ Bitmap::IndexToCell(
+ Bitmap::CellAlignIndex(
+ p->AddressToMarkbitIndex(cell_base))));
+
+ const MarkBit::CellType current_cell = cells[cell_index];
+ if (current_cell == 0) continue;
+
+ const MarkBit::CellType next_cell = cells[cell_index + 1];
+ MarkBit::CellType grey_objects = current_cell &
+ ((current_cell >> 1) | (next_cell << (Bitmap::kBitsPerCell - 1)));
+
+ int offset = 0;
+ while (grey_objects != 0) {
+ int trailing_zeros = CompilerIntrinsics::CountTrailingZeros(grey_objects);
+ grey_objects >>= trailing_zeros;
+ offset += trailing_zeros;
+ MarkBit markbit(&cells[cell_index], 1 << offset, false);
+ ASSERT(Marking::IsGrey(markbit));
+ Marking::GreyToBlack(markbit);
+ Address addr = cell_base + offset * kPointerSize;
+ HeapObject* object = HeapObject::FromAddress(addr);
+ MemoryChunk::IncrementLiveBytes(object->address(), object->Size());
+ marking_deque->PushBlack(object);
+ if (marking_deque->IsFull()) return;
+ offset += 2;
+ grey_objects >>= 2;
}
+
+ grey_objects >>= (Bitmap::kBitsPerCell - 1);
}
-};
+}
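
The grey_objects expression above relies on the two-bit encoding asserted at the top of the function (white = 00, black = 10, grey = 11, reading the mark bit and then its successor): a bit position starts a grey object exactly when both its own bit and the following bit are set, and the next_cell shift covers pairs that straddle a cell boundary. A worked check of that expression, assuming 32-bit cells:

    #include <cassert>
    #include <cstdint>

    int main() {
      const int kBitsPerCell = 32;
      // Object A starts at bit 0 and is black: bit 0 set, bit 1 clear.
      // Object B starts at bit 4 and is grey:  bits 4 and 5 both set.
      uint32_t current_cell = 0x31;  // binary 110001: bits 0, 4, 5
      uint32_t next_cell = 0;
      uint32_t grey = current_cell &
          ((current_cell >> 1) | (next_cell << (kBitsPerCell - 1)));
      assert(grey == (1u << 4));  // only the grey start bit survives
      return 0;
    }
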
+
+
+static void DiscoverGreyObjectsInSpace(Heap* heap,
+ MarkingDeque* marking_deque,
+ PagedSpace* space) {
+ if (!space->was_swept_conservatively()) {
+ HeapObjectIterator it(space);
+ DiscoverGreyObjectsWithIterator(heap, marking_deque, &it);
+ } else {
+ PageIterator it(space);
+ while (it.has_next()) {
+ Page* p = it.next();
+ DiscoverGreyObjectsOnPage(marking_deque, p);
+ if (marking_deque->IsFull()) return;
+ }
+ }
+}
bool MarkCompactCollector::IsUnmarkedHeapObject(Object** p) {
- return (*p)->IsHeapObject() && !HeapObject::cast(*p)->IsMarked();
+ Object* o = *p;
+ if (!o->IsHeapObject()) return false;
+ HeapObject* heap_object = HeapObject::cast(o);
+ MarkBit mark = Marking::MarkBitFrom(heap_object);
+ return !mark.Get();
}
void MarkCompactCollector::MarkSymbolTable() {
- SymbolTable* symbol_table = heap()->raw_unchecked_symbol_table();
+ SymbolTable* symbol_table = heap()->symbol_table();
// Mark the symbol table itself.
- SetMark(symbol_table);
+ MarkBit symbol_table_mark = Marking::MarkBitFrom(symbol_table);
+ SetMark(symbol_table, symbol_table_mark);
// Explicitly mark the prefix.
MarkingVisitor marker(heap());
symbol_table->IteratePrefix(&marker);
- ProcessMarkingStack();
+ ProcessMarkingDeque();
}
@@ -1324,9 +1849,9 @@ void MarkCompactCollector::MarkRoots(RootMarkingVisitor* visitor) {
MarkSymbolTable();
// There may be overflowed objects in the heap. Visit them now.
- while (marking_stack_.overflowed()) {
- RefillMarkingStack();
- EmptyMarkingStack();
+ while (marking_deque_.overflowed()) {
+ RefillMarkingDeque();
+ EmptyMarkingDeque();
}
}
@@ -1344,9 +1869,13 @@ void MarkCompactCollector::MarkObjectGroups() {
bool group_marked = false;
for (size_t j = 0; j < entry->length_; j++) {
Object* object = *objects[j];
- if (object->IsHeapObject() && HeapObject::cast(object)->IsMarked()) {
- group_marked = true;
- break;
+ if (object->IsHeapObject()) {
+ HeapObject* heap_object = HeapObject::cast(object);
+ MarkBit mark = Marking::MarkBitFrom(heap_object);
+ if (mark.Get()) {
+ group_marked = true;
+ break;
+ }
}
}
@@ -1355,17 +1884,21 @@ void MarkCompactCollector::MarkObjectGroups() {
continue;
}
- // An object in the group is marked, so mark all heap objects in
- // the group.
+ // An object in the group is marked, so mark as grey all white heap
+ // objects in the group.
for (size_t j = 0; j < entry->length_; ++j) {
- if ((*objects[j])->IsHeapObject()) {
- MarkObject(HeapObject::cast(*objects[j]));
+ Object* object = *objects[j];
+ if (object->IsHeapObject()) {
+ HeapObject* heap_object = HeapObject::cast(object);
+ MarkBit mark = Marking::MarkBitFrom(heap_object);
+ MarkObject(heap_object, mark);
}
}
- // Once the entire group has been marked, dispose it because it's
- // not needed anymore.
+ // Once the entire group has been colored grey, set the object group
+ // to NULL so it won't be processed again.
entry->Dispose();
+ object_groups->at(i) = NULL;
}
object_groups->Rewind(last);
}
@@ -1380,7 +1913,7 @@ void MarkCompactCollector::MarkImplicitRefGroups() {
ImplicitRefGroup* entry = ref_groups->at(i);
ASSERT(entry != NULL);
- if (!(*entry->parent_)->IsMarked()) {
+ if (!IsMarked(*entry->parent_)) {
(*ref_groups)[last++] = entry;
continue;
}
@@ -1389,7 +1922,9 @@ void MarkCompactCollector::MarkImplicitRefGroups() {
// A parent object is marked, so mark all child heap objects.
for (size_t j = 0; j < entry->length_; ++j) {
if ((*children[j])->IsHeapObject()) {
- MarkObject(HeapObject::cast(*children[j]));
+ HeapObject* child = HeapObject::cast(*children[j]);
+ MarkBit mark = Marking::MarkBitFrom(child);
+ MarkObject(child, mark);
}
}
@@ -1405,21 +1940,17 @@ void MarkCompactCollector::MarkImplicitRefGroups() {
// Before: the marking stack contains zero or more heap object pointers.
// After: the marking stack is empty, and all objects reachable from the
// marking stack have been marked, or are overflowed in the heap.
-void MarkCompactCollector::EmptyMarkingStack() {
- while (!marking_stack_.is_empty()) {
- while (!marking_stack_.is_empty()) {
- HeapObject* object = marking_stack_.Pop();
+void MarkCompactCollector::EmptyMarkingDeque() {
+ while (!marking_deque_.IsEmpty()) {
+ while (!marking_deque_.IsEmpty()) {
+ HeapObject* object = marking_deque_.Pop();
ASSERT(object->IsHeapObject());
ASSERT(heap()->Contains(object));
- ASSERT(object->IsMarked());
- ASSERT(!object->IsOverflowed());
+ ASSERT(Marking::IsBlack(Marking::MarkBitFrom(object)));
- // Because the object is marked, we have to recover the original map
- // pointer and use it to mark the object's body.
- MapWord map_word = object->map_word();
- map_word.ClearMark();
- Map* map = map_word.ToMap();
- MarkObject(map);
+ Map* map = object->map();
+ MarkBit map_mark = Marking::MarkBitFrom(map);
+ MarkObject(map, map_mark);
StaticMarkingVisitor::IterateBody(map, object);
}
@@ -1436,39 +1967,45 @@ void MarkCompactCollector::EmptyMarkingStack() {
// before sweeping completes. If sweeping completes, there are no remaining
// overflowed objects in the heap so the overflow flag on the marking stack
// is cleared.
-void MarkCompactCollector::RefillMarkingStack() {
- ASSERT(marking_stack_.overflowed());
+void MarkCompactCollector::RefillMarkingDeque() {
+ ASSERT(marking_deque_.overflowed());
- SemiSpaceIterator new_it(heap()->new_space(), &OverflowObjectSize);
- OverflowedObjectsScanner::ScanOverflowedObjects(this, &new_it);
- if (marking_stack_.is_full()) return;
+ SemiSpaceIterator new_it(heap()->new_space());
+ DiscoverGreyObjectsWithIterator(heap(), &marking_deque_, &new_it);
+ if (marking_deque_.IsFull()) return;
- HeapObjectIterator old_pointer_it(heap()->old_pointer_space(),
- &OverflowObjectSize);
- OverflowedObjectsScanner::ScanOverflowedObjects(this, &old_pointer_it);
- if (marking_stack_.is_full()) return;
+ DiscoverGreyObjectsInSpace(heap(),
+ &marking_deque_,
+ heap()->old_pointer_space());
+ if (marking_deque_.IsFull()) return;
- HeapObjectIterator old_data_it(heap()->old_data_space(), &OverflowObjectSize);
- OverflowedObjectsScanner::ScanOverflowedObjects(this, &old_data_it);
- if (marking_stack_.is_full()) return;
+ DiscoverGreyObjectsInSpace(heap(),
+ &marking_deque_,
+ heap()->old_data_space());
+ if (marking_deque_.IsFull()) return;
- HeapObjectIterator code_it(heap()->code_space(), &OverflowObjectSize);
- OverflowedObjectsScanner::ScanOverflowedObjects(this, &code_it);
- if (marking_stack_.is_full()) return;
+ DiscoverGreyObjectsInSpace(heap(),
+ &marking_deque_,
+ heap()->code_space());
+ if (marking_deque_.IsFull()) return;
- HeapObjectIterator map_it(heap()->map_space(), &OverflowObjectSize);
- OverflowedObjectsScanner::ScanOverflowedObjects(this, &map_it);
- if (marking_stack_.is_full()) return;
+ DiscoverGreyObjectsInSpace(heap(),
+ &marking_deque_,
+ heap()->map_space());
+ if (marking_deque_.IsFull()) return;
- HeapObjectIterator cell_it(heap()->cell_space(), &OverflowObjectSize);
- OverflowedObjectsScanner::ScanOverflowedObjects(this, &cell_it);
- if (marking_stack_.is_full()) return;
+ DiscoverGreyObjectsInSpace(heap(),
+ &marking_deque_,
+ heap()->cell_space());
+ if (marking_deque_.IsFull()) return;
- LargeObjectIterator lo_it(heap()->lo_space(), &OverflowObjectSize);
- OverflowedObjectsScanner::ScanOverflowedObjects(this, &lo_it);
- if (marking_stack_.is_full()) return;
+ LargeObjectIterator lo_it(heap()->lo_space());
+ DiscoverGreyObjectsWithIterator(heap(),
+ &marking_deque_,
+ &lo_it);
+ if (marking_deque_.IsFull()) return;
- marking_stack_.clear_overflowed();
+ marking_deque_.ClearOverflowed();
}
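
RefillMarkingDeque is the recovery half of the overflow protocol: when a push finds the deque full, the object simply keeps its grey colour in the heap and an overflow flag is set; refilling then rescans each space for grey objects until none remain. A compact sketch of the push side, with hypothetical names (not V8's MarkingDeque):

    #include <cstddef>
    #include <vector>

    class BoundedWorklist {
     public:
      explicit BoundedWorklist(size_t capacity) : capacity_(capacity) {}
      // On overflow the caller leaves the object grey in the heap; a
      // later refill pass rediscovers it, as RefillMarkingDeque does.
      void Push(int object) {
        if (items_.size() == capacity_) { overflowed_ = true; return; }
        items_.push_back(object);
      }
      bool IsFull() const { return items_.size() == capacity_; }
      bool overflowed() const { return overflowed_; }
      void ClearOverflowed() { overflowed_ = false; }
     private:
      size_t capacity_;
      std::vector<int> items_;
      bool overflowed_ = false;
    };
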
@@ -1476,23 +2013,23 @@ void MarkCompactCollector::RefillMarkingStack() {
// stack. Before: the marking stack contains zero or more heap object
// pointers. After: the marking stack is empty and there are no overflowed
// objects in the heap.
-void MarkCompactCollector::ProcessMarkingStack() {
- EmptyMarkingStack();
- while (marking_stack_.overflowed()) {
- RefillMarkingStack();
- EmptyMarkingStack();
+void MarkCompactCollector::ProcessMarkingDeque() {
+ EmptyMarkingDeque();
+ while (marking_deque_.overflowed()) {
+ RefillMarkingDeque();
+ EmptyMarkingDeque();
}
}
void MarkCompactCollector::ProcessExternalMarking() {
bool work_to_do = true;
- ASSERT(marking_stack_.is_empty());
+ ASSERT(marking_deque_.IsEmpty());
while (work_to_do) {
MarkObjectGroups();
MarkImplicitRefGroups();
- work_to_do = !marking_stack_.is_empty();
- ProcessMarkingStack();
+ work_to_do = !marking_deque_.IsEmpty();
+ ProcessMarkingDeque();
}
}
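
ProcessExternalMarking is a fixed-point loop: each round may discover new roots via object groups and implicit reference groups, and the loop terminates only once a round discovers nothing new. A generic sketch of the pattern, with hypothetical names:

    #include <deque>
    #include <functional>

    template <typename T>
    void MarkToFixedPoint(std::deque<T>* worklist,
                          std::function<void(std::deque<T>*)> discover,
                          std::function<void(const T&)> process) {
      bool work_to_do = true;
      while (work_to_do) {
        discover(worklist);                 // may enqueue new work
        work_to_do = !worklist->empty();
        while (!worklist->empty()) {        // drain the worklist
          T item = worklist->front();
          worklist->pop_front();
          process(item);
        }
      }
    }
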
@@ -1504,16 +2041,43 @@ void MarkCompactCollector::MarkLiveObjects() {
// with the C stack limit check.
PostponeInterruptsScope postpone(heap()->isolate());
+ bool incremental_marking_overflowed = false;
+ IncrementalMarking* incremental_marking = heap_->incremental_marking();
+ if (was_marked_incrementally_) {
+ // Finalize the incremental marking and check whether we had an overflow.
+ // Both markers use grey color to mark overflowed objects so
+ // the non-incremental marker can deal with them as if overflow
+ // occurred during normal marking.
+ // But the incremental marker uses a separate marking deque
+ // so we have to explicitly copy its overflow state.
+ incremental_marking->Finalize();
+ incremental_marking_overflowed =
+ incremental_marking->marking_deque()->overflowed();
+ incremental_marking->marking_deque()->ClearOverflowed();
+ } else {
+ // Abort any pending incremental activities e.g. incremental sweeping.
+ incremental_marking->Abort();
+ }
+
#ifdef DEBUG
ASSERT(state_ == PREPARE_GC);
state_ = MARK_LIVE_OBJECTS;
#endif
- // The to space contains live objects, the from space is used as a marking
- // stack.
- marking_stack_.Initialize(heap()->new_space()->FromSpaceLow(),
- heap()->new_space()->FromSpaceHigh());
+ // The to space contains live objects, a page in from space is used as a
+ // marking stack.
+ Address marking_deque_start = heap()->new_space()->FromSpacePageLow();
+ Address marking_deque_end = heap()->new_space()->FromSpacePageHigh();
+ if (FLAG_force_marking_deque_overflows) {
+ marking_deque_end = marking_deque_start + 64 * kPointerSize;
+ }
+ marking_deque_.Initialize(marking_deque_start,
+ marking_deque_end);
+ ASSERT(!marking_deque_.overflowed());
- ASSERT(!marking_stack_.overflowed());
+ if (incremental_marking_overflowed) {
+ // There are overflowed objects left in the heap after incremental marking.
+ marking_deque_.SetOverflowed();
+ }
PrepareForCodeFlushing();
@@ -1535,15 +2099,20 @@ void MarkCompactCollector::MarkLiveObjects() {
&IsUnmarkedHeapObject);
// Then we mark the objects and process the transitive closure.
heap()->isolate()->global_handles()->IterateWeakRoots(&root_visitor);
- while (marking_stack_.overflowed()) {
- RefillMarkingStack();
- EmptyMarkingStack();
+ while (marking_deque_.overflowed()) {
+ RefillMarkingDeque();
+ EmptyMarkingDeque();
}
// Repeat host application specific marking to mark unmarked objects
// reachable from the weak roots.
ProcessExternalMarking();
+ AfterMarking();
+}
+
+
+void MarkCompactCollector::AfterMarking() {
// Object literal map caches reference symbols (cache keys) and maps
// (cache values). At this point still useful maps have already been
// marked. Mark the keys for the alive values before we process the
@@ -1553,7 +2122,7 @@ void MarkCompactCollector::MarkLiveObjects() {
// Prune the symbol table removing all symbols only pointed to by the
// symbol table. Cannot use symbol_table() here because the symbol
// table is marked.
- SymbolTable* symbol_table = heap()->raw_unchecked_symbol_table();
+ SymbolTable* symbol_table = heap()->symbol_table();
SymbolTableCleaner v(heap());
symbol_table->IterateElements(&v);
symbol_table->ElementsRemoved(v.PointersRemoved());
@@ -1582,13 +2151,13 @@ void MarkCompactCollector::ProcessMapCaches() {
Object* raw_context = heap()->global_contexts_list_;
while (raw_context != heap()->undefined_value()) {
Context* context = reinterpret_cast<Context*>(raw_context);
- if (context->IsMarked()) {
+ if (IsMarked(context)) {
HeapObject* raw_map_cache =
HeapObject::cast(context->get(Context::MAP_CACHE_INDEX));
// A map cache may be reachable from the stack. In this case
// it's already transitively marked and it's too late to clean
// up its parts.
- if (!raw_map_cache->IsMarked() &&
+ if (!IsMarked(raw_map_cache) &&
raw_map_cache != heap()->undefined_value()) {
MapCache* map_cache = reinterpret_cast<MapCache*>(raw_map_cache);
int existing_elements = map_cache->NumberOfElements();
@@ -1598,17 +2167,16 @@ void MarkCompactCollector::ProcessMapCaches() {
i += MapCache::kEntrySize) {
Object* raw_key = map_cache->get(i);
if (raw_key == heap()->undefined_value() ||
- raw_key == heap()->null_value()) continue;
+ raw_key == heap()->the_hole_value()) continue;
STATIC_ASSERT(MapCache::kEntrySize == 2);
Object* raw_map = map_cache->get(i + 1);
- if (raw_map->IsHeapObject() &&
- HeapObject::cast(raw_map)->IsMarked()) {
+ if (raw_map->IsHeapObject() && IsMarked(raw_map)) {
++used_elements;
} else {
// Delete useless entries with unmarked maps.
ASSERT(raw_map->IsMap());
- map_cache->set_null_unchecked(heap(), i);
- map_cache->set_null_unchecked(heap(), i + 1);
+ map_cache->set_the_hole(i);
+ map_cache->set_the_hole(i + 1);
}
}
if (used_elements == 0) {
@@ -1618,64 +2186,38 @@ void MarkCompactCollector::ProcessMapCaches() {
// extra complexity during GC. We rely on subsequent cache
// usages (EnsureCapacity) to do this.
map_cache->ElementsRemoved(existing_elements - used_elements);
- MarkObject(map_cache);
+ MarkBit map_cache_markbit = Marking::MarkBitFrom(map_cache);
+ MarkObject(map_cache, map_cache_markbit);
}
}
}
// Move to next element in the list.
raw_context = context->get(Context::NEXT_CONTEXT_LINK);
}
- ProcessMarkingStack();
-}
-
-
-#ifdef DEBUG
-void MarkCompactCollector::UpdateLiveObjectCount(HeapObject* obj) {
- live_bytes_ += obj->Size();
- if (heap()->new_space()->Contains(obj)) {
- live_young_objects_size_ += obj->Size();
- } else if (heap()->map_space()->Contains(obj)) {
- ASSERT(obj->IsMap());
- live_map_objects_size_ += obj->Size();
- } else if (heap()->cell_space()->Contains(obj)) {
- ASSERT(obj->IsJSGlobalPropertyCell());
- live_cell_objects_size_ += obj->Size();
- } else if (heap()->old_pointer_space()->Contains(obj)) {
- live_old_pointer_objects_size_ += obj->Size();
- } else if (heap()->old_data_space()->Contains(obj)) {
- live_old_data_objects_size_ += obj->Size();
- } else if (heap()->code_space()->Contains(obj)) {
- live_code_objects_size_ += obj->Size();
- } else if (heap()->lo_space()->Contains(obj)) {
- live_lo_objects_size_ += obj->Size();
- } else {
- UNREACHABLE();
- }
+ ProcessMarkingDeque();
}
-#endif // DEBUG
-void MarkCompactCollector::SweepLargeObjectSpace() {
-#ifdef DEBUG
- ASSERT(state_ == MARK_LIVE_OBJECTS);
- state_ =
- compacting_collection_ ? ENCODE_FORWARDING_ADDRESSES : SWEEP_SPACES;
-#endif
- // Deallocate unmarked objects and clear marked bits for marked objects.
- heap()->lo_space()->FreeUnmarkedObjects();
-}
+void MarkCompactCollector::ReattachInitialMaps() {
+ HeapObjectIterator map_iterator(heap()->map_space());
+ for (HeapObject* obj = map_iterator.Next();
+ obj != NULL;
+ obj = map_iterator.Next()) {
+ if (obj->IsFreeSpace()) continue;
+ Map* map = Map::cast(obj);
+ STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
+ if (map->instance_type() < FIRST_JS_RECEIVER_TYPE) continue;
-// Safe to use during marking phase only.
-bool MarkCompactCollector::SafeIsMap(HeapObject* object) {
- MapWord metamap = object->map_word();
- metamap.ClearMark();
- return metamap.ToMap()->instance_type() == MAP_TYPE;
+ if (map->attached_to_shared_function_info()) {
+ JSFunction::cast(map->constructor())->shared()->AttachInitialMap(map);
+ }
+ }
}
void MarkCompactCollector::ClearNonLiveTransitions() {
- HeapObjectIterator map_iterator(heap()->map_space(), &SizeOfMarkedObject);
+ HeapObjectIterator map_iterator(heap()->map_space());
// Iterate over the map space, setting map transitions that go from
// a marked map to an unmarked map to null transitions. At the same time,
// set all the prototype fields of maps back to their original value,
@@ -1686,17 +2228,19 @@ void MarkCompactCollector::ClearNonLiveTransitions() {
// scan the descriptor arrays of those maps, not all maps.
// All of these actions are carried out only on maps of JSObjects
// and related subtypes.
- for (HeapObject* obj = map_iterator.next();
- obj != NULL; obj = map_iterator.next()) {
+ for (HeapObject* obj = map_iterator.Next();
+ obj != NULL; obj = map_iterator.Next()) {
Map* map = reinterpret_cast<Map*>(obj);
- if (!map->IsMarked() && map->IsByteArray()) continue;
+ MarkBit map_mark = Marking::MarkBitFrom(map);
+ if (map->IsFreeSpace()) continue;
- ASSERT(SafeIsMap(map));
+ ASSERT(map->IsMap());
// Only JSObject and subtypes have map transitions and back pointers.
- STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
- if (map->instance_type() < FIRST_JS_RECEIVER_TYPE) continue;
+ STATIC_ASSERT(LAST_TYPE == LAST_JS_OBJECT_TYPE);
+ if (map->instance_type() < FIRST_JS_OBJECT_TYPE) continue;
- if (map->IsMarked() && map->attached_to_shared_function_info()) {
+ if (map_mark.Get() &&
+ map->attached_to_shared_function_info()) {
// This map is used for inobject slack tracking and has been detached
// from SharedFunctionInfo during the mark phase.
// Since it survived the GC, reattach it now.
@@ -1705,52 +2249,55 @@ void MarkCompactCollector::ClearNonLiveTransitions() {
// Clear dead prototype transitions.
int number_of_transitions = map->NumberOfProtoTransitions();
- if (number_of_transitions > 0) {
- FixedArray* prototype_transitions =
- map->unchecked_prototype_transitions();
- int new_number_of_transitions = 0;
- const int header = Map::kProtoTransitionHeaderSize;
- const int proto_offset =
- header + Map::kProtoTransitionPrototypeOffset;
- const int map_offset = header + Map::kProtoTransitionMapOffset;
- const int step = Map::kProtoTransitionElementsPerEntry;
- for (int i = 0; i < number_of_transitions; i++) {
- Object* prototype = prototype_transitions->get(proto_offset + i * step);
- Object* cached_map = prototype_transitions->get(map_offset + i * step);
- if (HeapObject::cast(prototype)->IsMarked() &&
- HeapObject::cast(cached_map)->IsMarked()) {
- if (new_number_of_transitions != i) {
- prototype_transitions->set_unchecked(
- heap_,
- proto_offset + new_number_of_transitions * step,
- prototype,
- UPDATE_WRITE_BARRIER);
- prototype_transitions->set_unchecked(
- heap_,
- map_offset + new_number_of_transitions * step,
- cached_map,
- SKIP_WRITE_BARRIER);
- }
- new_number_of_transitions++;
+ FixedArray* prototype_transitions = map->prototype_transitions();
+
+ int new_number_of_transitions = 0;
+ const int header = Map::kProtoTransitionHeaderSize;
+ const int proto_offset =
+ header + Map::kProtoTransitionPrototypeOffset;
+ const int map_offset = header + Map::kProtoTransitionMapOffset;
+ const int step = Map::kProtoTransitionElementsPerEntry;
+ for (int i = 0; i < number_of_transitions; i++) {
+ Object* prototype = prototype_transitions->get(proto_offset + i * step);
+ Object* cached_map = prototype_transitions->get(map_offset + i * step);
+ if (IsMarked(prototype) && IsMarked(cached_map)) {
+ if (new_number_of_transitions != i) {
+ prototype_transitions->set_unchecked(
+ heap_,
+ proto_offset + new_number_of_transitions * step,
+ prototype,
+ UPDATE_WRITE_BARRIER);
+ prototype_transitions->set_unchecked(
+ heap_,
+ map_offset + new_number_of_transitions * step,
+ cached_map,
+ SKIP_WRITE_BARRIER);
}
}
// Fill slots that became free with undefined value.
- Object* undefined = heap()->raw_unchecked_undefined_value();
+ Object* undefined = heap()->undefined_value();
for (int i = new_number_of_transitions * step;
i < number_of_transitions * step;
i++) {
+ // The undefined object is on a page that is never compacted and never
+ // in new space so it is OK to skip the write barrier. Also it's a
+ // root.
prototype_transitions->set_unchecked(heap_,
header + i,
undefined,
SKIP_WRITE_BARRIER);
+
+ Object** undefined_slot =
+ prototype_transitions->data_start() + i;
+ RecordSlot(undefined_slot, undefined_slot, undefined);
}
map->SetNumberOfProtoTransitions(new_number_of_transitions);
}
// Follow the chain of back pointers to find the prototype.
Map* current = map;
- while (SafeIsMap(current)) {
+ while (current->IsMap()) {
current = reinterpret_cast<Map*>(current->prototype());
ASSERT(current->IsHeapObject());
}
@@ -1759,21 +2306,28 @@ void MarkCompactCollector::ClearNonLiveTransitions() {
// Follow back pointers, setting them to prototype,
// clearing map transitions when necessary.
current = map;
- bool on_dead_path = !current->IsMarked();
+ bool on_dead_path = !map_mark.Get();
Object* next;
- while (SafeIsMap(current)) {
+ while (current->IsMap()) {
next = current->prototype();
// There should never be a dead map above a live map.
- ASSERT(on_dead_path || current->IsMarked());
+ MarkBit current_mark = Marking::MarkBitFrom(current);
+ bool is_alive = current_mark.Get();
+ ASSERT(on_dead_path || is_alive);
// A live map above a dead map indicates a dead transition.
// This test will always be false on the first iteration.
- if (on_dead_path && current->IsMarked()) {
+ if (on_dead_path && is_alive) {
on_dead_path = false;
current->ClearNonLiveTransitions(heap(), real_prototype);
}
*HeapObject::RawField(current, Map::kPrototypeOffset) =
real_prototype;
+
+ if (is_alive) {
+ Object** slot = HeapObject::RawField(current, Map::kPrototypeOffset);
+ RecordSlot(slot, slot, real_prototype);
+ }
current = reinterpret_cast<Map*>(next);
}
}
@@ -1783,13 +2337,13 @@ void MarkCompactCollector::ClearNonLiveTransitions() {
void MarkCompactCollector::ProcessWeakMaps() {
Object* weak_map_obj = encountered_weak_maps();
while (weak_map_obj != Smi::FromInt(0)) {
- ASSERT(HeapObject::cast(weak_map_obj)->IsMarked());
+ ASSERT(MarkCompactCollector::IsMarked(HeapObject::cast(weak_map_obj)));
JSWeakMap* weak_map = reinterpret_cast<JSWeakMap*>(weak_map_obj);
- ObjectHashTable* table = weak_map->unchecked_table();
+ ObjectHashTable* table = ObjectHashTable::cast(weak_map->table());
for (int i = 0; i < table->Capacity(); i++) {
- if (HeapObject::cast(table->KeyAt(i))->IsMarked()) {
+ if (MarkCompactCollector::IsMarked(HeapObject::cast(table->KeyAt(i)))) {
Object* value = table->get(table->EntryToValueIndex(i));
- StaticMarkingVisitor::MarkObjectByPointer(heap(), &value);
+ StaticMarkingVisitor::VisitPointer(heap(), &value);
table->set_unchecked(heap(),
table->EntryToValueIndex(i),
value,
@@ -1804,12 +2358,12 @@ void MarkCompactCollector::ProcessWeakMaps() {
void MarkCompactCollector::ClearWeakMaps() {
Object* weak_map_obj = encountered_weak_maps();
while (weak_map_obj != Smi::FromInt(0)) {
- ASSERT(HeapObject::cast(weak_map_obj)->IsMarked());
+ ASSERT(MarkCompactCollector::IsMarked(HeapObject::cast(weak_map_obj)));
JSWeakMap* weak_map = reinterpret_cast<JSWeakMap*>(weak_map_obj);
- ObjectHashTable* table = weak_map->unchecked_table();
+ ObjectHashTable* table = ObjectHashTable::cast(weak_map->table());
for (int i = 0; i < table->Capacity(); i++) {
- if (!HeapObject::cast(table->KeyAt(i))->IsMarked()) {
- table->RemoveEntry(i, heap());
+ if (!MarkCompactCollector::IsMarked(HeapObject::cast(table->KeyAt(i)))) {
+ table->RemoveEntry(i);
}
}
weak_map_obj = weak_map->next();
@@ -1818,316 +2372,97 @@ void MarkCompactCollector::ClearWeakMaps() {
set_encountered_weak_maps(Smi::FromInt(0));
}
-// -------------------------------------------------------------------------
-// Phase 2: Encode forwarding addresses.
-// When compacting, forwarding addresses for objects in old space and map
-// space are encoded in their map pointer word (along with an encoding of
-// their map pointers).
-//
-// The excact encoding is described in the comments for class MapWord in
-// objects.h.
+
+// We scavenge new space simultaneously with sweeping. This is done in two
+// passes.
//
-// An address range [start, end) can have both live and non-live objects.
-// Maximal non-live regions are marked so they can be skipped on subsequent
-// sweeps of the heap. A distinguished map-pointer encoding is used to mark
-// free regions of one-word size (in which case the next word is the start
-// of a live object). A second distinguished map-pointer encoding is used
-// to mark free regions larger than one word, and the size of the free
-// region (including the first word) is written to the second word of the
-// region.
+// The first pass migrates all alive objects from one semispace to another or
+// promotes them to old space. The forwarding address is written directly
+// into the first word of the object without any encoding. If an object is
+// dead we write NULL as its forwarding address.
//
-// Any valid map page offset must lie in the object area of the page, so map
-// page offsets less than Page::kObjectStartOffset are invalid. We use a
-// pair of distinguished invalid map encodings (for single word and multiple
-// words) to indicate free regions in the page found during computation of
-// forwarding addresses and skipped over in subsequent sweeps.
-
-
-// Encode a free region, defined by the given start address and size, in the
-// first word or two of the region.
-void EncodeFreeRegion(Address free_start, int free_size) {
- ASSERT(free_size >= kIntSize);
- if (free_size == kIntSize) {
- Memory::uint32_at(free_start) = MarkCompactCollector::kSingleFreeEncoding;
- } else {
- ASSERT(free_size >= 2 * kIntSize);
- Memory::uint32_at(free_start) = MarkCompactCollector::kMultiFreeEncoding;
- Memory::int_at(free_start + kIntSize) = free_size;
- }
+// The second pass updates pointers to new space in all spaces. It is possible
+// to encounter pointers to dead new space objects during traversal of pointers
+// to new space. We should clear them to avoid encountering them during the
+// next pointer iteration. This is an issue if the store buffer overflows and
+// we have to scan the entire old space, including dead objects, looking for
+// pointers to new space.
+void MarkCompactCollector::MigrateObject(Address dst,
+ Address src,
+ int size,
+ AllocationSpace dest) {
+ HEAP_PROFILE(heap(), ObjectMoveEvent(src, dst));
+ if (dest == OLD_POINTER_SPACE || dest == LO_SPACE) {
+ Address src_slot = src;
+ Address dst_slot = dst;
+ ASSERT(IsAligned(size, kPointerSize));
+
+ for (int remaining = size / kPointerSize; remaining > 0; remaining--) {
+ Object* value = Memory::Object_at(src_slot);
+
+ Memory::Object_at(dst_slot) = value;
+
+ if (heap_->InNewSpace(value)) {
+ heap_->store_buffer()->Mark(dst_slot);
+ } else if (value->IsHeapObject() && IsOnEvacuationCandidate(value)) {
+ SlotsBuffer::AddTo(&slots_buffer_allocator_,
+ &migration_slots_buffer_,
+ reinterpret_cast<Object**>(dst_slot),
+ SlotsBuffer::IGNORE_OVERFLOW);
+ }
-#ifdef DEBUG
- // Zap the body of the free region.
- if (FLAG_enable_slow_asserts) {
- for (int offset = 2 * kIntSize;
- offset < free_size;
- offset += kPointerSize) {
- Memory::Address_at(free_start + offset) = kZapValue;
+ src_slot += kPointerSize;
+ dst_slot += kPointerSize;
}
- }
-#endif
-}
-
-
-// Try to promote all objects in new space. Heap numbers and sequential
-// strings are promoted to the code space, large objects to large object space,
-// and all others to the old space.
-inline MaybeObject* MCAllocateFromNewSpace(Heap* heap,
- HeapObject* object,
- int object_size) {
- MaybeObject* forwarded;
- if (object_size > heap->MaxObjectSizeInPagedSpace()) {
- forwarded = Failure::Exception();
- } else {
- OldSpace* target_space = heap->TargetSpace(object);
- ASSERT(target_space == heap->old_pointer_space() ||
- target_space == heap->old_data_space());
- forwarded = target_space->MCAllocateRaw(object_size);
- }
- Object* result;
- if (!forwarded->ToObject(&result)) {
- result = heap->new_space()->MCAllocateRaw(object_size)->ToObjectUnchecked();
- }
- return result;
-}
-
-
-// Allocation functions for the paged spaces call the space's MCAllocateRaw.
-MUST_USE_RESULT inline MaybeObject* MCAllocateFromOldPointerSpace(
- Heap *heap,
- HeapObject* ignore,
- int object_size) {
- return heap->old_pointer_space()->MCAllocateRaw(object_size);
-}
-
-
-MUST_USE_RESULT inline MaybeObject* MCAllocateFromOldDataSpace(
- Heap* heap,
- HeapObject* ignore,
- int object_size) {
- return heap->old_data_space()->MCAllocateRaw(object_size);
-}
-
-
-MUST_USE_RESULT inline MaybeObject* MCAllocateFromCodeSpace(
- Heap* heap,
- HeapObject* ignore,
- int object_size) {
- return heap->code_space()->MCAllocateRaw(object_size);
-}
-
-
-MUST_USE_RESULT inline MaybeObject* MCAllocateFromMapSpace(
- Heap* heap,
- HeapObject* ignore,
- int object_size) {
- return heap->map_space()->MCAllocateRaw(object_size);
-}
-
-
-MUST_USE_RESULT inline MaybeObject* MCAllocateFromCellSpace(
- Heap* heap, HeapObject* ignore, int object_size) {
- return heap->cell_space()->MCAllocateRaw(object_size);
-}
-
-
-// The forwarding address is encoded at the same offset as the current
-// to-space object, but in from space.
-inline void EncodeForwardingAddressInNewSpace(Heap* heap,
- HeapObject* old_object,
- int object_size,
- Object* new_object,
- int* ignored) {
- int offset =
- heap->new_space()->ToSpaceOffsetForAddress(old_object->address());
- Memory::Address_at(heap->new_space()->FromSpaceLow() + offset) =
- HeapObject::cast(new_object)->address();
-}
-
-
-// The forwarding address is encoded in the map pointer of the object as an
-// offset (in terms of live bytes) from the address of the first live object
-// in the page.
-inline void EncodeForwardingAddressInPagedSpace(Heap* heap,
- HeapObject* old_object,
- int object_size,
- Object* new_object,
- int* offset) {
- // Record the forwarding address of the first live object if necessary.
- if (*offset == 0) {
- Page::FromAddress(old_object->address())->mc_first_forwarded =
- HeapObject::cast(new_object)->address();
- }
-
- MapWord encoding =
- MapWord::EncodeAddress(old_object->map()->address(), *offset);
- old_object->set_map_word(encoding);
- *offset += object_size;
- ASSERT(*offset <= Page::kObjectAreaSize);
-}
-
-// Most non-live objects are ignored.
-inline void IgnoreNonLiveObject(HeapObject* object, Isolate* isolate) {}
-
-
-// Function template that, given a range of addresses (eg, a semispace or a
-// paged space page), iterates through the objects in the range to clear
-// mark bits and compute and encode forwarding addresses. As a side effect,
-// maximal free chunks are marked so that they can be skipped on subsequent
-// sweeps.
-//
-// The template parameters are an allocation function, a forwarding address
-// encoding function, and a function to process non-live objects.
-template<MarkCompactCollector::AllocationFunction Alloc,
- MarkCompactCollector::EncodingFunction Encode,
- MarkCompactCollector::ProcessNonLiveFunction ProcessNonLive>
-inline void EncodeForwardingAddressesInRange(MarkCompactCollector* collector,
- Address start,
- Address end,
- int* offset) {
- // The start address of the current free region while sweeping the space.
- // This address is set when a transition from live to non-live objects is
- // encountered. A value (an encoding of the 'next free region' pointer)
- // is written to memory at this address when a transition from non-live to
- // live objects is encountered.
- Address free_start = NULL;
-
- // A flag giving the state of the previously swept object. Initially true
- // to ensure that free_start is initialized to a proper address before
- // trying to write to it.
- bool is_prev_alive = true;
-
- int object_size; // Will be set on each iteration of the loop.
- for (Address current = start; current < end; current += object_size) {
- HeapObject* object = HeapObject::FromAddress(current);
- if (object->IsMarked()) {
- object->ClearMark();
- collector->tracer()->decrement_marked_count();
- object_size = object->Size();
-
- Object* forwarded =
- Alloc(collector->heap(), object, object_size)->ToObjectUnchecked();
- Encode(collector->heap(), object, object_size, forwarded, offset);
+ if (compacting_ && HeapObject::FromAddress(dst)->IsJSFunction()) {
+ Address code_entry_slot = dst + JSFunction::kCodeEntryOffset;
+ Address code_entry = Memory::Address_at(code_entry_slot);
-#ifdef DEBUG
- if (FLAG_gc_verbose) {
- PrintF("forward %p -> %p.\n", object->address(),
- HeapObject::cast(forwarded)->address());
- }
-#endif
- if (!is_prev_alive) { // Transition from non-live to live.
- EncodeFreeRegion(free_start, static_cast<int>(current - free_start));
- is_prev_alive = true;
- }
- } else { // Non-live object.
- object_size = object->Size();
- ProcessNonLive(object, collector->heap()->isolate());
- if (is_prev_alive) { // Transition from live to non-live.
- free_start = current;
- is_prev_alive = false;
+ if (Page::FromAddress(code_entry)->IsEvacuationCandidate()) {
+ SlotsBuffer::AddTo(&slots_buffer_allocator_,
+ &migration_slots_buffer_,
+ SlotsBuffer::CODE_ENTRY_SLOT,
+ code_entry_slot,
+ SlotsBuffer::IGNORE_OVERFLOW);
}
- LiveObjectList::ProcessNonLive(object);
}
- }
-
- // If we ended on a free region, mark it.
- if (!is_prev_alive) {
- EncodeFreeRegion(free_start, static_cast<int>(end - free_start));
- }
-}
-
-
-// Functions to encode the forwarding pointers in each compactable space.
-void MarkCompactCollector::EncodeForwardingAddressesInNewSpace() {
- int ignored;
- EncodeForwardingAddressesInRange<MCAllocateFromNewSpace,
- EncodeForwardingAddressInNewSpace,
- IgnoreNonLiveObject>(
- this,
- heap()->new_space()->bottom(),
- heap()->new_space()->top(),
- &ignored);
-}
-
-
-template<MarkCompactCollector::AllocationFunction Alloc,
- MarkCompactCollector::ProcessNonLiveFunction ProcessNonLive>
-void MarkCompactCollector::EncodeForwardingAddressesInPagedSpace(
- PagedSpace* space) {
- PageIterator it(space, PageIterator::PAGES_IN_USE);
- while (it.has_next()) {
- Page* p = it.next();
-
- // The offset of each live object in the page from the first live object
- // in the page.
- int offset = 0;
- EncodeForwardingAddressesInRange<Alloc,
- EncodeForwardingAddressInPagedSpace,
- ProcessNonLive>(
- this,
- p->ObjectAreaStart(),
- p->AllocationTop(),
- &offset);
- }
-}
-
-
-// We scavange new space simultaneously with sweeping. This is done in two
-// passes.
-// The first pass migrates all alive objects from one semispace to another or
-// promotes them to old space. Forwading address is written directly into
-// first word of object without any encoding. If object is dead we are writing
-// NULL as a forwarding address.
-// The second pass updates pointers to new space in all spaces. It is possible
-// to encounter pointers to dead objects during traversal of dirty regions we
-// should clear them to avoid encountering them during next dirty regions
-// iteration.
-static void MigrateObject(Heap* heap,
- Address dst,
- Address src,
- int size,
- bool to_old_space) {
- if (to_old_space) {
- heap->CopyBlockToOldSpaceAndUpdateRegionMarks(dst, src, size);
+ } else if (dest == CODE_SPACE) {
+ PROFILE(heap()->isolate(), CodeMoveEvent(src, dst));
+ heap()->MoveBlock(dst, src, size);
+ SlotsBuffer::AddTo(&slots_buffer_allocator_,
+ &migration_slots_buffer_,
+ SlotsBuffer::RELOCATED_CODE_OBJECT,
+ dst,
+ SlotsBuffer::IGNORE_OVERFLOW);
+ Code::cast(HeapObject::FromAddress(dst))->Relocate(dst - src);
} else {
- heap->CopyBlock(dst, src, size);
+ ASSERT(dest == OLD_DATA_SPACE || dest == NEW_SPACE);
+ heap()->MoveBlock(dst, src, size);
}
-
Memory::Address_at(src) = dst;
}
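
MigrateObject copies pointer spaces word by word so that each copied slot can be classified on the fly: values in new space go to the store buffer, values on evacuation candidates go to a slots buffer. A stripped-down sketch of that copy-and-record loop, with hypothetical stand-ins for both buffers:

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    void MigrateWords(uintptr_t* dst, const uintptr_t* src, size_t words,
                      bool (*needs_remembering)(uintptr_t),
                      std::vector<uintptr_t*>* remembered_slots) {
      for (size_t i = 0; i < words; ++i) {
        dst[i] = src[i];  // copy one pointer-sized slot
        if (needs_remembering(dst[i])) {
          remembered_slots->push_back(&dst[i]);  // record for later fix-up
        }
      }
    }
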
-class StaticPointersToNewGenUpdatingVisitor : public
- StaticNewSpaceVisitor<StaticPointersToNewGenUpdatingVisitor> {
- public:
- static inline void VisitPointer(Heap* heap, Object** p) {
- if (!(*p)->IsHeapObject()) return;
-
- HeapObject* obj = HeapObject::cast(*p);
- Address old_addr = obj->address();
-
- if (heap->new_space()->Contains(obj)) {
- ASSERT(heap->InFromSpace(*p));
- *p = HeapObject::FromAddress(Memory::Address_at(old_addr));
- }
- }
-};
-
-
// Visitor for updating pointers from live objects in old spaces to new space.
// It does not expect to encounter pointers to dead objects.
-class PointersToNewGenUpdatingVisitor: public ObjectVisitor {
+class PointersUpdatingVisitor: public ObjectVisitor {
public:
- explicit PointersToNewGenUpdatingVisitor(Heap* heap) : heap_(heap) { }
+ explicit PointersUpdatingVisitor(Heap* heap) : heap_(heap) { }
void VisitPointer(Object** p) {
- StaticPointersToNewGenUpdatingVisitor::VisitPointer(heap_, p);
+ UpdatePointer(p);
}
void VisitPointers(Object** start, Object** end) {
- for (Object** p = start; p < end; p++) {
- StaticPointersToNewGenUpdatingVisitor::VisitPointer(heap_, p);
- }
+ for (Object** p = start; p < end; p++) UpdatePointer(p);
+ }
+
+ void VisitEmbeddedPointer(RelocInfo* rinfo) {
+ ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
+ Object* target = rinfo->target_object();
+ VisitPointer(&target);
+ rinfo->set_target_object(target);
}
void VisitCodeTarget(RelocInfo* rinfo) {
@@ -2147,68 +2482,96 @@ class PointersToNewGenUpdatingVisitor: public ObjectVisitor {
rinfo->set_call_address(Code::cast(target)->instruction_start());
}
+ static inline void UpdateSlot(Heap* heap, Object** slot) {
+ Object* obj = *slot;
+
+ if (!obj->IsHeapObject()) return;
+
+ HeapObject* heap_obj = HeapObject::cast(obj);
+
+ MapWord map_word = heap_obj->map_word();
+ if (map_word.IsForwardingAddress()) {
+ ASSERT(heap->InFromSpace(heap_obj) ||
+ MarkCompactCollector::IsOnEvacuationCandidate(heap_obj));
+ HeapObject* target = map_word.ToForwardingAddress();
+ *slot = target;
+ ASSERT(!heap->InFromSpace(target) &&
+ !MarkCompactCollector::IsOnEvacuationCandidate(target));
+ }
+ }
+
private:
+ inline void UpdatePointer(Object** p) {
+ UpdateSlot(heap_, p);
+ }
+
Heap* heap_;
};
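
UpdateSlot above depends on the evacuation convention that a moved object's first word holds a forwarding address, so any stale slot can be repaired by following it. A minimal sketch of that convention; the low tag bit here is a hypothetical stand-in for V8's actual MapWord encoding, which distinguishes forwarding addresses differently:

    #include <cassert>
    #include <cstdint>

    // Hypothetical encoding: a set low bit means "this first word is a
    // forwarding address" rather than a map pointer.
    struct Header { uintptr_t word; };

    void UpdateSlot(Header** slot) {
      Header* obj = *slot;
      if (obj->word & 1u) {  // forwarded: repair the stale pointer
        *slot = reinterpret_cast<Header*>(obj->word & ~uintptr_t{1});
      }
    }

    int main() {
      Header new_copy{0};  // the moved-to copy of the object
      Header old_copy{reinterpret_cast<uintptr_t>(&new_copy) | 1u};
      Header* slot = &old_copy;
      UpdateSlot(&slot);
      assert(slot == &new_copy);
      return 0;
    }
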
-// Visitor for updating pointers from live objects in old spaces to new space.
-// It can encounter pointers to dead objects in new space when traversing map
-// space (see comment for MigrateObject).
-static void UpdatePointerToNewGen(HeapObject** p) {
- if (!(*p)->IsHeapObject()) return;
+static void UpdatePointer(HeapObject** p, HeapObject* object) {
+ ASSERT(*p == object);
- Address old_addr = (*p)->address();
- ASSERT(HEAP->InFromSpace(*p));
+ Address old_addr = object->address();
Address new_addr = Memory::Address_at(old_addr);
- if (new_addr == NULL) {
- // We encountered pointer to a dead object. Clear it so we will
- // not visit it again during next iteration of dirty regions.
- *p = NULL;
- } else {
+ // The new space sweep will overwrite the map word of dead objects
+ // with NULL. In this case we do not need to transfer this entry to
+ // the store buffer which we are rebuilding.
+ if (new_addr != NULL) {
*p = HeapObject::FromAddress(new_addr);
+ } else {
+ // We have to zap this pointer, because the store buffer may overflow later,
+ // and then we have to scan the entire heap and we don't want to find
+ // spurious new space pointers in the old space.
+ *p = reinterpret_cast<HeapObject*>(Smi::FromInt(0));
}
}
-static String* UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
- Object** p) {
- Address old_addr = HeapObject::cast(*p)->address();
- Address new_addr = Memory::Address_at(old_addr);
- return String::cast(HeapObject::FromAddress(new_addr));
+static String* UpdateReferenceInExternalStringTableEntry(Heap* heap,
+ Object** p) {
+ MapWord map_word = HeapObject::cast(*p)->map_word();
+
+ if (map_word.IsForwardingAddress()) {
+ return String::cast(map_word.ToForwardingAddress());
+ }
+
+ return String::cast(*p);
}
-static bool TryPromoteObject(Heap* heap, HeapObject* object, int object_size) {
+bool MarkCompactCollector::TryPromoteObject(HeapObject* object,
+ int object_size) {
Object* result;
- if (object_size > heap->MaxObjectSizeInPagedSpace()) {
+ if (object_size > heap()->MaxObjectSizeInPagedSpace()) {
MaybeObject* maybe_result =
- heap->lo_space()->AllocateRawFixedArray(object_size);
+ heap()->lo_space()->AllocateRaw(object_size, NOT_EXECUTABLE);
if (maybe_result->ToObject(&result)) {
HeapObject* target = HeapObject::cast(result);
- MigrateObject(heap, target->address(), object->address(), object_size,
- true);
- heap->mark_compact_collector()->tracer()->
+ MigrateObject(target->address(),
+ object->address(),
+ object_size,
+ LO_SPACE);
+ heap()->mark_compact_collector()->tracer()->
increment_promoted_objects_size(object_size);
return true;
}
} else {
- OldSpace* target_space = heap->TargetSpace(object);
+ OldSpace* target_space = heap()->TargetSpace(object);
- ASSERT(target_space == heap->old_pointer_space() ||
- target_space == heap->old_data_space());
+ ASSERT(target_space == heap()->old_pointer_space() ||
+ target_space == heap()->old_data_space());
MaybeObject* maybe_result = target_space->AllocateRaw(object_size);
if (maybe_result->ToObject(&result)) {
HeapObject* target = HeapObject::cast(result);
- MigrateObject(heap,
- target->address(),
+ MigrateObject(target->address(),
object->address(),
object_size,
- target_space == heap->old_pointer_space());
- heap->mark_compact_collector()->tracer()->
+ target_space->identity());
+ heap()->mark_compact_collector()->tracer()->
increment_promoted_objects_size(object_size);
return true;
}
@@ -2218,1145 +2581,1294 @@ static bool TryPromoteObject(Heap* heap, HeapObject* object, int object_size) {
}
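
TryPromoteObject routes a survivor by size and contents: objects too large for a paged space go to the large-object space, and the rest to either old pointer space or old data space. A toy routing function under those assumptions (names and threshold are illustrative only):

    #include <cstddef>

    enum Space { OLD_POINTER_SPACE, OLD_DATA_SPACE, LO_SPACE };

    Space ChooseTargetSpace(size_t object_size, bool may_contain_pointers,
                            size_t max_paged_object_size) {
      if (object_size > max_paged_object_size) return LO_SPACE;
      return may_contain_pointers ? OLD_POINTER_SPACE : OLD_DATA_SPACE;
    }
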
-static void SweepNewSpace(Heap* heap, NewSpace* space) {
- heap->CheckNewSpaceExpansionCriteria();
+void MarkCompactCollector::EvacuateNewSpace() {
+ // There are soft limits in the allocation code, designed to trigger a mark
+ // sweep collection by failing allocations. But since we are already in
+ // a mark-sweep allocation, there is no sense in trying to trigger one.
+ AlwaysAllocateScope scope;
+ heap()->CheckNewSpaceExpansionCriteria();
+
+ NewSpace* new_space = heap()->new_space();
- Address from_bottom = space->bottom();
- Address from_top = space->top();
+ // Store allocation range before flipping semispaces.
+ Address from_bottom = new_space->bottom();
+ Address from_top = new_space->top();
// Flip the semispaces. After flipping, to space is empty, from space has
// live objects.
- space->Flip();
- space->ResetAllocationInfo();
+ new_space->Flip();
+ new_space->ResetAllocationInfo();
- int size = 0;
int survivors_size = 0;
// First pass: traverse all objects in inactive semispace, remove marks,
- // migrate live objects and write forwarding addresses.
- for (Address current = from_bottom; current < from_top; current += size) {
- HeapObject* object = HeapObject::FromAddress(current);
-
- if (object->IsMarked()) {
- object->ClearMark();
- heap->mark_compact_collector()->tracer()->decrement_marked_count();
-
- size = object->Size();
+ // migrate live objects and write forwarding addresses. This stage puts
+ // new entries in the store buffer and may cause some pages to be marked
+ // scan-on-scavenge.
+ SemiSpaceIterator from_it(from_bottom, from_top);
+ for (HeapObject* object = from_it.Next();
+ object != NULL;
+ object = from_it.Next()) {
+ MarkBit mark_bit = Marking::MarkBitFrom(object);
+ if (mark_bit.Get()) {
+ mark_bit.Clear();
+ // Don't bother decrementing the live bytes count. We'll discard the
+ // entire page at the end.
+ int size = object->Size();
survivors_size += size;
// Aggressively promote young survivors to the old space.
- if (TryPromoteObject(heap, object, size)) {
+ if (TryPromoteObject(object, size)) {
continue;
}
// Promotion failed. Just migrate object to another semispace.
- // Allocation cannot fail at this point: semispaces are of equal size.
- Object* target = space->AllocateRaw(size)->ToObjectUnchecked();
+ MaybeObject* allocation = new_space->AllocateRaw(size);
+ if (allocation->IsFailure()) {
+ if (!new_space->AddFreshPage()) {
+ // Shouldn't happen. We are sweeping linearly, and to-space
+ // has the same number of pages as from-space, so there is
+ // always room.
+ UNREACHABLE();
+ }
+ allocation = new_space->AllocateRaw(size);
+ ASSERT(!allocation->IsFailure());
+ }
+ Object* target = allocation->ToObjectUnchecked();
- MigrateObject(heap,
- HeapObject::cast(target)->address(),
- current,
+ MigrateObject(HeapObject::cast(target)->address(),
+ object->address(),
size,
- false);
+ NEW_SPACE);
} else {
// Process the dead object before we write a NULL into its header.
LiveObjectList::ProcessNonLive(object);
- size = object->Size();
- Memory::Address_at(current) = NULL;
+ // Mark dead objects in the new space with null in their map field.
+ Memory::Address_at(object->address()) = NULL;
}
}
- // Second pass: find pointers to new space and update them.
- PointersToNewGenUpdatingVisitor updating_visitor(heap);
-
- // Update pointers in to space.
- Address current = space->bottom();
- while (current < space->top()) {
- HeapObject* object = HeapObject::FromAddress(current);
- current +=
- StaticPointersToNewGenUpdatingVisitor::IterateBody(object->map(),
- object);
- }
-
- // Update roots.
- heap->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE);
- LiveObjectList::IterateElements(&updating_visitor);
-
- // Update pointers in old spaces.
- heap->IterateDirtyRegions(heap->old_pointer_space(),
- &Heap::IteratePointersInDirtyRegion,
- &UpdatePointerToNewGen,
- heap->WATERMARK_SHOULD_BE_VALID);
-
- heap->lo_space()->IterateDirtyRegions(&UpdatePointerToNewGen);
-
- // Update pointers from cells.
- HeapObjectIterator cell_iterator(heap->cell_space());
- for (HeapObject* cell = cell_iterator.next();
- cell != NULL;
- cell = cell_iterator.next()) {
- if (cell->IsJSGlobalPropertyCell()) {
- Address value_address =
- reinterpret_cast<Address>(cell) +
- (JSGlobalPropertyCell::kValueOffset - kHeapObjectTag);
- updating_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
- }
- }
-
- // Update pointer from the global contexts list.
- updating_visitor.VisitPointer(heap->global_contexts_list_address());
-
- // Update pointers from external string table.
- heap->UpdateNewSpaceReferencesInExternalStringTable(
- &UpdateNewSpaceReferenceInExternalStringTableEntry);
-
- // All pointers were updated. Update auxiliary allocation info.
- heap->IncrementYoungSurvivorsCounter(survivors_size);
- space->set_age_mark(space->top());
-
- // Update JSFunction pointers from the runtime profiler.
- heap->isolate()->runtime_profiler()->UpdateSamplesAfterScavenge();
+ heap_->IncrementYoungSurvivorsCounter(survivors_size);
+ new_space->set_age_mark(new_space->top());
}
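
EvacuateNewSpace follows the classic semispace discipline: flip the spaces, then walk the old from-space, promoting or copying each marked object and writing a forwarding address (or NULL for dead objects) into its first word. A condensed, self-contained sketch of that first pass, with hypothetical types; reserving up front keeps the recorded addresses stable:

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    struct Obj { uintptr_t first_word; bool marked; };

    void EvacuatePass(std::vector<Obj*>& from_space,
                      std::vector<Obj>& to_space) {
      // Reserve so push_back never reallocates and invalidates the
      // forwarding addresses recorded earlier in the loop.
      to_space.reserve(to_space.size() + from_space.size());
      for (Obj* o : from_space) {
        if (o->marked) {
          o->marked = false;           // clear the mark while moving
          to_space.push_back(*o);      // "migrate" the live object
          o->first_word =
              reinterpret_cast<uintptr_t>(&to_space.back());
        } else {
          o->first_word = 0;           // dead: NULL forwarding address
        }
      }
    }
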
-static void SweepSpace(Heap* heap, PagedSpace* space) {
- PageIterator it(space, PageIterator::PAGES_IN_USE);
-
- // During sweeping of paged space we are trying to find longest sequences
- // of pages without live objects and free them (instead of putting them on
- // the free list).
+void MarkCompactCollector::EvacuateLiveObjectsFromPage(Page* p) {
+ AlwaysAllocateScope always_allocate;
+ PagedSpace* space = static_cast<PagedSpace*>(p->owner());
+ ASSERT(p->IsEvacuationCandidate() && !p->WasSwept());
+ MarkBit::CellType* cells = p->markbits()->cells();
+ p->MarkSweptPrecisely();
- // Page preceding current.
- Page* prev = Page::FromAddress(NULL);
+ int last_cell_index =
+ Bitmap::IndexToCell(
+ Bitmap::CellAlignIndex(
+ p->AddressToMarkbitIndex(p->ObjectAreaEnd())));
- // First empty page in a sequence.
- Page* first_empty_page = Page::FromAddress(NULL);
+ int cell_index = Page::kFirstUsedCell;
+ Address cell_base = p->ObjectAreaStart();
+ int offsets[16];
- // Page preceding first empty page.
- Page* prec_first_empty_page = Page::FromAddress(NULL);
+ for (cell_index = Page::kFirstUsedCell;
+ cell_index < last_cell_index;
+ cell_index++, cell_base += 32 * kPointerSize) {
+ ASSERT((unsigned)cell_index ==
+ Bitmap::IndexToCell(
+ Bitmap::CellAlignIndex(
+ p->AddressToMarkbitIndex(cell_base))));
+ if (cells[cell_index] == 0) continue;
- // If last used page of space ends with a sequence of dead objects
- // we can adjust allocation top instead of puting this free area into
- // the free list. Thus during sweeping we keep track of such areas
- // and defer their deallocation until the sweeping of the next page
- // is done: if one of the next pages contains live objects we have
- // to put such area into the free list.
- Address last_free_start = NULL;
- int last_free_size = 0;
+ int live_objects = MarkWordToObjectStarts(cells[cell_index], offsets);
+ for (int i = 0; i < live_objects; i++) {
+ Address object_addr = cell_base + offsets[i] * kPointerSize;
+ HeapObject* object = HeapObject::FromAddress(object_addr);
+ ASSERT(Marking::IsBlack(Marking::MarkBitFrom(object)));
- while (it.has_next()) {
- Page* p = it.next();
+ int size = object->Size();
- bool is_previous_alive = true;
- Address free_start = NULL;
- HeapObject* object;
-
- for (Address current = p->ObjectAreaStart();
- current < p->AllocationTop();
- current += object->Size()) {
- object = HeapObject::FromAddress(current);
- if (object->IsMarked()) {
- object->ClearMark();
- heap->mark_compact_collector()->tracer()->decrement_marked_count();
-
- if (!is_previous_alive) { // Transition from free to live.
- space->DeallocateBlock(free_start,
- static_cast<int>(current - free_start),
- true);
- is_previous_alive = true;
- }
- } else {
- heap->mark_compact_collector()->ReportDeleteIfNeeded(
- object, heap->isolate());
- if (is_previous_alive) { // Transition from live to free.
- free_start = current;
- is_previous_alive = false;
- }
- LiveObjectList::ProcessNonLive(object);
- }
- // The object is now unmarked for the call to Size() at the top of the
- // loop.
- }
-
- bool page_is_empty = (p->ObjectAreaStart() == p->AllocationTop())
- || (!is_previous_alive && free_start == p->ObjectAreaStart());
-
- if (page_is_empty) {
- // This page is empty. Check whether we are in the middle of
- // sequence of empty pages and start one if not.
- if (!first_empty_page->is_valid()) {
- first_empty_page = p;
- prec_first_empty_page = prev;
- }
-
- if (!is_previous_alive) {
- // There are dead objects on this page. Update space accounting stats
- // without putting anything into free list.
- int size_in_bytes = static_cast<int>(p->AllocationTop() - free_start);
- if (size_in_bytes > 0) {
- space->DeallocateBlock(free_start, size_in_bytes, false);
- }
- }
- } else {
- // This page is not empty. Sequence of empty pages ended on the previous
- // one.
- if (first_empty_page->is_valid()) {
- space->FreePages(prec_first_empty_page, prev);
- prec_first_empty_page = first_empty_page = Page::FromAddress(NULL);
+ MaybeObject* target = space->AllocateRaw(size);
+ if (target->IsFailure()) {
+ // OS refused to give us memory.
+ V8::FatalProcessOutOfMemory("Evacuation");
+ return;
}
- // If there is a free ending area on one of the previous pages we have
- // deallocate that area and put it on the free list.
- if (last_free_size > 0) {
- Page::FromAddress(last_free_start)->
- SetAllocationWatermark(last_free_start);
- space->DeallocateBlock(last_free_start, last_free_size, true);
- last_free_start = NULL;
- last_free_size = 0;
- }
+ Object* target_object = target->ToObjectUnchecked();
- // If the last region of this page was not live we remember it.
- if (!is_previous_alive) {
- ASSERT(last_free_size == 0);
- last_free_size = static_cast<int>(p->AllocationTop() - free_start);
- last_free_start = free_start;
- }
+ MigrateObject(HeapObject::cast(target_object)->address(),
+ object_addr,
+ size,
+ space->identity());
+ ASSERT(object->map_word().IsForwardingAddress());
}
- prev = p;
- }
-
- // We reached end of space. See if we need to adjust allocation top.
- Address new_allocation_top = NULL;
-
- if (first_empty_page->is_valid()) {
- // Last used pages in space are empty. We can move allocation top backwards
- // to the beginning of first empty page.
- ASSERT(prev == space->AllocationTopPage());
-
- new_allocation_top = first_empty_page->ObjectAreaStart();
+ // Clear marking bits for current cell.
+ cells[cell_index] = 0;
}
+ p->ResetLiveBytes();
+}
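+
+// Illustrative sketch, not part of this patch: the loop above advances
+// cell_base by 32 * kPointerSize per cell because each bit of a 32-bit
+// mark-bitmap cell covers exactly one word. SketchCellBase is a
+// hypothetical helper (unused by V8) modelling that address arithmetic.
+static inline Address SketchCellBase(Address area_start, int cell_index) {
+  // Cell i describes the 32 words starting at this address.
+  return area_start + cell_index * 32 * kPointerSize;
+}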
- if (last_free_size > 0) {
- // There was a free ending area on the previous page.
- // Deallocate it without putting it into freelist and move allocation
- // top to the beginning of this free area.
- space->DeallocateBlock(last_free_start, last_free_size, false);
- new_allocation_top = last_free_start;
- }
- if (new_allocation_top != NULL) {
-#ifdef DEBUG
- Page* new_allocation_top_page = Page::FromAllocationTop(new_allocation_top);
- if (!first_empty_page->is_valid()) {
- ASSERT(new_allocation_top_page == space->AllocationTopPage());
- } else if (last_free_size > 0) {
- ASSERT(new_allocation_top_page == prec_first_empty_page);
- } else {
- ASSERT(new_allocation_top_page == first_empty_page);
+void MarkCompactCollector::EvacuatePages() {
+ int npages = evacuation_candidates_.length();
+ for (int i = 0; i < npages; i++) {
+ Page* p = evacuation_candidates_[i];
+ ASSERT(p->IsEvacuationCandidate() ||
+ p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
+ if (p->IsEvacuationCandidate()) {
+ // During compaction we might have to request a new page.
+      // Check that the space still has room for that.
+ if (static_cast<PagedSpace*>(p->owner())->CanExpand()) {
+ EvacuateLiveObjectsFromPage(p);
+ } else {
+        // Without room for expansion, evacuation is not guaranteed to succeed.
+        // Pessimistically abandon the unevacuated pages.
+ for (int j = i; j < npages; j++) {
+ Page* page = evacuation_candidates_[j];
+ slots_buffer_allocator_.DeallocateChain(page->slots_buffer_address());
+ page->ClearEvacuationCandidate();
+ page->SetFlag(Page::RESCAN_ON_EVACUATION);
+ }
+ return;
+ }
}
-#endif
-
- space->SetTop(new_allocation_top);
}
}
-void MarkCompactCollector::EncodeForwardingAddresses() {
- ASSERT(state_ == ENCODE_FORWARDING_ADDRESSES);
- // Objects in the active semispace of the young generation may be
- // relocated to the inactive semispace (if not promoted). Set the
- // relocation info to the beginning of the inactive semispace.
- heap()->new_space()->MCResetRelocationInfo();
-
- // Compute the forwarding pointers in each space.
- EncodeForwardingAddressesInPagedSpace<MCAllocateFromOldPointerSpace,
- ReportDeleteIfNeeded>(
- heap()->old_pointer_space());
-
- EncodeForwardingAddressesInPagedSpace<MCAllocateFromOldDataSpace,
- IgnoreNonLiveObject>(
- heap()->old_data_space());
-
- EncodeForwardingAddressesInPagedSpace<MCAllocateFromCodeSpace,
- ReportDeleteIfNeeded>(
- heap()->code_space());
-
- EncodeForwardingAddressesInPagedSpace<MCAllocateFromCellSpace,
- IgnoreNonLiveObject>(
- heap()->cell_space());
-
-
- // Compute new space next to last after the old and code spaces have been
- // compacted. Objects in new space can be promoted to old or code space.
- EncodeForwardingAddressesInNewSpace();
-
- // Compute map space last because computing forwarding addresses
- // overwrites non-live objects. Objects in the other spaces rely on
- // non-live map pointers to get the sizes of non-live objects.
- EncodeForwardingAddressesInPagedSpace<MCAllocateFromMapSpace,
- IgnoreNonLiveObject>(
- heap()->map_space());
-
- // Write relocation info to the top page, so we can use it later. This is
- // done after promoting objects from the new space so we get the correct
- // allocation top.
- heap()->old_pointer_space()->MCWriteRelocationInfoToPage();
- heap()->old_data_space()->MCWriteRelocationInfoToPage();
- heap()->code_space()->MCWriteRelocationInfoToPage();
- heap()->map_space()->MCWriteRelocationInfoToPage();
- heap()->cell_space()->MCWriteRelocationInfoToPage();
-}
-
-
-class MapIterator : public HeapObjectIterator {
+class EvacuationWeakObjectRetainer : public WeakObjectRetainer {
public:
- explicit MapIterator(Heap* heap)
- : HeapObjectIterator(heap->map_space(), &SizeCallback) { }
-
- MapIterator(Heap* heap, Address start)
- : HeapObjectIterator(heap->map_space(), start, &SizeCallback) { }
-
- private:
- static int SizeCallback(HeapObject* unused) {
- USE(unused);
- return Map::kSize;
+ virtual Object* RetainAs(Object* object) {
+ if (object->IsHeapObject()) {
+ HeapObject* heap_object = HeapObject::cast(object);
+ MapWord map_word = heap_object->map_word();
+ if (map_word.IsForwardingAddress()) {
+ return map_word.ToForwardingAddress();
+ }
+ }
+ return object;
}
};
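+
+// Illustrative note, not part of this patch: the retainer above works
+// because an evacuated object's old copy keeps a forwarding address in its
+// map-word slot (see MigrateObject). A simplified, hypothetical model of
+// such an encoding (V8's actual MapWord tagging differs):
+//
+//   struct Shadow { uintptr_t map_word; };  // map pointer or forwarding
+//   bool IsForwarded(Shadow* s) { return (s->map_word & 1) != 0; }
+//   Shadow* Forwarded(Shadow* s) {
+//     return reinterpret_cast<Shadow*>(s->map_word & ~uintptr_t(1));
+//   }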
-class MapCompact {
- public:
- explicit MapCompact(Heap* heap, int live_maps)
- : heap_(heap),
- live_maps_(live_maps),
- to_evacuate_start_(heap->map_space()->TopAfterCompaction(live_maps)),
- vacant_map_it_(heap),
- map_to_evacuate_it_(heap, to_evacuate_start_),
- first_map_to_evacuate_(
- reinterpret_cast<Map*>(HeapObject::FromAddress(to_evacuate_start_))) {
- }
-
- void CompactMaps() {
- // As we know the number of maps to evacuate beforehand,
- // we stop then there is no more vacant maps.
- for (Map* next_vacant_map = NextVacantMap();
- next_vacant_map;
- next_vacant_map = NextVacantMap()) {
- EvacuateMap(next_vacant_map, NextMapToEvacuate());
+static inline void UpdateSlot(ObjectVisitor* v,
+ SlotsBuffer::SlotType slot_type,
+ Address addr) {
+ switch (slot_type) {
+ case SlotsBuffer::CODE_TARGET_SLOT: {
+ RelocInfo rinfo(addr, RelocInfo::CODE_TARGET, 0, NULL);
+ rinfo.Visit(v);
+ break;
}
-
-#ifdef DEBUG
- CheckNoMapsToEvacuate();
-#endif
- }
-
- void UpdateMapPointersInRoots() {
- MapUpdatingVisitor map_updating_visitor;
- heap()->IterateRoots(&map_updating_visitor, VISIT_ONLY_STRONG);
- heap()->isolate()->global_handles()->IterateWeakRoots(
- &map_updating_visitor);
- LiveObjectList::IterateElements(&map_updating_visitor);
- }
-
- void UpdateMapPointersInPagedSpace(PagedSpace* space) {
- ASSERT(space != heap()->map_space());
-
- PageIterator it(space, PageIterator::PAGES_IN_USE);
- while (it.has_next()) {
- Page* p = it.next();
- UpdateMapPointersInRange(heap(),
- p->ObjectAreaStart(),
- p->AllocationTop());
+ case SlotsBuffer::CODE_ENTRY_SLOT: {
+ v->VisitCodeEntry(addr);
+ break;
}
+ case SlotsBuffer::RELOCATED_CODE_OBJECT: {
+ HeapObject* obj = HeapObject::FromAddress(addr);
+ Code::cast(obj)->CodeIterateBody(v);
+ break;
+ }
+ case SlotsBuffer::DEBUG_TARGET_SLOT: {
+ RelocInfo rinfo(addr, RelocInfo::DEBUG_BREAK_SLOT, 0, NULL);
+ if (rinfo.IsPatchedDebugBreakSlotSequence()) rinfo.Visit(v);
+ break;
+ }
+ case SlotsBuffer::JS_RETURN_SLOT: {
+ RelocInfo rinfo(addr, RelocInfo::JS_RETURN, 0, NULL);
+ if (rinfo.IsPatchedReturnSequence()) rinfo.Visit(v);
+ break;
+ }
+ case SlotsBuffer::EMBEDDED_OBJECT_SLOT: {
+ RelocInfo rinfo(addr, RelocInfo::EMBEDDED_OBJECT, 0, NULL);
+ rinfo.Visit(v);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
}
+}
- void UpdateMapPointersInNewSpace() {
- NewSpace* space = heap()->new_space();
- UpdateMapPointersInRange(heap(), space->bottom(), space->top());
- }
-
- void UpdateMapPointersInLargeObjectSpace() {
- LargeObjectIterator it(heap()->lo_space());
- for (HeapObject* obj = it.next(); obj != NULL; obj = it.next())
- UpdateMapPointersInObject(heap(), obj);
- }
-
- void Finish() {
- heap()->map_space()->FinishCompaction(to_evacuate_start_, live_maps_);
- }
-
- inline Heap* heap() const { return heap_; }
-
- private:
- Heap* heap_;
- int live_maps_;
- Address to_evacuate_start_;
- MapIterator vacant_map_it_;
- MapIterator map_to_evacuate_it_;
- Map* first_map_to_evacuate_;
-
- // Helper class for updating map pointers in HeapObjects.
- class MapUpdatingVisitor: public ObjectVisitor {
- public:
- MapUpdatingVisitor() {}
- void VisitPointer(Object** p) {
- UpdateMapPointer(p);
- }
-
- void VisitPointers(Object** start, Object** end) {
- for (Object** p = start; p < end; p++) UpdateMapPointer(p);
- }
+enum SweepingMode {
+ SWEEP_ONLY,
+ SWEEP_AND_VISIT_LIVE_OBJECTS
+};
- private:
- void UpdateMapPointer(Object** p) {
- if (!(*p)->IsHeapObject()) return;
- HeapObject* old_map = reinterpret_cast<HeapObject*>(*p);
- // Moved maps are tagged with overflowed map word. They are the only
- // objects those map word is overflowed as marking is already complete.
- MapWord map_word = old_map->map_word();
- if (!map_word.IsOverflowed()) return;
+enum SkipListRebuildingMode {
+ REBUILD_SKIP_LIST,
+ IGNORE_SKIP_LIST
+};
- *p = GetForwardedMap(map_word);
- }
- };
- static Map* NextMap(MapIterator* it, HeapObject* last, bool live) {
- while (true) {
- HeapObject* next = it->next();
- ASSERT(next != NULL);
- if (next == last)
- return NULL;
- ASSERT(!next->IsOverflowed());
- ASSERT(!next->IsMarked());
- ASSERT(next->IsMap() || FreeListNode::IsFreeListNode(next));
- if (next->IsMap() == live)
- return reinterpret_cast<Map*>(next);
+// Sweeps a space precisely. After this has been done, the space can be
+// iterated precisely, hitting only the live objects. Code space is always
+// swept precisely because we want to be able to iterate over it. Map space
+// is swept precisely because it is not compacted. Slots in live objects
+// pointing into evacuation candidates are updated if requested.
+template<SweepingMode sweeping_mode, SkipListRebuildingMode skip_list_mode>
+static void SweepPrecisely(PagedSpace* space,
+ Page* p,
+ ObjectVisitor* v) {
+ ASSERT(!p->IsEvacuationCandidate() && !p->WasSwept());
+ ASSERT_EQ(skip_list_mode == REBUILD_SKIP_LIST,
+ space->identity() == CODE_SPACE);
+ ASSERT((p->skip_list() == NULL) || (skip_list_mode == REBUILD_SKIP_LIST));
+
+ MarkBit::CellType* cells = p->markbits()->cells();
+ p->MarkSweptPrecisely();
+
+ int last_cell_index =
+ Bitmap::IndexToCell(
+ Bitmap::CellAlignIndex(
+ p->AddressToMarkbitIndex(p->ObjectAreaEnd())));
+
+ int cell_index = Page::kFirstUsedCell;
+ Address free_start = p->ObjectAreaStart();
+ ASSERT(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0);
+ Address object_address = p->ObjectAreaStart();
+ int offsets[16];
+
+ SkipList* skip_list = p->skip_list();
+ int curr_region = -1;
+ if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list) {
+ skip_list->Clear();
+ }
+
+ for (cell_index = Page::kFirstUsedCell;
+ cell_index < last_cell_index;
+ cell_index++, object_address += 32 * kPointerSize) {
+ ASSERT((unsigned)cell_index ==
+ Bitmap::IndexToCell(
+ Bitmap::CellAlignIndex(
+ p->AddressToMarkbitIndex(object_address))));
+ int live_objects = MarkWordToObjectStarts(cells[cell_index], offsets);
+ int live_index = 0;
+ for ( ; live_objects != 0; live_objects--) {
+ Address free_end = object_address + offsets[live_index++] * kPointerSize;
+ if (free_end != free_start) {
+ space->Free(free_start, static_cast<int>(free_end - free_start));
+ }
+ HeapObject* live_object = HeapObject::FromAddress(free_end);
+ ASSERT(Marking::IsBlack(Marking::MarkBitFrom(live_object)));
+ Map* map = live_object->map();
+ int size = live_object->SizeFromMap(map);
+ if (sweeping_mode == SWEEP_AND_VISIT_LIVE_OBJECTS) {
+ live_object->IterateBody(map->instance_type(), size, v);
+ }
+ if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list != NULL) {
+ int new_region_start =
+ SkipList::RegionNumber(free_end);
+ int new_region_end =
+ SkipList::RegionNumber(free_end + size - kPointerSize);
+ if (new_region_start != curr_region ||
+ new_region_end != curr_region) {
+ skip_list->AddObject(free_end, size);
+ curr_region = new_region_end;
+ }
+ }
+ free_start = free_end + size;
}
+ // Clear marking bits for current cell.
+ cells[cell_index] = 0;
}
-
- Map* NextVacantMap() {
- Map* map = NextMap(&vacant_map_it_, first_map_to_evacuate_, false);
- ASSERT(map == NULL || FreeListNode::IsFreeListNode(map));
- return map;
+ if (free_start != p->ObjectAreaEnd()) {
+ space->Free(free_start, static_cast<int>(p->ObjectAreaEnd() - free_start));
}
+ p->ResetLiveBytes();
+}
- Map* NextMapToEvacuate() {
- Map* map = NextMap(&map_to_evacuate_it_, NULL, true);
- ASSERT(map != NULL);
- ASSERT(map->IsMap());
- return map;
- }
- static void EvacuateMap(Map* vacant_map, Map* map_to_evacuate) {
- ASSERT(FreeListNode::IsFreeListNode(vacant_map));
- ASSERT(map_to_evacuate->IsMap());
+static bool SetMarkBitsUnderInvalidatedCode(Code* code, bool value) {
+ Page* p = Page::FromAddress(code->address());
- ASSERT(Map::kSize % 4 == 0);
+ if (p->IsEvacuationCandidate() ||
+ p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
+ return false;
+ }
- map_to_evacuate->heap()->CopyBlockToOldSpaceAndUpdateRegionMarks(
- vacant_map->address(), map_to_evacuate->address(), Map::kSize);
+ Address code_start = code->address();
+ Address code_end = code_start + code->Size();
- ASSERT(vacant_map->IsMap()); // Due to memcpy above.
+ uint32_t start_index = MemoryChunk::FastAddressToMarkbitIndex(code_start);
+ uint32_t end_index =
+ MemoryChunk::FastAddressToMarkbitIndex(code_end - kPointerSize);
- MapWord forwarding_map_word = MapWord::FromMap(vacant_map);
- forwarding_map_word.SetOverflow();
- map_to_evacuate->set_map_word(forwarding_map_word);
+ Bitmap* b = p->markbits();
- ASSERT(map_to_evacuate->map_word().IsOverflowed());
- ASSERT(GetForwardedMap(map_to_evacuate->map_word()) == vacant_map);
- }
+ MarkBit start_mark_bit = b->MarkBitFromIndex(start_index);
+ MarkBit end_mark_bit = b->MarkBitFromIndex(end_index);
- static Map* GetForwardedMap(MapWord map_word) {
- ASSERT(map_word.IsOverflowed());
- map_word.ClearOverflow();
- Map* new_map = map_word.ToMap();
- ASSERT_MAP_ALIGNED(new_map->address());
- return new_map;
- }
+ MarkBit::CellType* start_cell = start_mark_bit.cell();
+ MarkBit::CellType* end_cell = end_mark_bit.cell();
- static int UpdateMapPointersInObject(Heap* heap, HeapObject* obj) {
- ASSERT(!obj->IsMarked());
- Map* map = obj->map();
- ASSERT(heap->map_space()->Contains(map));
- MapWord map_word = map->map_word();
- ASSERT(!map_word.IsMarked());
- if (map_word.IsOverflowed()) {
- Map* new_map = GetForwardedMap(map_word);
- ASSERT(heap->map_space()->Contains(new_map));
- obj->set_map(new_map);
+ if (value) {
+ MarkBit::CellType start_mask = ~(start_mark_bit.mask() - 1);
+ MarkBit::CellType end_mask = (end_mark_bit.mask() << 1) - 1;
-#ifdef DEBUG
- if (FLAG_gc_verbose) {
- PrintF("update %p : %p -> %p\n",
- obj->address(),
- reinterpret_cast<void*>(map),
- reinterpret_cast<void*>(new_map));
+ if (start_cell == end_cell) {
+ *start_cell |= start_mask & end_mask;
+ } else {
+ *start_cell |= start_mask;
+ for (MarkBit::CellType* cell = start_cell + 1; cell < end_cell; cell++) {
+ *cell = ~0;
}
-#endif
+ *end_cell |= end_mask;
}
-
- int size = obj->SizeFromMap(map);
- MapUpdatingVisitor map_updating_visitor;
- obj->IterateBody(map->instance_type(), size, &map_updating_visitor);
- return size;
- }
-
- static void UpdateMapPointersInRange(Heap* heap, Address start, Address end) {
- HeapObject* object;
- int size;
- for (Address current = start; current < end; current += size) {
- object = HeapObject::FromAddress(current);
- size = UpdateMapPointersInObject(heap, object);
- ASSERT(size > 0);
+ } else {
+ for (MarkBit::CellType* cell = start_cell ; cell <= end_cell; cell++) {
+ *cell = 0;
}
}
-#ifdef DEBUG
- void CheckNoMapsToEvacuate() {
- if (!FLAG_enable_slow_asserts)
- return;
+ return true;
+}
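+
+// Illustrative sketch, not part of this patch: the mask arithmetic above.
+// For a single-bit mask m, ~(m - 1) selects m and every higher bit, while
+// ((m << 1) - 1) selects m and every lower bit; ANDing both isolates the
+// inclusive bit range within one cell. RangeMask is a hypothetical helper.
+static inline uint32_t RangeMask(int start_bit, int end_bit) {
+  uint32_t start_mask = ~((1u << start_bit) - 1);  // bits start_bit..31
+  uint32_t end_mask = ((1u << end_bit) << 1) - 1;  // bits 0..end_bit
+  return start_mask & end_mask;                    // bits start_bit..end_bit
+}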
- for (HeapObject* obj = map_to_evacuate_it_.next();
- obj != NULL; obj = map_to_evacuate_it_.next())
- ASSERT(FreeListNode::IsFreeListNode(obj));
- }
-#endif
-};
+static bool IsOnInvalidatedCodeObject(Address addr) {
+  // We did not record any slots in large objects, so we can safely go
+  // from the slot address to its page.
+ Page* p = Page::FromAddress(addr);
-void MarkCompactCollector::SweepSpaces() {
- GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP);
+  // First check the owner's identity, because the old pointer and old data
+  // spaces are swept lazily and might still have non-zero mark-bits on some
+ // pages.
+ if (p->owner()->identity() != CODE_SPACE) return false;
- ASSERT(state_ == SWEEP_SPACES);
- ASSERT(!IsCompacting());
- // Noncompacting collections simply sweep the spaces to clear the mark
- // bits and free the nonlive blocks (for old and map spaces). We sweep
- // the map space last because freeing non-live maps overwrites them and
- // the other spaces rely on possibly non-live maps to get the sizes for
- // non-live objects.
- SweepSpace(heap(), heap()->old_pointer_space());
- SweepSpace(heap(), heap()->old_data_space());
- SweepSpace(heap(), heap()->code_space());
- SweepSpace(heap(), heap()->cell_space());
- { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP_NEWSPACE);
- SweepNewSpace(heap(), heap()->new_space());
- }
- SweepSpace(heap(), heap()->map_space());
-
- heap()->IterateDirtyRegions(heap()->map_space(),
- &heap()->IteratePointersInDirtyMapsRegion,
- &UpdatePointerToNewGen,
- heap()->WATERMARK_SHOULD_BE_VALID);
+  // In code space, only mark-bits on evacuation candidates (on which we do
+  // not record any slots) and under invalidated code objects are non-zero.
+ MarkBit mark_bit =
+ p->markbits()->MarkBitFromIndex(Page::FastAddressToMarkbitIndex(addr));
- intptr_t live_maps_size = heap()->map_space()->Size();
- int live_maps = static_cast<int>(live_maps_size / Map::kSize);
- ASSERT(live_map_objects_size_ == live_maps_size);
+ return mark_bit.Get();
+}
- if (heap()->map_space()->NeedsCompaction(live_maps)) {
- MapCompact map_compact(heap(), live_maps);
- map_compact.CompactMaps();
- map_compact.UpdateMapPointersInRoots();
+void MarkCompactCollector::InvalidateCode(Code* code) {
+ if (heap_->incremental_marking()->IsCompacting() &&
+ !ShouldSkipEvacuationSlotRecording(code)) {
+ ASSERT(compacting_);
- PagedSpaces spaces;
- for (PagedSpace* space = spaces.next();
- space != NULL; space = spaces.next()) {
- if (space == heap()->map_space()) continue;
- map_compact.UpdateMapPointersInPagedSpace(space);
- }
- map_compact.UpdateMapPointersInNewSpace();
- map_compact.UpdateMapPointersInLargeObjectSpace();
+    // If the object is white, then no slots have been recorded on it yet.
+ MarkBit mark_bit = Marking::MarkBitFrom(code);
+ if (Marking::IsWhite(mark_bit)) return;
- map_compact.Finish();
+ invalidated_code_.Add(code);
}
}
-// Iterate the live objects in a range of addresses (eg, a page or a
-// semispace). The live regions of the range have been linked into a list.
-// The first live region is [first_live_start, first_live_end), and the last
-// address in the range is top. The callback function is used to get the
-// size of each live object.
-int MarkCompactCollector::IterateLiveObjectsInRange(
- Address start,
- Address end,
- LiveObjectCallback size_func) {
- int live_objects_size = 0;
- Address current = start;
- while (current < end) {
- uint32_t encoded_map = Memory::uint32_at(current);
- if (encoded_map == kSingleFreeEncoding) {
- current += kPointerSize;
- } else if (encoded_map == kMultiFreeEncoding) {
- current += Memory::int_at(current + kIntSize);
- } else {
- int size = (this->*size_func)(HeapObject::FromAddress(current));
- current += size;
- live_objects_size += size;
+bool MarkCompactCollector::MarkInvalidatedCode() {
+ bool code_marked = false;
+
+ int length = invalidated_code_.length();
+ for (int i = 0; i < length; i++) {
+ Code* code = invalidated_code_[i];
+
+ if (SetMarkBitsUnderInvalidatedCode(code, true)) {
+ code_marked = true;
}
}
- return live_objects_size;
+
+ return code_marked;
}
-int MarkCompactCollector::IterateLiveObjects(
- NewSpace* space, LiveObjectCallback size_f) {
- ASSERT(MARK_LIVE_OBJECTS < state_ && state_ <= RELOCATE_OBJECTS);
- return IterateLiveObjectsInRange(space->bottom(), space->top(), size_f);
+void MarkCompactCollector::RemoveDeadInvalidatedCode() {
+ int length = invalidated_code_.length();
+ for (int i = 0; i < length; i++) {
+ if (!IsMarked(invalidated_code_[i])) invalidated_code_[i] = NULL;
+ }
}
-int MarkCompactCollector::IterateLiveObjects(
- PagedSpace* space, LiveObjectCallback size_f) {
- ASSERT(MARK_LIVE_OBJECTS < state_ && state_ <= RELOCATE_OBJECTS);
- int total = 0;
- PageIterator it(space, PageIterator::PAGES_IN_USE);
- while (it.has_next()) {
- Page* p = it.next();
- total += IterateLiveObjectsInRange(p->ObjectAreaStart(),
- p->AllocationTop(),
- size_f);
+void MarkCompactCollector::ProcessInvalidatedCode(ObjectVisitor* visitor) {
+ int length = invalidated_code_.length();
+ for (int i = 0; i < length; i++) {
+ Code* code = invalidated_code_[i];
+ if (code != NULL) {
+ code->Iterate(visitor);
+ SetMarkBitsUnderInvalidatedCode(code, false);
+ }
}
- return total;
+ invalidated_code_.Rewind(0);
}
-// -------------------------------------------------------------------------
-// Phase 3: Update pointers
-
-// Helper class for updating pointers in HeapObjects.
-class UpdatingVisitor: public ObjectVisitor {
- public:
- explicit UpdatingVisitor(Heap* heap) : heap_(heap) {}
+void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
+ bool code_slots_filtering_required;
+ { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP_NEWSPACE);
+ code_slots_filtering_required = MarkInvalidatedCode();
- void VisitPointer(Object** p) {
- UpdatePointer(p);
+ EvacuateNewSpace();
}
- void VisitPointers(Object** start, Object** end) {
- // Mark all HeapObject pointers in [start, end)
- for (Object** p = start; p < end; p++) UpdatePointer(p);
- }
- void VisitCodeTarget(RelocInfo* rinfo) {
- ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
- Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
- VisitPointer(&target);
- rinfo->set_target_address(
- reinterpret_cast<Code*>(target)->instruction_start());
+ { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_EVACUATE_PAGES);
+ EvacuatePages();
}
- void VisitDebugTarget(RelocInfo* rinfo) {
- ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) &&
- rinfo->IsPatchedReturnSequence()) ||
- (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
- rinfo->IsPatchedDebugBreakSlotSequence()));
- Object* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
- VisitPointer(&target);
- rinfo->set_call_address(
- reinterpret_cast<Code*>(target)->instruction_start());
+ // Second pass: find pointers to new space and update them.
+ PointersUpdatingVisitor updating_visitor(heap());
+
+ { GCTracer::Scope gc_scope(tracer_,
+ GCTracer::Scope::MC_UPDATE_NEW_TO_NEW_POINTERS);
+    // Update pointers in to-space.
+ SemiSpaceIterator to_it(heap()->new_space()->bottom(),
+ heap()->new_space()->top());
+ for (HeapObject* object = to_it.Next();
+ object != NULL;
+ object = to_it.Next()) {
+ Map* map = object->map();
+ object->IterateBody(map->instance_type(),
+ object->SizeFromMap(map),
+ &updating_visitor);
+ }
+ }
+
+ { GCTracer::Scope gc_scope(tracer_,
+ GCTracer::Scope::MC_UPDATE_ROOT_TO_NEW_POINTERS);
+ // Update roots.
+ heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE);
+ LiveObjectList::IterateElements(&updating_visitor);
+ }
+
+ { GCTracer::Scope gc_scope(tracer_,
+ GCTracer::Scope::MC_UPDATE_OLD_TO_NEW_POINTERS);
+ StoreBufferRebuildScope scope(heap_,
+ heap_->store_buffer(),
+ &Heap::ScavengeStoreBufferCallback);
+ heap_->store_buffer()->IteratePointersToNewSpace(&UpdatePointer);
+ }
+
+ { GCTracer::Scope gc_scope(tracer_,
+ GCTracer::Scope::MC_UPDATE_POINTERS_TO_EVACUATED);
+ SlotsBuffer::UpdateSlotsRecordedIn(heap_,
+ migration_slots_buffer_,
+ code_slots_filtering_required);
+ if (FLAG_trace_fragmentation) {
+ PrintF(" migration slots buffer: %d\n",
+ SlotsBuffer::SizeOfChain(migration_slots_buffer_));
+ }
+
+ if (compacting_ && was_marked_incrementally_) {
+ // It's difficult to filter out slots recorded for large objects.
+ LargeObjectIterator it(heap_->lo_space());
+ for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
+      // LargeObjectSpace has not been swept yet, so we have to skip
+      // dead objects explicitly.
+ if (!IsMarked(obj)) continue;
+
+ Page* p = Page::FromAddress(obj->address());
+ if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
+ obj->Iterate(&updating_visitor);
+ p->ClearFlag(Page::RESCAN_ON_EVACUATION);
+ }
+ }
+ }
}
- inline Heap* heap() const { return heap_; }
+ int npages = evacuation_candidates_.length();
+ { GCTracer::Scope gc_scope(
+ tracer_, GCTracer::Scope::MC_UPDATE_POINTERS_BETWEEN_EVACUATED);
+ for (int i = 0; i < npages; i++) {
+ Page* p = evacuation_candidates_[i];
+ ASSERT(p->IsEvacuationCandidate() ||
+ p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
- private:
- void UpdatePointer(Object** p) {
- if (!(*p)->IsHeapObject()) return;
-
- HeapObject* obj = HeapObject::cast(*p);
- Address old_addr = obj->address();
- Address new_addr;
- ASSERT(!heap()->InFromSpace(obj));
-
- if (heap()->new_space()->Contains(obj)) {
- Address forwarding_pointer_addr =
- heap()->new_space()->FromSpaceLow() +
- heap()->new_space()->ToSpaceOffsetForAddress(old_addr);
- new_addr = Memory::Address_at(forwarding_pointer_addr);
-
-#ifdef DEBUG
- ASSERT(heap()->old_pointer_space()->Contains(new_addr) ||
- heap()->old_data_space()->Contains(new_addr) ||
- heap()->new_space()->FromSpaceContains(new_addr) ||
- heap()->lo_space()->Contains(HeapObject::FromAddress(new_addr)));
-
- if (heap()->new_space()->FromSpaceContains(new_addr)) {
- ASSERT(heap()->new_space()->FromSpaceOffsetForAddress(new_addr) <=
- heap()->new_space()->ToSpaceOffsetForAddress(old_addr));
- }
-#endif
-
- } else if (heap()->lo_space()->Contains(obj)) {
- // Don't move objects in the large object space.
- return;
+ if (p->IsEvacuationCandidate()) {
+ SlotsBuffer::UpdateSlotsRecordedIn(heap_,
+ p->slots_buffer(),
+ code_slots_filtering_required);
+ if (FLAG_trace_fragmentation) {
+ PrintF(" page %p slots buffer: %d\n",
+ reinterpret_cast<void*>(p),
+ SlotsBuffer::SizeOfChain(p->slots_buffer()));
+ }
- } else {
-#ifdef DEBUG
- PagedSpaces spaces;
- PagedSpace* original_space = spaces.next();
- while (original_space != NULL) {
- if (original_space->Contains(obj)) break;
- original_space = spaces.next();
+        // Important: the skip list should be cleared only after the roots
+        // have been updated, because root iteration traverses the stack and
+        // might have to find code objects via a not-yet-updated pc pointing
+        // into an evacuation candidate.
+ SkipList* list = p->skip_list();
+ if (list != NULL) list->Clear();
+ } else {
+ if (FLAG_gc_verbose) {
+ PrintF("Sweeping 0x%" V8PRIxPTR " during evacuation.\n",
+ reinterpret_cast<intptr_t>(p));
+ }
+ PagedSpace* space = static_cast<PagedSpace*>(p->owner());
+ p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION);
+
+ switch (space->identity()) {
+ case OLD_DATA_SPACE:
+ SweepConservatively(space, p);
+ break;
+ case OLD_POINTER_SPACE:
+ SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, IGNORE_SKIP_LIST>(
+ space, p, &updating_visitor);
+ break;
+ case CODE_SPACE:
+ SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, REBUILD_SKIP_LIST>(
+ space, p, &updating_visitor);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
}
- ASSERT(original_space != NULL);
-#endif
- new_addr = MarkCompactCollector::GetForwardingAddressInOldSpace(obj);
- ASSERT(original_space->Contains(new_addr));
- ASSERT(original_space->MCSpaceOffsetForAddress(new_addr) <=
- original_space->MCSpaceOffsetForAddress(old_addr));
}
+ }
- *p = HeapObject::FromAddress(new_addr);
+ GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_UPDATE_MISC_POINTERS);
-#ifdef DEBUG
- if (FLAG_gc_verbose) {
- PrintF("update %p : %p -> %p\n",
- reinterpret_cast<Address>(p), old_addr, new_addr);
+ // Update pointers from cells.
+ HeapObjectIterator cell_iterator(heap_->cell_space());
+ for (HeapObject* cell = cell_iterator.Next();
+ cell != NULL;
+ cell = cell_iterator.Next()) {
+ if (cell->IsJSGlobalPropertyCell()) {
+ Address value_address =
+ reinterpret_cast<Address>(cell) +
+ (JSGlobalPropertyCell::kValueOffset - kHeapObjectTag);
+ updating_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
}
-#endif
}
- Heap* heap_;
-};
+ // Update pointer from the global contexts list.
+ updating_visitor.VisitPointer(heap_->global_contexts_list_address());
+ heap_->symbol_table()->Iterate(&updating_visitor);
-void MarkCompactCollector::UpdatePointers() {
-#ifdef DEBUG
- ASSERT(state_ == ENCODE_FORWARDING_ADDRESSES);
- state_ = UPDATE_POINTERS;
-#endif
- UpdatingVisitor updating_visitor(heap());
+ // Update pointers from external string table.
+ heap_->UpdateReferencesInExternalStringTable(
+ &UpdateReferenceInExternalStringTableEntry);
+
+ // Update JSFunction pointers from the runtime profiler.
heap()->isolate()->runtime_profiler()->UpdateSamplesAfterCompact(
&updating_visitor);
- heap()->IterateRoots(&updating_visitor, VISIT_ONLY_STRONG);
- heap()->isolate()->global_handles()->IterateWeakRoots(&updating_visitor);
-
- // Update the pointer to the head of the weak list of global contexts.
- updating_visitor.VisitPointer(&heap()->global_contexts_list_);
-
- LiveObjectList::IterateElements(&updating_visitor);
-
- int live_maps_size = IterateLiveObjects(
- heap()->map_space(), &MarkCompactCollector::UpdatePointersInOldObject);
- int live_pointer_olds_size = IterateLiveObjects(
- heap()->old_pointer_space(),
- &MarkCompactCollector::UpdatePointersInOldObject);
- int live_data_olds_size = IterateLiveObjects(
- heap()->old_data_space(),
- &MarkCompactCollector::UpdatePointersInOldObject);
- int live_codes_size = IterateLiveObjects(
- heap()->code_space(), &MarkCompactCollector::UpdatePointersInOldObject);
- int live_cells_size = IterateLiveObjects(
- heap()->cell_space(), &MarkCompactCollector::UpdatePointersInOldObject);
- int live_news_size = IterateLiveObjects(
- heap()->new_space(), &MarkCompactCollector::UpdatePointersInNewObject);
-
- // Large objects do not move, the map word can be updated directly.
- LargeObjectIterator it(heap()->lo_space());
- for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
- UpdatePointersInNewObject(obj);
- }
-
- USE(live_maps_size);
- USE(live_pointer_olds_size);
- USE(live_data_olds_size);
- USE(live_codes_size);
- USE(live_cells_size);
- USE(live_news_size);
- ASSERT(live_maps_size == live_map_objects_size_);
- ASSERT(live_data_olds_size == live_old_data_objects_size_);
- ASSERT(live_pointer_olds_size == live_old_pointer_objects_size_);
- ASSERT(live_codes_size == live_code_objects_size_);
- ASSERT(live_cells_size == live_cell_objects_size_);
- ASSERT(live_news_size == live_young_objects_size_);
-}
-
-
-int MarkCompactCollector::UpdatePointersInNewObject(HeapObject* obj) {
- // Keep old map pointers
- Map* old_map = obj->map();
- ASSERT(old_map->IsHeapObject());
-
- Address forwarded = GetForwardingAddressInOldSpace(old_map);
-
- ASSERT(heap()->map_space()->Contains(old_map));
- ASSERT(heap()->map_space()->Contains(forwarded));
-#ifdef DEBUG
- if (FLAG_gc_verbose) {
- PrintF("update %p : %p -> %p\n", obj->address(), old_map->address(),
- forwarded);
- }
-#endif
- // Update the map pointer.
- obj->set_map(reinterpret_cast<Map*>(HeapObject::FromAddress(forwarded)));
-
- // We have to compute the object size relying on the old map because
- // map objects are not relocated yet.
- int obj_size = obj->SizeFromMap(old_map);
-
- // Update pointers in the object body.
- UpdatingVisitor updating_visitor(heap());
- obj->IterateBody(old_map->instance_type(), obj_size, &updating_visitor);
- return obj_size;
-}
-
-
-int MarkCompactCollector::UpdatePointersInOldObject(HeapObject* obj) {
- // Decode the map pointer.
- MapWord encoding = obj->map_word();
- Address map_addr = encoding.DecodeMapAddress(heap()->map_space());
- ASSERT(heap()->map_space()->Contains(HeapObject::FromAddress(map_addr)));
- // At this point, the first word of map_addr is also encoded, cannot
- // cast it to Map* using Map::cast.
- Map* map = reinterpret_cast<Map*>(HeapObject::FromAddress(map_addr));
- int obj_size = obj->SizeFromMap(map);
- InstanceType type = map->instance_type();
+ EvacuationWeakObjectRetainer evacuation_object_retainer;
+ heap()->ProcessWeakReferences(&evacuation_object_retainer);
- // Update map pointer.
- Address new_map_addr = GetForwardingAddressInOldSpace(map);
- int offset = encoding.DecodeOffset();
- obj->set_map_word(MapWord::EncodeAddress(new_map_addr, offset));
+ // Visit invalidated code (we ignored all slots on it) and clear mark-bits
+ // under it.
+ ProcessInvalidatedCode(&updating_visitor);
#ifdef DEBUG
- if (FLAG_gc_verbose) {
- PrintF("update %p : %p -> %p\n", obj->address(),
- map_addr, new_map_addr);
+ if (FLAG_verify_heap) {
+ VerifyEvacuation(heap_);
}
#endif
- // Update pointers in the object body.
- UpdatingVisitor updating_visitor(heap());
- obj->IterateBody(type, obj_size, &updating_visitor);
- return obj_size;
+ slots_buffer_allocator_.DeallocateChain(&migration_slots_buffer_);
+ ASSERT(migration_slots_buffer_ == NULL);
+ for (int i = 0; i < npages; i++) {
+ Page* p = evacuation_candidates_[i];
+ if (!p->IsEvacuationCandidate()) continue;
+ PagedSpace* space = static_cast<PagedSpace*>(p->owner());
+ space->Free(p->ObjectAreaStart(), Page::kObjectAreaSize);
+ p->set_scan_on_scavenge(false);
+ slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address());
+ p->ClearEvacuationCandidate();
+ }
+ evacuation_candidates_.Rewind(0);
+ compacting_ = false;
}
-Address MarkCompactCollector::GetForwardingAddressInOldSpace(HeapObject* obj) {
- // Object should either in old or map space.
- MapWord encoding = obj->map_word();
+static const int kStartTableEntriesPerLine = 5;
+static const int kStartTableLines = 171;
+static const int kStartTableInvalidLine = 127;
+static const int kStartTableUnusedEntry = 126;
- // Offset to the first live object's forwarding address.
- int offset = encoding.DecodeOffset();
- Address obj_addr = obj->address();
+#define _ kStartTableUnusedEntry
+#define X kStartTableInvalidLine
+// Mark-bit to object start offset table.
+//
+// Each line is indexed by a byte of mark bits. The first number on a line
+// gives the number of live object starts encoded by that byte; the
+// remaining numbers give the offsets (in words) of those object starts.
+//
+// Since objects are at least two words long, there are no entries for bytes
+// with two consecutive 1 bits. All byte values after 170 have at least two
+// consecutive set bits.
+char kStartTable[kStartTableLines * kStartTableEntriesPerLine] = {
+ 0, _, _, _, _, // 0
+ 1, 0, _, _, _, // 1
+ 1, 1, _, _, _, // 2
+ X, _, _, _, _, // 3
+ 1, 2, _, _, _, // 4
+ 2, 0, 2, _, _, // 5
+ X, _, _, _, _, // 6
+ X, _, _, _, _, // 7
+ 1, 3, _, _, _, // 8
+ 2, 0, 3, _, _, // 9
+ 2, 1, 3, _, _, // 10
+ X, _, _, _, _, // 11
+ X, _, _, _, _, // 12
+ X, _, _, _, _, // 13
+ X, _, _, _, _, // 14
+ X, _, _, _, _, // 15
+ 1, 4, _, _, _, // 16
+ 2, 0, 4, _, _, // 17
+ 2, 1, 4, _, _, // 18
+ X, _, _, _, _, // 19
+ 2, 2, 4, _, _, // 20
+ 3, 0, 2, 4, _, // 21
+ X, _, _, _, _, // 22
+ X, _, _, _, _, // 23
+ X, _, _, _, _, // 24
+ X, _, _, _, _, // 25
+ X, _, _, _, _, // 26
+ X, _, _, _, _, // 27
+ X, _, _, _, _, // 28
+ X, _, _, _, _, // 29
+ X, _, _, _, _, // 30
+ X, _, _, _, _, // 31
+ 1, 5, _, _, _, // 32
+ 2, 0, 5, _, _, // 33
+ 2, 1, 5, _, _, // 34
+ X, _, _, _, _, // 35
+ 2, 2, 5, _, _, // 36
+ 3, 0, 2, 5, _, // 37
+ X, _, _, _, _, // 38
+ X, _, _, _, _, // 39
+ 2, 3, 5, _, _, // 40
+ 3, 0, 3, 5, _, // 41
+ 3, 1, 3, 5, _, // 42
+ X, _, _, _, _, // 43
+ X, _, _, _, _, // 44
+ X, _, _, _, _, // 45
+ X, _, _, _, _, // 46
+ X, _, _, _, _, // 47
+ X, _, _, _, _, // 48
+ X, _, _, _, _, // 49
+ X, _, _, _, _, // 50
+ X, _, _, _, _, // 51
+ X, _, _, _, _, // 52
+ X, _, _, _, _, // 53
+ X, _, _, _, _, // 54
+ X, _, _, _, _, // 55
+ X, _, _, _, _, // 56
+ X, _, _, _, _, // 57
+ X, _, _, _, _, // 58
+ X, _, _, _, _, // 59
+ X, _, _, _, _, // 60
+ X, _, _, _, _, // 61
+ X, _, _, _, _, // 62
+ X, _, _, _, _, // 63
+ 1, 6, _, _, _, // 64
+ 2, 0, 6, _, _, // 65
+ 2, 1, 6, _, _, // 66
+ X, _, _, _, _, // 67
+ 2, 2, 6, _, _, // 68
+ 3, 0, 2, 6, _, // 69
+ X, _, _, _, _, // 70
+ X, _, _, _, _, // 71
+ 2, 3, 6, _, _, // 72
+ 3, 0, 3, 6, _, // 73
+ 3, 1, 3, 6, _, // 74
+ X, _, _, _, _, // 75
+ X, _, _, _, _, // 76
+ X, _, _, _, _, // 77
+ X, _, _, _, _, // 78
+ X, _, _, _, _, // 79
+ 2, 4, 6, _, _, // 80
+ 3, 0, 4, 6, _, // 81
+ 3, 1, 4, 6, _, // 82
+ X, _, _, _, _, // 83
+ 3, 2, 4, 6, _, // 84
+ 4, 0, 2, 4, 6, // 85
+ X, _, _, _, _, // 86
+ X, _, _, _, _, // 87
+ X, _, _, _, _, // 88
+ X, _, _, _, _, // 89
+ X, _, _, _, _, // 90
+ X, _, _, _, _, // 91
+ X, _, _, _, _, // 92
+ X, _, _, _, _, // 93
+ X, _, _, _, _, // 94
+ X, _, _, _, _, // 95
+ X, _, _, _, _, // 96
+ X, _, _, _, _, // 97
+ X, _, _, _, _, // 98
+ X, _, _, _, _, // 99
+ X, _, _, _, _, // 100
+ X, _, _, _, _, // 101
+ X, _, _, _, _, // 102
+ X, _, _, _, _, // 103
+ X, _, _, _, _, // 104
+ X, _, _, _, _, // 105
+ X, _, _, _, _, // 106
+ X, _, _, _, _, // 107
+ X, _, _, _, _, // 108
+ X, _, _, _, _, // 109
+ X, _, _, _, _, // 110
+ X, _, _, _, _, // 111
+ X, _, _, _, _, // 112
+ X, _, _, _, _, // 113
+ X, _, _, _, _, // 114
+ X, _, _, _, _, // 115
+ X, _, _, _, _, // 116
+ X, _, _, _, _, // 117
+ X, _, _, _, _, // 118
+ X, _, _, _, _, // 119
+ X, _, _, _, _, // 120
+ X, _, _, _, _, // 121
+ X, _, _, _, _, // 122
+ X, _, _, _, _, // 123
+ X, _, _, _, _, // 124
+ X, _, _, _, _, // 125
+ X, _, _, _, _, // 126
+ X, _, _, _, _, // 127
+ 1, 7, _, _, _, // 128
+ 2, 0, 7, _, _, // 129
+ 2, 1, 7, _, _, // 130
+ X, _, _, _, _, // 131
+ 2, 2, 7, _, _, // 132
+ 3, 0, 2, 7, _, // 133
+ X, _, _, _, _, // 134
+ X, _, _, _, _, // 135
+ 2, 3, 7, _, _, // 136
+ 3, 0, 3, 7, _, // 137
+ 3, 1, 3, 7, _, // 138
+ X, _, _, _, _, // 139
+ X, _, _, _, _, // 140
+ X, _, _, _, _, // 141
+ X, _, _, _, _, // 142
+ X, _, _, _, _, // 143
+ 2, 4, 7, _, _, // 144
+ 3, 0, 4, 7, _, // 145
+ 3, 1, 4, 7, _, // 146
+ X, _, _, _, _, // 147
+ 3, 2, 4, 7, _, // 148
+ 4, 0, 2, 4, 7, // 149
+ X, _, _, _, _, // 150
+ X, _, _, _, _, // 151
+ X, _, _, _, _, // 152
+ X, _, _, _, _, // 153
+ X, _, _, _, _, // 154
+ X, _, _, _, _, // 155
+ X, _, _, _, _, // 156
+ X, _, _, _, _, // 157
+ X, _, _, _, _, // 158
+ X, _, _, _, _, // 159
+ 2, 5, 7, _, _, // 160
+ 3, 0, 5, 7, _, // 161
+ 3, 1, 5, 7, _, // 162
+ X, _, _, _, _, // 163
+ 3, 2, 5, 7, _, // 164
+ 4, 0, 2, 5, 7, // 165
+ X, _, _, _, _, // 166
+ X, _, _, _, _, // 167
+ 3, 3, 5, 7, _, // 168
+ 4, 0, 3, 5, 7, // 169
+ 4, 1, 3, 5, 7 // 170
+};
+#undef _
+#undef X
+
+
+// Takes a word of mark bits. Returns the number of objects that start in the
+// range. Puts the word offsets of the object starts in the supplied array.
+static inline int MarkWordToObjectStarts(uint32_t mark_bits, int* starts) {
+ int objects = 0;
+ int offset = 0;
+
+ // No consecutive 1 bits.
+ ASSERT((mark_bits & 0x180) != 0x180);
+ ASSERT((mark_bits & 0x18000) != 0x18000);
+ ASSERT((mark_bits & 0x1800000) != 0x1800000);
+
+ while (mark_bits != 0) {
+ int byte = (mark_bits & 0xff);
+ mark_bits >>= 8;
+ if (byte != 0) {
+ ASSERT(byte < kStartTableLines); // No consecutive 1 bits.
+ char* table = kStartTable + byte * kStartTableEntriesPerLine;
+ int objects_in_these_8_words = table[0];
+ ASSERT(objects_in_these_8_words != kStartTableInvalidLine);
+ ASSERT(objects_in_these_8_words < kStartTableEntriesPerLine);
+ for (int i = 0; i < objects_in_these_8_words; i++) {
+ starts[objects++] = offset + table[1 + i];
+ }
+ }
+ offset += 8;
+ }
+ return objects;
+}
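+
+// Illustrative worked example, not part of this patch: for mark_bits = 0x95
+// (binary 10010101, i.e. table line 149 above: "4, 0, 2, 4, 7") the
+// function reports four object starts at word offsets 0, 2, 4 and 7:
+//
+//   int offsets[16];
+//   int n = MarkWordToObjectStarts(0x95u, offsets);
+//   ASSERT(n == 4);
+//   ASSERT(offsets[0] == 0 && offsets[1] == 2 &&
+//          offsets[2] == 4 && offsets[3] == 7);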
- // Find the first live object's forwarding address.
- Page* p = Page::FromAddress(obj_addr);
- Address first_forwarded = p->mc_first_forwarded;
- // Page start address of forwarded address.
- Page* forwarded_page = Page::FromAddress(first_forwarded);
- int forwarded_offset = forwarded_page->Offset(first_forwarded);
+static inline Address DigestFreeStart(Address approximate_free_start,
+ uint32_t free_start_cell) {
+ ASSERT(free_start_cell != 0);
- // Find end of allocation in the page of first_forwarded.
- int mc_top_offset = forwarded_page->AllocationWatermarkOffset();
+ // No consecutive 1 bits.
+ ASSERT((free_start_cell & (free_start_cell << 1)) == 0);
- // Check if current object's forward pointer is in the same page
- // as the first live object's forwarding pointer
- if (forwarded_offset + offset < mc_top_offset) {
- // In the same page.
- return first_forwarded + offset;
+ int offsets[16];
+ uint32_t cell = free_start_cell;
+ int offset_of_last_live;
+ if ((cell & 0x80000000u) != 0) {
+ // This case would overflow below.
+ offset_of_last_live = 31;
+ } else {
+ // Remove all but one bit, the most significant. This is an optimization
+ // that may or may not be worthwhile.
+ cell |= cell >> 16;
+ cell |= cell >> 8;
+ cell |= cell >> 4;
+ cell |= cell >> 2;
+ cell |= cell >> 1;
+ cell = (cell + 1) >> 1;
+ int live_objects = MarkWordToObjectStarts(cell, offsets);
+ ASSERT(live_objects == 1);
+ offset_of_last_live = offsets[live_objects - 1];
+ }
+ Address last_live_start =
+ approximate_free_start + offset_of_last_live * kPointerSize;
+ HeapObject* last_live = HeapObject::FromAddress(last_live_start);
+ Address free_start = last_live_start + last_live->Size();
+ return free_start;
+}
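+
+// Illustrative sketch, not part of this patch: the smearing sequence above
+// isolates the most significant set bit of a 32-bit word. SketchHighestBit
+// is a hypothetical name; the special case for bit 31 mirrors the overflow
+// guard in DigestFreeStart.
+static inline uint32_t SketchHighestBit(uint32_t cell) {
+  if ((cell & 0x80000000u) != 0) return 0x80000000u;  // (cell + 1) overflows.
+  cell |= cell >> 16;  // Propagate the top set bit into every lower
+  cell |= cell >> 8;   // position, e.g. 0x00012000 -> 0x0001ffff.
+  cell |= cell >> 4;
+  cell |= cell >> 2;
+  cell |= cell >> 1;
+  return (cell + 1) >> 1;  // 0x0001ffff + 1 = 0x00020000; >> 1 = 0x00010000.
+}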
+
+
+static inline Address StartOfLiveObject(Address block_address, uint32_t cell) {
+ ASSERT(cell != 0);
+
+ // No consecutive 1 bits.
+ ASSERT((cell & (cell << 1)) == 0);
+
+ int offsets[16];
+ if (cell == 0x80000000u) { // Avoid overflow below.
+ return block_address + 31 * kPointerSize;
+ }
+ uint32_t first_set_bit = ((cell ^ (cell - 1)) + 1) >> 1;
+ ASSERT((first_set_bit & cell) == first_set_bit);
+ int live_objects = MarkWordToObjectStarts(first_set_bit, offsets);
+ ASSERT(live_objects == 1);
+ USE(live_objects);
+ return block_address + offsets[0] * kPointerSize;
+}
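+
+// Illustrative sketch, not part of this patch: the expression above isolates
+// the least significant set bit. cell ^ (cell - 1) keeps the lowest set bit
+// plus all zeros below it; +1 then >>1 reduces that run to the bit itself.
+// SketchLowestBit is a hypothetical name; cell == 0x80000000u is excluded
+// because the +1 would overflow, which is why StartOfLiveObject special-
+// cases it.
+static inline uint32_t SketchLowestBit(uint32_t cell) {
+  return ((cell ^ (cell - 1)) + 1) >> 1;  // e.g. 0b10100 -> 0b00100.
+}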
+
+
+// Sweeps a space conservatively. After this has been done, the larger free
+// spaces have been put on the free list and the smaller ones have been
+// ignored and left untouched. A free space is always either ignored or put
+// on the free list, never split up into two parts. This is important
+// because it means that any FreeSpace maps left actually describe a region of
+// memory that can be ignored when scanning. Dead objects other than free
+// spaces will not contain the free space map.
+intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space, Page* p) {
+ ASSERT(!p->IsEvacuationCandidate() && !p->WasSwept());
+ MarkBit::CellType* cells = p->markbits()->cells();
+ p->MarkSweptConservatively();
+
+ int last_cell_index =
+ Bitmap::IndexToCell(
+ Bitmap::CellAlignIndex(
+ p->AddressToMarkbitIndex(p->ObjectAreaEnd())));
+
+ int cell_index = Page::kFirstUsedCell;
+ intptr_t freed_bytes = 0;
+
+ // This is the start of the 32 word block that we are currently looking at.
+ Address block_address = p->ObjectAreaStart();
+
+  // Skip over all the dead objects at the start of the page; the whole
+  // region is freed as one block below.
+ for (cell_index = Page::kFirstUsedCell;
+ cell_index < last_cell_index;
+ cell_index++, block_address += 32 * kPointerSize) {
+ if (cells[cell_index] != 0) break;
+ }
+ size_t size = block_address - p->ObjectAreaStart();
+ if (cell_index == last_cell_index) {
+ freed_bytes += static_cast<int>(space->Free(p->ObjectAreaStart(),
+ static_cast<int>(size)));
+ ASSERT_EQ(0, p->LiveBytes());
+ return freed_bytes;
+ }
+ // Grow the size of the start-of-page free space a little to get up to the
+ // first live object.
+ Address free_end = StartOfLiveObject(block_address, cells[cell_index]);
+ // Free the first free space.
+ size = free_end - p->ObjectAreaStart();
+ freed_bytes += space->Free(p->ObjectAreaStart(),
+ static_cast<int>(size));
+ // The start of the current free area is represented in undigested form by
+ // the address of the last 32-word section that contained a live object and
+ // the marking bitmap for that cell, which describes where the live object
+ // started. Unless we find a large free space in the bitmap we will not
+ // digest this pair into a real address. We start the iteration here at the
+  // first word in the marking bitmap that indicates a live object.
+ Address free_start = block_address;
+ uint32_t free_start_cell = cells[cell_index];
+
+ for ( ;
+ cell_index < last_cell_index;
+ cell_index++, block_address += 32 * kPointerSize) {
+ ASSERT((unsigned)cell_index ==
+ Bitmap::IndexToCell(
+ Bitmap::CellAlignIndex(
+ p->AddressToMarkbitIndex(block_address))));
+ uint32_t cell = cells[cell_index];
+ if (cell != 0) {
+      // We have a live object. Check approximately whether it is more than
+      // 32 words away from the last live object.
+ if (block_address - free_start > 32 * kPointerSize) {
+ free_start = DigestFreeStart(free_start, free_start_cell);
+ if (block_address - free_start > 32 * kPointerSize) {
+          // Now that we know the exact start of the free space, it still
+          // looks large enough to be worth bothering with, so we need to
+          // find the start of the first live object at the end of the free
+          // space.
+ free_end = StartOfLiveObject(block_address, cell);
+ freed_bytes += space->Free(free_start,
+ static_cast<int>(free_end - free_start));
+ }
+ }
+ // Update our undigested record of where the current free area started.
+ free_start = block_address;
+ free_start_cell = cell;
+ // Clear marking bits for current cell.
+ cells[cell_index] = 0;
+ }
}
- // Must be in the next page, NOTE: this may cross chunks.
- Page* next_page = forwarded_page->next_page();
- ASSERT(next_page->is_valid());
+ // Handle the free space at the end of the page.
+ if (block_address - free_start > 32 * kPointerSize) {
+ free_start = DigestFreeStart(free_start, free_start_cell);
+ freed_bytes += space->Free(free_start,
+ static_cast<int>(block_address - free_start));
+ }
- offset -= (mc_top_offset - forwarded_offset);
- offset += Page::kObjectStartOffset;
+ p->ResetLiveBytes();
+ return freed_bytes;
+}
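+
+// Illustrative sketch, not part of this patch: the sweeper above applies a
+// coarse-then-exact pattern. It compares block addresses first, which is
+// cheap but overestimates the gap by up to a cell's worth of words, and
+// digests the free start into an exact address only when that filter
+// passes. WorthDigesting is a hypothetical helper restating the condition
+// used twice above.
+static inline bool WorthDigesting(Address block_address, Address free_start) {
+  return block_address - free_start > 32 * kPointerSize;
+}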
- ASSERT_PAGE_OFFSET(offset);
- ASSERT(next_page->OffsetToAddress(offset) < next_page->AllocationTop());
- return next_page->OffsetToAddress(offset);
-}
+void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
+ space->set_was_swept_conservatively(sweeper == CONSERVATIVE ||
+ sweeper == LAZY_CONSERVATIVE);
+ space->ClearStats();
-// -------------------------------------------------------------------------
-// Phase 4: Relocate objects
+ PageIterator it(space);
-void MarkCompactCollector::RelocateObjects() {
-#ifdef DEBUG
- ASSERT(state_ == UPDATE_POINTERS);
- state_ = RELOCATE_OBJECTS;
-#endif
- // Relocates objects, always relocate map objects first. Relocating
- // objects in other space relies on map objects to get object size.
- int live_maps_size = IterateLiveObjects(
- heap()->map_space(), &MarkCompactCollector::RelocateMapObject);
- int live_pointer_olds_size = IterateLiveObjects(
- heap()->old_pointer_space(),
- &MarkCompactCollector::RelocateOldPointerObject);
- int live_data_olds_size = IterateLiveObjects(
- heap()->old_data_space(), &MarkCompactCollector::RelocateOldDataObject);
- int live_codes_size = IterateLiveObjects(
- heap()->code_space(), &MarkCompactCollector::RelocateCodeObject);
- int live_cells_size = IterateLiveObjects(
- heap()->cell_space(), &MarkCompactCollector::RelocateCellObject);
- int live_news_size = IterateLiveObjects(
- heap()->new_space(), &MarkCompactCollector::RelocateNewObject);
-
- USE(live_maps_size);
- USE(live_pointer_olds_size);
- USE(live_data_olds_size);
- USE(live_codes_size);
- USE(live_cells_size);
- USE(live_news_size);
- ASSERT(live_maps_size == live_map_objects_size_);
- ASSERT(live_data_olds_size == live_old_data_objects_size_);
- ASSERT(live_pointer_olds_size == live_old_pointer_objects_size_);
- ASSERT(live_codes_size == live_code_objects_size_);
- ASSERT(live_cells_size == live_cell_objects_size_);
- ASSERT(live_news_size == live_young_objects_size_);
-
- // Flip from and to spaces
- heap()->new_space()->Flip();
-
- heap()->new_space()->MCCommitRelocationInfo();
-
- // Set age_mark to bottom in to space
- Address mark = heap()->new_space()->bottom();
- heap()->new_space()->set_age_mark(mark);
+ intptr_t freed_bytes = 0;
+ int pages_swept = 0;
+ intptr_t newspace_size = space->heap()->new_space()->Size();
+ bool lazy_sweeping_active = false;
+ bool unused_page_present = false;
- PagedSpaces spaces;
- for (PagedSpace* space = spaces.next(); space != NULL; space = spaces.next())
- space->MCCommitRelocationInfo();
+ intptr_t old_space_size = heap()->PromotedSpaceSize();
+ intptr_t space_left =
+ Min(heap()->OldGenPromotionLimit(old_space_size),
+ heap()->OldGenAllocationLimit(old_space_size)) - old_space_size;
- heap()->CheckNewSpaceExpansionCriteria();
- heap()->IncrementYoungSurvivorsCounter(live_news_size);
-}
+ while (it.has_next()) {
+ Page* p = it.next();
+ // Clear sweeping flags indicating that marking bits are still intact.
+ p->ClearSweptPrecisely();
+ p->ClearSweptConservatively();
-int MarkCompactCollector::RelocateMapObject(HeapObject* obj) {
- // Recover map pointer.
- MapWord encoding = obj->map_word();
- Address map_addr = encoding.DecodeMapAddress(heap()->map_space());
- ASSERT(heap()->map_space()->Contains(HeapObject::FromAddress(map_addr)));
+ if (p->IsEvacuationCandidate()) {
+ ASSERT(evacuation_candidates_.length() > 0);
+ continue;
+ }
- // Get forwarding address before resetting map pointer
- Address new_addr = GetForwardingAddressInOldSpace(obj);
+ if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
+ // Will be processed in EvacuateNewSpaceAndCandidates.
+ continue;
+ }
- // Reset map pointer. The meta map object may not be copied yet so
- // Map::cast does not yet work.
- obj->set_map(reinterpret_cast<Map*>(HeapObject::FromAddress(map_addr)));
+ if (lazy_sweeping_active) {
+ if (FLAG_gc_verbose) {
+ PrintF("Sweeping 0x%" V8PRIxPTR " lazily postponed.\n",
+ reinterpret_cast<intptr_t>(p));
+ }
+ continue;
+ }
- Address old_addr = obj->address();
+    // One unused page is kept; any further unused pages are released rather
+    // than swept.
+ if (p->LiveBytes() == 0) {
+ if (unused_page_present) {
+ if (FLAG_gc_verbose) {
+ PrintF("Sweeping 0x%" V8PRIxPTR " released page.\n",
+ reinterpret_cast<intptr_t>(p));
+ }
+ space->ReleasePage(p);
+ continue;
+ }
+ unused_page_present = true;
+ }
- if (new_addr != old_addr) {
- // Move contents.
- heap()->MoveBlockToOldSpaceAndUpdateRegionMarks(new_addr,
- old_addr,
- Map::kSize);
+ switch (sweeper) {
+ case CONSERVATIVE: {
+ if (FLAG_gc_verbose) {
+ PrintF("Sweeping 0x%" V8PRIxPTR " conservatively.\n",
+ reinterpret_cast<intptr_t>(p));
+ }
+ SweepConservatively(space, p);
+ pages_swept++;
+ break;
+ }
+ case LAZY_CONSERVATIVE: {
+ if (FLAG_gc_verbose) {
+ PrintF("Sweeping 0x%" V8PRIxPTR " conservatively as needed.\n",
+ reinterpret_cast<intptr_t>(p));
+ }
+ freed_bytes += SweepConservatively(space, p);
+ pages_swept++;
+ if (space_left + freed_bytes > newspace_size) {
+ space->SetPagesToSweep(p->next_page());
+ lazy_sweeping_active = true;
+ } else {
+ if (FLAG_gc_verbose) {
+ PrintF("Only %" V8PRIdPTR " bytes freed. Still sweeping.\n",
+ freed_bytes);
+ }
+ }
+ break;
+ }
+ case PRECISE: {
+ if (FLAG_gc_verbose) {
+ PrintF("Sweeping 0x%" V8PRIxPTR " precisely.\n",
+ reinterpret_cast<intptr_t>(p));
+ }
+ if (space->identity() == CODE_SPACE) {
+ SweepPrecisely<SWEEP_ONLY, REBUILD_SKIP_LIST>(space, p, NULL);
+ } else {
+ SweepPrecisely<SWEEP_ONLY, IGNORE_SKIP_LIST>(space, p, NULL);
+ }
+ pages_swept++;
+ break;
+ }
+ default: {
+ UNREACHABLE();
+ }
+ }
}
-#ifdef DEBUG
if (FLAG_gc_verbose) {
- PrintF("relocate %p -> %p\n", old_addr, new_addr);
+ PrintF("SweepSpace: %s (%d pages swept)\n",
+ AllocationSpaceName(space->identity()),
+ pages_swept);
}
-#endif
- return Map::kSize;
+ // Give pages that are queued to be freed back to the OS.
+ heap()->FreeQueuedChunks();
}
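+
+// Illustrative note, not part of this patch: the LAZY_CONSERVATIVE cut-off
+// in SweepSpace stops eager sweeping once the old-generation headroom plus
+// the bytes freed so far could absorb a full new space of promoted objects;
+// the remaining pages are handed to the lazy sweeper via SetPagesToSweep.
+// A hypothetical restatement of that condition:
+//
+//   bool EnoughSweptForNow(intptr_t space_left, intptr_t freed_bytes,
+//                          intptr_t newspace_size) {
+//     return space_left + freed_bytes > newspace_size;
+//   }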
-static inline int RestoreMap(HeapObject* obj,
- PagedSpace* space,
- Address new_addr,
- Address map_addr) {
- // This must be a non-map object, and the function relies on the
- // assumption that the Map space is compacted before the other paged
- // spaces (see RelocateObjects).
-
- // Reset map pointer.
- obj->set_map(Map::cast(HeapObject::FromAddress(map_addr)));
-
- int obj_size = obj->Size();
- ASSERT_OBJECT_SIZE(obj_size);
-
- ASSERT(space->MCSpaceOffsetForAddress(new_addr) <=
- space->MCSpaceOffsetForAddress(obj->address()));
-
+void MarkCompactCollector::SweepSpaces() {
+ GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP);
#ifdef DEBUG
- if (FLAG_gc_verbose) {
- PrintF("relocate %p -> %p\n", obj->address(), new_addr);
- }
+ state_ = SWEEP_SPACES;
#endif
+ SweeperType how_to_sweep =
+ FLAG_lazy_sweeping ? LAZY_CONSERVATIVE : CONSERVATIVE;
+ if (sweep_precisely_) how_to_sweep = PRECISE;
+ // Noncompacting collections simply sweep the spaces to clear the mark
+ // bits and free the nonlive blocks (for old and map spaces). We sweep
+ // the map space last because freeing non-live maps overwrites them and
+ // the other spaces rely on possibly non-live maps to get the sizes for
+ // non-live objects.
+ SweepSpace(heap()->old_pointer_space(), how_to_sweep);
+ SweepSpace(heap()->old_data_space(), how_to_sweep);
- return obj_size;
-}
+ RemoveDeadInvalidatedCode();
+ SweepSpace(heap()->code_space(), PRECISE);
+ SweepSpace(heap()->cell_space(), PRECISE);
-int MarkCompactCollector::RelocateOldNonCodeObject(HeapObject* obj,
- PagedSpace* space) {
- // Recover map pointer.
- MapWord encoding = obj->map_word();
- Address map_addr = encoding.DecodeMapAddress(heap()->map_space());
- ASSERT(heap()->map_space()->Contains(map_addr));
+ EvacuateNewSpaceAndCandidates();
- // Get forwarding address before resetting map pointer.
- Address new_addr = GetForwardingAddressInOldSpace(obj);
+ // ClearNonLiveTransitions depends on precise sweeping of the map space to
+ // detect whether an unmarked map became dead in this collection or in one
+ // of the previous ones.
+ SweepSpace(heap()->map_space(), PRECISE);
- // Reset the map pointer.
- int obj_size = RestoreMap(obj, space, new_addr, map_addr);
+ // Deallocate unmarked objects and clear marked bits for marked objects.
+ heap_->lo_space()->FreeUnmarkedObjects();
+}
- Address old_addr = obj->address();
- if (new_addr != old_addr) {
- // Move contents.
- if (space == heap()->old_data_space()) {
- heap()->MoveBlock(new_addr, old_addr, obj_size);
- } else {
- heap()->MoveBlockToOldSpaceAndUpdateRegionMarks(new_addr,
- old_addr,
- obj_size);
- }
+void MarkCompactCollector::EnableCodeFlushing(bool enable) {
+ if (enable) {
+ if (code_flusher_ != NULL) return;
+ code_flusher_ = new CodeFlusher(heap()->isolate());
+ } else {
+ if (code_flusher_ == NULL) return;
+ delete code_flusher_;
+ code_flusher_ = NULL;
}
+}
- ASSERT(!HeapObject::FromAddress(new_addr)->IsCode());
- HeapObject* copied_to = HeapObject::FromAddress(new_addr);
- if (copied_to->IsSharedFunctionInfo()) {
- PROFILE(heap()->isolate(),
- SharedFunctionInfoMoveEvent(old_addr, new_addr));
+// TODO(1466) ReportDeleteIfNeeded is not called currently.
+// Our profiling tools do not expect intersections between
+// code objects. We should either reenable it or change our tools.
+void MarkCompactCollector::ReportDeleteIfNeeded(HeapObject* obj,
+ Isolate* isolate) {
+#ifdef ENABLE_GDB_JIT_INTERFACE
+ if (obj->IsCode()) {
+ GDBJITInterface::RemoveCode(reinterpret_cast<Code*>(obj));
+ }
+#endif
+ if (obj->IsCode()) {
+ PROFILE(isolate, CodeDeleteEvent(obj->address()));
}
- HEAP_PROFILE(heap(), ObjectMoveEvent(old_addr, new_addr));
-
- return obj_size;
}
-int MarkCompactCollector::RelocateOldPointerObject(HeapObject* obj) {
- return RelocateOldNonCodeObject(obj, heap()->old_pointer_space());
+void MarkCompactCollector::Initialize() {
+ StaticMarkingVisitor::Initialize();
}
-int MarkCompactCollector::RelocateOldDataObject(HeapObject* obj) {
- return RelocateOldNonCodeObject(obj, heap()->old_data_space());
+bool SlotsBuffer::IsTypedSlot(ObjectSlot slot) {
+ return reinterpret_cast<uintptr_t>(slot) < NUMBER_OF_SLOT_TYPES;
}
-int MarkCompactCollector::RelocateCellObject(HeapObject* obj) {
- return RelocateOldNonCodeObject(obj, heap()->cell_space());
+bool SlotsBuffer::AddTo(SlotsBufferAllocator* allocator,
+ SlotsBuffer** buffer_address,
+ SlotType type,
+ Address addr,
+ AdditionMode mode) {
+ SlotsBuffer* buffer = *buffer_address;
+ if (buffer == NULL || !buffer->HasSpaceForTypedSlot()) {
+ if (mode == FAIL_ON_OVERFLOW && ChainLengthThresholdReached(buffer)) {
+ allocator->DeallocateChain(buffer_address);
+ return false;
+ }
+ buffer = allocator->AllocateBuffer(buffer);
+ *buffer_address = buffer;
+ }
+ ASSERT(buffer->HasSpaceForTypedSlot());
+ buffer->Add(reinterpret_cast<ObjectSlot>(type));
+ buffer->Add(reinterpret_cast<ObjectSlot>(addr));
+ return true;
}
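A hedged, standalone illustration of the typed-slot encoding consumed by AddTo above: a typed slot occupies two consecutive buffer cells, a type tag followed by an address. Because the zero page is assumed to be unmapped, any cell whose value is below NUMBER_OF_SLOT_TYPES can only be a tag, never a real slot pointer. The names below are illustrative, not V8's.

    #include <cstdint>

    typedef void** ObjectSlot;

    enum SlotType {
      EMBEDDED_OBJECT_SLOT,
      CODE_TARGET_SLOT,
      NUMBER_OF_SLOT_TYPES  // assumed below every mapped address
    };

    static bool IsTypedSlotCell(ObjectSlot cell) {
      return reinterpret_cast<uintptr_t>(cell) <
             static_cast<uintptr_t>(NUMBER_OF_SLOT_TYPES);
    }

    // Append the (tag, address) pair for one typed slot.
    static void AddTypedSlot(ObjectSlot* cells, int* idx,
                             SlotType type, void* addr) {
      cells[(*idx)++] = reinterpret_cast<ObjectSlot>(type);
      cells[(*idx)++] = reinterpret_cast<ObjectSlot>(addr);
    }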
-int MarkCompactCollector::RelocateCodeObject(HeapObject* obj) {
- // Recover map pointer.
- MapWord encoding = obj->map_word();
- Address map_addr = encoding.DecodeMapAddress(heap()->map_space());
- ASSERT(heap()->map_space()->Contains(HeapObject::FromAddress(map_addr)));
-
- // Get forwarding address before resetting map pointer
- Address new_addr = GetForwardingAddressInOldSpace(obj);
-
- // Reset the map pointer.
- int obj_size = RestoreMap(obj, heap()->code_space(), new_addr, map_addr);
-
- Address old_addr = obj->address();
-
- if (new_addr != old_addr) {
- // Move contents.
- heap()->MoveBlock(new_addr, old_addr, obj_size);
+static inline SlotsBuffer::SlotType SlotTypeForRMode(RelocInfo::Mode rmode) {
+ if (RelocInfo::IsCodeTarget(rmode)) {
+ return SlotsBuffer::CODE_TARGET_SLOT;
+ } else if (RelocInfo::IsEmbeddedObject(rmode)) {
+ return SlotsBuffer::EMBEDDED_OBJECT_SLOT;
+ } else if (RelocInfo::IsDebugBreakSlot(rmode)) {
+ return SlotsBuffer::DEBUG_TARGET_SLOT;
+ } else if (RelocInfo::IsJSReturn(rmode)) {
+ return SlotsBuffer::JS_RETURN_SLOT;
}
+ UNREACHABLE();
+ return SlotsBuffer::NUMBER_OF_SLOT_TYPES;
+}
- HeapObject* copied_to = HeapObject::FromAddress(new_addr);
- if (copied_to->IsCode()) {
- // May also update inline cache target.
- Code::cast(copied_to)->Relocate(new_addr - old_addr);
- // Notify the logger that compiled code has moved.
- PROFILE(heap()->isolate(), CodeMoveEvent(old_addr, new_addr));
- }
- HEAP_PROFILE(heap(), ObjectMoveEvent(old_addr, new_addr));
- return obj_size;
+void MarkCompactCollector::RecordRelocSlot(RelocInfo* rinfo, Object* target) {
+ Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
+ if (target_page->IsEvacuationCandidate() &&
+ (rinfo->host() == NULL ||
+ !ShouldSkipEvacuationSlotRecording(rinfo->host()))) {
+ if (!SlotsBuffer::AddTo(&slots_buffer_allocator_,
+ target_page->slots_buffer_address(),
+ SlotTypeForRMode(rinfo->rmode()),
+ rinfo->pc(),
+ SlotsBuffer::FAIL_ON_OVERFLOW)) {
+ EvictEvacuationCandidate(target_page);
+ }
+ }
}
-int MarkCompactCollector::RelocateNewObject(HeapObject* obj) {
- int obj_size = obj->Size();
-
- // Get forwarding address
- Address old_addr = obj->address();
- int offset = heap()->new_space()->ToSpaceOffsetForAddress(old_addr);
+void MarkCompactCollector::RecordCodeEntrySlot(Address slot, Code* target) {
+ Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
+ if (target_page->IsEvacuationCandidate() &&
+ !ShouldSkipEvacuationSlotRecording(reinterpret_cast<Object**>(slot))) {
+ if (!SlotsBuffer::AddTo(&slots_buffer_allocator_,
+ target_page->slots_buffer_address(),
+ SlotsBuffer::CODE_ENTRY_SLOT,
+ slot,
+ SlotsBuffer::FAIL_ON_OVERFLOW)) {
+ EvictEvacuationCandidate(target_page);
+ }
+ }
+}
- Address new_addr =
- Memory::Address_at(heap()->new_space()->FromSpaceLow() + offset);
-#ifdef DEBUG
- if (heap()->new_space()->FromSpaceContains(new_addr)) {
- ASSERT(heap()->new_space()->FromSpaceOffsetForAddress(new_addr) <=
- heap()->new_space()->ToSpaceOffsetForAddress(old_addr));
- } else {
- ASSERT(heap()->TargetSpace(obj) == heap()->old_pointer_space() ||
- heap()->TargetSpace(obj) == heap()->old_data_space());
- }
-#endif
+static inline SlotsBuffer::SlotType DecodeSlotType(
+ SlotsBuffer::ObjectSlot slot) {
+ return static_cast<SlotsBuffer::SlotType>(reinterpret_cast<intptr_t>(slot));
+}
- // New and old addresses cannot overlap.
- if (heap()->InNewSpace(HeapObject::FromAddress(new_addr))) {
- heap()->CopyBlock(new_addr, old_addr, obj_size);
- } else {
- heap()->CopyBlockToOldSpaceAndUpdateRegionMarks(new_addr,
- old_addr,
- obj_size);
- }
-#ifdef DEBUG
- if (FLAG_gc_verbose) {
- PrintF("relocate %p -> %p\n", old_addr, new_addr);
- }
-#endif
+void SlotsBuffer::UpdateSlots(Heap* heap) {
+ PointersUpdatingVisitor v(heap);
- HeapObject* copied_to = HeapObject::FromAddress(new_addr);
- if (copied_to->IsSharedFunctionInfo()) {
- PROFILE(heap()->isolate(),
- SharedFunctionInfoMoveEvent(old_addr, new_addr));
+ for (int slot_idx = 0; slot_idx < idx_; ++slot_idx) {
+ ObjectSlot slot = slots_[slot_idx];
+ if (!IsTypedSlot(slot)) {
+ PointersUpdatingVisitor::UpdateSlot(heap, slot);
+ } else {
+ ++slot_idx;
+ ASSERT(slot_idx < idx_);
+ UpdateSlot(&v,
+ DecodeSlotType(slot),
+ reinterpret_cast<Address>(slots_[slot_idx]));
+ }
}
- HEAP_PROFILE(heap(), ObjectMoveEvent(old_addr, new_addr));
-
- return obj_size;
}
-void MarkCompactCollector::EnableCodeFlushing(bool enable) {
- if (enable) {
- if (code_flusher_ != NULL) return;
- code_flusher_ = new CodeFlusher(heap()->isolate());
- } else {
- if (code_flusher_ == NULL) return;
- delete code_flusher_;
- code_flusher_ = NULL;
+void SlotsBuffer::UpdateSlotsWithFilter(Heap* heap) {
+ PointersUpdatingVisitor v(heap);
+
+ for (int slot_idx = 0; slot_idx < idx_; ++slot_idx) {
+ ObjectSlot slot = slots_[slot_idx];
+ if (!IsTypedSlot(slot)) {
+ if (!IsOnInvalidatedCodeObject(reinterpret_cast<Address>(slot))) {
+ PointersUpdatingVisitor::UpdateSlot(heap, slot);
+ }
+ } else {
+ ++slot_idx;
+ ASSERT(slot_idx < idx_);
+ Address pc = reinterpret_cast<Address>(slots_[slot_idx]);
+ if (!IsOnInvalidatedCodeObject(pc)) {
+ UpdateSlot(&v,
+ DecodeSlotType(slot),
+ reinterpret_cast<Address>(slots_[slot_idx]));
+ }
+ }
}
}
-void MarkCompactCollector::ReportDeleteIfNeeded(HeapObject* obj,
- Isolate* isolate) {
-#ifdef ENABLE_GDB_JIT_INTERFACE
- if (obj->IsCode()) {
- GDBJITInterface::RemoveCode(reinterpret_cast<Code*>(obj));
- }
-#endif
- if (obj->IsCode()) {
- PROFILE(isolate, CodeDeleteEvent(obj->address()));
- }
+SlotsBuffer* SlotsBufferAllocator::AllocateBuffer(SlotsBuffer* next_buffer) {
+ return new SlotsBuffer(next_buffer);
}
-int MarkCompactCollector::SizeOfMarkedObject(HeapObject* obj) {
- MapWord map_word = obj->map_word();
- map_word.ClearMark();
- return obj->SizeFromMap(map_word.ToMap());
+void SlotsBufferAllocator::DeallocateBuffer(SlotsBuffer* buffer) {
+ delete buffer;
}
-void MarkCompactCollector::Initialize() {
- StaticPointersToNewGenUpdatingVisitor::Initialize();
- StaticMarkingVisitor::Initialize();
+void SlotsBufferAllocator::DeallocateChain(SlotsBuffer** buffer_address) {
+ SlotsBuffer* buffer = *buffer_address;
+ while (buffer != NULL) {
+ SlotsBuffer* next_buffer = buffer->next();
+ DeallocateBuffer(buffer);
+ buffer = next_buffer;
+ }
+ *buffer_address = NULL;
}
diff --git a/deps/v8/src/mark-compact.h b/deps/v8/src/mark-compact.h
index 9b67c8aff..254f175b6 100644
--- a/deps/v8/src/mark-compact.h
+++ b/deps/v8/src/mark-compact.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -28,6 +28,7 @@
#ifndef V8_MARK_COMPACT_H_
#define V8_MARK_COMPACT_H_
+#include "compiler-intrinsics.h"
#include "spaces.h"
namespace v8 {
@@ -45,62 +46,345 @@ class MarkingVisitor;
class RootMarkingVisitor;
+class Marking {
+ public:
+ explicit Marking(Heap* heap)
+ : heap_(heap) {
+ }
+
+ static inline MarkBit MarkBitFrom(Address addr);
+
+ static inline MarkBit MarkBitFrom(HeapObject* obj) {
+ return MarkBitFrom(reinterpret_cast<Address>(obj));
+ }
+
+ // Impossible markbits: 01
+ static const char* kImpossibleBitPattern;
+ static inline bool IsImpossible(MarkBit mark_bit) {
+ return !mark_bit.Get() && mark_bit.Next().Get();
+ }
+
+ // Black markbits: 10 - this is required by the sweeper.
+ static const char* kBlackBitPattern;
+ static inline bool IsBlack(MarkBit mark_bit) {
+ return mark_bit.Get() && !mark_bit.Next().Get();
+ }
+
+ // White markbits: 00 - this is required by the mark bit clearer.
+ static const char* kWhiteBitPattern;
+ static inline bool IsWhite(MarkBit mark_bit) {
+ return !mark_bit.Get();
+ }
+
+ // Grey markbits: 11
+ static const char* kGreyBitPattern;
+ static inline bool IsGrey(MarkBit mark_bit) {
+ return mark_bit.Get() && mark_bit.Next().Get();
+ }
+
+ static inline void MarkBlack(MarkBit mark_bit) {
+ mark_bit.Set();
+ mark_bit.Next().Clear();
+ }
+
+ static inline void BlackToGrey(MarkBit markbit) {
+ markbit.Next().Set();
+ }
+
+ static inline void WhiteToGrey(MarkBit markbit) {
+ markbit.Set();
+ markbit.Next().Set();
+ }
+
+ static inline void GreyToBlack(MarkBit markbit) {
+ markbit.Next().Clear();
+ }
+
+ static inline void BlackToGrey(HeapObject* obj) {
+ BlackToGrey(MarkBitFrom(obj));
+ }
+
+ static inline void AnyToGrey(MarkBit markbit) {
+ markbit.Set();
+ markbit.Next().Set();
+ }
+
+ // Returns true if the object whose mark is transferred is marked black.
+ bool TransferMark(Address old_start, Address new_start);
+
+#ifdef DEBUG
+ enum ObjectColor {
+ BLACK_OBJECT,
+ WHITE_OBJECT,
+ GREY_OBJECT,
+ IMPOSSIBLE_COLOR
+ };
+
+ static const char* ColorName(ObjectColor color) {
+ switch (color) {
+ case BLACK_OBJECT: return "black";
+ case WHITE_OBJECT: return "white";
+ case GREY_OBJECT: return "grey";
+ case IMPOSSIBLE_COLOR: return "impossible";
+ }
+ return "error";
+ }
+
+ static ObjectColor Color(HeapObject* obj) {
+ return Color(Marking::MarkBitFrom(obj));
+ }
+
+ static ObjectColor Color(MarkBit mark_bit) {
+ if (IsBlack(mark_bit)) return BLACK_OBJECT;
+ if (IsWhite(mark_bit)) return WHITE_OBJECT;
+ if (IsGrey(mark_bit)) return GREY_OBJECT;
+ UNREACHABLE();
+ return IMPOSSIBLE_COLOR;
+ }
+#endif
+
+ // Returns true if the transferred color is black.
+ INLINE(static bool TransferColor(HeapObject* from,
+ HeapObject* to)) {
+ MarkBit from_mark_bit = MarkBitFrom(from);
+ MarkBit to_mark_bit = MarkBitFrom(to);
+ bool is_black = false;
+ if (from_mark_bit.Get()) {
+ to_mark_bit.Set();
+ is_black = true; // Looks black so far.
+ }
+ if (from_mark_bit.Next().Get()) {
+ to_mark_bit.Next().Set();
+ is_black = false; // Was actually gray.
+ }
+ return is_black;
+ }
+
+ private:
+ Heap* heap_;
+};
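As a quick illustration of the two-bit color encoding above (white 00, black 10, grey 11, with 01 unused), here is a standalone model using a plain struct in place of the real mark bitmap; a sketch only, not V8's representation.

    struct MarkBits { bool mark; bool next; };  // (mark bit, following bit)

    static bool IsWhite(MarkBits b) { return !b.mark; }                 // 0x
    static bool IsBlack(MarkBits b) { return b.mark && !b.next; }       // 10
    static bool IsGrey(MarkBits b)  { return b.mark && b.next; }        // 11
    static bool IsImpossible(MarkBits b) { return !b.mark && b.next; }  // 01

    static void WhiteToGrey(MarkBits* b) { b->mark = true; b->next = true; }
    static void GreyToBlack(MarkBits* b) { b->next = false; }
    static void BlackToGrey(MarkBits* b) { b->next = true; }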
+
// ----------------------------------------------------------------------------
-// Marking stack for tracing live objects.
+// Marking deque for tracing live objects.
-class MarkingStack {
+class MarkingDeque {
public:
- MarkingStack() : low_(NULL), top_(NULL), high_(NULL), overflowed_(false) { }
+ MarkingDeque()
+ : array_(NULL), top_(0), bottom_(0), mask_(0), overflowed_(false) { }
void Initialize(Address low, Address high) {
- top_ = low_ = reinterpret_cast<HeapObject**>(low);
- high_ = reinterpret_cast<HeapObject**>(high);
+ HeapObject** obj_low = reinterpret_cast<HeapObject**>(low);
+ HeapObject** obj_high = reinterpret_cast<HeapObject**>(high);
+ array_ = obj_low;
+ mask_ = RoundDownToPowerOf2(static_cast<int>(obj_high - obj_low)) - 1;
+ top_ = bottom_ = 0;
overflowed_ = false;
}
- bool is_full() const { return top_ >= high_; }
+ inline bool IsFull() { return ((top_ + 1) & mask_) == bottom_; }
- bool is_empty() const { return top_ <= low_; }
+ inline bool IsEmpty() { return top_ == bottom_; }
bool overflowed() const { return overflowed_; }
- void clear_overflowed() { overflowed_ = false; }
+ void ClearOverflowed() { overflowed_ = false; }
+
+ void SetOverflowed() { overflowed_ = true; }
// Push the (marked) object on the marking stack if there is room,
// otherwise mark the object as overflowed and wait for a rescan of the
// heap.
- void Push(HeapObject* object) {
- CHECK(object->IsHeapObject());
- if (is_full()) {
- object->SetOverflow();
- overflowed_ = true;
+ inline void PushBlack(HeapObject* object) {
+ ASSERT(object->IsHeapObject());
+ if (IsFull()) {
+ Marking::BlackToGrey(object);
+ MemoryChunk::IncrementLiveBytes(object->address(), -object->Size());
+ SetOverflowed();
} else {
- *(top_++) = object;
+ array_[top_] = object;
+ top_ = ((top_ + 1) & mask_);
}
}
- HeapObject* Pop() {
- ASSERT(!is_empty());
- HeapObject* object = *(--top_);
- CHECK(object->IsHeapObject());
+ inline void PushGrey(HeapObject* object) {
+ ASSERT(object->IsHeapObject());
+ if (IsFull()) {
+ SetOverflowed();
+ } else {
+ array_[top_] = object;
+ top_ = ((top_ + 1) & mask_);
+ }
+ }
+
+ inline HeapObject* Pop() {
+ ASSERT(!IsEmpty());
+ top_ = ((top_ - 1) & mask_);
+ HeapObject* object = array_[top_];
+ ASSERT(object->IsHeapObject());
return object;
}
+ inline void UnshiftGrey(HeapObject* object) {
+ ASSERT(object->IsHeapObject());
+ if (IsFull()) {
+ SetOverflowed();
+ } else {
+ bottom_ = ((bottom_ - 1) & mask_);
+ array_[bottom_] = object;
+ }
+ }
+
+ HeapObject** array() { return array_; }
+ int bottom() { return bottom_; }
+ int top() { return top_; }
+ int mask() { return mask_; }
+ void set_top(int top) { top_ = top; }
+
private:
- HeapObject** low_;
- HeapObject** top_;
- HeapObject** high_;
+ HeapObject** array_;
+ // array_[(top_ - 1) & mask_] is the top element in the deque. The deque is
+ // empty when top_ == bottom_. It is full when top_ + 1 == bottom_
+ // (mod mask_ + 1).
+ int top_;
+ int bottom_;
+ int mask_;
bool overflowed_;
- DISALLOW_COPY_AND_ASSIGN(MarkingStack);
+ DISALLOW_COPY_AND_ASSIGN(MarkingDeque);
};
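The deque above is a power-of-two ring buffer: with mask_ = capacity - 1, "& mask_" wraps indices without a modulo, and one cell is sacrificed so the full and empty states stay distinguishable. A self-contained sketch of that core, without the overflow handling (illustrative names):

    #include <cassert>

    template <typename T, int kCapacity>  // kCapacity must be a power of two
    class RingDeque {
     public:
      RingDeque() : top_(0), bottom_(0) {}
      bool IsEmpty() const { return top_ == bottom_; }
      bool IsFull() const { return ((top_ + 1) & kMask) == bottom_; }
      void Push(T value) {     // grows at top_, like PushBlack/PushGrey
        assert(!IsFull());
        array_[top_] = value;
        top_ = (top_ + 1) & kMask;
      }
      T Pop() {                // LIFO: removes the newest element
        assert(!IsEmpty());
        top_ = (top_ - 1) & kMask;
        return array_[top_];
      }
      void Unshift(T value) {  // prepends at bottom_, like UnshiftGrey
        assert(!IsFull());
        bottom_ = (bottom_ - 1) & kMask;
        array_[bottom_] = value;
      }
     private:
      static const int kMask = kCapacity - 1;
      T array_[kCapacity];
      int top_;
      int bottom_;
    };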
-// -------------------------------------------------------------------------
-// Mark-Compact collector
+class SlotsBufferAllocator {
+ public:
+ SlotsBuffer* AllocateBuffer(SlotsBuffer* next_buffer);
+ void DeallocateBuffer(SlotsBuffer* buffer);
+
+ void DeallocateChain(SlotsBuffer** buffer_address);
+};
+
+
+// SlotsBuffer records a sequence of slots that has to be updated
+// after live objects were relocated from evacuation candidates.
+// All slots are either untyped or typed:
+// - Untyped slots are expected to contain a tagged object pointer.
+// They are recorded by an address.
+// - Typed slots are expected to contain an encoded pointer to a heap
+// object where the way of encoding depends on the type of the slot.
+// They are recorded as a pair (SlotType, slot address).
+// We assume that the zero page is never mapped; this allows us to distinguish
+// untyped slots from typed slots during iteration by a simple comparison:
+// if an element of the slots buffer is less than NUMBER_OF_SLOT_TYPES, then
+// it is the first element of a typed slot's pair.
+class SlotsBuffer {
+ public:
+ typedef Object** ObjectSlot;
+
+ explicit SlotsBuffer(SlotsBuffer* next_buffer)
+ : idx_(0), chain_length_(1), next_(next_buffer) {
+ if (next_ != NULL) {
+ chain_length_ = next_->chain_length_ + 1;
+ }
+ }
+
+ ~SlotsBuffer() {
+ }
-class OverflowedObjectsScanner;
+ void Add(ObjectSlot slot) {
+ ASSERT(0 <= idx_ && idx_ < kNumberOfElements);
+ slots_[idx_++] = slot;
+ }
+
+ enum SlotType {
+ EMBEDDED_OBJECT_SLOT,
+ RELOCATED_CODE_OBJECT,
+ CODE_TARGET_SLOT,
+ CODE_ENTRY_SLOT,
+ DEBUG_TARGET_SLOT,
+ JS_RETURN_SLOT,
+ NUMBER_OF_SLOT_TYPES
+ };
+ void UpdateSlots(Heap* heap);
+
+ void UpdateSlotsWithFilter(Heap* heap);
+
+ SlotsBuffer* next() { return next_; }
+
+ static int SizeOfChain(SlotsBuffer* buffer) {
+ if (buffer == NULL) return 0;
+ return static_cast<int>(buffer->idx_ +
+ (buffer->chain_length_ - 1) * kNumberOfElements);
+ }
+
+ inline bool IsFull() {
+ return idx_ == kNumberOfElements;
+ }
+
+ inline bool HasSpaceForTypedSlot() {
+ return idx_ < kNumberOfElements - 1;
+ }
+
+ static void UpdateSlotsRecordedIn(Heap* heap,
+ SlotsBuffer* buffer,
+ bool code_slots_filtering_required) {
+ while (buffer != NULL) {
+ if (code_slots_filtering_required) {
+ buffer->UpdateSlotsWithFilter(heap);
+ } else {
+ buffer->UpdateSlots(heap);
+ }
+ buffer = buffer->next();
+ }
+ }
+
+ enum AdditionMode {
+ FAIL_ON_OVERFLOW,
+ IGNORE_OVERFLOW
+ };
+
+ static bool ChainLengthThresholdReached(SlotsBuffer* buffer) {
+ return buffer != NULL && buffer->chain_length_ >= kChainLengthThreshold;
+ }
+
+ static bool AddTo(SlotsBufferAllocator* allocator,
+ SlotsBuffer** buffer_address,
+ ObjectSlot slot,
+ AdditionMode mode) {
+ SlotsBuffer* buffer = *buffer_address;
+ if (buffer == NULL || buffer->IsFull()) {
+ if (mode == FAIL_ON_OVERFLOW && ChainLengthThresholdReached(buffer)) {
+ allocator->DeallocateChain(buffer_address);
+ return false;
+ }
+ buffer = allocator->AllocateBuffer(buffer);
+ *buffer_address = buffer;
+ }
+ buffer->Add(slot);
+ return true;
+ }
+
+ static bool IsTypedSlot(ObjectSlot slot);
+
+ static bool AddTo(SlotsBufferAllocator* allocator,
+ SlotsBuffer** buffer_address,
+ SlotType type,
+ Address addr,
+ AdditionMode mode);
+
+ static const int kNumberOfElements = 1021;
+
+ private:
+ static const int kChainLengthThreshold = 6;
+
+ intptr_t idx_;
+ intptr_t chain_length_;
+ SlotsBuffer* next_;
+ ObjectSlot slots_[kNumberOfElements];
+};
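Note the arithmetic behind SizeOfChain above: every buffer except the head is full, so the total is idx_ + (chain_length_ - 1) * kNumberOfElements. A tiny standalone check of that formula (hypothetical helper, not part of V8):

    // With kNumberOfElements = 1021, a chain of 3 buffers whose head holds
    // 40 entries records 40 + 2 * 1021 = 2082 slots in total.
    static int SizeOfChainModel(int head_idx, int chain_length,
                                int elements_per_buffer) {
      if (chain_length == 0) return 0;
      return head_idx + (chain_length - 1) * elements_per_buffer;
    }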
+
+
+// -------------------------------------------------------------------------
+// Mark-Compact collector
class MarkCompactCollector {
public:
// Type of functions to compute forwarding addresses of objects in
@@ -134,13 +418,18 @@ class MarkCompactCollector {
// Set the global force_compaction flag, it must be called before Prepare
// to take effect.
- void SetForceCompaction(bool value) {
- force_compaction_ = value;
- }
+ inline void SetFlags(int flags);
+ inline bool PreciseSweepingRequired() {
+ return sweep_precisely_;
+ }
static void Initialize();
+ void CollectEvacuationCandidates(PagedSpace* space);
+
+ void AddEvacuationCandidate(Page* p);
+
// Prepares for GC by resetting relocation info in old and map spaces and
// choosing spaces to compact.
void Prepare(GCTracer* tracer);
@@ -148,23 +437,9 @@ class MarkCompactCollector {
// Performs a global garbage collection.
void CollectGarbage();
- // True if the last full GC performed heap compaction.
- bool HasCompacted() { return compacting_collection_; }
-
- // True after the Prepare phase if the compaction is taking place.
- bool IsCompacting() {
-#ifdef DEBUG
- // For the purposes of asserts we don't want this to keep returning true
- // after the collection is completed.
- return state_ != IDLE && compacting_collection_;
-#else
- return compacting_collection_;
-#endif
- }
+ bool StartCompaction();
- // The count of the number of objects left marked at the end of the last
- // completed full GC (expected to be zero).
- int previous_marked_count() { return previous_marked_count_; }
+ void AbortCompaction();
// During a full GC, there is a stack-allocated GCTracer that is used for
// bookkeeping information. Return a pointer to that tracer.
@@ -179,29 +454,101 @@ class MarkCompactCollector {
// Determine type of object and emit deletion log event.
static void ReportDeleteIfNeeded(HeapObject* obj, Isolate* isolate);
- // Returns size of a possibly marked object.
- static int SizeOfMarkedObject(HeapObject* obj);
-
// Distinguishable invalid map encodings (for single word and multiple words)
// that indicate free regions.
static const uint32_t kSingleFreeEncoding = 0;
static const uint32_t kMultiFreeEncoding = 1;
+ static inline bool IsMarked(Object* obj);
+
inline Heap* heap() const { return heap_; }
CodeFlusher* code_flusher() { return code_flusher_; }
inline bool is_code_flushing_enabled() const { return code_flusher_ != NULL; }
void EnableCodeFlushing(bool enable);
+ enum SweeperType {
+ CONSERVATIVE,
+ LAZY_CONSERVATIVE,
+ PRECISE
+ };
+
+#ifdef DEBUG
+ void VerifyMarkbitsAreClean();
+ static void VerifyMarkbitsAreClean(PagedSpace* space);
+ static void VerifyMarkbitsAreClean(NewSpace* space);
+#endif
+
+ // Sweep a single page from the given space conservatively.
+ // Return a number of reclaimed bytes.
+ static intptr_t SweepConservatively(PagedSpace* space, Page* p);
+
+ INLINE(static bool ShouldSkipEvacuationSlotRecording(Object** anchor)) {
+ return Page::FromAddress(reinterpret_cast<Address>(anchor))->
+ ShouldSkipEvacuationSlotRecording();
+ }
+
+ INLINE(static bool ShouldSkipEvacuationSlotRecording(Object* host)) {
+ return Page::FromAddress(reinterpret_cast<Address>(host))->
+ ShouldSkipEvacuationSlotRecording();
+ }
+
+ INLINE(static bool IsOnEvacuationCandidate(Object* obj)) {
+ return Page::FromAddress(reinterpret_cast<Address>(obj))->
+ IsEvacuationCandidate();
+ }
+
+ void EvictEvacuationCandidate(Page* page) {
+ if (FLAG_trace_fragmentation) {
+ PrintF("Page %p is too popular. Disabling evacuation.\n",
+ reinterpret_cast<void*>(page));
+ }
+
+ // TODO(gc) If all evacuation candidates are too popular we
+ // should stop slots recording entirely.
+ page->ClearEvacuationCandidate();
+
+ // We were not collecting slots on this page that point
+ // to other evacuation candidates, thus we have to
+ // rescan the page after evacuation to discover and update all
+ // pointers to evacuated objects.
+ if (page->owner()->identity() == OLD_DATA_SPACE) {
+ evacuation_candidates_.RemoveElement(page);
+ } else {
+ page->SetFlag(Page::RESCAN_ON_EVACUATION);
+ }
+ }
+
+ void RecordRelocSlot(RelocInfo* rinfo, Object* target);
+ void RecordCodeEntrySlot(Address slot, Code* target);
+
+ INLINE(void RecordSlot(Object** anchor_slot, Object** slot, Object* object));
+
+ void MigrateObject(Address dst,
+ Address src,
+ int size,
+ AllocationSpace to_old_space);
+
+ bool TryPromoteObject(HeapObject* object, int object_size);
+
inline Object* encountered_weak_maps() { return encountered_weak_maps_; }
inline void set_encountered_weak_maps(Object* weak_map) {
encountered_weak_maps_ = weak_map;
}
+ void InvalidateCode(Code* code);
+
+ void ClearMarkbits();
+
private:
MarkCompactCollector();
~MarkCompactCollector();
+ bool MarkInvalidatedCode();
+ void RemoveDeadInvalidatedCode();
+ void ProcessInvalidatedCode(ObjectVisitor* visitor);
+
+
#ifdef DEBUG
enum CollectorState {
IDLE,
@@ -217,23 +564,26 @@ class MarkCompactCollector {
CollectorState state_;
#endif
- // Global flag that forces a compaction.
- bool force_compaction_;
+ // Global flag that forces sweeping to be precise, so we can traverse the
+ // heap.
+ bool sweep_precisely_;
- // Global flag indicating whether spaces were compacted on the last GC.
- bool compacting_collection_;
+ // True if we are collecting slots to perform evacuation from evacuation
+ // candidates.
+ bool compacting_;
- // Global flag indicating whether spaces will be compacted on the next GC.
- bool compact_on_next_gc_;
+ bool was_marked_incrementally_;
- // The number of objects left marked at the end of the last completed full
- // GC (expected to be zero).
- int previous_marked_count_;
+ bool collect_maps_;
// A pointer to the current stack-allocated GC tracer object during a full
// collection (NULL before and after).
GCTracer* tracer_;
+ SlotsBufferAllocator slots_buffer_allocator_;
+
+ SlotsBuffer* migration_slots_buffer_;
+
// Finishes GC, performs heap verification if enabled.
void Finish();
@@ -258,13 +608,13 @@ class MarkCompactCollector {
// Marking operations for objects reachable from roots.
void MarkLiveObjects();
- void MarkUnmarkedObject(HeapObject* obj);
+ void AfterMarking();
- inline void MarkObject(HeapObject* obj) {
- if (!obj->IsMarked()) MarkUnmarkedObject(obj);
- }
+ INLINE(void MarkObject(HeapObject* obj, MarkBit mark_bit));
- inline void SetMark(HeapObject* obj);
+ INLINE(void SetMark(HeapObject* obj, MarkBit mark_bit));
+
+ void ProcessNewlyMarkedObject(HeapObject* obj);
// Creates back pointers for all map transitions, stores them in
// the prototype field. The original prototype pointers are restored
@@ -298,18 +648,18 @@ class MarkCompactCollector {
// Mark objects reachable (transitively) from objects in the marking stack
// or overflowed in the heap.
- void ProcessMarkingStack();
+ void ProcessMarkingDeque();
// Mark objects reachable (transitively) from objects in the marking
// stack. This function empties the marking stack, but may leave
// overflowed objects in the heap, in which case the marking stack's
// overflow flag will be set.
- void EmptyMarkingStack();
+ void EmptyMarkingDeque();
// Refill the marking stack with overflowed objects from the heap. This
// function either leaves the marking stack full or clears the overflow
// flag on the marking stack.
- void RefillMarkingStack();
+ void RefillMarkingDeque();
// After reachable maps have been marked process per context object
// literal map caches removing unmarked entries.
@@ -319,21 +669,16 @@ class MarkCompactCollector {
// heap object.
static bool IsUnmarkedHeapObject(Object** p);
-#ifdef DEBUG
- void UpdateLiveObjectCount(HeapObject* obj);
-#endif
-
- // We sweep the large object space in the same way whether we are
- // compacting or not, because the large object space is never compacted.
- void SweepLargeObjectSpace();
-
- // Test whether a (possibly marked) object is a Map.
- static inline bool SafeIsMap(HeapObject* object);
-
// Map transitions from a live map to a dead map must be killed.
// We replace them with a null descriptor, with the same key.
void ClearNonLiveTransitions();
+ // Marking detaches initial maps from SharedFunctionInfo objects
+ // to make this reference weak. We need to reattach initial maps
+ // after collection. This is done either during the
+ // ClearNonLiveTransitions pass or by calling this function.
+
// Mark all values associated with reachable keys in weak maps encountered
// so far. This might push new object or even new weak maps onto the
// marking stack.
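The weak-map pass described above is an ephemeron fixpoint: a value is kept alive only through a live key, and marking one value can make further keys reachable, so the tables are revisited until nothing new gets marked. A minimal standalone model of that loop, with a hypothetical data layout:

    #include <vector>

    struct WeakEntry {
      bool* key_marked;    // points at the key's mark state
      bool* value_marked;  // points at the value's mark state
    };

    static void ProcessWeakEntries(const std::vector<WeakEntry>& entries) {
      bool changed = true;
      while (changed) {  // marking a value may make another key live
        changed = false;
        for (size_t i = 0; i < entries.size(); ++i) {
          if (*entries[i].key_marked && !*entries[i].value_marked) {
            *entries[i].value_marked = true;
            changed = true;
          }
        }
      }
    }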
@@ -346,164 +691,31 @@ class MarkCompactCollector {
// -----------------------------------------------------------------------
// Phase 2: Sweeping to clear mark bits and free non-live objects for
- // a non-compacting collection, or else computing and encoding
- // forwarding addresses for a compacting collection.
+ // a non-compacting collection.
//
// Before: Live objects are marked and non-live objects are unmarked.
//
- // After: (Non-compacting collection.) Live objects are unmarked,
- // non-live regions have been added to their space's free
- // list.
- //
- // After: (Compacting collection.) The forwarding address of live
- // objects in the paged spaces is encoded in their map word
- // along with their (non-forwarded) map pointer.
- //
- // The forwarding address of live objects in the new space is
- // written to their map word's offset in the inactive
- // semispace.
- //
- // Bookkeeping data is written to the page header of
- // eached paged-space page that contains live objects after
- // compaction:
- //
- // The allocation watermark field is used to track the
- // relocation top address, the address of the first word
- // after the end of the last live object in the page after
- // compaction.
- //
- // The Page::mc_page_index field contains the zero-based index of the
- // page in its space. This word is only used for map space pages, in
- // order to encode the map addresses in 21 bits to free 11
- // bits per map word for the forwarding address.
+ // After: Live objects are unmarked, non-live regions have been added to
+ // their space's free list. The active eden semispace is compacted by
+ // evacuation.
//
- // The Page::mc_first_forwarded field contains the (nonencoded)
- // forwarding address of the first live object in the page.
- //
- // In both the new space and the paged spaces, a linked list
- // of live regions is constructructed (linked through
- // pointers in the non-live region immediately following each
- // live region) to speed further passes of the collector.
-
- // Encodes forwarding addresses of objects in compactable parts of the
- // heap.
- void EncodeForwardingAddresses();
-
- // Encodes the forwarding addresses of objects in new space.
- void EncodeForwardingAddressesInNewSpace();
-
- // Function template to encode the forwarding addresses of objects in
- // paged spaces, parameterized by allocation and non-live processing
- // functions.
- template<AllocationFunction Alloc, ProcessNonLiveFunction ProcessNonLive>
- void EncodeForwardingAddressesInPagedSpace(PagedSpace* space);
-
- // Iterates live objects in a space, passes live objects
- // to a callback function which returns the heap size of the object.
- // Returns the number of live objects iterated.
- int IterateLiveObjects(NewSpace* space, LiveObjectCallback size_f);
- int IterateLiveObjects(PagedSpace* space, LiveObjectCallback size_f);
-
- // Iterates the live objects between a range of addresses, returning the
- // number of live objects.
- int IterateLiveObjectsInRange(Address start, Address end,
- LiveObjectCallback size_func);
// If we are not compacting the heap, we simply sweep the spaces except
// for the large object space, clearing mark bits and adding unmarked
// regions to each space's free list.
void SweepSpaces();
- // -----------------------------------------------------------------------
- // Phase 3: Updating pointers in live objects.
- //
- // Before: Same as after phase 2 (compacting collection).
- //
- // After: All pointers in live objects, including encoded map
- // pointers, are updated to point to their target's new
- // location.
-
- friend class UpdatingVisitor; // helper for updating visited objects
+ void EvacuateNewSpace();
- // Updates pointers in all spaces.
- void UpdatePointers();
+ void EvacuateLiveObjectsFromPage(Page* p);
- // Updates pointers in an object in new space.
- // Returns the heap size of the object.
- int UpdatePointersInNewObject(HeapObject* obj);
+ void EvacuatePages();
- // Updates pointers in an object in old spaces.
- // Returns the heap size of the object.
- int UpdatePointersInOldObject(HeapObject* obj);
+ void EvacuateNewSpaceAndCandidates();
- // Calculates the forwarding address of an object in an old space.
- static Address GetForwardingAddressInOldSpace(HeapObject* obj);
-
- // -----------------------------------------------------------------------
- // Phase 4: Relocating objects.
- //
- // Before: Pointers to live objects are updated to point to their
- // target's new location.
- //
- // After: Objects have been moved to their new addresses.
-
- // Relocates objects in all spaces.
- void RelocateObjects();
-
- // Converts a code object's inline target to addresses, convention from
- // address to target happens in the marking phase.
- int ConvertCodeICTargetToAddress(HeapObject* obj);
-
- // Relocate a map object.
- int RelocateMapObject(HeapObject* obj);
-
- // Relocates an old object.
- int RelocateOldPointerObject(HeapObject* obj);
- int RelocateOldDataObject(HeapObject* obj);
-
- // Relocate a property cell object.
- int RelocateCellObject(HeapObject* obj);
-
- // Helper function.
- inline int RelocateOldNonCodeObject(HeapObject* obj,
- PagedSpace* space);
-
- // Relocates an object in the code space.
- int RelocateCodeObject(HeapObject* obj);
-
- // Copy a new object.
- int RelocateNewObject(HeapObject* obj);
+ void SweepSpace(PagedSpace* space, SweeperType sweeper);
#ifdef DEBUG
- // -----------------------------------------------------------------------
- // Debugging variables, functions and classes
- // Counters used for debugging the marking phase of mark-compact or
- // mark-sweep collection.
-
- // Size of live objects in Heap::to_space_.
- int live_young_objects_size_;
-
- // Size of live objects in Heap::old_pointer_space_.
- int live_old_pointer_objects_size_;
-
- // Size of live objects in Heap::old_data_space_.
- int live_old_data_objects_size_;
-
- // Size of live objects in Heap::code_space_.
- int live_code_objects_size_;
-
- // Size of live objects in Heap::map_space_.
- int live_map_objects_size_;
-
- // Size of live objects in Heap::cell_space_.
- int live_cell_objects_size_;
-
- // Size of live objects in Heap::lo_space_.
- int live_lo_objects_size_;
-
- // Number of live bytes in this collection.
- int live_bytes_;
-
friend class MarkObjectVisitor;
static void VisitObject(HeapObject* obj);
@@ -512,15 +724,19 @@ class MarkCompactCollector {
#endif
Heap* heap_;
- MarkingStack marking_stack_;
+ MarkingDeque marking_deque_;
CodeFlusher* code_flusher_;
Object* encountered_weak_maps_;
+ List<Page*> evacuation_candidates_;
+ List<Code*> invalidated_code_;
+
friend class Heap;
- friend class OverflowedObjectsScanner;
};
+const char* AllocationSpaceName(AllocationSpace space);
+
} } // namespace v8::internal
#endif // V8_MARK_COMPACT_H_
diff --git a/deps/v8/src/math.js b/deps/v8/src/math.js
index b5a6d1811..18492aa05 100644
--- a/deps/v8/src/math.js
+++ b/deps/v8/src/math.js
@@ -189,7 +189,7 @@ function MathSqrt(x) {
// ECMA 262 - 15.8.2.18
function MathTan(x) {
if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
- return %Math_tan(x);
+ return %_MathTan(x);
}
@@ -239,7 +239,7 @@ function SetUpMath() {
// Set up non-enumerable functions of the Math object and
// set their names.
- InstallFunctionsOnHiddenPrototype($Math, DONT_ENUM, $Array(
+ InstallFunctions($Math, DONT_ENUM, $Array(
"random", MathRandom,
"abs", MathAbs,
"acos", MathAcos,
diff --git a/deps/v8/src/messages.cc b/deps/v8/src/messages.cc
index b6ad5ac35..a0793c2df 100644
--- a/deps/v8/src/messages.cc
+++ b/deps/v8/src/messages.cc
@@ -1,5 +1,4 @@
-
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -81,11 +80,11 @@ Handle<JSMessageObject> MessageHandler::MakeMessageObject(
}
Handle<Object> stack_trace_handle = stack_trace.is_null()
- ? FACTORY->undefined_value()
+ ? Handle<Object>::cast(FACTORY->undefined_value())
: Handle<Object>::cast(stack_trace);
Handle<Object> stack_frames_handle = stack_frames.is_null()
- ? FACTORY->undefined_value()
+ ? Handle<Object>::cast(FACTORY->undefined_value())
: Handle<Object>::cast(stack_frames);
Handle<JSMessageObject> message =
@@ -127,7 +126,7 @@ void MessageHandler::ReportMessage(Isolate* isolate,
v8::NeanderObject listener(JSObject::cast(global_listeners.get(i)));
Handle<Foreign> callback_obj(Foreign::cast(listener.get(0)));
v8::MessageCallback callback =
- FUNCTION_CAST<v8::MessageCallback>(callback_obj->address());
+ FUNCTION_CAST<v8::MessageCallback>(callback_obj->foreign_address());
Handle<Object> callback_data(listener.get(1));
{
// Do not allow exceptions to propagate.
@@ -149,12 +148,15 @@ Handle<String> MessageHandler::GetMessage(Handle<Object> data) {
JSFunction::cast(
Isolate::Current()->js_builtins_object()->
GetPropertyNoExceptionThrown(*fmt_str)));
- Object** argv[1] = { data.location() };
+ Handle<Object> argv[] = { data };
bool caught_exception;
Handle<Object> result =
Execution::TryCall(fun,
- Isolate::Current()->js_builtins_object(), 1, argv, &caught_exception);
+ Isolate::Current()->js_builtins_object(),
+ ARRAY_SIZE(argv),
+ argv,
+ &caught_exception);
if (caught_exception || !result->IsString()) {
return FACTORY->LookupAsciiSymbol("<error>");
diff --git a/deps/v8/src/messages.js b/deps/v8/src/messages.js
index a9993af22..5a3f12ee3 100644
--- a/deps/v8/src/messages.js
+++ b/deps/v8/src/messages.js
@@ -83,7 +83,7 @@ function IsNativeErrorObject(obj) {
// objects between script tags in a browser setting.
function ToStringCheckErrorObject(obj) {
if (IsNativeErrorObject(obj)) {
- return %_CallFunction(obj, errorToString);
+ return %_CallFunction(obj, ErrorToString);
} else {
return ToString(obj);
}
@@ -185,18 +185,20 @@ function FormatMessage(message) {
"define_disallowed", ["Cannot define property:", "%0", ", object is not extensible."],
"non_extensible_proto", ["%0", " is not extensible"],
"handler_non_object", ["Proxy.", "%0", " called with non-object as handler"],
- "trap_function_expected", ["Proxy.", "%0", " called with non-function for ", "%1", " trap"],
+ "proto_non_object", ["Proxy.", "%0", " called with non-object as prototype"],
+ "trap_function_expected", ["Proxy.", "%0", " called with non-function for '", "%1", "' trap"],
"handler_trap_missing", ["Proxy handler ", "%0", " has no '", "%1", "' trap"],
"handler_trap_must_be_callable", ["Proxy handler ", "%0", " has non-callable '", "%1", "' trap"],
- "handler_returned_false", ["Proxy handler ", "%0", " returned false for '", "%1", "' trap"],
- "handler_returned_undefined", ["Proxy handler ", "%0", " returned undefined for '", "%1", "' trap"],
- "proxy_prop_not_configurable", ["Trap ", "%1", " of proxy handler ", "%0", " returned non-configurable descriptor for property ", "%2"],
- "proxy_non_object_prop_names", ["Trap ", "%1", " returned non-object ", "%0"],
- "proxy_repeated_prop_name", ["Trap ", "%1", " returned repeated property name ", "%2"],
+ "handler_returned_false", ["Proxy handler ", "%0", " returned false from '", "%1", "' trap"],
+ "handler_returned_undefined", ["Proxy handler ", "%0", " returned undefined from '", "%1", "' trap"],
+ "proxy_prop_not_configurable", ["Proxy handler ", "%0", " returned non-configurable descriptor for property '", "%2", "' from '", "%1", "' trap"],
+ "proxy_non_object_prop_names", ["Trap '", "%1", "' returned non-object ", "%0"],
+ "proxy_repeated_prop_name", ["Trap '", "%1", "' returned repeated property name '", "%2", "'"],
"invalid_weakmap_key", ["Invalid value used as weak map key"],
// RangeError
"invalid_array_length", ["Invalid array length"],
"stack_overflow", ["Maximum call stack size exceeded"],
+ "invalid_time_value", ["Invalid time value"],
// SyntaxError
"unable_to_parse", ["Parse error"],
"invalid_regexp_flags", ["Invalid flags supplied to RegExp constructor '", "%0", "'"],
@@ -204,6 +206,7 @@ function FormatMessage(message) {
"illegal_break", ["Illegal break statement"],
"illegal_continue", ["Illegal continue statement"],
"illegal_return", ["Illegal return statement"],
+ "illegal_let", ["Illegal let declaration outside extended mode"],
"error_loading_debugger", ["Error loading debugger"],
"no_input_to_regexp", ["No input to ", "%0"],
"invalid_json", ["String '", "%0", "' is not valid JSON"],
@@ -240,20 +243,26 @@ function FormatMessage(message) {
"strict_poison_pill", ["'caller', 'callee', and 'arguments' properties may not be accessed on strict mode functions or the arguments objects for calls to them"],
"strict_caller", ["Illegal access to a strict mode caller function."],
"unprotected_let", ["Illegal let declaration in unprotected statement context."],
+ "unprotected_const", ["Illegal const declaration in unprotected statement context."],
"cant_prevent_ext_external_array_elements", ["Cannot prevent extension of an object with external array elements"],
"redef_external_array_element", ["Cannot redefine a property of an object with external array elements"],
];
var messages = { __proto__ : null };
- var desc = new PropertyDescriptor();
- desc.setConfigurable(false);
- desc.setEnumerable(false);
- desc.setWritable(false);
for (var i = 0; i < messagesDictionary.length; i += 2) {
var key = messagesDictionary[i];
var format = messagesDictionary[i + 1];
- ObjectFreeze(format);
- desc.setValue(format);
- DefineOwnProperty(messages, key, desc);
+
+ for (var j = 0; j < format.length; j++) {
+ %IgnoreAttributesAndSetProperty(format, %_NumberToString(j), format[j],
+ DONT_DELETE | READ_ONLY | DONT_ENUM);
+ }
+ %IgnoreAttributesAndSetProperty(format, 'length', format.length,
+ DONT_DELETE | READ_ONLY | DONT_ENUM);
+ %PreventExtensions(format);
+ %IgnoreAttributesAndSetProperty(messages,
+ key,
+ format,
+ DONT_DELETE | DONT_ENUM | READ_ONLY);
}
%PreventExtensions(messages);
%IgnoreAttributesAndSetProperty(builtins, "kMessages",
@@ -386,7 +395,7 @@ function ScriptLocationFromPosition(position,
}
return new SourceLocation(this, position, line, column, start, end);
-};
+}
/**
@@ -416,7 +425,7 @@ function ScriptLocationFromLine(opt_line, opt_column, opt_offset_position) {
// resource.
var column = opt_column || 0;
if (line == 0) {
- column -= this.column_offset
+ column -= this.column_offset;
}
var offset_position = opt_offset_position || 0;
@@ -431,7 +440,8 @@ function ScriptLocationFromLine(opt_line, opt_column, opt_offset_position) {
return null;
}
- return this.locationFromPosition(this.line_ends[offset_line + line - 1] + 1 + column); // line > 0 here.
+ return this.locationFromPosition(
+ this.line_ends[offset_line + line - 1] + 1 + column); // line > 0 here.
}
}
@@ -447,8 +457,10 @@ function ScriptLocationFromLine(opt_line, opt_column, opt_offset_position) {
* invalid
*/
function ScriptSourceSlice(opt_from_line, opt_to_line) {
- var from_line = IS_UNDEFINED(opt_from_line) ? this.line_offset : opt_from_line;
- var to_line = IS_UNDEFINED(opt_to_line) ? this.line_offset + this.lineCount() : opt_to_line
+ var from_line = IS_UNDEFINED(opt_from_line) ? this.line_offset
+ : opt_from_line;
+ var to_line = IS_UNDEFINED(opt_to_line) ? this.line_offset + this.lineCount()
+ : opt_to_line;
// Adjust according to the offset within the resource.
from_line -= this.line_offset;
@@ -468,8 +480,10 @@ function ScriptSourceSlice(opt_from_line, opt_to_line) {
var to_position = to_line == 0 ? 0 : line_ends[to_line - 1] + 1;
// Return a source slice with line numbers re-adjusted to the resource.
- return new SourceSlice(this, from_line + this.line_offset, to_line + this.line_offset,
- from_position, to_position);
+ return new SourceSlice(this,
+ from_line + this.line_offset,
+ to_line + this.line_offset,
+ from_position, to_position);
}
@@ -502,7 +516,7 @@ function ScriptSourceLine(opt_line) {
function ScriptLineCount() {
// Return number of source lines.
return this.line_ends.length;
-};
+}
/**
@@ -567,10 +581,10 @@ SetUpLockedPrototype(Script,
* position : position within the source
* start : position of start of source context (inclusive)
* end : position of end of source context (not inclusive)
- * Source text for the source context is the character interval [start, end[. In
- * most cases end will point to a newline character. It might point just past
- * the final position of the source if the last source line does not end with a
- * newline character.
+ * Source text for the source context is the character interval
+ * [start, end[. In most cases end will point to a newline character.
+ * It might point just past the final position of the source if the last
+ * source line does not end with a newline character.
* @param {Script} script The Script object for which this is a location
* @param {number} position Source position for the location
* @param {number} line The line number for the location
@@ -637,7 +651,7 @@ function SourceLocationRestrict(opt_limit, opt_before) {
this.end = this.start + limit;
}
}
-};
+}
/**
@@ -646,8 +660,11 @@ function SourceLocationRestrict(opt_limit, opt_before) {
* Source text for this location.
*/
function SourceLocationSourceText() {
- return %_CallFunction(this.script.source, this.start, this.end, StringSubstring);
-};
+ return %_CallFunction(this.script.source,
+ this.start,
+ this.end,
+ StringSubstring);
+}
SetUpLockedPrototype(SourceLocation,
@@ -655,7 +672,7 @@ SetUpLockedPrototype(SourceLocation,
$Array(
"restrict", SourceLocationRestrict,
"sourceText", SourceLocationSourceText
- )
+ )
);
@@ -695,7 +712,7 @@ function SourceSliceSourceText() {
this.from_position,
this.to_position,
StringSubstring);
-};
+}
SetUpLockedPrototype(SourceSlice,
$Array("script", "from_line", "to_line", "from_position", "to_position"),
@@ -742,12 +759,8 @@ function DefineOneShotAccessor(obj, name, fun) {
hasBeenSet = true;
value = v;
}
- var desc = { get: getter,
- set: setter,
- enumerable: false,
- configurable: true };
- desc = ToPropertyDescriptor(desc);
- DefineOwnProperty(obj, name, desc, true);
+ %DefineOrRedefineAccessorProperty(obj, name, GETTER, getter, DONT_ENUM);
+ %DefineOrRedefineAccessorProperty(obj, name, SETTER, setter, DONT_ENUM);
}
function CallSite(receiver, fun, pos) {
@@ -758,7 +771,7 @@ function CallSite(receiver, fun, pos) {
function CallSiteGetThis() {
return this.receiver;
-};
+}
function CallSiteGetTypeName() {
var constructor = this.receiver.constructor;
@@ -770,33 +783,33 @@ function CallSiteGetTypeName() {
return %_CallFunction(this.receiver, ObjectToString);
}
return constructorName;
-};
+}
function CallSiteIsToplevel() {
if (this.receiver == null) {
return true;
}
return IS_GLOBAL(this.receiver);
-};
+}
function CallSiteIsEval() {
var script = %FunctionGetScript(this.fun);
return script && script.compilation_type == COMPILATION_TYPE_EVAL;
-};
+}
function CallSiteGetEvalOrigin() {
var script = %FunctionGetScript(this.fun);
return FormatEvalOrigin(script);
-};
+}
function CallSiteGetScriptNameOrSourceURL() {
var script = %FunctionGetScript(this.fun);
return script ? script.nameOrSourceURL() : null;
-};
+}
function CallSiteGetFunction() {
return this.fun;
-};
+}
function CallSiteGetFunctionName() {
// See if the function knows its own name
@@ -812,15 +825,19 @@ function CallSiteGetFunctionName() {
return "eval";
}
return null;
-};
+}
function CallSiteGetMethodName() {
// See if we can find a unique property on the receiver that holds
// this function.
var ownName = this.fun.name;
if (ownName && this.receiver &&
- (%_CallFunction(this.receiver, ownName, ObjectLookupGetter) === this.fun ||
- %_CallFunction(this.receiver, ownName, ObjectLookupSetter) === this.fun ||
+ (%_CallFunction(this.receiver,
+ ownName,
+ ObjectLookupGetter) === this.fun ||
+ %_CallFunction(this.receiver,
+ ownName,
+ ObjectLookupSetter) === this.fun ||
this.receiver[ownName] === this.fun)) {
// To handle DontEnum properties we guess that the method has
// the same name as the function.
@@ -830,7 +847,8 @@ function CallSiteGetMethodName() {
for (var prop in this.receiver) {
if (this.receiver.__lookupGetter__(prop) === this.fun ||
this.receiver.__lookupSetter__(prop) === this.fun ||
- (!this.receiver.__lookupGetter__(prop) && this.receiver[prop] === this.fun)) {
+ (!this.receiver.__lookupGetter__(prop) &&
+ this.receiver[prop] === this.fun)) {
// If we find more than one match bail out to avoid confusion.
if (name) {
return null;
@@ -842,12 +860,12 @@ function CallSiteGetMethodName() {
return name;
}
return null;
-};
+}
function CallSiteGetFileName() {
var script = %FunctionGetScript(this.fun);
return script ? script.name : null;
-};
+}
function CallSiteGetLineNumber() {
if (this.pos == -1) {
@@ -859,7 +877,7 @@ function CallSiteGetLineNumber() {
location = script.locationFromPosition(this.pos, true);
}
return location ? location.line + 1 : null;
-};
+}
function CallSiteGetColumnNumber() {
if (this.pos == -1) {
@@ -871,16 +889,16 @@ function CallSiteGetColumnNumber() {
location = script.locationFromPosition(this.pos, true);
}
return location ? location.column + 1: null;
-};
+}
function CallSiteIsNative() {
var script = %FunctionGetScript(this.fun);
return script ? (script.type == TYPE_NATIVE) : false;
-};
+}
function CallSiteGetPosition() {
return this.pos;
-};
+}
function CallSiteIsConstructor() {
var constructor = this.receiver ? this.receiver.constructor : null;
@@ -888,7 +906,7 @@ function CallSiteIsConstructor() {
return false;
}
return this.fun === constructor;
-};
+}
SetUpLockedPrototype(CallSite, $Array("receiver", "fun", "pos"), $Array(
"getThis", CallSiteGetThis,
@@ -931,12 +949,13 @@ function FormatEvalOrigin(script) {
// eval script originated from "real" source.
if (eval_from_script.name) {
eval_origin += " (" + eval_from_script.name;
- var location = eval_from_script.locationFromPosition(script.eval_from_script_position, true);
+ var location = eval_from_script.locationFromPosition(
+ script.eval_from_script_position, true);
if (location) {
eval_origin += ":" + (location.line + 1);
eval_origin += ":" + (location.column + 1);
}
- eval_origin += ")"
+ eval_origin += ")";
} else {
eval_origin += " (unknown source)";
}
@@ -944,7 +963,7 @@ function FormatEvalOrigin(script) {
}
return eval_origin;
-};
+}
function FormatSourcePosition(frame) {
var fileName;
@@ -953,8 +972,9 @@ function FormatSourcePosition(frame) {
fileLocation = "native";
} else if (frame.isEval()) {
fileName = frame.getScriptNameOrSourceURL();
- if (!fileName)
+ if (!fileName) {
fileLocation = frame.getEvalOrigin();
+ }
} else {
fileName = frame.getFileName();
}
@@ -1063,7 +1083,7 @@ function captureStackTrace(obj, cons_opt) {
DefineOneShotAccessor(obj, 'stack', function (obj) {
return FormatRawStackTrace(obj, raw_stack);
});
-};
+}
function SetUpError() {
@@ -1126,6 +1146,7 @@ function SetUpError() {
return new f(m);
}
});
+ %SetNativeFlag(f);
}
DefineError(function Error() { });
@@ -1143,42 +1164,43 @@ $Error.captureStackTrace = captureStackTrace;
%SetProperty($Error.prototype, 'message', '', DONT_ENUM);
-// Global list of error objects visited during errorToString. This is
+// Global list of error objects visited during ErrorToString. This is
// used to detect cycles in error toString formatting.
const visited_errors = new InternalArray();
const cyclic_error_marker = new $Object();
-function errorToStringDetectCycle(error) {
+function ErrorToStringDetectCycle(error) {
if (!%PushIfAbsent(visited_errors, error)) throw cyclic_error_marker;
try {
var type = error.type;
+ var name = error.name;
+ name = IS_UNDEFINED(name) ? "Error" : TO_STRING_INLINE(name);
+ var message = error.message;
var hasMessage = %_CallFunction(error, "message", ObjectHasOwnProperty);
if (type && !hasMessage) {
- var formatted = FormatMessage(%NewMessageObject(type, error.arguments));
- return error.name + ": " + formatted;
+ message = FormatMessage(%NewMessageObject(type, error.arguments));
}
- var message = hasMessage ? (": " + error.message) : "";
- return error.name + message;
+ message = IS_UNDEFINED(message) ? "" : TO_STRING_INLINE(message);
+ if (name === "") return message;
+ if (message === "") return name;
+ return name + ": " + message;
} finally {
visited_errors.length = visited_errors.length - 1;
}
}
-function errorToString() {
+function ErrorToString() {
if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
throw MakeTypeError("called_on_null_or_undefined",
["Error.prototype.toString"]);
}
- // This helper function is needed because access to properties on
- // the builtins object do not work inside of a catch clause.
- function isCyclicErrorMarker(o) { return o === cyclic_error_marker; }
try {
- return errorToStringDetectCycle(this);
+ return ErrorToStringDetectCycle(this);
} catch(e) {
// If this error message was encountered already return the empty
// string for it instead of recursively formatting it.
- if (isCyclicErrorMarker(e)) {
+ if (e === cyclic_error_marker) {
return '';
}
throw e;
@@ -1186,7 +1208,7 @@ function errorToString() {
}
-InstallFunctions($Error.prototype, DONT_ENUM, ['toString', errorToString]);
+InstallFunctions($Error.prototype, DONT_ENUM, ['toString', ErrorToString]);
// Boilerplate for exceptions for stack overflows. Used from
// Isolate::StackOverflow().
diff --git a/deps/v8/src/mips/assembler-mips-inl.h b/deps/v8/src/mips/assembler-mips-inl.h
index c4c4fd259..2ba9760e2 100644
--- a/deps/v8/src/mips/assembler-mips-inl.h
+++ b/deps/v8/src/mips/assembler-mips-inl.h
@@ -78,7 +78,6 @@ bool Operand::is_reg() const {
}
-
// -----------------------------------------------------------------------------
// RelocInfo.
@@ -117,9 +116,14 @@ int RelocInfo::target_address_size() {
}
-void RelocInfo::set_target_address(Address target) {
+void RelocInfo::set_target_address(Address target, WriteBarrierMode mode) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
Assembler::set_target_address_at(pc_, target);
+ if (mode == UPDATE_WRITE_BARRIER && host() != NULL && IsCodeTarget(rmode_)) {
+ Object* target_code = Code::GetCodeFromTargetAddress(target);
+ host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
+ host(), this, HeapObject::cast(target_code));
+ }
}
@@ -146,9 +150,15 @@ Object** RelocInfo::target_object_address() {
}
-void RelocInfo::set_target_object(Object* target) {
+void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
Assembler::set_target_address_at(pc_, reinterpret_cast<Address>(target));
+ if (mode == UPDATE_WRITE_BARRIER &&
+ host() != NULL &&
+ target->IsHeapObject()) {
+ host()->GetHeap()->incremental_marking()->RecordWrite(
+ host(), &Memory::Object_at(pc_), HeapObject::cast(target));
+ }
}
@@ -176,10 +186,17 @@ JSGlobalPropertyCell* RelocInfo::target_cell() {
}
-void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell) {
+void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell,
+ WriteBarrierMode mode) {
ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
Address address = cell->address() + JSGlobalPropertyCell::kValueOffset;
Memory::Address_at(pc_) = address;
+ if (mode == UPDATE_WRITE_BARRIER && host() != NULL) {
+ // TODO(1550) We are passing NULL as a slot because a cell can never be on
+ // an evacuation candidate.
+ host()->GetHeap()->incremental_marking()->RecordWrite(
+ host(), NULL, cell);
+ }
}
@@ -200,6 +217,11 @@ void RelocInfo::set_call_address(Address target) {
// debug-mips.cc BreakLocationIterator::SetDebugBreakAtReturn(), or
// debug break slot per BreakLocationIterator::SetDebugBreakAtSlot().
Assembler::set_target_address_at(pc_, target);
+ if (host() != NULL) {
+ Object* target_code = Code::GetCodeFromTargetAddress(target);
+ host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
+ host(), this, HeapObject::cast(target_code));
+ }
}
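The MIPS hunks above all follow one incremental-marking pattern: perform the raw store, then, when the barrier mode requires it, tell the marker about the new edge so a black host never ends up pointing at an unscanned white target. A schematic of that shape (names illustrative, not V8's API):

    #include <cstddef>

    // Stand-in for the marker hook; in V8 this greys the target or
    // remembers the slot for rescanning.
    static void NotifyIncrementalMarker(void** slot, void* target) {
      (void)slot;
      (void)target;
    }

    static void WriteReference(void** slot, void* target,
                               bool update_write_barrier) {
      *slot = target;  // the actual store
      if (update_write_barrier && target != NULL) {
        NotifyIncrementalMarker(slot, target);
      }
    }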
@@ -242,12 +264,7 @@ bool RelocInfo::IsPatchedDebugBreakSlotSequence() {
void RelocInfo::Visit(ObjectVisitor* visitor) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
- Object** p = target_object_address();
- Object* orig = *p;
- visitor->VisitPointer(p);
- if (*p != orig) {
- set_target_object(*p);
- }
+ visitor->VisitEmbeddedPointer(this);
} else if (RelocInfo::IsCodeTarget(mode)) {
visitor->VisitCodeTarget(this);
} else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
@@ -257,9 +274,9 @@ void RelocInfo::Visit(ObjectVisitor* visitor) {
#ifdef ENABLE_DEBUGGER_SUPPORT
// TODO(isolates): Get a cached isolate below.
} else if (((RelocInfo::IsJSReturn(mode) &&
- IsPatchedReturnSequence()) ||
- (RelocInfo::IsDebugBreakSlot(mode) &&
- IsPatchedDebugBreakSlotSequence())) &&
+ IsPatchedReturnSequence()) ||
+ (RelocInfo::IsDebugBreakSlot(mode) &&
+ IsPatchedDebugBreakSlotSequence())) &&
Isolate::Current()->debug()->has_break_points()) {
visitor->VisitDebugTarget(this);
#endif
@@ -273,7 +290,7 @@ template<typename StaticVisitor>
void RelocInfo::Visit(Heap* heap) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
- StaticVisitor::VisitPointer(heap, target_object_address());
+ StaticVisitor::VisitEmbeddedPointer(heap, this);
} else if (RelocInfo::IsCodeTarget(mode)) {
StaticVisitor::VisitCodeTarget(heap, this);
} else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
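
Both Visit overloads now hand embedded pointers to the visitor instead of open-coding the read/visit/write-back sequence that the removed lines show. A sketch of what VisitEmbeddedPointer plausibly centralizes, reconstructed from those removed lines (the real hook lives with the other visitor callbacks, not here):

    // Sketch built from the removed lines above; not the actual
    // objects-visiting implementation.
    void ObjectVisitor::VisitEmbeddedPointer(RelocInfo* rinfo) {
      Object** p = rinfo->target_object_address();
      Object* orig = *p;
      VisitPointer(p);                 // may rewrite *p if the target moved
      if (*p != orig) {
        rinfo->set_target_object(*p);  // re-record, triggering the barrier
      }
    }
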
diff --git a/deps/v8/src/mips/assembler-mips.cc b/deps/v8/src/mips/assembler-mips.cc
index e01a0ca70..e933181d4 100644
--- a/deps/v8/src/mips/assembler-mips.cc
+++ b/deps/v8/src/mips/assembler-mips.cc
@@ -74,7 +74,9 @@ static uint64_t CpuFeaturesImpliedByCompiler() {
void CpuFeatures::Probe() {
- ASSERT(!initialized_);
+ unsigned standard_features = (OS::CpuFeaturesImpliedByPlatform() |
+ CpuFeaturesImpliedByCompiler());
+ ASSERT(supported_ == 0 || supported_ == standard_features);
#ifdef DEBUG
initialized_ = true;
#endif
@@ -82,8 +84,7 @@ void CpuFeatures::Probe() {
// Get the features implied by the OS and the compiler settings. This is the
// minimal set of features which is also allowed for generated code in the
// snapshot.
- supported_ |= OS::CpuFeaturesImpliedByPlatform();
- supported_ |= CpuFeaturesImpliedByCompiler();
+ supported_ |= standard_features;
if (Serializer::enabled()) {
// No probing for features if we might serialize (generate snapshot).
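
Computing the platform/compiler baseline before anything else is what makes the new assertion possible: a second Probe() must find either a fresh state or exactly the state the first call produced. Reduced to its control flow (a sketch; the serializer and runtime-detection branches are elided):

    // Reduced sketch of the reordered Probe(); detection branches elided.
    void CpuFeatures::Probe() {
      unsigned standard_features = OS::CpuFeaturesImpliedByPlatform() |
                                   CpuFeaturesImpliedByCompiler();
      // Either this is the first call, or a previous call already settled
      // on exactly the baseline feature set.
      ASSERT(supported_ == 0 || supported_ == standard_features);
      supported_ |= standard_features;
    }
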
@@ -2018,7 +2019,8 @@ void Assembler::dd(uint32_t data) {
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
- RelocInfo rinfo(pc_, rmode, data); // We do not try to reuse pool constants.
+ // We do not try to reuse pool constants.
+ RelocInfo rinfo(pc_, rmode, data, NULL);
if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::DEBUG_BREAK_SLOT) {
// Adjust code for new modes.
ASSERT(RelocInfo::IsDebugBreakSlot(rmode)
@@ -2041,7 +2043,7 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
}
ASSERT(buffer_space() >= kMaxRelocSize); // Too late to grow buffer here.
if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
- RelocInfo reloc_info_with_ast_id(pc_, rmode, RecordedAstId());
+ RelocInfo reloc_info_with_ast_id(pc_, rmode, RecordedAstId(), NULL);
ClearRecordedAstId();
reloc_info_writer.Write(&reloc_info_with_ast_id);
} else {
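
RelocInfo now carries a host Code object so the setters above can reach the heap's incremental marker; at emit time no Code object exists yet, hence the explicit NULL at both construction sites:

    // The constructor now takes the host as a fourth parameter. While the
    // assembler is still emitting, there is no host Code object, so NULL is
    // passed; the host is attached once the Code object has been created.
    RelocInfo rinfo(pc_, rmode, data, NULL);  // host == NULL while emitting
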
diff --git a/deps/v8/src/mips/assembler-mips.h b/deps/v8/src/mips/assembler-mips.h
index 38e9537af..b66ea0d9f 100644
--- a/deps/v8/src/mips/assembler-mips.h
+++ b/deps/v8/src/mips/assembler-mips.h
@@ -302,7 +302,7 @@ const FPURegister f29 = { 29 };
const FPURegister f30 = { 30 };
const FPURegister f31 = { 31 };
-const FPURegister kDoubleRegZero = f28;
+static const FPURegister& kDoubleRegZero = f28;
// FPU (coprocessor 1) control registers.
// Currently only FCSR (#31) is implemented.
diff --git a/deps/v8/src/mips/builtins-mips.cc b/deps/v8/src/mips/builtins-mips.cc
index d77230448..98fd57de7 100644
--- a/deps/v8/src/mips/builtins-mips.cc
+++ b/deps/v8/src/mips/builtins-mips.cc
@@ -88,12 +88,6 @@ static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) {
}
-// This constant has the same value as JSArray::kPreallocatedArrayElements and
-// if JSArray::kPreallocatedArrayElements is changed handling of loop unfolding
-// below should be reconsidered.
-static const int kLoopUnfoldLimit = 4;
-
-
// Allocate an empty JSArray. The allocated array is put into the result
// register. An elements backing store is allocated with size initial_capacity
// and filled with the hole values.
@@ -103,16 +97,19 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
Register scratch1,
Register scratch2,
Register scratch3,
- int initial_capacity,
Label* gc_required) {
- ASSERT(initial_capacity > 0);
+ const int initial_capacity = JSArray::kPreallocatedArrayElements;
+ STATIC_ASSERT(initial_capacity >= 0);
// Load the initial map from the array function.
__ lw(scratch1, FieldMemOperand(array_function,
JSFunction::kPrototypeOrInitialMapOffset));
// Allocate the JSArray object together with space for a fixed array with the
// requested elements.
- int size = JSArray::kSize + FixedArray::SizeFor(initial_capacity);
+ int size = JSArray::kSize;
+ if (initial_capacity > 0) {
+ size += FixedArray::SizeFor(initial_capacity);
+ }
__ AllocateInNewSpace(size,
result,
scratch2,
@@ -131,6 +128,11 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
__ mov(scratch3, zero_reg);
__ sw(scratch3, FieldMemOperand(result, JSArray::kLengthOffset));
+ if (initial_capacity == 0) {
+ __ sw(scratch1, FieldMemOperand(result, JSArray::kElementsOffset));
+ return;
+ }
+
// Calculate the location of the elements array and set elements array member
// of the JSArray.
// result: JSObject
@@ -147,21 +149,31 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
// scratch1: elements array (untagged)
// scratch2: start of next object
__ LoadRoot(scratch3, Heap::kFixedArrayMapRootIndex);
- ASSERT_EQ(0 * kPointerSize, FixedArray::kMapOffset);
+ STATIC_ASSERT(0 * kPointerSize == FixedArray::kMapOffset);
__ sw(scratch3, MemOperand(scratch1));
__ Addu(scratch1, scratch1, kPointerSize);
__ li(scratch3, Operand(Smi::FromInt(initial_capacity)));
- ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
+ STATIC_ASSERT(1 * kPointerSize == FixedArray::kLengthOffset);
__ sw(scratch3, MemOperand(scratch1));
__ Addu(scratch1, scratch1, kPointerSize);
- // Fill the FixedArray with the hole value.
- ASSERT_EQ(2 * kPointerSize, FixedArray::kHeaderSize);
- ASSERT(initial_capacity <= kLoopUnfoldLimit);
+ // Fill the FixedArray with the hole value. Inline the code if short.
+ STATIC_ASSERT(2 * kPointerSize == FixedArray::kHeaderSize);
__ LoadRoot(scratch3, Heap::kTheHoleValueRootIndex);
- for (int i = 0; i < initial_capacity; i++) {
+ static const int kLoopUnfoldLimit = 4;
+ if (initial_capacity <= kLoopUnfoldLimit) {
+ for (int i = 0; i < initial_capacity; i++) {
+ __ sw(scratch3, MemOperand(scratch1, i * kPointerSize));
+ }
+ } else {
+ Label loop, entry;
+ __ Addu(scratch2, scratch1, Operand(initial_capacity * kPointerSize));
+ __ Branch(&entry);
+ __ bind(&loop);
__ sw(scratch3, MemOperand(scratch1));
__ Addu(scratch1, scratch1, kPointerSize);
+ __ bind(&entry);
+ __ Branch(&loop, lt, scratch1, Operand(scratch2));
}
}
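
The fill now adapts to the capacity instead of asserting it is small: up to kLoopUnfoldLimit stores are emitted straight-line, anything larger gets a branch loop. The same decision in ordinary C++ (illustrative only; the stub emits the equivalent MIPS stores):

    // Illustrative C++ version of the adaptive fill above.
    void FillWithHole(Object** slots, int capacity, Object* the_hole) {
      static const int kLoopUnfoldLimit = 4;
      if (capacity <= kLoopUnfoldLimit) {
        for (int i = 0; i < capacity; i++) {
          slots[i] = the_hole;          // emitted as straight-line stores
        }
      } else {
        Object** end = slots + capacity;
        while (slots < end) {
          *slots++ = the_hole;          // emitted as a compact branch loop
        }
      }
    }
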
@@ -177,7 +189,7 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
// register elements_array_storage is scratched.
static void AllocateJSArray(MacroAssembler* masm,
Register array_function, // Array function.
- Register array_size, // As a smi.
+ Register array_size, // As a smi, cannot be 0.
Register result,
Register elements_array_storage,
Register elements_array_end,
@@ -185,31 +197,18 @@ static void AllocateJSArray(MacroAssembler* masm,
Register scratch2,
bool fill_with_hole,
Label* gc_required) {
- Label not_empty, allocated;
-
// Load the initial map from the array function.
__ lw(elements_array_storage,
FieldMemOperand(array_function,
JSFunction::kPrototypeOrInitialMapOffset));
- // Check whether an empty sized array is requested.
- __ Branch(&not_empty, ne, array_size, Operand(zero_reg));
-
- // If an empty array is requested allocate a small elements array anyway. This
- // keeps the code below free of special casing for the empty array.
- int size = JSArray::kSize +
- FixedArray::SizeFor(JSArray::kPreallocatedArrayElements);
- __ AllocateInNewSpace(size,
- result,
- elements_array_end,
- scratch1,
- gc_required,
- TAG_OBJECT);
- __ Branch(&allocated);
+ if (FLAG_debug_code) { // Assert that array size is not zero.
+ __ Assert(
+ ne, "array size is unexpectedly 0", array_size, Operand(zero_reg));
+ }
// Allocate the JSArray object together with space for a FixedArray with the
// requested number of elements.
- __ bind(&not_empty);
STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
__ li(elements_array_end,
(JSArray::kSize + FixedArray::kHeaderSize) / kPointerSize);
@@ -228,7 +227,6 @@ static void AllocateJSArray(MacroAssembler* masm,
// result: JSObject
// elements_array_storage: initial map
// array_size: size of array (smi)
- __ bind(&allocated);
__ sw(elements_array_storage, FieldMemOperand(result, JSObject::kMapOffset));
__ LoadRoot(elements_array_storage, Heap::kEmptyFixedArrayRootIndex);
__ sw(elements_array_storage,
@@ -262,8 +260,6 @@ static void AllocateJSArray(MacroAssembler* masm,
// the actual JSArray has length 0 and the size of the JSArray for non-empty
// JSArrays. The length of a FixedArray is stored as a smi.
STATIC_ASSERT(kSmiTag == 0);
- __ li(at, Operand(Smi::FromInt(JSArray::kPreallocatedArrayElements)));
- __ movz(array_size, at, array_size);
ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
__ sw(array_size, MemOperand(elements_array_storage));
@@ -312,18 +308,18 @@ static void AllocateJSArray(MacroAssembler* masm,
static void ArrayNativeCode(MacroAssembler* masm,
Label* call_generic_code) {
Counters* counters = masm->isolate()->counters();
- Label argc_one_or_more, argc_two_or_more;
+ Label argc_one_or_more, argc_two_or_more, not_empty_array, empty_array;
// Check for array construction with zero arguments or one.
__ Branch(&argc_one_or_more, ne, a0, Operand(zero_reg));
// Handle construction of an empty array.
+ __ bind(&empty_array);
AllocateEmptyJSArray(masm,
a1,
a2,
a3,
t0,
t1,
- JSArray::kPreallocatedArrayElements,
call_generic_code);
__ IncrementCounter(counters->array_function_native(), 1, a3, t0);
// Setup return value, remove receiver from stack and return.
@@ -338,6 +334,12 @@ static void ArrayNativeCode(MacroAssembler* masm,
STATIC_ASSERT(kSmiTag == 0);
__ lw(a2, MemOperand(sp)); // Get the argument from the stack.
+ __ Branch(&not_empty_array, ne, a2, Operand(zero_reg));
+ __ Drop(1); // Adjust stack.
+ __ mov(a0, zero_reg); // Treat this as a call with argc of zero.
+ __ Branch(&empty_array);
+
+ __ bind(&not_empty_array);
__ And(a3, a2, Operand(kIntptrSignBit | kSmiTagMask));
__ Branch(call_generic_code, eq, a3, Operand(zero_reg));
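
With AllocateJSArray no longer special-casing a zero size, the one-argument path redirects `new Array(0)` to the zero-argument entry instead. The dispatch, written as plain control flow (ConstructArrayOneArg and the two allocation helpers are illustrative names):

    // Illustrative control flow of the new one-argument fast path.
    JSArray* ConstructArrayOneArg(Isolate* isolate, Smi* length) {
      if (length->value() == 0) {
        // new Array(0): reuse the zero-argument path, which no longer
        // allocates a preallocated elements store.
        return AllocateEmptyJSArray(isolate);
      }
      // Non-zero length: allocate the array plus a backing store that size.
      return AllocateJSArray(isolate, length->value());
    }
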
@@ -587,10 +589,11 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
__ bind(&convert_argument);
__ push(function); // Preserve the function.
__ IncrementCounter(counters->string_ctor_conversions(), 1, a3, t0);
- __ EnterInternalFrame();
- __ push(v0);
- __ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION);
- __ LeaveInternalFrame();
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ push(v0);
+ __ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION);
+ }
__ pop(function);
__ mov(argument, v0);
__ Branch(&argument_is_string);
@@ -606,10 +609,11 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
// create a string wrapper.
__ bind(&gc_required);
__ IncrementCounter(counters->string_ctor_gc_required(), 1, a3, t0);
- __ EnterInternalFrame();
- __ push(argument);
- __ CallRuntime(Runtime::kNewStringWrapper, 1);
- __ LeaveInternalFrame();
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ push(argument);
+ __ CallRuntime(Runtime::kNewStringWrapper, 1);
+ }
__ Ret();
}
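
The EnterInternalFrame/LeaveInternalFrame pairs throughout this file become FrameScope blocks; the braces are load-bearing because the frame is torn down in the destructor. A minimal sketch of the RAII shape (the real FrameScope also records frame state on the MacroAssembler for the write-barrier code):

    // Minimal sketch of the idea; the real FrameScope does more.
    class FrameScope {
     public:
      FrameScope(MacroAssembler* masm, StackFrame::Type type)
          : masm_(masm), type_(type) {
        masm_->EnterFrame(type_);   // replaces __ EnterInternalFrame()
      }
      ~FrameScope() {
        masm_->LeaveFrame(type_);   // replaces __ LeaveInternalFrame()
      }
     private:
      MacroAssembler* masm_;
      StackFrame::Type type_;
    };
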
@@ -622,13 +626,12 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
// -- sp[...]: constructor arguments
// -----------------------------------
- Label non_function_call;
+ Label slow, non_function_call;
// Check that the function is not a smi.
- __ And(t0, a1, Operand(kSmiTagMask));
- __ Branch(&non_function_call, eq, t0, Operand(zero_reg));
+ __ JumpIfSmi(a1, &non_function_call);
// Check that the function is a JSFunction.
__ GetObjectType(a1, a2, a2);
- __ Branch(&non_function_call, ne, a2, Operand(JS_FUNCTION_TYPE));
+ __ Branch(&slow, ne, a2, Operand(JS_FUNCTION_TYPE));
// Jump to the function-specific construct stub.
__ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
@@ -638,13 +641,21 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
// a0: number of arguments
// a1: called object
+ // a2: object type
+ Label do_call;
+ __ bind(&slow);
+ __ Branch(&non_function_call, ne, a2, Operand(JS_FUNCTION_PROXY_TYPE));
+ __ GetBuiltinEntry(a3, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
+ __ jmp(&do_call);
+
__ bind(&non_function_call);
+ __ GetBuiltinEntry(a3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
+ __ bind(&do_call);
// CALL_NON_FUNCTION expects the non-function constructor as receiver
// (instead of the original receiver from the call site). The receiver is
// stack element argc.
// Set expected number of arguments to zero (not changing a0).
__ mov(a2, zero_reg);
- __ GetBuiltinEntry(a3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
__ SetCallKind(t1, CALL_AS_METHOD);
__ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
RelocInfo::CODE_TARGET);
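
Construct calls now distinguish three callee kinds and pick the builtin entry before a shared tail-jump through the arguments adaptor. The selection, written out (the helper name is illustrative; the Builtins ids are the ones used above):

    // Illustrative form of the builtin selection above. Plain JSFunctions
    // never reach this point; they jump straight to their construct stub.
    Builtins::JavaScript ConstructCallFallbackFor(Object* callee) {
      if (callee->IsJSFunctionProxy()) {
        return Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR;
      }
      return Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR;
    }
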
@@ -667,331 +678,334 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// -----------------------------------
// Enter a construct frame.
- __ EnterConstructFrame();
+ {
+ FrameScope scope(masm, StackFrame::CONSTRUCT);
- // Preserve the two incoming parameters on the stack.
- __ sll(a0, a0, kSmiTagSize); // Tag arguments count.
- __ MultiPushReversed(a0.bit() | a1.bit());
+ // Preserve the two incoming parameters on the stack.
+ __ sll(a0, a0, kSmiTagSize); // Tag arguments count.
+ __ MultiPushReversed(a0.bit() | a1.bit());
- // Use t7 to hold undefined, which is used in several places below.
- __ LoadRoot(t7, Heap::kUndefinedValueRootIndex);
+ // Use t7 to hold undefined, which is used in several places below.
+ __ LoadRoot(t7, Heap::kUndefinedValueRootIndex);
- Label rt_call, allocated;
- // Try to allocate the object without transitioning into C code. If any of the
- // preconditions is not met, the code bails out to the runtime call.
- if (FLAG_inline_new) {
- Label undo_allocation;
+ Label rt_call, allocated;
+ // Try to allocate the object without transitioning into C code. If any of
+ // the preconditions is not met, the code bails out to the runtime call.
+ if (FLAG_inline_new) {
+ Label undo_allocation;
#ifdef ENABLE_DEBUGGER_SUPPORT
- ExternalReference debug_step_in_fp =
- ExternalReference::debug_step_in_fp_address(isolate);
- __ li(a2, Operand(debug_step_in_fp));
- __ lw(a2, MemOperand(a2));
- __ Branch(&rt_call, ne, a2, Operand(zero_reg));
+ ExternalReference debug_step_in_fp =
+ ExternalReference::debug_step_in_fp_address(isolate);
+ __ li(a2, Operand(debug_step_in_fp));
+ __ lw(a2, MemOperand(a2));
+ __ Branch(&rt_call, ne, a2, Operand(zero_reg));
#endif
- // Load the initial map and verify that it is in fact a map.
- // a1: constructor function
- __ lw(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
- __ And(t0, a2, Operand(kSmiTagMask));
- __ Branch(&rt_call, eq, t0, Operand(zero_reg));
- __ GetObjectType(a2, a3, t4);
- __ Branch(&rt_call, ne, t4, Operand(MAP_TYPE));
+ // Load the initial map and verify that it is in fact a map.
+ // a1: constructor function
+ __ lw(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
+ __ JumpIfSmi(a2, &rt_call);
+ __ GetObjectType(a2, a3, t4);
+ __ Branch(&rt_call, ne, t4, Operand(MAP_TYPE));
+
+ // Check that the constructor is not constructing a JSFunction (see
+ // comments in Runtime_NewObject in runtime.cc), in which case the
+ // initial map's instance type would be JS_FUNCTION_TYPE.
+ // a1: constructor function
+ // a2: initial map
+ __ lbu(a3, FieldMemOperand(a2, Map::kInstanceTypeOffset));
+ __ Branch(&rt_call, eq, a3, Operand(JS_FUNCTION_TYPE));
- // Check that the constructor is not constructing a JSFunction (see comments
- // in Runtime_NewObject in runtime.cc). In which case the initial map's
- // instance type would be JS_FUNCTION_TYPE.
- // a1: constructor function
- // a2: initial map
- __ lbu(a3, FieldMemOperand(a2, Map::kInstanceTypeOffset));
- __ Branch(&rt_call, eq, a3, Operand(JS_FUNCTION_TYPE));
-
- if (count_constructions) {
- Label allocate;
- // Decrease generous allocation count.
- __ lw(a3, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- MemOperand constructor_count =
- FieldMemOperand(a3, SharedFunctionInfo::kConstructionCountOffset);
- __ lbu(t0, constructor_count);
- __ Subu(t0, t0, Operand(1));
- __ sb(t0, constructor_count);
- __ Branch(&allocate, ne, t0, Operand(zero_reg));
-
- __ Push(a1, a2);
-
- __ push(a1); // Constructor.
- // The call will replace the stub, so the countdown is only done once.
- __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
-
- __ pop(a2);
- __ pop(a1);
-
- __ bind(&allocate);
- }
-
- // Now allocate the JSObject on the heap.
- // a1: constructor function
- // a2: initial map
- __ lbu(a3, FieldMemOperand(a2, Map::kInstanceSizeOffset));
- __ AllocateInNewSpace(a3, t4, t5, t6, &rt_call, SIZE_IN_WORDS);
+ if (count_constructions) {
+ Label allocate;
+ // Decrease generous allocation count.
+ __ lw(a3, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ MemOperand constructor_count =
+ FieldMemOperand(a3, SharedFunctionInfo::kConstructionCountOffset);
+ __ lbu(t0, constructor_count);
+ __ Subu(t0, t0, Operand(1));
+ __ sb(t0, constructor_count);
+ __ Branch(&allocate, ne, t0, Operand(zero_reg));
+
+ __ Push(a1, a2);
+
+ __ push(a1); // Constructor.
+ // The call will replace the stub, so the countdown is only done once.
+ __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
+
+ __ pop(a2);
+ __ pop(a1);
+
+ __ bind(&allocate);
+ }
- // Allocated the JSObject, now initialize the fields. Map is set to initial
- // map and properties and elements are set to empty fixed array.
- // a1: constructor function
- // a2: initial map
- // a3: object size
- // t4: JSObject (not tagged)
- __ LoadRoot(t6, Heap::kEmptyFixedArrayRootIndex);
- __ mov(t5, t4);
- __ sw(a2, MemOperand(t5, JSObject::kMapOffset));
- __ sw(t6, MemOperand(t5, JSObject::kPropertiesOffset));
- __ sw(t6, MemOperand(t5, JSObject::kElementsOffset));
- __ Addu(t5, t5, Operand(3*kPointerSize));
- ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
- ASSERT_EQ(1 * kPointerSize, JSObject::kPropertiesOffset);
- ASSERT_EQ(2 * kPointerSize, JSObject::kElementsOffset);
-
- // Fill all the in-object properties with appropriate filler.
- // a1: constructor function
- // a2: initial map
- // a3: object size (in words)
- // t4: JSObject (not tagged)
- // t5: First in-object property of JSObject (not tagged)
- __ sll(t0, a3, kPointerSizeLog2);
- __ addu(t6, t4, t0); // End of object.
- ASSERT_EQ(3 * kPointerSize, JSObject::kHeaderSize);
- { Label loop, entry;
+ // Now allocate the JSObject on the heap.
+ // a1: constructor function
+ // a2: initial map
+ __ lbu(a3, FieldMemOperand(a2, Map::kInstanceSizeOffset));
+ __ AllocateInNewSpace(a3, t4, t5, t6, &rt_call, SIZE_IN_WORDS);
+
+ // Allocated the JSObject; now initialize the fields. The map is set to
+ // the initial map, and properties and elements are set to the empty
+ // fixed array.
+ // a1: constructor function
+ // a2: initial map
+ // a3: object size
+ // t4: JSObject (not tagged)
+ __ LoadRoot(t6, Heap::kEmptyFixedArrayRootIndex);
+ __ mov(t5, t4);
+ __ sw(a2, MemOperand(t5, JSObject::kMapOffset));
+ __ sw(t6, MemOperand(t5, JSObject::kPropertiesOffset));
+ __ sw(t6, MemOperand(t5, JSObject::kElementsOffset));
+ __ Addu(t5, t5, Operand(3*kPointerSize));
+ ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
+ ASSERT_EQ(1 * kPointerSize, JSObject::kPropertiesOffset);
+ ASSERT_EQ(2 * kPointerSize, JSObject::kElementsOffset);
+
+ // Fill all the in-object properties with appropriate filler.
+ // a1: constructor function
+ // a2: initial map
+ // a3: object size (in words)
+ // t4: JSObject (not tagged)
+ // t5: First in-object property of JSObject (not tagged)
+ __ sll(t0, a3, kPointerSizeLog2);
+ __ addu(t6, t4, t0); // End of object.
+ ASSERT_EQ(3 * kPointerSize, JSObject::kHeaderSize);
+ __ LoadRoot(t7, Heap::kUndefinedValueRootIndex);
if (count_constructions) {
+ __ lw(a0, FieldMemOperand(a2, Map::kInstanceSizesOffset));
+ __ Ext(a0, a0, Map::kPreAllocatedPropertyFieldsByte * kBitsPerByte,
+ kBitsPerByte);
+ __ sll(t0, a0, kPointerSizeLog2);
+ __ addu(a0, t5, t0);
+ // a0: offset of first field after pre-allocated fields
+ if (FLAG_debug_code) {
+ __ Assert(le, "Unexpected number of pre-allocated property fields.",
+ a0, Operand(t6));
+ }
+ __ InitializeFieldsWithFiller(t5, a0, t7);
// To allow for truncation.
__ LoadRoot(t7, Heap::kOnePointerFillerMapRootIndex);
- } else {
- __ LoadRoot(t7, Heap::kUndefinedValueRootIndex);
}
- __ jmp(&entry);
- __ bind(&loop);
- __ sw(t7, MemOperand(t5, 0));
- __ addiu(t5, t5, kPointerSize);
- __ bind(&entry);
- __ Branch(&loop, Uless, t5, Operand(t6));
- }
-
- // Add the object tag to make the JSObject real, so that we can continue and
- // jump into the continuation code at any time from now on. Any failures
- // need to undo the allocation, so that the heap is in a consistent state
- // and verifiable.
- __ Addu(t4, t4, Operand(kHeapObjectTag));
-
- // Check if a non-empty properties array is needed. Continue with allocated
- // object if not fall through to runtime call if it is.
- // a1: constructor function
- // t4: JSObject
- // t5: start of next object (not tagged)
- __ lbu(a3, FieldMemOperand(a2, Map::kUnusedPropertyFieldsOffset));
- // The field instance sizes contains both pre-allocated property fields and
- // in-object properties.
- __ lw(a0, FieldMemOperand(a2, Map::kInstanceSizesOffset));
- __ And(t6,
- a0,
- Operand(0x000000FF << Map::kPreAllocatedPropertyFieldsByte * 8));
- __ srl(t0, t6, Map::kPreAllocatedPropertyFieldsByte * 8);
- __ Addu(a3, a3, Operand(t0));
- __ And(t6, a0, Operand(0x000000FF << Map::kInObjectPropertiesByte * 8));
- __ srl(t0, t6, Map::kInObjectPropertiesByte * 8);
- __ subu(a3, a3, t0);
-
- // Done if no extra properties are to be allocated.
- __ Branch(&allocated, eq, a3, Operand(zero_reg));
- __ Assert(greater_equal, "Property allocation count failed.",
- a3, Operand(zero_reg));
-
- // Scale the number of elements by pointer size and add the header for
- // FixedArrays to the start of the next object calculation from above.
- // a1: constructor
- // a3: number of elements in properties array
- // t4: JSObject
- // t5: start of next object
- __ Addu(a0, a3, Operand(FixedArray::kHeaderSize / kPointerSize));
- __ AllocateInNewSpace(
- a0,
- t5,
- t6,
- a2,
- &undo_allocation,
- static_cast<AllocationFlags>(RESULT_CONTAINS_TOP | SIZE_IN_WORDS));
-
- // Initialize the FixedArray.
- // a1: constructor
- // a3: number of elements in properties array (un-tagged)
- // t4: JSObject
- // t5: start of next object
- __ LoadRoot(t6, Heap::kFixedArrayMapRootIndex);
- __ mov(a2, t5);
- __ sw(t6, MemOperand(a2, JSObject::kMapOffset));
- __ sll(a0, a3, kSmiTagSize);
- __ sw(a0, MemOperand(a2, FixedArray::kLengthOffset));
- __ Addu(a2, a2, Operand(2 * kPointerSize));
-
- ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
- ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
-
- // Initialize the fields to undefined.
- // a1: constructor
- // a2: First element of FixedArray (not tagged)
- // a3: number of elements in properties array
- // t4: JSObject
- // t5: FixedArray (not tagged)
- __ sll(t3, a3, kPointerSizeLog2);
- __ addu(t6, a2, t3); // End of object.
- ASSERT_EQ(2 * kPointerSize, FixedArray::kHeaderSize);
- { Label loop, entry;
- if (count_constructions) {
- __ LoadRoot(t7, Heap::kUndefinedValueRootIndex);
- } else if (FLAG_debug_code) {
- __ LoadRoot(t8, Heap::kUndefinedValueRootIndex);
- __ Assert(eq, "Undefined value not loaded.", t7, Operand(t8));
+ __ InitializeFieldsWithFiller(t5, t6, t7);
+
+ // Add the object tag to make the JSObject real, so that we can continue
+ // and jump into the continuation code at any time from now on. Any
+ // failures need to undo the allocation, so that the heap is in a
+ // consistent state and verifiable.
+ __ Addu(t4, t4, Operand(kHeapObjectTag));
+
+ // Check if a non-empty properties array is needed. Continue with the
+ // allocated object if not; fall through to the runtime call if it is.
+ // a1: constructor function
+ // t4: JSObject
+ // t5: start of next object (not tagged)
+ __ lbu(a3, FieldMemOperand(a2, Map::kUnusedPropertyFieldsOffset));
+ // The field instance sizes contains both pre-allocated property fields
+ // and in-object properties.
+ __ lw(a0, FieldMemOperand(a2, Map::kInstanceSizesOffset));
+ __ Ext(t6, a0, Map::kPreAllocatedPropertyFieldsByte * kBitsPerByte,
+ kBitsPerByte);
+ __ Addu(a3, a3, Operand(t6));
+ __ Ext(t6, a0, Map::kInObjectPropertiesByte * kBitsPerByte,
+ kBitsPerByte);
+ __ subu(a3, a3, t6);
+
+ // Done if no extra properties are to be allocated.
+ __ Branch(&allocated, eq, a3, Operand(zero_reg));
+ __ Assert(greater_equal, "Property allocation count failed.",
+ a3, Operand(zero_reg));
+
+ // Scale the number of elements by pointer size and add the header for
+ // FixedArrays to the start of the next object calculation from above.
+ // a1: constructor
+ // a3: number of elements in properties array
+ // t4: JSObject
+ // t5: start of next object
+ __ Addu(a0, a3, Operand(FixedArray::kHeaderSize / kPointerSize));
+ __ AllocateInNewSpace(
+ a0,
+ t5,
+ t6,
+ a2,
+ &undo_allocation,
+ static_cast<AllocationFlags>(RESULT_CONTAINS_TOP | SIZE_IN_WORDS));
+
+ // Initialize the FixedArray.
+ // a1: constructor
+ // a3: number of elements in properties array (un-tagged)
+ // t4: JSObject
+ // t5: start of next object
+ __ LoadRoot(t6, Heap::kFixedArrayMapRootIndex);
+ __ mov(a2, t5);
+ __ sw(t6, MemOperand(a2, JSObject::kMapOffset));
+ __ sll(a0, a3, kSmiTagSize);
+ __ sw(a0, MemOperand(a2, FixedArray::kLengthOffset));
+ __ Addu(a2, a2, Operand(2 * kPointerSize));
+
+ ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
+ ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
+
+ // Initialize the fields to undefined.
+ // a1: constructor
+ // a2: First element of FixedArray (not tagged)
+ // a3: number of elements in properties array
+ // t4: JSObject
+ // t5: FixedArray (not tagged)
+ __ sll(t3, a3, kPointerSizeLog2);
+ __ addu(t6, a2, t3); // End of object.
+ ASSERT_EQ(2 * kPointerSize, FixedArray::kHeaderSize);
+ { Label loop, entry;
+ if (count_constructions) {
+ __ LoadRoot(t7, Heap::kUndefinedValueRootIndex);
+ } else if (FLAG_debug_code) {
+ __ LoadRoot(t8, Heap::kUndefinedValueRootIndex);
+ __ Assert(eq, "Undefined value not loaded.", t7, Operand(t8));
+ }
+ __ jmp(&entry);
+ __ bind(&loop);
+ __ sw(t7, MemOperand(a2));
+ __ addiu(a2, a2, kPointerSize);
+ __ bind(&entry);
+ __ Branch(&loop, less, a2, Operand(t6));
}
- __ jmp(&entry);
- __ bind(&loop);
- __ sw(t7, MemOperand(a2));
- __ addiu(a2, a2, kPointerSize);
- __ bind(&entry);
- __ Branch(&loop, less, a2, Operand(t6));
+
+ // Store the initialized FixedArray into the properties field of
+ // the JSObject.
+ // a1: constructor function
+ // t4: JSObject
+ // t5: FixedArray (not tagged)
+ __ Addu(t5, t5, Operand(kHeapObjectTag)); // Add the heap tag.
+ __ sw(t5, FieldMemOperand(t4, JSObject::kPropertiesOffset));
+
+ // Continue with JSObject being successfully allocated.
+ // a1: constructor function
+ // t4: JSObject
+ __ jmp(&allocated);
+
+ // Undo the setting of the new top so that the heap is verifiable. For
+ // example, the map's unused properties potentially do not match the
+ // allocated objects unused properties.
+ // t4: JSObject (previous new top)
+ __ bind(&undo_allocation);
+ __ UndoAllocationInNewSpace(t4, t5);
}
- // Store the initialized FixedArray into the properties field of
- // the JSObject.
+ __ bind(&rt_call);
+ // Allocate the new receiver object using the runtime call.
// a1: constructor function
+ __ push(a1); // Argument for Runtime_NewObject.
+ __ CallRuntime(Runtime::kNewObject, 1);
+ __ mov(t4, v0);
+
+ // Receiver for constructor call allocated.
// t4: JSObject
- // t5: FixedArray (not tagged)
- __ Addu(t5, t5, Operand(kHeapObjectTag)); // Add the heap tag.
- __ sw(t5, FieldMemOperand(t4, JSObject::kPropertiesOffset));
+ __ bind(&allocated);
+ __ push(t4);
+
+ // Push the function and the allocated receiver from the stack.
+ // sp[0]: receiver (newly allocated object)
+ // sp[1]: constructor function
+ // sp[2]: number of arguments (smi-tagged)
+ __ lw(a1, MemOperand(sp, kPointerSize));
+ __ MultiPushReversed(a1.bit() | t4.bit());
- // Continue with JSObject being successfully allocated.
+ // Reload the number of arguments from the stack.
// a1: constructor function
- // a4: JSObject
- __ jmp(&allocated);
-
- // Undo the setting of the new top so that the heap is verifiable. For
- // example, the map's unused properties potentially do not match the
- // allocated objects unused properties.
- // t4: JSObject (previous new top)
- __ bind(&undo_allocation);
- __ UndoAllocationInNewSpace(t4, t5);
- }
+ // sp[0]: receiver
+ // sp[1]: constructor function
+ // sp[2]: receiver
+ // sp[3]: constructor function
+ // sp[4]: number of arguments (smi-tagged)
+ __ lw(a3, MemOperand(sp, 4 * kPointerSize));
- __ bind(&rt_call);
- // Allocate the new receiver object using the runtime call.
- // a1: constructor function
- __ push(a1); // Argument for Runtime_NewObject.
- __ CallRuntime(Runtime::kNewObject, 1);
- __ mov(t4, v0);
-
- // Receiver for constructor call allocated.
- // t4: JSObject
- __ bind(&allocated);
- __ push(t4);
-
- // Push the function and the allocated receiver from the stack.
- // sp[0]: receiver (newly allocated object)
- // sp[1]: constructor function
- // sp[2]: number of arguments (smi-tagged)
- __ lw(a1, MemOperand(sp, kPointerSize));
- __ MultiPushReversed(a1.bit() | t4.bit());
-
- // Reload the number of arguments from the stack.
- // a1: constructor function
- // sp[0]: receiver
- // sp[1]: constructor function
- // sp[2]: receiver
- // sp[3]: constructor function
- // sp[4]: number of arguments (smi-tagged)
- __ lw(a3, MemOperand(sp, 4 * kPointerSize));
+ // Setup pointer to last argument.
+ __ Addu(a2, fp, Operand(StandardFrameConstants::kCallerSPOffset));
- // Setup pointer to last argument.
- __ Addu(a2, fp, Operand(StandardFrameConstants::kCallerSPOffset));
+ // Setup number of arguments for function call below.
+ __ srl(a0, a3, kSmiTagSize);
- // Setup number of arguments for function call below.
- __ srl(a0, a3, kSmiTagSize);
+ // Copy arguments and receiver to the expression stack.
+ // a0: number of arguments
+ // a1: constructor function
+ // a2: address of last argument (caller sp)
+ // a3: number of arguments (smi-tagged)
+ // sp[0]: receiver
+ // sp[1]: constructor function
+ // sp[2]: receiver
+ // sp[3]: constructor function
+ // sp[4]: number of arguments (smi-tagged)
+ Label loop, entry;
+ __ jmp(&entry);
+ __ bind(&loop);
+ __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(t0, a2, Operand(t0));
+ __ lw(t1, MemOperand(t0));
+ __ push(t1);
+ __ bind(&entry);
+ __ Addu(a3, a3, Operand(-2));
+ __ Branch(&loop, greater_equal, a3, Operand(zero_reg));
- // Copy arguments and receiver to the expression stack.
- // a0: number of arguments
- // a1: constructor function
- // a2: address of last argument (caller sp)
- // a3: number of arguments (smi-tagged)
- // sp[0]: receiver
- // sp[1]: constructor function
- // sp[2]: receiver
- // sp[3]: constructor function
- // sp[4]: number of arguments (smi-tagged)
- Label loop, entry;
- __ jmp(&entry);
- __ bind(&loop);
- __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(t0, a2, Operand(t0));
- __ lw(t1, MemOperand(t0));
- __ push(t1);
- __ bind(&entry);
- __ Addu(a3, a3, Operand(-2));
- __ Branch(&loop, greater_equal, a3, Operand(zero_reg));
+ // Call the function.
+ // a0: number of arguments
+ // a1: constructor function
+ if (is_api_function) {
+ __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+ Handle<Code> code =
+ masm->isolate()->builtins()->HandleApiCallConstruct();
+ ParameterCount expected(0);
+ __ InvokeCode(code, expected, expected,
+ RelocInfo::CODE_TARGET, CALL_FUNCTION, CALL_AS_METHOD);
+ } else {
+ ParameterCount actual(a0);
+ __ InvokeFunction(a1, actual, CALL_FUNCTION,
+ NullCallWrapper(), CALL_AS_METHOD);
+ }
- // Call the function.
- // a0: number of arguments
- // a1: constructor function
- if (is_api_function) {
- __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
- Handle<Code> code =
- masm->isolate()->builtins()->HandleApiCallConstruct();
- ParameterCount expected(0);
- __ InvokeCode(code, expected, expected,
- RelocInfo::CODE_TARGET, CALL_FUNCTION, CALL_AS_METHOD);
- } else {
- ParameterCount actual(a0);
- __ InvokeFunction(a1, actual, CALL_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
+ // Pop the function from the stack.
+ // v0: result
+ // sp[0]: constructor function
+ // sp[2]: receiver
+ // sp[3]: constructor function
+ // sp[4]: number of arguments (smi-tagged)
+ __ Pop();
+
+ // Restore context from the frame.
+ __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+
+ // If the result is an object (in the ECMA sense), we should get rid
+ // of the receiver and use the result; see ECMA-262 section 13.2.2-7
+ // on page 74.
+ Label use_receiver, exit;
+
+ // If the result is a smi, it is *not* an object in the ECMA sense.
+ // v0: result
+ // sp[0]: receiver (newly allocated object)
+ // sp[1]: constructor function
+ // sp[2]: number of arguments (smi-tagged)
+ __ JumpIfSmi(v0, &use_receiver);
+
+ // If the type of the result (stored in its map) is less than
+ // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense.
+ __ GetObjectType(v0, a3, a3);
+ __ Branch(&exit, greater_equal, a3, Operand(FIRST_SPEC_OBJECT_TYPE));
+
+ // Throw away the result of the constructor invocation and use the
+ // on-stack receiver as the result.
+ __ bind(&use_receiver);
+ __ lw(v0, MemOperand(sp));
+
+ // Remove receiver from the stack, remove caller arguments, and
+ // return.
+ __ bind(&exit);
+ // v0: result
+ // sp[0]: receiver (newly allocated object)
+ // sp[1]: constructor function
+ // sp[2]: number of arguments (smi-tagged)
+ __ lw(a1, MemOperand(sp, 2 * kPointerSize));
+
+ // Leave construct frame.
}
- // Pop the function from the stack.
- // v0: result
- // sp[0]: constructor function
- // sp[2]: receiver
- // sp[3]: constructor function
- // sp[4]: number of arguments (smi-tagged)
- __ Pop();
-
- // Restore context from the frame.
- __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-
- // If the result is an object (in the ECMA sense), we should get rid
- // of the receiver and use the result; see ECMA-262 section 13.2.2-7
- // on page 74.
- Label use_receiver, exit;
-
- // If the result is a smi, it is *not* an object in the ECMA sense.
- // v0: result
- // sp[0]: receiver (newly allocated object)
- // sp[1]: constructor function
- // sp[2]: number of arguments (smi-tagged)
- __ And(t0, v0, Operand(kSmiTagMask));
- __ Branch(&use_receiver, eq, t0, Operand(zero_reg));
-
- // If the type of the result (stored in its map) is less than
- // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense.
- __ GetObjectType(v0, a3, a3);
- __ Branch(&exit, greater_equal, a3, Operand(FIRST_SPEC_OBJECT_TYPE));
-
- // Throw away the result of the constructor invocation and use the
- // on-stack receiver as the result.
- __ bind(&use_receiver);
- __ lw(v0, MemOperand(sp));
-
- // Remove receiver from the stack, remove caller arguments, and
- // return.
- __ bind(&exit);
- // v0: result
- // sp[0]: receiver (newly allocated object)
- // sp[1]: constructor function
- // sp[2]: number of arguments (smi-tagged)
- __ lw(a1, MemOperand(sp, 2 * kPointerSize));
- __ LeaveConstructFrame();
__ sll(t0, a1, kPointerSizeLog2 - 1);
__ Addu(sp, sp, t0);
__ Addu(sp, sp, kPointerSize);
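
The three instructions above pop the caller's arguments using the smi-tagged count reloaded into a1: since a smi is the value shifted left by one, a single shift by kPointerSizeLog2 - 1 both untags and scales to bytes. In arithmetic form (BytesToPop is an illustrative name):

    // Arithmetic form of the stack adjustment above (32-bit MIPS).
    intptr_t BytesToPop(intptr_t smi_tagged_argc) {
      const int kPointerSizeLog2 = 2;                    // kPointerSize == 4
      intptr_t args = smi_tagged_argc << (kPointerSizeLog2 - 1);
      return args + (1 << kPointerSizeLog2);             // + receiver slot
    }
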
@@ -1031,58 +1045,60 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ mov(cp, zero_reg);
// Enter an internal frame.
- __ EnterInternalFrame();
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
- // Set up the context from the function argument.
- __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+ // Set up the context from the function argument.
+ __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
- // Set up the roots register.
- ExternalReference roots_address =
- ExternalReference::roots_address(masm->isolate());
- __ li(s6, Operand(roots_address));
+ // Set up the roots register.
+ ExternalReference roots_array_start =
+ ExternalReference::roots_array_start(masm->isolate());
+ __ li(s6, Operand(roots_array_start));
- // Push the function and the receiver onto the stack.
- __ Push(a1, a2);
+ // Push the function and the receiver onto the stack.
+ __ Push(a1, a2);
- // Copy arguments to the stack in a loop.
- // a3: argc
- // s0: argv, ie points to first arg
- Label loop, entry;
- __ sll(t0, a3, kPointerSizeLog2);
- __ addu(t2, s0, t0);
- __ b(&entry);
- __ nop(); // Branch delay slot nop.
- // t2 points past last arg.
- __ bind(&loop);
- __ lw(t0, MemOperand(s0)); // Read next parameter.
- __ addiu(s0, s0, kPointerSize);
- __ lw(t0, MemOperand(t0)); // Dereference handle.
- __ push(t0); // Push parameter.
- __ bind(&entry);
- __ Branch(&loop, ne, s0, Operand(t2));
-
- // Initialize all JavaScript callee-saved registers, since they will be seen
- // by the garbage collector as part of handlers.
- __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
- __ mov(s1, t0);
- __ mov(s2, t0);
- __ mov(s3, t0);
- __ mov(s4, t0);
- __ mov(s5, t0);
- // s6 holds the root address. Do not clobber.
- // s7 is cp. Do not init.
-
- // Invoke the code and pass argc as a0.
- __ mov(a0, a3);
- if (is_construct) {
- __ Call(masm->isolate()->builtins()->JSConstructCall());
- } else {
- ParameterCount actual(a0);
- __ InvokeFunction(a1, actual, CALL_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
- }
+ // Copy arguments to the stack in a loop.
+ // a3: argc
+ // s0: argv, i.e. points to the first arg
+ Label loop, entry;
+ __ sll(t0, a3, kPointerSizeLog2);
+ __ addu(t2, s0, t0);
+ __ b(&entry);
+ __ nop(); // Branch delay slot nop.
+ // t2 points past last arg.
+ __ bind(&loop);
+ __ lw(t0, MemOperand(s0)); // Read next parameter.
+ __ addiu(s0, s0, kPointerSize);
+ __ lw(t0, MemOperand(t0)); // Dereference handle.
+ __ push(t0); // Push parameter.
+ __ bind(&entry);
+ __ Branch(&loop, ne, s0, Operand(t2));
- __ LeaveInternalFrame();
+ // Initialize all JavaScript callee-saved registers, since they will be seen
+ // by the garbage collector as part of handlers.
+ __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
+ __ mov(s1, t0);
+ __ mov(s2, t0);
+ __ mov(s3, t0);
+ __ mov(s4, t0);
+ __ mov(s5, t0);
+ // s6 holds the root address. Do not clobber.
+ // s7 is cp. Do not init.
+
+ // Invoke the code and pass argc as a0.
+ __ mov(a0, a3);
+ if (is_construct) {
+ __ Call(masm->isolate()->builtins()->JSConstructCall());
+ } else {
+ ParameterCount actual(a0);
+ __ InvokeFunction(a1, actual, CALL_FUNCTION,
+ NullCallWrapper(), CALL_AS_METHOD);
+ }
+
+ // Leave internal frame.
+ }
__ Jump(ra);
}
@@ -1100,27 +1116,28 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
void Builtins::Generate_LazyCompile(MacroAssembler* masm) {
// Enter an internal frame.
- __ EnterInternalFrame();
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
- // Preserve the function.
- __ push(a1);
- // Push call kind information.
- __ push(t1);
+ // Preserve the function.
+ __ push(a1);
+ // Push call kind information.
+ __ push(t1);
- // Push the function on the stack as the argument to the runtime function.
- __ push(a1);
- // Call the runtime function.
- __ CallRuntime(Runtime::kLazyCompile, 1);
- // Calculate the entry point.
- __ addiu(t9, v0, Code::kHeaderSize - kHeapObjectTag);
+ // Push the function on the stack as the argument to the runtime function.
+ __ push(a1);
+ // Call the runtime function.
+ __ CallRuntime(Runtime::kLazyCompile, 1);
+ // Calculate the entry point.
+ __ addiu(t9, v0, Code::kHeaderSize - kHeapObjectTag);
- // Restore call kind information.
- __ pop(t1);
- // Restore saved function.
- __ pop(a1);
+ // Restore call kind information.
+ __ pop(t1);
+ // Restore saved function.
+ __ pop(a1);
- // Tear down temporary frame.
- __ LeaveInternalFrame();
+ // Tear down temporary frame.
+ }
// Do a tail-call of the compiled function.
__ Jump(t9);
@@ -1129,50 +1146,120 @@ void Builtins::Generate_LazyCompile(MacroAssembler* masm) {
void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
// Enter an internal frame.
- __ EnterInternalFrame();
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
- // Preserve the function.
- __ push(a1);
- // Push call kind information.
- __ push(t1);
+ // Preserve the function.
+ __ push(a1);
+ // Push call kind information.
+ __ push(t1);
- // Push the function on the stack as the argument to the runtime function.
- __ push(a1);
- __ CallRuntime(Runtime::kLazyRecompile, 1);
- // Calculate the entry point.
- __ Addu(t9, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
+ // Push the function on the stack as the argument to the runtime function.
+ __ push(a1);
+ __ CallRuntime(Runtime::kLazyRecompile, 1);
+ // Calculate the entry point.
+ __ Addu(t9, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
- // Restore call kind information.
- __ pop(t1);
- // Restore saved function.
- __ pop(a1);
+ // Restore call kind information.
+ __ pop(t1);
+ // Restore saved function.
+ __ pop(a1);
- // Tear down temporary frame.
- __ LeaveInternalFrame();
+ // Tear down temporary frame.
+ }
// Do a tail-call of the compiled function.
__ Jump(t9);
}
-// These functions are called from C++ but cannot be used in live code.
+static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
+ Deoptimizer::BailoutType type) {
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ // Pass the function and deoptimization type to the runtime system.
+ __ li(a0, Operand(Smi::FromInt(static_cast<int>(type))));
+ __ push(a0);
+ __ CallRuntime(Runtime::kNotifyDeoptimized, 1);
+ }
+
+ // Get the full codegen state from the stack and untag it -> t2.
+ __ lw(t2, MemOperand(sp, 0 * kPointerSize));
+ __ SmiUntag(t2);
+ // Switch on the state.
+ Label with_tos_register, unknown_state;
+ __ Branch(&with_tos_register,
+ ne, t2, Operand(FullCodeGenerator::NO_REGISTERS));
+ __ Addu(sp, sp, Operand(1 * kPointerSize)); // Remove state.
+ __ Ret();
+
+ __ bind(&with_tos_register);
+ __ lw(v0, MemOperand(sp, 1 * kPointerSize));
+ __ Branch(&unknown_state, ne, t2, Operand(FullCodeGenerator::TOS_REG));
+
+ __ Addu(sp, sp, Operand(2 * kPointerSize)); // Remove state.
+ __ Ret();
+
+ __ bind(&unknown_state);
+ __ stop("no cases left");
+}
+
+
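
The helper's switch on the full-codegen state reduces to a slot count: NO_REGISTERS drops only the state smi, TOS_REG reloads v0 first and drops two slots, and anything else stops. As a table (SlotsToDrop is an illustrative name):

    // Illustrative table form of the state switch above.
    int SlotsToDrop(FullCodeGenerator::State state) {
      switch (state) {
        case FullCodeGenerator::NO_REGISTERS: return 1;  // state smi only
        case FullCodeGenerator::TOS_REG:      return 2;  // state + saved TOS
        default: UNREACHABLE();               return 0;  // "no cases left"
      }
    }
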
void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
- __ Abort("Call to unimplemented function in builtins-mips.cc");
+ Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
}
void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
- __ Abort("Call to unimplemented function in builtins-mips.cc");
+ Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
}
void Builtins::Generate_NotifyOSR(MacroAssembler* masm) {
- __ Abort("Call to unimplemented function in builtins-mips.cc");
+ // For now, we are relying on the fact that Runtime::NotifyOSR
+ // doesn't do any garbage collection, which allows us to save/restore
+ // the registers without worrying about which of them contain
+ // pointers. This seems a bit fragile.
+ RegList saved_regs =
+ (kJSCallerSaved | kCalleeSaved | ra.bit() | fp.bit()) & ~sp.bit();
+ __ MultiPush(saved_regs);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kNotifyOSR, 0);
+ }
+ __ MultiPop(saved_regs);
+ __ Ret();
}
void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
- __ Abort("Call to unimplemented function in builtins-mips.cc");
+ CpuFeatures::TryForceFeatureScope scope(VFP3);
+ if (!CpuFeatures::IsSupported(FPU)) {
+ __ Abort("Unreachable code: Cannot optimize without FPU support.");
+ return;
+ }
+
+ // Lookup the function in the JavaScript frame and push it as an
+ // argument to the on-stack replacement function.
+ __ lw(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ push(a0);
+ __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
+ }
+
+ // If the result was -1 it means that we couldn't optimize the
+ // function. Just return and continue in the unoptimized version.
+ __ Ret(eq, v0, Operand(Smi::FromInt(-1)));
+
+ // Untag the AST id and push it on the stack.
+ __ SmiUntag(v0);
+ __ push(v0);
+
+ // Generate the code for doing the frame-to-frame translation using
+ // the deoptimizer infrastructure.
+ Deoptimizer::EntryGenerator generator(masm, Deoptimizer::OSR);
+ generator.Generate();
}
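
The OSR builtin's handshake with the runtime is: compile for on-stack replacement, bail out if the runtime answers Smi::FromInt(-1), otherwise untag the AST id and let the deoptimizer's OSR entry generator build the replacement frame. In outline (the wrapper names are illustrative):

    // Outline of the handshake above; wrapper names are illustrative.
    void TryOnStackReplacement(Isolate* isolate) {
      Object* result = CallCompileForOnStackReplacement(isolate);
      if (result == Smi::FromInt(-1)) return;   // keep running unoptimized
      int ast_id = Smi::cast(result)->value();  // untag the AST id
      GenerateOsrTranslation(ast_id);           // Deoptimizer::OSR entry
    }
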
@@ -1190,19 +1277,19 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
// 2. Get the function to call (passed as receiver) from the stack, check
// if it is a function.
// a0: actual number of arguments
- Label non_function;
+ Label slow, non_function;
__ sll(at, a0, kPointerSizeLog2);
__ addu(at, sp, at);
__ lw(a1, MemOperand(at));
- __ And(at, a1, Operand(kSmiTagMask));
- __ Branch(&non_function, eq, at, Operand(zero_reg));
+ __ JumpIfSmi(a1, &non_function);
__ GetObjectType(a1, a2, a2);
- __ Branch(&non_function, ne, a2, Operand(JS_FUNCTION_TYPE));
+ __ Branch(&slow, ne, a2, Operand(JS_FUNCTION_TYPE));
// 3a. Patch the first argument if necessary when calling a function.
// a0: actual number of arguments
// a1: function
Label shift_arguments;
+ __ li(t0, Operand(0, RelocInfo::NONE)); // Indicate regular JS_FUNCTION.
{ Label convert_to_object, use_global_receiver, patch_receiver;
// Change context eagerly in case we need the global receiver.
__ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
@@ -1210,13 +1297,13 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
// Do not transform the receiver for strict mode functions.
__ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
__ lw(a3, FieldMemOperand(a2, SharedFunctionInfo::kCompilerHintsOffset));
- __ And(t0, a3, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
+ __ And(t3, a3, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
kSmiTagSize)));
- __ Branch(&shift_arguments, ne, t0, Operand(zero_reg));
+ __ Branch(&shift_arguments, ne, t3, Operand(zero_reg));
// Do not transform the receiver for native (Compilerhints already in a3).
- __ And(t0, a3, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
- __ Branch(&shift_arguments, ne, t0, Operand(zero_reg));
+ __ And(t3, a3, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
+ __ Branch(&shift_arguments, ne, t3, Operand(zero_reg));
// Compute the receiver in non-strict mode.
// Load first argument in a2. a2 = -kPointerSize(sp + n_args << 2).
@@ -1238,21 +1325,25 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ Branch(&shift_arguments, ge, a3, Operand(FIRST_SPEC_OBJECT_TYPE));
__ bind(&convert_to_object);
- __ EnterInternalFrame(); // In order to preserve argument count.
- __ sll(a0, a0, kSmiTagSize); // Smi tagged.
- __ push(a0);
-
- __ push(a2);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
- __ mov(a2, v0);
-
- __ pop(a0);
- __ sra(a0, a0, kSmiTagSize); // Un-tag.
- __ LeaveInternalFrame();
- // Restore the function to a1.
+ // Enter an internal frame in order to preserve argument count.
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ sll(a0, a0, kSmiTagSize); // Smi tagged.
+ __ push(a0);
+
+ __ push(a2);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ mov(a2, v0);
+
+ __ pop(a0);
+ __ sra(a0, a0, kSmiTagSize); // Un-tag.
+ // Leave internal frame.
+ }
+ // Restore the function to a1, and the flag to t0.
__ sll(at, a0, kPointerSizeLog2);
__ addu(at, sp, at);
__ lw(a1, MemOperand(at));
+ __ li(t0, Operand(0, RelocInfo::NONE));
__ Branch(&patch_receiver);
// Use the global receiver object from the called function as the
@@ -1273,25 +1364,31 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ Branch(&shift_arguments);
}
- // 3b. Patch the first argument when calling a non-function. The
+ // 3b. Check for function proxy.
+ __ bind(&slow);
+ __ li(t0, Operand(1, RelocInfo::NONE)); // Indicate function proxy.
+ __ Branch(&shift_arguments, eq, a2, Operand(JS_FUNCTION_PROXY_TYPE));
+
+ __ bind(&non_function);
+ __ li(t0, Operand(2, RelocInfo::NONE)); // Indicate non-function.
+
+ // 3c. Patch the first argument when calling a non-function. The
// CALL_NON_FUNCTION builtin expects the non-function callee as
// receiver, so overwrite the first argument which will ultimately
// become the receiver.
// a0: actual number of arguments
// a1: function
- __ bind(&non_function);
- // Restore the function in case it has been modified.
+ // t0: call type (0: JS function, 1: function proxy, 2: non-function)
__ sll(at, a0, kPointerSizeLog2);
__ addu(a2, sp, at);
__ sw(a1, MemOperand(a2, -kPointerSize));
- // Clear a1 to indicate a non-function being called.
- __ mov(a1, zero_reg);
// 4. Shift arguments and return address one slot down on the stack
// (overwriting the original receiver). Adjust argument count to make
// the original first argument the new receiver.
// a0: actual number of arguments
// a1: function
+ // t0: call type (0: JS function, 1: function proxy, 2: non-function)
__ bind(&shift_arguments);
{ Label loop;
// Calculate the copy start address (destination). Copy end address is sp.
@@ -1309,14 +1406,26 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ Pop();
}
- // 5a. Call non-function via tail call to CALL_NON_FUNCTION builtin.
+ // 5a. Call non-function via tail call to CALL_NON_FUNCTION builtin,
+ // or a function proxy via CALL_FUNCTION_PROXY.
// a0: actual number of arguments
// a1: function
- { Label function;
- __ Branch(&function, ne, a1, Operand(zero_reg));
- __ mov(a2, zero_reg); // expected arguments is 0 for CALL_NON_FUNCTION
- __ GetBuiltinEntry(a3, Builtins::CALL_NON_FUNCTION);
+ // t0: call type (0: JS function, 1: function proxy, 2: non-function)
+ { Label function, non_proxy;
+ __ Branch(&function, eq, t0, Operand(zero_reg));
+ // Expected number of arguments is 0 for CALL_NON_FUNCTION.
+ __ mov(a2, zero_reg);
__ SetCallKind(t1, CALL_AS_METHOD);
+ __ Branch(&non_proxy, ne, t0, Operand(1));
+
+ __ push(a1); // Re-add proxy object as additional argument.
+ __ Addu(a0, a0, Operand(1));
+ __ GetBuiltinEntry(a3, Builtins::CALL_FUNCTION_PROXY);
+ __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+ RelocInfo::CODE_TARGET);
+
+ __ bind(&non_proxy);
+ __ GetBuiltinEntry(a3, Builtins::CALL_NON_FUNCTION);
__ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
RelocInfo::CODE_TARGET);
__ bind(&function);
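
Generate_FunctionCall now threads a call-type flag through t0 rather than clobbering a1; its three values select the invocation path at step 5. The encoding:

    // Encoding of the t0 call-type flag used above.
    enum CallType {
      kJSFunction    = 0,  // invoke the function directly
      kFunctionProxy = 1,  // re-push proxy arg, CALL_FUNCTION_PROXY
      kNonFunction   = 2   // patch receiver, CALL_NON_FUNCTION
    };
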
@@ -1350,134 +1459,158 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
const int kRecvOffset = 3 * kPointerSize;
const int kFunctionOffset = 4 * kPointerSize;
- __ EnterInternalFrame();
-
- __ lw(a0, MemOperand(fp, kFunctionOffset)); // Get the function.
- __ push(a0);
- __ lw(a0, MemOperand(fp, kArgsOffset)); // Get the args array.
- __ push(a0);
- // Returns (in v0) number of arguments to copy to stack as Smi.
- __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
-
- // Check the stack for overflow. We are not trying need to catch
- // interruptions (e.g. debug break and preemption) here, so the "real stack
- // limit" is checked.
- Label okay;
- __ LoadRoot(a2, Heap::kRealStackLimitRootIndex);
- // Make a2 the space we have left. The stack might already be overflowed
- // here which will cause a2 to become negative.
- __ subu(a2, sp, a2);
- // Check if the arguments will overflow the stack.
- __ sll(t0, v0, kPointerSizeLog2 - kSmiTagSize);
- __ Branch(&okay, gt, a2, Operand(t0)); // Signed comparison.
-
- // Out of stack space.
- __ lw(a1, MemOperand(fp, kFunctionOffset));
- __ push(a1);
- __ push(v0);
- __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
- // End of stack check.
-
- // Push current limit and index.
- __ bind(&okay);
- __ push(v0); // Limit.
- __ mov(a1, zero_reg); // Initial index.
- __ push(a1);
-
- // Change context eagerly to get the right global object if necessary.
- __ lw(a0, MemOperand(fp, kFunctionOffset));
- __ lw(cp, FieldMemOperand(a0, JSFunction::kContextOffset));
- // Load the shared function info while the function is still in a0.
- __ lw(a1, FieldMemOperand(a0, JSFunction::kSharedFunctionInfoOffset));
-
- // Compute the receiver.
- Label call_to_object, use_global_receiver, push_receiver;
- __ lw(a0, MemOperand(fp, kRecvOffset));
-
- // Do not transform the receiver for strict mode functions.
- __ lw(a2, FieldMemOperand(a1, SharedFunctionInfo::kCompilerHintsOffset));
- __ And(t0, a2, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
- kSmiTagSize)));
- __ Branch(&push_receiver, ne, t0, Operand(zero_reg));
-
- // Do not transform the receiver for native (Compilerhints already in a2).
- __ And(t0, a2, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
- __ Branch(&push_receiver, ne, t0, Operand(zero_reg));
-
- // Compute the receiver in non-strict mode.
- __ And(t0, a0, Operand(kSmiTagMask));
- __ Branch(&call_to_object, eq, t0, Operand(zero_reg));
- __ LoadRoot(a1, Heap::kNullValueRootIndex);
- __ Branch(&use_global_receiver, eq, a0, Operand(a1));
- __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
- __ Branch(&use_global_receiver, eq, a0, Operand(a2));
-
- // Check if the receiver is already a JavaScript object.
- // a0: receiver
- STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
- __ GetObjectType(a0, a1, a1);
- __ Branch(&push_receiver, ge, a1, Operand(FIRST_SPEC_OBJECT_TYPE));
-
- // Convert the receiver to a regular object.
- // a0: receiver
- __ bind(&call_to_object);
- __ push(a0);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
- __ mov(a0, v0); // Put object in a0 to match other paths to push_receiver.
- __ Branch(&push_receiver);
-
- // Use the current global receiver object as the receiver.
- __ bind(&use_global_receiver);
- const int kGlobalOffset =
- Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
- __ lw(a0, FieldMemOperand(cp, kGlobalOffset));
- __ lw(a0, FieldMemOperand(a0, GlobalObject::kGlobalContextOffset));
- __ lw(a0, FieldMemOperand(a0, kGlobalOffset));
- __ lw(a0, FieldMemOperand(a0, GlobalObject::kGlobalReceiverOffset));
-
- // Push the receiver.
- // a0: receiver
- __ bind(&push_receiver);
- __ push(a0);
-
- // Copy all arguments from the array to the stack.
- Label entry, loop;
- __ lw(a0, MemOperand(fp, kIndexOffset));
- __ Branch(&entry);
+ {
+ FrameScope frame_scope(masm, StackFrame::INTERNAL);
+ __ lw(a0, MemOperand(fp, kFunctionOffset)); // Get the function.
+ __ push(a0);
+ __ lw(a0, MemOperand(fp, kArgsOffset)); // Get the args array.
+ __ push(a0);
+ // Returns (in v0) number of arguments to copy to stack as Smi.
+ __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
+
+ // Check the stack for overflow. We are not trying to catch
+ // interruptions (e.g. debug break and preemption) here, so the "real stack
+ // limit" is checked.
+ Label okay;
+ __ LoadRoot(a2, Heap::kRealStackLimitRootIndex);
+ // Make a2 the space we have left. The stack might already be overflowed
+ // here which will cause a2 to become negative.
+ __ subu(a2, sp, a2);
+ // Check if the arguments will overflow the stack.
+ __ sll(t3, v0, kPointerSizeLog2 - kSmiTagSize);
+ __ Branch(&okay, gt, a2, Operand(t3)); // Signed comparison.
+
+ // Out of stack space.
+ __ lw(a1, MemOperand(fp, kFunctionOffset));
+ __ push(a1);
+ __ push(v0);
+ __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
+ // End of stack check.
+
+ // Push current limit and index.
+ __ bind(&okay);
+ __ push(v0); // Limit.
+ __ mov(a1, zero_reg); // Initial index.
+ __ push(a1);
- // Load the current argument from the arguments array and push it to the
- // stack.
- // a0: current argument index
- __ bind(&loop);
- __ lw(a1, MemOperand(fp, kArgsOffset));
- __ push(a1);
- __ push(a0);
+ // Get the receiver.
+ __ lw(a0, MemOperand(fp, kRecvOffset));
- // Call the runtime to access the property in the arguments array.
- __ CallRuntime(Runtime::kGetProperty, 2);
- __ push(v0);
+ // Check that the function is a JS function (otherwise it must be a proxy).
+ Label push_receiver;
+ __ lw(a1, MemOperand(fp, kFunctionOffset));
+ __ GetObjectType(a1, a2, a2);
+ __ Branch(&push_receiver, ne, a2, Operand(JS_FUNCTION_TYPE));
- // Use inline caching to access the arguments.
- __ lw(a0, MemOperand(fp, kIndexOffset));
- __ Addu(a0, a0, Operand(1 << kSmiTagSize));
- __ sw(a0, MemOperand(fp, kIndexOffset));
+ // Change context eagerly to get the right global object if necessary.
+ __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+ // Load the shared function info while the function is still in a1.
+ __ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- // Test if the copy loop has finished copying all the elements from the
- // arguments object.
- __ bind(&entry);
- __ lw(a1, MemOperand(fp, kLimitOffset));
- __ Branch(&loop, ne, a0, Operand(a1));
- // Invoke the function.
- ParameterCount actual(a0);
- __ sra(a0, a0, kSmiTagSize);
- __ lw(a1, MemOperand(fp, kFunctionOffset));
- __ InvokeFunction(a1, actual, CALL_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
-
- // Tear down the internal frame and remove function, receiver and args.
- __ LeaveInternalFrame();
- __ Addu(sp, sp, Operand(3 * kPointerSize));
- __ Ret();
+ // Compute the receiver.
+ // Do not transform the receiver for strict mode functions.
+ Label call_to_object, use_global_receiver;
+ __ lw(a2, FieldMemOperand(a2, SharedFunctionInfo::kCompilerHintsOffset));
+ __ And(t3, a2, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
+ kSmiTagSize)));
+ __ Branch(&push_receiver, ne, t3, Operand(zero_reg));
+
+ // Do not transform the receiver for natives (compiler hints already in a2).
+ __ And(t3, a2, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
+ __ Branch(&push_receiver, ne, t3, Operand(zero_reg));
+
+ // Compute the receiver in non-strict mode.
+ __ JumpIfSmi(a0, &call_to_object);
+ __ LoadRoot(a1, Heap::kNullValueRootIndex);
+ __ Branch(&use_global_receiver, eq, a0, Operand(a1));
+ __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
+ __ Branch(&use_global_receiver, eq, a0, Operand(a2));
+
+ // Check if the receiver is already a JavaScript object.
+ // a0: receiver
+ STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+ __ GetObjectType(a0, a1, a1);
+ __ Branch(&push_receiver, ge, a1, Operand(FIRST_SPEC_OBJECT_TYPE));
+
+ // Convert the receiver to a regular object.
+ // a0: receiver
+ __ bind(&call_to_object);
+ __ push(a0);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ mov(a0, v0); // Put object in a0 to match other paths to push_receiver.
+ __ Branch(&push_receiver);
+
+ // Use the current global receiver object as the receiver.
+ __ bind(&use_global_receiver);
+ const int kGlobalOffset =
+ Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
+ __ lw(a0, FieldMemOperand(cp, kGlobalOffset));
+ __ lw(a0, FieldMemOperand(a0, GlobalObject::kGlobalContextOffset));
+ __ lw(a0, FieldMemOperand(a0, kGlobalOffset));
+ __ lw(a0, FieldMemOperand(a0, GlobalObject::kGlobalReceiverOffset));
+
+ // Push the receiver.
+ // a0: receiver
+ __ bind(&push_receiver);
+ __ push(a0);
+
+ // Copy all arguments from the array to the stack.
+ Label entry, loop;
+ __ lw(a0, MemOperand(fp, kIndexOffset));
+ __ Branch(&entry);
+
+ // Load the current argument from the arguments array and push it to the
+ // stack.
+ // a0: current argument index
+ __ bind(&loop);
+ __ lw(a1, MemOperand(fp, kArgsOffset));
+ __ push(a1);
+ __ push(a0);
+
+ // Call the runtime to access the property in the arguments array.
+ __ CallRuntime(Runtime::kGetProperty, 2);
+ __ push(v0);
+
+ // Use inline caching to access the arguments.
+ __ lw(a0, MemOperand(fp, kIndexOffset));
+ __ Addu(a0, a0, Operand(1 << kSmiTagSize));
+ __ sw(a0, MemOperand(fp, kIndexOffset));
+
+ // Test if the copy loop has finished copying all the elements from the
+ // arguments object.
+ __ bind(&entry);
+ __ lw(a1, MemOperand(fp, kLimitOffset));
+ __ Branch(&loop, ne, a0, Operand(a1));
+
+ // Invoke the function.
+ Label call_proxy;
+ ParameterCount actual(a0);
+ __ sra(a0, a0, kSmiTagSize);
+ __ lw(a1, MemOperand(fp, kFunctionOffset));
+ __ GetObjectType(a1, a2, a2);
+ __ Branch(&call_proxy, ne, a2, Operand(JS_FUNCTION_TYPE));
+
+ __ InvokeFunction(a1, actual, CALL_FUNCTION,
+ NullCallWrapper(), CALL_AS_METHOD);
+
+ frame_scope.GenerateLeaveFrame();
+ __ Ret(USE_DELAY_SLOT);
+ __ Addu(sp, sp, Operand(3 * kPointerSize)); // In delay slot.
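+ // (With USE_DELAY_SLOT the Addu above is emitted into the return's branch
+ // delay slot, so the three words are popped before the caller resumes.)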
+
+ // Invoke the function proxy.
+ __ bind(&call_proxy);
+ __ push(a1); // Add function proxy as last argument.
+ __ Addu(a0, a0, Operand(1));
+ __ li(a2, Operand(0, RelocInfo::NONE));
+ __ SetCallKind(t1, CALL_AS_METHOD);
+ __ GetBuiltinEntry(a3, Builtins::CALL_FUNCTION_PROXY);
+ __ Call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+ RelocInfo::CODE_TARGET);
+ // Tear down the internal frame and remove function, receiver and args.
+ }
+
+ __ Ret(USE_DELAY_SLOT);
+ __ Addu(sp, sp, Operand(3 * kPointerSize)); // In delay slot.
}
diff --git a/deps/v8/src/mips/code-stubs-mips.cc b/deps/v8/src/mips/code-stubs-mips.cc
index 521b8e58f..e7dda3fae 100644
--- a/deps/v8/src/mips/code-stubs-mips.cc
+++ b/deps/v8/src/mips/code-stubs-mips.cc
@@ -100,9 +100,9 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) {
&gc,
TAG_OBJECT);
- int map_index = strict_mode_ == kStrictMode
- ? Context::STRICT_MODE_FUNCTION_MAP_INDEX
- : Context::FUNCTION_MAP_INDEX;
+ int map_index = (language_mode_ == CLASSIC_MODE)
+ ? Context::FUNCTION_MAP_INDEX
+ : Context::STRICT_MODE_FUNCTION_MAP_INDEX;
// Compute the function map in the current global context and set that
// as the map of the allocated object.
@@ -190,6 +190,71 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
}
+void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
+ // Stack layout on entry:
+ //
+ // [sp]: function.
+ // [sp + kPointerSize]: serialized scope info
+
+ // Try to allocate the context in new space.
+ Label gc;
+ int length = slots_ + Context::MIN_CONTEXT_SLOTS;
+ __ AllocateInNewSpace(FixedArray::SizeFor(length),
+ v0, a1, a2, &gc, TAG_OBJECT);
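+ // (FixedArray::SizeFor(length) is FixedArray::kHeaderSize plus
+ // length * kPointerSize; the context is laid out as one flat FixedArray.)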
+
+ // Load the function from the stack.
+ __ lw(a3, MemOperand(sp, 0));
+
+ // Load the serialized scope info from the stack.
+ __ lw(a1, MemOperand(sp, 1 * kPointerSize));
+
+ // Set up the object header.
+ __ LoadRoot(a2, Heap::kBlockContextMapRootIndex);
+ __ sw(a2, FieldMemOperand(v0, HeapObject::kMapOffset));
+ __ li(a2, Operand(Smi::FromInt(length)));
+ __ sw(a2, FieldMemOperand(v0, FixedArray::kLengthOffset));
+
+ // If this block context is nested in the global context, we get a smi
+ // sentinel instead of a function. The block context should get the
+ // canonical empty function of the global context as its closure, which
+ // we still have to look up.
+ Label after_sentinel;
+ __ JumpIfNotSmi(a3, &after_sentinel);
+ if (FLAG_debug_code) {
+ const char* message = "Expected 0 as a Smi sentinel";
+ __ Assert(eq, message, a3, Operand(zero_reg));
+ }
+ __ lw(a3, GlobalObjectOperand());
+ __ lw(a3, FieldMemOperand(a3, GlobalObject::kGlobalContextOffset));
+ __ lw(a3, ContextOperand(a3, Context::CLOSURE_INDEX));
+ __ bind(&after_sentinel);
+
+ // Set up the fixed slots.
+ __ sw(a3, ContextOperand(v0, Context::CLOSURE_INDEX));
+ __ sw(cp, ContextOperand(v0, Context::PREVIOUS_INDEX));
+ __ sw(a1, ContextOperand(v0, Context::EXTENSION_INDEX));
+
+ // Copy the global object from the previous context.
+ __ lw(a1, ContextOperand(cp, Context::GLOBAL_INDEX));
+ __ sw(a1, ContextOperand(v0, Context::GLOBAL_INDEX));
+
+ // Initialize the rest of the slots to the hole value.
+ __ LoadRoot(a1, Heap::kTheHoleValueRootIndex);
+ for (int i = 0; i < slots_; i++) {
+ __ sw(a1, ContextOperand(v0, i + Context::MIN_CONTEXT_SLOTS));
+ }
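+ // (slots_ is fixed per stub instance, so this C++ loop unrolls into
+ // straight-line stores at stub-generation time.)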
+
+ // Remove the on-stack argument and return.
+ __ mov(cp, v0);
+ __ Addu(sp, sp, Operand(2 * kPointerSize));
+ __ Ret();
+
+ // Need to collect. Call into runtime system.
+ __ bind(&gc);
+ __ TailCallRuntime(Runtime::kPushBlockContext, 2, 1);
+}
+
+
void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
// Stack layout on entry:
// [sp]: constant elements.
@@ -197,7 +262,12 @@ void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
// [sp + (2 * kPointerSize)]: literals array.
// All sizes here are multiples of kPointerSize.
- int elements_size = (length_ > 0) ? FixedArray::SizeFor(length_) : 0;
+ int elements_size = 0;
+ if (length_ > 0) {
+ elements_size = mode_ == CLONE_DOUBLE_ELEMENTS
+ ? FixedDoubleArray::SizeFor(length_)
+ : FixedArray::SizeFor(length_);
+ }
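+ // (A FixedDoubleArray holds unboxed 8-byte doubles, so its per-element
+ // size differs from a FixedArray of tagged pointers.)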
int size = JSArray::kSize + elements_size;
// Load boilerplate object into r3 and check if we need to create a
@@ -218,6 +288,9 @@ void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
if (mode_ == CLONE_ELEMENTS) {
message = "Expected (writable) fixed array";
expected_map_index = Heap::kFixedArrayMapRootIndex;
+ } else if (mode_ == CLONE_DOUBLE_ELEMENTS) {
+ message = "Expected (writable) fixed double array";
+ expected_map_index = Heap::kFixedDoubleArrayMapRootIndex;
} else {
ASSERT(mode_ == COPY_ON_WRITE_ELEMENTS);
message = "Expected copy-on-write fixed array";
@@ -257,6 +330,7 @@ void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
__ sw(a2, FieldMemOperand(v0, JSArray::kElementsOffset));
// Copy the elements array.
+ ASSERT((elements_size % kPointerSize) == 0);
__ CopyFields(a2, a3, a1.bit(), elements_size / kPointerSize);
}
@@ -615,7 +689,7 @@ void FloatingPointHelper::ConvertIntToDouble(MacroAssembler* masm,
void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm,
Register object,
Destination destination,
- FPURegister double_dst,
+ DoubleRegister double_dst,
Register dst1,
Register dst2,
Register heap_number_map,
@@ -651,25 +725,16 @@ void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm,
// Load the double value.
__ ldc1(double_dst, FieldMemOperand(object, HeapNumber::kValueOffset));
- // NOTE: ARM uses a MacroAssembler function here (EmitVFPTruncate).
- // On MIPS a lot of things cannot be implemented the same way so right
- // now it makes a lot more sense to just do things manually.
-
- // Save FCSR.
- __ cfc1(scratch1, FCSR);
- // Disable FPU exceptions.
- __ ctc1(zero_reg, FCSR);
- __ trunc_w_d(single_scratch, double_dst);
- // Retrieve FCSR.
- __ cfc1(scratch2, FCSR);
- // Restore FCSR.
- __ ctc1(scratch1, FCSR);
-
- // Check for inexact conversion or exception.
- __ And(scratch2, scratch2, kFCSRFlagMask);
+ Register except_flag = scratch2;
+ __ EmitFPUTruncate(kRoundToZero,
+ single_scratch,
+ double_dst,
+ scratch1,
+ except_flag,
+ kCheckForInexactConversion);
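+ // EmitFPUTruncate replaces the former hand-written sequence (save FCSR,
+ // disable FPU exceptions, trunc_w_d, restore FCSR) and leaves the FCSR
+ // exception bits in except_flag.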
// Jump to not_int32 if the operation did not succeed.
- __ Branch(not_int32, ne, scratch2, Operand(zero_reg));
+ __ Branch(not_int32, ne, except_flag, Operand(zero_reg));
if (destination == kCoreRegisters) {
__ Move(dst1, dst2, double_dst);
@@ -706,7 +771,7 @@ void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm,
Register scratch1,
Register scratch2,
Register scratch3,
- FPURegister double_scratch,
+ DoubleRegister double_scratch,
Label* not_int32) {
ASSERT(!dst.is(object));
ASSERT(!scratch1.is(object) && !scratch2.is(object) && !scratch3.is(object));
@@ -735,27 +800,19 @@ void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm,
// Load the double value.
__ ldc1(double_scratch, FieldMemOperand(object, HeapNumber::kValueOffset));
- // NOTE: ARM uses a MacroAssembler function here (EmitVFPTruncate).
- // On MIPS a lot of things cannot be implemented the same way so right
- // now it makes a lot more sense to just do things manually.
-
- // Save FCSR.
- __ cfc1(scratch1, FCSR);
- // Disable FPU exceptions.
- __ ctc1(zero_reg, FCSR);
- __ trunc_w_d(double_scratch, double_scratch);
- // Retrieve FCSR.
- __ cfc1(scratch2, FCSR);
- // Restore FCSR.
- __ ctc1(scratch1, FCSR);
-
- // Check for inexact conversion or exception.
- __ And(scratch2, scratch2, kFCSRFlagMask);
+ FPURegister single_scratch = double_scratch.low();
+ Register except_flag = scratch2;
+ __ EmitFPUTruncate(kRoundToZero,
+ single_scratch,
+ double_scratch,
+ scratch1,
+ except_flag,
+ kCheckForInexactConversion);
// Jump to not_int32 if the operation did not succeed.
- __ Branch(not_int32, ne, scratch2, Operand(zero_reg));
+ __ Branch(not_int32, ne, except_flag, Operand(zero_reg));
// Get the result in the destination register.
- __ mfc1(dst, double_scratch);
+ __ mfc1(dst, single_scratch);
} else {
// Load the double value in the destination registers.
@@ -881,9 +938,11 @@ void FloatingPointHelper::CallCCodeForDoubleOperation(
__ Move(f12, a0, a1);
__ Move(f14, a2, a3);
}
- // Call C routine that may not cause GC or other trouble.
- __ CallCFunction(ExternalReference::double_fp_operation(op, masm->isolate()),
- 4);
+ {
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ CallCFunction(
+ ExternalReference::double_fp_operation(op, masm->isolate()), 0, 2);
+ }
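+ // (CallCFunction now takes separate counts for integer and double
+ // arguments: 0 integer arguments and 2 doubles here, replacing the old
+ // "two doubles count as 4 arguments" convention.)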
// Store answer in the overwritable heap number.
if (!IsMipsSoftFloatABI) {
CpuFeatures::Scope scope(FPU);
@@ -901,6 +960,35 @@ void FloatingPointHelper::CallCCodeForDoubleOperation(
}
+bool WriteInt32ToHeapNumberStub::IsPregenerated() {
+ // These variants are compiled ahead of time. See next method.
+ if (the_int_.is(a1) &&
+ the_heap_number_.is(v0) &&
+ scratch_.is(a2) &&
+ sign_.is(a3)) {
+ return true;
+ }
+ if (the_int_.is(a2) &&
+ the_heap_number_.is(v0) &&
+ scratch_.is(a3) &&
+ sign_.is(a0)) {
+ return true;
+ }
+ // Other register combinations are generated as and when they are needed,
+ // so it is unsafe to call them from stubs (we can't generate a stub while
+ // we are generating a stub).
+ return false;
+}
+
+
+void WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime() {
+ WriteInt32ToHeapNumberStub stub1(a1, v0, a2, a3);
+ WriteInt32ToHeapNumberStub stub2(a2, v0, a3, a0);
+ stub1.GetCode()->set_is_pregenerated(true);
+ stub2.GetCode()->set_is_pregenerated(true);
+}
+
+
// See comment for class, this does NOT work for int32's that are in Smi range.
void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
Label max_negative_int;
@@ -1068,8 +1156,7 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm,
(lhs.is(a1) && rhs.is(a0)));
Label lhs_is_smi;
- __ And(t0, lhs, Operand(kSmiTagMask));
- __ Branch(&lhs_is_smi, eq, t0, Operand(zero_reg));
+ __ JumpIfSmi(lhs, &lhs_is_smi);
// Rhs is a Smi.
// Check whether the non-smi is a heap number.
__ GetObjectType(lhs, t4, t4);
@@ -1258,7 +1345,7 @@ static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc) {
if (!CpuFeatures::IsSupported(FPU)) {
__ push(ra);
- __ PrepareCallCFunction(4, t4); // Two doubles count as 4 arguments.
+ __ PrepareCallCFunction(0, 2, t4);
if (!IsMipsSoftFloatABI) {
// We are not using MIPS FPU instructions, and parameters for the runtime
// function call are prepared in a0-a3 registers, but the function we are
@@ -1268,19 +1355,17 @@ static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc) {
__ Move(f12, a0, a1);
__ Move(f14, a2, a3);
}
- __ CallCFunction(ExternalReference::compare_doubles(masm->isolate()), 4);
+
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ CallCFunction(ExternalReference::compare_doubles(masm->isolate()),
+ 0, 2);
__ pop(ra); // Because this function returns int, result is in v0.
__ Ret();
} else {
CpuFeatures::Scope scope(FPU);
Label equal, less_than;
- __ c(EQ, D, f12, f14);
- __ bc1t(&equal);
- __ nop();
-
- __ c(OLT, D, f12, f14);
- __ bc1t(&less_than);
- __ nop();
+ __ BranchF(&equal, NULL, eq, f12, f14);
+ __ BranchF(&less_than, NULL, lt, f12, f14);
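+ // BranchF wraps the former hand-written c(cond, D, ...) / bc1t / nop
+ // sequence and handles the branch delay slot itself.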
// Not equal, not less, not NaN, must be greater.
__ li(v0, Operand(GREATER));
@@ -1303,7 +1388,7 @@ static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
// If either operand is a JS object or an oddball value, then they are
// not equal since their pointers are different.
// There is no test for undetectability in strict equality.
- STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
+ STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
Label first_non_object;
// Get the type of the first operand into a2 and compare it with
// FIRST_SPEC_OBJECT_TYPE.
@@ -1473,9 +1558,7 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
__ JumpIfSmi(probe, not_found);
__ ldc1(f12, FieldMemOperand(object, HeapNumber::kValueOffset));
__ ldc1(f14, FieldMemOperand(probe, HeapNumber::kValueOffset));
- __ c(EQ, D, f12, f14);
- __ bc1t(&load_result_from_cache);
- __ nop(); // bc1t() requires explicit fill of branch delay slot.
+ __ BranchF(&load_result_from_cache, NULL, eq, f12, f14);
__ Branch(not_found);
} else {
// Note that there is no cache check for non-FPU case, even though
@@ -1591,9 +1674,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
__ li(t2, Operand(EQUAL));
// Check if either rhs or lhs is NaN.
- __ c(UN, D, f12, f14);
- __ bc1t(&nan);
- __ nop();
+ __ BranchF(NULL, &nan, eq, f12, f14);
// Check if LESS condition is satisfied. If true, move conditionally
// result to v0.
@@ -1711,88 +1792,144 @@ void CompareStub::Generate(MacroAssembler* masm) {
}
-// The stub returns zero for false, and a non-zero value for true.
+// The stub expects its argument in the tos_ register and returns its result in
+// it, too: zero for false, and a non-zero value for true.
void ToBooleanStub::Generate(MacroAssembler* masm) {
// This stub uses FPU instructions.
CpuFeatures::Scope scope(FPU);
- Label false_result;
- Label not_heap_number;
- Register scratch0 = t5.is(tos_) ? t3 : t5;
-
- // undefined -> false
- __ LoadRoot(scratch0, Heap::kUndefinedValueRootIndex);
- __ Branch(&false_result, eq, tos_, Operand(scratch0));
-
- // Boolean -> its value
- __ LoadRoot(scratch0, Heap::kFalseValueRootIndex);
- __ Branch(&false_result, eq, tos_, Operand(scratch0));
- __ LoadRoot(scratch0, Heap::kTrueValueRootIndex);
- // "tos_" is a register and contains a non-zero value. Hence we implicitly
- // return true if the equal condition is satisfied.
- __ Ret(eq, tos_, Operand(scratch0));
-
- // Smis: 0 -> false, all other -> true
- __ And(scratch0, tos_, tos_);
- __ Branch(&false_result, eq, scratch0, Operand(zero_reg));
- __ And(scratch0, tos_, Operand(kSmiTagMask));
- // "tos_" is a register and contains a non-zero value. Hence we implicitly
- // return true if the not equal condition is satisfied.
- __ Ret(eq, scratch0, Operand(zero_reg));
-
- // 'null' -> false
- __ LoadRoot(scratch0, Heap::kNullValueRootIndex);
- __ Branch(&false_result, eq, tos_, Operand(scratch0));
-
- // HeapNumber => false if +0, -0, or NaN.
- __ lw(scratch0, FieldMemOperand(tos_, HeapObject::kMapOffset));
- __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
- __ Branch(&not_heap_number, ne, scratch0, Operand(at));
-
- __ ldc1(f12, FieldMemOperand(tos_, HeapNumber::kValueOffset));
- __ fcmp(f12, 0.0, UEQ);
-
- // "tos_" is a register, and contains a non zero value by default.
- // Hence we only need to overwrite "tos_" with zero to return false for
- // FP_ZERO or FP_NAN cases. Otherwise, by default it returns true.
- __ movt(tos_, zero_reg);
- __ Ret();
+ Label patch;
+ const Register map = t5.is(tos_) ? t3 : t5;
- __ bind(&not_heap_number);
-
- // It can be an undetectable object.
- // Undetectable => false.
- __ lw(at, FieldMemOperand(tos_, HeapObject::kMapOffset));
- __ lbu(scratch0, FieldMemOperand(at, Map::kBitFieldOffset));
- __ And(scratch0, scratch0, Operand(1 << Map::kIsUndetectable));
- __ Branch(&false_result, eq, scratch0, Operand(1 << Map::kIsUndetectable));
-
- // JavaScript object => true.
- __ lw(scratch0, FieldMemOperand(tos_, HeapObject::kMapOffset));
- __ lbu(scratch0, FieldMemOperand(scratch0, Map::kInstanceTypeOffset));
-
- // "tos_" is a register and contains a non-zero value.
- // Hence we implicitly return true if the greater than
- // condition is satisfied.
- __ Ret(ge, scratch0, Operand(FIRST_SPEC_OBJECT_TYPE));
-
- // Check for string.
- __ lw(scratch0, FieldMemOperand(tos_, HeapObject::kMapOffset));
- __ lbu(scratch0, FieldMemOperand(scratch0, Map::kInstanceTypeOffset));
- // "tos_" is a register and contains a non-zero value.
- // Hence we implicitly return true if the greater than
- // condition is satisfied.
- __ Ret(ge, scratch0, Operand(FIRST_NONSTRING_TYPE));
-
- // String value => false iff empty, i.e., length is zero.
- __ lw(tos_, FieldMemOperand(tos_, String::kLengthOffset));
- // If length is zero, "tos_" contains zero ==> false.
- // If length is not zero, "tos_" contains a non-zero value ==> true.
- __ Ret();
+ // undefined -> false.
+ CheckOddball(masm, UNDEFINED, Heap::kUndefinedValueRootIndex, false);
+
+ // Boolean -> its value.
+ CheckOddball(masm, BOOLEAN, Heap::kFalseValueRootIndex, false);
+ CheckOddball(masm, BOOLEAN, Heap::kTrueValueRootIndex, true);
- // Return 0 in "tos_" for false.
- __ bind(&false_result);
- __ mov(tos_, zero_reg);
+ // 'null' -> false.
+ CheckOddball(masm, NULL_TYPE, Heap::kNullValueRootIndex, false);
+
+ if (types_.Contains(SMI)) {
+ // Smis: 0 -> false, all other -> true
+ __ And(at, tos_, kSmiTagMask);
+ // tos_ already contains the correct return value: the Smi zero is the
+ // all-zero word, and every other Smi is non-zero.
+ __ Ret(eq, at, Operand(zero_reg));
+ } else if (types_.NeedsMap()) {
+ // If we need a map later and have a Smi -> patch.
+ __ JumpIfSmi(tos_, &patch);
+ }
+
+ if (types_.NeedsMap()) {
+ __ lw(map, FieldMemOperand(tos_, HeapObject::kMapOffset));
+
+ if (types_.CanBeUndetectable()) {
+ __ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset));
+ __ And(at, at, Operand(1 << Map::kIsUndetectable));
+ // Undetectable -> false.
+ __ movn(tos_, zero_reg, at);
+ __ Ret(ne, at, Operand(zero_reg));
+ }
+ }
+
+ if (types_.Contains(SPEC_OBJECT)) {
+ // Spec object -> true.
+ __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
+ // tos_ contains the correct non-zero return value already.
+ __ Ret(ge, at, Operand(FIRST_SPEC_OBJECT_TYPE));
+ }
+
+ if (types_.Contains(STRING)) {
+ // String value -> false iff empty.
+ __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
+ Label skip;
+ __ Branch(&skip, ge, at, Operand(FIRST_NONSTRING_TYPE));
+ __ lw(tos_, FieldMemOperand(tos_, String::kLengthOffset));
+ __ Ret(); // The string length works as the result: zero iff empty.
+ __ bind(&skip);
+ }
+
+ if (types_.Contains(HEAP_NUMBER)) {
+ // Heap number -> false iff +0, -0, or NaN.
+ Label not_heap_number;
+ __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
+ __ Branch(&not_heap_number, ne, map, Operand(at));
+ Label zero_or_nan, number;
+ __ ldc1(f2, FieldMemOperand(tos_, HeapNumber::kValueOffset));
+ __ BranchF(&number, &zero_or_nan, ne, f2, kDoubleRegZero);
+ // "tos_" is a register, and contains a non zero value by default.
+ // Hence we only need to overwrite "tos_" with zero to return false for
+ // FP_ZERO or FP_NAN cases. Otherwise, by default it returns true.
+ __ bind(&zero_or_nan);
+ __ mov(tos_, zero_reg);
+ __ bind(&number);
+ __ Ret();
+ __ bind(&not_heap_number);
+ }
+
+ __ bind(&patch);
+ GenerateTypeTransition(masm);
+}
+
+
+void ToBooleanStub::CheckOddball(MacroAssembler* masm,
+ Type type,
+ Heap::RootListIndex value,
+ bool result) {
+ if (types_.Contains(type)) {
+ // If we see an expected oddball, return its ToBoolean value in tos_.
+ __ LoadRoot(at, value);
+ __ Subu(at, at, tos_); // at is zero iff tos_ equals the root (for movz).
+ // The value of a root is never NULL, so we can avoid loading a non-null
+ // value into tos_ when we want to return 'true'.
+ if (!result) {
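+ // movz(rd, rs, rt) writes rs into rd when rt is zero, so tos_ becomes
+ // zero (false) exactly when the oddball matched.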
+ __ movz(tos_, zero_reg, at);
+ }
+ __ Ret(eq, at, Operand(zero_reg));
+ }
+}
+
+
+void ToBooleanStub::GenerateTypeTransition(MacroAssembler* masm) {
+ __ Move(a3, tos_);
+ __ li(a2, Operand(Smi::FromInt(tos_.code())));
+ __ li(a1, Operand(Smi::FromInt(types_.ToByte())));
+ __ Push(a3, a2, a1);
+ // Patch the caller to an appropriate specialized stub and return the
+ // operation result to the caller of the stub.
+ __ TailCallExternalReference(
+ ExternalReference(IC_Utility(IC::kToBoolean_Patch), masm->isolate()),
+ 3,
+ 1);
+}
+
+
+void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
+ // We don't allow a GC during a store buffer overflow so there is no need to
+ // store the registers in any particular way, but we do have to store and
+ // restore them.
+ __ MultiPush(kJSCallerSaved | ra.bit());
+ if (save_doubles_ == kSaveFPRegs) {
+ CpuFeatures::Scope scope(FPU);
+ __ MultiPushFPU(kCallerSavedFPU);
+ }
+ const int argument_count = 1;
+ const int fp_argument_count = 0;
+ const Register scratch = a1;
+
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ PrepareCallCFunction(argument_count, fp_argument_count, scratch);
+ __ li(a0, Operand(ExternalReference::isolate_address()));
+ __ CallCFunction(
+ ExternalReference::store_buffer_overflow_function(masm->isolate()),
+ argument_count);
+ if (save_doubles_ == kSaveFPRegs) {
+ CpuFeatures::Scope scope(FPU);
+ __ MultiPopFPU(kCallerSavedFPU);
+ }
+
+ __ MultiPop(kJSCallerSaved | ra.bit());
__ Ret();
}
@@ -1951,12 +2088,13 @@ void UnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm,
__ jmp(&heapnumber_allocated);
__ bind(&slow_allocate_heapnumber);
- __ EnterInternalFrame();
- __ push(a0);
- __ CallRuntime(Runtime::kNumberAlloc, 0);
- __ mov(a1, v0);
- __ pop(a0);
- __ LeaveInternalFrame();
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ push(a0);
+ __ CallRuntime(Runtime::kNumberAlloc, 0);
+ __ mov(a1, v0);
+ __ pop(a0);
+ }
__ bind(&heapnumber_allocated);
__ lw(a3, FieldMemOperand(a0, HeapNumber::kMantissaOffset));
@@ -1998,13 +2136,14 @@ void UnaryOpStub::GenerateHeapNumberCodeBitNot(
__ jmp(&heapnumber_allocated);
__ bind(&slow_allocate_heapnumber);
- __ EnterInternalFrame();
- __ push(v0); // Push the heap number, not the untagged int32.
- __ CallRuntime(Runtime::kNumberAlloc, 0);
- __ mov(a2, v0); // Move the new heap number into a2.
- // Get the heap number into v0, now that the new heap number is in a2.
- __ pop(v0);
- __ LeaveInternalFrame();
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ push(v0); // Push the heap number, not the untagged int32.
+ __ CallRuntime(Runtime::kNumberAlloc, 0);
+ __ mov(a2, v0); // Move the new heap number into a2.
+ // Get the heap number into v0, now that the new heap number is in a2.
+ __ pop(v0);
+ }
// Convert the heap number in v0 to an untagged integer in a1.
// This can't go slow-case because it's the same number we already
@@ -2115,6 +2254,9 @@ void BinaryOpStub::GenerateTypeTransitionWithSavedArgs(
void BinaryOpStub::Generate(MacroAssembler* masm) {
+ // Explicitly allow generation of nested stubs. It is safe here because
+ // generation code does not use any raw pointers.
+ AllowStubCallsScope allow_stub_calls(masm, true);
switch (operands_type_) {
case BinaryOpIC::UNINITIALIZED:
GenerateTypeTransition(masm);
@@ -2717,26 +2859,16 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
// Otherwise return a heap number if allowed, or jump to type
// transition.
- // NOTE: ARM uses a MacroAssembler function here (EmitVFPTruncate).
- // On MIPS a lot of things cannot be implemented the same way so right
- // now it makes a lot more sense to just do things manually.
-
- // Save FCSR.
- __ cfc1(scratch1, FCSR);
- // Disable FPU exceptions.
- __ ctc1(zero_reg, FCSR);
- __ trunc_w_d(single_scratch, f10);
- // Retrieve FCSR.
- __ cfc1(scratch2, FCSR);
- // Restore FCSR.
- __ ctc1(scratch1, FCSR);
-
- // Check for inexact conversion or exception.
- __ And(scratch2, scratch2, kFCSRFlagMask);
+ Register except_flag = scratch2;
+ __ EmitFPUTruncate(kRoundToZero,
+ single_scratch,
+ f10,
+ scratch1,
+ except_flag);
if (result_type_ <= BinaryOpIC::INT32) {
- // If scratch2 != 0, result does not fit in a 32-bit integer.
- __ Branch(&transition, ne, scratch2, Operand(zero_reg));
+ // If except_flag != 0, result does not fit in a 32-bit integer.
+ __ Branch(&transition, ne, except_flag, Operand(zero_reg));
}
// Check if the result fits in a smi.
@@ -2929,9 +3061,9 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
__ Ret();
} else {
// Tail call that writes the int32 in a2 to the heap number in v0, using
- // a3 and a1 as scratch. v0 is preserved and returned.
+ // a3 and a0 as scratch. v0 is preserved and returned.
__ mov(a0, t1);
- WriteInt32ToHeapNumberStub stub(a2, v0, a3, a1);
+ WriteInt32ToHeapNumberStub stub(a2, v0, a3, a0);
__ TailCallStub(&stub);
}
@@ -3225,10 +3357,12 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
__ lw(t0, MemOperand(cache_entry, 0));
__ lw(t1, MemOperand(cache_entry, 4));
__ lw(t2, MemOperand(cache_entry, 8));
- __ Addu(cache_entry, cache_entry, 12);
__ Branch(&calculate, ne, a2, Operand(t0));
__ Branch(&calculate, ne, a3, Operand(t1));
// Cache hit. Load result, cleanup and return.
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(
+ counters->transcendental_cache_hit(), 1, scratch0, scratch1);
if (tagged) {
// Pop input value from stack and load result into v0.
__ Drop(1);
@@ -3241,6 +3375,9 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
} // if (CpuFeatures::IsSupported(FPU))
__ bind(&calculate);
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(
+ counters->transcendental_cache_miss(), 1, scratch0, scratch1);
if (tagged) {
__ bind(&invalid_cache);
__ TailCallExternalReference(ExternalReference(RuntimeFunction(),
@@ -3259,13 +3396,13 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
// Register a0 holds precalculated cache entry address; preserve
// it on the stack and pop it into register cache_entry after the
// call.
- __ push(cache_entry);
+ __ Push(cache_entry, a2, a3);
GenerateCallCFunction(masm, scratch0);
__ GetCFunctionDoubleResult(f4);
// Try to update the cache. If we cannot allocate a
// heap number, we return the result without updating.
- __ pop(cache_entry);
+ __ Pop(cache_entry, a2, a3);
__ LoadRoot(t1, Heap::kHeapNumberMapRootIndex);
__ AllocateHeapNumber(t2, scratch0, scratch1, t1, &no_update);
__ sdc1(f4, FieldMemOperand(t2, HeapNumber::kValueOffset));
@@ -3283,10 +3420,11 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
__ LoadRoot(t1, Heap::kHeapNumberMapRootIndex);
__ AllocateHeapNumber(a0, scratch0, scratch1, t1, &skip_cache);
__ sdc1(f4, FieldMemOperand(a0, HeapNumber::kValueOffset));
- __ EnterInternalFrame();
- __ push(a0);
- __ CallRuntime(RuntimeFunction(), 1);
- __ LeaveInternalFrame();
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ push(a0);
+ __ CallRuntime(RuntimeFunction(), 1);
+ }
__ ldc1(f4, FieldMemOperand(v0, HeapNumber::kValueOffset));
__ Ret();
@@ -3299,14 +3437,15 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
// We return the value in f4 without adding it to the cache, but
// we cause a scavenging GC so that future allocations will succeed.
- __ EnterInternalFrame();
-
- // Allocate an aligned object larger than a HeapNumber.
- ASSERT(4 * kPointerSize >= HeapNumber::kSize);
- __ li(scratch0, Operand(4 * kPointerSize));
- __ push(scratch0);
- __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
- __ LeaveInternalFrame();
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Allocate an aligned object larger than a HeapNumber.
+ ASSERT(4 * kPointerSize >= HeapNumber::kSize);
+ __ li(scratch0, Operand(4 * kPointerSize));
+ __ push(scratch0);
+ __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
+ }
__ Ret();
}
}
@@ -3317,22 +3456,31 @@ void TranscendentalCacheStub::GenerateCallCFunction(MacroAssembler* masm,
__ push(ra);
__ PrepareCallCFunction(2, scratch);
if (IsMipsSoftFloatABI) {
- __ Move(v0, v1, f4);
+ __ Move(a0, a1, f4);
} else {
__ mov_d(f12, f4);
}
+ AllowExternalCallThatCantCauseGC scope(masm);
+ Isolate* isolate = masm->isolate();
switch (type_) {
case TranscendentalCache::SIN:
__ CallCFunction(
- ExternalReference::math_sin_double_function(masm->isolate()), 2);
+ ExternalReference::math_sin_double_function(isolate),
+ 0, 1);
break;
case TranscendentalCache::COS:
__ CallCFunction(
- ExternalReference::math_cos_double_function(masm->isolate()), 2);
+ ExternalReference::math_cos_double_function(isolate),
+ 0, 1);
+ break;
+ case TranscendentalCache::TAN:
+ __ CallCFunction(ExternalReference::math_tan_double_function(isolate),
+ 0, 1);
break;
case TranscendentalCache::LOG:
__ CallCFunction(
- ExternalReference::math_log_double_function(masm->isolate()), 2);
+ ExternalReference::math_log_double_function(isolate),
+ 0, 1);
break;
default:
UNIMPLEMENTED();
@@ -3347,6 +3495,7 @@ Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
// Add more cases when necessary.
case TranscendentalCache::SIN: return Runtime::kMath_sin;
case TranscendentalCache::COS: return Runtime::kMath_cos;
+ case TranscendentalCache::TAN: return Runtime::kMath_tan;
case TranscendentalCache::LOG: return Runtime::kMath_log;
default:
UNIMPLEMENTED();
@@ -3415,12 +3564,15 @@ void MathPowStub::Generate(MacroAssembler* masm) {
heapnumbermap,
&call_runtime);
__ push(ra);
- __ PrepareCallCFunction(3, scratch);
+ __ PrepareCallCFunction(1, 1, scratch);
__ SetCallCDoubleArguments(double_base, exponent);
- __ CallCFunction(
- ExternalReference::power_double_int_function(masm->isolate()), 3);
- __ pop(ra);
- __ GetCFunctionDoubleResult(double_result);
+ {
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ CallCFunction(
+ ExternalReference::power_double_int_function(masm->isolate()), 1, 1);
+ __ pop(ra);
+ __ GetCFunctionDoubleResult(double_result);
+ }
__ sdc1(double_result,
FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
__ mov(v0, heapnumber);
@@ -3443,15 +3595,20 @@ void MathPowStub::Generate(MacroAssembler* masm) {
heapnumbermap,
&call_runtime);
__ push(ra);
- __ PrepareCallCFunction(4, scratch);
+ __ PrepareCallCFunction(0, 2, scratch);
// ABI (o32) for func(double a, double b): a in f12, b in f14.
ASSERT(double_base.is(f12));
ASSERT(double_exponent.is(f14));
__ SetCallCDoubleArguments(double_base, double_exponent);
- __ CallCFunction(
- ExternalReference::power_double_double_function(masm->isolate()), 4);
- __ pop(ra);
- __ GetCFunctionDoubleResult(double_result);
+ {
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ CallCFunction(
+ ExternalReference::power_double_double_function(masm->isolate()),
+ 0,
+ 2);
+ __ pop(ra);
+ __ GetCFunctionDoubleResult(double_result);
+ }
__ sdc1(double_result,
FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
__ mov(v0, heapnumber);
@@ -3468,6 +3625,37 @@ bool CEntryStub::NeedsImmovableCode() {
}
+bool CEntryStub::IsPregenerated() {
+ return (!save_doubles_ || ISOLATE->fp_stubs_generated()) &&
+ result_size_ == 1;
+}
+
+
+void CodeStub::GenerateStubsAheadOfTime() {
+ CEntryStub::GenerateAheadOfTime();
+ WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime();
+ StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime();
+ RecordWriteStub::GenerateFixedRegStubsAheadOfTime();
+}
+
+
+void CodeStub::GenerateFPStubs() {
+ CEntryStub save_doubles(1, kSaveFPRegs);
+ Handle<Code> code = save_doubles.GetCode();
+ code->set_is_pregenerated(true);
+ StoreBufferOverflowStub stub(kSaveFPRegs);
+ stub.GetCode()->set_is_pregenerated(true);
+ code->GetIsolate()->set_fp_stubs_generated(true);
+}
+
+
+void CEntryStub::GenerateAheadOfTime() {
+ CEntryStub stub(1, kDontSaveFPRegs);
+ Handle<Code> code = stub.GetCode();
+ code->set_is_pregenerated(true);
+}
+
+
void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
__ Throw(v0);
}
@@ -3490,16 +3678,17 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
// s1: pointer to the first argument (C callee-saved)
// s2: pointer to builtin function (C callee-saved)
+ Isolate* isolate = masm->isolate();
+
if (do_gc) {
// Move result passed in v0 into a0 to call PerformGC.
__ mov(a0, v0);
- __ PrepareCallCFunction(1, a1);
- __ CallCFunction(
- ExternalReference::perform_gc_function(masm->isolate()), 1);
+ __ PrepareCallCFunction(1, 0, a1);
+ __ CallCFunction(ExternalReference::perform_gc_function(isolate), 1, 0);
}
ExternalReference scope_depth =
- ExternalReference::heap_always_allocate_scope_depth(masm->isolate());
+ ExternalReference::heap_always_allocate_scope_depth(isolate);
if (always_allocate) {
__ li(a0, Operand(scope_depth));
__ lw(a1, MemOperand(a0));
@@ -3588,18 +3777,16 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
v0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
// Retrieve the pending exception and clear the variable.
- __ li(t0,
- Operand(ExternalReference::the_hole_value_location(masm->isolate())));
- __ lw(a3, MemOperand(t0));
+ __ li(a3, Operand(isolate->factory()->the_hole_value()));
__ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
- masm->isolate())));
+ isolate)));
__ lw(v0, MemOperand(t0));
__ sw(a3, MemOperand(t0));
// Special handling of termination exceptions which are uncatchable
// by javascript code.
__ Branch(throw_termination_exception, eq,
- v0, Operand(masm->isolate()->factory()->termination_exception()));
+ v0, Operand(isolate->factory()->termination_exception()));
// Handle normal exception.
__ jmp(throw_normal_exception);
@@ -3628,6 +3815,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
__ Subu(s1, s1, Operand(kPointerSize));
// Enter the exit frame that transitions from JavaScript to C++.
+ FrameScope scope(masm, StackFrame::MANUAL);
__ EnterExitFrame(save_doubles_);
// Setup argc and the builtin function in callee-saved registers.
@@ -3680,7 +3868,8 @@ void CEntryStub::Generate(MacroAssembler* masm) {
void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
- Label invoke, exit;
+ Label invoke, handler_entry, exit;
+ Isolate* isolate = masm->isolate();
// Registers:
// a0: entry address
@@ -3699,8 +3888,11 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
CpuFeatures::Scope scope(FPU);
// Save callee-saved FPU registers.
__ MultiPushFPU(kCalleeSavedFPU);
+ // Set up the reserved register for 0.0.
+ __ Move(kDoubleRegZero, 0.0);
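+ // (kDoubleRegZero is later compared against directly, e.g. by
+ // ToBooleanStub's heap number check, so it must hold 0.0 throughout.)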
}
+
// Load argv in s0 register.
int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize;
if (CpuFeatures::IsSupported(FPU)) {
@@ -3715,7 +3907,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
__ li(t2, Operand(Smi::FromInt(marker)));
__ li(t1, Operand(Smi::FromInt(marker)));
__ li(t0, Operand(ExternalReference(Isolate::kCEntryFPAddress,
- masm->isolate())));
+ isolate)));
__ lw(t0, MemOperand(t0));
__ Push(t3, t2, t1, t0);
// Setup frame pointer for the frame to be pushed.
@@ -3739,8 +3931,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// If this is the outermost JS call, set js_entry_sp value.
Label non_outermost_js;
- ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress,
- masm->isolate());
+ ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate);
__ li(t1, Operand(ExternalReference(js_entry_sp)));
__ lw(t2, MemOperand(t1));
__ Branch(&non_outermost_js, ne, t2, Operand(zero_reg));
@@ -3754,35 +3945,35 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
__ bind(&cont);
__ push(t0);
- // Call a faked try-block that does the invoke.
- __ bal(&invoke); // bal exposes branch delay slot.
- __ nop(); // Branch delay slot nop.
-
- // Caught exception: Store result (exception) in the pending
- // exception field in the JSEnv and return a failure sentinel.
- // Coming in here the fp will be invalid because the PushTryHandler below
- // sets it to 0 to signal the existence of the JSEntry frame.
+ // Jump to a faked try block that does the invoke, with a faked catch
+ // block that sets the pending exception.
+ __ jmp(&invoke);
+ __ bind(&handler_entry);
+ handler_offset_ = handler_entry.pos();
+ // Caught exception: Store result (exception) in the pending exception
+ // field in the JSEnv and return a failure sentinel. Coming in here the
+ // fp will be invalid because the PushTryHandler below sets it to 0 to
+ // signal the existence of the JSEntry frame.
__ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
- masm->isolate())));
+ isolate)));
__ sw(v0, MemOperand(t0)); // We come back from 'invoke'. result is in v0.
__ li(v0, Operand(reinterpret_cast<int32_t>(Failure::Exception())));
__ b(&exit); // b exposes branch delay slot.
__ nop(); // Branch delay slot nop.
- // Invoke: Link this frame into the handler chain.
+ // Invoke: Link this frame into the handler chain. There's only one
+ // handler block in this code object, so its index is 0.
__ bind(&invoke);
- __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER);
+ __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER, 0);
// If an exception not caught by another handler occurs, this handler
// returns control to the faked catch block above (at handler_entry), which
// restores all kCalleeSaved registers (including cp and fp) to their
// saved values before returning a failure to C.
// Clear any pending exceptions.
- __ li(t0,
- Operand(ExternalReference::the_hole_value_location(masm->isolate())));
- __ lw(t1, MemOperand(t0));
+ __ li(t1, Operand(isolate->factory()->the_hole_value()));
__ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
- masm->isolate())));
+ isolate)));
__ sw(t1, MemOperand(t0));
// Invoke the function by calling through JS entry trampoline builtin.
@@ -3805,7 +3996,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
if (is_construct) {
ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
- masm->isolate());
+ isolate);
__ li(t0, Operand(construct_entry));
} else {
ExternalReference entry(Builtins::kJSEntryTrampoline, masm->isolate());
@@ -3833,7 +4024,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// Restore the top frame descriptors from the stack.
__ pop(t1);
__ li(t0, Operand(ExternalReference(Isolate::kCEntryFPAddress,
- masm->isolate())));
+ isolate)));
__ sw(t1, MemOperand(t0));
// Reset the stack to the callee saved registers.
@@ -3857,11 +4048,10 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// * object: a0 or at sp + 1 * kPointerSize.
// * function: a1 or at sp.
//
-// Inlined call site patching is a crankshaft-specific feature that is not
-// implemented on MIPS.
+// An inlined call site may have been generated before calling this stub.
+// In this case the offset to the inline site to patch is passed on the stack,
+// in the safepoint slot for register t0.
void InstanceofStub::Generate(MacroAssembler* masm) {
- // This is a crankshaft-specific feature that has not been implemented yet.
- ASSERT(!HasCallSiteInlineCheck());
// Call site inlining and patching implies arguments in registers.
ASSERT(HasArgsInRegisters() || !HasCallSiteInlineCheck());
// ReturnTrueFalse is only implemented for inlined call sites.
@@ -3875,6 +4065,8 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
const Register inline_site = t5;
const Register scratch = a2;
+ const int32_t kDeltaToLoadBoolResult = 4 * kPointerSize;
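+ // (Presumably four 4-byte MIPS instructions: the distance from the patched
+ // map-check site to the patchable load of the boolean result in the
+ // inlined sequence.)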
+
Label slow, loop, is_instance, is_not_instance, not_js_object;
if (!HasArgsInRegisters()) {
@@ -3890,10 +4082,10 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
// real lookup and update the call site cache.
if (!HasCallSiteInlineCheck()) {
Label miss;
- __ LoadRoot(t1, Heap::kInstanceofCacheFunctionRootIndex);
- __ Branch(&miss, ne, function, Operand(t1));
- __ LoadRoot(t1, Heap::kInstanceofCacheMapRootIndex);
- __ Branch(&miss, ne, map, Operand(t1));
+ __ LoadRoot(at, Heap::kInstanceofCacheFunctionRootIndex);
+ __ Branch(&miss, ne, function, Operand(at));
+ __ LoadRoot(at, Heap::kInstanceofCacheMapRootIndex);
+ __ Branch(&miss, ne, map, Operand(at));
__ LoadRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
__ DropAndRet(HasArgsInRegisters() ? 0 : 2);
@@ -3901,7 +4093,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
}
// Get the prototype of the function.
- __ TryGetFunctionPrototype(function, prototype, scratch, &slow);
+ __ TryGetFunctionPrototype(function, prototype, scratch, &slow, true);
// Check that the function prototype is a JS object.
__ JumpIfSmi(prototype, &slow);
@@ -3913,7 +4105,15 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
__ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex);
} else {
- UNIMPLEMENTED_MIPS();
+ ASSERT(HasArgsInRegisters());
+ // Patch the (relocated) inlined map check.
+
+ // The offset was stored in t0 safepoint slot.
+ // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal)
+ __ LoadFromSafepointRegisterSlot(scratch, t0);
+ __ Subu(inline_site, ra, scratch);
+ // Patch the relocated value to map.
+ __ PatchRelocatedValue(inline_site, scratch, map);
}
// Register mapping: a3 is object map and t0 is function prototype.
@@ -3939,7 +4139,16 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ mov(v0, zero_reg);
__ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
} else {
- UNIMPLEMENTED_MIPS();
+ // Patch the call site to return true.
+ __ LoadRoot(v0, Heap::kTrueValueRootIndex);
+ __ Addu(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
+ // Get the boolean result location in scratch and patch it.
+ __ PatchRelocatedValue(inline_site, scratch, v0);
+
+ if (!ReturnTrueFalseObject()) {
+ ASSERT_EQ(Smi::FromInt(0), 0);
+ __ mov(v0, zero_reg);
+ }
}
__ DropAndRet(HasArgsInRegisters() ? 0 : 2);
@@ -3948,8 +4157,17 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ li(v0, Operand(Smi::FromInt(1)));
__ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
} else {
- UNIMPLEMENTED_MIPS();
+ // Patch the call site to return false.
+ __ LoadRoot(v0, Heap::kFalseValueRootIndex);
+ __ Addu(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
+ // Get the boolean result location in scratch and patch it.
+ __ PatchRelocatedValue(inline_site, scratch, v0);
+
+ if (!ReturnTrueFalseObject()) {
+ __ li(v0, Operand(Smi::FromInt(1)));
+ }
}
+
__ DropAndRet(HasArgsInRegisters() ? 0 : 2);
Label object_not_null, object_not_null_or_smi;
@@ -3986,10 +4204,11 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
}
__ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
} else {
- __ EnterInternalFrame();
- __ Push(a0, a1);
- __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
- __ LeaveInternalFrame();
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(a0, a1);
+ __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
+ }
__ mov(a0, v0);
__ LoadRoot(v0, Heap::kTrueValueRootIndex);
__ DropAndRet(HasArgsInRegisters() ? 0 : 2, eq, a0, Operand(zero_reg));
@@ -4411,10 +4630,6 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
#ifdef V8_INTERPRETED_REGEXP
__ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
#else // V8_INTERPRETED_REGEXP
- if (!FLAG_regexp_entry_native) {
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
- return;
- }
// Stack frame on entry.
// sp[0]: last_match_info (expected JSArray)
@@ -4427,6 +4642,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
static const int kSubjectOffset = 2 * kPointerSize;
static const int kJSRegExpOffset = 3 * kPointerSize;
+ Isolate* isolate = masm->isolate();
+
Label runtime, invoke_regexp;
// Allocation of registers for this function. These are in callee save
@@ -4442,9 +4659,9 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Ensure that a RegExp stack is allocated.
ExternalReference address_of_regexp_stack_memory_address =
ExternalReference::address_of_regexp_stack_memory_address(
- masm->isolate());
+ isolate);
ExternalReference address_of_regexp_stack_memory_size =
- ExternalReference::address_of_regexp_stack_memory_size(masm->isolate());
+ ExternalReference::address_of_regexp_stack_memory_size(isolate);
__ li(a0, Operand(address_of_regexp_stack_memory_size));
__ lw(a0, MemOperand(a0, 0));
__ Branch(&runtime, eq, a0, Operand(zero_reg));
@@ -4508,8 +4725,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Check that the third argument is a positive smi less than the subject
// string length. A negative value will be greater (unsigned comparison).
__ lw(a0, MemOperand(sp, kPreviousIndexOffset));
- __ And(at, a0, Operand(kSmiTagMask));
- __ Branch(&runtime, ne, at, Operand(zero_reg));
+ __ JumpIfNotSmi(a0, &runtime);
__ Branch(&runtime, ls, a3, Operand(a0));
// a2: Number of capture registers
@@ -4525,7 +4741,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
FieldMemOperand(a0, JSArray::kElementsOffset));
__ lw(a0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
__ Branch(&runtime, ne, a0, Operand(
- masm->isolate()->factory()->fixed_array_map()));
+ isolate->factory()->fixed_array_map()));
// Check that the last match info has space for the capture registers and the
// additional information.
__ lw(a0,
@@ -4542,7 +4758,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
Label seq_string;
__ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
__ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
- // First check for flat string.
+ // First check for flat string. None of the following string type tests will
+ // succeed if kIsNotStringTag is set.
__ And(a1, a0, Operand(kIsNotStringMask | kStringRepresentationMask));
STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
__ Branch(&seq_string, eq, a1, Operand(zero_reg));
@@ -4550,6 +4767,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// subject: Subject string
// a0: instance type of subject string
// regexp_data: RegExp data (FixedArray)
+ // a1: whether subject is a string and if yes, its string representation
// Check for flat cons string or sliced string.
// A flat cons string is a cons string where the second part is the empty
// string. In that case the subject string is just the first part of the cons
@@ -4559,9 +4777,15 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
Label cons_string, check_encoding;
STATIC_ASSERT(kConsStringTag < kExternalStringTag);
STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
+ STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
__ Branch(&cons_string, lt, a1, Operand(kExternalStringTag));
__ Branch(&runtime, eq, a1, Operand(kExternalStringTag));
+ // Catch non-string subject (should already have been guarded against).
+ STATIC_ASSERT(kNotStringTag != 0);
+ __ And(at, a1, Operand(kIsNotStringMask));
+ __ Branch(&runtime, ne, at, Operand(zero_reg));
+
// String is sliced.
__ lw(t0, FieldMemOperand(subject, SlicedString::kOffsetOffset));
__ sra(t0, t0, kSmiTagSize);
@@ -4616,7 +4840,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// subject: Subject string
// regexp_data: RegExp data (FixedArray)
// All checks done. Now push arguments for native regexp code.
- __ IncrementCounter(masm->isolate()->counters()->regexp_entry_native(),
+ __ IncrementCounter(isolate->counters()->regexp_entry_native(),
1, a0, a2);
// Isolates: note we add an additional parameter here (isolate pointer).
@@ -4656,13 +4880,12 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Argument 5: static offsets vector buffer.
__ li(a0, Operand(
- ExternalReference::address_of_static_offsets_vector(masm->isolate())));
+ ExternalReference::address_of_static_offsets_vector(isolate)));
__ sw(a0, MemOperand(sp, 1 * kPointerSize));
// For arguments 4 and 3 get string length, calculate start of string data
// and calculate the shift of the index (0 for ASCII and 1 for two byte).
- STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
- __ Addu(t2, subject, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ Addu(t2, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag));
__ Xor(a3, a3, Operand(1)); // 1 for 2-byte str, 0 for 1-byte.
// Load the length from the original subject string from the previous stack
// frame. Therefore we have to use fp, which points exactly to two pointer
@@ -4715,11 +4938,9 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// stack overflow (on the backtrack stack) was detected in RegExp code but
// haven't created the exception yet. Handle that in the runtime system.
// TODO(592): Rerunning the RegExp to get the stack overflow exception.
- __ li(a1, Operand(
- ExternalReference::the_hole_value_location(masm->isolate())));
- __ lw(a1, MemOperand(a1, 0));
+ __ li(a1, Operand(isolate->factory()->the_hole_value()));
__ li(a2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
- masm->isolate())));
+ isolate)));
__ lw(v0, MemOperand(a2, 0));
__ Branch(&runtime, eq, v0, Operand(a1));
@@ -4737,7 +4958,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ bind(&failure);
// For failure and exception return null.
- __ li(v0, Operand(masm->isolate()->factory()->null_value()));
+ __ li(v0, Operand(isolate->factory()->null_value()));
__ Addu(sp, sp, Operand(4 * kPointerSize));
__ Ret();
@@ -4757,20 +4978,29 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ sw(a2, FieldMemOperand(last_match_info_elements,
RegExpImpl::kLastCaptureCountOffset));
// Store last subject and last input.
- __ mov(a3, last_match_info_elements); // Moved up to reduce latency.
__ sw(subject,
FieldMemOperand(last_match_info_elements,
RegExpImpl::kLastSubjectOffset));
- __ RecordWrite(a3, Operand(RegExpImpl::kLastSubjectOffset), a2, t0);
+ __ mov(a2, subject);
+ __ RecordWriteField(last_match_info_elements,
+ RegExpImpl::kLastSubjectOffset,
+ a2,
+ t3,
+ kRAHasNotBeenSaved,
+ kDontSaveFPRegs);
__ sw(subject,
FieldMemOperand(last_match_info_elements,
RegExpImpl::kLastInputOffset));
- __ mov(a3, last_match_info_elements);
- __ RecordWrite(a3, Operand(RegExpImpl::kLastInputOffset), a2, t0);
+ __ RecordWriteField(last_match_info_elements,
+ RegExpImpl::kLastInputOffset,
+ subject,
+ t3,
+ kRAHasNotBeenSaved,
+ kDontSaveFPRegs);
// Get the static offsets vector filled by the native regexp code.
ExternalReference address_of_static_offsets_vector =
- ExternalReference::address_of_static_offsets_vector(masm->isolate());
+ ExternalReference::address_of_static_offsets_vector(isolate);
__ li(a2, Operand(address_of_static_offsets_vector));
// a1: number of capture registers
@@ -4895,8 +5125,25 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
}
+void CallFunctionStub::FinishCode(Handle<Code> code) {
+ code->set_has_function_cache(false);
+}
+
+
+void CallFunctionStub::Clear(Heap* heap, Address address) {
+ UNREACHABLE();
+}
+
+
+Object* CallFunctionStub::GetCachedValue(Address address) {
+ UNREACHABLE();
+ return NULL;
+}
+
+
void CallFunctionStub::Generate(MacroAssembler* masm) {
- Label slow;
+ // a1 : the function to call
+ Label slow, non_function;
// The receiver might implicitly be the global object. This is
// indicated by passing the hole as the receiver to the call
@@ -4910,19 +5157,15 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
__ Branch(&call, ne, t0, Operand(at));
// Patch the receiver on the stack with the global receiver object.
- __ lw(a1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ lw(a1, FieldMemOperand(a1, GlobalObject::kGlobalReceiverOffset));
- __ sw(a1, MemOperand(sp, argc_ * kPointerSize));
+ __ lw(a2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalReceiverOffset));
+ __ sw(a2, MemOperand(sp, argc_ * kPointerSize));
__ bind(&call);
}
- // Get the function to call from the stack.
- // function, receiver [, arguments]
- __ lw(a1, MemOperand(sp, (argc_ + 1) * kPointerSize));
-
// Check that the function is really a JavaScript function.
// a1: pushed function (to be verified)
- __ JumpIfSmi(a1, &slow);
+ __ JumpIfSmi(a1, &non_function);
// Get the map of the function object.
__ GetObjectType(a1, a2, a2);
__ Branch(&slow, ne, a2, Operand(JS_FUNCTION_TYPE));
@@ -4950,8 +5193,22 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
// Slow-case: Non-function called.
__ bind(&slow);
+ // Check for function proxy.
+ __ Branch(&non_function, ne, a2, Operand(JS_FUNCTION_PROXY_TYPE));
+ __ push(a1); // Put proxy as additional argument.
+ __ li(a0, Operand(argc_ + 1, RelocInfo::NONE));
+ __ li(a2, Operand(0, RelocInfo::NONE));
+ __ GetBuiltinEntry(a3, Builtins::CALL_FUNCTION_PROXY);
+ __ SetCallKind(t1, CALL_AS_METHOD);
+ {
+ Handle<Code> adaptor =
+ masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
+ __ Jump(adaptor, RelocInfo::CODE_TARGET);
+ }
+
// CALL_NON_FUNCTION expects the non-function callee as receiver (instead
// of the original receiver from the call site).
+ __ bind(&non_function);
__ sw(a1, MemOperand(sp, argc_ * kPointerSize));
__ li(a0, Operand(argc_)); // Setup the number of arguments.
__ mov(a2, zero_reg);
@@ -5008,7 +5265,6 @@ void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
Label got_char_code;
Label sliced_string;
- ASSERT(!t0.is(scratch_));
ASSERT(!t0.is(index_));
ASSERT(!t0.is(result_));
ASSERT(!t0.is(object_));
@@ -5026,13 +5282,11 @@ void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
// If the index is non-smi trigger the non-smi case.
__ JumpIfNotSmi(index_, &index_not_smi_);
- // Put smi-tagged index into scratch register.
- __ mov(scratch_, index_);
__ bind(&got_smi_index_);
// Check for index out of range.
__ lw(t0, FieldMemOperand(object_, String::kLengthOffset));
- __ Branch(index_out_of_range_, ls, t0, Operand(scratch_));
+ __ Branch(index_out_of_range_, ls, t0, Operand(index_));
// We need special handling for non-flat strings.
STATIC_ASSERT(kSeqStringTag == 0);
@@ -5056,14 +5310,14 @@ void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
__ LoadRoot(t0, Heap::kEmptyStringRootIndex);
__ Branch(&call_runtime_, ne, result_, Operand(t0));
- // Get the first of the two strings and load its instance type.
+ // Get the first of the two parts.
__ lw(object_, FieldMemOperand(object_, ConsString::kFirstOffset));
__ jmp(&assure_seq_string);
// SlicedString, unpack and add offset.
__ bind(&sliced_string);
__ lw(result_, FieldMemOperand(object_, SlicedString::kOffsetOffset));
- __ addu(scratch_, scratch_, result_);
+ __ Addu(index_, index_, result_);
__ lw(object_, FieldMemOperand(object_, SlicedString::kParentOffset));
// Assure that we are dealing with a sequential string. Go to runtime if not.
@@ -5071,6 +5325,9 @@ void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
__ lw(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
__ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
// Check that parent is not an external string. Go to runtime otherwise.
+ // Note that if the original string is a cons or slice with an external
+ // string as underlying string, we pass that unpacked underlying string with
+ // the adjusted index to the runtime function.
STATIC_ASSERT(kSeqStringTag == 0);
__ And(t0, result_, Operand(kStringRepresentationMask));
@@ -5088,18 +5345,18 @@ void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
// add without shifting since the smi tag size is the log2 of the
// number of bytes in a two-byte character.
STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1 && kSmiShiftSize == 0);
- __ Addu(scratch_, object_, Operand(scratch_));
- __ lhu(result_, FieldMemOperand(scratch_, SeqTwoByteString::kHeaderSize));
+ __ Addu(index_, object_, Operand(index_));
+ __ lhu(result_, FieldMemOperand(index_, SeqTwoByteString::kHeaderSize));
__ Branch(&got_char_code);
// ASCII string.
// Load the byte into the result register.
__ bind(&ascii_string);
- __ srl(t0, scratch_, kSmiTagSize);
- __ Addu(scratch_, object_, t0);
+ __ srl(t0, index_, kSmiTagSize);
+ __ Addu(index_, object_, t0);
- __ lbu(result_, FieldMemOperand(scratch_, SeqAsciiString::kHeaderSize));
+ __ lbu(result_, FieldMemOperand(index_, SeqAsciiString::kHeaderSize));
__ bind(&got_char_code);
__ sll(result_, result_, kSmiTagSize);
@@ -5108,20 +5365,21 @@ void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
void StringCharCodeAtGenerator::GenerateSlow(
- MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+ MacroAssembler* masm,
+ const RuntimeCallHelper& call_helper) {
__ Abort("Unexpected fallthrough to CharCodeAt slow case");
// Index is not a smi.
__ bind(&index_not_smi_);
// If index is a heap number, try converting it to an integer.
__ CheckMap(index_,
- scratch_,
+ result_,
Heap::kHeapNumberMapRootIndex,
index_not_number_,
DONT_DO_SMI_CHECK);
call_helper.BeforeCall(masm);
// Consumed by runtime conversion function:
- __ Push(object_, index_, index_);
+ __ Push(object_, index_);
if (index_flags_ == STRING_INDEX_IS_NUMBER) {
__ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
} else {
@@ -5133,16 +5391,14 @@ void StringCharCodeAtGenerator::GenerateSlow(
// Save the conversion result before the pop instructions below
// have a chance to overwrite it.
- __ Move(scratch_, v0);
-
- __ pop(index_);
+ __ Move(index_, v0);
__ pop(object_);
// Reload the instance type.
__ lw(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
__ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
call_helper.AfterCall(masm);
// If index is still not a smi, it must be out of range.
- __ JumpIfNotSmi(scratch_, index_out_of_range_);
+ __ JumpIfNotSmi(index_, index_out_of_range_);
// Otherwise, return to the fast path.
__ Branch(&got_smi_index_);
@@ -5194,7 +5450,8 @@ void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
void StringCharFromCodeGenerator::GenerateSlow(
- MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+ MacroAssembler* masm,
+ const RuntimeCallHelper& call_helper) {
__ Abort("Unexpected fallthrough to CharFromCode slow case");
__ bind(&slow_case_);
@@ -5220,76 +5477,13 @@ void StringCharAtGenerator::GenerateFast(MacroAssembler* masm) {
void StringCharAtGenerator::GenerateSlow(
- MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+ MacroAssembler* masm,
+ const RuntimeCallHelper& call_helper) {
char_code_at_generator_.GenerateSlow(masm, call_helper);
char_from_code_generator_.GenerateSlow(masm, call_helper);
}
-class StringHelper : public AllStatic {
- public:
- // Generate code for copying characters using a simple loop. This should only
- // be used in places where the number of characters is small and the
- // additional setup and checking in GenerateCopyCharactersLong adds too much
- // overhead. Copying of overlapping regions is not supported.
- // Dest register ends at the position after the last character written.
- static void GenerateCopyCharacters(MacroAssembler* masm,
- Register dest,
- Register src,
- Register count,
- Register scratch,
- bool ascii);
-
- // Generate code for copying a large number of characters. This function
- // is allowed to spend extra time setting up conditions to make copying
- // faster. Copying of overlapping regions is not supported.
- // Dest register ends at the position after the last character written.
- static void GenerateCopyCharactersLong(MacroAssembler* masm,
- Register dest,
- Register src,
- Register count,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Register scratch5,
- int flags);
-
-
- // Probe the symbol table for a two character string. If the string is
- // not found by probing a jump to the label not_found is performed. This jump
- // does not guarantee that the string is not in the symbol table. If the
- // string is found the code falls through with the string in register r0.
- // Contents of both c1 and c2 registers are modified. At the exit c1 is
- // guaranteed to contain halfword with low and high bytes equal to
- // initial contents of c1 and c2 respectively.
- static void GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
- Register c1,
- Register c2,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Register scratch5,
- Label* not_found);
-
- // Generate string hash.
- static void GenerateHashInit(MacroAssembler* masm,
- Register hash,
- Register character);
-
- static void GenerateHashAddCharacter(MacroAssembler* masm,
- Register hash,
- Register character);
-
- static void GenerateHashGetHash(MacroAssembler* masm,
- Register hash);
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
-};
-
-
void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
Register dest,
Register src,
@@ -5540,10 +5734,10 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
__ Branch(&is_string, ne, scratch, Operand(ODDBALL_TYPE));
__ Branch(not_found, eq, undefined, Operand(candidate));
- // Must be null (deleted entry).
+ // Must be the hole (deleted entry).
if (FLAG_debug_code) {
- __ LoadRoot(scratch, Heap::kNullValueRootIndex);
- __ Assert(eq, "oddball in symbol table is not undefined or null",
+ __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
+ __ Assert(eq, "oddball in symbol table is not undefined or the hole",
scratch, Operand(candidate));
}
__ jmp(&next_probe[i]);
@@ -5583,7 +5777,7 @@ void StringHelper::GenerateHashInit(MacroAssembler* masm,
__ sll(hash, character, 10);
__ addu(hash, hash, character);
// hash ^= hash >> 6;
- __ sra(at, hash, 6);
+ __ srl(at, hash, 6);
__ xor_(hash, hash, at);
}
@@ -5597,7 +5791,7 @@ void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
__ sll(at, hash, 10);
__ addu(hash, hash, at);
// hash ^= hash >> 6;
- __ sra(at, hash, 6);
+ __ srl(at, hash, 6);
__ xor_(hash, hash, at);
}
@@ -5608,12 +5802,16 @@ void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
__ sll(at, hash, 3);
__ addu(hash, hash, at);
// hash ^= hash >> 11;
- __ sra(at, hash, 11);
+ __ srl(at, hash, 11);
__ xor_(hash, hash, at);
// hash += hash << 15;
__ sll(at, hash, 15);
__ addu(hash, hash, at);
+ uint32_t kHashShiftCutOffMask = (1 << (32 - String::kHashShift)) - 1;
+ __ li(at, Operand(kHashShiftCutOffMask));
+ __ and_(hash, hash, at);
+
// if (hash == 0) hash = 27;
__ ori(at, zero_reg, 27);
__ movz(hash, at, hash);
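For reference, the three hash helpers above are a register-level rendering of the classic one-at-a-time string hash. A scalar sketch, assuming String::kHashShift is 2 in this version so the cut-off mask keeps the low 30 bits:

    #include <stdint.h>

    uint32_t StringHash(const uint8_t* chars, int length) {
      uint32_t hash = 0;
      for (int i = 0; i < length; i++) {
        hash += chars[i];      // GenerateHashInit / GenerateHashAddCharacter
        hash += hash << 10;
        hash ^= hash >> 6;     // unsigned shift -- hence the sra -> srl fixes
      }
      hash += hash << 3;       // GenerateHashGetHash
      hash ^= hash >> 11;
      hash += hash << 15;
      hash &= (1u << 30) - 1;  // kHashShiftCutOffMask
      return hash == 0 ? 27 : hash;
    }

The sra -> srl corrections matter because the hash is an unsigned quantity: an arithmetic right shift would smear a set sign bit into the XOR and produce hashes that disagree with the runtime's C++ computation for the same string.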
@@ -5850,15 +6048,13 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// a3: from index (untagged smi)
// t2 (a.k.a. to): to (smi)
// t3 (a.k.a. from): from offset (smi)
- Label allocate_slice, sliced_string, seq_string;
- STATIC_ASSERT(kSeqStringTag == 0);
- __ And(t4, a1, Operand(kStringRepresentationMask));
- __ Branch(&seq_string, eq, t4, Operand(zero_reg));
+ Label allocate_slice, sliced_string, seq_or_external_string;
+ // If the string is not indirect, it can only be sequential or external.
STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
STATIC_ASSERT(kIsIndirectStringMask != 0);
__ And(t4, a1, Operand(kIsIndirectStringMask));
// External string. Jump to runtime.
- __ Branch(&sub_string_runtime, eq, t4, Operand(zero_reg));
+ __ Branch(&seq_or_external_string, eq, t4, Operand(zero_reg));
__ And(t4, a1, Operand(kSlicedNotConsMask));
__ Branch(&sliced_string, ne, t4, Operand(zero_reg));
@@ -5876,8 +6072,8 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ lw(t1, FieldMemOperand(v0, SlicedString::kParentOffset));
__ jmp(&allocate_slice);
- __ bind(&seq_string);
- // Sequential string. Just move string to the right register.
+ __ bind(&seq_or_external_string);
+ // Sequential or external string. Just move the string to the correct register.
__ mov(t1, v0);
__ bind(&allocate_slice);
@@ -6463,39 +6659,25 @@ void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
__ Subu(a2, a0, Operand(kHeapObjectTag));
__ ldc1(f2, MemOperand(a2, HeapNumber::kValueOffset));
- Label fpu_eq, fpu_lt, fpu_gt;
- // Compare operands (test if unordered).
- __ c(UN, D, f0, f2);
- // Don't base result on status bits when a NaN is involved.
- __ bc1t(&unordered);
- __ nop();
-
- // Test if equal.
- __ c(EQ, D, f0, f2);
- __ bc1t(&fpu_eq);
- __ nop();
+ // Return a result of -1, 0, or 1, or use CompareStub for NaNs.
+ Label fpu_eq, fpu_lt;
+ // Test if equal, and also handle the unordered/NaN case.
+ __ BranchF(&fpu_eq, &unordered, eq, f0, f2);
- // Test if unordered or less (unordered case is already handled).
- __ c(ULT, D, f0, f2);
- __ bc1t(&fpu_lt);
- __ nop();
+ // Test if less (unordered case is already handled).
+ __ BranchF(&fpu_lt, NULL, lt, f0, f2);
- // Otherwise it's greater.
- __ bc1f(&fpu_gt);
- __ nop();
+ // Otherwise it's greater, so just fall through and return.
+ __ Ret(USE_DELAY_SLOT);
+ __ li(v0, Operand(GREATER)); // In delay slot.
- // Return a result of -1, 0, or 1.
__ bind(&fpu_eq);
- __ li(v0, Operand(EQUAL));
- __ Ret();
+ __ Ret(USE_DELAY_SLOT);
+ __ li(v0, Operand(EQUAL)); // In delay slot.
__ bind(&fpu_lt);
- __ li(v0, Operand(LESS));
- __ Ret();
-
- __ bind(&fpu_gt);
- __ li(v0, Operand(GREATER));
- __ Ret();
+ __ Ret(USE_DELAY_SLOT);
+ __ li(v0, Operand(LESS)); // In delay slot.
__ bind(&unordered);
}
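The delay-slot returns above shave one instruction off every exit: on MIPS the instruction immediately after a jump executes before control transfers, so Ret(USE_DELAY_SLOT) lets the result load ride in the jr ra delay slot. Without the flag the same exit costs an extra word, since Ret() pads the slot with a nop:

    __ li(v0, Operand(GREATER));
    __ Ret();  // emits jr ra followed by a nop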
@@ -6646,12 +6828,13 @@ void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
// Call the runtime system in a fresh internal frame.
ExternalReference miss = ExternalReference(IC_Utility(IC::kCompareIC_Miss),
masm->isolate());
- __ EnterInternalFrame();
- __ Push(a1, a0);
- __ li(t0, Operand(Smi::FromInt(op_)));
- __ push(t0);
- __ CallExternalReference(miss, 3);
- __ LeaveInternalFrame();
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(a1, a0);
+ __ li(t0, Operand(Smi::FromInt(op_)));
+ __ push(t0);
+ __ CallExternalReference(miss, 3);
+ }
// Compute the entry point of the rewritten stub.
__ Addu(a2, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
// Restore registers.
@@ -6668,7 +6851,7 @@ void DirectCEntryStub::Generate(MacroAssembler* masm) {
// The saved ra is after the reserved stack space for the 4 args.
__ lw(t9, MemOperand(sp, kCArgsSlotsSize));
- if (FLAG_debug_code && EnableSlowAsserts()) {
+ if (FLAG_debug_code && FLAG_enable_slow_asserts) {
// In case of an error the return address may point to a memory area
// filled with kZapValue by the GC.
// Dereference the address and check for this.
@@ -6718,15 +6901,14 @@ void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
}
-MaybeObject* StringDictionaryLookupStub::GenerateNegativeLookup(
- MacroAssembler* masm,
- Label* miss,
- Label* done,
- Register receiver,
- Register properties,
- String* name,
- Register scratch0) {
-// If names of slots in range from 1 to kProbes - 1 for the hash value are
+void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
+ Label* miss,
+ Label* done,
+ Register receiver,
+ Register properties,
+ Handle<String> name,
+ Register scratch0) {
+ // If names of slots in range from 1 to kProbes - 1 for the hash value are
// not equal to the name and kProbes-th slot is not used (its name is the
// undefined value), it guarantees the hash table doesn't contain the
// property. It's true even if some slots represent deleted properties
@@ -6739,20 +6921,17 @@ MaybeObject* StringDictionaryLookupStub::GenerateNegativeLookup(
__ lw(index, FieldMemOperand(properties, kCapacityOffset));
__ Subu(index, index, Operand(1));
__ And(index, index, Operand(
- Smi::FromInt(name->Hash() + StringDictionary::GetProbeOffset(i))));
+ Smi::FromInt(name->Hash() + StringDictionary::GetProbeOffset(i))));
// Scale the index by multiplying by the entry size.
ASSERT(StringDictionary::kEntrySize == 3);
- // index *= 3.
- __ mov(at, index);
- __ sll(index, index, 1);
+ __ sll(at, index, 1);
__ Addu(index, index, at);
Register entity_name = scratch0;
// Having undefined at this place means the name is not contained.
ASSERT_EQ(kSmiTagSize, 1);
Register tmp = properties;
-
__ sll(scratch0, index, 1);
__ Addu(tmp, properties, scratch0);
__ lw(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
@@ -6780,19 +6959,18 @@ MaybeObject* StringDictionaryLookupStub::GenerateNegativeLookup(
const int spill_mask =
(ra.bit() | t2.bit() | t1.bit() | t0.bit() | a3.bit() |
- a2.bit() | a1.bit() | a0.bit());
+ a2.bit() | a1.bit() | a0.bit() | v0.bit());
__ MultiPush(spill_mask);
__ lw(a0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
__ li(a1, Operand(Handle<String>(name)));
StringDictionaryLookupStub stub(NEGATIVE_LOOKUP);
- MaybeObject* result = masm->TryCallStub(&stub);
- if (result->IsFailure()) return result;
+ __ CallStub(&stub);
+ __ mov(at, v0);
__ MultiPop(spill_mask);
- __ Branch(done, eq, v0, Operand(zero_reg));
- __ Branch(miss, ne, v0, Operand(zero_reg));
- return result;
+ __ Branch(done, eq, at, Operand(zero_reg));
+ __ Branch(miss, ne, at, Operand(zero_reg));
}
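In scalar form, each inlined probe above combines the precomputed hash with V8's quadratic probe offsets and scales the result by the three-word entry size. A sketch, assuming GetProbeOffset(i) is the usual triangular offset (i + i*i)/2 and that the capacity is a power of two:

    static inline int ProbeEntryOffset(uint32_t hash, int i, int capacity) {
      uint32_t index = (hash + (i + i * i) / 2) & (capacity - 1);
      return index * 3;  // kEntrySize == 3; the sll/Addu pair computes the *3
    }

If none of the inlined probes can prove the name absent, the code now falls back to the stub through a plain CallStub, replacing the removed TryCallStub/MaybeObject failure protocol.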
@@ -6807,6 +6985,11 @@ void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
Register name,
Register scratch1,
Register scratch2) {
+ ASSERT(!elements.is(scratch1));
+ ASSERT(!elements.is(scratch2));
+ ASSERT(!name.is(scratch1));
+ ASSERT(!name.is(scratch2));
+
// Assert that name contains a string.
if (FLAG_debug_code) __ AbortIfNotString(name);
@@ -6837,8 +7020,7 @@ void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
ASSERT(StringDictionary::kEntrySize == 3);
// scratch2 = scratch2 * 3.
- __ mov(at, scratch2);
- __ sll(scratch2, scratch2, 1);
+ __ sll(at, scratch2, 1);
__ Addu(scratch2, scratch2, at);
// Check if the key is identical to the name.
@@ -6850,23 +7032,32 @@ void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
const int spill_mask =
(ra.bit() | t2.bit() | t1.bit() | t0.bit() |
- a3.bit() | a2.bit() | a1.bit() | a0.bit()) &
+ a3.bit() | a2.bit() | a1.bit() | a0.bit() | v0.bit()) &
~(scratch1.bit() | scratch2.bit());
__ MultiPush(spill_mask);
- __ Move(a0, elements);
- __ Move(a1, name);
+ if (name.is(a0)) {
+ ASSERT(!elements.is(a1));
+ __ Move(a1, name);
+ __ Move(a0, elements);
+ } else {
+ __ Move(a0, elements);
+ __ Move(a1, name);
+ }
StringDictionaryLookupStub stub(POSITIVE_LOOKUP);
__ CallStub(&stub);
__ mov(scratch2, a2);
+ __ mov(at, v0);
__ MultiPop(spill_mask);
- __ Branch(done, ne, v0, Operand(zero_reg));
- __ Branch(miss, eq, v0, Operand(zero_reg));
+ __ Branch(done, ne, at, Operand(zero_reg));
+ __ Branch(miss, eq, at, Operand(zero_reg));
}
void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
+ // This stub overrides SometimesSetsUpAFrame() to return false. That means
+ // we cannot call anything that could cause a GC from this stub.
// Registers:
// result: StringDictionary to probe
// a1: key
@@ -6960,6 +7151,338 @@ void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
}
+struct AheadOfTimeWriteBarrierStubList {
+ Register object, value, address;
+ RememberedSetAction action;
+};
+
+
+struct AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
+ // Used in RegExpExecStub.
+ { s2, s0, t3, EMIT_REMEMBERED_SET },
+ { s2, a2, t3, EMIT_REMEMBERED_SET },
+ // Used in CompileArrayPushCall.
+ // Also used in StoreIC::GenerateNormal via GenerateDictionaryStore.
+ // Also used in KeyedStoreIC::GenerateGeneric.
+ { a3, t0, t1, EMIT_REMEMBERED_SET },
+ // Used in CompileStoreGlobal.
+ { t0, a1, a2, OMIT_REMEMBERED_SET },
+ // Used in StoreStubCompiler::CompileStoreField via GenerateStoreField.
+ { a1, a2, a3, EMIT_REMEMBERED_SET },
+ { a3, a2, a1, EMIT_REMEMBERED_SET },
+ // Used in KeyedStoreStubCompiler::CompileStoreField via GenerateStoreField.
+ { a2, a1, a3, EMIT_REMEMBERED_SET },
+ { a3, a1, a2, EMIT_REMEMBERED_SET },
+ // KeyedStoreStubCompiler::GenerateStoreFastElement.
+ { t0, a2, a3, EMIT_REMEMBERED_SET },
+ // ElementsTransitionGenerator::GenerateSmiOnlyToObject
+ // and ElementsTransitionGenerator::GenerateSmiOnlyToDouble
+ // and ElementsTransitionGenerator::GenerateDoubleToObject
+ { a2, a3, t5, EMIT_REMEMBERED_SET },
+ // ElementsTransitionGenerator::GenerateDoubleToObject
+ { t2, a2, a0, EMIT_REMEMBERED_SET },
+ { a2, t2, t5, EMIT_REMEMBERED_SET },
+ // StoreArrayLiteralElementStub::Generate
+ { t1, a0, t2, EMIT_REMEMBERED_SET },
+ // Null termination.
+ { no_reg, no_reg, no_reg, EMIT_REMEMBERED_SET}
+};
+
+
+bool RecordWriteStub::IsPregenerated() {
+ for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
+ !entry->object.is(no_reg);
+ entry++) {
+ if (object_.is(entry->object) &&
+ value_.is(entry->value) &&
+ address_.is(entry->address) &&
+ remembered_set_action_ == entry->action &&
+ save_fp_regs_mode_ == kDontSaveFPRegs) {
+ return true;
+ }
+ }
+ return false;
+}
+
+
+bool StoreBufferOverflowStub::IsPregenerated() {
+ return save_doubles_ == kDontSaveFPRegs || ISOLATE->fp_stubs_generated();
+}
+
+
+void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime() {
+ StoreBufferOverflowStub stub1(kDontSaveFPRegs);
+ stub1.GetCode()->set_is_pregenerated(true);
+}
+
+
+void RecordWriteStub::GenerateFixedRegStubsAheadOfTime() {
+ for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
+ !entry->object.is(no_reg);
+ entry++) {
+ RecordWriteStub stub(entry->object,
+ entry->value,
+ entry->address,
+ entry->action,
+ kDontSaveFPRegs);
+ stub.GetCode()->set_is_pregenerated(true);
+ }
+}
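These stubs are generated once at startup so that a write barrier never has to compile code at record-write time. A hypothetical call site (the actual wiring lives in the platform-independent stub pregeneration pass):

    StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime();
    RecordWriteStub::GenerateFixedRegStubsAheadOfTime();

Any RecordWriteStub whose register triple and action match a kAheadOfTime entry then answers IsPregenerated() with true; other register combinations fall back to lazy compilation.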
+
+
+// Takes its input in three registers: address_, value_, and object_. A
+// pointer to the value has just been written into the object; this stub now
+// makes sure the GC is kept informed. The word in the object where the value
+// has been written is in the address register.
+void RecordWriteStub::Generate(MacroAssembler* masm) {
+ Label skip_to_incremental_noncompacting;
+ Label skip_to_incremental_compacting;
+
+ // The first two branch+nop instructions are generated with labels so as to
+ // get the offset fixed up correctly by the bind(Label*) call. We patch it
+ // back and forth between a "bne zero_reg, zero_reg, ..." (a nop in this
+ // position) and the "beq zero_reg, zero_reg, ..." when we start and stop
+ // incremental heap marking.
+ // See RecordWriteStub::Patch for details.
+ __ beq(zero_reg, zero_reg, &skip_to_incremental_noncompacting);
+ __ nop();
+ __ beq(zero_reg, zero_reg, &skip_to_incremental_compacting);
+ __ nop();
+
+ if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
+ __ RememberedSetHelper(object_,
+ address_,
+ value_,
+ save_fp_regs_mode_,
+ MacroAssembler::kReturnAtEnd);
+ }
+ __ Ret();
+
+ __ bind(&skip_to_incremental_noncompacting);
+ GenerateIncremental(masm, INCREMENTAL);
+
+ __ bind(&skip_to_incremental_compacting);
+ GenerateIncremental(masm, INCREMENTAL_COMPACTION);
+
+ // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
+ // Will be checked in IncrementalMarking::ActivateGeneratedStub.
+
+ PatchBranchIntoNop(masm, 0);
+ PatchBranchIntoNop(masm, 2 * Assembler::kInstrSize);
+}
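Behaviorally, the patchable two-branch header gives the stub three states. A pseudo-C++ sketch of the dispatch; the helper names are descriptive stand-ins, not V8 API:

    enum Mode { STORE_BUFFER_ONLY, INCREMENTAL, INCREMENTAL_COMPACTION };

    void RememberSlot();          // stand-in for RememberedSetHelper
    void InformMarker(Mode mode); // stand-in for the GenerateIncremental body

    void RecordWriteDispatch(Mode mode, bool emit_remembered_set) {
      if (mode == STORE_BUFFER_ONLY) {       // both branches patched to nops
        if (emit_remembered_set) RememberSlot();
        return;
      }
      InformMarker(mode);                    // one of the two skips is live
    }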
+
+
+void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
+ regs_.Save(masm);
+
+ if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
+ Label dont_need_remembered_set;
+
+ __ lw(regs_.scratch0(), MemOperand(regs_.address(), 0));
+ __ JumpIfNotInNewSpace(regs_.scratch0(), // Value.
+ regs_.scratch0(),
+ &dont_need_remembered_set);
+
+ __ CheckPageFlag(regs_.object(),
+ regs_.scratch0(),
+ 1 << MemoryChunk::SCAN_ON_SCAVENGE,
+ ne,
+ &dont_need_remembered_set);
+
+ // First notify the incremental marker if necessary, then update the
+ // remembered set.
+ CheckNeedsToInformIncrementalMarker(
+ masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
+ InformIncrementalMarker(masm, mode);
+ regs_.Restore(masm);
+ __ RememberedSetHelper(object_,
+ address_,
+ value_,
+ save_fp_regs_mode_,
+ MacroAssembler::kReturnAtEnd);
+
+ __ bind(&dont_need_remembered_set);
+ }
+
+ CheckNeedsToInformIncrementalMarker(
+ masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
+ InformIncrementalMarker(masm, mode);
+ regs_.Restore(masm);
+ __ Ret();
+}
+
+
+void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
+ regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
+ int argument_count = 3;
+ __ PrepareCallCFunction(argument_count, regs_.scratch0());
+ Register address =
+ a0.is(regs_.address()) ? regs_.scratch0() : regs_.address();
+ ASSERT(!address.is(regs_.object()));
+ ASSERT(!address.is(a0));
+ __ Move(address, regs_.address());
+ __ Move(a0, regs_.object());
+ if (mode == INCREMENTAL_COMPACTION) {
+ __ Move(a1, address);
+ } else {
+ ASSERT(mode == INCREMENTAL);
+ __ lw(a1, MemOperand(address, 0));
+ }
+ __ li(a2, Operand(ExternalReference::isolate_address()));
+
+ AllowExternalCallThatCantCauseGC scope(masm);
+ if (mode == INCREMENTAL_COMPACTION) {
+ __ CallCFunction(
+ ExternalReference::incremental_evacuation_record_write_function(
+ masm->isolate()),
+ argument_count);
+ } else {
+ ASSERT(mode == INCREMENTAL);
+ __ CallCFunction(
+ ExternalReference::incremental_marking_record_write_function(
+ masm->isolate()),
+ argument_count);
+ }
+ regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
+}
+
+
+void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
+ MacroAssembler* masm,
+ OnNoNeedToInformIncrementalMarker on_no_need,
+ Mode mode) {
+ Label on_black;
+ Label need_incremental;
+ Label need_incremental_pop_scratch;
+
+ // Let's look at the color of the object: If it is not black we don't have
+ // to inform the incremental marker.
+ __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
+
+ regs_.Restore(masm);
+ if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
+ __ RememberedSetHelper(object_,
+ address_,
+ value_,
+ save_fp_regs_mode_,
+ MacroAssembler::kReturnAtEnd);
+ } else {
+ __ Ret();
+ }
+
+ __ bind(&on_black);
+
+ // Get the value from the slot.
+ __ lw(regs_.scratch0(), MemOperand(regs_.address(), 0));
+
+ if (mode == INCREMENTAL_COMPACTION) {
+ Label ensure_not_white;
+
+ __ CheckPageFlag(regs_.scratch0(), // Contains value.
+ regs_.scratch1(), // Scratch.
+ MemoryChunk::kEvacuationCandidateMask,
+ eq,
+ &ensure_not_white);
+
+ __ CheckPageFlag(regs_.object(),
+ regs_.scratch1(), // Scratch.
+ MemoryChunk::kSkipEvacuationSlotsRecordingMask,
+ eq,
+ &need_incremental);
+
+ __ bind(&ensure_not_white);
+ }
+
+ // We need extra registers for this, so we push the object and the address
+ // register temporarily.
+ __ Push(regs_.object(), regs_.address());
+ __ EnsureNotWhite(regs_.scratch0(), // The value.
+ regs_.scratch1(), // Scratch.
+ regs_.object(), // Scratch.
+ regs_.address(), // Scratch.
+ &need_incremental_pop_scratch);
+ __ Pop(regs_.object(), regs_.address());
+
+ regs_.Restore(masm);
+ if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
+ __ RememberedSetHelper(object_,
+ address_,
+ value_,
+ save_fp_regs_mode_,
+ MacroAssembler::kReturnAtEnd);
+ } else {
+ __ Ret();
+ }
+
+ __ bind(&need_incremental_pop_scratch);
+ __ Pop(regs_.object(), regs_.address());
+
+ __ bind(&need_incremental);
+
+ // Fall through when we need to inform the incremental marker.
+}
+
+
+void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : element value to store
+ // -- a1 : array literal
+ // -- a2 : map of array literal
+ // -- a3 : element index as smi
+ // -- t0 : array literal index in function as smi
+ // -----------------------------------
+
+ Label element_done;
+ Label double_elements;
+ Label smi_element;
+ Label slow_elements;
+ Label fast_elements;
+
+ __ CheckFastElements(a2, t1, &double_elements);
+ // FAST_SMI_ONLY_ELEMENTS or FAST_ELEMENTS
+ __ JumpIfSmi(a0, &smi_element);
+ __ CheckFastSmiOnlyElements(a2, t1, &fast_elements);
+
+ // Storing into the array literal requires an elements transition. Call into
+ // the runtime.
+ __ bind(&slow_elements);
+ __ Push(a1, a3, a0);
+ __ lw(t1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ lw(t1, FieldMemOperand(t1, JSFunction::kLiteralsOffset));
+ __ Push(t1, t0);
+ __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
+
+ // Array literal has ElementsKind of FAST_ELEMENTS and value is an object.
+ __ bind(&fast_elements);
+ __ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset));
+ __ sll(t2, a3, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(t2, t1, t2);
+ __ Addu(t2, t2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ sw(a0, MemOperand(t2, 0));
+ // Update the write barrier for the array store.
+ __ RecordWrite(t1, t2, a0, kRAHasNotBeenSaved, kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+ __ Ret();
+
+ // Array literal has ElementsKind of FAST_SMI_ONLY_ELEMENTS or
+ // FAST_ELEMENTS, and value is Smi.
+ __ bind(&smi_element);
+ __ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset));
+ __ sll(t2, a3, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(t2, t1, t2);
+ __ sw(a0, FieldMemOperand(t2, FixedArray::kHeaderSize));
+ __ Ret();
+
+ // Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS.
+ __ bind(&double_elements);
+ __ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset));
+ __ StoreNumberToDoubleElements(a0, a3, a1, t1, t2, t3, t5, t6,
+ &slow_elements);
+ __ Ret();
+}
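Condensed, the stub's element-kind dispatch reads as follows (a sketch; the predicates stand in for the CheckFast* macro-assembler checks above):

    if (has_fast_double_elements(map)) {
      store_as_double(value);                   // double_elements path
    } else if (is_smi(value)) {
      store_raw(value);                         // smi_element: no barrier
    } else if (has_fast_object_elements(map)) {
      store_raw(value);
      record_write(elements, slot, value);      // fast_elements path
    } else {
      tail_call_StoreArrayLiteralElement();     // slow_elements path
    }

The ordering matters: a heap-object value stored into a FAST_SMI_ONLY_ELEMENTS backing store needs an elements-kind transition, which is exactly the case routed to the runtime.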
+
+
#undef __
} } // namespace v8::internal
diff --git a/deps/v8/src/mips/code-stubs-mips.h b/deps/v8/src/mips/code-stubs-mips.h
index aa224bcfa..e0954d837 100644
--- a/deps/v8/src/mips/code-stubs-mips.h
+++ b/deps/v8/src/mips/code-stubs-mips.h
@@ -59,6 +59,25 @@ class TranscendentalCacheStub: public CodeStub {
};
+class StoreBufferOverflowStub: public CodeStub {
+ public:
+ explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp)
+ : save_doubles_(save_fp) { }
+
+ void Generate(MacroAssembler* masm);
+
+ virtual bool IsPregenerated();
+ static void GenerateFixedRegStubsAheadOfTime();
+ virtual bool SometimesSetsUpAFrame() { return false; }
+
+ private:
+ SaveFPRegsMode save_doubles_;
+
+ Major MajorKey() { return StoreBufferOverflow; }
+ int MinorKey() { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; }
+};
+
+
class UnaryOpStub: public CodeStub {
public:
UnaryOpStub(Token::Value op,
@@ -118,7 +137,7 @@ class UnaryOpStub: public CodeStub {
return UnaryOpIC::ToState(operand_type_);
}
- virtual void FinishCode(Code* code) {
+ virtual void FinishCode(Handle<Code> code) {
code->set_unary_op_type(operand_type_);
}
};
@@ -217,7 +236,7 @@ class BinaryOpStub: public CodeStub {
return BinaryOpIC::ToState(operands_type_);
}
- virtual void FinishCode(Code* code) {
+ virtual void FinishCode(Handle<Code> code) {
code->set_binary_op_type(operands_type_);
code->set_binary_op_result_type(result_type_);
}
@@ -226,6 +245,70 @@ class BinaryOpStub: public CodeStub {
};
+class StringHelper : public AllStatic {
+ public:
+ // Generate code for copying characters using a simple loop. This should only
+ // be used in places where the number of characters is small and the
+ // additional setup and checking in GenerateCopyCharactersLong adds too much
+ // overhead. Copying of overlapping regions is not supported.
+ // Dest register ends at the position after the last character written.
+ static void GenerateCopyCharacters(MacroAssembler* masm,
+ Register dest,
+ Register src,
+ Register count,
+ Register scratch,
+ bool ascii);
+
+ // Generate code for copying a large number of characters. This function
+ // is allowed to spend extra time setting up conditions to make copying
+ // faster. Copying of overlapping regions is not supported.
+ // Dest register ends at the position after the last character written.
+ static void GenerateCopyCharactersLong(MacroAssembler* masm,
+ Register dest,
+ Register src,
+ Register count,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4,
+ Register scratch5,
+ int flags);
+
+
+ // Probe the symbol table for a two-character string. If the string is
+ // not found by probing, a jump to the label not_found is performed. This
+ // jump does not guarantee that the string is not in the symbol table. If
+ // the string is found, the code falls through with the string in register
+ // r0. The contents of both the c1 and c2 registers are modified. On exit,
+ // c1 is guaranteed to contain a halfword with its low and high bytes equal
+ // to the initial contents of c1 and c2 respectively.
+ static void GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
+ Register c1,
+ Register c2,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4,
+ Register scratch5,
+ Label* not_found);
+
+ // Generate string hash.
+ static void GenerateHashInit(MacroAssembler* masm,
+ Register hash,
+ Register character);
+
+ static void GenerateHashAddCharacter(MacroAssembler* masm,
+ Register hash,
+ Register character);
+
+ static void GenerateHashGetHash(MacroAssembler* masm,
+ Register hash);
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
+};
+
+
// Flag that indicates how to generate code for the stub StringAddStub.
enum StringAddFlags {
NO_STRING_ADD_FLAGS = 0,
@@ -324,7 +407,15 @@ class WriteInt32ToHeapNumberStub : public CodeStub {
: the_int_(the_int),
the_heap_number_(the_heap_number),
scratch_(scratch),
- sign_(scratch2) { }
+ sign_(scratch2) {
+ ASSERT(IntRegisterBits::is_valid(the_int_.code()));
+ ASSERT(HeapNumberRegisterBits::is_valid(the_heap_number_.code()));
+ ASSERT(ScratchRegisterBits::is_valid(scratch_.code()));
+ ASSERT(SignRegisterBits::is_valid(sign_.code()));
+ }
+
+ bool IsPregenerated();
+ static void GenerateFixedRegStubsAheadOfTime();
private:
Register the_int_;
@@ -336,13 +427,15 @@ class WriteInt32ToHeapNumberStub : public CodeStub {
class IntRegisterBits: public BitField<int, 0, 4> {};
class HeapNumberRegisterBits: public BitField<int, 4, 4> {};
class ScratchRegisterBits: public BitField<int, 8, 4> {};
+ class SignRegisterBits: public BitField<int, 12, 4> {};
Major MajorKey() { return WriteInt32ToHeapNumber; }
int MinorKey() {
// Encode the parameters in a unique 16 bit value.
return IntRegisterBits::encode(the_int_.code())
| HeapNumberRegisterBits::encode(the_heap_number_.code())
- | ScratchRegisterBits::encode(scratch_.code());
+ | ScratchRegisterBits::encode(scratch_.code())
+ | SignRegisterBits::encode(sign_.code());
}
void Generate(MacroAssembler* masm);
@@ -375,6 +468,208 @@ class NumberToStringStub: public CodeStub {
};
+class RecordWriteStub: public CodeStub {
+ public:
+ RecordWriteStub(Register object,
+ Register value,
+ Register address,
+ RememberedSetAction remembered_set_action,
+ SaveFPRegsMode fp_mode)
+ : object_(object),
+ value_(value),
+ address_(address),
+ remembered_set_action_(remembered_set_action),
+ save_fp_regs_mode_(fp_mode),
+ regs_(object, // An input reg.
+ address, // An input reg.
+ value) { // One scratch reg.
+ }
+
+ enum Mode {
+ STORE_BUFFER_ONLY,
+ INCREMENTAL,
+ INCREMENTAL_COMPACTION
+ };
+
+ virtual bool IsPregenerated();
+ static void GenerateFixedRegStubsAheadOfTime();
+ virtual bool SometimesSetsUpAFrame() { return false; }
+
+ static void PatchBranchIntoNop(MacroAssembler* masm, int pos) {
+ const unsigned offset = masm->instr_at(pos) & kImm16Mask;
+ masm->instr_at_put(pos, BNE | (zero_reg.code() << kRsShift) |
+ (zero_reg.code() << kRtShift) | (offset & kImm16Mask));
+ ASSERT(Assembler::IsBne(masm->instr_at(pos)));
+ }
+
+ static void PatchNopIntoBranch(MacroAssembler* masm, int pos) {
+ const unsigned offset = masm->instr_at(pos) & kImm16Mask;
+ masm->instr_at_put(pos, BEQ | (zero_reg.code() << kRsShift) |
+ (zero_reg.code() << kRtShift) | (offset & kImm16Mask));
+ ASSERT(Assembler::IsBeq(masm->instr_at(pos)));
+ }
+
+ static Mode GetMode(Code* stub) {
+ Instr first_instruction = Assembler::instr_at(stub->instruction_start());
+ Instr second_instruction = Assembler::instr_at(stub->instruction_start() +
+ 2 * Assembler::kInstrSize);
+
+ if (Assembler::IsBeq(first_instruction)) {
+ return INCREMENTAL;
+ }
+
+ ASSERT(Assembler::IsBne(first_instruction));
+
+ if (Assembler::IsBeq(second_instruction)) {
+ return INCREMENTAL_COMPACTION;
+ }
+
+ ASSERT(Assembler::IsBne(second_instruction));
+
+ return STORE_BUFFER_ONLY;
+ }
+
+ static void Patch(Code* stub, Mode mode) {
+ MacroAssembler masm(NULL,
+ stub->instruction_start(),
+ stub->instruction_size());
+ switch (mode) {
+ case STORE_BUFFER_ONLY:
+ ASSERT(GetMode(stub) == INCREMENTAL ||
+ GetMode(stub) == INCREMENTAL_COMPACTION);
+ PatchBranchIntoNop(&masm, 0);
+ PatchBranchIntoNop(&masm, 2 * Assembler::kInstrSize);
+ break;
+ case INCREMENTAL:
+ ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
+ PatchNopIntoBranch(&masm, 0);
+ break;
+ case INCREMENTAL_COMPACTION:
+ ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
+ PatchNopIntoBranch(&masm, 2 * Assembler::kInstrSize);
+ break;
+ }
+ ASSERT(GetMode(stub) == mode);
+ CPU::FlushICache(stub->instruction_start(), 4 * Assembler::kInstrSize);
+ }
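A hypothetical activation round-trip using these helpers (the real caller is IncrementalMarking::ActivateGeneratedStub, per the comment in Generate); stub is a Code* for a compiled record-write stub:

    ASSERT(RecordWriteStub::GetMode(stub) == RecordWriteStub::STORE_BUFFER_ONLY);
    RecordWriteStub::Patch(stub, RecordWriteStub::INCREMENTAL);
    // ... incremental marking runs ...
    RecordWriteStub::Patch(stub, RecordWriteStub::STORE_BUFFER_ONLY);

Because beq/bne with both operands zero_reg are always-taken and never-taken respectively, flipping a single opcode field toggles each header branch between a real skip and an effective nop, and GetMode() reads the current state straight back out of the instruction stream.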
+
+ private:
+ // This is a helper class for freeing up 3 scratch registers. The input is
+ // two registers that must be preserved and one scratch register provided by
+ // the caller.
+ class RegisterAllocation {
+ public:
+ RegisterAllocation(Register object,
+ Register address,
+ Register scratch0)
+ : object_(object),
+ address_(address),
+ scratch0_(scratch0) {
+ ASSERT(!AreAliased(scratch0, object, address, no_reg));
+ scratch1_ = GetRegThatIsNotOneOf(object_, address_, scratch0_);
+ }
+
+ void Save(MacroAssembler* masm) {
+ ASSERT(!AreAliased(object_, address_, scratch1_, scratch0_));
+ // We don't have to save scratch0_ because it was given to us as
+ // a scratch register.
+ masm->push(scratch1_);
+ }
+
+ void Restore(MacroAssembler* masm) {
+ masm->pop(scratch1_);
+ }
+
+ // If we have to call into C then we need to save and restore all caller-
+ // saved registers that were not already preserved. The scratch registers
+ // will be restored by other means so we don't bother pushing them here.
+ void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
+ masm->MultiPush((kJSCallerSaved | ra.bit()) & ~scratch1_.bit());
+ if (mode == kSaveFPRegs) {
+ CpuFeatures::Scope scope(FPU);
+ masm->MultiPushFPU(kCallerSavedFPU);
+ }
+ }
+
+ inline void RestoreCallerSaveRegisters(MacroAssembler* masm,
+ SaveFPRegsMode mode) {
+ if (mode == kSaveFPRegs) {
+ CpuFeatures::Scope scope(FPU);
+ masm->MultiPopFPU(kCallerSavedFPU);
+ }
+ masm->MultiPop((kJSCallerSaved | ra.bit()) & ~scratch1_.bit());
+ }
+
+ inline Register object() { return object_; }
+ inline Register address() { return address_; }
+ inline Register scratch0() { return scratch0_; }
+ inline Register scratch1() { return scratch1_; }
+
+ private:
+ Register object_;
+ Register address_;
+ Register scratch0_;
+ Register scratch1_;
+
+ Register GetRegThatIsNotOneOf(Register r1,
+ Register r2,
+ Register r3) {
+ for (int i = 0; i < Register::kNumAllocatableRegisters; i++) {
+ Register candidate = Register::FromAllocationIndex(i);
+ if (candidate.is(r1)) continue;
+ if (candidate.is(r2)) continue;
+ if (candidate.is(r3)) continue;
+ return candidate;
+ }
+ UNREACHABLE();
+ return no_reg;
+ }
+ friend class RecordWriteStub;
+ };
+
+ enum OnNoNeedToInformIncrementalMarker {
+ kReturnOnNoNeedToInformIncrementalMarker,
+ kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
+ };
+
+ void Generate(MacroAssembler* masm);
+ void GenerateIncremental(MacroAssembler* masm, Mode mode);
+ void CheckNeedsToInformIncrementalMarker(
+ MacroAssembler* masm,
+ OnNoNeedToInformIncrementalMarker on_no_need,
+ Mode mode);
+ void InformIncrementalMarker(MacroAssembler* masm, Mode mode);
+
+ Major MajorKey() { return RecordWrite; }
+
+ int MinorKey() {
+ return ObjectBits::encode(object_.code()) |
+ ValueBits::encode(value_.code()) |
+ AddressBits::encode(address_.code()) |
+ RememberedSetActionBits::encode(remembered_set_action_) |
+ SaveFPRegsModeBits::encode(save_fp_regs_mode_);
+ }
+
+ void Activate(Code* code) {
+ code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
+ }
+
+ class ObjectBits: public BitField<int, 0, 5> {};
+ class ValueBits: public BitField<int, 5, 5> {};
+ class AddressBits: public BitField<int, 10, 5> {};
+ class RememberedSetActionBits: public BitField<RememberedSetAction, 15, 1> {};
+ class SaveFPRegsModeBits: public BitField<SaveFPRegsMode, 16, 1> {};
+
+ Register object_;
+ Register value_;
+ Register address_;
+ RememberedSetAction remembered_set_action_;
+ SaveFPRegsMode save_fp_regs_mode_;
+ Label slow_;
+ RegisterAllocation regs_;
+};
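The minor key spends 5 + 5 + 5 + 1 + 1 = 17 bits, so any of the 32 GPR codes fits in each register field. A worked packing, assuming the action and FP-mode enum values encode to 0 here (register codes: a1 = 5, a2 = 6, a3 = 7):

    int key = 5 | (6 << 5) | (7 << 10);  // == 0x1CC5; both flag bits zero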
+
+
// Enter C code from generated RegExp code in a way that allows
// the C code to fix the return address in case of a GC.
// Currently only needed on ARM and MIPS.
@@ -561,14 +856,13 @@ class StringDictionaryLookupStub: public CodeStub {
void Generate(MacroAssembler* masm);
- MUST_USE_RESULT static MaybeObject* GenerateNegativeLookup(
- MacroAssembler* masm,
- Label* miss,
- Label* done,
- Register receiver,
- Register properties,
- String* name,
- Register scratch0);
+ static void GenerateNegativeLookup(MacroAssembler* masm,
+ Label* miss,
+ Label* done,
+ Register receiver,
+ Register properties,
+ Handle<String> name,
+ Register scratch0);
static void GeneratePositiveLookup(MacroAssembler* masm,
Label* miss,
@@ -578,6 +872,8 @@ class StringDictionaryLookupStub: public CodeStub {
Register r0,
Register r1);
+ virtual bool SometimesSetsUpAFrame() { return false; }
+
private:
static const int kInlinedProbes = 4;
static const int kTotalProbes = 20;
@@ -590,7 +886,7 @@ class StringDictionaryLookupStub: public CodeStub {
StringDictionary::kHeaderSize +
StringDictionary::kElementsStartIndex * kPointerSize;
- Major MajorKey() { return StringDictionaryNegativeLookup; }
+ Major MajorKey() { return StringDictionaryLookup; }
int MinorKey() {
return LookupModeBits::encode(mode_);
diff --git a/deps/v8/src/mips/codegen-mips.cc b/deps/v8/src/mips/codegen-mips.cc
index 4400b643a..c94e0fa52 100644
--- a/deps/v8/src/mips/codegen-mips.cc
+++ b/deps/v8/src/mips/codegen-mips.cc
@@ -30,22 +30,287 @@
#if defined(V8_TARGET_ARCH_MIPS)
#include "codegen.h"
+#include "macro-assembler.h"
namespace v8 {
namespace internal {
+#define __ ACCESS_MASM(masm)
+
// -------------------------------------------------------------------------
// Platform-specific RuntimeCallHelper functions.
void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
- masm->EnterInternalFrame();
+ masm->EnterFrame(StackFrame::INTERNAL);
+ ASSERT(!masm->has_frame());
+ masm->set_has_frame(true);
}
void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
- masm->LeaveInternalFrame();
+ masm->LeaveFrame(StackFrame::INTERNAL);
+ ASSERT(masm->has_frame());
+ masm->set_has_frame(false);
+}
+
+// -------------------------------------------------------------------------
+// Code generators
+
+void ElementsTransitionGenerator::GenerateSmiOnlyToObject(
+ MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : value
+ // -- a1 : key
+ // -- a2 : receiver
+ // -- ra : return address
+ // -- a3 : target map, scratch for subsequent call
+ // -- t0 : scratch (elements)
+ // -----------------------------------
+ // Set transitioned map.
+ __ sw(a3, FieldMemOperand(a2, HeapObject::kMapOffset));
+ __ RecordWriteField(a2,
+ HeapObject::kMapOffset,
+ a3,
+ t5,
+ kRAHasNotBeenSaved,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+}
+
+
+void ElementsTransitionGenerator::GenerateSmiOnlyToDouble(
+ MacroAssembler* masm, Label* fail) {
+ // ----------- S t a t e -------------
+ // -- a0 : value
+ // -- a1 : key
+ // -- a2 : receiver
+ // -- ra : return address
+ // -- a3 : target map, scratch for subsequent call
+ // -- t0 : scratch (elements)
+ // -----------------------------------
+ Label loop, entry, convert_hole, gc_required;
+ bool fpu_supported = CpuFeatures::IsSupported(FPU);
+ __ push(ra);
+
+ Register scratch = t6;
+
+ __ lw(t0, FieldMemOperand(a2, JSObject::kElementsOffset));
+ __ lw(t1, FieldMemOperand(t0, FixedArray::kLengthOffset));
+ // t0: source FixedArray
+ // t1: number of elements (smi-tagged)
+
+ // Allocate new FixedDoubleArray.
+ __ sll(scratch, t1, 2);
+ __ Addu(scratch, scratch, FixedDoubleArray::kHeaderSize);
+ __ AllocateInNewSpace(scratch, t2, t3, t5, &gc_required, NO_ALLOCATION_FLAGS);
+ // t2: destination FixedDoubleArray, not tagged as heap object
+ __ LoadRoot(t5, Heap::kFixedDoubleArrayMapRootIndex);
+ __ sw(t5, MemOperand(t2, HeapObject::kMapOffset));
+ // Set destination FixedDoubleArray's length.
+ __ sw(t1, MemOperand(t2, FixedDoubleArray::kLengthOffset));
+ // Update receiver's map.
+
+ __ sw(a3, FieldMemOperand(a2, HeapObject::kMapOffset));
+ __ RecordWriteField(a2,
+ HeapObject::kMapOffset,
+ a3,
+ t5,
+ kRAHasBeenSaved,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ // Replace receiver's backing store with newly created FixedDoubleArray.
+ __ Addu(a3, t2, Operand(kHeapObjectTag));
+ __ sw(a3, FieldMemOperand(a2, JSObject::kElementsOffset));
+ __ RecordWriteField(a2,
+ JSObject::kElementsOffset,
+ a3,
+ t5,
+ kRAHasBeenSaved,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+
+
+ // Prepare for conversion loop.
+ __ Addu(a3, t0, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ Addu(t3, t2, Operand(FixedDoubleArray::kHeaderSize));
+ __ sll(t2, t1, 2);
+ __ Addu(t2, t2, t3);
+ __ li(t0, Operand(kHoleNanLower32));
+ __ li(t1, Operand(kHoleNanUpper32));
+ // t0: kHoleNanLower32
+ // t1: kHoleNanUpper32
+ // t2: end of destination FixedDoubleArray, not tagged
+ // t3: begin of FixedDoubleArray element fields, not tagged
+
+ if (!fpu_supported) __ Push(a1, a0);
+
+ __ Branch(&entry);
+
+ // Call into runtime if GC is required.
+ __ bind(&gc_required);
+ __ pop(ra);
+ __ Branch(fail);
+
+ // Convert and copy elements.
+ __ bind(&loop);
+ __ lw(t5, MemOperand(a3));
+ __ Addu(a3, a3, kIntSize);
+ // t5: current element
+ __ JumpIfNotSmi(t5, &convert_hole);
+
+ // Normal smi, convert to double and store.
+ __ SmiUntag(t5);
+ if (fpu_supported) {
+ CpuFeatures::Scope scope(FPU);
+ __ mtc1(t5, f0);
+ __ cvt_d_w(f0, f0);
+ __ sdc1(f0, MemOperand(t3));
+ __ Addu(t3, t3, kDoubleSize);
+ } else {
+ FloatingPointHelper::ConvertIntToDouble(masm,
+ t5,
+ FloatingPointHelper::kCoreRegisters,
+ f0,
+ a0,
+ a1,
+ t7,
+ f0);
+ __ sw(a0, MemOperand(t3)); // mantissa
+ __ sw(a1, MemOperand(t3, kIntSize)); // exponent
+ __ Addu(t3, t3, kDoubleSize);
+ }
+ __ Branch(&entry);
+
+ // Hole found; store the-hole NaN.
+ __ bind(&convert_hole);
+ if (FLAG_debug_code) {
+ __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+ __ Assert(eq, "object found in smi-only array", at, Operand(t5));
+ }
+ __ sw(t0, MemOperand(t3)); // mantissa
+ __ sw(t1, MemOperand(t3, kIntSize)); // exponent
+ __ Addu(t3, t3, kDoubleSize);
+
+ __ bind(&entry);
+ __ Branch(&loop, lt, t3, Operand(t2));
+
+ if (!fpu_supported) __ Pop(a1, a0);
+ __ pop(ra);
+}
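The conversion loop in scalar form (a sketch: smis carry a low tag bit of 0 and untag with an arithmetic shift, and the hole is written as the kHoleNanUpper32:kHoleNanLower32 bit pattern):

    #include <stdint.h>
    #include <string.h>

    void SmiToDoubleElements(const int32_t* src, uint64_t* dst, int len,
                             uint64_t hole_nan_bits) {
      for (int i = 0; i < len; i++) {
        if ((src[i] & 1) == 0) {                        // smi: untag, convert
          double d = static_cast<double>(src[i] >> 1);  // mtc1 + cvt_d_w
          memcpy(&dst[i], &d, sizeof(d));               // sdc1
        } else {                                        // must be the hole
          dst[i] = hole_nan_bits;                       // canonical hole NaN
        }
      }
    }

On FPU-less hardware the same conversion goes through FloatingPointHelper::ConvertIntToDouble with the mantissa and exponent words stored separately, which is why a0/a1 are spilled around the loop in that configuration.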
+
+
+void ElementsTransitionGenerator::GenerateDoubleToObject(
+ MacroAssembler* masm, Label* fail) {
+ // ----------- S t a t e -------------
+ // -- a0 : value
+ // -- a1 : key
+ // -- a2 : receiver
+ // -- ra : return address
+ // -- a3 : target map, scratch for subsequent call
+ // -- t0 : scratch (elements)
+ // -----------------------------------
+ Label entry, loop, convert_hole, gc_required;
+ __ MultiPush(a0.bit() | a1.bit() | a2.bit() | a3.bit() | ra.bit());
+
+ __ lw(t0, FieldMemOperand(a2, JSObject::kElementsOffset));
+ __ lw(t1, FieldMemOperand(t0, FixedArray::kLengthOffset));
+ // t0: source FixedArray
+ // t1: number of elements (smi-tagged)
+
+ // Allocate new FixedArray.
+ __ sll(a0, t1, 1);
+ __ Addu(a0, a0, FixedDoubleArray::kHeaderSize);
+ __ AllocateInNewSpace(a0, t2, t3, t5, &gc_required, NO_ALLOCATION_FLAGS);
+ // t2: destination FixedArray, not tagged as heap object
+ __ LoadRoot(t5, Heap::kFixedArrayMapRootIndex);
+ __ sw(t5, MemOperand(t2, HeapObject::kMapOffset));
+ // Set destination FixedDoubleArray's length.
+ __ sw(t1, MemOperand(t2, FixedDoubleArray::kLengthOffset));
+
+ // Prepare for conversion loop.
+ __ Addu(t0, t0, Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag + 4));
+ __ Addu(a3, t2, Operand(FixedArray::kHeaderSize));
+ __ Addu(t2, t2, Operand(kHeapObjectTag));
+ __ sll(t1, t1, 1);
+ __ Addu(t1, a3, t1);
+ __ LoadRoot(t3, Heap::kTheHoleValueRootIndex);
+ __ LoadRoot(t5, Heap::kHeapNumberMapRootIndex);
+ // Using offset addresses.
+ // a3: begin of destination FixedArray element fields, not tagged
+ // t0: begin of source FixedDoubleArray element fields, not tagged, +4
+ // t1: end of destination FixedArray, not tagged
+ // t2: destination FixedArray
+ // t3: the-hole pointer
+ // t5: heap number map
+ __ Branch(&entry);
+
+ // Call into runtime if GC is required.
+ __ bind(&gc_required);
+ __ MultiPop(a0.bit() | a1.bit() | a2.bit() | a3.bit() | ra.bit());
+
+ __ Branch(fail);
+
+ __ bind(&loop);
+ __ lw(a1, MemOperand(t0));
+ __ Addu(t0, t0, kDoubleSize);
+ // a1: current element's upper 32 bit
+ // t0: address of next element's upper 32 bit
+ __ Branch(&convert_hole, eq, a1, Operand(kHoleNanUpper32));
+
+ // Non-hole double, copy value into a heap number.
+ __ AllocateHeapNumber(a2, a0, t6, t5, &gc_required);
+ // a2: new heap number
+ __ lw(a0, MemOperand(t0, -12));
+ __ sw(a0, FieldMemOperand(a2, HeapNumber::kMantissaOffset));
+ __ sw(a1, FieldMemOperand(a2, HeapNumber::kExponentOffset));
+ __ mov(a0, a3);
+ __ sw(a2, MemOperand(a3));
+ __ Addu(a3, a3, kIntSize);
+ __ RecordWrite(t2,
+ a0,
+ a2,
+ kRAHasBeenSaved,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ __ Branch(&entry);
+
+ // Replace the-hole NaN with the-hole pointer.
+ __ bind(&convert_hole);
+ __ sw(t3, MemOperand(a3));
+ __ Addu(a3, a3, kIntSize);
+
+ __ bind(&entry);
+ __ Branch(&loop, lt, a3, Operand(t1));
+
+ __ MultiPop(a2.bit() | a3.bit() | a0.bit() | a1.bit());
+ // Update receiver's map.
+ __ sw(a3, FieldMemOperand(a2, HeapObject::kMapOffset));
+ __ RecordWriteField(a2,
+ HeapObject::kMapOffset,
+ a3,
+ t5,
+ kRAHasBeenSaved,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ // Replace receiver's backing store with newly created and filled FixedArray.
+ __ sw(t2, FieldMemOperand(a2, JSObject::kElementsOffset));
+ __ RecordWriteField(a2,
+ JSObject::kElementsOffset,
+ t2,
+ t5,
+ kRAHasBeenSaved,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ __ pop(ra);
}
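Going the other way, the hole test above needs only the upper word of each double, which is why the source pointer is biased by +4 (the high word on this little-endian target). A scalar model of the check, assuming a slot is a hole exactly when its upper 32 bits equal kHoleNanUpper32:

    static inline bool IsHoleNan(uint64_t bits, uint32_t hole_upper32) {
      return static_cast<uint32_t>(bits >> 32) == hole_upper32;
    }

Each non-hole double is boxed into a freshly allocated HeapNumber, and every pointer store is followed by a RecordWrite so the remembered set and incremental-marking state stay consistent.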
+#undef __
} } // namespace v8::internal
diff --git a/deps/v8/src/mips/codegen-mips.h b/deps/v8/src/mips/codegen-mips.h
index a8de9c861..4549509f3 100644
--- a/deps/v8/src/mips/codegen-mips.h
+++ b/deps/v8/src/mips/codegen-mips.h
@@ -31,7 +31,6 @@
#include "ast.h"
-#include "code-stubs-mips.h"
#include "ic-inl.h"
namespace v8 {
@@ -71,21 +70,6 @@ class CodeGenerator: public AstVisitor {
int pos,
bool right_here = false);
- // Constants related to patching of inlined load/store.
- static int GetInlinedKeyedLoadInstructionsAfterPatch() {
- // This is in correlation with the padding in MacroAssembler::Abort.
- return FLAG_debug_code ? 45 : 20;
- }
-
- static const int kInlinedKeyedStoreInstructionsAfterPatch = 13;
-
- static int GetInlinedNamedStoreInstructionsAfterPatch() {
- ASSERT(Isolate::Current()->inlined_write_barrier_size() != -1);
- // Magic number 5: instruction count after patched map load:
- // li: 2 (liu & ori), Branch : 2 (bne & nop), sw : 1
- return Isolate::Current()->inlined_write_barrier_size() + 5;
- }
-
private:
DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
};
diff --git a/deps/v8/src/mips/constants-mips.h b/deps/v8/src/mips/constants-mips.h
index d76ae59ff..4f486c1c0 100644
--- a/deps/v8/src/mips/constants-mips.h
+++ b/deps/v8/src/mips/constants-mips.h
@@ -50,13 +50,13 @@
#if(defined(__mips_hard_float) && __mips_hard_float != 0)
// Use floating-point coprocessor instructions. This flag is raised when
// -mhard-float is passed to the compiler.
-static const bool IsMipsSoftFloatABI = false;
+const bool IsMipsSoftFloatABI = false;
#elif(defined(__mips_soft_float) && __mips_soft_float != 0)
// Not using floating-point coprocessor instructions. This flag is raised when
// -msoft-float is passed to the compiler.
-static const bool IsMipsSoftFloatABI = true;
+const bool IsMipsSoftFloatABI = true;
#else
-static const bool IsMipsSoftFloatABI = true;
+const bool IsMipsSoftFloatABI = true;
#endif
@@ -74,46 +74,45 @@ namespace internal {
// Registers and FPURegisters.
// Number of general purpose registers.
-static const int kNumRegisters = 32;
-static const int kInvalidRegister = -1;
+const int kNumRegisters = 32;
+const int kInvalidRegister = -1;
// Number of registers with HI, LO, and pc.
-static const int kNumSimuRegisters = 35;
+const int kNumSimuRegisters = 35;
// In the simulator, the PC register is simulated as the 34th register.
-static const int kPCRegister = 34;
+const int kPCRegister = 34;
// Number coprocessor registers.
-static const int kNumFPURegisters = 32;
-static const int kInvalidFPURegister = -1;
+const int kNumFPURegisters = 32;
+const int kInvalidFPURegister = -1;
// FPU (coprocessor 1) control registers. Currently only FCSR is implemented.
-static const int kFCSRRegister = 31;
-static const int kInvalidFPUControlRegister = -1;
-static const uint32_t kFPUInvalidResult = (uint32_t) (1 << 31) - 1;
+const int kFCSRRegister = 31;
+const int kInvalidFPUControlRegister = -1;
+const uint32_t kFPUInvalidResult = (uint32_t) (1 << 31) - 1;
// FCSR constants.
-static const uint32_t kFCSRInexactFlagBit = 2;
-static const uint32_t kFCSRUnderflowFlagBit = 3;
-static const uint32_t kFCSROverflowFlagBit = 4;
-static const uint32_t kFCSRDivideByZeroFlagBit = 5;
-static const uint32_t kFCSRInvalidOpFlagBit = 6;
-
-static const uint32_t kFCSRInexactFlagMask = 1 << kFCSRInexactFlagBit;
-static const uint32_t kFCSRUnderflowFlagMask = 1 << kFCSRUnderflowFlagBit;
-static const uint32_t kFCSROverflowFlagMask = 1 << kFCSROverflowFlagBit;
-static const uint32_t kFCSRDivideByZeroFlagMask = 1 << kFCSRDivideByZeroFlagBit;
-static const uint32_t kFCSRInvalidOpFlagMask = 1 << kFCSRInvalidOpFlagBit;
-
-static const uint32_t kFCSRFlagMask =
+const uint32_t kFCSRInexactFlagBit = 2;
+const uint32_t kFCSRUnderflowFlagBit = 3;
+const uint32_t kFCSROverflowFlagBit = 4;
+const uint32_t kFCSRDivideByZeroFlagBit = 5;
+const uint32_t kFCSRInvalidOpFlagBit = 6;
+
+const uint32_t kFCSRInexactFlagMask = 1 << kFCSRInexactFlagBit;
+const uint32_t kFCSRUnderflowFlagMask = 1 << kFCSRUnderflowFlagBit;
+const uint32_t kFCSROverflowFlagMask = 1 << kFCSROverflowFlagBit;
+const uint32_t kFCSRDivideByZeroFlagMask = 1 << kFCSRDivideByZeroFlagBit;
+const uint32_t kFCSRInvalidOpFlagMask = 1 << kFCSRInvalidOpFlagBit;
+
+const uint32_t kFCSRFlagMask =
kFCSRInexactFlagMask |
kFCSRUnderflowFlagMask |
kFCSROverflowFlagMask |
kFCSRDivideByZeroFlagMask |
kFCSRInvalidOpFlagMask;
-static const uint32_t kFCSRExceptionFlagMask =
- kFCSRFlagMask ^ kFCSRInexactFlagMask;
+const uint32_t kFCSRExceptionFlagMask = kFCSRFlagMask ^ kFCSRInexactFlagMask;
// Helper functions for converting between register numbers and names.
class Registers {
@@ -177,67 +176,66 @@ enum SoftwareInterruptCodes {
// instructions (see Assembler::stop()).
// - Breaks larger than kMaxStopCode are simple breaks, dropping you into the
// debugger.
-static const uint32_t kMaxWatchpointCode = 31;
-static const uint32_t kMaxStopCode = 127;
+const uint32_t kMaxWatchpointCode = 31;
+const uint32_t kMaxStopCode = 127;
STATIC_ASSERT(kMaxWatchpointCode < kMaxStopCode);
// ----- Fields offset and length.
-static const int kOpcodeShift = 26;
-static const int kOpcodeBits = 6;
-static const int kRsShift = 21;
-static const int kRsBits = 5;
-static const int kRtShift = 16;
-static const int kRtBits = 5;
-static const int kRdShift = 11;
-static const int kRdBits = 5;
-static const int kSaShift = 6;
-static const int kSaBits = 5;
-static const int kFunctionShift = 0;
-static const int kFunctionBits = 6;
-static const int kLuiShift = 16;
-
-static const int kImm16Shift = 0;
-static const int kImm16Bits = 16;
-static const int kImm26Shift = 0;
-static const int kImm26Bits = 26;
-static const int kImm28Shift = 0;
-static const int kImm28Bits = 28;
+const int kOpcodeShift = 26;
+const int kOpcodeBits = 6;
+const int kRsShift = 21;
+const int kRsBits = 5;
+const int kRtShift = 16;
+const int kRtBits = 5;
+const int kRdShift = 11;
+const int kRdBits = 5;
+const int kSaShift = 6;
+const int kSaBits = 5;
+const int kFunctionShift = 0;
+const int kFunctionBits = 6;
+const int kLuiShift = 16;
+
+const int kImm16Shift = 0;
+const int kImm16Bits = 16;
+const int kImm26Shift = 0;
+const int kImm26Bits = 26;
+const int kImm28Shift = 0;
+const int kImm28Bits = 28;
// In branches and jumps immediate fields point to words, not bytes,
// and are therefore shifted by 2.
-static const int kImmFieldShift = 2;
-
-static const int kFsShift = 11;
-static const int kFsBits = 5;
-static const int kFtShift = 16;
-static const int kFtBits = 5;
-static const int kFdShift = 6;
-static const int kFdBits = 5;
-static const int kFCccShift = 8;
-static const int kFCccBits = 3;
-static const int kFBccShift = 18;
-static const int kFBccBits = 3;
-static const int kFBtrueShift = 16;
-static const int kFBtrueBits = 1;
+const int kImmFieldShift = 2;
+
+const int kFsShift = 11;
+const int kFsBits = 5;
+const int kFtShift = 16;
+const int kFtBits = 5;
+const int kFdShift = 6;
+const int kFdBits = 5;
+const int kFCccShift = 8;
+const int kFCccBits = 3;
+const int kFBccShift = 18;
+const int kFBccBits = 3;
+const int kFBtrueShift = 16;
+const int kFBtrueBits = 1;
// ----- Miscellaneous useful masks.
// Instruction bit masks.
-static const int kOpcodeMask = ((1 << kOpcodeBits) - 1) << kOpcodeShift;
-static const int kImm16Mask = ((1 << kImm16Bits) - 1) << kImm16Shift;
-static const int kImm26Mask = ((1 << kImm26Bits) - 1) << kImm26Shift;
-static const int kImm28Mask = ((1 << kImm28Bits) - 1) << kImm28Shift;
-static const int kRsFieldMask = ((1 << kRsBits) - 1) << kRsShift;
-static const int kRtFieldMask = ((1 << kRtBits) - 1) << kRtShift;
-static const int kRdFieldMask = ((1 << kRdBits) - 1) << kRdShift;
-static const int kSaFieldMask = ((1 << kSaBits) - 1) << kSaShift;
-static const int kFunctionFieldMask =
- ((1 << kFunctionBits) - 1) << kFunctionShift;
+const int kOpcodeMask = ((1 << kOpcodeBits) - 1) << kOpcodeShift;
+const int kImm16Mask = ((1 << kImm16Bits) - 1) << kImm16Shift;
+const int kImm26Mask = ((1 << kImm26Bits) - 1) << kImm26Shift;
+const int kImm28Mask = ((1 << kImm28Bits) - 1) << kImm28Shift;
+const int kRsFieldMask = ((1 << kRsBits) - 1) << kRsShift;
+const int kRtFieldMask = ((1 << kRtBits) - 1) << kRtShift;
+const int kRdFieldMask = ((1 << kRdBits) - 1) << kRdShift;
+const int kSaFieldMask = ((1 << kSaBits) - 1) << kSaShift;
+const int kFunctionFieldMask = ((1 << kFunctionBits) - 1) << kFunctionShift;
// Misc masks.
-static const int kHiMask = 0xffff << 16;
-static const int kLoMask = 0xffff;
-static const int kSignMask = 0x80000000;
-static const int kJumpAddrMask = (1 << (kImm26Bits + kImmFieldShift)) - 1;
+const int kHiMask = 0xffff << 16;
+const int kLoMask = 0xffff;
+const int kSignMask = 0x80000000;
+const int kJumpAddrMask = (1 << (kImm26Bits + kImmFieldShift)) - 1;
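Taken together, these shift and mask constants decompose a raw instruction word. A sketch of R-type field extraction using only the definitions above:

    static inline void DecodeRType(uint32_t instr, int* rs, int* rt,
                                   int* rd, int* sa, int* function) {
      *rs = (instr & kRsFieldMask) >> kRsShift;
      *rt = (instr & kRtFieldMask) >> kRtShift;
      *rd = (instr & kRdFieldMask) >> kRdShift;
      *sa = (instr & kSaFieldMask) >> kSaShift;
      *function = (instr & kFunctionFieldMask) >> kFunctionShift;
    }

The static-to-const change throughout this header is a pure C++ cleanup: namespace-scope const variables already have internal linkage, so the static keyword was redundant.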
// ----- MIPS Opcodes and Function Fields.
// We use this presentation to stay close to the table representation in
@@ -529,7 +527,7 @@ enum FPURoundingMode {
kRoundToMinusInf = RM
};
-static const uint32_t kFPURoundingModeMask = 3 << 0;
+const uint32_t kFPURoundingModeMask = 3 << 0;
enum CheckForInexactConversion {
kCheckForInexactConversion,
@@ -772,18 +770,18 @@ class Instruction {
// MIPS assembly various constants.
// C/C++ argument slots size.
-static const int kCArgSlotCount = 4;
-static const int kCArgsSlotsSize = kCArgSlotCount * Instruction::kInstrSize;
+const int kCArgSlotCount = 4;
+const int kCArgsSlotsSize = kCArgSlotCount * Instruction::kInstrSize;
// JS argument slots size.
-static const int kJSArgsSlotsSize = 0 * Instruction::kInstrSize;
+const int kJSArgsSlotsSize = 0 * Instruction::kInstrSize;
// Assembly builtins argument slots size.
-static const int kBArgsSlotsSize = 0 * Instruction::kInstrSize;
+const int kBArgsSlotsSize = 0 * Instruction::kInstrSize;
-static const int kBranchReturnOffset = 2 * Instruction::kInstrSize;
+const int kBranchReturnOffset = 2 * Instruction::kInstrSize;
-static const int kDoubleAlignmentBits = 3;
-static const int kDoubleAlignment = (1 << kDoubleAlignmentBits);
-static const int kDoubleAlignmentMask = kDoubleAlignment - 1;
+const int kDoubleAlignmentBits = 3;
+const int kDoubleAlignment = (1 << kDoubleAlignmentBits);
+const int kDoubleAlignmentMask = kDoubleAlignment - 1;
} } // namespace v8::internal
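
The shift/width pairs above fully determine each instruction field: a field's mask is always ((1 << bits) - 1) << shift, which is exactly how the kOpcodeMask/kRsFieldMask family is built. A minimal standalone sketch of decoding one instruction word with these constants (the constants are duplicated locally and register numbering assumes the O32 convention, so this is an illustration, not V8's Instruction class):

    #include <cstdint>
    #include <cstdio>

    // Constants duplicated from above so the sketch stands alone.
    const int kOpcodeShift = 26, kOpcodeBits = 6;
    const int kRsShift = 21, kRsBits = 5;
    const int kRtShift = 16, kRtBits = 5;
    const int kRdShift = 11, kRdBits = 5;

    // Every kXxxFieldMask above is built exactly like this.
    static uint32_t Field(uint32_t instr, int shift, int bits) {
      return (instr >> shift) & ((1u << bits) - 1);
    }

    int main() {
      // addu t0, a0, a1: opcode SPECIAL (0), rs=a0(4), rt=a1(5), rd=t0(8).
      uint32_t instr = 0x00854021;
      printf("opcode=%u rs=%u rt=%u rd=%u\n",
             Field(instr, kOpcodeShift, kOpcodeBits),
             Field(instr, kRsShift, kRsBits),
             Field(instr, kRtShift, kRtBits),
             Field(instr, kRdShift, kRdBits));
      return 0;  // prints: opcode=0 rs=4 rt=5 rd=8
    }
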
diff --git a/deps/v8/src/mips/debug-mips.cc b/deps/v8/src/mips/debug-mips.cc
index e323c505e..34e333d28 100644
--- a/deps/v8/src/mips/debug-mips.cc
+++ b/deps/v8/src/mips/debug-mips.cc
@@ -124,55 +124,58 @@ void BreakLocationIterator::ClearDebugBreakAtSlot() {
static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
RegList object_regs,
RegList non_object_regs) {
- __ EnterInternalFrame();
-
- // Store the registers containing live values on the expression stack to
- // make sure that these are correctly updated during GC. Non object values
- // are stored as a smi causing it to be untouched by GC.
- ASSERT((object_regs & ~kJSCallerSaved) == 0);
- ASSERT((non_object_regs & ~kJSCallerSaved) == 0);
- ASSERT((object_regs & non_object_regs) == 0);
- if ((object_regs | non_object_regs) != 0) {
- for (int i = 0; i < kNumJSCallerSaved; i++) {
- int r = JSCallerSavedCode(i);
- Register reg = { r };
- if ((non_object_regs & (1 << r)) != 0) {
- if (FLAG_debug_code) {
- __ And(at, reg, 0xc0000000);
- __ Assert(eq, "Unable to encode value as smi", at, Operand(zero_reg));
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Store the registers containing live values on the expression stack to
+ // make sure that these are correctly updated during GC. Non-object values
+ // are stored as smis, causing them to be left untouched by GC.
+ ASSERT((object_regs & ~kJSCallerSaved) == 0);
+ ASSERT((non_object_regs & ~kJSCallerSaved) == 0);
+ ASSERT((object_regs & non_object_regs) == 0);
+ if ((object_regs | non_object_regs) != 0) {
+ for (int i = 0; i < kNumJSCallerSaved; i++) {
+ int r = JSCallerSavedCode(i);
+ Register reg = { r };
+ if ((non_object_regs & (1 << r)) != 0) {
+ if (FLAG_debug_code) {
+ __ And(at, reg, 0xc0000000);
+ __ Assert(
+ eq, "Unable to encode value as smi", at, Operand(zero_reg));
+ }
+ __ sll(reg, reg, kSmiTagSize);
}
- __ sll(reg, reg, kSmiTagSize);
}
+ __ MultiPush(object_regs | non_object_regs);
}
- __ MultiPush(object_regs | non_object_regs);
- }
#ifdef DEBUG
- __ RecordComment("// Calling from debug break to runtime - come in - over");
+ __ RecordComment("// Calling from debug break to runtime - come in - over");
#endif
- __ mov(a0, zero_reg); // No arguments.
- __ li(a1, Operand(ExternalReference::debug_break(masm->isolate())));
-
- CEntryStub ceb(1);
- __ CallStub(&ceb);
-
- // Restore the register values from the expression stack.
- if ((object_regs | non_object_regs) != 0) {
- __ MultiPop(object_regs | non_object_regs);
- for (int i = 0; i < kNumJSCallerSaved; i++) {
- int r = JSCallerSavedCode(i);
- Register reg = { r };
- if ((non_object_regs & (1 << r)) != 0) {
- __ srl(reg, reg, kSmiTagSize);
- }
- if (FLAG_debug_code &&
- (((object_regs |non_object_regs) & (1 << r)) == 0)) {
- __ li(reg, kDebugZapValue);
+ __ mov(a0, zero_reg); // No arguments.
+ __ li(a1, Operand(ExternalReference::debug_break(masm->isolate())));
+
+ CEntryStub ceb(1);
+ __ CallStub(&ceb);
+
+ // Restore the register values from the expression stack.
+ if ((object_regs | non_object_regs) != 0) {
+ __ MultiPop(object_regs | non_object_regs);
+ for (int i = 0; i < kNumJSCallerSaved; i++) {
+ int r = JSCallerSavedCode(i);
+ Register reg = { r };
+ if ((non_object_regs & (1 << r)) != 0) {
+ __ srl(reg, reg, kSmiTagSize);
+ }
+ if (FLAG_debug_code &&
+ (((object_regs |non_object_regs) & (1 << r)) == 0)) {
+ __ li(reg, kDebugZapValue);
+ }
}
}
- }
- __ LeaveInternalFrame();
+ // Leave the internal frame.
+ }
// Now that the break point has been handled, resume normal execution by
// jumping to the target address intended by the caller and that was
@@ -256,11 +259,11 @@ void Debug::GenerateReturnDebugBreak(MacroAssembler* masm) {
}
-void Debug::GenerateStubNoRegistersDebugBreak(MacroAssembler* masm) {
+void Debug::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // No registers used on entry.
+ // -- a1 : function
// -----------------------------------
- Generate_DebugBreakCallHelper(masm, 0, 0);
+ Generate_DebugBreakCallHelper(masm, a1.bit(), 0);
}
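
The rewritten helper still relies on the same trick as before: a raw register value whose top two bits are clear can be hidden from the GC by smi-tagging it, i.e. shifting it left by kSmiTagSize before MultiPush and shifting it back after MultiPop. A small self-contained sketch of that round-trip (kSmiTagSize = 1 as on 32-bit V8; plain C++, not V8 code):

    #include <cassert>
    #include <cstdint>

    const int kSmiTagSize = 1;  // 32-bit V8: a clear low bit marks a smi.

    // Mirrors the FLAG_debug_code check above: a value with either of the
    // top two bits set cannot survive the tag shift.
    bool CanEncodeAsSmi(uint32_t value) {
      return (value & 0xc0000000u) == 0;
    }

    int main() {
      uint32_t raw = 0x12345678;
      assert(CanEncodeAsSmi(raw));
      uint32_t tagged = raw << kSmiTagSize;       // what is MultiPushed
      assert((tagged & 1) == 0);                  // GC sees a smi, not a pointer
      uint32_t restored = tagged >> kSmiTagSize;  // what MultiPop + srl undo
      assert(restored == raw);
      return 0;
    }
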
diff --git a/deps/v8/src/mips/deoptimizer-mips.cc b/deps/v8/src/mips/deoptimizer-mips.cc
index 18b623199..a27c61cb2 100644
--- a/deps/v8/src/mips/deoptimizer-mips.cc
+++ b/deps/v8/src/mips/deoptimizer-mips.cc
@@ -32,65 +32,748 @@
#include "full-codegen.h"
#include "safepoint-table.h"
-// Note: this file was taken from the X64 version. ARM has a partially working
-// lithium implementation, but for now it is not ported to mips.
-
namespace v8 {
namespace internal {
-const int Deoptimizer::table_entry_size_ = 10;
+const int Deoptimizer::table_entry_size_ = 32;
int Deoptimizer::patch_size() {
- const int kCallInstructionSizeInWords = 3;
+ const int kCallInstructionSizeInWords = 4;
return kCallInstructionSizeInWords * Assembler::kInstrSize;
}
void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
- UNIMPLEMENTED();
+ HandleScope scope;
+ AssertNoAllocation no_allocation;
+
+ if (!function->IsOptimized()) return;
+
+ // Get the optimized code.
+ Code* code = function->code();
+ Address code_start_address = code->instruction_start();
+
+ // Invalidate the relocation information, as it will be invalidated by the
+ // code patching below and is no longer needed afterwards.
+ code->InvalidateRelocation();
+
+ // For each LLazyBailout instruction, insert a call to the corresponding
+ // deoptimization entry.
+ DeoptimizationInputData* deopt_data =
+ DeoptimizationInputData::cast(code->deoptimization_data());
+#ifdef DEBUG
+ Address prev_call_address = NULL;
+#endif
+ for (int i = 0; i < deopt_data->DeoptCount(); i++) {
+ if (deopt_data->Pc(i)->value() == -1) continue;
+ Address call_address = code_start_address + deopt_data->Pc(i)->value();
+ Address deopt_entry = GetDeoptimizationEntry(i, LAZY);
+ int call_size_in_bytes = MacroAssembler::CallSize(deopt_entry,
+ RelocInfo::NONE);
+ int call_size_in_words = call_size_in_bytes / Assembler::kInstrSize;
+ ASSERT(call_size_in_bytes % Assembler::kInstrSize == 0);
+ ASSERT(call_size_in_bytes <= patch_size());
+ CodePatcher patcher(call_address, call_size_in_words);
+ patcher.masm()->Call(deopt_entry, RelocInfo::NONE);
+ ASSERT(prev_call_address == NULL ||
+ call_address >= prev_call_address + patch_size());
+ ASSERT(call_address + patch_size() <= code->instruction_end());
+
+#ifdef DEBUG
+ prev_call_address = call_address;
+#endif
+ }
+
+ Isolate* isolate = code->GetIsolate();
+
+ // Add the deoptimizing code to the list.
+ DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code);
+ DeoptimizerData* data = isolate->deoptimizer_data();
+ node->set_next(data->deoptimizing_code_list_);
+ data->deoptimizing_code_list_ = node;
+
+ // We might be in the middle of incremental marking with compaction.
+ // Tell collector to treat this code object in a special way and
+ // ignore all slots that might have been recorded on it.
+ isolate->heap()->mark_compact_collector()->InvalidateCode(code);
+
+ // Set the code for the function to non-optimized version.
+ function->ReplaceCode(function->shared()->code());
+
+ if (FLAG_trace_deopt) {
+ PrintF("[forced deoptimization: ");
+ function->PrintName();
+ PrintF(" / %x]\n", reinterpret_cast<uint32_t>(function));
+#ifdef DEBUG
+ if (FLAG_print_code) {
+ code->PrintLn();
+ }
+#endif
+ }
}
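
The ASSERTs in the patch loop above encode an invariant that is easy to state on its own: consecutive lazy-deopt call sites must be at least patch_size() bytes apart and must not extend past the code object, or one patched call would clobber the next. A hedged model of that check, with hypothetical addresses:

    #include <cassert>
    #include <cstdint>

    const int kInstrSize = 4;
    const int kCallInstructionSizeInWords = 4;  // as in patch_size() above

    int patch_size() { return kCallInstructionSizeInWords * kInstrSize; }

    int main() {
      // Hypothetical code-object bounds and lazy-deopt call-site addresses.
      uint32_t code_start = 0x1000, code_end = 0x1100;
      uint32_t sites[] = { 0x1010, 0x1020, 0x1040 };
      uint32_t prev = 0;
      for (uint32_t site : sites) {
        assert(site >= code_start);
        assert(prev == 0 || site >= prev + patch_size());  // no overlap
        assert(site + patch_size() <= code_end);  // patch stays inside code
        prev = site;
      }
      return 0;
    }
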
-void Deoptimizer::PatchStackCheckCodeAt(Address pc_after,
+void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code,
+ Address pc_after,
Code* check_code,
Code* replacement_code) {
- UNIMPLEMENTED();
+ const int kInstrSize = Assembler::kInstrSize;
+ // This structure comes from FullCodeGenerator::EmitStackCheck.
+ // The call of the stack guard check has the following form:
+ // sltu at, sp, t0
+ // beq at, zero_reg, ok
+ // lui t9, <stack guard address> upper
+ // ori t9, <stack guard address> lower
+ // jalr t9
+ // nop
+ // ----- pc_after points here
+
+ ASSERT(Assembler::IsBeq(Assembler::instr_at(pc_after - 5 * kInstrSize)));
+
+ // Replace the sltu instruction with load-imm 1 to at, so beq is not taken.
+ CodePatcher patcher(pc_after - 6 * kInstrSize, 1);
+ patcher.masm()->addiu(at, zero_reg, 1);
+
+ // Replace the stack check address in the load-immediate (lui/ori pair)
+ // with the entry address of the replacement code.
+ ASSERT(reinterpret_cast<uint32_t>(
+ Assembler::target_address_at(pc_after - 4 * kInstrSize)) ==
+ reinterpret_cast<uint32_t>(check_code->entry()));
+ Assembler::set_target_address_at(pc_after - 4 * kInstrSize,
+ replacement_code->entry());
+
+ // We patched the code to the following form:
+ // addiu at, zero_reg, 1
+ // beq at, zero_reg, ok ;; Not changed
+ // lui t9, <on-stack replacement address> upper
+ // ori t9, <on-stack replacement address> lower
+ // jalr t9 ;; Not changed
+ // nop ;; Not changed
+ // ----- pc_after points here
+
+ unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
+ unoptimized_code, pc_after - 4 * kInstrSize, replacement_code);
}
-void Deoptimizer::RevertStackCheckCodeAt(Address pc_after,
+void Deoptimizer::RevertStackCheckCodeAt(Code* unoptimized_code,
+ Address pc_after,
Code* check_code,
Code* replacement_code) {
- UNIMPLEMENTED();
+ // Exact opposite of the function above.
+ const int kInstrSize = Assembler::kInstrSize;
+ ASSERT(Assembler::IsAddImmediate(
+ Assembler::instr_at(pc_after - 6 * kInstrSize)));
+ ASSERT(Assembler::IsBeq(Assembler::instr_at(pc_after - 5 * kInstrSize)));
+
+ // Restore the sltu instruction so beq can be taken again.
+ CodePatcher patcher(pc_after - 6 * kInstrSize, 1);
+ patcher.masm()->sltu(at, sp, t0);
+
+ // Replace the on-stack replacement address in the load-immediate (lui/ori
+ // pair) with the entry address of the normal stack-check code.
+ ASSERT(reinterpret_cast<uint32_t>(
+ Assembler::target_address_at(pc_after - 4 * kInstrSize)) ==
+ reinterpret_cast<uint32_t>(replacement_code->entry()));
+ Assembler::set_target_address_at(pc_after - 4 * kInstrSize,
+ check_code->entry());
+
+ check_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
+ unoptimized_code, pc_after - 4 * kInstrSize, check_code);
+}
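
Both patch routines address the same fixed six-instruction sequence backwards from pc_after, so the offsets used above (-6, -5, -4 instructions) read as a little map of the sequence. A sketch printing that layout (the pc_after value is hypothetical):

    #include <cstdint>
    #include <cstdio>

    const int kInstrSize = 4;  // Assembler::kInstrSize on MIPS

    int main() {
      uint32_t pc_after = 0x2000;  // hypothetical; points past the sequence
      printf("0x%x  sltu at, sp, t0   <- rewritten to addiu at, zero_reg, 1\n",
             pc_after - 6 * kInstrSize);
      printf("0x%x  beq at, zero_reg, ok\n", pc_after - 5 * kInstrSize);
      printf("0x%x  lui t9, upper     <- target address patched here\n",
             pc_after - 4 * kInstrSize);
      printf("0x%x  ori t9, lower\n", pc_after - 3 * kInstrSize);
      printf("0x%x  jalr t9\n", pc_after - 2 * kInstrSize);
      printf("0x%x  nop\n", pc_after - 1 * kInstrSize);
      return 0;
    }
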
+
+
+static int LookupBailoutId(DeoptimizationInputData* data, unsigned ast_id) {
+ ByteArray* translations = data->TranslationByteArray();
+ int length = data->DeoptCount();
+ for (int i = 0; i < length; i++) {
+ if (static_cast<unsigned>(data->AstId(i)->value()) == ast_id) {
+ TranslationIterator it(translations, data->TranslationIndex(i)->value());
+ int value = it.Next();
+ ASSERT(Translation::BEGIN == static_cast<Translation::Opcode>(value));
+ // Read the number of frames.
+ value = it.Next();
+ if (value == 1) return i;
+ }
+ }
+ UNREACHABLE();
+ return -1;
}
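
LookupBailoutId is a plain linear scan that additionally skips translations describing more than one frame, since OSR never targets an inlined frame. The same logic over a flattened stand-in for the translation stream (the Entry type is hypothetical, not V8's byte-array format):

    #include <cassert>

    // Hypothetical flattened view of the translation data: one record per
    // deopt entry instead of V8's byte-array stream.
    struct Entry { unsigned ast_id; int frame_count; };

    int LookupBailoutId(const Entry* entries, int length, unsigned ast_id) {
      for (int i = 0; i < length; i++) {
        // OSR only targets single-frame (non-inlined) translations.
        if (entries[i].ast_id == ast_id && entries[i].frame_count == 1)
          return i;
      }
      return -1;  // the real code reaches UNREACHABLE() instead
    }

    int main() {
      Entry table[] = { {7, 2}, {7, 1}, {9, 1} };
      assert(LookupBailoutId(table, 3, 7) == 1);  // skips the inlined entry
      assert(LookupBailoutId(table, 3, 9) == 2);
      return 0;
    }
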
void Deoptimizer::DoComputeOsrOutputFrame() {
- UNIMPLEMENTED();
+ DeoptimizationInputData* data = DeoptimizationInputData::cast(
+ optimized_code_->deoptimization_data());
+ unsigned ast_id = data->OsrAstId()->value();
+
+ int bailout_id = LookupBailoutId(data, ast_id);
+ unsigned translation_index = data->TranslationIndex(bailout_id)->value();
+ ByteArray* translations = data->TranslationByteArray();
+
+ TranslationIterator iterator(translations, translation_index);
+ Translation::Opcode opcode =
+ static_cast<Translation::Opcode>(iterator.Next());
+ ASSERT(Translation::BEGIN == opcode);
+ USE(opcode);
+ int count = iterator.Next();
+ ASSERT(count == 1);
+ USE(count);
+
+ opcode = static_cast<Translation::Opcode>(iterator.Next());
+ USE(opcode);
+ ASSERT(Translation::FRAME == opcode);
+ unsigned node_id = iterator.Next();
+ USE(node_id);
+ ASSERT(node_id == ast_id);
+ JSFunction* function = JSFunction::cast(ComputeLiteral(iterator.Next()));
+ USE(function);
+ ASSERT(function == function_);
+ unsigned height = iterator.Next();
+ unsigned height_in_bytes = height * kPointerSize;
+ USE(height_in_bytes);
+
+ unsigned fixed_size = ComputeFixedSize(function_);
+ unsigned input_frame_size = input_->GetFrameSize();
+ ASSERT(fixed_size + height_in_bytes == input_frame_size);
+
+ unsigned stack_slot_size = optimized_code_->stack_slots() * kPointerSize;
+ unsigned outgoing_height = data->ArgumentsStackHeight(bailout_id)->value();
+ unsigned outgoing_size = outgoing_height * kPointerSize;
+ unsigned output_frame_size = fixed_size + stack_slot_size + outgoing_size;
+ ASSERT(outgoing_size == 0); // OSR does not happen in the middle of a call.
+
+ if (FLAG_trace_osr) {
+ PrintF("[on-stack replacement: begin 0x%08" V8PRIxPTR " ",
+ reinterpret_cast<intptr_t>(function_));
+ function_->PrintName();
+ PrintF(" => node=%u, frame=%d->%d]\n",
+ ast_id,
+ input_frame_size,
+ output_frame_size);
+ }
+
+ // There's only one output frame in the OSR case.
+ output_count_ = 1;
+ output_ = new FrameDescription*[1];
+ output_[0] = new(output_frame_size) FrameDescription(
+ output_frame_size, function_);
+#ifdef DEBUG
+ output_[0]->SetKind(Code::OPTIMIZED_FUNCTION);
+#endif
+
+ // Clear the incoming parameters in the optimized frame to avoid
+ // confusing the garbage collector.
+ unsigned output_offset = output_frame_size - kPointerSize;
+ int parameter_count = function_->shared()->formal_parameter_count() + 1;
+ for (int i = 0; i < parameter_count; ++i) {
+ output_[0]->SetFrameSlot(output_offset, 0);
+ output_offset -= kPointerSize;
+ }
+
+ // Translate the incoming parameters. This may overwrite some of the
+ // incoming argument slots we've just cleared.
+ int input_offset = input_frame_size - kPointerSize;
+ bool ok = true;
+ int limit = input_offset - (parameter_count * kPointerSize);
+ while (ok && input_offset > limit) {
+ ok = DoOsrTranslateCommand(&iterator, &input_offset);
+ }
+
+ // There are no translation commands for the caller's pc and fp, the
+ // context, and the function. Set them up explicitly.
+ for (int i = StandardFrameConstants::kCallerPCOffset;
+ ok && i >= StandardFrameConstants::kMarkerOffset;
+ i -= kPointerSize) {
+ uint32_t input_value = input_->GetFrameSlot(input_offset);
+ if (FLAG_trace_osr) {
+ const char* name = "UNKNOWN";
+ switch (i) {
+ case StandardFrameConstants::kCallerPCOffset:
+ name = "caller's pc";
+ break;
+ case StandardFrameConstants::kCallerFPOffset:
+ name = "fp";
+ break;
+ case StandardFrameConstants::kContextOffset:
+ name = "context";
+ break;
+ case StandardFrameConstants::kMarkerOffset:
+ name = "function";
+ break;
+ }
+ PrintF(" [sp + %d] <- 0x%08x ; [sp + %d] (fixed part - %s)\n",
+ output_offset,
+ input_value,
+ input_offset,
+ name);
+ }
+
+ output_[0]->SetFrameSlot(output_offset, input_->GetFrameSlot(input_offset));
+ input_offset -= kPointerSize;
+ output_offset -= kPointerSize;
+ }
+
+ // Translate the rest of the frame.
+ while (ok && input_offset >= 0) {
+ ok = DoOsrTranslateCommand(&iterator, &input_offset);
+ }
+
+ // If translation of any command failed, continue using the input frame.
+ if (!ok) {
+ delete output_[0];
+ output_[0] = input_;
+ output_[0]->SetPc(reinterpret_cast<uint32_t>(from_));
+ } else {
+    // Set up the frame pointer and the context pointer.
+ output_[0]->SetRegister(fp.code(), input_->GetRegister(fp.code()));
+ output_[0]->SetRegister(cp.code(), input_->GetRegister(cp.code()));
+
+ unsigned pc_offset = data->OsrPcOffset()->value();
+ uint32_t pc = reinterpret_cast<uint32_t>(
+ optimized_code_->entry() + pc_offset);
+ output_[0]->SetPc(pc);
+ }
+ Code* continuation = isolate_->builtins()->builtin(Builtins::kNotifyOSR);
+ output_[0]->SetContinuation(
+ reinterpret_cast<uint32_t>(continuation->entry()));
+
+ if (FLAG_trace_osr) {
+ PrintF("[on-stack replacement translation %s: 0x%08" V8PRIxPTR " ",
+ ok ? "finished" : "aborted",
+ reinterpret_cast<intptr_t>(function));
+ function->PrintName();
+ PrintF(" => pc=0x%0x]\n", output_[0]->GetPc());
+ }
}
+// This code is very similar to ia32/arm code, but relies on register names
+// (fp, sp) and how the frame is laid out.
void Deoptimizer::DoComputeFrame(TranslationIterator* iterator,
int frame_index) {
- UNIMPLEMENTED();
-}
+ // Read the ast node id, function, and frame height for this output frame.
+ Translation::Opcode opcode =
+ static_cast<Translation::Opcode>(iterator->Next());
+ USE(opcode);
+ ASSERT(Translation::FRAME == opcode);
+ int node_id = iterator->Next();
+ JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
+ unsigned height = iterator->Next();
+ unsigned height_in_bytes = height * kPointerSize;
+ if (FLAG_trace_deopt) {
+ PrintF(" translating ");
+ function->PrintName();
+ PrintF(" => node=%d, height=%d\n", node_id, height_in_bytes);
+ }
+
+ // The 'fixed' part of the frame consists of the incoming parameters and
+ // the part described by JavaScriptFrameConstants.
+ unsigned fixed_frame_size = ComputeFixedSize(function);
+ unsigned input_frame_size = input_->GetFrameSize();
+ unsigned output_frame_size = height_in_bytes + fixed_frame_size;
+
+ // Allocate and store the output frame description.
+ FrameDescription* output_frame =
+ new(output_frame_size) FrameDescription(output_frame_size, function);
+#ifdef DEBUG
+ output_frame->SetKind(Code::FUNCTION);
+#endif
+
+ bool is_bottommost = (0 == frame_index);
+ bool is_topmost = (output_count_ - 1 == frame_index);
+ ASSERT(frame_index >= 0 && frame_index < output_count_);
+ ASSERT(output_[frame_index] == NULL);
+ output_[frame_index] = output_frame;
+
+ // The top address for the bottommost output frame can be computed from
+ // the input frame pointer and the output frame's height. For all
+ // subsequent output frames, it can be computed from the previous one's
+ // top address and the current frame's size.
+ uint32_t top_address;
+ if (is_bottommost) {
+ // 2 = context and function in the frame.
+ top_address =
+ input_->GetRegister(fp.code()) - (2 * kPointerSize) - height_in_bytes;
+ } else {
+ top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
+ }
+ output_frame->SetTop(top_address);
+
+ // Compute the incoming parameter translation.
+ int parameter_count = function->shared()->formal_parameter_count() + 1;
+ unsigned output_offset = output_frame_size;
+ unsigned input_offset = input_frame_size;
+ for (int i = 0; i < parameter_count; ++i) {
+ output_offset -= kPointerSize;
+ DoTranslateCommand(iterator, frame_index, output_offset);
+ }
+ input_offset -= (parameter_count * kPointerSize);
+
+ // There are no translation commands for the caller's pc and fp, the
+ // context, and the function. Synthesize their values and set them up
+ // explicitly.
+ //
+ // The caller's pc for the bottommost output frame is the same as in the
+ // input frame. For all subsequent output frames, it can be read from the
+ // previous one. This frame's pc can be computed from the non-optimized
+ // function code and AST id of the bailout.
+ output_offset -= kPointerSize;
+ input_offset -= kPointerSize;
+ intptr_t value;
+ if (is_bottommost) {
+ value = input_->GetFrameSlot(input_offset);
+ } else {
+ value = output_[frame_index - 1]->GetPc();
+ }
+ output_frame->SetFrameSlot(output_offset, value);
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's pc\n",
+ top_address + output_offset, output_offset, value);
+ }
+ // The caller's frame pointer for the bottommost output frame is the same
+ // as in the input frame. For all subsequent output frames, it can be
+ // read from the previous one. Also compute and set this frame's frame
+ // pointer.
+ output_offset -= kPointerSize;
+ input_offset -= kPointerSize;
+ if (is_bottommost) {
+ value = input_->GetFrameSlot(input_offset);
+ } else {
+ value = output_[frame_index - 1]->GetFp();
+ }
+ output_frame->SetFrameSlot(output_offset, value);
+ intptr_t fp_value = top_address + output_offset;
+ ASSERT(!is_bottommost || input_->GetRegister(fp.code()) == fp_value);
+ output_frame->SetFp(fp_value);
+ if (is_topmost) {
+ output_frame->SetRegister(fp.code(), fp_value);
+ }
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's fp\n",
+ fp_value, output_offset, value);
+ }
+
+  // For the bottommost output frame the context can be taken from the input
+  // frame. For all subsequent output frames it can be taken from the function
+  // so long as we don't inline functions that need local contexts.
+ output_offset -= kPointerSize;
+ input_offset -= kPointerSize;
+ if (is_bottommost) {
+ value = input_->GetFrameSlot(input_offset);
+ } else {
+ value = reinterpret_cast<intptr_t>(function->context());
+ }
+ output_frame->SetFrameSlot(output_offset, value);
+ if (is_topmost) {
+ output_frame->SetRegister(cp.code(), value);
+ }
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08x: [top + %d] <- 0x%08x ; context\n",
+ top_address + output_offset, output_offset, value);
+ }
+
+ // The function was mentioned explicitly in the BEGIN_FRAME.
+ output_offset -= kPointerSize;
+ input_offset -= kPointerSize;
+ value = reinterpret_cast<uint32_t>(function);
+ // The function for the bottommost output frame should also agree with the
+ // input frame.
+ ASSERT(!is_bottommost || input_->GetFrameSlot(input_offset) == value);
+ output_frame->SetFrameSlot(output_offset, value);
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08x: [top + %d] <- 0x%08x ; function\n",
+ top_address + output_offset, output_offset, value);
+ }
+
+ // Translate the rest of the frame.
+ for (unsigned i = 0; i < height; ++i) {
+ output_offset -= kPointerSize;
+ DoTranslateCommand(iterator, frame_index, output_offset);
+ }
+ ASSERT(0 == output_offset);
+
+ // Compute this frame's PC, state, and continuation.
+ Code* non_optimized_code = function->shared()->code();
+ FixedArray* raw_data = non_optimized_code->deoptimization_data();
+ DeoptimizationOutputData* data = DeoptimizationOutputData::cast(raw_data);
+ Address start = non_optimized_code->instruction_start();
+ unsigned pc_and_state = GetOutputInfo(data, node_id, function->shared());
+ unsigned pc_offset = FullCodeGenerator::PcField::decode(pc_and_state);
+ uint32_t pc_value = reinterpret_cast<uint32_t>(start + pc_offset);
+ output_frame->SetPc(pc_value);
+
+ FullCodeGenerator::State state =
+ FullCodeGenerator::StateField::decode(pc_and_state);
+ output_frame->SetState(Smi::FromInt(state));
+
+
+ // Set the continuation for the topmost frame.
+ if (is_topmost && bailout_type_ != DEBUGGER) {
+ Builtins* builtins = isolate_->builtins();
+ Code* continuation = (bailout_type_ == EAGER)
+ ? builtins->builtin(Builtins::kNotifyDeoptimized)
+ : builtins->builtin(Builtins::kNotifyLazyDeoptimized);
+ output_frame->SetContinuation(
+ reinterpret_cast<uint32_t>(continuation->entry()));
+ }
+}
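
The slot-by-slot writes above always consume the frame in the same order: parameters, caller's pc, caller's fp, context, function, then the expression stack, finishing exactly at offset 0. A hedged sketch of that arithmetic, assuming ComputeFixedSize covers exactly the parameters plus those four fixed words (kPointerSize = 4):

    #include <cstdio>

    const int kPointerSize = 4;

    int main() {
      int parameter_count = 2 + 1;  // formal parameters + receiver
      int height = 3;               // expression-stack slots from the translation
      // Assumption: fixed part = parameters plus pc/fp/context/function.
      int output_frame_size = (parameter_count + 4 + height) * kPointerSize;
      int offset = output_frame_size;
      for (int i = 0; i < parameter_count; ++i)
        printf("[top + %d] parameter %d\n", offset -= kPointerSize, i);
      printf("[top + %d] caller's pc\n", offset -= kPointerSize);
      printf("[top + %d] caller's fp\n", offset -= kPointerSize);
      printf("[top + %d] context\n", offset -= kPointerSize);
      printf("[top + %d] function\n", offset -= kPointerSize);
      for (int i = 0; i < height; ++i)
        printf("[top + %d] expression slot %d\n", offset -= kPointerSize, i);
      return offset;  // 0, matching ASSERT(0 == output_offset) above
    }
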
void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
- UNIMPLEMENTED();
+ // Set the register values. The values are not important as there are no
+ // callee saved registers in JavaScript frames, so all registers are
+ // spilled. Registers fp and sp are set to the correct values though.
+
+ for (int i = 0; i < Register::kNumRegisters; i++) {
+ input_->SetRegister(i, i * 4);
+ }
+ input_->SetRegister(sp.code(), reinterpret_cast<intptr_t>(frame->sp()));
+ input_->SetRegister(fp.code(), reinterpret_cast<intptr_t>(frame->fp()));
+ for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; i++) {
+ input_->SetDoubleRegister(i, 0.0);
+ }
+
+ // Fill the frame content from the actual data on the frame.
+ for (unsigned i = 0; i < input_->GetFrameSize(); i += kPointerSize) {
+ input_->SetFrameSlot(i, Memory::uint32_at(tos + i));
+ }
}
+#define __ masm()->
+
+
+// This code tries to be close to ia32 code so that any changes can be
+// easily ported.
void Deoptimizer::EntryGenerator::Generate() {
- UNIMPLEMENTED();
+ GeneratePrologue();
+
+ Isolate* isolate = masm()->isolate();
+
+ CpuFeatures::Scope scope(FPU);
+ // Unlike on ARM we don't save all the registers, just the useful ones.
+ // For the rest, there are gaps on the stack, so the offsets remain the same.
+ const int kNumberOfRegisters = Register::kNumRegisters;
+
+ RegList restored_regs = kJSCallerSaved | kCalleeSaved;
+ RegList saved_regs = restored_regs | sp.bit() | ra.bit();
+
+ const int kDoubleRegsSize =
+ kDoubleSize * FPURegister::kNumAllocatableRegisters;
+
+ // Save all FPU registers before messing with them.
+ __ Subu(sp, sp, Operand(kDoubleRegsSize));
+ for (int i = 0; i < FPURegister::kNumAllocatableRegisters; ++i) {
+ FPURegister fpu_reg = FPURegister::FromAllocationIndex(i);
+ int offset = i * kDoubleSize;
+ __ sdc1(fpu_reg, MemOperand(sp, offset));
+ }
+
+ // Push saved_regs (needed to populate FrameDescription::registers_).
+ // Leave gaps for other registers.
+ __ Subu(sp, sp, kNumberOfRegisters * kPointerSize);
+ for (int16_t i = kNumberOfRegisters - 1; i >= 0; i--) {
+ if ((saved_regs & (1 << i)) != 0) {
+ __ sw(ToRegister(i), MemOperand(sp, kPointerSize * i));
+ }
+ }
+
+ const int kSavedRegistersAreaSize =
+ (kNumberOfRegisters * kPointerSize) + kDoubleRegsSize;
+
+ // Get the bailout id from the stack.
+ __ lw(a2, MemOperand(sp, kSavedRegistersAreaSize));
+
+  // Get the address of the location in the code object (a3) if possible (the
+  // return address for lazy deoptimization) and compute the fp-to-sp delta in
+  // register t0.
+ if (type() == EAGER) {
+ __ mov(a3, zero_reg);
+ // Correct one word for bailout id.
+ __ Addu(t0, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
+ } else if (type() == OSR) {
+ __ mov(a3, ra);
+ // Correct one word for bailout id.
+ __ Addu(t0, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
+ } else {
+ __ mov(a3, ra);
+ // Correct two words for bailout id and return address.
+ __ Addu(t0, sp, Operand(kSavedRegistersAreaSize + (2 * kPointerSize)));
+ }
+
+ __ Subu(t0, fp, t0);
+
+ // Allocate a new deoptimizer object.
+ // Pass four arguments in a0 to a3 and fifth & sixth arguments on stack.
+ __ PrepareCallCFunction(6, t1);
+ __ lw(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ li(a1, Operand(type())); // bailout type,
+ // a2: bailout id already loaded.
+ // a3: code address or 0 already loaded.
+ __ sw(t0, CFunctionArgumentOperand(5)); // Fp-to-sp delta.
+ __ li(t1, Operand(ExternalReference::isolate_address()));
+ __ sw(t1, CFunctionArgumentOperand(6)); // Isolate.
+ // Call Deoptimizer::New().
+ {
+ AllowExternalCallThatCantCauseGC scope(masm());
+ __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 6);
+ }
+
+  // Preserve the "deoptimizer" object in register v0 and load the input
+  // frame descriptor pointer (deoptimizer->input_) into a1.
+ // Move deopt-obj to a0 for call to Deoptimizer::ComputeOutputFrames() below.
+ __ mov(a0, v0);
+ __ lw(a1, MemOperand(v0, Deoptimizer::input_offset()));
+
+ // Copy core registers into FrameDescription::registers_[kNumRegisters].
+ ASSERT(Register::kNumRegisters == kNumberOfRegisters);
+ for (int i = 0; i < kNumberOfRegisters; i++) {
+ int offset = (i * kPointerSize) + FrameDescription::registers_offset();
+ if ((saved_regs & (1 << i)) != 0) {
+ __ lw(a2, MemOperand(sp, i * kPointerSize));
+ __ sw(a2, MemOperand(a1, offset));
+ } else if (FLAG_debug_code) {
+ __ li(a2, kDebugZapValue);
+ __ sw(a2, MemOperand(a1, offset));
+ }
+ }
+
+ // Copy FPU registers to
+ // double_registers_[DoubleRegister::kNumAllocatableRegisters]
+ int double_regs_offset = FrameDescription::double_registers_offset();
+ for (int i = 0; i < FPURegister::kNumAllocatableRegisters; ++i) {
+ int dst_offset = i * kDoubleSize + double_regs_offset;
+ int src_offset = i * kDoubleSize + kNumberOfRegisters * kPointerSize;
+ __ ldc1(f0, MemOperand(sp, src_offset));
+ __ sdc1(f0, MemOperand(a1, dst_offset));
+ }
+
+  // Remove the bailout id, the return address (if any), and the saved
+  // registers from the stack.
+ if (type() == EAGER || type() == OSR) {
+ __ Addu(sp, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
+ } else {
+ __ Addu(sp, sp, Operand(kSavedRegistersAreaSize + (2 * kPointerSize)));
+ }
+
+ // Compute a pointer to the unwinding limit in register a2; that is
+ // the first stack slot not part of the input frame.
+ __ lw(a2, MemOperand(a1, FrameDescription::frame_size_offset()));
+ __ Addu(a2, a2, sp);
+
+ // Unwind the stack down to - but not including - the unwinding
+ // limit and copy the contents of the activation frame to the input
+ // frame description.
+ __ Addu(a3, a1, Operand(FrameDescription::frame_content_offset()));
+ Label pop_loop;
+ __ bind(&pop_loop);
+ __ pop(t0);
+ __ sw(t0, MemOperand(a3, 0));
+ __ Branch(USE_DELAY_SLOT, &pop_loop, ne, a2, Operand(sp));
+ __ addiu(a3, a3, sizeof(uint32_t)); // In delay slot.
+
+ // Compute the output frame in the deoptimizer.
+ __ push(a0); // Preserve deoptimizer object across call.
+ // a0: deoptimizer object; a1: scratch.
+ __ PrepareCallCFunction(1, a1);
+ // Call Deoptimizer::ComputeOutputFrames().
+ {
+ AllowExternalCallThatCantCauseGC scope(masm());
+ __ CallCFunction(
+ ExternalReference::compute_output_frames_function(isolate), 1);
+ }
+ __ pop(a0); // Restore deoptimizer object (class Deoptimizer).
+
+ // Replace the current (input) frame with the output frames.
+ Label outer_push_loop, inner_push_loop;
+ // Outer loop state: a0 = current "FrameDescription** output_",
+ // a1 = one past the last FrameDescription**.
+ __ lw(a1, MemOperand(a0, Deoptimizer::output_count_offset()));
+ __ lw(a0, MemOperand(a0, Deoptimizer::output_offset())); // a0 is output_.
+ __ sll(a1, a1, kPointerSizeLog2); // Count to offset.
+ __ addu(a1, a0, a1); // a1 = one past the last FrameDescription**.
+ __ bind(&outer_push_loop);
+ // Inner loop state: a2 = current FrameDescription*, a3 = loop index.
+ __ lw(a2, MemOperand(a0, 0)); // output_[ix]
+ __ lw(a3, MemOperand(a2, FrameDescription::frame_size_offset()));
+ __ bind(&inner_push_loop);
+ __ Subu(a3, a3, Operand(sizeof(uint32_t)));
+ __ Addu(t2, a2, Operand(a3));
+ __ lw(t3, MemOperand(t2, FrameDescription::frame_content_offset()));
+ __ push(t3);
+ __ Branch(&inner_push_loop, ne, a3, Operand(zero_reg));
+
+ __ Addu(a0, a0, Operand(kPointerSize));
+ __ Branch(&outer_push_loop, lt, a0, Operand(a1));
+
+
+ // Push state, pc, and continuation from the last output frame.
+ if (type() != OSR) {
+ __ lw(t2, MemOperand(a2, FrameDescription::state_offset()));
+ __ push(t2);
+ }
+
+ __ lw(t2, MemOperand(a2, FrameDescription::pc_offset()));
+ __ push(t2);
+ __ lw(t2, MemOperand(a2, FrameDescription::continuation_offset()));
+ __ push(t2);
+
+
+  // Technically restoring 'at' should work unless zero_reg is also restored,
+  // but it's safer to check for this.
+ ASSERT(!(at.bit() & restored_regs));
+ // Restore the registers from the last output frame.
+ __ mov(at, a2);
+ for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
+ int offset = (i * kPointerSize) + FrameDescription::registers_offset();
+ if ((restored_regs & (1 << i)) != 0) {
+ __ lw(ToRegister(i), MemOperand(at, offset));
+ }
+ }
+
+ // Set up the roots register.
+ ExternalReference roots_array_start =
+ ExternalReference::roots_array_start(isolate);
+ __ li(roots, Operand(roots_array_start));
+
+ __ pop(at); // Get continuation, leave pc on stack.
+ __ pop(ra);
+ __ Jump(at);
+ __ stop("Unreachable.");
}
void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
- UNIMPLEMENTED();
+ Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm());
+
+  // Create a sequence of deoptimization entries. Note that any
+  // registers may still be live.
+
+ Label done;
+ for (int i = 0; i < count(); i++) {
+ int start = masm()->pc_offset();
+ USE(start);
+ if (type() != EAGER) {
+      // Emulate an ia32-style call by pushing the return address onto the stack.
+ __ push(ra);
+ }
+ __ li(at, Operand(i));
+ __ push(at);
+ __ Branch(&done);
+
+ // Pad the rest of the code.
+ while (table_entry_size_ > (masm()->pc_offset() - start)) {
+ __ nop();
+ }
+
+ ASSERT_EQ(table_entry_size_, masm()->pc_offset() - start);
+ }
+ __ bind(&done);
}
+#undef __
+
} } // namespace v8::internal
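
Because GeneratePrologue pads every entry with nops to exactly table_entry_size_ (now 32) bytes, the address of entry i is pure arithmetic on the table base, which is what the pushed bailout id relies on. A minimal sketch (the base address is hypothetical):

    #include <cstdint>
    #include <cstdio>

    const int kTableEntrySize = 32;  // table_entry_size_ above

    uintptr_t EntryAddress(uintptr_t table_base, int id) {
      return table_base + static_cast<uintptr_t>(id) * kTableEntrySize;
    }

    int main() {
      uintptr_t base = 0x40000000;  // hypothetical table base address
      for (int id = 0; id < 3; ++id)
        printf("entry %d at 0x%lx\n", id,
               static_cast<unsigned long>(EntryAddress(base, id)));
      return 0;
    }
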
diff --git a/deps/v8/src/mips/frames-mips.h b/deps/v8/src/mips/frames-mips.h
index 2c838938b..9e626f377 100644
--- a/deps/v8/src/mips/frames-mips.h
+++ b/deps/v8/src/mips/frames-mips.h
@@ -36,9 +36,9 @@ namespace internal {
// Register lists.
// Note that the bit values must match those used in actual instruction
// encoding.
-static const int kNumRegs = 32;
+const int kNumRegs = 32;
-static const RegList kJSCallerSaved =
+const RegList kJSCallerSaved =
1 << 2 | // v0
1 << 3 | // v1
1 << 4 | // a0
@@ -54,7 +54,7 @@ static const RegList kJSCallerSaved =
1 << 14 | // t6
1 << 15; // t7
-static const int kNumJSCallerSaved = 14;
+const int kNumJSCallerSaved = 14;
// Return the code of the n-th caller-saved register available to JavaScript
@@ -63,7 +63,7 @@ int JSCallerSavedCode(int n);
// Callee-saved registers preserved when switching from C to JavaScript.
-static const RegList kCalleeSaved =
+const RegList kCalleeSaved =
1 << 16 | // s0
1 << 17 | // s1
1 << 18 | // s2
@@ -74,9 +74,9 @@ static const RegList kCalleeSaved =
1 << 23 | // s7 (cp in Javascript code)
1 << 30; // fp/s8
-static const int kNumCalleeSaved = 9;
+const int kNumCalleeSaved = 9;
-static const RegList kCalleeSavedFPU =
+const RegList kCalleeSavedFPU =
1 << 20 | // f20
1 << 22 | // f22
1 << 24 | // f24
@@ -84,23 +84,37 @@ static const RegList kCalleeSavedFPU =
1 << 28 | // f28
1 << 30; // f30
-static const int kNumCalleeSavedFPU = 6;
+const int kNumCalleeSavedFPU = 6;
+
+const RegList kCallerSavedFPU =
+ 1 << 0 | // f0
+ 1 << 2 | // f2
+ 1 << 4 | // f4
+ 1 << 6 | // f6
+ 1 << 8 | // f8
+ 1 << 10 | // f10
+ 1 << 12 | // f12
+ 1 << 14 | // f14
+ 1 << 16 | // f16
+ 1 << 18; // f18
+
+
// Number of registers for which space is reserved in safepoints. Must be a
// multiple of 8.
-static const int kNumSafepointRegisters = 24;
+const int kNumSafepointRegisters = 24;
// Define the list of registers actually saved at safepoints.
// Note that the number of saved registers may be smaller than the reserved
// space, i.e. kNumSafepointSavedRegisters <= kNumSafepointRegisters.
-static const RegList kSafepointSavedRegisters = kJSCallerSaved | kCalleeSaved;
-static const int kNumSafepointSavedRegisters =
+const RegList kSafepointSavedRegisters = kJSCallerSaved | kCalleeSaved;
+const int kNumSafepointSavedRegisters =
kNumJSCallerSaved + kNumCalleeSaved;
typedef Object* JSCallerSavedBuffer[kNumJSCallerSaved];
-static const int kUndefIndex = -1;
+const int kUndefIndex = -1;
// Map with indexes on stack that corresponds to codes of saved registers.
-static const int kSafepointRegisterStackIndexMap[kNumRegs] = {
+const int kSafepointRegisterStackIndexMap[kNumRegs] = {
kUndefIndex, // zero_reg
kUndefIndex, // at
0, // v0
@@ -140,13 +154,13 @@ static const int kSafepointRegisterStackIndexMap[kNumRegs] = {
class StackHandlerConstants : public AllStatic {
public:
- static const int kNextOffset = 0 * kPointerSize;
- static const int kStateOffset = 1 * kPointerSize;
- static const int kContextOffset = 2 * kPointerSize;
- static const int kFPOffset = 3 * kPointerSize;
- static const int kPCOffset = 4 * kPointerSize;
+ static const int kNextOffset = 0 * kPointerSize;
+ static const int kCodeOffset = 1 * kPointerSize;
+ static const int kStateOffset = 2 * kPointerSize;
+ static const int kContextOffset = 3 * kPointerSize;
+ static const int kFPOffset = 4 * kPointerSize;
- static const int kSize = kPCOffset + kPointerSize;
+ static const int kSize = kFPOffset + kPointerSize;
};
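
A RegList is a plain 32-bit mask, so companion counts such as kNumJSCallerSaved must agree with the mask by construction; MultiPush/MultiPop simply walk the set bits in order. A small consistency check, with the mask duplicated from the list above:

    #include <cassert>
    #include <cstdint>

    typedef uint32_t RegList;

    // Duplicated from the list above: v0, v1, a0-a3, t0-t7.
    const RegList kJSCallerSaved =
        1 << 2 | 1 << 3 | 1 << 4 | 1 << 5 | 1 << 6 | 1 << 7 |
        1 << 8 | 1 << 9 | 1 << 10 | 1 << 11 | 1 << 12 | 1 << 13 |
        1 << 14 | 1 << 15;

    int CountRegs(RegList list) {
      int n = 0;
      for (int i = 0; i < 32; ++i)
        if (list & (1u << i)) ++n;
      return n;
    }

    int main() {
      assert(CountRegs(kJSCallerSaved) == 14);  // kNumJSCallerSaved
      return 0;
    }
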
diff --git a/deps/v8/src/mips/full-codegen-mips.cc b/deps/v8/src/mips/full-codegen-mips.cc
index 9a210c49e..201e6b8e1 100644
--- a/deps/v8/src/mips/full-codegen-mips.cc
+++ b/deps/v8/src/mips/full-codegen-mips.cc
@@ -47,6 +47,7 @@
#include "stub-cache.h"
#include "mips/code-stubs-mips.h"
+#include "mips/macro-assembler-mips.h"
namespace v8 {
namespace internal {
@@ -54,17 +55,14 @@ namespace internal {
#define __ ACCESS_MASM(masm_)
-static unsigned GetPropertyId(Property* property) {
- return property->id();
-}
-
-
// A patch site is a location in the code which it is possible to patch. This
// class has a number of methods to emit the code which is patchable and the
// method EmitPatchInfo to record a marker back to the patchable code. This
-// marker is a andi at, rx, #yyy instruction, and x * 0x0000ffff + yyy (raw 16
-// bit immediate value is used) is the delta from the pc to the first
+// marker is an andi zero_reg, rx, #yyyy instruction, and rx * 0x0000ffff + yyyy
+// (raw 16 bit immediate value is used) is the delta from the pc to the first
// instruction of the patchable code.
+// The marker instruction is effectively a NOP (dest is zero_reg) and will
+// never be emitted by normal code.
class JumpPatchSite BASE_EMBEDDED {
public:
explicit JumpPatchSite(MacroAssembler* masm) : masm_(masm) {
@@ -103,7 +101,7 @@ class JumpPatchSite BASE_EMBEDDED {
if (patch_site_.is_bound()) {
int delta_to_patch_site = masm_->InstructionsGeneratedSince(&patch_site_);
Register reg = Register::from_code(delta_to_patch_site / kImm16Mask);
- __ andi(at, reg, delta_to_patch_site % kImm16Mask);
+ __ andi(zero_reg, reg, delta_to_patch_site % kImm16Mask);
#ifdef DEBUG
info_emitted_ = true;
#endif
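
The patched marker packs the pc delta into two places of one andi-to-zero_reg NOP: the source register number carries delta / kImm16Mask and the 16-bit immediate carries delta % kImm16Mask, exactly as EmitPatchInfo splits it above. A hedged round-trip sketch of that encoding:

    #include <cassert>

    const int kImm16Mask = 0xffff;  // from constants-mips.h above

    struct Marker { int reg_code; int imm16; };

    // EmitPatchInfo splits the delta exactly like this.
    Marker Encode(int delta) {
      Marker m = { delta / kImm16Mask, delta % kImm16Mask };
      assert(m.reg_code < 32);  // must still name a valid register
      return m;
    }

    // The reader on the patching side recombines it.
    int Decode(Marker m) { return m.reg_code * kImm16Mask + m.imm16; }

    int main() {
      for (int delta = 0; delta < 200000; delta += 777)
        assert(Decode(Encode(delta)) == delta);
      return 0;
    }
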
@@ -139,6 +137,8 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
ASSERT(info_ == NULL);
info_ = info;
scope_ = info->scope();
+ handler_table_ =
+ isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED);
SetFunctionPosition(function());
Comment cmnt(masm_, "[ function compiled by full code generator");
@@ -153,7 +153,7 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
// with undefined when called as functions (without an explicit
// receiver object). t1 is zero for method calls and non-zero for
// function calls.
- if (info->is_strict_mode() || info->is_native()) {
+ if (!info->is_classic_mode() || info->is_native()) {
Label ok;
__ Branch(&ok, eq, t1, Operand(zero_reg));
int receiver_offset = info->scope()->num_parameters() * kPointerSize;
@@ -162,6 +162,11 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
__ bind(&ok);
}
+ // Open a frame scope to indicate that there is a frame on the stack. The
+ // MANUAL indicates that the scope shouldn't actually generate code to set up
+ // the frame (that is done below).
+ FrameScope frame_scope(masm_, StackFrame::MANUAL);
+
int locals_count = info->scope()->num_stack_slots();
__ Push(ra, fp, cp, a1);
@@ -207,14 +212,12 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
// Load parameter from stack.
__ lw(a0, MemOperand(fp, parameter_offset));
// Store it in the context.
- __ li(a1, Operand(Context::SlotOffset(var->index())));
- __ addu(a2, cp, a1);
- __ sw(a0, MemOperand(a2, 0));
- // Update the write barrier. This clobbers all involved
- // registers, so we have to use two more registers to avoid
- // clobbering cp.
- __ mov(a2, cp);
- __ RecordWrite(a2, a1, a3);
+ MemOperand target = ContextOperand(cp, var->index());
+ __ sw(a0, target);
+
+ // Update the write barrier.
+ __ RecordWriteContextSlot(
+ cp, target.offset(), a0, a3, kRAHasBeenSaved, kDontSaveFPRegs);
}
}
}
@@ -242,7 +245,7 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
// The stub will rewrite the receiver and parameter count if the previous
// stack frame was an arguments adapter frame.
ArgumentsAccessStub::Type type;
- if (is_strict_mode()) {
+ if (!is_classic_mode()) {
type = ArgumentsAccessStub::NEW_STRICT;
} else if (function()->has_duplicate_parameters()) {
type = ArgumentsAccessStub::NEW_NON_STRICT_SLOW;
@@ -272,7 +275,10 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
// constant.
if (scope()->is_function_scope() && scope()->function() != NULL) {
int ignored = 0;
- EmitDeclaration(scope()->function(), Variable::CONST, NULL, &ignored);
+ VariableProxy* proxy = scope()->function();
+ ASSERT(proxy->var()->mode() == CONST ||
+ proxy->var()->mode() == CONST_HARMONY);
+ EmitDeclaration(proxy, proxy->var()->mode(), NULL, &ignored);
}
VisitDeclarations(scope()->declarations());
}
@@ -310,17 +316,25 @@ void FullCodeGenerator::ClearAccumulator() {
void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt) {
+ // The generated code is used in Deoptimizer::PatchStackCheckCodeAt so we need
+ // to make sure it is constant. Branch may emit a skip-or-jump sequence
+ // instead of the normal Branch. It seems that the "skip" part of that
+  // sequence is about as long as this Branch would be, so it is safe to
+  // ignore that.
+ Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
Comment cmnt(masm_, "[ Stack check");
Label ok;
__ LoadRoot(t0, Heap::kStackLimitRootIndex);
- __ Branch(&ok, hs, sp, Operand(t0));
+ __ sltu(at, sp, t0);
+ __ beq(at, zero_reg, &ok);
+ // CallStub will emit a li t9, ... first, so it is safe to use the delay slot.
StackCheckStub stub;
+ __ CallStub(&stub);
// Record a mapping of this PC offset to the OSR id. This is used to find
// the AST id from the unoptimized code in order to use it as a key into
// the deoptimization input data found in the optimized code.
RecordStackCheck(stmt->OsrEntryId());
- __ CallStub(&stub);
__ bind(&ok);
PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
// Record a mapping of the OSR id to this PC. This is used if the OSR
@@ -393,7 +407,7 @@ void FullCodeGenerator::StackValueContext::Plug(Variable* var) const {
void FullCodeGenerator::TestContext::Plug(Variable* var) const {
// For simplicity we always test the accumulator register.
codegen()->GetVar(result_register(), var);
- codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
+ codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
codegen()->DoTest(this);
}
@@ -416,7 +430,7 @@ void FullCodeGenerator::StackValueContext::Plug(
void FullCodeGenerator::TestContext::Plug(Heap::RootListIndex index) const {
- codegen()->PrepareForBailoutBeforeSplit(TOS_REG,
+ codegen()->PrepareForBailoutBeforeSplit(condition(),
true,
true_label_,
false_label_);
@@ -451,7 +465,7 @@ void FullCodeGenerator::StackValueContext::Plug(Handle<Object> lit) const {
void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
- codegen()->PrepareForBailoutBeforeSplit(TOS_REG,
+ codegen()->PrepareForBailoutBeforeSplit(condition(),
true,
true_label_,
false_label_);
@@ -510,7 +524,7 @@ void FullCodeGenerator::TestContext::DropAndPlug(int count,
// For simplicity we always test the accumulator register.
__ Drop(count);
__ Move(result_register(), reg);
- codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
+ codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
codegen()->DoTest(this);
}
@@ -577,7 +591,7 @@ void FullCodeGenerator::StackValueContext::Plug(bool flag) const {
void FullCodeGenerator::TestContext::Plug(bool flag) const {
- codegen()->PrepareForBailoutBeforeSplit(TOS_REG,
+ codegen()->PrepareForBailoutBeforeSplit(condition(),
true,
true_label_,
false_label_);
@@ -670,15 +684,17 @@ void FullCodeGenerator::SetVar(Variable* var,
__ sw(src, location);
// Emit the write barrier code if the location is in the heap.
if (var->IsContextSlot()) {
- __ RecordWrite(scratch0,
- Operand(Context::SlotOffset(var->index())),
- scratch1,
- src);
+ __ RecordWriteContextSlot(scratch0,
+ location.offset(),
+ src,
+ scratch1,
+ kRAHasBeenSaved,
+ kDontSaveFPRegs);
}
}
-void FullCodeGenerator::PrepareForBailoutBeforeSplit(State state,
+void FullCodeGenerator::PrepareForBailoutBeforeSplit(Expression* expr,
bool should_normalize,
Label* if_true,
Label* if_false) {
@@ -689,13 +705,7 @@ void FullCodeGenerator::PrepareForBailoutBeforeSplit(State state,
Label skip;
if (should_normalize) __ Branch(&skip);
-
- ForwardBailoutStack* current = forward_bailout_stack_;
- while (current != NULL) {
- PrepareForBailout(current->expr(), state);
- current = current->parent();
- }
-
+ PrepareForBailout(expr, TOS_REG);
if (should_normalize) {
__ LoadRoot(t0, Heap::kTrueValueRootIndex);
Split(eq, a0, Operand(t0), if_true, if_false, NULL);
@@ -705,13 +715,15 @@ void FullCodeGenerator::PrepareForBailoutBeforeSplit(State state,
void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
- Variable::Mode mode,
+ VariableMode mode,
FunctionLiteral* function,
int* global_count) {
// If it was not possible to allocate the variable at compile time, we
// need to "declare" it at runtime to make sure it actually exists in the
// local context.
Variable* variable = proxy->var();
+ bool binding_needs_init = (function == NULL) &&
+ (mode == CONST || mode == CONST_HARMONY || mode == LET);
switch (variable->location()) {
case Variable::UNALLOCATED:
++(*global_count);
@@ -723,7 +735,7 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
Comment cmnt(masm_, "[ Declaration");
VisitForAccumulatorValue(function);
__ sw(result_register(), StackOperand(variable));
- } else if (mode == Variable::CONST || mode == Variable::LET) {
+ } else if (binding_needs_init) {
Comment cmnt(masm_, "[ Declaration");
__ LoadRoot(t0, Heap::kTheHoleValueRootIndex);
__ sw(t0, StackOperand(variable));
@@ -750,10 +762,16 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
__ sw(result_register(), ContextOperand(cp, variable->index()));
int offset = Context::SlotOffset(variable->index());
// We know that we have written a function, which is not a smi.
- __ mov(a1, cp);
- __ RecordWrite(a1, Operand(offset), a2, result_register());
+ __ RecordWriteContextSlot(cp,
+ offset,
+ result_register(),
+ a2,
+ kRAHasBeenSaved,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
- } else if (mode == Variable::CONST || mode == Variable::LET) {
+ } else if (binding_needs_init) {
Comment cmnt(masm_, "[ Declaration");
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
__ sw(at, ContextOperand(cp, variable->index()));
@@ -765,11 +783,13 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
case Variable::LOOKUP: {
Comment cmnt(masm_, "[ Declaration");
__ li(a2, Operand(variable->name()));
- // Declaration nodes are always introduced in one of three modes.
- ASSERT(mode == Variable::VAR ||
- mode == Variable::CONST ||
- mode == Variable::LET);
- PropertyAttributes attr = (mode == Variable::CONST) ? READ_ONLY : NONE;
+ // Declaration nodes are always introduced in one of four modes.
+ ASSERT(mode == VAR ||
+ mode == CONST ||
+ mode == CONST_HARMONY ||
+ mode == LET);
+ PropertyAttributes attr = (mode == CONST || mode == CONST_HARMONY)
+ ? READ_ONLY : NONE;
__ li(a1, Operand(Smi::FromInt(attr)));
// Push initial value, if any.
// Note: For variables we must not push an initial value (such as
@@ -779,7 +799,7 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
__ Push(cp, a2, a1);
// Push initial value for function declaration.
VisitForStackValue(function);
- } else if (mode == Variable::CONST || mode == Variable::LET) {
+ } else if (binding_needs_init) {
__ LoadRoot(a0, Heap::kTheHoleValueRootIndex);
__ Push(cp, a2, a1, a0);
} else {
@@ -922,11 +942,17 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ bind(&done_convert);
__ push(a0);
+ // Check for proxies.
+ Label call_runtime;
+ STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
+ __ GetObjectType(a0, a1, a1);
+ __ Branch(&call_runtime, le, a1, Operand(LAST_JS_PROXY_TYPE));
+
// Check cache validity in generated code. This is a fast case for
// the JSObject::IsSimpleEnum cache validity checks. If we cannot
// guarantee cache validity, call the runtime system to check cache
// validity or get the property names in a fixed array.
- Label next, call_runtime;
+ Label next;
// Preload a couple of values used in the loop.
Register empty_fixed_array_value = t2;
__ LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
@@ -1000,9 +1026,16 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ jmp(&loop);
// We got a fixed array in register v0. Iterate through that.
+ Label non_proxy;
__ bind(&fixed_array);
- __ li(a1, Operand(Smi::FromInt(0))); // Map (0) - force slow check.
- __ Push(a1, v0);
+ __ li(a1, Operand(Smi::FromInt(1))); // Smi indicates slow check
+ __ lw(a2, MemOperand(sp, 0 * kPointerSize)); // Get enumerated object
+ STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
+ __ GetObjectType(a2, a3, a3);
+ __ Branch(&non_proxy, gt, a3, Operand(LAST_JS_PROXY_TYPE));
+ __ li(a1, Operand(Smi::FromInt(0))); // Zero indicates proxy
+ __ bind(&non_proxy);
+ __ Push(a1, v0); // Smi and array
__ lw(a1, FieldMemOperand(v0, FixedArray::kLengthOffset));
__ li(a0, Operand(Smi::FromInt(0)));
__ Push(a1, a0); // Fixed array length (as smi) and initial index.
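
After this change the slot that used to hold either a map or zero distinguishes three states: a map (enum cache valid, fast path), smi 1 (do the slow per-key filter), and smi 0 (the enumerable is a proxy, no filtering at all). A small sketch of that three-way dispatch (names are illustrative, not V8's):

    #include <cstdio>

    // The three states of the marker slot, per the comments above.
    enum class EnumMarker { kProxy = 0, kSlowCheck = 1, kMap = 2 };

    const char* Dispatch(EnumMarker m) {
      switch (m) {
        case EnumMarker::kProxy:     return "proxy: keys pass unfiltered";
        case EnumMarker::kSlowCheck: return "slow: filter each key at runtime";
        case EnumMarker::kMap:       return "fast: map matches, use enum cache";
      }
      return "";
    }

    int main() {
      printf("%s\n", Dispatch(EnumMarker::kProxy));
      printf("%s\n", Dispatch(EnumMarker::kSlowCheck));
      printf("%s\n", Dispatch(EnumMarker::kMap));
      return 0;
    }
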
@@ -1021,17 +1054,22 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ addu(t0, a2, t0); // Array base + scaled (smi) index.
__ lw(a3, MemOperand(t0)); // Current entry.
- // Get the expected map from the stack or a zero map in the
+ // Get the expected map from the stack or a smi in the
// permanent slow case into register a2.
__ lw(a2, MemOperand(sp, 3 * kPointerSize));
// Check if the expected map still matches that of the enumerable.
- // If not, we have to filter the key.
+ // If not, we may have to filter the key.
Label update_each;
__ lw(a1, MemOperand(sp, 4 * kPointerSize));
__ lw(t0, FieldMemOperand(a1, HeapObject::kMapOffset));
__ Branch(&update_each, eq, t0, Operand(a2));
+ // For proxies, no filtering is done.
+ // TODO(rossberg): What if only a prototype is a proxy? Not specified yet.
+ ASSERT_EQ(Smi::FromInt(0), 0);
+ __ Branch(&update_each, eq, a2, Operand(zero_reg));
+
// Convert the entry to a string or (smi) 0 if it isn't a property
// any more. If the property has been removed while iterating, we
// just skip it.
@@ -1086,7 +1124,7 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
!pretenure &&
scope()->is_function_scope() &&
info->num_literals() == 0) {
- FastNewClosureStub stub(info->strict_mode() ? kStrictMode : kNonStrictMode);
+ FastNewClosureStub stub(info->language_mode());
__ li(a0, Operand(info));
__ push(a0);
__ CallStub(&stub);
@@ -1117,7 +1155,7 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
Scope* s = scope();
while (s != NULL) {
if (s->num_heap_slots() > 0) {
- if (s->calls_eval()) {
+ if (s->calls_non_strict_eval()) {
// Check that extension is NULL.
__ lw(temp, ContextOperand(current, Context::EXTENSION_INDEX));
__ Branch(slow, ne, temp, Operand(zero_reg));
@@ -1129,7 +1167,7 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
}
// If no outer scope calls eval, we do not need to check more
// context extensions.
- if (!s->outer_scope_calls_eval() || s->is_eval_scope()) break;
+ if (!s->outer_scope_calls_non_strict_eval() || s->is_eval_scope()) break;
s = s->outer_scope();
}
@@ -1171,7 +1209,7 @@ MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) {
if (s->num_heap_slots() > 0) {
- if (s->calls_eval()) {
+ if (s->calls_non_strict_eval()) {
// Check that extension is NULL.
__ lw(temp, ContextOperand(context, Context::EXTENSION_INDEX));
__ Branch(slow, ne, temp, Operand(zero_reg));
@@ -1201,17 +1239,26 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(Variable* var,
// introducing variables. In those cases, we do not want to
// perform a runtime call for all variables in the scope
// containing the eval.
- if (var->mode() == Variable::DYNAMIC_GLOBAL) {
+ if (var->mode() == DYNAMIC_GLOBAL) {
EmitLoadGlobalCheckExtensions(var, typeof_state, slow);
__ Branch(done);
- } else if (var->mode() == Variable::DYNAMIC_LOCAL) {
+ } else if (var->mode() == DYNAMIC_LOCAL) {
Variable* local = var->local_if_not_shadowed();
__ lw(v0, ContextSlotOperandCheckExtensions(local, slow));
- if (local->mode() == Variable::CONST) {
+ if (local->mode() == CONST ||
+ local->mode() == CONST_HARMONY ||
+ local->mode() == LET) {
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
__ subu(at, v0, at); // Sub as compare: at == 0 on eq.
- __ LoadRoot(a0, Heap::kUndefinedValueRootIndex);
- __ movz(v0, a0, at); // Conditional move: return Undefined if TheHole.
+ if (local->mode() == CONST) {
+ __ LoadRoot(a0, Heap::kUndefinedValueRootIndex);
+ __ movz(v0, a0, at); // Conditional move: return Undefined if TheHole.
+ } else { // LET || CONST_HARMONY
+ __ Branch(done, ne, at, Operand(zero_reg));
+ __ li(a0, Operand(var->name()));
+ __ push(a0);
+ __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ }
}
__ Branch(done);
}
@@ -1244,26 +1291,66 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
Comment cmnt(masm_, var->IsContextSlot()
? "Context variable"
: "Stack variable");
- if (var->mode() != Variable::LET && var->mode() != Variable::CONST) {
- context()->Plug(var);
- } else {
- // Let and const need a read barrier.
- GetVar(v0, var);
- __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
- __ subu(at, v0, at); // Sub as compare: at == 0 on eq.
- if (var->mode() == Variable::LET) {
- Label done;
- __ Branch(&done, ne, at, Operand(zero_reg));
- __ li(a0, Operand(var->name()));
- __ push(a0);
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
- __ bind(&done);
+ if (var->binding_needs_init()) {
+ // var->scope() may be NULL when the proxy is located in eval code and
+ // refers to a potential outside binding. Currently those bindings are
+        // always looked up dynamically; in that case
+        //     var->location() == LOOKUP
+        // always holds.
+ ASSERT(var->scope() != NULL);
+
+ // Check if the binding really needs an initialization check. The check
+ // can be skipped in the following situation: we have a LET or CONST
+ // binding in harmony mode, both the Variable and the VariableProxy have
+ // the same declaration scope (i.e. they are both in global code, in the
+ // same function or in the same eval code) and the VariableProxy is in
+ // the source physically located after the initializer of the variable.
+ //
+ // We cannot skip any initialization checks for CONST in non-harmony
+ // mode because const variables may be declared but never initialized:
+ // if (false) { const x; }; var y = x;
+ //
+ // The condition on the declaration scopes is a conservative check for
+ // nested functions that access a binding and are called before the
+ // binding is initialized:
+ // function() { f(); let x = 1; function f() { x = 2; } }
+ //
+ bool skip_init_check;
+ if (var->scope()->DeclarationScope() != scope()->DeclarationScope()) {
+ skip_init_check = false;
} else {
- __ LoadRoot(a0, Heap::kUndefinedValueRootIndex);
- __ movz(v0, a0, at); // Conditional move: Undefined if TheHole.
+ // Check that we always have valid source position.
+ ASSERT(var->initializer_position() != RelocInfo::kNoPosition);
+ ASSERT(proxy->position() != RelocInfo::kNoPosition);
+ skip_init_check = var->mode() != CONST &&
+ var->initializer_position() < proxy->position();
+ }
+
+ if (!skip_init_check) {
+ // Let and const need a read barrier.
+ GetVar(v0, var);
+ __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+ __ subu(at, v0, at); // Sub as compare: at == 0 on eq.
+ if (var->mode() == LET || var->mode() == CONST_HARMONY) {
+ // Throw a reference error when using an uninitialized let/const
+ // binding in harmony mode.
+ Label done;
+ __ Branch(&done, ne, at, Operand(zero_reg));
+ __ li(a0, Operand(var->name()));
+ __ push(a0);
+ __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ bind(&done);
+ } else {
+          // Uninitialized const bindings outside of harmony mode are unholed.
+ ASSERT(var->mode() == CONST);
+ __ LoadRoot(a0, Heap::kUndefinedValueRootIndex);
+ __ movz(v0, a0, at); // Conditional move: Undefined if TheHole.
+ }
+ context()->Plug(v0);
+ break;
}
- context()->Plug(v0);
}
+ context()->Plug(var);
break;
}
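
Boiled down, the read barrier above is: load the variable; if the value is the hole, throw a ReferenceError for let/const in harmony mode, or substitute undefined for a classic-mode const. A hedged C++ model of that decision (undefined is represented as 0 here purely for illustration):

    #include <cstdio>
    #include <stdexcept>
    #include <string>

    enum Mode { CONST, LET, CONST_HARMONY };
    struct Value { bool is_hole; int payload; };

    int Read(Value v, Mode mode, const char* name) {
      if (!v.is_hole) return v.payload;          // common case: no barrier hit
      if (mode == LET || mode == CONST_HARMONY)  // harmony: throw on the hole
        throw std::runtime_error(std::string(name) + " is not initialized");
      return 0;  // classic-mode const: the hole reads as undefined (0 here)
    }

    int main() {
      Value uninitialized = { true, 0 };
      printf("classic const -> %d (stands in for undefined)\n",
             Read(uninitialized, CONST, "x"));
      try {
        Read(uninitialized, LET, "y");
      } catch (const std::exception& e) {
        printf("let -> threw: %s\n", e.what());
      }
      return 0;
    }
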
@@ -1387,9 +1474,9 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ mov(a0, result_register());
__ li(a2, Operand(key->handle()));
__ lw(a1, MemOperand(sp));
- Handle<Code> ic = is_strict_mode()
- ? isolate()->builtins()->StoreIC_Initialize_Strict()
- : isolate()->builtins()->StoreIC_Initialize();
+ Handle<Code> ic = is_classic_mode()
+ ? isolate()->builtins()->StoreIC_Initialize()
+ : isolate()->builtins()->StoreIC_Initialize_Strict();
__ Call(ic, RelocInfo::CODE_TARGET, key->id());
PrepareForBailoutForId(key->id(), NO_REGISTERS);
} else {
@@ -1448,13 +1535,21 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
ZoneList<Expression*>* subexprs = expr->values();
int length = subexprs->length();
+
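+ // constant_elements is a fixed pair (layout implied by the assert and
+ // casts below): slot 0 holds the ElementsKind as a Smi, slot 1 holds
+ // the FixedArrayBase with the literal values.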
+ Handle<FixedArray> constant_elements = expr->constant_elements();
+ ASSERT_EQ(2, constant_elements->length());
+ ElementsKind constant_elements_kind =
+ static_cast<ElementsKind>(Smi::cast(constant_elements->get(0))->value());
+ Handle<FixedArrayBase> constant_elements_values(
+ FixedArrayBase::cast(constant_elements->get(1)));
+
__ mov(a0, result_register());
__ lw(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ lw(a3, FieldMemOperand(a3, JSFunction::kLiteralsOffset));
__ li(a2, Operand(Smi::FromInt(expr->literal_index())));
- __ li(a1, Operand(expr->constant_elements()));
+ __ li(a1, Operand(constant_elements));
__ Push(a3, a2, a1);
- if (expr->constant_elements()->map() ==
+ if (constant_elements_values->map() ==
isolate()->heap()->fixed_cow_array_map()) {
FastCloneShallowArrayStub stub(
FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS, length);
@@ -1466,8 +1561,14 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
} else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
__ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
} else {
- FastCloneShallowArrayStub stub(
- FastCloneShallowArrayStub::CLONE_ELEMENTS, length);
+ ASSERT(constant_elements_kind == FAST_ELEMENTS ||
+ constant_elements_kind == FAST_SMI_ONLY_ELEMENTS ||
+ FLAG_smi_only_arrays);
+ FastCloneShallowArrayStub::Mode mode =
+ constant_elements_kind == FAST_DOUBLE_ELEMENTS
+ ? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
+ : FastCloneShallowArrayStub::CLONE_ELEMENTS;
+ FastCloneShallowArrayStub stub(mode, length);
__ CallStub(&stub);
}
@@ -1490,15 +1591,59 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
}
VisitForAccumulatorValue(subexpr);
- // Store the subexpression value in the array's elements.
- __ lw(a1, MemOperand(sp)); // Copy of array literal.
- __ lw(a1, FieldMemOperand(a1, JSObject::kElementsOffset));
+ __ lw(t6, MemOperand(sp)); // Copy of array literal.
+ __ lw(a1, FieldMemOperand(t6, JSObject::kElementsOffset));
+ __ lw(a2, FieldMemOperand(t6, JSObject::kMapOffset));
int offset = FixedArray::kHeaderSize + (i * kPointerSize);
+
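+ // Sketch of the dispatch below: double arrays unbox the value into the
+ // double backing store, Smi values are stored without a write barrier,
+ // object values stored into FAST_ELEMENTS need the barrier, and any
+ // case requiring an elements transition falls back to the runtime.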
+ Label element_done;
+ Label double_elements;
+ Label smi_element;
+ Label slow_elements;
+ Label fast_elements;
+ __ CheckFastElements(a2, a3, &double_elements);
+
+ // FAST_SMI_ONLY_ELEMENTS or FAST_ELEMENTS
+ __ JumpIfSmi(result_register(), &smi_element);
+ __ CheckFastSmiOnlyElements(a2, a3, &fast_elements);
+
+ // Storing into the array literal requires an elements transition. Call
+ // into the runtime.
+ __ bind(&slow_elements);
+ __ push(t6); // Copy of array literal.
+ __ li(a1, Operand(Smi::FromInt(i)));
+ __ li(a2, Operand(Smi::FromInt(NONE))); // PropertyAttributes
+ StrictModeFlag strict_mode_flag = (language_mode() == CLASSIC_MODE)
+ ? kNonStrictMode : kStrictMode;
+ __ li(a3, Operand(Smi::FromInt(strict_mode_flag))); // Strict mode.
+ __ Push(a1, result_register(), a2, a3);
+ __ CallRuntime(Runtime::kSetProperty, 5);
+ __ Branch(&element_done);
+
+ // Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS.
+ __ bind(&double_elements);
+ __ li(a3, Operand(Smi::FromInt(i)));
+ __ StoreNumberToDoubleElements(result_register(), a3, t6, a1, t0, t1, t5,
+ t3, &slow_elements);
+ __ Branch(&element_done);
+
+ // Array literal has ElementsKind of FAST_ELEMENTS and value is an object.
+ __ bind(&fast_elements);
__ sw(result_register(), FieldMemOperand(a1, offset));
- // Update the write barrier for the array store with v0 as the scratch
- // register.
- __ RecordWrite(a1, Operand(offset), a2, result_register());
+ // Update the write barrier for the array store.
+ __ RecordWriteField(
+ a1, offset, result_register(), a2, kRAHasBeenSaved, kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
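+ // OMIT_SMI_CHECK is safe here: Smi values were already diverted to the
+ // smi_element path above and never reach this store.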
+ __ Branch(&element_done);
+
+ // Array literal has ElementsKind of FAST_SMI_ONLY_ELEMENTS or
+ // FAST_ELEMENTS, and value is Smi.
+ __ bind(&smi_element);
+ __ sw(result_register(), FieldMemOperand(a1, offset));
+ // Fall through
+
+ __ bind(&element_done);
PrepareForBailoutForId(expr->GetIdForElement(i), NO_REGISTERS);
}
@@ -1632,7 +1777,7 @@ void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
__ li(a2, Operand(key->handle()));
// Call load IC. It has arguments receiver and property name a0 and a2.
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- __ Call(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
+ __ Call(ic, RelocInfo::CODE_TARGET, prop->id());
}
@@ -1641,7 +1786,7 @@ void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
__ mov(a0, result_register());
// Call keyed load IC. It has arguments key and receiver in a0 and a1.
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- __ Call(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
+ __ Call(ic, RelocInfo::CODE_TARGET, prop->id());
}
@@ -1790,9 +1935,9 @@ void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) {
__ mov(a1, result_register());
__ pop(a0); // Restore value.
__ li(a2, Operand(prop->key()->AsLiteral()->handle()));
- Handle<Code> ic = is_strict_mode()
- ? isolate()->builtins()->StoreIC_Initialize_Strict()
- : isolate()->builtins()->StoreIC_Initialize();
+ Handle<Code> ic = is_classic_mode()
+ ? isolate()->builtins()->StoreIC_Initialize()
+ : isolate()->builtins()->StoreIC_Initialize_Strict();
__ Call(ic);
break;
}
@@ -1803,9 +1948,9 @@ void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) {
__ mov(a1, result_register());
__ pop(a2);
__ pop(a0); // Restore value.
- Handle<Code> ic = is_strict_mode()
- ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
- : isolate()->builtins()->KeyedStoreIC_Initialize();
+ Handle<Code> ic = is_classic_mode()
+ ? isolate()->builtins()->KeyedStoreIC_Initialize()
+ : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
__ Call(ic);
break;
}
@@ -1822,9 +1967,9 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
__ mov(a0, result_register());
__ li(a2, Operand(var->name()));
__ lw(a1, GlobalObjectOperand());
- Handle<Code> ic = is_strict_mode()
- ? isolate()->builtins()->StoreIC_Initialize_Strict()
- : isolate()->builtins()->StoreIC_Initialize();
+ Handle<Code> ic = is_classic_mode()
+ ? isolate()->builtins()->StoreIC_Initialize()
+ : isolate()->builtins()->StoreIC_Initialize_Strict();
__ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
} else if (op == Token::INIT_CONST) {
@@ -1850,12 +1995,12 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
__ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
}
- } else if (var->mode() == Variable::LET && op != Token::INIT_LET) {
+ } else if (var->mode() == LET && op != Token::INIT_LET) {
// Non-initializing assignment to let variable needs a write barrier.
if (var->IsLookupSlot()) {
__ push(v0); // Value.
__ li(a1, Operand(var->name()));
- __ li(a0, Operand(Smi::FromInt(strict_mode_flag())));
+ __ li(a0, Operand(Smi::FromInt(language_mode())));
__ Push(cp, a1, a0); // Context, name, strict mode.
__ CallRuntime(Runtime::kStoreContextSlot, 4);
} else {
@@ -1875,12 +2020,14 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
// RecordWrite may destroy all its register arguments.
__ mov(a3, result_register());
int offset = Context::SlotOffset(var->index());
- __ RecordWrite(a1, Operand(offset), a2, a3);
+ __ RecordWriteContextSlot(
+ a1, offset, a3, a2, kRAHasBeenSaved, kDontSaveFPRegs);
}
}
- } else if (var->mode() != Variable::CONST) {
- // Assignment to var or initializing assignment to let.
+ } else if (!var->is_const_mode() || op == Token::INIT_CONST_HARMONY) {
+ // Assignment to var or initializing assignment to let/const
+ // in harmony mode.
if (var->IsStackAllocated() || var->IsContextSlot()) {
MemOperand location = VarOperand(var, a1);
if (FLAG_debug_code && op == Token::INIT_LET) {
@@ -1893,13 +2040,15 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
__ sw(v0, location);
if (var->IsContextSlot()) {
__ mov(a3, v0);
- __ RecordWrite(a1, Operand(Context::SlotOffset(var->index())), a2, a3);
+ int offset = Context::SlotOffset(var->index());
+ __ RecordWriteContextSlot(
+ a1, offset, a3, a2, kRAHasBeenSaved, kDontSaveFPRegs);
}
} else {
ASSERT(var->IsLookupSlot());
__ push(v0); // Value.
__ li(a1, Operand(var->name()));
- __ li(a0, Operand(Smi::FromInt(strict_mode_flag())));
+ __ li(a0, Operand(Smi::FromInt(language_mode())));
__ Push(cp, a1, a0); // Context, name, strict mode.
__ CallRuntime(Runtime::kStoreContextSlot, 4);
}
@@ -1937,9 +2086,9 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
__ pop(a1);
}
- Handle<Code> ic = is_strict_mode()
- ? isolate()->builtins()->StoreIC_Initialize_Strict()
- : isolate()->builtins()->StoreIC_Initialize();
+ Handle<Code> ic = is_classic_mode()
+ ? isolate()->builtins()->StoreIC_Initialize()
+ : isolate()->builtins()->StoreIC_Initialize_Strict();
__ Call(ic, RelocInfo::CODE_TARGET, expr->id());
// If the assignment ends an initialization block, revert to fast case.
@@ -1989,9 +2138,9 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
__ pop(a2);
}
- Handle<Code> ic = is_strict_mode()
- ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
- : isolate()->builtins()->KeyedStoreIC_Initialize();
+ Handle<Code> ic = is_classic_mode()
+ ? isolate()->builtins()->KeyedStoreIC_Initialize()
+ : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
__ Call(ic, RelocInfo::CODE_TARGET, expr->id());
// If the assignment ends an initialization block, revert to fast case.
@@ -2097,6 +2246,7 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr, CallFunctionFlags flags) {
// Record source position for debugger.
SetSourcePosition(expr->position());
CallFunctionStub stub(arg_count, flags);
+ __ lw(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
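+ // The callee sits below the arguments on the stack; it is reloaded into
+ // a1 here, which CallFunctionStub apparently now expects (the same load
+ // was added at the other call site below).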
__ CallStub(&stub);
RecordJSReturnSite(expr);
// Restore context register.
@@ -2105,8 +2255,7 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr, CallFunctionFlags flags) {
}
-void FullCodeGenerator::EmitResolvePossiblyDirectEval(ResolveEvalFlag flag,
- int arg_count) {
+void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
// Push copy of the first argument or undefined if it doesn't exist.
if (arg_count > 0) {
__ lw(a1, MemOperand(sp, arg_count * kPointerSize));
@@ -2115,22 +2264,20 @@ void FullCodeGenerator::EmitResolvePossiblyDirectEval(ResolveEvalFlag flag,
}
__ push(a1);
- // Push the receiver of the enclosing function and do runtime call.
+ // Push the receiver of the enclosing function.
int receiver_offset = 2 + info_->scope()->num_parameters();
__ lw(a1, MemOperand(fp, receiver_offset * kPointerSize));
__ push(a1);
- // Push the strict mode flag. In harmony mode every eval call
- // is a strict mode eval call.
- StrictModeFlag strict_mode = strict_mode_flag();
- if (FLAG_harmony_block_scoping) {
- strict_mode = kStrictMode;
- }
- __ li(a1, Operand(Smi::FromInt(strict_mode)));
+ // Push the language mode.
+ __ li(a1, Operand(Smi::FromInt(language_mode())));
__ push(a1);
- __ CallRuntime(flag == SKIP_CONTEXT_LOOKUP
- ? Runtime::kResolvePossiblyDirectEvalNoLookup
- : Runtime::kResolvePossiblyDirectEval, 4);
+ // Push the start position of the scope the call resides in.
+ __ li(a1, Operand(Smi::FromInt(scope()->start_position())));
+ __ push(a1);
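+ // The stack now holds, bottom to top, the callee function, the first
+ // argument (or undefined), the enclosing receiver, the language mode,
+ // and the scope start position. These are the five arguments consumed
+ // by the runtime call below.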
+
+ // Do the runtime call.
+ __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 5);
}
@@ -2164,28 +2311,11 @@ void FullCodeGenerator::VisitCall(Call* expr) {
VisitForStackValue(args->at(i));
}
- // If we know that eval can only be shadowed by eval-introduced
- // variables we attempt to load the global eval function directly
- // in generated code. If we succeed, there is no need to perform a
- // context lookup in the runtime system.
- Label done;
- Variable* var = proxy->var();
- if (!var->IsUnallocated() && var->mode() == Variable::DYNAMIC_GLOBAL) {
- Label slow;
- EmitLoadGlobalCheckExtensions(var, NOT_INSIDE_TYPEOF, &slow);
- // Push the function and resolve eval.
- __ push(v0);
- EmitResolvePossiblyDirectEval(SKIP_CONTEXT_LOOKUP, arg_count);
- __ jmp(&done);
- __ bind(&slow);
- }
-
// Push a copy of the function (found below the arguments) and
// resolve eval.
__ lw(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
__ push(a1);
- EmitResolvePossiblyDirectEval(PERFORM_CONTEXT_LOOKUP, arg_count);
- __ bind(&done);
+ EmitResolvePossiblyDirectEval(arg_count);
// The runtime call returns a pair of values in v0 (function) and
// v1 (receiver). Touch up the stack with the right values.
@@ -2195,6 +2325,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
// Record source position for debugger.
SetSourcePosition(expr->position());
CallFunctionStub stub(arg_count, RECEIVER_MIGHT_BE_IMPLICIT);
+ __ lw(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
__ CallStub(&stub);
RecordJSReturnSite(expr);
// Restore context register.
@@ -2308,7 +2439,8 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
}
-void FullCodeGenerator::EmitIsSmi(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -2320,7 +2452,7 @@ void FullCodeGenerator::EmitIsSmi(ZoneList<Expression*>* args) {
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
__ And(t0, v0, Operand(kSmiTagMask));
Split(eq, t0, Operand(zero_reg), if_true, if_false, fall_through);
@@ -2328,7 +2460,8 @@ void FullCodeGenerator::EmitIsSmi(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitIsNonNegativeSmi(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsNonNegativeSmi(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -2340,7 +2473,7 @@ void FullCodeGenerator::EmitIsNonNegativeSmi(ZoneList<Expression*>* args) {
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
__ And(at, v0, Operand(kSmiTagMask | 0x80000000));
Split(eq, at, Operand(zero_reg), if_true, if_false, fall_through);
@@ -2348,7 +2481,8 @@ void FullCodeGenerator::EmitIsNonNegativeSmi(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitIsObject(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsObject(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -2370,7 +2504,7 @@ void FullCodeGenerator::EmitIsObject(ZoneList<Expression*>* args) {
__ Branch(if_false, ne, at, Operand(zero_reg));
__ lbu(a1, FieldMemOperand(a2, Map::kInstanceTypeOffset));
__ Branch(if_false, lt, a1, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(le, a1, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE),
if_true, if_false, fall_through);
@@ -2378,7 +2512,8 @@ void FullCodeGenerator::EmitIsObject(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitIsSpecObject(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -2392,7 +2527,7 @@ void FullCodeGenerator::EmitIsSpecObject(ZoneList<Expression*>* args) {
__ JumpIfSmi(v0, if_false);
__ GetObjectType(v0, a1, a1);
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(ge, a1, Operand(FIRST_SPEC_OBJECT_TYPE),
if_true, if_false, fall_through);
@@ -2400,7 +2535,8 @@ void FullCodeGenerator::EmitIsSpecObject(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitIsUndetectableObject(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsUndetectableObject(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -2416,7 +2552,7 @@ void FullCodeGenerator::EmitIsUndetectableObject(ZoneList<Expression*>* args) {
__ lw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
__ lbu(a1, FieldMemOperand(a1, Map::kBitFieldOffset));
__ And(at, a1, Operand(1 << Map::kIsUndetectable));
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(ne, at, Operand(zero_reg), if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
@@ -2424,8 +2560,8 @@ void FullCodeGenerator::EmitIsUndetectableObject(ZoneList<Expression*>* args) {
void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
- ZoneList<Expression*>* args) {
-
+ CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -2501,12 +2637,13 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
__ sb(a2, FieldMemOperand(a1, Map::kBitField2Offset));
__ jmp(if_true);
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
context()->Plug(if_true, if_false);
}
-void FullCodeGenerator::EmitIsFunction(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -2520,7 +2657,7 @@ void FullCodeGenerator::EmitIsFunction(ZoneList<Expression*>* args) {
__ JumpIfSmi(v0, if_false);
__ GetObjectType(v0, a1, a2);
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
__ Branch(if_true, eq, a2, Operand(JS_FUNCTION_TYPE));
__ Branch(if_false);
@@ -2528,7 +2665,8 @@ void FullCodeGenerator::EmitIsFunction(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitIsArray(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsArray(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -2542,7 +2680,7 @@ void FullCodeGenerator::EmitIsArray(ZoneList<Expression*>* args) {
__ JumpIfSmi(v0, if_false);
__ GetObjectType(v0, a1, a1);
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(eq, a1, Operand(JS_ARRAY_TYPE),
if_true, if_false, fall_through);
@@ -2550,7 +2688,8 @@ void FullCodeGenerator::EmitIsArray(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitIsRegExp(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsRegExp(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -2564,15 +2703,15 @@ void FullCodeGenerator::EmitIsRegExp(ZoneList<Expression*>* args) {
__ JumpIfSmi(v0, if_false);
__ GetObjectType(v0, a1, a1);
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(eq, a1, Operand(JS_REGEXP_TYPE), if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
}
-void FullCodeGenerator::EmitIsConstructCall(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 0);
+void FullCodeGenerator::EmitIsConstructCall(CallRuntime* expr) {
+ ASSERT(expr->arguments()->length() == 0);
Label materialize_true, materialize_false;
Label* if_true = NULL;
@@ -2594,7 +2733,7 @@ void FullCodeGenerator::EmitIsConstructCall(ZoneList<Expression*>* args) {
// Check the marker in the calling frame.
__ bind(&check_frame_marker);
__ lw(a1, MemOperand(a2, StandardFrameConstants::kMarkerOffset));
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(eq, a1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)),
if_true, if_false, fall_through);
@@ -2602,7 +2741,8 @@ void FullCodeGenerator::EmitIsConstructCall(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitObjectEquals(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitObjectEquals(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 2);
// Load the two objects into registers and perform the comparison.
@@ -2617,14 +2757,15 @@ void FullCodeGenerator::EmitObjectEquals(ZoneList<Expression*>* args) {
&if_true, &if_false, &fall_through);
__ pop(a1);
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(eq, v0, Operand(a1), if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
}
-void FullCodeGenerator::EmitArguments(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitArguments(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
// ArgumentsAccessStub expects the key in a1 and the formal
@@ -2638,9 +2779,8 @@ void FullCodeGenerator::EmitArguments(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitArgumentsLength(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 0);
-
+void FullCodeGenerator::EmitArgumentsLength(CallRuntime* expr) {
+ ASSERT(expr->arguments()->length() == 0);
Label exit;
// Get the number of formal parameters.
__ li(v0, Operand(Smi::FromInt(info_->scope()->num_parameters())));
@@ -2660,7 +2800,8 @@ void FullCodeGenerator::EmitArgumentsLength(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitClassOf(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
Label done, null, function, non_function_constructor;
@@ -2671,18 +2812,23 @@ void FullCodeGenerator::EmitClassOf(ZoneList<Expression*>* args) {
// Check that the object is a JS object but take special care of JS
// functions to make sure they have 'Function' as their class.
+ // Assume that there are only two callable types, one at each end of the
+ // type range for JS object types. This saves extra comparisons.
+ STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
__ GetObjectType(v0, v0, a1); // Map is now in v0.
__ Branch(&null, lt, a1, Operand(FIRST_SPEC_OBJECT_TYPE));
- // As long as LAST_CALLABLE_SPEC_OBJECT_TYPE is the last instance type, and
- // FIRST_CALLABLE_SPEC_OBJECT_TYPE comes right after
- // LAST_NONCALLABLE_SPEC_OBJECT_TYPE, we can avoid checking for the latter.
- STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
- STATIC_ASSERT(FIRST_CALLABLE_SPEC_OBJECT_TYPE ==
- LAST_NONCALLABLE_SPEC_OBJECT_TYPE + 1);
- __ Branch(&function, ge, a1, Operand(FIRST_CALLABLE_SPEC_OBJECT_TYPE));
+ STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
+ FIRST_SPEC_OBJECT_TYPE + 1);
+ __ Branch(&function, eq, a1, Operand(FIRST_SPEC_OBJECT_TYPE));
+
+ STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
+ LAST_SPEC_OBJECT_TYPE - 1);
+ __ Branch(&function, eq, a1, Operand(LAST_SPEC_OBJECT_TYPE));
+ // Assume that there is no larger type.
+ STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == LAST_TYPE - 1);
- // Check if the constructor in the map is a function.
+ // Check if the constructor in the map is a JS function.
__ lw(v0, FieldMemOperand(v0, Map::kConstructorOffset));
__ GetObjectType(v0, a1, a1);
__ Branch(&non_function_constructor, ne, a1, Operand(JS_FUNCTION_TYPE));
@@ -2714,7 +2860,7 @@ void FullCodeGenerator::EmitClassOf(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitLog(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitLog(CallRuntime* expr) {
// Conditionally generate a log call.
// Args:
// 0 (literal string): The type of logging (corresponds to the flags).
@@ -2722,6 +2868,7 @@ void FullCodeGenerator::EmitLog(ZoneList<Expression*>* args) {
// 1 (string): Format string. Access the string at argument index 2
// with '%2s' (see Logger::LogRuntime for all the formats).
// 2 (array): Arguments to the format string.
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(args->length(), 3);
if (CodeGenerator::ShouldGenerateLog(args->at(0))) {
VisitForStackValue(args->at(1));
@@ -2735,9 +2882,8 @@ void FullCodeGenerator::EmitLog(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitRandomHeapNumber(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 0);
-
+void FullCodeGenerator::EmitRandomHeapNumber(CallRuntime* expr) {
+ ASSERT(expr->arguments()->length() == 0);
Label slow_allocate_heapnumber;
Label heapnumber_allocated;
@@ -2760,10 +2906,10 @@ void FullCodeGenerator::EmitRandomHeapNumber(ZoneList<Expression*>* args) {
// ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20)).
if (CpuFeatures::IsSupported(FPU)) {
__ PrepareCallCFunction(1, a0);
- __ li(a0, Operand(ExternalReference::isolate_address()));
+ __ lw(a0, ContextOperand(cp, Context::GLOBAL_INDEX));
+ __ lw(a0, FieldMemOperand(a0, GlobalObject::kGlobalContextOffset));
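+ // The C function now receives the global context rather than the
+ // isolate address, presumably so the random state is tracked per
+ // context.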
__ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
-
CpuFeatures::Scope scope(FPU);
// 0x41300000 is the top half of 1.0 x 2^20 as a double.
__ li(a1, Operand(0x41300000));
@@ -2778,7 +2924,8 @@ void FullCodeGenerator::EmitRandomHeapNumber(ZoneList<Expression*>* args) {
} else {
__ PrepareCallCFunction(2, a0);
__ mov(a0, s0);
- __ li(a1, Operand(ExternalReference::isolate_address()));
+ __ lw(a1, ContextOperand(cp, Context::GLOBAL_INDEX));
+ __ lw(a1, FieldMemOperand(a1, GlobalObject::kGlobalContextOffset));
__ CallCFunction(
ExternalReference::fill_heap_number_with_random_function(isolate()), 2);
}
@@ -2787,9 +2934,10 @@ void FullCodeGenerator::EmitRandomHeapNumber(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitSubString(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitSubString(CallRuntime* expr) {
// Load the arguments on the stack and call the stub.
SubStringStub stub;
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 3);
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
@@ -2799,9 +2947,10 @@ void FullCodeGenerator::EmitSubString(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitRegExpExec(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitRegExpExec(CallRuntime* expr) {
// Load the arguments on the stack and call the stub.
RegExpExecStub stub;
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 4);
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
@@ -2812,7 +2961,8 @@ void FullCodeGenerator::EmitRegExpExec(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitValueOf(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitValueOf(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0)); // Load the object.
@@ -2831,8 +2981,9 @@ void FullCodeGenerator::EmitValueOf(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitMathPow(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
// Load the arguments on the stack and call the runtime function.
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 2);
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
@@ -2842,7 +2993,8 @@ void FullCodeGenerator::EmitMathPow(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitSetValueOf(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitSetValueOf(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 2);
VisitForStackValue(args->at(0)); // Load the object.
@@ -2861,14 +3013,17 @@ void FullCodeGenerator::EmitSetValueOf(ZoneList<Expression*>* args) {
__ sw(v0, FieldMemOperand(a1, JSValue::kValueOffset));
// Update the write barrier. Save the value as it will be
// overwritten by the write barrier code and is needed afterward.
- __ RecordWrite(a1, Operand(JSValue::kValueOffset - kHeapObjectTag), a2, a3);
+ __ mov(a2, v0);
+ __ RecordWriteField(
+ a1, JSValue::kValueOffset, a2, a3, kRAHasBeenSaved, kDontSaveFPRegs);
__ bind(&done);
context()->Plug(v0);
}
-void FullCodeGenerator::EmitNumberToString(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(args->length(), 1);
// Load the argument on the stack and call the stub.
@@ -2880,7 +3035,8 @@ void FullCodeGenerator::EmitNumberToString(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitStringCharFromCode(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -2898,7 +3054,8 @@ void FullCodeGenerator::EmitStringCharFromCode(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitStringCharCodeAt(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 2);
VisitForStackValue(args->at(0));
@@ -2907,7 +3064,6 @@ void FullCodeGenerator::EmitStringCharCodeAt(ZoneList<Expression*>* args) {
Register object = a1;
Register index = a0;
- Register scratch = a2;
Register result = v0;
__ pop(object);
@@ -2917,7 +3073,6 @@ void FullCodeGenerator::EmitStringCharCodeAt(ZoneList<Expression*>* args) {
Label done;
StringCharCodeAtGenerator generator(object,
index,
- scratch,
result,
&need_conversion,
&need_conversion,
@@ -2946,7 +3101,8 @@ void FullCodeGenerator::EmitStringCharCodeAt(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitStringCharAt(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 2);
VisitForStackValue(args->at(0));
@@ -2955,8 +3111,7 @@ void FullCodeGenerator::EmitStringCharAt(ZoneList<Expression*>* args) {
Register object = a1;
Register index = a0;
- Register scratch1 = a2;
- Register scratch2 = a3;
+ Register scratch = a3;
Register result = v0;
__ pop(object);
@@ -2966,8 +3121,7 @@ void FullCodeGenerator::EmitStringCharAt(ZoneList<Expression*>* args) {
Label done;
StringCharAtGenerator generator(object,
index,
- scratch1,
- scratch2,
+ scratch,
result,
&need_conversion,
&need_conversion,
@@ -2996,9 +3150,9 @@ void FullCodeGenerator::EmitStringCharAt(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitStringAdd(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(2, args->length());
-
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
@@ -3008,7 +3162,8 @@ void FullCodeGenerator::EmitStringAdd(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitStringCompare(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitStringCompare(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(2, args->length());
VisitForStackValue(args->at(0));
@@ -3020,10 +3175,11 @@ void FullCodeGenerator::EmitStringCompare(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitMathSin(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitMathSin(CallRuntime* expr) {
// Load the argument on the stack and call the stub.
TranscendentalCacheStub stub(TranscendentalCache::SIN,
TranscendentalCacheStub::TAGGED);
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForStackValue(args->at(0));
__ mov(a0, result_register()); // Stub requires parameter in a0 and on tos.
@@ -3032,10 +3188,24 @@ void FullCodeGenerator::EmitMathSin(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitMathCos(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitMathCos(CallRuntime* expr) {
// Load the argument on the stack and call the stub.
TranscendentalCacheStub stub(TranscendentalCache::COS,
TranscendentalCacheStub::TAGGED);
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+ VisitForStackValue(args->at(0));
+ __ mov(a0, result_register()); // Stub requires parameter in a0 and on tos.
+ __ CallStub(&stub);
+ context()->Plug(v0);
+}
+
+
+void FullCodeGenerator::EmitMathTan(CallRuntime* expr) {
+ // Load the argument on the stack and call the stub.
+ TranscendentalCacheStub stub(TranscendentalCache::TAN,
+ TranscendentalCacheStub::TAGGED);
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForStackValue(args->at(0));
__ mov(a0, result_register()); // Stub requires parameter in a0 and on tos.
@@ -3044,10 +3214,11 @@ void FullCodeGenerator::EmitMathCos(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitMathLog(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitMathLog(CallRuntime* expr) {
// Load the argument on the stack and call the stub.
TranscendentalCacheStub stub(TranscendentalCache::LOG,
TranscendentalCacheStub::TAGGED);
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForStackValue(args->at(0));
__ mov(a0, result_register()); // Stub requires parameter in a0 and on tos.
@@ -3056,8 +3227,9 @@ void FullCodeGenerator::EmitMathLog(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitMathSqrt(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitMathSqrt(CallRuntime* expr) {
// Load the argument on the stack and call the runtime function.
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForStackValue(args->at(0));
__ CallRuntime(Runtime::kMath_sqrt, 1);
@@ -3065,7 +3237,8 @@ void FullCodeGenerator::EmitMathSqrt(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitCallFunction(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() >= 2);
int arg_count = args->length() - 2; // 2 ~ receiver and function.
@@ -3074,18 +3247,31 @@ void FullCodeGenerator::EmitCallFunction(ZoneList<Expression*>* args) {
}
VisitForAccumulatorValue(args->last()); // Function.
+ // Check for proxy.
+ Label proxy, done;
+ __ GetObjectType(v0, a1, a1);
+ __ Branch(&proxy, eq, a1, Operand(JS_FUNCTION_PROXY_TYPE));
+
// InvokeFunction requires the function in a1. Move it in there.
__ mov(a1, result_register());
ParameterCount count(arg_count);
__ InvokeFunction(a1, count, CALL_FUNCTION,
NullCallWrapper(), CALL_AS_METHOD);
__ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ jmp(&done);
+
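+ // Function proxies cannot be invoked through InvokeFunction; route them
+ // through Runtime::kCall instead (presumably dispatching to the proxy's
+ // call trap).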
+ __ bind(&proxy);
+ __ push(v0);
+ __ CallRuntime(Runtime::kCall, args->length());
+ __ bind(&done);
+
context()->Plug(v0);
}
-void FullCodeGenerator::EmitRegExpConstructResult(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
RegExpConstructResultStub stub;
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 3);
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
@@ -3095,7 +3281,8 @@ void FullCodeGenerator::EmitRegExpConstructResult(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitSwapElements(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitSwapElements(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 3);
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
@@ -3154,16 +3341,31 @@ void FullCodeGenerator::EmitSwapElements(ZoneList<Expression*>* args) {
__ sw(scratch1, MemOperand(index2, 0));
__ sw(scratch2, MemOperand(index1, 0));
- Label new_space;
- __ InNewSpace(elements, scratch1, eq, &new_space);
+ Label no_remembered_set;
+ __ CheckPageFlag(elements,
+ scratch1,
+ 1 << MemoryChunk::SCAN_ON_SCAVENGE,
+ ne,
+ &no_remembered_set);
// Possible optimization: do a check that both values are Smis
// (or them and test against Smi mask).
- __ mov(scratch1, elements);
- __ RecordWriteHelper(elements, index1, scratch2);
- __ RecordWriteHelper(scratch1, index2, scratch2); // scratch1 holds elements.
+ // We are swapping two objects in an array and the incremental marker never
+ // pauses in the middle of scanning a single object. Therefore the
+ // incremental marker is not disturbed, so we don't need to call the
+ // RecordWrite stub that notifies the incremental marker.
+ __ RememberedSetHelper(elements,
+ index1,
+ scratch2,
+ kDontSaveFPRegs,
+ MacroAssembler::kFallThroughAtEnd);
+ __ RememberedSetHelper(elements,
+ index2,
+ scratch2,
+ kDontSaveFPRegs,
+ MacroAssembler::kFallThroughAtEnd);
- __ bind(&new_space);
+ __ bind(&no_remembered_set);
// We are done. Drop elements from the stack, and return undefined.
__ Drop(3);
__ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
@@ -3177,7 +3379,8 @@ void FullCodeGenerator::EmitSwapElements(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitGetFromCache(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(2, args->length());
ASSERT_NE(NULL, args->at(0)->AsLiteral());
@@ -3230,7 +3433,8 @@ void FullCodeGenerator::EmitGetFromCache(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitIsRegExpEquivalent(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsRegExpEquivalent(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(2, args->length());
Register right = v0;
@@ -3246,8 +3450,7 @@ void FullCodeGenerator::EmitIsRegExpEquivalent(ZoneList<Expression*>* args) {
__ Branch(&ok, eq, left, Operand(right));
// Fail if either is a non-HeapObject.
__ And(tmp, left, Operand(right));
- __ And(at, tmp, Operand(kSmiTagMask));
- __ Branch(&fail, eq, at, Operand(zero_reg));
+ __ JumpIfSmi(tmp, &fail);
__ lw(tmp, FieldMemOperand(left, HeapObject::kMapOffset));
__ lbu(tmp2, FieldMemOperand(tmp, Map::kInstanceTypeOffset));
__ Branch(&fail, ne, tmp2, Operand(JS_REGEXP_TYPE));
@@ -3267,7 +3470,8 @@ void FullCodeGenerator::EmitIsRegExpEquivalent(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitHasCachedArrayIndex(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
VisitForAccumulatorValue(args->at(0));
Label materialize_true, materialize_false;
@@ -3280,14 +3484,15 @@ void FullCodeGenerator::EmitHasCachedArrayIndex(ZoneList<Expression*>* args) {
__ lw(a0, FieldMemOperand(v0, String::kHashFieldOffset));
__ And(a0, a0, Operand(String::kContainsCachedArrayIndexMask));
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(eq, a0, Operand(zero_reg), if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
}
-void FullCodeGenerator::EmitGetCachedArrayIndex(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -3302,12 +3507,12 @@ void FullCodeGenerator::EmitGetCachedArrayIndex(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
Label bailout, done, one_char_separator, long_separator,
non_trivial_array, not_size_one_array, loop,
empty_separator_loop, one_char_separator_loop,
one_char_separator_loop_entry, long_separator_loop;
-
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 2);
VisitForStackValue(args->at(1));
VisitForAccumulatorValue(args->at(0));
@@ -3592,7 +3797,9 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
if (property != NULL) {
VisitForStackValue(property->obj());
VisitForStackValue(property->key());
- __ li(a1, Operand(Smi::FromInt(strict_mode_flag())));
+ StrictModeFlag strict_mode_flag = (language_mode() == CLASSIC_MODE)
+ ? kNonStrictMode : kStrictMode;
+ __ li(a1, Operand(Smi::FromInt(strict_mode_flag)));
__ push(a1);
__ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
context()->Plug(v0);
@@ -3600,7 +3807,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
Variable* var = proxy->var();
// Delete of an unqualified identifier is disallowed in strict mode
// but "delete this" is allowed.
- ASSERT(strict_mode_flag() == kNonStrictMode || var->is_this());
+ ASSERT(language_mode() == CLASSIC_MODE || var->is_this());
if (var->IsUnallocated()) {
__ lw(a2, GlobalObjectOperand());
__ li(a1, Operand(var->name()));
@@ -3643,18 +3850,35 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
// Unary NOT has no side effects so it's only necessary to visit the
// subexpression. Match the optimizing compiler by not branching.
VisitForEffect(expr->expression());
+ } else if (context()->IsTest()) {
+ const TestContext* test = TestContext::cast(context());
+ // The labels are swapped for the recursive call.
+ VisitForControl(expr->expression(),
+ test->false_label(),
+ test->true_label(),
+ test->fall_through());
+ context()->Plug(test->true_label(), test->false_label());
} else {
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
-
- // Notice that the labels are swapped.
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_false, &if_true, &fall_through);
- if (context()->IsTest()) ForwardBailoutToChild(expr);
- VisitForControl(expr->expression(), if_true, if_false, fall_through);
- context()->Plug(if_false, if_true); // Labels swapped.
+ // We handle value contexts explicitly rather than simply visiting
+ // for control and plugging the control flow into the context,
+ // because we need to prepare a pair of extra administrative AST ids
+ // for the optimizing compiler.
+ ASSERT(context()->IsAccumulatorValue() || context()->IsStackValue());
+ Label materialize_true, materialize_false, done;
+ VisitForControl(expr->expression(),
+ &materialize_false,
+ &materialize_true,
+ &materialize_true);
+ __ bind(&materialize_true);
+ PrepareForBailoutForId(expr->MaterializeTrueId(), NO_REGISTERS);
+ __ LoadRoot(v0, Heap::kTrueValueRootIndex);
+ if (context()->IsStackValue()) __ push(v0);
+ __ jmp(&done);
+ __ bind(&materialize_false);
+ PrepareForBailoutForId(expr->MaterializeFalseId(), NO_REGISTERS);
+ __ LoadRoot(v0, Heap::kFalseValueRootIndex);
+ if (context()->IsStackValue()) __ push(v0);
+ __ bind(&done);
}
break;
}
@@ -3849,9 +4073,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ mov(a0, result_register()); // Value.
__ li(a2, Operand(prop->key()->AsLiteral()->handle())); // Name.
__ pop(a1); // Receiver.
- Handle<Code> ic = is_strict_mode()
- ? isolate()->builtins()->StoreIC_Initialize_Strict()
- : isolate()->builtins()->StoreIC_Initialize();
+ Handle<Code> ic = is_classic_mode()
+ ? isolate()->builtins()->StoreIC_Initialize()
+ : isolate()->builtins()->StoreIC_Initialize_Strict();
__ Call(ic, RelocInfo::CODE_TARGET, expr->id());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
@@ -3867,9 +4091,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ mov(a0, result_register()); // Value.
__ pop(a1); // Key.
__ pop(a2); // Receiver.
- Handle<Code> ic = is_strict_mode()
- ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
- : isolate()->builtins()->KeyedStoreIC_Initialize();
+ Handle<Code> ic = is_classic_mode()
+ ? isolate()->builtins()->KeyedStoreIC_Initialize()
+ : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
__ Call(ic, RelocInfo::CODE_TARGET, expr->id());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
@@ -3916,19 +4140,24 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
context()->Plug(v0);
} else {
// This expression cannot throw a reference error at the top level.
- VisitInCurrentContext(expr);
+ VisitInDuplicateContext(expr);
}
}
void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
- Handle<String> check,
- Label* if_true,
- Label* if_false,
- Label* fall_through) {
+ Expression* sub_expr,
+ Handle<String> check) {
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
{ AccumulatorValueContext context(this);
- VisitForTypeofValue(expr);
+ VisitForTypeofValue(sub_expr);
}
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
if (check->Equals(isolate()->heap()->number_symbol())) {
__ JumpIfSmi(v0, if_true);
@@ -3964,10 +4193,11 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
Split(ne, a1, Operand(zero_reg), if_true, if_false, fall_through);
} else if (check->Equals(isolate()->heap()->function_symbol())) {
__ JumpIfSmi(v0, if_false);
- __ GetObjectType(v0, a1, v0); // Leave map in a1.
- Split(ge, v0, Operand(FIRST_CALLABLE_SPEC_OBJECT_TYPE),
- if_true, if_false, fall_through);
-
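+ // With exactly two callable spec-object types, assumed here to be
+ // JS_FUNCTION_TYPE and JS_FUNCTION_PROXY_TYPE (see the branches below),
+ // two equality checks on the instance type suffice.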
+ STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
+ __ GetObjectType(v0, v0, a1);
+ __ Branch(if_true, eq, a1, Operand(JS_FUNCTION_TYPE));
+ Split(eq, a1, Operand(JS_FUNCTION_PROXY_TYPE),
+ if_true, if_false, fall_through);
} else if (check->Equals(isolate()->heap()->object_symbol())) {
__ JumpIfSmi(v0, if_false);
if (!FLAG_harmony_typeof) {
@@ -3986,18 +4216,7 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
} else {
if (if_false != fall_through) __ jmp(if_false);
}
-}
-
-
-void FullCodeGenerator::EmitLiteralCompareUndefined(Expression* expr,
- Label* if_true,
- Label* if_false,
- Label* fall_through) {
- VisitForAccumulatorValue(expr);
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
-
- __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- Split(eq, v0, Operand(at), if_true, if_false, fall_through);
+ context()->Plug(if_true, if_false);
}
@@ -4005,9 +4224,12 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
Comment cmnt(masm_, "[ CompareOperation");
SetSourcePosition(expr->position());
+ // First we try a fast inlined version of the compare when one of
+ // the operands is a literal.
+ if (TryLiteralCompare(expr)) return;
+
// Always perform the comparison for its control flow. Pack the result
// into the expression's context after the comparison is performed.
-
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
@@ -4015,20 +4237,13 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
- // First we try a fast inlined version of the compare when one of
- // the operands is a literal.
- if (TryLiteralCompare(expr, if_true, if_false, fall_through)) {
- context()->Plug(if_true, if_false);
- return;
- }
-
Token::Value op = expr->op();
VisitForStackValue(expr->left());
switch (op) {
case Token::IN:
VisitForStackValue(expr->right());
__ InvokeBuiltin(Builtins::IN, CALL_FUNCTION);
- PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
+ PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
__ LoadRoot(t0, Heap::kTrueValueRootIndex);
Split(eq, v0, Operand(t0), if_true, if_false, fall_through);
break;
@@ -4037,7 +4252,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
VisitForStackValue(expr->right());
InstanceofStub stub(InstanceofStub::kNoFlags);
__ CallStub(&stub);
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
// The stub returns 0 for true.
Split(eq, v0, Operand(zero_reg), if_true, if_false, fall_through);
break;
@@ -4050,36 +4265,26 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
case Token::EQ_STRICT:
case Token::EQ:
cc = eq;
- __ mov(a0, result_register());
- __ pop(a1);
break;
case Token::LT:
cc = lt;
- __ mov(a0, result_register());
- __ pop(a1);
break;
case Token::GT:
- // Reverse left and right sides to obtain ECMA-262 conversion order.
- cc = lt;
- __ mov(a1, result_register());
- __ pop(a0);
+ cc = gt;
break;
case Token::LTE:
- // Reverse left and right sides to obtain ECMA-262 conversion order.
- cc = ge;
- __ mov(a1, result_register());
- __ pop(a0);
+ cc = le;
break;
case Token::GTE:
cc = ge;
- __ mov(a0, result_register());
- __ pop(a1);
break;
case Token::IN:
case Token::INSTANCEOF:
default:
UNREACHABLE();
}
+ __ mov(a0, result_register());
+ __ pop(a1);
bool inline_smi_code = ShouldInlineSmiCase(op);
JumpPatchSite patch_site(masm_);
@@ -4095,7 +4300,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
Handle<Code> ic = CompareIC::GetUninitialized(op);
__ Call(ic, RelocInfo::CODE_TARGET, expr->id());
patch_site.EmitPatchInfo();
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(cc, v0, Operand(zero_reg), if_true, if_false, fall_through);
}
}
@@ -4106,8 +4311,9 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
}
-void FullCodeGenerator::VisitCompareToNull(CompareToNull* expr) {
- Comment cmnt(masm_, "[ CompareToNull");
+void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
+ Expression* sub_expr,
+ NilValue nil) {
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
@@ -4115,18 +4321,23 @@ void FullCodeGenerator::VisitCompareToNull(CompareToNull* expr) {
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
- VisitForAccumulatorValue(expr->expression());
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ VisitForAccumulatorValue(sub_expr);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Heap::RootListIndex nil_value = nil == kNullValue ?
+ Heap::kNullValueRootIndex :
+ Heap::kUndefinedValueRootIndex;
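+ // Semantics sketch: for a strict comparison only the named nil value
+ // matches; for a non-strict comparison null, undefined and undetectable
+ // objects all compare equal to the nil value.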
__ mov(a0, result_register());
- __ LoadRoot(a1, Heap::kNullValueRootIndex);
- if (expr->is_strict()) {
+ __ LoadRoot(a1, nil_value);
+ if (expr->op() == Token::EQ_STRICT) {
Split(eq, a0, Operand(a1), if_true, if_false, fall_through);
} else {
+ Heap::RootListIndex other_nil_value = nil == kNullValue ?
+ Heap::kUndefinedValueRootIndex :
+ Heap::kNullValueRootIndex;
__ Branch(if_true, eq, a0, Operand(a1));
- __ LoadRoot(a1, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(a1, other_nil_value);
__ Branch(if_true, eq, a0, Operand(a1));
- __ And(at, a0, Operand(kSmiTagMask));
- __ Branch(if_false, eq, at, Operand(zero_reg));
+ __ JumpIfSmi(a0, if_false);
// It can be an undetectable object.
__ lw(a1, FieldMemOperand(a0, HeapObject::kMapOffset));
__ lbu(a1, FieldMemOperand(a1, Map::kBitFieldOffset));
diff --git a/deps/v8/src/mips/ic-mips.cc b/deps/v8/src/mips/ic-mips.cc
index a76c215a4..b057695f0 100644
--- a/deps/v8/src/mips/ic-mips.cc
+++ b/deps/v8/src/mips/ic-mips.cc
@@ -210,7 +210,8 @@ static void GenerateDictionaryStore(MacroAssembler* masm,
// Update the write barrier. Make sure not to clobber the value.
__ mov(scratch1, value);
- __ RecordWrite(elements, scratch2, scratch1);
+ __ RecordWrite(
+ elements, scratch2, scratch1, kRAHasNotBeenSaved, kDontSaveFPRegs);
}
@@ -383,10 +384,10 @@ Object* CallIC_Miss(Arguments args);
// The generated code does not accept smi keys.
// The generated code falls through if both probes miss.
-static void GenerateMonomorphicCacheProbe(MacroAssembler* masm,
- int argc,
- Code::Kind kind,
- Code::ExtraICState extra_ic_state) {
+void CallICBase::GenerateMonomorphicCacheProbe(MacroAssembler* masm,
+ int argc,
+ Code::Kind kind,
+ Code::ExtraICState extra_state) {
// ----------- S t a t e -------------
// -- a1 : receiver
// -- a2 : name
@@ -396,7 +397,7 @@ static void GenerateMonomorphicCacheProbe(MacroAssembler* masm,
// Probe the stub cache.
Code::Flags flags = Code::ComputeFlags(kind,
MONOMORPHIC,
- extra_ic_state,
+ extra_state,
NORMAL,
argc);
Isolate::Current()->stub_cache()->GenerateProbe(
@@ -462,7 +463,7 @@ static void GenerateFunctionTailCall(MacroAssembler* masm,
}
-static void GenerateCallNormal(MacroAssembler* masm, int argc) {
+void CallICBase::GenerateNormal(MacroAssembler* masm, int argc) {
// ----------- S t a t e -------------
// -- a2 : name
// -- ra : return address
@@ -485,10 +486,10 @@ static void GenerateCallNormal(MacroAssembler* masm, int argc) {
}
-static void GenerateCallMiss(MacroAssembler* masm,
- int argc,
- IC::UtilityId id,
- Code::ExtraICState extra_ic_state) {
+void CallICBase::GenerateMiss(MacroAssembler* masm,
+ int argc,
+ IC::UtilityId id,
+ Code::ExtraICState extra_state) {
// ----------- S t a t e -------------
// -- a2 : name
// -- ra : return address
@@ -504,29 +505,29 @@ static void GenerateCallMiss(MacroAssembler* masm,
// Get the receiver of the function from the stack.
__ lw(a3, MemOperand(sp, argc*kPointerSize));
- __ EnterInternalFrame();
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
- // Push the receiver and the name of the function.
- __ Push(a3, a2);
+ // Push the receiver and the name of the function.
+ __ Push(a3, a2);
- // Call the entry.
- __ li(a0, Operand(2));
- __ li(a1, Operand(ExternalReference(IC_Utility(id), isolate)));
+ // Call the entry.
+ __ li(a0, Operand(2));
+ __ li(a1, Operand(ExternalReference(IC_Utility(id), isolate)));
- CEntryStub stub(1);
- __ CallStub(&stub);
+ CEntryStub stub(1);
+ __ CallStub(&stub);
- // Move result to a1 and leave the internal frame.
- __ mov(a1, v0);
- __ LeaveInternalFrame();
+ // Move result to a1 and leave the internal frame.
+ __ mov(a1, v0);
+ }
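+ // The FrameScope destructor tears down the internal frame when it goes
+ // out of scope, replacing the explicit LeaveInternalFrame call.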
// Check if the receiver is a global object of some sort.
// This can happen only for regular CallIC but not KeyedCallIC.
if (id == IC::kCallIC_Miss) {
Label invoke, global;
__ lw(a2, MemOperand(sp, argc * kPointerSize));
- __ andi(t0, a2, kSmiTagMask);
- __ Branch(&invoke, eq, t0, Operand(zero_reg));
+ __ JumpIfSmi(a2, &invoke);
__ GetObjectType(a2, a3, a3);
__ Branch(&global, eq, a3, Operand(JS_GLOBAL_OBJECT_TYPE));
__ Branch(&invoke, ne, a3, Operand(JS_BUILTINS_OBJECT_TYPE));
@@ -538,7 +539,7 @@ static void GenerateCallMiss(MacroAssembler* masm,
__ bind(&invoke);
}
// Invoke the function.
- CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state)
+ CallKind call_kind = CallICBase::Contextual::decode(extra_state)
? CALL_AS_FUNCTION
: CALL_AS_METHOD;
ParameterCount actual(argc);
@@ -550,18 +551,6 @@ static void GenerateCallMiss(MacroAssembler* masm,
}
-void CallIC::GenerateMiss(MacroAssembler* masm,
- int argc,
- Code::ExtraICState extra_ic_state) {
- // ----------- S t a t e -------------
- // -- a2 : name
- // -- ra : return address
- // -----------------------------------
-
- GenerateCallMiss(masm, argc, IC::kCallIC_Miss, extra_ic_state);
-}
-
-
void CallIC::GenerateMegamorphic(MacroAssembler* masm,
int argc,
Code::ExtraICState extra_ic_state) {
@@ -577,27 +566,6 @@ void CallIC::GenerateMegamorphic(MacroAssembler* masm,
}
-void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
- // ----------- S t a t e -------------
- // -- a2 : name
- // -- ra : return address
- // -----------------------------------
-
- GenerateCallNormal(masm, argc);
- GenerateMiss(masm, argc, Code::kNoExtraICState);
-}
-
-
-void KeyedCallIC::GenerateMiss(MacroAssembler* masm, int argc) {
- // ----------- S t a t e -------------
- // -- a2 : name
- // -- ra : return address
- // -----------------------------------
-
- GenerateCallMiss(masm, argc, IC::kKeyedCallIC_Miss, Code::kNoExtraICState);
-}
-
-
void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
// ----------- S t a t e -------------
// -- a2 : name
@@ -649,12 +617,13 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
// This branch is taken when calling KeyedCallIC_Miss is neither required
// nor beneficial.
__ IncrementCounter(counters->keyed_call_generic_slow_load(), 1, a0, a3);
- __ EnterInternalFrame();
- __ push(a2); // Save the key.
- __ Push(a1, a2); // Pass the receiver and the key.
- __ CallRuntime(Runtime::kKeyedGetProperty, 2);
- __ pop(a2); // Restore the key.
- __ LeaveInternalFrame();
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ push(a2); // Save the key.
+ __ Push(a1, a2); // Pass the receiver and the key.
+ __ CallRuntime(Runtime::kKeyedGetProperty, 2);
+ __ pop(a2); // Restore the key.
+ }
__ mov(a1, v0);
__ jmp(&do_call);
@@ -713,7 +682,7 @@ void KeyedCallIC::GenerateNormal(MacroAssembler* masm, int argc) {
__ JumpIfSmi(a2, &miss);
__ IsObjectJSStringType(a2, a0, &miss);
- GenerateCallNormal(masm, argc);
+ CallICBase::GenerateNormal(masm, argc);
__ bind(&miss);
GenerateMiss(masm, argc);
}
@@ -902,9 +871,9 @@ void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
MemOperand mapped_location =
GenerateMappedArgumentsLookup(masm, a2, a1, a3, t0, t1, &notin, &slow);
__ sw(a0, mapped_location);
- // Verify mapped_location MemOperand is register, with no offset.
- ASSERT_EQ(mapped_location.offset(), 0);
- __ RecordWrite(a3, mapped_location.rm(), t5);
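+  // The new write barrier takes the slot address and the written value in
+  // registers: compute the address of the mapped slot and copy the value.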
+ __ Addu(t2, a3, t1);
+ __ mov(t5, a0);
+ __ RecordWrite(a3, t2, t5, kRAHasNotBeenSaved, kDontSaveFPRegs);
__ Ret(USE_DELAY_SLOT);
__ mov(v0, a0); // (In delay slot) return the value stored in v0.
__ bind(&notin);
@@ -912,8 +881,9 @@ void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
MemOperand unmapped_location =
GenerateUnmappedArgumentsLookup(masm, a1, a3, t0, &slow);
__ sw(a0, unmapped_location);
- ASSERT_EQ(unmapped_location.offset(), 0);
- __ RecordWrite(a3, unmapped_location.rm(), t5);
+ __ Addu(t2, a3, t0);
+ __ mov(t5, a0);
+ __ RecordWrite(a3, t2, t5, kRAHasNotBeenSaved, kDontSaveFPRegs);
__ Ret(USE_DELAY_SLOT);
__ mov(v0, a0); // (In delay slot) return the value stored in v0.
__ bind(&slow);
@@ -1150,14 +1120,12 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
Register receiver = a1;
Register index = a0;
- Register scratch1 = a2;
- Register scratch2 = a3;
+ Register scratch = a3;
Register result = v0;
StringCharAtGenerator char_at_generator(receiver,
index,
- scratch1,
- scratch2,
+ scratch,
result,
&miss, // When not a string.
&miss, // When not a number.
@@ -1201,109 +1169,144 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
// -- a2 : receiver
// -- ra : return address
// -----------------------------------
-
- Label slow, fast, array, extra, exit;
+ Label slow, array, extra, check_if_double_array;
+ Label fast_object_with_map_check, fast_object_without_map_check;
+ Label fast_double_with_map_check, fast_double_without_map_check;
// Register usage.
Register value = a0;
Register key = a1;
Register receiver = a2;
Register elements = a3; // Elements array of the receiver.
- // t0 is used as ip in the arm version.
- // t3-t4 are used as temporaries.
+ Register elements_map = t2;
+ Register receiver_map = t3;
+ // t0 and t1 are used as general scratch registers.
// Check that the key is a smi.
__ JumpIfNotSmi(key, &slow);
// Check that the object isn't a smi.
__ JumpIfSmi(receiver, &slow);
-
// Get the map of the object.
- __ lw(t3, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ lw(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
// Check that the receiver does not require access checks. We need
// to do this because this generic stub does not perform map checks.
- __ lbu(t0, FieldMemOperand(t3, Map::kBitFieldOffset));
+ __ lbu(t0, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
__ And(t0, t0, Operand(1 << Map::kIsAccessCheckNeeded));
__ Branch(&slow, ne, t0, Operand(zero_reg));
// Check if the object is a JS array or not.
- __ lbu(t3, FieldMemOperand(t3, Map::kInstanceTypeOffset));
-
- __ Branch(&array, eq, t3, Operand(JS_ARRAY_TYPE));
+ __ lbu(t0, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset));
+ __ Branch(&array, eq, t0, Operand(JS_ARRAY_TYPE));
// Check that the object is some kind of JSObject.
- __ Branch(&slow, lt, t3, Operand(FIRST_JS_RECEIVER_TYPE));
- __ Branch(&slow, eq, t3, Operand(JS_PROXY_TYPE));
- __ Branch(&slow, eq, t3, Operand(JS_FUNCTION_PROXY_TYPE));
+ __ Branch(&slow, lt, t0, Operand(FIRST_JS_OBJECT_TYPE));
// Object case: Check key against length in the elements array.
__ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- // Check that the object is in fast mode and writable.
- __ lw(t3, FieldMemOperand(elements, HeapObject::kMapOffset));
- __ LoadRoot(t0, Heap::kFixedArrayMapRootIndex);
- __ Branch(&slow, ne, t3, Operand(t0));
// Check array bounds. Both the key and the length of FixedArray are smis.
__ lw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
- __ Branch(&fast, lo, key, Operand(t0));
- // Fall thru to slow if un-tagged index >= length.
+ __ Branch(&fast_object_with_map_check, lo, key, Operand(t0));
// Slow case, handle jump to runtime.
__ bind(&slow);
-
// Entry registers are intact.
// a0: value.
// a1: key.
// a2: receiver.
-
GenerateRuntimeSetProperty(masm, strict_mode);
// Extra capacity case: Check if there is extra capacity to
// perform the store and update the length. Used for adding one
// element to the array by writing to array[array.length].
-
__ bind(&extra);
+  // The key and array length registers are still live from the comparison.
// Only support writing to array[array.length].
__ Branch(&slow, ne, key, Operand(t0));
// Check for room in the elements backing store.
// Both the key and the length of FixedArray are smis.
__ lw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
__ Branch(&slow, hs, key, Operand(t0));
+ __ lw(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
+ __ Branch(&check_if_double_array, ne, elements_map,
+ Operand(masm->isolate()->factory()->fixed_array_map()));
// Calculate key + 1 as smi.
- STATIC_ASSERT(0 == kSmiTag);
- __ Addu(t3, key, Operand(Smi::FromInt(1)));
- __ sw(t3, FieldMemOperand(receiver, JSArray::kLengthOffset));
- __ Branch(&fast);
-
+ STATIC_ASSERT(kSmiTag == 0);
+ __ Addu(t0, key, Operand(Smi::FromInt(1)));
+ __ sw(t0, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ __ Branch(&fast_object_without_map_check);
+
+ __ bind(&check_if_double_array);
+ __ Branch(&slow, ne, elements_map,
+ Operand(masm->isolate()->factory()->fixed_double_array_map()));
+ // Add 1 to key, and go to common element store code for doubles.
+ STATIC_ASSERT(kSmiTag == 0);
+ __ Addu(t0, key, Operand(Smi::FromInt(1)));
+ __ sw(t0, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ __ jmp(&fast_double_without_map_check);
// Array case: Get the length and the elements array from the JS
// array. Check that the array is in fast mode (and writable); if it
// is the length is always a smi.
-
__ bind(&array);
__ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ lw(t3, FieldMemOperand(elements, HeapObject::kMapOffset));
- __ LoadRoot(t0, Heap::kFixedArrayMapRootIndex);
- __ Branch(&slow, ne, t3, Operand(t0));
// Check the key against the length in the array.
__ lw(t0, FieldMemOperand(receiver, JSArray::kLengthOffset));
__ Branch(&extra, hs, key, Operand(t0));
// Fall through to fast case.
- __ bind(&fast);
- // Fast case, store the value to the elements backing store.
- __ Addu(t4, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ sll(t1, key, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(t4, t4, Operand(t1));
- __ sw(value, MemOperand(t4));
- // Skip write barrier if the written value is a smi.
- __ JumpIfSmi(value, &exit);
-
+ __ bind(&fast_object_with_map_check);
+ Register scratch_value = t0;
+ Register address = t1;
+ __ lw(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
+ __ Branch(&fast_double_with_map_check, ne, elements_map,
+ Operand(masm->isolate()->factory()->fixed_array_map()));
+ __ bind(&fast_object_without_map_check);
+ // Smi stores don't require further checks.
+ Label non_smi_value;
+ __ JumpIfNotSmi(value, &non_smi_value);
+  // It's irrelevant whether the array is smi-only or not when writing a smi.
+ __ Addu(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
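+  // The key is a smi, i.e. already shifted left by kSmiTagSize, so scaling
+  // it by kPointerSizeLog2 - kSmiTagSize yields the byte offset.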
+ __ sll(scratch_value, key, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(address, address, scratch_value);
+ __ sw(value, MemOperand(address));
+ __ Ret(USE_DELAY_SLOT);
+ __ mov(v0, value);
+
+ __ bind(&non_smi_value);
+ // Escape to slow case when writing non-smi into smi-only array.
+ __ CheckFastObjectElements(receiver_map, scratch_value, &slow);
+ // Fast elements array, store the value to the elements backing store.
+ __ Addu(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ sll(scratch_value, key, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(address, address, scratch_value);
+ __ sw(value, MemOperand(address));
// Update write barrier for the elements array address.
- __ Subu(t3, t4, Operand(elements));
-
- __ RecordWrite(elements, Operand(t3), t4, t5);
- __ bind(&exit);
-
- __ mov(v0, a0); // Return the value written.
+ __ mov(v0, value); // Preserve the value which is returned.
+ __ RecordWrite(elements,
+ address,
+ value,
+ kRAHasNotBeenSaved,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
__ Ret();
+
+ __ bind(&fast_double_with_map_check);
+ // Check for fast double array case. If this fails, call through to the
+ // runtime.
+ __ Branch(&slow, ne, elements_map,
+ Operand(masm->isolate()->factory()->fixed_double_array_map()));
+ __ bind(&fast_double_without_map_check);
+ __ StoreNumberToDoubleElements(value,
+ key,
+ receiver,
+ elements,
+ t0,
+ t1,
+ t2,
+ t3,
+ &slow);
+ __ Ret(USE_DELAY_SLOT);
+ __ mov(v0, value);
}
@@ -1382,6 +1385,47 @@ void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
}
+void KeyedStoreIC::GenerateTransitionElementsSmiToDouble(MacroAssembler* masm) {
+ // ---------- S t a t e --------------
+ // -- a2 : receiver
+ // -- a3 : target map
+ // -- ra : return address
+ // -----------------------------------
+ // Must return the modified receiver in v0.
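+  // When --trace-elements-transitions is on, skip the fast path so the
+  // runtime can log the transition.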
+ if (!FLAG_trace_elements_transitions) {
+ Label fail;
+ ElementsTransitionGenerator::GenerateSmiOnlyToDouble(masm, &fail);
+ __ Ret(USE_DELAY_SLOT);
+ __ mov(v0, a2);
+ __ bind(&fail);
+ }
+
+ __ push(a2);
+ __ TailCallRuntime(Runtime::kTransitionElementsSmiToDouble, 1, 1);
+}
+
+
+void KeyedStoreIC::GenerateTransitionElementsDoubleToObject(
+ MacroAssembler* masm) {
+ // ---------- S t a t e --------------
+ // -- a2 : receiver
+ // -- a3 : target map
+ // -- ra : return address
+ // -----------------------------------
+ // Must return the modified receiver in v0.
+ if (!FLAG_trace_elements_transitions) {
+ Label fail;
+ ElementsTransitionGenerator::GenerateDoubleToObject(masm, &fail);
+ __ Ret(USE_DELAY_SLOT);
+ __ mov(v0, a2);
+ __ bind(&fail);
+ }
+
+ __ push(a2);
+ __ TailCallRuntime(Runtime::kTransitionElementsDoubleToObject, 1, 1);
+}
+
+
void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
StrictModeFlag strict_mode) {
// ----------- S t a t e -------------
@@ -1521,11 +1565,9 @@ Condition CompareIC::ComputeCondition(Token::Value op) {
case Token::LT:
return lt;
case Token::GT:
- // Reverse left and right operands to obtain ECMA-262 conversion order.
- return lt;
+ return gt;
case Token::LTE:
- // Reverse left and right operands to obtain ECMA-262 conversion order.
- return ge;
+ return le;
case Token::GTE:
return ge;
default:
@@ -1572,7 +1614,8 @@ void PatchInlinedSmiCode(Address address) {
 // If the instruction following the call is not an andi at, rx, #yyy, nothing
// was inlined.
Instr instr = Assembler::instr_at(andi_instruction_address);
- if (!Assembler::IsAndImmediate(instr)) {
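+  // Only the marker instruction andi zero_reg, rx, #yyy is patchable; an
+  // andi with any other destination register is ordinary code.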
+ if (!(Assembler::IsAndImmediate(instr) &&
+ Assembler::GetRt(instr) == (uint32_t)zero_reg.code())) {
return;
}
diff --git a/deps/v8/src/mips/lithium-codegen-mips.cc b/deps/v8/src/mips/lithium-codegen-mips.cc
new file mode 100644
index 000000000..c0879bbd9
--- /dev/null
+++ b/deps/v8/src/mips/lithium-codegen-mips.cc
@@ -0,0 +1,4651 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "mips/lithium-codegen-mips.h"
+#include "mips/lithium-gap-resolver-mips.h"
+#include "code-stubs.h"
+#include "stub-cache.h"
+
+namespace v8 {
+namespace internal {
+
+
+class SafepointGenerator : public CallWrapper {
+ public:
+ SafepointGenerator(LCodeGen* codegen,
+ LPointerMap* pointers,
+ Safepoint::DeoptMode mode)
+ : codegen_(codegen),
+ pointers_(pointers),
+ deopt_mode_(mode) { }
+ virtual ~SafepointGenerator() { }
+
+ virtual void BeforeCall(int call_size) const { }
+
+ virtual void AfterCall() const {
+ codegen_->RecordSafepoint(pointers_, deopt_mode_);
+ }
+
+ private:
+ LCodeGen* codegen_;
+ LPointerMap* pointers_;
+ Safepoint::DeoptMode deopt_mode_;
+};
+
+
+#define __ masm()->
+
+bool LCodeGen::GenerateCode() {
+ HPhase phase("Code generation", chunk());
+ ASSERT(is_unused());
+ status_ = GENERATING;
+ CpuFeatures::Scope scope(FPU);
+
+ CodeStub::GenerateFPStubs();
+
+ // Open a frame scope to indicate that there is a frame on the stack. The
+ // NONE indicates that the scope shouldn't actually generate code to set up
+ // the frame (that is done in GeneratePrologue).
+ FrameScope frame_scope(masm_, StackFrame::NONE);
+
+ return GeneratePrologue() &&
+ GenerateBody() &&
+ GenerateDeferredCode() &&
+ GenerateSafepointTable();
+}
+
+
+void LCodeGen::FinishCode(Handle<Code> code) {
+ ASSERT(is_done());
+ code->set_stack_slots(GetStackSlotCount());
+ code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
+ PopulateDeoptimizationData(code);
+}
+
+
+void LCodeGen::Abort(const char* format, ...) {
+ if (FLAG_trace_bailout) {
+ SmartArrayPointer<char> name(
+ info()->shared_info()->DebugName()->ToCString());
+ PrintF("Aborting LCodeGen in @\"%s\": ", *name);
+ va_list arguments;
+ va_start(arguments, format);
+ OS::VPrint(format, arguments);
+ va_end(arguments);
+ PrintF("\n");
+ }
+ status_ = ABORTED;
+}
+
+
+void LCodeGen::Comment(const char* format, ...) {
+ if (!FLAG_code_comments) return;
+ char buffer[4 * KB];
+ StringBuilder builder(buffer, ARRAY_SIZE(buffer));
+ va_list arguments;
+ va_start(arguments, format);
+ builder.AddFormattedList(format, arguments);
+ va_end(arguments);
+
+ // Copy the string before recording it in the assembler to avoid
+ // issues when the stack allocated buffer goes out of scope.
+ size_t length = builder.position();
+ Vector<char> copy = Vector<char>::New(length + 1);
+ memcpy(copy.start(), builder.Finalize(), copy.length());
+ masm()->RecordComment(copy.start());
+}
+
+
+bool LCodeGen::GeneratePrologue() {
+ ASSERT(is_generating());
+
+#ifdef DEBUG
+ if (strlen(FLAG_stop_at) > 0 &&
+ info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
+ __ stop("stop_at");
+ }
+#endif
+
+ // a1: Callee's JS function.
+ // cp: Callee's context.
+ // fp: Caller's frame pointer.
+  // ra: Caller's pc (return address).
+
+ // Strict mode functions and builtins need to replace the receiver
+ // with undefined when called as functions (without an explicit
+  // receiver object). t1 is zero for method calls and non-zero for
+  // function calls.
+ if (!info_->is_classic_mode() || info_->is_native()) {
+ Label ok;
+ __ Branch(&ok, eq, t1, Operand(zero_reg));
+
+ int receiver_offset = scope()->num_parameters() * kPointerSize;
+ __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
+ __ sw(a2, MemOperand(sp, receiver_offset));
+ __ bind(&ok);
+ }
+
+ __ Push(ra, fp, cp, a1);
+ __ Addu(fp, sp, Operand(2 * kPointerSize)); // Adj. FP to point to saved FP.
+
+ // Reserve space for the stack slots needed by the code.
+ int slots = GetStackSlotCount();
+ if (slots > 0) {
+ if (FLAG_debug_code) {
+ __ li(a0, Operand(slots));
+ __ li(a2, Operand(kSlotsZapValue));
+ Label loop;
+ __ bind(&loop);
+ __ push(a2);
+ __ Subu(a0, a0, 1);
+ __ Branch(&loop, ne, a0, Operand(zero_reg));
+ } else {
+ __ Subu(sp, sp, Operand(slots * kPointerSize));
+ }
+ }
+
+ // Possibly allocate a local context.
+ int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+ if (heap_slots > 0) {
+ Comment(";;; Allocate local context");
+ // Argument to NewContext is the function, which is in a1.
+ __ push(a1);
+ if (heap_slots <= FastNewContextStub::kMaximumSlots) {
+ FastNewContextStub stub(heap_slots);
+ __ CallStub(&stub);
+ } else {
+ __ CallRuntime(Runtime::kNewFunctionContext, 1);
+ }
+ RecordSafepoint(Safepoint::kNoLazyDeopt);
+ // Context is returned in both v0 and cp. It replaces the context
+    // passed to us. It's saved on the stack and kept live in cp.
+ __ sw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ // Copy any necessary parameters into the context.
+ int num_parameters = scope()->num_parameters();
+ for (int i = 0; i < num_parameters; i++) {
+ Variable* var = scope()->parameter(i);
+ if (var->IsContextSlot()) {
+ int parameter_offset = StandardFrameConstants::kCallerSPOffset +
+ (num_parameters - 1 - i) * kPointerSize;
+ // Load parameter from stack.
+ __ lw(a0, MemOperand(fp, parameter_offset));
+ // Store it in the context.
+ MemOperand target = ContextOperand(cp, var->index());
+ __ sw(a0, target);
+ // Update the write barrier. This clobbers a3 and a0.
+ __ RecordWriteContextSlot(
+ cp, target.offset(), a0, a3, kRAHasBeenSaved, kSaveFPRegs);
+ }
+ }
+ Comment(";;; End allocate local context");
+ }
+
+ // Trace the call.
+ if (FLAG_trace) {
+ __ CallRuntime(Runtime::kTraceEnter, 0);
+ }
+ EnsureSpaceForLazyDeopt();
+ return !is_aborted();
+}
+
+
+bool LCodeGen::GenerateBody() {
+ ASSERT(is_generating());
+ bool emit_instructions = true;
+ for (current_instruction_ = 0;
+ !is_aborted() && current_instruction_ < instructions_->length();
+ current_instruction_++) {
+ LInstruction* instr = instructions_->at(current_instruction_);
+ if (instr->IsLabel()) {
+ LLabel* label = LLabel::cast(instr);
+ emit_instructions = !label->HasReplacement();
+ }
+
+ if (emit_instructions) {
+ Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic());
+ instr->CompileToNative(this);
+ }
+ }
+ return !is_aborted();
+}
+
+
+bool LCodeGen::GenerateDeferredCode() {
+ ASSERT(is_generating());
+ if (deferred_.length() > 0) {
+ for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
+ LDeferredCode* code = deferred_[i];
+ __ bind(code->entry());
+ Comment(";;; Deferred code @%d: %s.",
+ code->instruction_index(),
+ code->instr()->Mnemonic());
+ code->Generate();
+ __ jmp(code->exit());
+ }
+ }
+ // Deferred code is the last part of the instruction sequence. Mark
+ // the generated code as done unless we bailed out.
+ if (!is_aborted()) status_ = DONE;
+ return !is_aborted();
+}
+
+
+bool LCodeGen::GenerateDeoptJumpTable() {
+  // TODO(plind): not clear that this will have an advantage for MIPS.
+ // Skipping it for now. Raised issue #100 for this.
+ Abort("Unimplemented: %s", "GenerateDeoptJumpTable");
+ return false;
+}
+
+
+bool LCodeGen::GenerateSafepointTable() {
+ ASSERT(is_done());
+ safepoints_.Emit(masm(), GetStackSlotCount());
+ return !is_aborted();
+}
+
+
+Register LCodeGen::ToRegister(int index) const {
+ return Register::FromAllocationIndex(index);
+}
+
+
+DoubleRegister LCodeGen::ToDoubleRegister(int index) const {
+ return DoubleRegister::FromAllocationIndex(index);
+}
+
+
+Register LCodeGen::ToRegister(LOperand* op) const {
+ ASSERT(op->IsRegister());
+ return ToRegister(op->index());
+}
+
+
+Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
+ if (op->IsRegister()) {
+ return ToRegister(op->index());
+ } else if (op->IsConstantOperand()) {
+ __ li(scratch, ToOperand(op));
+ return scratch;
+ } else if (op->IsStackSlot() || op->IsArgument()) {
+ __ lw(scratch, ToMemOperand(op));
+ return scratch;
+ }
+ UNREACHABLE();
+ return scratch;
+}
+
+
+DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
+ ASSERT(op->IsDoubleRegister());
+ return ToDoubleRegister(op->index());
+}
+
+
+DoubleRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
+ FloatRegister flt_scratch,
+ DoubleRegister dbl_scratch) {
+ if (op->IsDoubleRegister()) {
+ return ToDoubleRegister(op->index());
+ } else if (op->IsConstantOperand()) {
+ LConstantOperand* const_op = LConstantOperand::cast(op);
+ Handle<Object> literal = chunk_->LookupLiteral(const_op);
+ Representation r = chunk_->LookupLiteralRepresentation(const_op);
+ if (r.IsInteger32()) {
+ ASSERT(literal->IsNumber());
+ __ li(at, Operand(static_cast<int32_t>(literal->Number())));
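+      // Move the int32 to an FPU register and convert it from word to double.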
+ __ mtc1(at, flt_scratch);
+ __ cvt_d_w(dbl_scratch, flt_scratch);
+ return dbl_scratch;
+ } else if (r.IsDouble()) {
+ Abort("unsupported double immediate");
+ } else if (r.IsTagged()) {
+ Abort("unsupported tagged immediate");
+ }
+ } else if (op->IsStackSlot() || op->IsArgument()) {
+ MemOperand mem_op = ToMemOperand(op);
+ __ ldc1(dbl_scratch, mem_op);
+ return dbl_scratch;
+ }
+ UNREACHABLE();
+ return dbl_scratch;
+}
+
+
+int LCodeGen::ToInteger32(LConstantOperand* op) const {
+ Handle<Object> value = chunk_->LookupLiteral(op);
+ ASSERT(chunk_->LookupLiteralRepresentation(op).IsInteger32());
+ ASSERT(static_cast<double>(static_cast<int32_t>(value->Number())) ==
+ value->Number());
+ return static_cast<int32_t>(value->Number());
+}
+
+
+double LCodeGen::ToDouble(LConstantOperand* op) const {
+ Handle<Object> value = chunk_->LookupLiteral(op);
+ return value->Number();
+}
+
+
+Operand LCodeGen::ToOperand(LOperand* op) {
+ if (op->IsConstantOperand()) {
+ LConstantOperand* const_op = LConstantOperand::cast(op);
+ Handle<Object> literal = chunk_->LookupLiteral(const_op);
+ Representation r = chunk_->LookupLiteralRepresentation(const_op);
+ if (r.IsInteger32()) {
+ ASSERT(literal->IsNumber());
+ return Operand(static_cast<int32_t>(literal->Number()));
+ } else if (r.IsDouble()) {
+ Abort("ToOperand Unsupported double immediate.");
+ }
+ ASSERT(r.IsTagged());
+ return Operand(literal);
+ } else if (op->IsRegister()) {
+ return Operand(ToRegister(op));
+ } else if (op->IsDoubleRegister()) {
+ Abort("ToOperand IsDoubleRegister unimplemented");
+ return Operand(0);
+ }
+ // Stack slots not implemented, use ToMemOperand instead.
+ UNREACHABLE();
+ return Operand(0);
+}
+
+
+MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
+ ASSERT(!op->IsRegister());
+ ASSERT(!op->IsDoubleRegister());
+ ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
+ int index = op->index();
+ if (index >= 0) {
+ // Local or spill slot. Skip the frame pointer, function, and
+ // context in the fixed part of the frame.
+ return MemOperand(fp, -(index + 3) * kPointerSize);
+ } else {
+ // Incoming parameter. Skip the return address.
+ return MemOperand(fp, -(index - 1) * kPointerSize);
+ }
+}
+
+
+MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
+ ASSERT(op->IsDoubleStackSlot());
+ int index = op->index();
+ if (index >= 0) {
+ // Local or spill slot. Skip the frame pointer, function, context,
+ // and the first word of the double in the fixed part of the frame.
+ return MemOperand(fp, -(index + 3) * kPointerSize + kPointerSize);
+ } else {
+ // Incoming parameter. Skip the return address and the first word of
+ // the double.
+ return MemOperand(fp, -(index - 1) * kPointerSize + kPointerSize);
+ }
+}
+
+
+void LCodeGen::WriteTranslation(LEnvironment* environment,
+ Translation* translation) {
+ if (environment == NULL) return;
+
+ // The translation includes one command per value in the environment.
+ int translation_size = environment->values()->length();
+ // The output frame height does not include the parameters.
+ int height = translation_size - environment->parameter_count();
+
+ WriteTranslation(environment->outer(), translation);
+ int closure_id = DefineDeoptimizationLiteral(environment->closure());
+ translation->BeginFrame(environment->ast_id(), closure_id, height);
+ for (int i = 0; i < translation_size; ++i) {
+ LOperand* value = environment->values()->at(i);
+ // spilled_registers_ and spilled_double_registers_ are either
+ // both NULL or both set.
+ if (environment->spilled_registers() != NULL && value != NULL) {
+ if (value->IsRegister() &&
+ environment->spilled_registers()[value->index()] != NULL) {
+ translation->MarkDuplicate();
+ AddToTranslation(translation,
+ environment->spilled_registers()[value->index()],
+ environment->HasTaggedValueAt(i));
+ } else if (
+ value->IsDoubleRegister() &&
+ environment->spilled_double_registers()[value->index()] != NULL) {
+ translation->MarkDuplicate();
+ AddToTranslation(
+ translation,
+ environment->spilled_double_registers()[value->index()],
+ false);
+ }
+ }
+
+ AddToTranslation(translation, value, environment->HasTaggedValueAt(i));
+ }
+}
+
+
+void LCodeGen::AddToTranslation(Translation* translation,
+ LOperand* op,
+ bool is_tagged) {
+ if (op == NULL) {
+ // TODO(twuerthinger): Introduce marker operands to indicate that this value
+    // is not present and must be reconstructed by the deoptimizer. Currently
+ // this is only used for the arguments object.
+ translation->StoreArgumentsObject();
+ } else if (op->IsStackSlot()) {
+ if (is_tagged) {
+ translation->StoreStackSlot(op->index());
+ } else {
+ translation->StoreInt32StackSlot(op->index());
+ }
+ } else if (op->IsDoubleStackSlot()) {
+ translation->StoreDoubleStackSlot(op->index());
+ } else if (op->IsArgument()) {
+ ASSERT(is_tagged);
+ int src_index = GetStackSlotCount() + op->index();
+ translation->StoreStackSlot(src_index);
+ } else if (op->IsRegister()) {
+ Register reg = ToRegister(op);
+ if (is_tagged) {
+ translation->StoreRegister(reg);
+ } else {
+ translation->StoreInt32Register(reg);
+ }
+ } else if (op->IsDoubleRegister()) {
+ DoubleRegister reg = ToDoubleRegister(op);
+ translation->StoreDoubleRegister(reg);
+ } else if (op->IsConstantOperand()) {
+ Handle<Object> literal = chunk()->LookupLiteral(LConstantOperand::cast(op));
+ int src_index = DefineDeoptimizationLiteral(literal);
+ translation->StoreLiteral(src_index);
+ } else {
+ UNREACHABLE();
+ }
+}
+
+
+void LCodeGen::CallCode(Handle<Code> code,
+ RelocInfo::Mode mode,
+ LInstruction* instr) {
+ CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
+}
+
+
+void LCodeGen::CallCodeGeneric(Handle<Code> code,
+ RelocInfo::Mode mode,
+ LInstruction* instr,
+ SafepointMode safepoint_mode) {
+ ASSERT(instr != NULL);
+ LPointerMap* pointers = instr->pointer_map();
+ RecordPosition(pointers->position());
+ __ Call(code, mode);
+ RecordSafepointWithLazyDeopt(instr, safepoint_mode);
+}
+
+
+void LCodeGen::CallRuntime(const Runtime::Function* function,
+ int num_arguments,
+ LInstruction* instr) {
+ ASSERT(instr != NULL);
+ LPointerMap* pointers = instr->pointer_map();
+ ASSERT(pointers != NULL);
+ RecordPosition(pointers->position());
+
+ __ CallRuntime(function, num_arguments);
+ RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+}
+
+
+void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
+ int argc,
+ LInstruction* instr) {
+ __ CallRuntimeSaveDoubles(id);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
+}
+
+
+void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
+ Safepoint::DeoptMode mode) {
+ if (!environment->HasBeenRegistered()) {
+ // Physical stack frame layout:
+ // -x ............. -4 0 ..................................... y
+ // [incoming arguments] [spill slots] [pushed outgoing arguments]
+
+ // Layout of the environment:
+ // 0 ..................................................... size-1
+ // [parameters] [locals] [expression stack including arguments]
+
+ // Layout of the translation:
+ // 0 ........................................................ size - 1 + 4
+ // [expression stack including arguments] [locals] [4 words] [parameters]
+ // |>------------ translation_size ------------<|
+
+ int frame_count = 0;
+ for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
+ ++frame_count;
+ }
+ Translation translation(&translations_, frame_count);
+ WriteTranslation(environment, &translation);
+ int deoptimization_index = deoptimizations_.length();
+ int pc_offset = masm()->pc_offset();
+ environment->Register(deoptimization_index,
+ translation.index(),
+ (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
+ deoptimizations_.Add(environment);
+ }
+}
+
+
+void LCodeGen::DeoptimizeIf(Condition cc,
+ LEnvironment* environment,
+ Register src1,
+ const Operand& src2) {
+ RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
+ ASSERT(environment->HasBeenRegistered());
+ int id = environment->deoptimization_index();
+ Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER);
+ ASSERT(entry != NULL);
+ if (entry == NULL) {
+ Abort("bailout was not prepared");
+ return;
+ }
+
+ ASSERT(FLAG_deopt_every_n_times < 2); // Other values not supported on MIPS.
+
+ if (FLAG_deopt_every_n_times == 1 &&
+ info_->shared_info()->opt_count() == id) {
+ __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
+ return;
+ }
+
+ if (FLAG_trap_on_deopt) {
+ Label skip;
+ if (cc != al) {
+ __ Branch(&skip, NegateCondition(cc), src1, src2);
+ }
+ __ stop("trap_on_deopt");
+ __ bind(&skip);
+ }
+
+ if (cc == al) {
+ __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
+ } else {
+    // TODO(plind): The ARM port is a little different here, due to its
+    // deopt jump table, which is not used on MIPS yet.
+ __ Jump(entry, RelocInfo::RUNTIME_ENTRY, cc, src1, src2);
+ }
+}
+
+
+void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
+ int length = deoptimizations_.length();
+ if (length == 0) return;
+ ASSERT(FLAG_deopt);
+ Handle<DeoptimizationInputData> data =
+ factory()->NewDeoptimizationInputData(length, TENURED);
+
+ Handle<ByteArray> translations = translations_.CreateByteArray();
+ data->SetTranslationByteArray(*translations);
+ data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
+
+ Handle<FixedArray> literals =
+ factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
+ for (int i = 0; i < deoptimization_literals_.length(); i++) {
+ literals->set(i, *deoptimization_literals_[i]);
+ }
+ data->SetLiteralArray(*literals);
+
+ data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id()));
+ data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
+
+ // Populate the deoptimization entries.
+ for (int i = 0; i < length; i++) {
+ LEnvironment* env = deoptimizations_[i];
+ data->SetAstId(i, Smi::FromInt(env->ast_id()));
+ data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
+ data->SetArgumentsStackHeight(i,
+ Smi::FromInt(env->arguments_stack_height()));
+ data->SetPc(i, Smi::FromInt(env->pc_offset()));
+ }
+ code->set_deoptimization_data(*data);
+}
+
+
+int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
+ int result = deoptimization_literals_.length();
+ for (int i = 0; i < deoptimization_literals_.length(); ++i) {
+ if (deoptimization_literals_[i].is_identical_to(literal)) return i;
+ }
+ deoptimization_literals_.Add(literal);
+ return result;
+}
+
+
+void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
+ ASSERT(deoptimization_literals_.length() == 0);
+
+ const ZoneList<Handle<JSFunction> >* inlined_closures =
+ chunk()->inlined_closures();
+
+ for (int i = 0, length = inlined_closures->length();
+ i < length;
+ i++) {
+ DefineDeoptimizationLiteral(inlined_closures->at(i));
+ }
+
+ inlined_function_count_ = deoptimization_literals_.length();
+}
+
+
+void LCodeGen::RecordSafepointWithLazyDeopt(
+ LInstruction* instr, SafepointMode safepoint_mode) {
+ if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
+ RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
+ } else {
+ ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 0, Safepoint::kLazyDeopt);
+ }
+}
+
+
+void LCodeGen::RecordSafepoint(
+ LPointerMap* pointers,
+ Safepoint::Kind kind,
+ int arguments,
+ Safepoint::DeoptMode deopt_mode) {
+ ASSERT(expected_safepoint_kind_ == kind);
+
+ const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
+ Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
+ kind, arguments, deopt_mode);
+ for (int i = 0; i < operands->length(); i++) {
+ LOperand* pointer = operands->at(i);
+ if (pointer->IsStackSlot()) {
+ safepoint.DefinePointerSlot(pointer->index());
+ } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
+ safepoint.DefinePointerRegister(ToRegister(pointer));
+ }
+ }
+ if (kind & Safepoint::kWithRegisters) {
+ // Register cp always contains a pointer to the context.
+ safepoint.DefinePointerRegister(cp);
+ }
+}
+
+
+void LCodeGen::RecordSafepoint(LPointerMap* pointers,
+ Safepoint::DeoptMode deopt_mode) {
+ RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
+}
+
+
+void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
+ LPointerMap empty_pointers(RelocInfo::kNoPosition);
+ RecordSafepoint(&empty_pointers, deopt_mode);
+}
+
+
+void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
+ int arguments,
+ Safepoint::DeoptMode deopt_mode) {
+ RecordSafepoint(
+ pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
+}
+
+
+void LCodeGen::RecordSafepointWithRegistersAndDoubles(
+ LPointerMap* pointers,
+ int arguments,
+ Safepoint::DeoptMode deopt_mode) {
+ RecordSafepoint(
+ pointers, Safepoint::kWithRegistersAndDoubles, arguments, deopt_mode);
+}
+
+
+void LCodeGen::RecordPosition(int position) {
+ if (position == RelocInfo::kNoPosition) return;
+ masm()->positions_recorder()->RecordPosition(position);
+}
+
+
+void LCodeGen::DoLabel(LLabel* label) {
+ if (label->is_loop_header()) {
+ Comment(";;; B%d - LOOP entry", label->block_id());
+ } else {
+ Comment(";;; B%d", label->block_id());
+ }
+ __ bind(label->label());
+ current_block_ = label->block_id();
+ DoGap(label);
+}
+
+
+void LCodeGen::DoParallelMove(LParallelMove* move) {
+ resolver_.Resolve(move);
+}
+
+
+void LCodeGen::DoGap(LGap* gap) {
+ for (int i = LGap::FIRST_INNER_POSITION;
+ i <= LGap::LAST_INNER_POSITION;
+ i++) {
+ LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
+ LParallelMove* move = gap->GetParallelMove(inner_pos);
+ if (move != NULL) DoParallelMove(move);
+ }
+}
+
+
+void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
+ DoGap(instr);
+}
+
+
+void LCodeGen::DoParameter(LParameter* instr) {
+ // Nothing to do.
+}
+
+
+void LCodeGen::DoCallStub(LCallStub* instr) {
+ ASSERT(ToRegister(instr->result()).is(v0));
+ switch (instr->hydrogen()->major_key()) {
+ case CodeStub::RegExpConstructResult: {
+ RegExpConstructResultStub stub;
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ break;
+ }
+ case CodeStub::RegExpExec: {
+ RegExpExecStub stub;
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ break;
+ }
+ case CodeStub::SubString: {
+ SubStringStub stub;
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ break;
+ }
+ case CodeStub::NumberToString: {
+ NumberToStringStub stub;
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ break;
+ }
+ case CodeStub::StringAdd: {
+ StringAddStub stub(NO_STRING_ADD_FLAGS);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ break;
+ }
+ case CodeStub::StringCompare: {
+ StringCompareStub stub;
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ break;
+ }
+ case CodeStub::TranscendentalCache: {
+ __ lw(a0, MemOperand(sp, 0));
+ TranscendentalCacheStub stub(instr->transcendental_type(),
+ TranscendentalCacheStub::TAGGED);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
+ // Nothing to do.
+}
+
+
+void LCodeGen::DoModI(LModI* instr) {
+ Register scratch = scratch0();
+ const Register left = ToRegister(instr->InputAt(0));
+ const Register result = ToRegister(instr->result());
+
+ Label done;
+
+ if (instr->hydrogen()->HasPowerOf2Divisor()) {
+ Register scratch = scratch0();
+ ASSERT(!left.is(scratch));
+ __ mov(scratch, left);
+ int32_t p2constant = HConstant::cast(
+ instr->hydrogen()->right())->Integer32Value();
+ ASSERT(p2constant != 0);
+ // Result always takes the sign of the dividend (left).
+ p2constant = abs(p2constant);
+
+ Label positive_dividend;
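+    // The subu below executes in the branch delay slot, i.e. whether or not
+    // the branch is taken.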
+ __ Branch(USE_DELAY_SLOT, &positive_dividend, ge, left, Operand(zero_reg));
+ __ subu(result, zero_reg, left);
+ __ And(result, result, p2constant - 1);
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ DeoptimizeIf(eq, instr->environment(), result, Operand(zero_reg));
+ }
+ __ Branch(USE_DELAY_SLOT, &done);
+ __ subu(result, zero_reg, result);
+ __ bind(&positive_dividend);
+ __ And(result, scratch, p2constant - 1);
+ } else {
+ // div runs in the background while we check for special cases.
+ Register right = EmitLoadRegister(instr->InputAt(1), scratch);
+ __ div(left, right);
+
+ // Check for x % 0.
+ if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
+ DeoptimizeIf(eq, instr->environment(), right, Operand(zero_reg));
+ }
+
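+    // The mfhi below is in the branch delay slot and executes regardless;
+    // it moves the remainder from hi into result.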
+ __ Branch(USE_DELAY_SLOT, &done, ge, left, Operand(zero_reg));
+ __ mfhi(result);
+
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ DeoptimizeIf(eq, instr->environment(), result, Operand(zero_reg));
+ }
+ }
+ __ bind(&done);
+}
+
+
+void LCodeGen::DoDivI(LDivI* instr) {
+ const Register left = ToRegister(instr->InputAt(0));
+ const Register right = ToRegister(instr->InputAt(1));
+ const Register result = ToRegister(instr->result());
+
+ // On MIPS div is asynchronous - it will run in the background while we
+ // check for special cases.
+ __ div(left, right);
+
+ // Check for x / 0.
+ if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
+ DeoptimizeIf(eq, instr->environment(), right, Operand(zero_reg));
+ }
+
+ // Check for (0 / -x) that will produce negative zero.
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ Label left_not_zero;
+ __ Branch(&left_not_zero, ne, left, Operand(zero_reg));
+ DeoptimizeIf(lt, instr->environment(), right, Operand(zero_reg));
+ __ bind(&left_not_zero);
+ }
+
+ // Check for (-kMinInt / -1).
+ if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
+ Label left_not_min_int;
+ __ Branch(&left_not_min_int, ne, left, Operand(kMinInt));
+ DeoptimizeIf(eq, instr->environment(), right, Operand(-1));
+ __ bind(&left_not_min_int);
+ }
+
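+  // After div, the quotient is in lo and the remainder is in hi. A non-zero
+  // remainder means the result is not an integer, so deoptimize.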
+ __ mfhi(result);
+ DeoptimizeIf(ne, instr->environment(), result, Operand(zero_reg));
+ __ mflo(result);
+}
+
+
+void LCodeGen::DoMulI(LMulI* instr) {
+ Register scratch = scratch0();
+ Register result = ToRegister(instr->result());
+ // Note that result may alias left.
+ Register left = ToRegister(instr->InputAt(0));
+ LOperand* right_op = instr->InputAt(1);
+
+ bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
+ bool bailout_on_minus_zero =
+ instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
+
+ if (right_op->IsConstantOperand() && !can_overflow) {
+ // Use optimized code for specific constants.
+ int32_t constant = ToInteger32(LConstantOperand::cast(right_op));
+
+ if (bailout_on_minus_zero && (constant < 0)) {
+      // The case of a zero constant is handled separately below.
+      // If the constant is negative and left is zero, the result should be -0.
+ DeoptimizeIf(eq, instr->environment(), left, Operand(zero_reg));
+ }
+
+ switch (constant) {
+ case -1:
+ __ Subu(result, zero_reg, left);
+ break;
+ case 0:
+ if (bailout_on_minus_zero) {
+          // If left is strictly negative and the constant is zero, the
+          // result is -0. Deoptimize if required, otherwise return 0.
+ DeoptimizeIf(lt, instr->environment(), left, Operand(zero_reg));
+ }
+ __ mov(result, zero_reg);
+ break;
+ case 1:
+ // Nothing to do.
+ __ Move(result, left);
+ break;
+ default:
+ // Multiplying by powers of two and powers of two plus or minus
+ // one can be done faster with shifted operands.
+ // For other constants we emit standard code.
+ int32_t mask = constant >> 31;
+ uint32_t constant_abs = (constant + mask) ^ mask;
+
+ if (IsPowerOf2(constant_abs) ||
+ IsPowerOf2(constant_abs - 1) ||
+ IsPowerOf2(constant_abs + 1)) {
+ if (IsPowerOf2(constant_abs)) {
+ int32_t shift = WhichPowerOf2(constant_abs);
+ __ sll(result, left, shift);
+ } else if (IsPowerOf2(constant_abs - 1)) {
+ int32_t shift = WhichPowerOf2(constant_abs - 1);
+ __ sll(result, left, shift);
+ __ Addu(result, result, left);
+ } else if (IsPowerOf2(constant_abs + 1)) {
+ int32_t shift = WhichPowerOf2(constant_abs + 1);
+ __ sll(result, left, shift);
+ __ Subu(result, result, left);
+ }
+
+        // Correct the sign of the result if the constant is negative.
+ if (constant < 0) {
+ __ Subu(result, zero_reg, result);
+ }
+
+ } else {
+ // Generate standard code.
+ __ li(at, constant);
+ __ mul(result, left, at);
+ }
+ }
+
+ } else {
+ Register right = EmitLoadRegister(right_op, scratch);
+ if (bailout_on_minus_zero) {
+ __ Or(ToRegister(instr->TempAt(0)), left, right);
+ }
+
+ if (can_overflow) {
+ // hi:lo = left * right.
+ __ mult(left, right);
+ __ mfhi(scratch);
+ __ mflo(result);
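+      // The 64-bit product fits in 32 bits iff the high word equals the
+      // sign extension of the low word.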
+ __ sra(at, result, 31);
+ DeoptimizeIf(ne, instr->environment(), scratch, Operand(at));
+ } else {
+ __ mul(result, left, right);
+ }
+
+ if (bailout_on_minus_zero) {
+ // Bail out if the result is supposed to be negative zero.
+ Label done;
+ __ Branch(&done, ne, result, Operand(zero_reg));
+ DeoptimizeIf(lt,
+ instr->environment(),
+ ToRegister(instr->TempAt(0)),
+ Operand(zero_reg));
+ __ bind(&done);
+ }
+ }
+}
+
+
+void LCodeGen::DoBitI(LBitI* instr) {
+ LOperand* left_op = instr->InputAt(0);
+ LOperand* right_op = instr->InputAt(1);
+ ASSERT(left_op->IsRegister());
+ Register left = ToRegister(left_op);
+ Register result = ToRegister(instr->result());
+ Operand right(no_reg);
+
+ if (right_op->IsStackSlot() || right_op->IsArgument()) {
+ right = Operand(EmitLoadRegister(right_op, at));
+ } else {
+ ASSERT(right_op->IsRegister() || right_op->IsConstantOperand());
+ right = ToOperand(right_op);
+ }
+
+ switch (instr->op()) {
+ case Token::BIT_AND:
+ __ And(result, left, right);
+ break;
+ case Token::BIT_OR:
+ __ Or(result, left, right);
+ break;
+ case Token::BIT_XOR:
+ __ Xor(result, left, right);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+}
+
+
+void LCodeGen::DoShiftI(LShiftI* instr) {
+ // Both 'left' and 'right' are "used at start" (see LCodeGen::DoShift), so
+ // result may alias either of them.
+ LOperand* right_op = instr->InputAt(1);
+ Register left = ToRegister(instr->InputAt(0));
+ Register result = ToRegister(instr->result());
+
+ if (right_op->IsRegister()) {
+    // No need to mask the right operand on MIPS; masking is built into the
+    // variable shift instructions.
+ switch (instr->op()) {
+ case Token::SAR:
+ __ srav(result, left, ToRegister(right_op));
+ break;
+ case Token::SHR:
+ __ srlv(result, left, ToRegister(right_op));
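+        // A zero shift amount can leave the sign bit set; such a value is
+        // not representable as a signed int32, so deoptimize.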
+ if (instr->can_deopt()) {
+ DeoptimizeIf(lt, instr->environment(), result, Operand(zero_reg));
+ }
+ break;
+ case Token::SHL:
+ __ sllv(result, left, ToRegister(right_op));
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ } else {
+ // Mask the right_op operand.
+ int value = ToInteger32(LConstantOperand::cast(right_op));
+ uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
+ switch (instr->op()) {
+ case Token::SAR:
+ if (shift_count != 0) {
+ __ sra(result, left, shift_count);
+ } else {
+ __ Move(result, left);
+ }
+ break;
+ case Token::SHR:
+ if (shift_count != 0) {
+ __ srl(result, left, shift_count);
+ } else {
+ if (instr->can_deopt()) {
+ __ And(at, left, Operand(0x80000000));
+ DeoptimizeIf(ne, instr->environment(), at, Operand(zero_reg));
+ }
+ __ Move(result, left);
+ }
+ break;
+ case Token::SHL:
+ if (shift_count != 0) {
+ __ sll(result, left, shift_count);
+ } else {
+ __ Move(result, left);
+ }
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ }
+}
+
+
+void LCodeGen::DoSubI(LSubI* instr) {
+ LOperand* left = instr->InputAt(0);
+ LOperand* right = instr->InputAt(1);
+ LOperand* result = instr->result();
+ bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
+
+ if (!can_overflow) {
+ if (right->IsStackSlot() || right->IsArgument()) {
+ Register right_reg = EmitLoadRegister(right, at);
+ __ Subu(ToRegister(result), ToRegister(left), Operand(right_reg));
+ } else {
+ ASSERT(right->IsRegister() || right->IsConstantOperand());
+ __ Subu(ToRegister(result), ToRegister(left), ToOperand(right));
+ }
+ } else { // can_overflow.
+ Register overflow = scratch0();
+ Register scratch = scratch1();
+ if (right->IsStackSlot() ||
+ right->IsArgument() ||
+ right->IsConstantOperand()) {
+ Register right_reg = EmitLoadRegister(right, scratch);
+ __ SubuAndCheckForOverflow(ToRegister(result),
+ ToRegister(left),
+ right_reg,
+ overflow); // Reg at also used as scratch.
+ } else {
+ ASSERT(right->IsRegister());
+      // Because the overflow-check macros do not support constant operands,
+      // the IsConstantOperand case is handled by the clause above.
+ __ SubuAndCheckForOverflow(ToRegister(result),
+ ToRegister(left),
+ ToRegister(right),
+ overflow); // Reg at also used as scratch.
+ }
+ DeoptimizeIf(lt, instr->environment(), overflow, Operand(zero_reg));
+ }
+}
+
+
+void LCodeGen::DoConstantI(LConstantI* instr) {
+ ASSERT(instr->result()->IsRegister());
+ __ li(ToRegister(instr->result()), Operand(instr->value()));
+}
+
+
+void LCodeGen::DoConstantD(LConstantD* instr) {
+ ASSERT(instr->result()->IsDoubleRegister());
+ DoubleRegister result = ToDoubleRegister(instr->result());
+ double v = instr->value();
+ __ Move(result, v);
+}
+
+
+void LCodeGen::DoConstantT(LConstantT* instr) {
+ ASSERT(instr->result()->IsRegister());
+ __ li(ToRegister(instr->result()), Operand(instr->value()));
+}
+
+
+void LCodeGen::DoJSArrayLength(LJSArrayLength* instr) {
+ Register result = ToRegister(instr->result());
+ Register array = ToRegister(instr->InputAt(0));
+ __ lw(result, FieldMemOperand(array, JSArray::kLengthOffset));
+}
+
+
+void LCodeGen::DoFixedArrayBaseLength(LFixedArrayBaseLength* instr) {
+ Register result = ToRegister(instr->result());
+ Register array = ToRegister(instr->InputAt(0));
+ __ lw(result, FieldMemOperand(array, FixedArrayBase::kLengthOffset));
+}
+
+
+void LCodeGen::DoElementsKind(LElementsKind* instr) {
+ Register result = ToRegister(instr->result());
+ Register input = ToRegister(instr->InputAt(0));
+
+ // Load map into |result|.
+ __ lw(result, FieldMemOperand(input, HeapObject::kMapOffset));
+ // Load the map's "bit field 2" into |result|. We only need the first byte,
+ // but the following bit field extraction takes care of that anyway.
+ __ lbu(result, FieldMemOperand(result, Map::kBitField2Offset));
+ // Retrieve elements_kind from bit field 2.
+ __ Ext(result, result, Map::kElementsKindShift, Map::kElementsKindBitCount);
+}
+
+
+void LCodeGen::DoValueOf(LValueOf* instr) {
+ Register input = ToRegister(instr->InputAt(0));
+ Register result = ToRegister(instr->result());
+ Register map = ToRegister(instr->TempAt(0));
+ Label done;
+
+ // If the object is a smi return the object.
+ __ Move(result, input);
+ __ JumpIfSmi(input, &done);
+
+ // If the object is not a value type, return the object.
+ __ GetObjectType(input, map, map);
+ __ Branch(&done, ne, map, Operand(JS_VALUE_TYPE));
+ __ lw(result, FieldMemOperand(input, JSValue::kValueOffset));
+
+ __ bind(&done);
+}
+
+
+void LCodeGen::DoBitNotI(LBitNotI* instr) {
+ Register input = ToRegister(instr->InputAt(0));
+ Register result = ToRegister(instr->result());
+ __ Nor(result, zero_reg, Operand(input));
+}
+
+
+void LCodeGen::DoThrow(LThrow* instr) {
+ Register input_reg = EmitLoadRegister(instr->InputAt(0), at);
+ __ push(input_reg);
+ CallRuntime(Runtime::kThrow, 1, instr);
+
+ if (FLAG_debug_code) {
+ __ stop("Unreachable code.");
+ }
+}
+
+
+void LCodeGen::DoAddI(LAddI* instr) {
+ LOperand* left = instr->InputAt(0);
+ LOperand* right = instr->InputAt(1);
+ LOperand* result = instr->result();
+ bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
+
+ if (!can_overflow) {
+ if (right->IsStackSlot() || right->IsArgument()) {
+ Register right_reg = EmitLoadRegister(right, at);
+ __ Addu(ToRegister(result), ToRegister(left), Operand(right_reg));
+ } else {
+ ASSERT(right->IsRegister() || right->IsConstantOperand());
+ __ Addu(ToRegister(result), ToRegister(left), ToOperand(right));
+ }
+ } else { // can_overflow.
+ Register overflow = scratch0();
+ Register scratch = scratch1();
+ if (right->IsStackSlot() ||
+ right->IsArgument() ||
+ right->IsConstantOperand()) {
+ Register right_reg = EmitLoadRegister(right, scratch);
+ __ AdduAndCheckForOverflow(ToRegister(result),
+ ToRegister(left),
+ right_reg,
+ overflow); // Reg at also used as scratch.
+ } else {
+ ASSERT(right->IsRegister());
+      // Because the overflow-check macros do not support constant operands,
+      // the IsConstantOperand case is handled by the clause above.
+ __ AdduAndCheckForOverflow(ToRegister(result),
+ ToRegister(left),
+ ToRegister(right),
+ overflow); // Reg at also used as scratch.
+ }
+ DeoptimizeIf(lt, instr->environment(), overflow, Operand(zero_reg));
+ }
+}
+
+
+void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
+ DoubleRegister left = ToDoubleRegister(instr->InputAt(0));
+ DoubleRegister right = ToDoubleRegister(instr->InputAt(1));
+ DoubleRegister result = ToDoubleRegister(instr->result());
+ switch (instr->op()) {
+ case Token::ADD:
+ __ add_d(result, left, right);
+ break;
+ case Token::SUB:
+ __ sub_d(result, left, right);
+ break;
+ case Token::MUL:
+ __ mul_d(result, left, right);
+ break;
+ case Token::DIV:
+ __ div_d(result, left, right);
+ break;
+ case Token::MOD: {
+ // Save a0-a3 on the stack.
+ RegList saved_regs = a0.bit() | a1.bit() | a2.bit() | a3.bit();
+ __ MultiPush(saved_regs);
+
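+      // Set up a C call with zero integer arguments and two double arguments.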
+ __ PrepareCallCFunction(0, 2, scratch0());
+ __ SetCallCDoubleArguments(left, right);
+ __ CallCFunction(
+ ExternalReference::double_fp_operation(Token::MOD, isolate()),
+ 0, 2);
+      // Move the result into the double result register.
+ __ GetCFunctionDoubleResult(result);
+
+      // Restore the saved registers.
+ __ MultiPop(saved_regs);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
+ }
+}
+
+
+void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
+ ASSERT(ToRegister(instr->InputAt(0)).is(a1));
+ ASSERT(ToRegister(instr->InputAt(1)).is(a0));
+ ASSERT(ToRegister(instr->result()).is(v0));
+
+ BinaryOpStub stub(instr->op(), NO_OVERWRITE);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+  // Other architectures use a nop here to signal that there is no inlined
+  // patchable code. MIPS does not need the nop, since our marker
+  // instruction (andi zero_reg) never appears in normal code.
+}
+
+
+int LCodeGen::GetNextEmittedBlock(int block) {
+ for (int i = block + 1; i < graph()->blocks()->length(); ++i) {
+ LLabel* label = chunk_->GetLabel(i);
+ if (!label->HasReplacement()) return i;
+ }
+ return -1;
+}
+
+
+void LCodeGen::EmitBranch(int left_block, int right_block,
+ Condition cc, Register src1, const Operand& src2) {
+ int next_block = GetNextEmittedBlock(current_block_);
+ right_block = chunk_->LookupDestination(right_block);
+ left_block = chunk_->LookupDestination(left_block);
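+  // When one target is the fall-through block, a single conditional branch
+  // to the other target suffices.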
+ if (right_block == left_block) {
+ EmitGoto(left_block);
+ } else if (left_block == next_block) {
+ __ Branch(chunk_->GetAssemblyLabel(right_block),
+ NegateCondition(cc), src1, src2);
+ } else if (right_block == next_block) {
+ __ Branch(chunk_->GetAssemblyLabel(left_block), cc, src1, src2);
+ } else {
+ __ Branch(chunk_->GetAssemblyLabel(left_block), cc, src1, src2);
+ __ Branch(chunk_->GetAssemblyLabel(right_block));
+ }
+}
+
+
+void LCodeGen::EmitBranchF(int left_block, int right_block,
+ Condition cc, FPURegister src1, FPURegister src2) {
+ int next_block = GetNextEmittedBlock(current_block_);
+ right_block = chunk_->LookupDestination(right_block);
+ left_block = chunk_->LookupDestination(left_block);
+ if (right_block == left_block) {
+ EmitGoto(left_block);
+ } else if (left_block == next_block) {
+ __ BranchF(chunk_->GetAssemblyLabel(right_block), NULL,
+ NegateCondition(cc), src1, src2);
+ } else if (right_block == next_block) {
+ __ BranchF(chunk_->GetAssemblyLabel(left_block), NULL, cc, src1, src2);
+ } else {
+ __ BranchF(chunk_->GetAssemblyLabel(left_block), NULL, cc, src1, src2);
+ __ Branch(chunk_->GetAssemblyLabel(right_block));
+ }
+}
+
+
+void LCodeGen::DoBranch(LBranch* instr) {
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+ Representation r = instr->hydrogen()->value()->representation();
+ if (r.IsInteger32()) {
+ Register reg = ToRegister(instr->InputAt(0));
+ EmitBranch(true_block, false_block, ne, reg, Operand(zero_reg));
+ } else if (r.IsDouble()) {
+ DoubleRegister reg = ToDoubleRegister(instr->InputAt(0));
+ // Test the double value. Zero and NaN are false.
+ EmitBranchF(true_block, false_block, ne, reg, kDoubleRegZero);
+ } else {
+ ASSERT(r.IsTagged());
+ Register reg = ToRegister(instr->InputAt(0));
+ HType type = instr->hydrogen()->value()->type();
+ if (type.IsBoolean()) {
+ __ LoadRoot(at, Heap::kTrueValueRootIndex);
+ EmitBranch(true_block, false_block, eq, reg, Operand(at));
+ } else if (type.IsSmi()) {
+ EmitBranch(true_block, false_block, ne, reg, Operand(zero_reg));
+ } else {
+ Label* true_label = chunk_->GetAssemblyLabel(true_block);
+ Label* false_label = chunk_->GetAssemblyLabel(false_block);
+
+ ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
+ // Avoid deopts in the case where we've never executed this path before.
+ if (expected.IsEmpty()) expected = ToBooleanStub::all_types();
+
+ if (expected.Contains(ToBooleanStub::UNDEFINED)) {
+ // undefined -> false.
+ __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+ __ Branch(false_label, eq, reg, Operand(at));
+ }
+ if (expected.Contains(ToBooleanStub::BOOLEAN)) {
+ // Boolean -> its value.
+ __ LoadRoot(at, Heap::kTrueValueRootIndex);
+ __ Branch(true_label, eq, reg, Operand(at));
+ __ LoadRoot(at, Heap::kFalseValueRootIndex);
+ __ Branch(false_label, eq, reg, Operand(at));
+ }
+ if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
+ // 'null' -> false.
+ __ LoadRoot(at, Heap::kNullValueRootIndex);
+ __ Branch(false_label, eq, reg, Operand(at));
+ }
+
+ if (expected.Contains(ToBooleanStub::SMI)) {
+ // Smis: 0 -> false, all other -> true.
+ __ Branch(false_label, eq, reg, Operand(zero_reg));
+ __ JumpIfSmi(reg, true_label);
+ } else if (expected.NeedsMap()) {
+ // If we need a map later and have a Smi -> deopt.
+ __ And(at, reg, Operand(kSmiTagMask));
+ DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
+ }
+
+ const Register map = scratch0();
+ if (expected.NeedsMap()) {
+ __ lw(map, FieldMemOperand(reg, HeapObject::kMapOffset));
+ if (expected.CanBeUndetectable()) {
+ // Undetectable -> false.
+ __ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset));
+ __ And(at, at, Operand(1 << Map::kIsUndetectable));
+ __ Branch(false_label, ne, at, Operand(zero_reg));
+ }
+ }
+
+ if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
+ // spec object -> true.
+ __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
+ __ Branch(true_label, ge, at, Operand(FIRST_SPEC_OBJECT_TYPE));
+ }
+
+ if (expected.Contains(ToBooleanStub::STRING)) {
+ // String value -> false iff empty.
+ Label not_string;
+ __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
+      __ Branch(&not_string, ge, at, Operand(FIRST_NONSTRING_TYPE));
+ __ lw(at, FieldMemOperand(reg, String::kLengthOffset));
+ __ Branch(true_label, ne, at, Operand(zero_reg));
+ __ Branch(false_label);
+ __ bind(&not_string);
+ }
+
+ if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
+ // heap number -> false iff +0, -0, or NaN.
+ DoubleRegister dbl_scratch = double_scratch0();
+ Label not_heap_number;
+ __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
+ __ Branch(&not_heap_number, ne, map, Operand(at));
+ __ ldc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
+ __ BranchF(true_label, false_label, ne, dbl_scratch, kDoubleRegZero);
+ // Falls through if dbl_scratch == 0.
+ __ Branch(false_label);
+ __ bind(&not_heap_number);
+ }
+
+ // We've seen something for the first time -> deopt.
+ DeoptimizeIf(al, instr->environment(), zero_reg, Operand(zero_reg));
+ }
+ }
+}
+
+
+void LCodeGen::EmitGoto(int block) {
+ block = chunk_->LookupDestination(block);
+ int next_block = GetNextEmittedBlock(current_block_);
+ if (block != next_block) {
+ __ jmp(chunk_->GetAssemblyLabel(block));
+ }
+}
+
+
+void LCodeGen::DoGoto(LGoto* instr) {
+ EmitGoto(instr->block_id());
+}
+
+
+Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
+ Condition cond = kNoCondition;
+ switch (op) {
+ case Token::EQ:
+ case Token::EQ_STRICT:
+ cond = eq;
+ break;
+ case Token::LT:
+ cond = is_unsigned ? lo : lt;
+ break;
+ case Token::GT:
+ cond = is_unsigned ? hi : gt;
+ break;
+ case Token::LTE:
+ cond = is_unsigned ? ls : le;
+ break;
+ case Token::GTE:
+ cond = is_unsigned ? hs : ge;
+ break;
+ case Token::IN:
+ case Token::INSTANCEOF:
+ default:
+ UNREACHABLE();
+ }
+ return cond;
+}
+
+
+void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
+ LOperand* left = instr->InputAt(0);
+ LOperand* right = instr->InputAt(1);
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
+
+ Condition cond = TokenToCondition(instr->op(), false);
+
+ if (left->IsConstantOperand() && right->IsConstantOperand()) {
+ // We can statically evaluate the comparison.
+ double left_val = ToDouble(LConstantOperand::cast(left));
+ double right_val = ToDouble(LConstantOperand::cast(right));
+ int next_block =
+ EvalComparison(instr->op(), left_val, right_val) ? true_block
+ : false_block;
+ EmitGoto(next_block);
+ } else {
+ if (instr->is_double()) {
+ // Compare left and right as doubles and load the
+ // resulting flags into the normal status register.
+ FPURegister left_reg = ToDoubleRegister(left);
+ FPURegister right_reg = ToDoubleRegister(right);
+
+ // If a NaN is involved, i.e. the result is unordered,
+ // jump to the false block label.
+ __ BranchF(NULL, chunk_->GetAssemblyLabel(false_block), eq,
+ left_reg, right_reg);
+
+ EmitBranchF(true_block, false_block, cond, left_reg, right_reg);
+ } else {
+ Register cmp_left;
+ Operand cmp_right = Operand(0);
+
+ if (right->IsConstantOperand()) {
+ cmp_left = ToRegister(left);
+ cmp_right = Operand(ToInteger32(LConstantOperand::cast(right)));
+ } else if (left->IsConstantOperand()) {
+ cmp_left = ToRegister(right);
+ cmp_right = Operand(ToInteger32(LConstantOperand::cast(left)));
+ // We transposed the operands. Reverse the condition.
+ cond = ReverseCondition(cond);
+ } else {
+ cmp_left = ToRegister(left);
+ cmp_right = Operand(ToRegister(right));
+ }
+
+ EmitBranch(true_block, false_block, cond, cmp_left, cmp_right);
+ }
+ }
+}
+
+
+void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
+ Register left = ToRegister(instr->InputAt(0));
+ Register right = ToRegister(instr->InputAt(1));
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
+
+ EmitBranch(true_block, false_block, eq, left, Operand(right));
+}
+
+
+void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) {
+ Register left = ToRegister(instr->InputAt(0));
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+ EmitBranch(true_block, false_block, eq, left,
+ Operand(instr->hydrogen()->right()));
+}
+
+
+void LCodeGen::DoIsNilAndBranch(LIsNilAndBranch* instr) {
+ Register scratch = scratch0();
+ Register reg = ToRegister(instr->InputAt(0));
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+ // If the expression is known to be untagged or a smi, then it's definitely
+ // not null, and it can't be an undetectable object.
+ if (instr->hydrogen()->representation().IsSpecialization() ||
+ instr->hydrogen()->type().IsSmi()) {
+ EmitGoto(false_block);
+ return;
+ }
+
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
+
+ Heap::RootListIndex nil_value = instr->nil() == kNullValue ?
+ Heap::kNullValueRootIndex :
+ Heap::kUndefinedValueRootIndex;
+ __ LoadRoot(at, nil_value);
+ if (instr->kind() == kStrictEquality) {
+ EmitBranch(true_block, false_block, eq, reg, Operand(at));
+ } else {
+ Heap::RootListIndex other_nil_value = instr->nil() == kNullValue ?
+ Heap::kUndefinedValueRootIndex :
+ Heap::kNullValueRootIndex;
+ Label* true_label = chunk_->GetAssemblyLabel(true_block);
+ Label* false_label = chunk_->GetAssemblyLabel(false_block);
+ __ Branch(USE_DELAY_SLOT, true_label, eq, reg, Operand(at));
+ __ LoadRoot(at, other_nil_value); // In the delay slot.
+ __ Branch(USE_DELAY_SLOT, true_label, eq, reg, Operand(at));
+ __ JumpIfSmi(reg, false_label); // In the delay slot.
+ // Check for undetectable objects by looking in the bit field in
+ // the map. The object has already been smi checked.
+ __ lw(scratch, FieldMemOperand(reg, HeapObject::kMapOffset));
+ __ lbu(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
+ __ And(scratch, scratch, 1 << Map::kIsUndetectable);
+ EmitBranch(true_block, false_block, ne, scratch, Operand(zero_reg));
+ }
+}
+
+
+Condition LCodeGen::EmitIsObject(Register input,
+ Register temp1,
+ Register temp2,
+ Label* is_not_object,
+ Label* is_object) {
+ __ JumpIfSmi(input, is_not_object);
+
+ __ LoadRoot(temp2, Heap::kNullValueRootIndex);
+ __ Branch(is_object, eq, input, Operand(temp2));
+
+ // Load map.
+ __ lw(temp1, FieldMemOperand(input, HeapObject::kMapOffset));
+ // Undetectable objects behave like undefined.
+ __ lbu(temp2, FieldMemOperand(temp1, Map::kBitFieldOffset));
+ __ And(temp2, temp2, Operand(1 << Map::kIsUndetectable));
+ __ Branch(is_not_object, ne, temp2, Operand(zero_reg));
+
+ // Load instance type and check that it is in object type range.
+ __ lbu(temp2, FieldMemOperand(temp1, Map::kInstanceTypeOffset));
+ __ Branch(is_not_object,
+ lt, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+
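+ // The upper bound check is left to the caller, which branches on the
+ // returned condition with temp2 against LAST_NONCALLABLE_SPEC_OBJECT_TYPE.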
+ return le;
+}
+
+
+void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
+ Register reg = ToRegister(instr->InputAt(0));
+ Register temp1 = ToRegister(instr->TempAt(0));
+ Register temp2 = scratch0();
+
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
+ Label* true_label = chunk_->GetAssemblyLabel(true_block);
+ Label* false_label = chunk_->GetAssemblyLabel(false_block);
+
+ Condition true_cond =
+ EmitIsObject(reg, temp1, temp2, false_label, true_label);
+
+ EmitBranch(true_block, false_block, true_cond, temp2,
+ Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
+}
+
+
+Condition LCodeGen::EmitIsString(Register input,
+ Register temp1,
+ Label* is_not_string) {
+ __ JumpIfSmi(input, is_not_string);
+ __ GetObjectType(input, temp1, temp1);
+
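+ // The caller completes the check by branching on the returned condition
+ // with temp1 (the instance type) against FIRST_NONSTRING_TYPE.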
+ return lt;
+}
+
+
+void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
+ Register reg = ToRegister(instr->InputAt(0));
+ Register temp1 = ToRegister(instr->TempAt(0));
+
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
+ Label* false_label = chunk_->GetAssemblyLabel(false_block);
+
+ Condition true_cond =
+ EmitIsString(reg, temp1, false_label);
+
+ EmitBranch(true_block, false_block, true_cond, temp1,
+ Operand(FIRST_NONSTRING_TYPE));
+}
+
+
+void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+ Register input_reg = EmitLoadRegister(instr->InputAt(0), at);
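+ // Smis have the low tag bit cleared, so ANDing with the tag mask
+ // yields zero exactly for Smis.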
+ __ And(at, input_reg, kSmiTagMask);
+ EmitBranch(true_block, false_block, eq, at, Operand(zero_reg));
+}
+
+
+void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
+ Register input = ToRegister(instr->InputAt(0));
+ Register temp = ToRegister(instr->TempAt(0));
+
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+ __ JumpIfSmi(input, chunk_->GetAssemblyLabel(false_block));
+ __ lw(temp, FieldMemOperand(input, HeapObject::kMapOffset));
+ __ lbu(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
+ __ And(at, temp, Operand(1 << Map::kIsUndetectable));
+ EmitBranch(true_block, false_block, ne, at, Operand(zero_reg));
+}
+
+
+static Condition ComputeCompareCondition(Token::Value op) {
+ switch (op) {
+ case Token::EQ_STRICT:
+ case Token::EQ:
+ return eq;
+ case Token::LT:
+ return lt;
+ case Token::GT:
+ return gt;
+ case Token::LTE:
+ return le;
+ case Token::GTE:
+ return ge;
+ default:
+ UNREACHABLE();
+ return kNoCondition;
+ }
+}
+
+
+void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
+ Token::Value op = instr->op();
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+ Handle<Code> ic = CompareIC::GetUninitialized(op);
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+
+ Condition condition = ComputeCompareCondition(op);
+
+ EmitBranch(true_block, false_block, condition, v0, Operand(zero_reg));
+}
+
+
+static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
+ InstanceType from = instr->from();
+ InstanceType to = instr->to();
+ if (from == FIRST_TYPE) return to;
+ ASSERT(from == to || to == LAST_TYPE);
+ return from;
+}
+
+
+static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
+ InstanceType from = instr->from();
+ InstanceType to = instr->to();
+ if (from == to) return eq;
+ if (to == LAST_TYPE) return hs;
+ if (from == FIRST_TYPE) return ls;
+ UNREACHABLE();
+ return eq;
+}
+
+
+void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
+ Register scratch = scratch0();
+ Register input = ToRegister(instr->InputAt(0));
+
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+ Label* false_label = chunk_->GetAssemblyLabel(false_block);
+
+ __ JumpIfSmi(input, false_label);
+
+ __ GetObjectType(input, scratch, scratch);
+ EmitBranch(true_block,
+ false_block,
+ BranchCondition(instr->hydrogen()),
+ scratch,
+ Operand(TestType(instr->hydrogen())));
+}
+
+
+void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
+ Register input = ToRegister(instr->InputAt(0));
+ Register result = ToRegister(instr->result());
+
+ if (FLAG_debug_code) {
+ __ AbortIfNotString(input);
+ }
+
+ __ lw(result, FieldMemOperand(input, String::kHashFieldOffset));
+ __ IndexFromHash(result, result);
+}
+
+
+void LCodeGen::DoHasCachedArrayIndexAndBranch(
+ LHasCachedArrayIndexAndBranch* instr) {
+ Register input = ToRegister(instr->InputAt(0));
+ Register scratch = scratch0();
+
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
+
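+ // The array index is cached in the hash field when all the bits of
+ // kContainsCachedArrayIndexMask are clear, hence the eq branch below.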
+ __ lw(scratch,
+ FieldMemOperand(input, String::kHashFieldOffset));
+ __ And(at, scratch, Operand(String::kContainsCachedArrayIndexMask));
+ EmitBranch(true_block, false_block, eq, at, Operand(zero_reg));
+}
+
+
+ // Branches to a label or falls through with the instance class-name address
+ // returned in the temp register, available for comparison by the caller.
+ // Trashes the temp registers, but not the input. Only input and temp2 may
+ // alias.
+void LCodeGen::EmitClassOfTest(Label* is_true,
+ Label* is_false,
+ Handle<String> class_name,
+ Register input,
+ Register temp,
+ Register temp2) {
+ ASSERT(!input.is(temp));
+ ASSERT(!temp.is(temp2)); // But input and temp2 may be the same register.
+ __ JumpIfSmi(input, is_false);
+
+ if (class_name->IsEqualTo(CStrVector("Function"))) {
+ // Assuming the following assertions, we can use the same compares to test
+ // for both being a function type and being in the object type range.
+ STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
+ STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
+ FIRST_SPEC_OBJECT_TYPE + 1);
+ STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
+ LAST_SPEC_OBJECT_TYPE - 1);
+ STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+
+ __ GetObjectType(input, temp, temp2);
+ __ Branch(is_false, lt, temp2, Operand(FIRST_SPEC_OBJECT_TYPE));
+ __ Branch(is_true, eq, temp2, Operand(FIRST_SPEC_OBJECT_TYPE));
+ __ Branch(is_true, eq, temp2, Operand(LAST_SPEC_OBJECT_TYPE));
+ } else {
+ // Faster code path to avoid two compares: subtract lower bound from the
+ // actual type and do a signed compare with the width of the type range.
+ __ GetObjectType(input, temp, temp2);
+ __ Subu(temp2, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+ __ Branch(is_false, gt, temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
+ FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+ }
+
+ // Now the instance type is in the range
+ // [FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, LAST_NONCALLABLE_SPEC_OBJECT_TYPE].
+ // Check if the constructor in the map is a function.
+ __ lw(temp, FieldMemOperand(temp, Map::kConstructorOffset));
+
+ // Objects with a non-function constructor have class 'Object'.
+ __ GetObjectType(temp, temp2, temp2);
+ if (class_name->IsEqualTo(CStrVector("Object"))) {
+ __ Branch(is_true, ne, temp2, Operand(JS_FUNCTION_TYPE));
+ } else {
+ __ Branch(is_false, ne, temp2, Operand(JS_FUNCTION_TYPE));
+ }
+
+ // temp now contains the constructor function. Grab the
+ // instance class name from there.
+ __ lw(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset));
+ __ lw(temp, FieldMemOperand(temp,
+ SharedFunctionInfo::kInstanceClassNameOffset));
+ // The class name we are testing against is a symbol because it's a literal.
+ // The name in the constructor is a symbol because of the way the context is
+ // booted. This routine isn't expected to work for random API-created
+ // classes and it doesn't have to because you can't access it with natives
+ // syntax. Since both sides are symbols it is sufficient to use an identity
+ // comparison.
+
+ // End with the address of this class_name instance in temp register.
+ // On MIPS, the caller must do the comparison with Handle<String> class_name.
+}
+
+
+void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
+ Register input = ToRegister(instr->InputAt(0));
+ Register temp = scratch0();
+ Register temp2 = ToRegister(instr->TempAt(0));
+ Handle<String> class_name = instr->hydrogen()->class_name();
+
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+ Label* true_label = chunk_->GetAssemblyLabel(true_block);
+ Label* false_label = chunk_->GetAssemblyLabel(false_block);
+
+ EmitClassOfTest(true_label, false_label, class_name, input, temp, temp2);
+
+ EmitBranch(true_block, false_block, eq, temp, Operand(class_name));
+}
+
+
+void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
+ Register reg = ToRegister(instr->InputAt(0));
+ Register temp = ToRegister(instr->TempAt(0));
+ int true_block = instr->true_block_id();
+ int false_block = instr->false_block_id();
+
+ __ lw(temp, FieldMemOperand(reg, HeapObject::kMapOffset));
+ EmitBranch(true_block, false_block, eq, temp, Operand(instr->map()));
+}
+
+
+void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
+ Label true_label, done;
+ ASSERT(ToRegister(instr->InputAt(0)).is(a0)); // Object is in a0.
+ ASSERT(ToRegister(instr->InputAt(1)).is(a1)); // Function is in a1.
+ Register result = ToRegister(instr->result());
+ ASSERT(result.is(v0));
+
+ InstanceofStub stub(InstanceofStub::kArgsInRegisters);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+
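+ // The stub returns zero when the object is an instance of the function.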
+ __ Branch(&true_label, eq, result, Operand(zero_reg));
+ __ li(result, Operand(factory()->false_value()));
+ __ Branch(&done);
+ __ bind(&true_label);
+ __ li(result, Operand(factory()->true_value()));
+ __ bind(&done);
+}
+
+
+void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
+ class DeferredInstanceOfKnownGlobal: public LDeferredCode {
+ public:
+ DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
+ LInstanceOfKnownGlobal* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() {
+ codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
+ }
+ virtual LInstruction* instr() { return instr_; }
+ Label* map_check() { return &map_check_; }
+
+ private:
+ LInstanceOfKnownGlobal* instr_;
+ Label map_check_;
+ };
+
+ DeferredInstanceOfKnownGlobal* deferred =
+ new DeferredInstanceOfKnownGlobal(this, instr);
+
+ Label done, false_result;
+ Register object = ToRegister(instr->InputAt(0));
+ Register temp = ToRegister(instr->TempAt(0));
+ Register result = ToRegister(instr->result());
+
+ ASSERT(object.is(a0));
+ ASSERT(result.is(v0));
+
+ // A Smi is not an instance of anything.
+ __ JumpIfSmi(object, &false_result);
+
+ // This is the inlined call site instanceof cache. The two occurrences of the
+ // hole value will be patched to the last map/result pair generated by the
+ // instanceof stub.
+ Label cache_miss;
+ Register map = temp;
+ __ lw(map, FieldMemOperand(object, HeapObject::kMapOffset));
+
+ Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
+ __ bind(deferred->map_check()); // Label for calculating code patching.
+ // We use Factory::the_hole_value() on purpose instead of loading from the
+ // root array to force relocation to be able to later patch with
+ // the cached map.
+ __ li(at, Operand(factory()->the_hole_value()), true);
+ __ Branch(&cache_miss, ne, map, Operand(at));
+ // We use Factory::the_hole_value() on purpose instead of loading from the
+ // root array to force relocation to be able to later patch
+ // with true or false.
+ __ li(result, Operand(factory()->the_hole_value()), true);
+ __ Branch(&done);
+
+ // The inlined call site cache did not match. Check null and string before
+ // calling the deferred code.
+ __ bind(&cache_miss);
+ // Null is not an instance of anything.
+ __ LoadRoot(temp, Heap::kNullValueRootIndex);
+ __ Branch(&false_result, eq, object, Operand(temp));
+
+ // String values are not instances of anything.
+ Condition cc = __ IsObjectStringType(object, temp, temp);
+ __ Branch(&false_result, cc, temp, Operand(zero_reg));
+
+ // Go to the deferred code.
+ __ Branch(deferred->entry());
+
+ __ bind(&false_result);
+ __ LoadRoot(result, Heap::kFalseValueRootIndex);
+
+ // Here result has either true or false. Deferred code also produces true or
+ // false object.
+ __ bind(deferred->exit());
+ __ bind(&done);
+}
+
+
+void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
+ Label* map_check) {
+ Register result = ToRegister(instr->result());
+ ASSERT(result.is(v0));
+
+ InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
+ flags = static_cast<InstanceofStub::Flags>(
+ flags | InstanceofStub::kArgsInRegisters);
+ flags = static_cast<InstanceofStub::Flags>(
+ flags | InstanceofStub::kCallSiteInlineCheck);
+ flags = static_cast<InstanceofStub::Flags>(
+ flags | InstanceofStub::kReturnTrueFalseObject);
+ InstanceofStub stub(flags);
+
+ PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+
+ // Get the temp register reserved by the instruction. This needs to be t0 as
+ // its slot in the pushed safepoint register set is used to communicate the
+ // offset to the location of the map check.
+ Register temp = ToRegister(instr->TempAt(0));
+ ASSERT(temp.is(t0));
+ __ li(InstanceofStub::right(), Operand(instr->function()));
+ static const int kAdditionalDelta = 7;
+ int delta = masm_->InstructionsGeneratedSince(map_check) + kAdditionalDelta;
+ Label before_push_delta;
+ __ bind(&before_push_delta);
+ {
+ Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
+ __ li(temp, Operand(delta * kPointerSize), true);
+ __ StoreToSafepointRegisterSlot(temp, temp);
+ }
+ CallCodeGeneric(stub.GetCode(),
+ RelocInfo::CODE_TARGET,
+ instr,
+ RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+ ASSERT(instr->HasDeoptimizationEnvironment());
+ LEnvironment* env = instr->deoptimization_environment();
+ safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
+ // Put the result value into the result register slot and
+ // restore all registers.
+ __ StoreToSafepointRegisterSlot(result, result);
+}
+
+
+void LCodeGen::DoCmpT(LCmpT* instr) {
+ Token::Value op = instr->op();
+
+ Handle<Code> ic = CompareIC::GetUninitialized(op);
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+ // On MIPS there is no need for a "no inlined smi code" marker (nop).
+
+ Condition condition = ComputeCompareCondition(op);
+ // A minor optimization that relies on LoadRoot always emitting one
+ // instruction.
+ Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm());
+ Label done;
+ __ Branch(USE_DELAY_SLOT, &done, condition, v0, Operand(zero_reg));
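+ // The true-value load below executes in the branch delay slot; when the
+ // branch is taken it is the last load, otherwise the false value
+ // overwrites it.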
+ __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
+ __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
+ ASSERT_EQ(3, masm()->InstructionsGeneratedSince(&done));
+ __ bind(&done);
+}
+
+
+void LCodeGen::DoReturn(LReturn* instr) {
+ if (FLAG_trace) {
+ // Push the return value on the stack as the parameter.
+ // Runtime::TraceExit returns its parameter in v0.
+ __ push(v0);
+ __ CallRuntime(Runtime::kTraceExit, 1);
+ }
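+ // Drop the parameters and the receiver (hence the + 1) from the stack.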
+ int32_t sp_delta = (GetParameterCount() + 1) * kPointerSize;
+ __ mov(sp, fp);
+ __ Pop(ra, fp);
+ __ Addu(sp, sp, Operand(sp_delta));
+ __ Jump(ra);
+}
+
+
+void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
+ Register result = ToRegister(instr->result());
+ __ li(at, Operand(Handle<Object>(instr->hydrogen()->cell())));
+ __ lw(result, FieldMemOperand(at, JSGlobalPropertyCell::kValueOffset));
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+ DeoptimizeIf(eq, instr->environment(), result, Operand(at));
+ }
+}
+
+
+void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
+ ASSERT(ToRegister(instr->global_object()).is(a0));
+ ASSERT(ToRegister(instr->result()).is(v0));
+
+ __ li(a2, Operand(instr->name()));
+ RelocInfo::Mode mode = instr->for_typeof() ? RelocInfo::CODE_TARGET
+ : RelocInfo::CODE_TARGET_CONTEXT;
+ Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
+ CallCode(ic, mode, instr);
+}
+
+
+void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
+ Register value = ToRegister(instr->InputAt(0));
+ Register scratch = scratch0();
+ Register scratch2 = ToRegister(instr->TempAt(0));
+
+ // Load the cell.
+ __ li(scratch, Operand(Handle<Object>(instr->hydrogen()->cell())));
+
+ // If the cell we are storing to contains the hole, it could have
+ // been deleted from the property dictionary. In that case, we need
+ // to update the property details in the property dictionary to mark
+ // it as no longer deleted.
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ __ lw(scratch2,
+ FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
+ __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+ DeoptimizeIf(eq, instr->environment(), scratch2, Operand(at));
+ }
+
+ // Store the value.
+ __ sw(value, FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
+
+ // Cells are always in the remembered set.
+ if (instr->hydrogen()->NeedsWriteBarrier()) {
+ HType type = instr->hydrogen()->value()->type();
+ SmiCheck check_needed =
+ type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+ __ RecordWriteField(scratch,
+ JSGlobalPropertyCell::kValueOffset,
+ value,
+ scratch2,
+ kRAHasBeenSaved,
+ kSaveFPRegs,
+ OMIT_REMEMBERED_SET,
+ check_needed);
+ }
+}
+
+
+void LCodeGen::DoStoreGlobalGeneric(LStoreGlobalGeneric* instr) {
+ ASSERT(ToRegister(instr->global_object()).is(a1));
+ ASSERT(ToRegister(instr->value()).is(a0));
+
+ __ li(a2, Operand(instr->name()));
+ Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
+ ? isolate()->builtins()->StoreIC_Initialize_Strict()
+ : isolate()->builtins()->StoreIC_Initialize();
+ CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
+}
+
+
+void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
+ Register context = ToRegister(instr->context());
+ Register result = ToRegister(instr->result());
+ __ lw(result, ContextOperand(context, instr->slot_index()));
+}
+
+
+void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
+ Register context = ToRegister(instr->context());
+ Register value = ToRegister(instr->value());
+ MemOperand target = ContextOperand(context, instr->slot_index());
+ __ sw(value, target);
+ if (instr->hydrogen()->NeedsWriteBarrier()) {
+ HType type = instr->hydrogen()->value()->type();
+ SmiCheck check_needed =
+ type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+ __ RecordWriteContextSlot(context,
+ target.offset(),
+ value,
+ scratch0(),
+ kRAHasBeenSaved,
+ kSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ check_needed);
+ }
+}
+
+
+void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
+ Register object = ToRegister(instr->InputAt(0));
+ Register result = ToRegister(instr->result());
+ if (instr->hydrogen()->is_in_object()) {
+ __ lw(result, FieldMemOperand(object, instr->hydrogen()->offset()));
+ } else {
+ __ lw(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
+ __ lw(result, FieldMemOperand(result, instr->hydrogen()->offset()));
+ }
+}
+
+
+void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
+ Register object,
+ Handle<Map> type,
+ Handle<String> name) {
+ LookupResult lookup(isolate());
+ type->LookupInDescriptors(NULL, *name, &lookup);
+ ASSERT(lookup.IsProperty() &&
+ (lookup.type() == FIELD || lookup.type() == CONSTANT_FUNCTION));
+ if (lookup.type() == FIELD) {
+ int index = lookup.GetLocalFieldIndexFromMap(*type);
+ int offset = index * kPointerSize;
+ if (index < 0) {
+ // Negative property indices are in-object properties, indexed
+ // from the end of the fixed part of the object.
+ __ lw(result, FieldMemOperand(object, offset + type->instance_size()));
+ } else {
+ // Non-negative property indices are in the properties array.
+ __ lw(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
+ __ lw(result, FieldMemOperand(result, offset + FixedArray::kHeaderSize));
+ }
+ } else {
+ Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*type));
+ LoadHeapObject(result, Handle<HeapObject>::cast(function));
+ }
+}
+
+
+void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) {
+ Register object = ToRegister(instr->object());
+ Register result = ToRegister(instr->result());
+ Register scratch = scratch0();
+ int map_count = instr->hydrogen()->types()->length();
+ Handle<String> name = instr->hydrogen()->name();
+ if (map_count == 0) {
+ ASSERT(instr->hydrogen()->need_generic());
+ __ li(a2, Operand(name));
+ Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+ } else {
+ Label done;
+ __ lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
+ for (int i = 0; i < map_count - 1; ++i) {
+ Handle<Map> map = instr->hydrogen()->types()->at(i);
+ Label next;
+ __ Branch(&next, ne, scratch, Operand(map));
+ EmitLoadFieldOrConstantFunction(result, object, map, name);
+ __ Branch(&done);
+ __ bind(&next);
+ }
+ Handle<Map> map = instr->hydrogen()->types()->last();
+ if (instr->hydrogen()->need_generic()) {
+ Label generic;
+ __ Branch(&generic, ne, scratch, Operand(map));
+ EmitLoadFieldOrConstantFunction(result, object, map, name);
+ __ Branch(&done);
+ __ bind(&generic);
+ __ li(a2, Operand(name));
+ Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+ } else {
+ DeoptimizeIf(ne, instr->environment(), scratch, Operand(map));
+ EmitLoadFieldOrConstantFunction(result, object, map, name);
+ }
+ __ bind(&done);
+ }
+}
+
+
+void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
+ ASSERT(ToRegister(instr->object()).is(a0));
+ ASSERT(ToRegister(instr->result()).is(v0));
+
+ // Name is always in a2.
+ __ li(a2, Operand(instr->name()));
+ Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
+ Register scratch = scratch0();
+ Register function = ToRegister(instr->function());
+ Register result = ToRegister(instr->result());
+
+ // Check that the function really is a function. Load map into the
+ // result register.
+ __ GetObjectType(function, result, scratch);
+ DeoptimizeIf(ne, instr->environment(), scratch, Operand(JS_FUNCTION_TYPE));
+
+ // Make sure that the function has an instance prototype.
+ Label non_instance;
+ __ lbu(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
+ __ And(scratch, scratch, Operand(1 << Map::kHasNonInstancePrototype));
+ __ Branch(&non_instance, ne, scratch, Operand(zero_reg));
+
+ // Get the prototype or initial map from the function.
+ __ lw(result,
+ FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
+
+ // Check that the function has a prototype or an initial map.
+ __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+ DeoptimizeIf(eq, instr->environment(), result, Operand(at));
+
+ // If the function does not have an initial map, we're done.
+ Label done;
+ __ GetObjectType(result, scratch, scratch);
+ __ Branch(&done, ne, scratch, Operand(MAP_TYPE));
+
+ // Get the prototype from the initial map.
+ __ lw(result, FieldMemOperand(result, Map::kPrototypeOffset));
+ __ Branch(&done);
+
+ // Non-instance prototype: Fetch prototype from constructor field
+ // in initial map.
+ __ bind(&non_instance);
+ __ lw(result, FieldMemOperand(result, Map::kConstructorOffset));
+
+ // All done.
+ __ bind(&done);
+}
+
+
+void LCodeGen::DoLoadElements(LLoadElements* instr) {
+ Register result = ToRegister(instr->result());
+ Register input = ToRegister(instr->InputAt(0));
+ Register scratch = scratch0();
+
+ __ lw(result, FieldMemOperand(input, JSObject::kElementsOffset));
+ if (FLAG_debug_code) {
+ Label done, fail;
+ __ lw(scratch, FieldMemOperand(result, HeapObject::kMapOffset));
+ __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
+ __ Branch(USE_DELAY_SLOT, &done, eq, scratch, Operand(at));
+ __ LoadRoot(at, Heap::kFixedCOWArrayMapRootIndex); // In the delay slot.
+ __ Branch(&done, eq, scratch, Operand(at));
+ // |scratch| still contains the map of the elements object.
+ __ lbu(scratch, FieldMemOperand(scratch, Map::kBitField2Offset));
+ __ Ext(scratch, scratch, Map::kElementsKindShift,
+ Map::kElementsKindBitCount);
+ __ Branch(&done, eq, scratch, Operand(FAST_ELEMENTS));
+ __ Branch(&fail, lt, scratch, Operand(FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND));
+ __ Branch(&done, le, scratch, Operand(LAST_EXTERNAL_ARRAY_ELEMENTS_KIND));
+ __ bind(&fail);
+ __ Abort("Check for fast or external elements failed.");
+ __ bind(&done);
+ }
+}
+
+
+void LCodeGen::DoLoadExternalArrayPointer(
+ LLoadExternalArrayPointer* instr) {
+ Register to_reg = ToRegister(instr->result());
+ Register from_reg = ToRegister(instr->InputAt(0));
+ __ lw(to_reg, FieldMemOperand(from_reg,
+ ExternalArray::kExternalPointerOffset));
+}
+
+
+void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
+ Register arguments = ToRegister(instr->arguments());
+ Register length = ToRegister(instr->length());
+ Register index = ToRegister(instr->index());
+ Register result = ToRegister(instr->result());
+
+ // Deopt if the index is not a valid argument index. The unsigned
+ // lower-or-same check also catches negative indices for free.
+
+ // TODO(plind): Should be optimized to do the sub before the
+ // DeoptimizeIf(), as is done on ARM. It would save us an instruction.
+ DeoptimizeIf(ls, instr->environment(), length, Operand(index));
+
+ // There are two words between the frame pointer and the last argument.
+ // Subtracting from length accounts for one of them; add one more.
+ __ subu(length, length, index);
+ __ Addu(length, length, Operand(1));
+ __ sll(length, length, kPointerSizeLog2);
+ __ Addu(at, arguments, Operand(length));
+ __ lw(result, MemOperand(at, 0));
+}
+
+
+void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
+ Register elements = ToRegister(instr->elements());
+ Register key = EmitLoadRegister(instr->key(), scratch0());
+ Register result = ToRegister(instr->result());
+ Register scratch = scratch0();
+
+ // Load the result.
+ __ sll(scratch, key, kPointerSizeLog2); // Key indexes words.
+ __ addu(scratch, elements, scratch);
+ __ lw(result, FieldMemOperand(scratch, FixedArray::kHeaderSize));
+
+ // Check for the hole value.
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
+ DeoptimizeIf(eq, instr->environment(), result, Operand(scratch));
+ }
+}
+
+
+void LCodeGen::DoLoadKeyedFastDoubleElement(
+ LLoadKeyedFastDoubleElement* instr) {
+ Register elements = ToRegister(instr->elements());
+ bool key_is_constant = instr->key()->IsConstantOperand();
+ Register key = no_reg;
+ DoubleRegister result = ToDoubleRegister(instr->result());
+ Register scratch = scratch0();
+
+ int shift_size = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
+ int constant_key = 0;
+ if (key_is_constant) {
+ constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+ if (constant_key & 0xF0000000) {
+ Abort("array index constant value too big.");
+ }
+ } else {
+ key = ToRegister(instr->key());
+ }
+
+ if (key_is_constant) {
+ __ Addu(elements, elements, Operand(constant_key * (1 << shift_size) +
+ FixedDoubleArray::kHeaderSize - kHeapObjectTag));
+ } else {
+ __ sll(scratch, key, shift_size);
+ __ Addu(elements, elements, Operand(scratch));
+ __ Addu(elements, elements,
+ Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
+ }
+
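+ // The hole is encoded as a NaN with a distinctive upper word. Load the
+ // word at offset 4 (the upper half on little-endian MIPS) and deopt if it
+ // matches kHoleNanUpper32.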
+ __ lw(scratch, MemOperand(elements, sizeof(kHoleNanLower32)));
+ DeoptimizeIf(eq, instr->environment(), scratch, Operand(kHoleNanUpper32));
+
+ __ ldc1(result, MemOperand(elements));
+}
+
+
+void LCodeGen::DoLoadKeyedSpecializedArrayElement(
+ LLoadKeyedSpecializedArrayElement* instr) {
+ Register external_pointer = ToRegister(instr->external_pointer());
+ Register key = no_reg;
+ ElementsKind elements_kind = instr->elements_kind();
+ bool key_is_constant = instr->key()->IsConstantOperand();
+ int constant_key = 0;
+ if (key_is_constant) {
+ constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+ if (constant_key & 0xF0000000) {
+ Abort("array index constant value too big.");
+ }
+ } else {
+ key = ToRegister(instr->key());
+ }
+ int shift_size = ElementsKindToShiftSize(elements_kind);
+
+ if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
+ elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
+ FPURegister result = ToDoubleRegister(instr->result());
+ if (key_is_constant) {
+ __ Addu(scratch0(), external_pointer, constant_key * (1 << shift_size));
+ } else {
+ __ sll(scratch0(), key, shift_size);
+ __ Addu(scratch0(), scratch0(), external_pointer);
+ }
+
+ if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
+ __ lwc1(result, MemOperand(scratch0()));
+ __ cvt_d_s(result, result);
+ } else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
+ __ ldc1(result, MemOperand(scratch0()));
+ }
+ } else {
+ Register result = ToRegister(instr->result());
+ Register scratch = scratch0();
+ MemOperand mem_operand(zero_reg);
+ if (key_is_constant) {
+ mem_operand = MemOperand(external_pointer,
+ constant_key * (1 << shift_size));
+ } else {
+ __ sll(scratch, key, shift_size);
+ __ Addu(scratch, scratch, external_pointer);
+ mem_operand = MemOperand(scratch);
+ }
+ switch (elements_kind) {
+ case EXTERNAL_BYTE_ELEMENTS:
+ __ lb(result, mem_operand);
+ break;
+ case EXTERNAL_PIXEL_ELEMENTS:
+ case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+ __ lbu(result, mem_operand);
+ break;
+ case EXTERNAL_SHORT_ELEMENTS:
+ __ lh(result, mem_operand);
+ break;
+ case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+ __ lhu(result, mem_operand);
+ break;
+ case EXTERNAL_INT_ELEMENTS:
+ __ lw(result, mem_operand);
+ break;
+ case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+ __ lw(result, mem_operand);
+ // TODO(danno): we could be more clever here, perhaps having a special
+ // version of the stub that detects if the overflow case actually
+ // happens, and generates code that returns a double rather than an int.
+ DeoptimizeIf(Ugreater_equal, instr->environment(),
+ result, Operand(0x80000000));
+ break;
+ case EXTERNAL_FLOAT_ELEMENTS:
+ case EXTERNAL_DOUBLE_ELEMENTS:
+ case FAST_DOUBLE_ELEMENTS:
+ case FAST_ELEMENTS:
+ case FAST_SMI_ONLY_ELEMENTS:
+ case DICTIONARY_ELEMENTS:
+ case NON_STRICT_ARGUMENTS_ELEMENTS:
+ UNREACHABLE();
+ break;
+ }
+ }
+}
+
+
+void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
+ ASSERT(ToRegister(instr->object()).is(a1));
+ ASSERT(ToRegister(instr->key()).is(a0));
+
+ Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
+ Register scratch = scratch0();
+ Register temp = scratch1();
+ Register result = ToRegister(instr->result());
+
+ // Check if the calling frame is an arguments adaptor frame.
+ Label done, adapted;
+ __ lw(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ lw(result, MemOperand(scratch, StandardFrameConstants::kContextOffset));
+ __ Xor(temp, result, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+
+ // Result is the frame pointer for the frame if not adapted and for the real
+ // frame below the adaptor frame if adapted.
+ __ movn(result, fp, temp); // move only if temp is not equal to zero (ne)
+ __ movz(result, scratch, temp); // move only if temp is equal to zero (eq)
+}
+
+
+void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
+ Register elem = ToRegister(instr->InputAt(0));
+ Register result = ToRegister(instr->result());
+
+ Label done;
+
+ // If there is no arguments adaptor frame, the number of arguments is fixed.
+ __ Addu(result, zero_reg, Operand(scope()->num_parameters()));
+ __ Branch(&done, eq, fp, Operand(elem));
+
+ // Arguments adaptor frame present. Get argument length from there.
+ __ lw(result, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ lw(result,
+ MemOperand(result, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ SmiUntag(result);
+
+ // Argument length is in result register.
+ __ bind(&done);
+}
+
+
+void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
+ Register receiver = ToRegister(instr->receiver());
+ Register function = ToRegister(instr->function());
+ Register length = ToRegister(instr->length());
+ Register elements = ToRegister(instr->elements());
+ Register scratch = scratch0();
+ ASSERT(receiver.is(a0)); // Used for parameter count.
+ ASSERT(function.is(a1)); // Required by InvokeFunction.
+ ASSERT(ToRegister(instr->result()).is(v0));
+
+ // If the receiver is null or undefined, we have to pass the global
+ // object as a receiver to normal functions. Values have to be
+ // passed unchanged to builtins and strict-mode functions.
+ Label global_object, receiver_ok;
+
+ // Do not transform the receiver to object for strict mode
+ // functions.
+ __ lw(scratch,
+ FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
+ __ lw(scratch,
+ FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
+
+ // Do not transform the receiver to object for builtins.
+ int32_t strict_mode_function_mask =
+ 1 << (SharedFunctionInfo::kStrictModeFunction + kSmiTagSize);
+ int32_t native_mask = 1 << (SharedFunctionInfo::kNative + kSmiTagSize);
+ __ And(scratch, scratch, Operand(strict_mode_function_mask | native_mask));
+ __ Branch(&receiver_ok, ne, scratch, Operand(zero_reg));
+
+ // Normal function. Replace undefined or null with global receiver.
+ __ LoadRoot(scratch, Heap::kNullValueRootIndex);
+ __ Branch(&global_object, eq, receiver, Operand(scratch));
+ __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
+ __ Branch(&global_object, eq, receiver, Operand(scratch));
+
+ // Deoptimize if the receiver is not a JS object.
+ __ And(scratch, receiver, Operand(kSmiTagMask));
+ DeoptimizeIf(eq, instr->environment(), scratch, Operand(zero_reg));
+
+ __ GetObjectType(receiver, scratch, scratch);
+ DeoptimizeIf(lt, instr->environment(),
+ scratch, Operand(FIRST_SPEC_OBJECT_TYPE));
+ __ Branch(&receiver_ok);
+
+ __ bind(&global_object);
+ __ lw(receiver, GlobalObjectOperand());
+ __ lw(receiver,
+ FieldMemOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
+ __ bind(&receiver_ok);
+
+ // Copy the arguments to this function possibly from the
+ // adaptor frame below it.
+ const uint32_t kArgumentsLimit = 1 * KB;
+ DeoptimizeIf(hi, instr->environment(), length, Operand(kArgumentsLimit));
+
+ // Push the receiver and use the register to keep the original
+ // number of arguments.
+ __ push(receiver);
+ __ Move(receiver, length);
+ // The arguments are located at a one-pointer-size offset from elements.
+ __ Addu(elements, elements, Operand(1 * kPointerSize));
+
+ // Loop through the arguments pushing them onto the execution
+ // stack.
+ Label invoke, loop;
+ // length is a small non-negative integer, due to the test above.
+ __ Branch(USE_DELAY_SLOT, &invoke, eq, length, Operand(zero_reg));
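+ // The sll executes in the branch delay slot, scaling the argument count
+ // to a byte offset (length * kPointerSize).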
+ __ sll(scratch, length, 2);
+ __ bind(&loop);
+ __ Addu(scratch, elements, scratch);
+ __ lw(scratch, MemOperand(scratch));
+ __ push(scratch);
+ __ Subu(length, length, Operand(1));
+ __ Branch(USE_DELAY_SLOT, &loop, ne, length, Operand(zero_reg));
+ __ sll(scratch, length, 2);
+
+ __ bind(&invoke);
+ ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
+ LPointerMap* pointers = instr->pointer_map();
+ RecordPosition(pointers->position());
+ SafepointGenerator safepoint_generator(
+ this, pointers, Safepoint::kLazyDeopt);
+ // The number of arguments is stored in receiver which is a0, as expected
+ // by InvokeFunction.
+ v8::internal::ParameterCount actual(receiver);
+ __ InvokeFunction(function, actual, CALL_FUNCTION,
+ safepoint_generator, CALL_AS_METHOD);
+ __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+}
+
+
+void LCodeGen::DoPushArgument(LPushArgument* instr) {
+ LOperand* argument = instr->InputAt(0);
+ if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
+ Abort("DoPushArgument not implemented for double type.");
+ } else {
+ Register argument_reg = EmitLoadRegister(argument, at);
+ __ push(argument_reg);
+ }
+}
+
+
+void LCodeGen::DoThisFunction(LThisFunction* instr) {
+ Register result = ToRegister(instr->result());
+ LoadHeapObject(result, instr->hydrogen()->closure());
+}
+
+
+void LCodeGen::DoContext(LContext* instr) {
+ Register result = ToRegister(instr->result());
+ __ mov(result, cp);
+}
+
+
+void LCodeGen::DoOuterContext(LOuterContext* instr) {
+ Register context = ToRegister(instr->context());
+ Register result = ToRegister(instr->result());
+ __ lw(result,
+ MemOperand(context, Context::SlotOffset(Context::PREVIOUS_INDEX)));
+}
+
+
+void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
+ Register context = ToRegister(instr->context());
+ Register result = ToRegister(instr->result());
+ __ lw(result, ContextOperand(cp, Context::GLOBAL_INDEX));
+}
+
+
+void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
+ Register global = ToRegister(instr->global());
+ Register result = ToRegister(instr->result());
+ __ lw(result, FieldMemOperand(global, GlobalObject::kGlobalReceiverOffset));
+}
+
+
+void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
+ int arity,
+ LInstruction* instr,
+ CallKind call_kind) {
+ // Change context if needed.
+ bool change_context =
+ (info()->closure()->context() != function->context()) ||
+ scope()->contains_with() ||
+ (scope()->num_heap_slots() > 0);
+ if (change_context) {
+ __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+ }
+
+ // Set a0 to arguments count if adaptation is not needed. Assumes that a0
+ // is available to write to at this point.
+ if (!function->NeedsArgumentsAdaption()) {
+ __ li(a0, Operand(arity));
+ }
+
+ LPointerMap* pointers = instr->pointer_map();
+ RecordPosition(pointers->position());
+
+ // Invoke function.
+ __ SetCallKind(t1, call_kind);
+ __ lw(at, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
+ __ Call(at);
+
+ // Set up deoptimization.
+ RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+
+ // Restore context.
+ __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+}
+
+
+void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
+ ASSERT(ToRegister(instr->result()).is(v0));
+ __ mov(a0, v0);
+ __ li(a1, Operand(instr->function()));
+ CallKnownFunction(instr->function(), instr->arity(), instr, CALL_AS_METHOD);
+}
+
+
+void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
+ Register input = ToRegister(instr->InputAt(0));
+ Register result = ToRegister(instr->result());
+ Register scratch = scratch0();
+
+ // Deoptimize if not a heap number.
+ __ lw(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
+ __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
+ DeoptimizeIf(ne, instr->environment(), scratch, Operand(at));
+
+ Label done;
+ Register exponent = scratch0();
+ scratch = no_reg;
+ __ lw(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
+ // Check the sign of the argument. If the argument is positive, just
+ // return it.
+ __ Move(result, input);
+ __ And(at, exponent, Operand(HeapNumber::kSignMask));
+ __ Branch(&done, eq, at, Operand(zero_reg));
+
+ // Input is negative. Reverse its sign.
+ // Preserve the value of all registers.
+ {
+ PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+
+ // Registers were saved at the safepoint, so we can use
+ // many scratch registers.
+ Register tmp1 = input.is(a1) ? a0 : a1;
+ Register tmp2 = input.is(a2) ? a0 : a2;
+ Register tmp3 = input.is(a3) ? a0 : a3;
+ Register tmp4 = input.is(t0) ? a0 : t0;
+
+ // exponent: floating point exponent value.
+
+ Label allocated, slow;
+ __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow);
+ __ Branch(&allocated);
+
+ // Slow case: Call the runtime system to do the number allocation.
+ __ bind(&slow);
+
+ CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
+ // Set the pointer to the new heap number in tmp1.
+ if (!tmp1.is(v0)) __ mov(tmp1, v0);
+ // Restore input_reg after call to runtime.
+ __ LoadFromSafepointRegisterSlot(input, input);
+ __ lw(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
+
+ __ bind(&allocated);
+ // exponent: floating point exponent value.
+ // tmp1: allocated heap number.
+ __ And(exponent, exponent, Operand(~HeapNumber::kSignMask));
+ __ sw(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset));
+ __ lw(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset));
+ __ sw(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset));
+
+ __ StoreToSafepointRegisterSlot(tmp1, result);
+ }
+
+ __ bind(&done);
+}
+
+
+void LCodeGen::EmitIntegerMathAbs(LUnaryMathOperation* instr) {
+ Register input = ToRegister(instr->InputAt(0));
+ Register result = ToRegister(instr->result());
+ Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
+ Label done;
+ __ Branch(USE_DELAY_SLOT, &done, ge, input, Operand(zero_reg));
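+ // The mov executes in the branch delay slot, so the input is copied to
+ // result whether or not the branch is taken.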
+ __ mov(result, input);
+ ASSERT_EQ(2, masm()->InstructionsGeneratedSince(&done));
+ __ subu(result, zero_reg, input);
+ // Overflow if result is still negative, i.e. 0x80000000.
+ DeoptimizeIf(lt, instr->environment(), result, Operand(zero_reg));
+ __ bind(&done);
+}
+
+
+void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
+ // Class for deferred case.
+ class DeferredMathAbsTaggedHeapNumber: public LDeferredCode {
+ public:
+ DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen,
+ LUnaryMathOperation* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() {
+ codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
+ }
+ virtual LInstruction* instr() { return instr_; }
+ private:
+ LUnaryMathOperation* instr_;
+ };
+
+ Representation r = instr->hydrogen()->value()->representation();
+ if (r.IsDouble()) {
+ FPURegister input = ToDoubleRegister(instr->InputAt(0));
+ FPURegister result = ToDoubleRegister(instr->result());
+ __ abs_d(result, input);
+ } else if (r.IsInteger32()) {
+ EmitIntegerMathAbs(instr);
+ } else {
+ // Representation is tagged.
+ DeferredMathAbsTaggedHeapNumber* deferred =
+ new DeferredMathAbsTaggedHeapNumber(this, instr);
+ Register input = ToRegister(instr->InputAt(0));
+ // Smi check.
+ __ JumpIfNotSmi(input, deferred->entry());
+ // If smi, handle it directly.
+ EmitIntegerMathAbs(instr);
+ __ bind(deferred->exit());
+ }
+}
+
+
+void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
+ DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
+ Register result = ToRegister(instr->result());
+ FPURegister single_scratch = double_scratch0().low();
+ Register scratch1 = scratch0();
+ Register except_flag = ToRegister(instr->TempAt(0));
+
+ __ EmitFPUTruncate(kRoundToMinusInf,
+ single_scratch,
+ input,
+ scratch1,
+ except_flag);
+
+ // Deopt if the operation did not succeed.
+ DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
+
+ // Load the result.
+ __ mfc1(result, single_scratch);
+
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ // Test for -0.
+ Label done;
+ __ Branch(&done, ne, result, Operand(zero_reg));
+ __ mfc1(scratch1, input.high());
+ __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
+ DeoptimizeIf(ne, instr->environment(), scratch1, Operand(zero_reg));
+ __ bind(&done);
+ }
+}
+
+
+void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
+ DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
+ Register result = ToRegister(instr->result());
+ Register scratch = scratch0();
+ Label done, check_sign_on_zero;
+
+ // Extract exponent bits.
+ __ mfc1(result, input.high());
+ __ Ext(scratch,
+ result,
+ HeapNumber::kExponentShift,
+ HeapNumber::kExponentBits);
+
+ // If the number is in ]-0.5, +0.5[, the result is +/- 0.
+ Label skip1;
+ __ Branch(&skip1, gt, scratch, Operand(HeapNumber::kExponentBias - 2));
+ __ mov(result, zero_reg);
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ __ Branch(&check_sign_on_zero);
+ } else {
+ __ Branch(&done);
+ }
+ __ bind(&skip1);
+
+ // The following conversion will not work with numbers
+ // outside of ]-2^32, 2^32[.
+ DeoptimizeIf(ge, instr->environment(), scratch,
+ Operand(HeapNumber::kExponentBias + 32));
+
+ // Save the original sign for later comparison.
+ __ And(scratch, result, Operand(HeapNumber::kSignMask));
+
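+ // Round half up: add 0.5 and truncate toward minus infinity below,
+ // computing floor(input + 0.5) as required by Math.round.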
+ __ Move(double_scratch0(), 0.5);
+ __ add_d(input, input, double_scratch0());
+
+ // Check the sign of the result: if the sign changed, the input
+ // value was in ]-0.5, 0[ and the result should be -0.
+ __ mfc1(result, input.high());
+ __ Xor(result, result, Operand(scratch));
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ // ARM uses 'mi' here, which is 'lt'
+ DeoptimizeIf(lt, instr->environment(), result,
+ Operand(zero_reg));
+ } else {
+ Label skip2;
+ // ARM uses 'mi' here, which is 'lt'
+ // Negating it results in 'ge'
+ __ Branch(&skip2, ge, result, Operand(zero_reg));
+ __ mov(result, zero_reg);
+ __ Branch(&done);
+ __ bind(&skip2);
+ }
+
+ Register except_flag = scratch;
+
+ __ EmitFPUTruncate(kRoundToMinusInf,
+ double_scratch0().low(),
+ input,
+ result,
+ except_flag);
+
+ DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
+
+ __ mfc1(result, double_scratch0().low());
+
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ // Test for -0.
+ __ Branch(&done, ne, result, Operand(zero_reg));
+ __ bind(&check_sign_on_zero);
+ __ mfc1(scratch, input.high());
+ __ And(scratch, scratch, Operand(HeapNumber::kSignMask));
+ DeoptimizeIf(ne, instr->environment(), scratch, Operand(zero_reg));
+ }
+ __ bind(&done);
+}
+
+
+void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
+ DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
+ DoubleRegister result = ToDoubleRegister(instr->result());
+ __ sqrt_d(result, input);
+}
+
+
+void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
+ DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
+ DoubleRegister result = ToDoubleRegister(instr->result());
+ DoubleRegister double_scratch = double_scratch0();
+
+ // Add +0 to convert -0 to +0.
+ __ mtc1(zero_reg, double_scratch.low());
+ __ mtc1(zero_reg, double_scratch.high());
+ __ add_d(result, input, double_scratch);
+ __ sqrt_d(result, result);
+}
+
+
+void LCodeGen::DoPower(LPower* instr) {
+ LOperand* left = instr->InputAt(0);
+ LOperand* right = instr->InputAt(1);
+ Register scratch = scratch0();
+ DoubleRegister result_reg = ToDoubleRegister(instr->result());
+ Representation exponent_type = instr->hydrogen()->right()->representation();
+ if (exponent_type.IsDouble()) {
+ // Prepare arguments and call C function.
+ __ PrepareCallCFunction(0, 2, scratch);
+ __ SetCallCDoubleArguments(ToDoubleRegister(left),
+ ToDoubleRegister(right));
+ __ CallCFunction(
+ ExternalReference::power_double_double_function(isolate()), 0, 2);
+ } else if (exponent_type.IsInteger32()) {
+ ASSERT(ToRegister(right).is(a0));
+ // Prepare arguments and call C function.
+ __ PrepareCallCFunction(1, 1, scratch);
+ __ SetCallCDoubleArguments(ToDoubleRegister(left), ToRegister(right));
+ __ CallCFunction(
+ ExternalReference::power_double_int_function(isolate()), 1, 1);
+ } else {
+ ASSERT(exponent_type.IsTagged());
+ ASSERT(instr->hydrogen()->left()->representation().IsDouble());
+
+ Register right_reg = ToRegister(right);
+
+ // Check for smi on the right hand side.
+ Label non_smi, call;
+ __ JumpIfNotSmi(right_reg, &non_smi);
+
+ // Untag smi and convert it to a double.
+ __ SmiUntag(right_reg);
+ FPURegister single_scratch = double_scratch0();
+ __ mtc1(right_reg, single_scratch);
+ __ cvt_d_w(result_reg, single_scratch);
+ __ Branch(&call);
+
+ // Heap number map check.
+ __ bind(&non_smi);
+ __ lw(scratch, FieldMemOperand(right_reg, HeapObject::kMapOffset));
+ __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
+ DeoptimizeIf(ne, instr->environment(), scratch, Operand(at));
+ __ ldc1(result_reg, FieldMemOperand(right_reg, HeapNumber::kValueOffset));
+
+ // Prepare arguments and call C function.
+ __ bind(&call);
+ __ PrepareCallCFunction(0, 2, scratch);
+ __ SetCallCDoubleArguments(ToDoubleRegister(left), result_reg);
+ __ CallCFunction(
+ ExternalReference::power_double_double_function(isolate()), 0, 2);
+ }
+ // Store the result in the result register.
+ __ GetCFunctionDoubleResult(result_reg);
+}
+
+
+void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
+ ASSERT(ToDoubleRegister(instr->result()).is(f4));
+ TranscendentalCacheStub stub(TranscendentalCache::LOG,
+ TranscendentalCacheStub::UNTAGGED);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoMathTan(LUnaryMathOperation* instr) {
+ ASSERT(ToDoubleRegister(instr->result()).is(f4));
+ TranscendentalCacheStub stub(TranscendentalCache::TAN,
+ TranscendentalCacheStub::UNTAGGED);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoMathCos(LUnaryMathOperation* instr) {
+ ASSERT(ToDoubleRegister(instr->result()).is(f4));
+ TranscendentalCacheStub stub(TranscendentalCache::COS,
+ TranscendentalCacheStub::UNTAGGED);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoMathSin(LUnaryMathOperation* instr) {
+ ASSERT(ToDoubleRegister(instr->result()).is(f4));
+ TranscendentalCacheStub stub(TranscendentalCache::SIN,
+ TranscendentalCacheStub::UNTAGGED);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) {
+ switch (instr->op()) {
+ case kMathAbs:
+ DoMathAbs(instr);
+ break;
+ case kMathFloor:
+ DoMathFloor(instr);
+ break;
+ case kMathRound:
+ DoMathRound(instr);
+ break;
+ case kMathSqrt:
+ DoMathSqrt(instr);
+ break;
+ case kMathPowHalf:
+ DoMathPowHalf(instr);
+ break;
+ case kMathCos:
+ DoMathCos(instr);
+ break;
+ case kMathSin:
+ DoMathSin(instr);
+ break;
+ case kMathTan:
+ DoMathTan(instr);
+ break;
+ case kMathLog:
+ DoMathLog(instr);
+ break;
+ default:
+ Abort("Unimplemented type of LUnaryMathOperation.");
+ UNREACHABLE();
+ }
+}
+
+
+void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
+ ASSERT(ToRegister(instr->function()).is(a1));
+ ASSERT(instr->HasPointerMap());
+ ASSERT(instr->HasDeoptimizationEnvironment());
+ LPointerMap* pointers = instr->pointer_map();
+ RecordPosition(pointers->position());
+ SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+ ParameterCount count(instr->arity());
+ __ InvokeFunction(a1, count, CALL_FUNCTION, generator, CALL_AS_METHOD);
+ __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+}
+
+
+void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
+ ASSERT(ToRegister(instr->result()).is(v0));
+
+ int arity = instr->arity();
+ Handle<Code> ic =
+ isolate()->stub_cache()->ComputeKeyedCallInitialize(arity);
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+ __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+}
+
+
+void LCodeGen::DoCallNamed(LCallNamed* instr) {
+ ASSERT(ToRegister(instr->result()).is(v0));
+
+ int arity = instr->arity();
+ RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
+ Handle<Code> ic =
+ isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
+ __ li(a2, Operand(instr->name()));
+ CallCode(ic, mode, instr);
+ // Restore context register.
+ __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+}
+
+
+void LCodeGen::DoCallFunction(LCallFunction* instr) {
+ ASSERT(ToRegister(instr->function()).is(a1));
+ ASSERT(ToRegister(instr->result()).is(v0));
+
+ int arity = instr->arity();
+ CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+}
+
+
+void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
+ ASSERT(ToRegister(instr->result()).is(v0));
+
+ int arity = instr->arity();
+ RelocInfo::Mode mode = RelocInfo::CODE_TARGET_CONTEXT;
+ Handle<Code> ic =
+ isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
+ __ li(a2, Operand(instr->name()));
+ CallCode(ic, mode, instr);
+ __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+}
+
+
+void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
+ ASSERT(ToRegister(instr->result()).is(v0));
+ __ li(a1, Operand(instr->target()));
+ CallKnownFunction(instr->target(), instr->arity(), instr, CALL_AS_FUNCTION);
+}
+
+
+void LCodeGen::DoCallNew(LCallNew* instr) {
+ ASSERT(ToRegister(instr->InputAt(0)).is(a1));
+ ASSERT(ToRegister(instr->result()).is(v0));
+
+ Handle<Code> builtin = isolate()->builtins()->JSConstructCall();
+ __ li(a0, Operand(instr->arity()));
+ CallCode(builtin, RelocInfo::CONSTRUCT_CALL, instr);
+}
+
+
+void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
+ CallRuntime(instr->function(), instr->arity(), instr);
+}
+
+
+void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
+ Register object = ToRegister(instr->object());
+ Register value = ToRegister(instr->value());
+ Register scratch = scratch0();
+ int offset = instr->offset();
+
+ ASSERT(!object.is(value));
+
+ if (!instr->transition().is_null()) {
+ __ li(scratch, Operand(instr->transition()));
+ __ sw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
+ }
+
+ // Do the store.
+ HType type = instr->hydrogen()->value()->type();
+ SmiCheck check_needed =
+ type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
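+ // When the value is statically known to be a heap object the write barrier
+ // can skip its inline smi test: a smi is never a pointer and so never needs
+ // to be recorded in the remembered set.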
+ if (instr->is_in_object()) {
+ __ sw(value, FieldMemOperand(object, offset));
+ if (instr->hydrogen()->NeedsWriteBarrier()) {
+ // Update the write barrier for the object for in-object properties.
+ __ RecordWriteField(object,
+ offset,
+ value,
+ scratch,
+ kRAHasBeenSaved,
+ kSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ check_needed);
+ }
+ } else {
+ __ lw(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset));
+ __ sw(value, FieldMemOperand(scratch, offset));
+ if (instr->hydrogen()->NeedsWriteBarrier()) {
+ // Update the write barrier for the properties array.
+ // object is used as a scratch register.
+ __ RecordWriteField(scratch,
+ offset,
+ value,
+ object,
+ kRAHasBeenSaved,
+ kSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ check_needed);
+ }
+ }
+}
+
+
+void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
+ ASSERT(ToRegister(instr->object()).is(a1));
+ ASSERT(ToRegister(instr->value()).is(a0));
+
+ // Name is always in a2.
+ __ li(a2, Operand(instr->name()));
+ Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
+ ? isolate()->builtins()->StoreIC_Initialize_Strict()
+ : isolate()->builtins()->StoreIC_Initialize();
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
+ DeoptimizeIf(hs,
+ instr->environment(),
+ ToRegister(instr->index()),
+ Operand(ToRegister(instr->length())));
+}
+
+
+void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
+ Register value = ToRegister(instr->value());
+ Register elements = ToRegister(instr->object());
+ Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
+ Register scratch = scratch0();
+
+ // This instruction cannot handle the FAST_SMI_ONLY_ELEMENTS -> FAST_ELEMENTS
+ // conversion, so it deopts in that case.
+ if (instr->hydrogen()->ValueNeedsSmiCheck()) {
+ __ And(at, value, Operand(kSmiTagMask));
+ DeoptimizeIf(ne, instr->environment(), at, Operand(zero_reg));
+ }
+
+ // Do the store.
+ if (instr->key()->IsConstantOperand()) {
+ ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
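+ // The offset below is relative to the tagged elements pointer;
+ // FieldMemOperand compensates for the heap-object tag.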
+ LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
+ int offset =
+ ToInteger32(const_operand) * kPointerSize + FixedArray::kHeaderSize;
+ __ sw(value, FieldMemOperand(elements, offset));
+ } else {
+ __ sll(scratch, key, kPointerSizeLog2);
+ __ addu(scratch, elements, scratch);
+ __ sw(value, FieldMemOperand(scratch, FixedArray::kHeaderSize));
+ }
+
+ if (instr->hydrogen()->NeedsWriteBarrier()) {
+ HType type = instr->hydrogen()->value()->type();
+ SmiCheck check_needed =
+ type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+ // Compute address of modified element and store it into key register.
+ __ Addu(key, scratch, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ RecordWrite(elements,
+ key,
+ value,
+ kRAHasBeenSaved,
+ kSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ check_needed);
+ }
+}
+
+
+void LCodeGen::DoStoreKeyedFastDoubleElement(
+ LStoreKeyedFastDoubleElement* instr) {
+ DoubleRegister value = ToDoubleRegister(instr->value());
+ Register elements = ToRegister(instr->elements());
+ Register key = no_reg;
+ Register scratch = scratch0();
+ bool key_is_constant = instr->key()->IsConstantOperand();
+ int constant_key = 0;
+ Label not_nan;
+
+ // Calculate the effective address of the slot in the array to store the
+ // double value.
+ if (key_is_constant) {
+ constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+ if (constant_key & 0xF0000000) {
+ Abort("array index constant value too big.");
+ }
+ } else {
+ key = ToRegister(instr->key());
+ }
+ int shift_size = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
+ if (key_is_constant) {
+ __ Addu(scratch, elements, Operand(constant_key * (1 << shift_size) +
+ FixedDoubleArray::kHeaderSize - kHeapObjectTag));
+ } else {
+ __ sll(scratch, key, shift_size);
+ __ Addu(scratch, elements, Operand(scratch));
+ __ Addu(scratch, scratch,
+ Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
+ }
+
+ Label is_nan;
+ // Check for NaN. All NaNs must be canonicalized.
+ __ BranchF(NULL, &is_nan, eq, value, value);
+ __ Branch(&not_nan);
+
+ // Load the canonical NaN only if the comparison above signaled an
+ // unordered result, i.e. the value is a NaN.
+ __ bind(&is_nan);
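+ // FixedDoubleArray reserves one particular NaN bit pattern to represent
+ // the hole, so any NaN being stored must first be rewritten to the single
+ // canonical NaN to keep the two distinguishable.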
+ __ Move(value, FixedDoubleArray::canonical_not_the_hole_nan_as_double());
+
+ __ bind(&not_nan);
+ __ sdc1(value, MemOperand(scratch));
+}
+
+
+void LCodeGen::DoStoreKeyedSpecializedArrayElement(
+ LStoreKeyedSpecializedArrayElement* instr) {
+
+ Register external_pointer = ToRegister(instr->external_pointer());
+ Register key = no_reg;
+ ElementsKind elements_kind = instr->elements_kind();
+ bool key_is_constant = instr->key()->IsConstantOperand();
+ int constant_key = 0;
+ if (key_is_constant) {
+ constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+ if (constant_key & 0xF0000000) {
+ Abort("array index constant value too big.");
+ }
+ } else {
+ key = ToRegister(instr->key());
+ }
+ int shift_size = ElementsKindToShiftSize(elements_kind);
+
+ if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
+ elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
+ FPURegister value(ToDoubleRegister(instr->value()));
+ if (key_is_constant) {
+ __ Addu(scratch0(), external_pointer, constant_key * (1 << shift_size));
+ } else {
+ __ sll(scratch0(), key, shift_size);
+ __ Addu(scratch0(), scratch0(), external_pointer);
+ }
+
+ if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
+ __ cvt_s_d(double_scratch0(), value);
+ __ swc1(double_scratch0(), MemOperand(scratch0()));
+ } else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
+ __ sdc1(value, MemOperand(scratch0()));
+ }
+ } else {
+ Register value(ToRegister(instr->value()));
+ MemOperand mem_operand(zero_reg);
+ Register scratch = scratch0();
+ if (key_is_constant) {
+ mem_operand = MemOperand(external_pointer,
+ constant_key * (1 << shift_size));
+ } else {
+ __ sll(scratch, key, shift_size);
+ __ Addu(scratch, scratch, external_pointer);
+ mem_operand = MemOperand(scratch);
+ }
+ switch (elements_kind) {
+ case EXTERNAL_PIXEL_ELEMENTS:
+ case EXTERNAL_BYTE_ELEMENTS:
+ case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+ __ sb(value, mem_operand);
+ break;
+ case EXTERNAL_SHORT_ELEMENTS:
+ case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+ __ sh(value, mem_operand);
+ break;
+ case EXTERNAL_INT_ELEMENTS:
+ case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+ __ sw(value, mem_operand);
+ break;
+ case EXTERNAL_FLOAT_ELEMENTS:
+ case EXTERNAL_DOUBLE_ELEMENTS:
+ case FAST_DOUBLE_ELEMENTS:
+ case FAST_ELEMENTS:
+ case FAST_SMI_ONLY_ELEMENTS:
+ case DICTIONARY_ELEMENTS:
+ case NON_STRICT_ARGUMENTS_ELEMENTS:
+ UNREACHABLE();
+ break;
+ }
+ }
+}
+
+
+void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
+ ASSERT(ToRegister(instr->object()).is(a2));
+ ASSERT(ToRegister(instr->key()).is(a1));
+ ASSERT(ToRegister(instr->value()).is(a0));
+
+ Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
+ ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
+ : isolate()->builtins()->KeyedStoreIC_Initialize();
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
+ Register object_reg = ToRegister(instr->object());
+ Register new_map_reg = ToRegister(instr->new_map_reg());
+ Register scratch = scratch0();
+
+ Handle<Map> from_map = instr->original_map();
+ Handle<Map> to_map = instr->transitioned_map();
+ ElementsKind from_kind = from_map->elements_kind();
+ ElementsKind to_kind = to_map->elements_kind();
+
+ __ mov(ToRegister(instr->result()), object_reg);
+
+ Label not_applicable;
+ __ lw(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset));
+ __ Branch(&not_applicable, ne, scratch, Operand(from_map));
+
+ __ li(new_map_reg, Operand(to_map));
+ if (from_kind == FAST_SMI_ONLY_ELEMENTS && to_kind == FAST_ELEMENTS) {
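+ // A smi-only array's backing store is already a valid FixedArray of
+ // (smi) objects, so this transition only requires installing the new
+ // map; the elements themselves are unchanged.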
+ __ sw(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
+ // Write barrier.
+ __ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
+ scratch, kRAHasBeenSaved, kDontSaveFPRegs);
+ } else if (from_kind == FAST_SMI_ONLY_ELEMENTS &&
+ to_kind == FAST_DOUBLE_ELEMENTS) {
+ Register fixed_object_reg = ToRegister(instr->temp_reg());
+ ASSERT(fixed_object_reg.is(a2));
+ ASSERT(new_map_reg.is(a3));
+ __ mov(fixed_object_reg, object_reg);
+ CallCode(isolate()->builtins()->TransitionElementsSmiToDouble(),
+ RelocInfo::CODE_TARGET, instr);
+ } else if (from_kind == FAST_DOUBLE_ELEMENTS && to_kind == FAST_ELEMENTS) {
+ Register fixed_object_reg = ToRegister(instr->temp_reg());
+ ASSERT(fixed_object_reg.is(a2));
+ ASSERT(new_map_reg.is(a3));
+ __ mov(fixed_object_reg, object_reg);
+ CallCode(isolate()->builtins()->TransitionElementsDoubleToObject(),
+ RelocInfo::CODE_TARGET, instr);
+ } else {
+ UNREACHABLE();
+ }
+ __ bind(&not_applicable);
+}
+
+
+void LCodeGen::DoStringAdd(LStringAdd* instr) {
+ __ push(ToRegister(instr->left()));
+ __ push(ToRegister(instr->right()));
+ StringAddStub stub(NO_STRING_CHECK_IN_STUB);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
+ class DeferredStringCharCodeAt: public LDeferredCode {
+ public:
+ DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
+ virtual LInstruction* instr() { return instr_; }
+ private:
+ LStringCharCodeAt* instr_;
+ };
+
+ Register temp = scratch1();
+ Register string = ToRegister(instr->string());
+ Register index = ToRegister(instr->index());
+ Register result = ToRegister(instr->result());
+ DeferredStringCharCodeAt* deferred =
+ new DeferredStringCharCodeAt(this, instr);
+
+ // Fetch the instance type of the receiver into the result register.
+ __ lw(result, FieldMemOperand(string, HeapObject::kMapOffset));
+ __ lbu(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
+
+ // We need special handling for indirect strings.
+ Label check_sequential;
+ __ And(temp, result, kIsIndirectStringMask);
+ __ Branch(&check_sequential, eq, temp, Operand(zero_reg));
+
+ // Dispatch on the indirect string shape: slice or cons.
+ Label cons_string;
+ __ And(temp, result, kSlicedNotConsMask);
+ __ Branch(&cons_string, eq, temp, Operand(zero_reg));
+
+ // Handle slices.
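+ // A sliced string is a window into its parent: add the slice's smi offset
+ // (untagged by the sra below) to the index and continue with the parent.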
+ Label indirect_string_loaded;
+ __ lw(result, FieldMemOperand(string, SlicedString::kOffsetOffset));
+ __ sra(temp, result, kSmiTagSize);
+ __ addu(index, index, temp);
+ __ lw(string, FieldMemOperand(string, SlicedString::kParentOffset));
+ __ jmp(&indirect_string_loaded);
+
+ // Handle conses.
+ // Check whether the right hand side is the empty string (i.e. if
+ // this is really a flat string in a cons string). If that is not
+ // the case we would rather go to the runtime system now to flatten
+ // the string.
+ __ bind(&cons_string);
+ __ lw(result, FieldMemOperand(string, ConsString::kSecondOffset));
+ __ LoadRoot(temp, Heap::kEmptyStringRootIndex);
+ __ Branch(deferred->entry(), ne, result, Operand(temp));
+ // Get the first of the two strings and load its instance type.
+ __ lw(string, FieldMemOperand(string, ConsString::kFirstOffset));
+
+ __ bind(&indirect_string_loaded);
+ __ lw(result, FieldMemOperand(string, HeapObject::kMapOffset));
+ __ lbu(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
+
+ // Check whether the string is sequential. The only non-sequential
+ // shapes we support have just been unwrapped above.
+ // Note that if the original string is a cons or slice with an external
+ // string as underlying string, we pass that unpacked underlying string with
+ // the adjusted index to the runtime function.
+ __ bind(&check_sequential);
+ STATIC_ASSERT(kSeqStringTag == 0);
+ __ And(temp, result, Operand(kStringRepresentationMask));
+ __ Branch(deferred->entry(), ne, temp, Operand(zero_reg));
+
+ // Dispatch on the encoding: ASCII or two-byte.
+ Label ascii_string;
+ STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
+ STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
+ __ And(temp, result, Operand(kStringEncodingMask));
+ __ Branch(&ascii_string, ne, temp, Operand(zero_reg));
+
+ // Two-byte string.
+ // Load the two-byte character code into the result register.
+ Label done;
+ __ Addu(result,
+ string,
+ Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+ __ sll(temp, index, 1);
+ __ Addu(result, result, temp);
+ __ lhu(result, MemOperand(result, 0));
+ __ Branch(&done);
+
+ // ASCII string.
+ // Load the byte into the result register.
+ __ bind(&ascii_string);
+ __ Addu(result,
+ string,
+ Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ Addu(result, result, index);
+ __ lbu(result, MemOperand(result, 0));
+
+ __ bind(&done);
+ __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
+ Register string = ToRegister(instr->string());
+ Register result = ToRegister(instr->result());
+ Register scratch = scratch0();
+
+ // TODO(3095996): Get rid of this. For now, we need to make the
+ // result register contain a valid pointer because it is already
+ // contained in the register pointer map.
+ __ mov(result, zero_reg);
+
+ PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ __ push(string);
+ // Push the index as a smi. This is safe because of the checks in
+ // DoStringCharCodeAt above.
+ if (instr->index()->IsConstantOperand()) {
+ int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
+ __ Addu(scratch, zero_reg, Operand(Smi::FromInt(const_index)));
+ __ push(scratch);
+ } else {
+ Register index = ToRegister(instr->index());
+ __ SmiTag(index);
+ __ push(index);
+ }
+ CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2, instr);
+ if (FLAG_debug_code) {
+ __ AbortIfNotSmi(v0);
+ }
+ __ SmiUntag(v0);
+ __ StoreToSafepointRegisterSlot(v0, result);
+}
+
+
+void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
+ class DeferredStringCharFromCode: public LDeferredCode {
+ public:
+ DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); }
+ virtual LInstruction* instr() { return instr_; }
+ private:
+ LStringCharFromCode* instr_;
+ };
+
+ DeferredStringCharFromCode* deferred =
+ new DeferredStringCharFromCode(this, instr);
+
+ ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
+ Register char_code = ToRegister(instr->char_code());
+ Register result = ToRegister(instr->result());
+ Register scratch = scratch0();
+ ASSERT(!char_code.is(result));
+
+ __ Branch(deferred->entry(), hi,
+ char_code, Operand(String::kMaxAsciiCharCode));
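+ // The single-character string cache covers only ASCII char codes; a cache
+ // miss leaves undefined in the result and falls through to the deferred
+ // runtime call, as does any char code above kMaxAsciiCharCode.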
+ __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
+ __ sll(scratch, char_code, kPointerSizeLog2);
+ __ Addu(result, result, scratch);
+ __ lw(result, FieldMemOperand(result, FixedArray::kHeaderSize));
+ __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
+ __ Branch(deferred->entry(), eq, result, Operand(scratch));
+ __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
+ Register char_code = ToRegister(instr->char_code());
+ Register result = ToRegister(instr->result());
+
+ // TODO(3095996): Get rid of this. For now, we need to make the
+ // result register contain a valid pointer because it is already
+ // contained in the register pointer map.
+ __ mov(result, zero_reg);
+
+ PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ __ SmiTag(char_code);
+ __ push(char_code);
+ CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr);
+ __ StoreToSafepointRegisterSlot(v0, result);
+}
+
+
+void LCodeGen::DoStringLength(LStringLength* instr) {
+ Register string = ToRegister(instr->InputAt(0));
+ Register result = ToRegister(instr->result());
+ __ lw(result, FieldMemOperand(string, String::kLengthOffset));
+}
+
+
+void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
+ LOperand* input = instr->InputAt(0);
+ ASSERT(input->IsRegister() || input->IsStackSlot());
+ LOperand* output = instr->result();
+ ASSERT(output->IsDoubleRegister());
+ FPURegister single_scratch = double_scratch0().low();
+ if (input->IsStackSlot()) {
+ Register scratch = scratch0();
+ __ lw(scratch, ToMemOperand(input));
+ __ mtc1(scratch, single_scratch);
+ } else {
+ __ mtc1(ToRegister(input), single_scratch);
+ }
+ __ cvt_d_w(ToDoubleRegister(output), single_scratch);
+}
+
+
+void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
+ class DeferredNumberTagI: public LDeferredCode {
+ public:
+ DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() { codegen()->DoDeferredNumberTagI(instr_); }
+ virtual LInstruction* instr() { return instr_; }
+ private:
+ LNumberTagI* instr_;
+ };
+
+ LOperand* input = instr->InputAt(0);
+ ASSERT(input->IsRegister() && input->Equals(instr->result()));
+ Register reg = ToRegister(input);
+ Register overflow = scratch0();
+
+ DeferredNumberTagI* deferred = new DeferredNumberTagI(this, instr);
+ __ SmiTagCheckOverflow(reg, overflow);
+ __ BranchOnOverflow(deferred->entry(), overflow);
+ __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) {
+ Label slow;
+ Register reg = ToRegister(instr->InputAt(0));
+ FPURegister dbl_scratch = double_scratch0();
+
+ // Preserve the value of all registers.
+ PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+
+ // There was overflow, so bits 30 and 31 of the original integer
+ // disagree. Try to allocate a heap number in new space and store
+ // the value in there. If that fails, call the runtime system.
+ Label done;
+ __ SmiUntag(reg);
+ __ Xor(reg, reg, Operand(0x80000000));
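+ // After a tagging overflow, untagging shifts bit 30 of the original value
+ // into bit 31; since overflow implies those two bits disagreed, flipping
+ // bit 31 with the Xor above recovers the original integer.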
+ __ mtc1(reg, dbl_scratch);
+ __ cvt_d_w(dbl_scratch, dbl_scratch);
+ if (FLAG_inline_new) {
+ __ LoadRoot(t2, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(t1, a3, t0, t2, &slow);
+ if (!reg.is(t1)) __ mov(reg, t1);
+ __ Branch(&done);
+ }
+
+ // Slow case: Call the runtime system to do the number allocation.
+ __ bind(&slow);
+
+ // TODO(3095996): Put a valid pointer value in the stack slot where the result
+ // register is stored, as this register is in the pointer map, but contains an
+ // integer value.
+ __ StoreToSafepointRegisterSlot(zero_reg, reg);
+ CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
+ if (!reg.is(v0)) __ mov(reg, v0);
+
+ // Done. Store the double in dbl_scratch into the value field of the
+ // allocated heap number.
+ __ bind(&done);
+ __ sdc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
+ __ StoreToSafepointRegisterSlot(reg, reg);
+}
+
+
+void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
+ class DeferredNumberTagD: public LDeferredCode {
+ public:
+ DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
+ virtual LInstruction* instr() { return instr_; }
+ private:
+ LNumberTagD* instr_;
+ };
+
+ DoubleRegister input_reg = ToDoubleRegister(instr->InputAt(0));
+ Register scratch = scratch0();
+ Register reg = ToRegister(instr->result());
+ Register temp1 = ToRegister(instr->TempAt(0));
+ Register temp2 = ToRegister(instr->TempAt(1));
+
+ DeferredNumberTagD* deferred = new DeferredNumberTagD(this, instr);
+ if (FLAG_inline_new) {
+ __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry());
+ } else {
+ __ Branch(deferred->entry());
+ }
+ __ bind(deferred->exit());
+ __ sdc1(input_reg, FieldMemOperand(reg, HeapNumber::kValueOffset));
+}
+
+
+void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
+ // TODO(3095996): Get rid of this. For now, we need to make the
+ // result register contain a valid pointer because it is already
+ // contained in the register pointer map.
+ Register reg = ToRegister(instr->result());
+ __ mov(reg, zero_reg);
+
+ PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
+ __ StoreToSafepointRegisterSlot(v0, reg);
+}
+
+
+void LCodeGen::DoSmiTag(LSmiTag* instr) {
+ LOperand* input = instr->InputAt(0);
+ ASSERT(input->IsRegister() && input->Equals(instr->result()));
+ ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow));
+ __ SmiTag(ToRegister(input));
+}
+
+
+void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
+ Register scratch = scratch0();
+ LOperand* input = instr->InputAt(0);
+ ASSERT(input->IsRegister() && input->Equals(instr->result()));
+ if (instr->needs_check()) {
+ STATIC_ASSERT(kHeapObjectTag == 1);
+ // Capture the tag bit before SmiUntag shifts it away; if the input is a
+ // HeapObject, scratch will be non-zero.
+ __ And(scratch, ToRegister(input), Operand(kHeapObjectTag));
+ __ SmiUntag(ToRegister(input));
+ DeoptimizeIf(ne, instr->environment(), scratch, Operand(zero_reg));
+ } else {
+ __ SmiUntag(ToRegister(input));
+ }
+}
+
+
+void LCodeGen::EmitNumberUntagD(Register input_reg,
+ DoubleRegister result_reg,
+ bool deoptimize_on_undefined,
+ LEnvironment* env) {
+ Register scratch = scratch0();
+
+ Label load_smi, done;
+
+ // Smi check.
+ __ JumpIfSmi(input_reg, &load_smi);
+
+ // Heap number map check.
+ __ lw(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
+ __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
+ if (deoptimize_on_undefined) {
+ DeoptimizeIf(ne, env, scratch, Operand(at));
+ } else {
+ Label heap_number;
+ __ Branch(&heap_number, eq, scratch, Operand(at));
+
+ __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+ DeoptimizeIf(ne, env, input_reg, Operand(at));
+
+ // Convert undefined to NaN.
+ __ LoadRoot(at, Heap::kNanValueRootIndex);
+ __ ldc1(result_reg, FieldMemOperand(at, HeapNumber::kValueOffset));
+ __ Branch(&done);
+
+ __ bind(&heap_number);
+ }
+ // Heap number to double register conversion.
+ __ ldc1(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
+ __ Branch(&done);
+
+ // Smi to double register conversion.
+ __ bind(&load_smi);
+ __ SmiUntag(input_reg); // Untag smi before converting to float.
+ __ mtc1(input_reg, result_reg);
+ __ cvt_d_w(result_reg, result_reg);
+ __ SmiTag(input_reg); // Retag smi.
+ __ bind(&done);
+}
+
+
+void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
+ Register input_reg = ToRegister(instr->InputAt(0));
+ Register scratch1 = scratch0();
+ Register scratch2 = ToRegister(instr->TempAt(0));
+ DoubleRegister double_scratch = double_scratch0();
+ FPURegister single_scratch = double_scratch.low();
+
+ ASSERT(!scratch1.is(input_reg) && !scratch1.is(scratch2));
+ ASSERT(!scratch2.is(input_reg) && !scratch2.is(scratch1));
+
+ Label done;
+
+ // The input is a tagged HeapObject.
+ // Heap number map check.
+ __ lw(scratch1, FieldMemOperand(input_reg, HeapObject::kMapOffset));
+ __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
+ // The value in 'at' and the map value in scratch1 are used for tests in
+ // both clauses of the if below.
+
+ if (instr->truncating()) {
+ Register scratch3 = ToRegister(instr->TempAt(1));
+ DoubleRegister double_scratch2 = ToDoubleRegister(instr->TempAt(2));
+ ASSERT(!scratch3.is(input_reg) &&
+ !scratch3.is(scratch1) &&
+ !scratch3.is(scratch2));
+ // Performs a truncating conversion of a floating point number as used by
+ // the JS bitwise operations.
+ Label heap_number;
+ __ Branch(&heap_number, eq, scratch1, Operand(at)); // HeapNumber map?
+ // Check for undefined. Undefined is converted to zero for truncating
+ // conversions.
+ __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+ DeoptimizeIf(ne, instr->environment(), input_reg, Operand(at));
+ ASSERT(ToRegister(instr->result()).is(input_reg));
+ __ mov(input_reg, zero_reg);
+ __ Branch(&done);
+
+ __ bind(&heap_number);
+ __ ldc1(double_scratch2,
+ FieldMemOperand(input_reg, HeapNumber::kValueOffset));
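+ // EmitECMATruncate implements the ECMA-262 ToInt32 truncation: the double
+ // is truncated toward zero and reduced modulo 2^32, which is what the JS
+ // bitwise operators require even for out-of-range values.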
+ __ EmitECMATruncate(input_reg,
+ double_scratch2,
+ single_scratch,
+ scratch1,
+ scratch2,
+ scratch3);
+ } else {
+ // Deoptimize if we don't have a heap number.
+ DeoptimizeIf(ne, instr->environment(), scratch1, Operand(at));
+
+ // Load the double value.
+ __ ldc1(double_scratch,
+ FieldMemOperand(input_reg, HeapNumber::kValueOffset));
+
+ Register except_flag = scratch2;
+ __ EmitFPUTruncate(kRoundToZero,
+ single_scratch,
+ double_scratch,
+ scratch1,
+ except_flag,
+ kCheckForInexactConversion);
+
+ // Deopt if the operation did not succeed.
+ DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
+
+ // Load the result.
+ __ mfc1(input_reg, single_scratch);
+
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ __ Branch(&done, ne, input_reg, Operand(zero_reg));
+
+ __ mfc1(scratch1, double_scratch.high());
+ __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
+ DeoptimizeIf(ne, instr->environment(), scratch1, Operand(zero_reg));
+ }
+ }
+ __ bind(&done);
+}
+
+
+void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
+ class DeferredTaggedToI: public LDeferredCode {
+ public:
+ DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
+ virtual LInstruction* instr() { return instr_; }
+ private:
+ LTaggedToI* instr_;
+ };
+
+ LOperand* input = instr->InputAt(0);
+ ASSERT(input->IsRegister());
+ ASSERT(input->Equals(instr->result()));
+
+ Register input_reg = ToRegister(input);
+
+ DeferredTaggedToI* deferred = new DeferredTaggedToI(this, instr);
+
+ // Let the deferred code handle the HeapObject case.
+ __ JumpIfNotSmi(input_reg, deferred->entry());
+
+ // Smi to int32 conversion.
+ __ SmiUntag(input_reg);
+ __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
+ LOperand* input = instr->InputAt(0);
+ ASSERT(input->IsRegister());
+ LOperand* result = instr->result();
+ ASSERT(result->IsDoubleRegister());
+
+ Register input_reg = ToRegister(input);
+ DoubleRegister result_reg = ToDoubleRegister(result);
+
+ EmitNumberUntagD(input_reg, result_reg,
+ instr->hydrogen()->deoptimize_on_undefined(),
+ instr->environment());
+}
+
+
+void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
+ Register result_reg = ToRegister(instr->result());
+ Register scratch1 = scratch0();
+ Register scratch2 = ToRegister(instr->TempAt(0));
+ DoubleRegister double_input = ToDoubleRegister(instr->InputAt(0));
+ DoubleRegister double_scratch = double_scratch0();
+ FPURegister single_scratch = double_scratch0().low();
+
+ if (instr->truncating()) {
+ Register scratch3 = ToRegister(instr->TempAt(1));
+ __ EmitECMATruncate(result_reg,
+ double_input,
+ single_scratch,
+ scratch1,
+ scratch2,
+ scratch3);
+ } else {
+ Register except_flag = scratch2;
+
+ __ EmitFPUTruncate(kRoundToMinusInf,
+ single_scratch,
+ double_input,
+ scratch1,
+ except_flag,
+ kCheckForInexactConversion);
+
+ // Deopt if the operation did not succeed (except_flag != 0).
+ DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
+
+ // Load the result.
+ __ mfc1(result_reg, single_scratch);
+ }
+}
+
+
+void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
+ LOperand* input = instr->InputAt(0);
+ __ And(at, ToRegister(input), Operand(kSmiTagMask));
+ DeoptimizeIf(ne, instr->environment(), at, Operand(zero_reg));
+}
+
+
+void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
+ LOperand* input = instr->InputAt(0);
+ __ And(at, ToRegister(input), Operand(kSmiTagMask));
+ DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
+}
+
+
+void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
+ Register input = ToRegister(instr->InputAt(0));
+ Register scratch = scratch0();
+
+ __ GetObjectType(input, scratch, scratch);
+
+ if (instr->hydrogen()->is_interval_check()) {
+ InstanceType first;
+ InstanceType last;
+ instr->hydrogen()->GetCheckInterval(&first, &last);
+
+ // If there is only one type in the interval check for equality.
+ if (first == last) {
+ DeoptimizeIf(ne, instr->environment(), scratch, Operand(first));
+ } else {
+ DeoptimizeIf(lo, instr->environment(), scratch, Operand(first));
+ // The upper-bound check can be omitted when last is the last possible type.
+ if (last != LAST_TYPE) {
+ DeoptimizeIf(hi, instr->environment(), scratch, Operand(last));
+ }
+ }
+ } else {
+ uint8_t mask;
+ uint8_t tag;
+ instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
+
+ if (IsPowerOf2(mask)) {
+ ASSERT(tag == 0 || IsPowerOf2(tag));
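+ // With a single-bit mask the tag is either 0 or the mask itself, so a
+ // single And plus a zero/non-zero test suffices here instead of the full
+ // mask-and-compare in the else case.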
+ __ And(at, scratch, mask);
+ DeoptimizeIf(tag == 0 ? ne : eq, instr->environment(),
+ at, Operand(zero_reg));
+ } else {
+ __ And(scratch, scratch, Operand(mask));
+ DeoptimizeIf(ne, instr->environment(), scratch, Operand(tag));
+ }
+ }
+}
+
+
+void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
+ ASSERT(instr->InputAt(0)->IsRegister());
+ Register reg = ToRegister(instr->InputAt(0));
+ DeoptimizeIf(ne, instr->environment(), reg,
+ Operand(instr->hydrogen()->target()));
+}
+
+
+void LCodeGen::DoCheckMap(LCheckMap* instr) {
+ Register scratch = scratch0();
+ LOperand* input = instr->InputAt(0);
+ ASSERT(input->IsRegister());
+ Register reg = ToRegister(input);
+ __ lw(scratch, FieldMemOperand(reg, HeapObject::kMapOffset));
+ DeoptimizeIf(ne,
+ instr->environment(),
+ scratch,
+ Operand(instr->hydrogen()->map()));
+}
+
+
+void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
+ DoubleRegister value_reg = ToDoubleRegister(instr->unclamped());
+ Register result_reg = ToRegister(instr->result());
+ DoubleRegister temp_reg = ToDoubleRegister(instr->TempAt(0));
+ __ ClampDoubleToUint8(result_reg, value_reg, temp_reg);
+}
+
+
+void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
+ Register unclamped_reg = ToRegister(instr->unclamped());
+ Register result_reg = ToRegister(instr->result());
+ __ ClampUint8(result_reg, unclamped_reg);
+}
+
+
+void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
+ Register scratch = scratch0();
+ Register input_reg = ToRegister(instr->unclamped());
+ Register result_reg = ToRegister(instr->result());
+ DoubleRegister temp_reg = ToDoubleRegister(instr->TempAt(0));
+ Label is_smi, done, heap_number;
+
+ // Both smi and heap number cases are handled.
+ __ JumpIfSmi(input_reg, &is_smi);
+
+ // Check for heap number.
+ __ lw(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
+ __ Branch(&heap_number, eq, scratch, Operand(factory()->heap_number_map()));
+
+ // Check for undefined. Undefined is converted to zero for clamping
+ // conversions.
+ DeoptimizeIf(ne, instr->environment(), input_reg,
+ Operand(factory()->undefined_value()));
+ __ mov(result_reg, zero_reg);
+ __ jmp(&done);
+
+ // Heap number case.
+ __ bind(&heap_number);
+ __ ldc1(double_scratch0(), FieldMemOperand(input_reg,
+ HeapNumber::kValueOffset));
+ __ ClampDoubleToUint8(result_reg, double_scratch0(), temp_reg);
+ __ jmp(&done);
+
+ // Smi case.
+ __ bind(&is_smi);
+ __ SmiUntag(scratch, input_reg);
+ __ ClampUint8(result_reg, scratch);
+
+ __ bind(&done);
+}
+
+
+void LCodeGen::LoadHeapObject(Register result,
+ Handle<HeapObject> object) {
+ if (heap()->InNewSpace(*object)) {
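+ // Objects in new space may move during GC, so instead of embedding the
+ // pointer directly the code loads it from a global property cell, which
+ // the garbage collector keeps up to date.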
+ Handle<JSGlobalPropertyCell> cell =
+ factory()->NewJSGlobalPropertyCell(object);
+ __ li(result, Operand(cell));
+ __ lw(result, FieldMemOperand(result, JSGlobalPropertyCell::kValueOffset));
+ } else {
+ __ li(result, Operand(object));
+ }
+}
+
+
+void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
+ Register temp1 = ToRegister(instr->TempAt(0));
+ Register temp2 = ToRegister(instr->TempAt(1));
+
+ Handle<JSObject> holder = instr->holder();
+ Handle<JSObject> current_prototype = instr->prototype();
+
+ // Load prototype object.
+ LoadHeapObject(temp1, current_prototype);
+
+ // Check prototype maps up to the holder.
+ while (!current_prototype.is_identical_to(holder)) {
+ __ lw(temp2, FieldMemOperand(temp1, HeapObject::kMapOffset));
+ DeoptimizeIf(ne,
+ instr->environment(),
+ temp2,
+ Operand(Handle<Map>(current_prototype->map())));
+ current_prototype =
+ Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype()));
+ // Load next prototype object.
+ LoadHeapObject(temp1, current_prototype);
+ }
+
+ // Check the holder map.
+ __ lw(temp2, FieldMemOperand(temp1, HeapObject::kMapOffset));
+ DeoptimizeIf(ne,
+ instr->environment(),
+ temp2,
+ Operand(Handle<Map>(current_prototype->map())));
+}
+
+
+void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
+ Handle<FixedArray> constant_elements = instr->hydrogen()->constant_elements();
+ ASSERT_EQ(2, constant_elements->length());
+ ElementsKind constant_elements_kind =
+ static_cast<ElementsKind>(Smi::cast(constant_elements->get(0))->value());
+
+ __ lw(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ lw(a3, FieldMemOperand(a3, JSFunction::kLiteralsOffset));
+ __ li(a2, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
+ __ li(a1, Operand(constant_elements));
+ __ Push(a3, a2, a1);
+
+ // Pick the right runtime function or stub to call.
+ int length = instr->hydrogen()->length();
+ if (instr->hydrogen()->IsCopyOnWrite()) {
+ ASSERT(instr->hydrogen()->depth() == 1);
+ FastCloneShallowArrayStub::Mode mode =
+ FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS;
+ FastCloneShallowArrayStub stub(mode, length);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ } else if (instr->hydrogen()->depth() > 1) {
+ CallRuntime(Runtime::kCreateArrayLiteral, 3, instr);
+ } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
+ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr);
+ } else {
+ FastCloneShallowArrayStub::Mode mode =
+ constant_elements_kind == FAST_DOUBLE_ELEMENTS
+ ? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
+ : FastCloneShallowArrayStub::CLONE_ELEMENTS;
+ FastCloneShallowArrayStub stub(mode, length);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ }
+}
+
+
+void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
+ ASSERT(ToRegister(instr->result()).is(v0));
+ __ lw(t0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ lw(t0, FieldMemOperand(t0, JSFunction::kLiteralsOffset));
+ __ li(a3, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
+ __ li(a2, Operand(instr->hydrogen()->constant_properties()));
+ __ li(a1, Operand(Smi::FromInt(instr->hydrogen()->fast_elements() ? 1 : 0)));
+ __ Push(t0, a3, a2, a1);
+
+ // Pick the right runtime function to call.
+ if (instr->hydrogen()->depth() > 1) {
+ CallRuntime(Runtime::kCreateObjectLiteral, 4, instr);
+ } else {
+ CallRuntime(Runtime::kCreateObjectLiteralShallow, 4, instr);
+ }
+}
+
+
+void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
+ ASSERT(ToRegister(instr->InputAt(0)).is(a0));
+ ASSERT(ToRegister(instr->result()).is(v0));
+ __ push(a0);
+ CallRuntime(Runtime::kToFastProperties, 1, instr);
+}
+
+
+void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
+ Label materialized;
+ // Registers will be used as follows:
+ // a3 = JS function.
+ // t3 = literals array.
+ // a1 = regexp literal.
+ // a0 = regexp literal clone.
+ // a2 and t0-t2 are used as temporaries.
+ __ lw(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ lw(t3, FieldMemOperand(a3, JSFunction::kLiteralsOffset));
+ int literal_offset = FixedArray::kHeaderSize +
+ instr->hydrogen()->literal_index() * kPointerSize;
+ __ lw(a1, FieldMemOperand(t3, literal_offset));
+ __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+ __ Branch(&materialized, ne, a1, Operand(at));
+
+ // Create regexp literal using runtime function.
+ // Result will be in v0.
+ __ li(t2, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
+ __ li(t1, Operand(instr->hydrogen()->pattern()));
+ __ li(t0, Operand(instr->hydrogen()->flags()));
+ __ Push(t3, t2, t1, t0);
+ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
+ __ mov(a1, v0);
+
+ __ bind(&materialized);
+ int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
+ Label allocated, runtime_allocate;
+
+ __ AllocateInNewSpace(size, v0, a2, a3, &runtime_allocate, TAG_OBJECT);
+ __ jmp(&allocated);
+
+ __ bind(&runtime_allocate);
+ __ li(a0, Operand(Smi::FromInt(size)));
+ __ Push(a1, a0);
+ CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
+ __ pop(a1);
+
+ __ bind(&allocated);
+ // Copy the content into the newly allocated memory.
+ // (Unroll copy loop once for better throughput).
+ for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
+ __ lw(a3, FieldMemOperand(a1, i));
+ __ lw(a2, FieldMemOperand(a1, i + kPointerSize));
+ __ sw(a3, FieldMemOperand(v0, i));
+ __ sw(a2, FieldMemOperand(v0, i + kPointerSize));
+ }
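+ // The loop copies two words at a time; if the total size is an odd number
+ // of words, copy the final word separately.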
+ if ((size % (2 * kPointerSize)) != 0) {
+ __ lw(a3, FieldMemOperand(a1, size - kPointerSize));
+ __ sw(a3, FieldMemOperand(v0, size - kPointerSize));
+ }
+}
+
+
+void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
+ // Use the fast-case closure allocation code that allocates in new
+ // space for nested functions that do not need their literals cloned.
+ Handle<SharedFunctionInfo> shared_info = instr->shared_info();
+ bool pretenure = instr->hydrogen()->pretenure();
+ if (!pretenure && shared_info->num_literals() == 0) {
+ FastNewClosureStub stub(shared_info->language_mode());
+ __ li(a1, Operand(shared_info));
+ __ push(a1);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ } else {
+ __ li(a2, Operand(shared_info));
+ __ li(a1, Operand(pretenure
+ ? factory()->true_value()
+ : factory()->false_value()));
+ __ Push(cp, a2, a1);
+ CallRuntime(Runtime::kNewClosure, 3, instr);
+ }
+}
+
+
+void LCodeGen::DoTypeof(LTypeof* instr) {
+ ASSERT(ToRegister(instr->result()).is(v0));
+ Register input = ToRegister(instr->InputAt(0));
+ __ push(input);
+ CallRuntime(Runtime::kTypeof, 1, instr);
+}
+
+
+void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
+ Register input = ToRegister(instr->InputAt(0));
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
+ Label* true_label = chunk_->GetAssemblyLabel(true_block);
+ Label* false_label = chunk_->GetAssemblyLabel(false_block);
+
+ Register cmp1 = no_reg;
+ Operand cmp2 = Operand(no_reg);
+
+ Condition final_branch_condition = EmitTypeofIs(true_label,
+ false_label,
+ input,
+ instr->type_literal(),
+ cmp1,
+ cmp2);
+
+ ASSERT(cmp1.is_valid());
+ ASSERT(!cmp2.is_reg() || cmp2.rm().is_valid());
+
+ if (final_branch_condition != kNoCondition) {
+ EmitBranch(true_block, false_block, final_branch_condition, cmp1, cmp2);
+ }
+}
+
+
+Condition LCodeGen::EmitTypeofIs(Label* true_label,
+ Label* false_label,
+ Register input,
+ Handle<String> type_name,
+ Register& cmp1,
+ Operand& cmp2) {
+ // This function makes heavy use of branch delay slots. The delay slots
+ // are used to load values that are safe to compute regardless of the
+ // type of the input register.
+ Condition final_branch_condition = kNoCondition;
+ Register scratch = scratch0();
+ if (type_name->Equals(heap()->number_symbol())) {
+ __ JumpIfSmi(input, true_label);
+ __ lw(input, FieldMemOperand(input, HeapObject::kMapOffset));
+ __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
+ cmp1 = input;
+ cmp2 = Operand(at);
+ final_branch_condition = eq;
+
+ } else if (type_name->Equals(heap()->string_symbol())) {
+ __ JumpIfSmi(input, false_label);
+ __ GetObjectType(input, input, scratch);
+ __ Branch(USE_DELAY_SLOT, false_label,
+ ge, scratch, Operand(FIRST_NONSTRING_TYPE));
+ // input is an object, so it is safe to load its map's bit field even if
+ // we take the other branch.
+ __ lbu(at, FieldMemOperand(input, Map::kBitFieldOffset));
+ __ And(at, at, 1 << Map::kIsUndetectable);
+ cmp1 = at;
+ cmp2 = Operand(zero_reg);
+ final_branch_condition = eq;
+
+ } else if (type_name->Equals(heap()->boolean_symbol())) {
+ __ LoadRoot(at, Heap::kTrueValueRootIndex);
+ __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
+ __ LoadRoot(at, Heap::kFalseValueRootIndex);
+ cmp1 = at;
+ cmp2 = Operand(input);
+ final_branch_condition = eq;
+
+ } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_symbol())) {
+ __ LoadRoot(at, Heap::kNullValueRootIndex);
+ cmp1 = at;
+ cmp2 = Operand(input);
+ final_branch_condition = eq;
+
+ } else if (type_name->Equals(heap()->undefined_symbol())) {
+ __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+ __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
+ // The first instruction of JumpIfSmi is an And, which is safe to execute
+ // in the delay slot.
+ __ JumpIfSmi(input, false_label);
+ // Check for undetectable objects => true.
+ __ lw(input, FieldMemOperand(input, HeapObject::kMapOffset));
+ __ lbu(at, FieldMemOperand(input, Map::kBitFieldOffset));
+ __ And(at, at, 1 << Map::kIsUndetectable);
+ cmp1 = at;
+ cmp2 = Operand(zero_reg);
+ final_branch_condition = ne;
+
+ } else if (type_name->Equals(heap()->function_symbol())) {
+ STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
+ __ JumpIfSmi(input, false_label);
+ __ GetObjectType(input, scratch, input);
+ __ Branch(true_label, eq, input, Operand(JS_FUNCTION_TYPE));
+ cmp1 = input;
+ cmp2 = Operand(JS_FUNCTION_PROXY_TYPE);
+ final_branch_condition = eq;
+
+ } else if (type_name->Equals(heap()->object_symbol())) {
+ __ JumpIfSmi(input, false_label);
+ if (!FLAG_harmony_typeof) {
+ __ LoadRoot(at, Heap::kNullValueRootIndex);
+ __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
+ }
+ // input is an object, so it is safe to use GetObjectType in the delay slot.
+ __ GetObjectType(input, input, scratch);
+ __ Branch(USE_DELAY_SLOT, false_label,
+ lt, scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+ // Still an object, so the InstanceType can be loaded.
+ __ lbu(scratch, FieldMemOperand(input, Map::kInstanceTypeOffset));
+ __ Branch(USE_DELAY_SLOT, false_label,
+ gt, scratch, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
+ // Still an object, so the BitField can be loaded.
+ // Check for undetectable objects => false.
+ __ lbu(at, FieldMemOperand(input, Map::kBitFieldOffset));
+ __ And(at, at, 1 << Map::kIsUndetectable);
+ cmp1 = at;
+ cmp2 = Operand(zero_reg);
+ final_branch_condition = eq;
+
+ } else {
+ cmp1 = at;
+ cmp2 = Operand(zero_reg); // Set to valid regs, to avoid caller assertion.
+ __ Branch(false_label);
+ }
+
+ return final_branch_condition;
+}
+
+
+void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
+ Register temp1 = ToRegister(instr->TempAt(0));
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+ EmitIsConstructCall(temp1, scratch0());
+
+ EmitBranch(true_block, false_block, eq, temp1,
+ Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
+}
+
+
+void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) {
+ ASSERT(!temp1.is(temp2));
+ // Get the frame pointer for the calling frame.
+ __ lw(temp1, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+
+ // Skip the arguments adaptor frame if it exists.
+ Label check_frame_marker;
+ __ lw(temp2, MemOperand(temp1, StandardFrameConstants::kContextOffset));
+ __ Branch(&check_frame_marker, ne, temp2,
+ Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ lw(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset));
+
+ // Check the marker in the calling frame.
+ __ bind(&check_frame_marker);
+ __ lw(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset));
+}
+
+
+void LCodeGen::EnsureSpaceForLazyDeopt() {
+ // Ensure that we have enough space after the previous lazy-bailout
+ // instruction for patching the code here.
+ int current_pc = masm()->pc_offset();
+ int patch_size = Deoptimizer::patch_size();
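+ // Lazy deoptimization patches a call sequence of patch_size() bytes over
+ // the code at the last recorded lazy-deopt position; emit nop padding so
+ // that patch can never overwrite instructions emitted since then.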
+ if (current_pc < last_lazy_deopt_pc_ + patch_size) {
+ int padding_size = last_lazy_deopt_pc_ + patch_size - current_pc;
+ ASSERT_EQ(0, padding_size % Assembler::kInstrSize);
+ while (padding_size > 0) {
+ __ nop();
+ padding_size -= Assembler::kInstrSize;
+ }
+ }
+ last_lazy_deopt_pc_ = masm()->pc_offset();
+}
+
+
+void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
+ EnsureSpaceForLazyDeopt();
+ ASSERT(instr->HasEnvironment());
+ LEnvironment* env = instr->environment();
+ RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
+ safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
+}
+
+
+void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
+ DeoptimizeIf(al, instr->environment(), zero_reg, Operand(zero_reg));
+}
+
+
+void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) {
+ Register object = ToRegister(instr->object());
+ Register key = ToRegister(instr->key());
+ Register strict = scratch0();
+ __ li(strict, Operand(Smi::FromInt(strict_mode_flag())));
+ __ Push(object, key, strict);
+ ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
+ LPointerMap* pointers = instr->pointer_map();
+ RecordPosition(pointers->position());
+ SafepointGenerator safepoint_generator(
+ this, pointers, Safepoint::kLazyDeopt);
+ __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, safepoint_generator);
+}
+
+
+void LCodeGen::DoIn(LIn* instr) {
+ Register obj = ToRegister(instr->object());
+ Register key = ToRegister(instr->key());
+ __ Push(key, obj);
+ ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
+ LPointerMap* pointers = instr->pointer_map();
+ RecordPosition(pointers->position());
+ SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
+ __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION, safepoint_generator);
+}
+
+
+void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
+ PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
+ RecordSafepointWithLazyDeopt(
+ instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+ ASSERT(instr->HasEnvironment());
+ LEnvironment* env = instr->environment();
+ safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
+}
+
+
+void LCodeGen::DoStackCheck(LStackCheck* instr) {
+ class DeferredStackCheck: public LDeferredCode {
+ public:
+ DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
+ virtual LInstruction* instr() { return instr_; }
+ private:
+ LStackCheck* instr_;
+ };
+
+ ASSERT(instr->HasEnvironment());
+ LEnvironment* env = instr->environment();
+ // There is no LLazyBailout instruction for stack-checks. We have to
+ // prepare for lazy deoptimization explicitly here.
+ if (instr->hydrogen()->is_function_entry()) {
+ // Perform stack overflow check.
+ Label done;
+ __ LoadRoot(at, Heap::kStackLimitRootIndex);
+ __ Branch(&done, hs, sp, Operand(at));
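+ // The stack limit is read from the root array; the runtime can also lower
+ // it to request an interrupt, so the stub call below serves as both an
+ // overflow and an interrupt check.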
+ StackCheckStub stub;
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ EnsureSpaceForLazyDeopt();
+ __ bind(&done);
+ RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
+ safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
+ } else {
+ ASSERT(instr->hydrogen()->is_backwards_branch());
+ // Perform stack overflow check if this goto needs it before jumping.
+ DeferredStackCheck* deferred_stack_check =
+ new DeferredStackCheck(this, instr);
+ __ LoadRoot(at, Heap::kStackLimitRootIndex);
+ __ Branch(deferred_stack_check->entry(), lo, sp, Operand(at));
+ EnsureSpaceForLazyDeopt();
+ __ bind(instr->done_label());
+ deferred_stack_check->SetExit(instr->done_label());
+ RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
+ // Don't record a deoptimization index for the safepoint here.
+ // This will be done explicitly when emitting call and the safepoint in
+ // the deferred code.
+ }
+}
+
+
+void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
+ // This is a pseudo-instruction that ensures that the environment here is
+ // properly registered for deoptimization and records the assembler's PC
+ // offset.
+ LEnvironment* environment = instr->environment();
+ environment->SetSpilledRegisters(instr->SpilledRegisterArray(),
+ instr->SpilledDoubleRegisterArray());
+
+ // If the environment were already registered, we would have no way of
+ // backpatching it with the spill slot operands.
+ ASSERT(!environment->HasBeenRegistered());
+ RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
+ ASSERT(osr_pc_offset_ == -1);
+ osr_pc_offset_ = masm()->pc_offset();
+}
+
+
+#undef __
+
+} } // namespace v8::internal
diff --git a/deps/v8/src/mips/lithium-codegen-mips.h b/deps/v8/src/mips/lithium-codegen-mips.h
index 2aec68456..c67b46be7 100644
--- a/deps/v8/src/mips/lithium-codegen-mips.h
+++ b/deps/v8/src/mips/lithium-codegen-mips.h
@@ -29,35 +29,405 @@
#define V8_MIPS_LITHIUM_CODEGEN_MIPS_H_
#include "mips/lithium-mips.h"
-
+#include "mips/lithium-gap-resolver-mips.h"
#include "deoptimizer.h"
#include "safepoint-table.h"
#include "scopes.h"
-// Note: this file was taken from the X64 version. ARM has a partially working
-// lithium implementation, but for now it is not ported to mips.
-
namespace v8 {
namespace internal {
// Forward declarations.
class LDeferredCode;
+class SafepointGenerator;
class LCodeGen BASE_EMBEDDED {
public:
- LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info) { }
+ LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
+ : chunk_(chunk),
+ masm_(assembler),
+ info_(info),
+ current_block_(-1),
+ current_instruction_(-1),
+ instructions_(chunk->instructions()),
+ deoptimizations_(4),
+ deopt_jump_table_(4),
+ deoptimization_literals_(8),
+ inlined_function_count_(0),
+ scope_(info->scope()),
+ status_(UNUSED),
+ deferred_(8),
+ osr_pc_offset_(-1),
+ last_lazy_deopt_pc_(0),
+ resolver_(this),
+ expected_safepoint_kind_(Safepoint::kSimple) {
+ PopulateDeoptimizationLiteralsWithInlinedFunctions();
+ }
+
+
+ // Simple accessors.
+ MacroAssembler* masm() const { return masm_; }
+ CompilationInfo* info() const { return info_; }
+ Isolate* isolate() const { return info_->isolate(); }
+ Factory* factory() const { return isolate()->factory(); }
+ Heap* heap() const { return isolate()->heap(); }
+
+ // Support for converting LOperands to assembler types.
+ // LOperand must be a register.
+ Register ToRegister(LOperand* op) const;
+
+ // LOperand is loaded into scratch, unless already a register.
+ Register EmitLoadRegister(LOperand* op, Register scratch);
+
+ // LOperand must be a double register.
+ DoubleRegister ToDoubleRegister(LOperand* op) const;
+
+ // LOperand is loaded into dbl_scratch, unless already a double register.
+ DoubleRegister EmitLoadDoubleRegister(LOperand* op,
+ FloatRegister flt_scratch,
+ DoubleRegister dbl_scratch);
+ int ToInteger32(LConstantOperand* op) const;
+ double ToDouble(LConstantOperand* op) const;
+ Operand ToOperand(LOperand* op);
+ MemOperand ToMemOperand(LOperand* op) const;
+ // Returns a MemOperand pointing to the high word of a DoubleStackSlot.
+ MemOperand ToHighMemOperand(LOperand* op) const;
// Try to generate code for the entire chunk, but it may fail if the
// chunk contains constructs we cannot handle. Returns true if the
// code generation attempt succeeded.
- bool GenerateCode() {
- UNIMPLEMENTED();
- return false;
- }
+ bool GenerateCode();
// Finish the code by setting stack height, safepoint, and bailout
// information on it.
- void FinishCode(Handle<Code> code) { UNIMPLEMENTED(); }
+ void FinishCode(Handle<Code> code);
+
+ void DoDeferredNumberTagD(LNumberTagD* instr);
+ void DoDeferredNumberTagI(LNumberTagI* instr);
+ void DoDeferredTaggedToI(LTaggedToI* instr);
+ void DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr);
+ void DoDeferredStackCheck(LStackCheck* instr);
+ void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
+ void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
+ void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
+ Label* map_check);
+
+ // Parallel move support.
+ void DoParallelMove(LParallelMove* move);
+ void DoGap(LGap* instr);
+
+ // Emit frame translation commands for an environment.
+ void WriteTranslation(LEnvironment* environment, Translation* translation);
+
+ // Declare methods that deal with the individual node types.
+#define DECLARE_DO(type) void Do##type(L##type* node);
+ LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
+#undef DECLARE_DO
+
+ private:
+ enum Status {
+ UNUSED,
+ GENERATING,
+ DONE,
+ ABORTED
+ };
+
+ bool is_unused() const { return status_ == UNUSED; }
+ bool is_generating() const { return status_ == GENERATING; }
+ bool is_done() const { return status_ == DONE; }
+ bool is_aborted() const { return status_ == ABORTED; }
+
+ StrictModeFlag strict_mode_flag() const {
+ return info()->is_classic_mode() ? kNonStrictMode : kStrictMode;
+ }
+
+ LChunk* chunk() const { return chunk_; }
+ Scope* scope() const { return scope_; }
+ HGraph* graph() const { return chunk_->graph(); }
+
+ Register scratch0() { return lithiumScratchReg; }
+ Register scratch1() { return lithiumScratchReg2; }
+ DoubleRegister double_scratch0() { return lithiumScratchDouble; }
+
+ int GetNextEmittedBlock(int block);
+ LInstruction* GetNextInstruction();
+
+ void EmitClassOfTest(Label* if_true,
+ Label* if_false,
+ Handle<String> class_name,
+ Register input,
+ Register temporary,
+ Register temporary2);
+
+ int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
+ int GetParameterCount() const { return scope()->num_parameters(); }
+
+ void Abort(const char* format, ...);
+ void Comment(const char* format, ...);
+
+ void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code); }
+
+ // Code generation passes. Returns true if code generation should
+ // continue.
+ bool GeneratePrologue();
+ bool GenerateBody();
+ bool GenerateDeferredCode();
+ bool GenerateDeoptJumpTable();
+ bool GenerateSafepointTable();
+
+ enum SafepointMode {
+ RECORD_SIMPLE_SAFEPOINT,
+ RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS
+ };
+
+ void CallCode(Handle<Code> code,
+ RelocInfo::Mode mode,
+ LInstruction* instr);
+
+ void CallCodeGeneric(Handle<Code> code,
+ RelocInfo::Mode mode,
+ LInstruction* instr,
+ SafepointMode safepoint_mode);
+
+ void CallRuntime(const Runtime::Function* function,
+ int num_arguments,
+ LInstruction* instr);
+
+ void CallRuntime(Runtime::FunctionId id,
+ int num_arguments,
+ LInstruction* instr) {
+ const Runtime::Function* function = Runtime::FunctionForId(id);
+ CallRuntime(function, num_arguments, instr);
+ }
+
+ void CallRuntimeFromDeferred(Runtime::FunctionId id,
+ int argc,
+ LInstruction* instr);
+
+ // Generate a direct call to a known function. Expects the function
+ // to be in a1.
+ void CallKnownFunction(Handle<JSFunction> function,
+ int arity,
+ LInstruction* instr,
+ CallKind call_kind);
+
+ void LoadHeapObject(Register result, Handle<HeapObject> object);
+
+ void RecordSafepointWithLazyDeopt(LInstruction* instr,
+ SafepointMode safepoint_mode);
+
+ void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
+ Safepoint::DeoptMode mode);
+ void DeoptimizeIf(Condition cc,
+ LEnvironment* environment,
+ Register src1,
+ const Operand& src2);
+
+ void AddToTranslation(Translation* translation,
+ LOperand* op,
+ bool is_tagged);
+ void PopulateDeoptimizationData(Handle<Code> code);
+ int DefineDeoptimizationLiteral(Handle<Object> literal);
+
+ void PopulateDeoptimizationLiteralsWithInlinedFunctions();
+
+ Register ToRegister(int index) const;
+ DoubleRegister ToDoubleRegister(int index) const;
+
+ // Specific math operations - used from DoUnaryMathOperation.
+ void EmitIntegerMathAbs(LUnaryMathOperation* instr);
+ void DoMathAbs(LUnaryMathOperation* instr);
+ void DoMathFloor(LUnaryMathOperation* instr);
+ void DoMathRound(LUnaryMathOperation* instr);
+ void DoMathSqrt(LUnaryMathOperation* instr);
+ void DoMathPowHalf(LUnaryMathOperation* instr);
+ void DoMathLog(LUnaryMathOperation* instr);
+ void DoMathTan(LUnaryMathOperation* instr);
+ void DoMathCos(LUnaryMathOperation* instr);
+ void DoMathSin(LUnaryMathOperation* instr);
+
+ // Support for recording safepoint and position information.
+ void RecordSafepoint(LPointerMap* pointers,
+ Safepoint::Kind kind,
+ int arguments,
+ Safepoint::DeoptMode mode);
+ void RecordSafepoint(LPointerMap* pointers, Safepoint::DeoptMode mode);
+ void RecordSafepoint(Safepoint::DeoptMode mode);
+ void RecordSafepointWithRegisters(LPointerMap* pointers,
+ int arguments,
+ Safepoint::DeoptMode mode);
+ void RecordSafepointWithRegistersAndDoubles(LPointerMap* pointers,
+ int arguments,
+ Safepoint::DeoptMode mode);
+ void RecordPosition(int position);
+
+ static Condition TokenToCondition(Token::Value op, bool is_unsigned);
+ void EmitGoto(int block);
+ void EmitBranch(int left_block,
+ int right_block,
+ Condition cc,
+ Register src1,
+ const Operand& src2);
+ void EmitBranchF(int left_block,
+ int right_block,
+ Condition cc,
+ FPURegister src1,
+ FPURegister src2);
+ void EmitCmpI(LOperand* left, LOperand* right);
+ void EmitNumberUntagD(Register input,
+ DoubleRegister result,
+ bool deoptimize_on_undefined,
+ LEnvironment* env);
+
+ // Emits optimized code for typeof x == "y". Modifies input register.
+ // Returns the condition on which a final split to
+ // true and false labels should be made, to optimize fallthrough.
+ // Returns two registers in cmp1 and cmp2 that can be used in the
+ // Branch instruction after EmitTypeofIs.
+ Condition EmitTypeofIs(Label* true_label,
+ Label* false_label,
+ Register input,
+ Handle<String> type_name,
+ Register& cmp1,
+ Operand& cmp2);
+
+ // Emits optimized code for %_IsObject(x). Preserves input register.
+ // Returns the condition on which a final split to
+ // true and false labels should be made, to optimize fallthrough.
+ Condition EmitIsObject(Register input,
+ Register temp1,
+ Register temp2,
+ Label* is_not_object,
+ Label* is_object);
+
+ // Emits optimized code for %_IsString(x). Preserves input register.
+ // Returns the condition on which a final split to
+ // true and false labels should be made, to optimize fallthrough.
+ Condition EmitIsString(Register input,
+ Register temp1,
+ Label* is_not_string);
+
+ // Emits optimized code for %_IsConstructCall().
+ // Caller should branch on equal condition.
+ void EmitIsConstructCall(Register temp1, Register temp2);
+
+ void EmitLoadFieldOrConstantFunction(Register result,
+ Register object,
+ Handle<Map> type,
+ Handle<String> name);
+
+ struct JumpTableEntry {
+ explicit inline JumpTableEntry(Address entry)
+ : label(),
+ address(entry) { }
+ Label label;
+ Address address;
+ };
+
+ void EnsureSpaceForLazyDeopt();
+
+ LChunk* const chunk_;
+ MacroAssembler* const masm_;
+ CompilationInfo* const info_;
+
+ int current_block_;
+ int current_instruction_;
+ const ZoneList<LInstruction*>* instructions_;
+ ZoneList<LEnvironment*> deoptimizations_;
+ ZoneList<JumpTableEntry> deopt_jump_table_;
+ ZoneList<Handle<Object> > deoptimization_literals_;
+ int inlined_function_count_;
+ Scope* const scope_;
+ Status status_;
+ TranslationBuffer translations_;
+ ZoneList<LDeferredCode*> deferred_;
+ int osr_pc_offset_;
+ int last_lazy_deopt_pc_;
+
+ // Builder that keeps track of safepoints in the code. The table
+ // itself is emitted at the end of the generated code.
+ SafepointTableBuilder safepoints_;
+
+ // Compiles a set of parallel moves into a sequential list of moves.
+ LGapResolver resolver_;
+
+ Safepoint::Kind expected_safepoint_kind_;
+
+ class PushSafepointRegistersScope BASE_EMBEDDED {
+ public:
+ PushSafepointRegistersScope(LCodeGen* codegen,
+ Safepoint::Kind kind)
+ : codegen_(codegen) {
+ ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
+ codegen_->expected_safepoint_kind_ = kind;
+
+ switch (codegen_->expected_safepoint_kind_) {
+ case Safepoint::kWithRegisters:
+ codegen_->masm_->PushSafepointRegisters();
+ break;
+ case Safepoint::kWithRegistersAndDoubles:
+ codegen_->masm_->PushSafepointRegistersAndDoubles();
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+
+ ~PushSafepointRegistersScope() {
+ Safepoint::Kind kind = codegen_->expected_safepoint_kind_;
+ ASSERT((kind & Safepoint::kWithRegisters) != 0);
+ switch (kind) {
+ case Safepoint::kWithRegisters:
+ codegen_->masm_->PopSafepointRegisters();
+ break;
+ case Safepoint::kWithRegistersAndDoubles:
+ codegen_->masm_->PopSafepointRegistersAndDoubles();
+ break;
+ default:
+ UNREACHABLE();
+ }
+ codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
+ }
+
+ private:
+ LCodeGen* codegen_;
+ };
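+
+ // A minimal usage sketch (illustrative; Runtime::kSomeFunction is a
+ // placeholder, not a real runtime id): the scope brackets a call made
+ // from deferred code so safepoint registers are pushed and popped
+ // around it.
+ //
+ //   {
+ //     PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ //     CallRuntimeFromDeferred(Runtime::kSomeFunction, argc, instr);
+ //   }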
+
+ friend class LDeferredCode;
+ friend class LEnvironment;
+ friend class SafepointGenerator;
+ DISALLOW_COPY_AND_ASSIGN(LCodeGen);
+};
+
+
+class LDeferredCode: public ZoneObject {
+ public:
+ explicit LDeferredCode(LCodeGen* codegen)
+ : codegen_(codegen),
+ external_exit_(NULL),
+ instruction_index_(codegen->current_instruction_) {
+ codegen->AddDeferredCode(this);
+ }
+
+ virtual ~LDeferredCode() { }
+ virtual void Generate() = 0;
+ virtual LInstruction* instr() = 0;
+
+ void SetExit(Label *exit) { external_exit_ = exit; }
+ Label* entry() { return &entry_; }
+ Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }
+ int instruction_index() const { return instruction_index_; }
+
+ protected:
+ LCodeGen* codegen() const { return codegen_; }
+ MacroAssembler* masm() const { return codegen_->masm(); }
+
+ private:
+ LCodeGen* codegen_;
+ Label entry_;
+ Label exit_;
+ Label* external_exit_;
+ int instruction_index_;
};
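+
+// A minimal sketch of a typical subclass (illustrative; not part of this
+// header). A deferred entry point is wired back to the code generator:
+//
+//   class DeferredNumberTagI: public LDeferredCode {
+//    public:
+//     DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
+//         : LDeferredCode(codegen), instr_(instr) { }
+//     virtual void Generate() { codegen()->DoDeferredNumberTagI(instr_); }
+//     virtual LInstruction* instr() { return instr_; }
+//    private:
+//     LNumberTagI* instr_;
+//   };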
} } // namespace v8::internal
diff --git a/deps/v8/src/mips/lithium-gap-resolver-mips.cc b/deps/v8/src/mips/lithium-gap-resolver-mips.cc
new file mode 100644
index 000000000..8f7f89cf5
--- /dev/null
+++ b/deps/v8/src/mips/lithium-gap-resolver-mips.cc
@@ -0,0 +1,309 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "mips/lithium-gap-resolver-mips.h"
+#include "mips/lithium-codegen-mips.h"
+
+namespace v8 {
+namespace internal {
+
+static const Register kSavedValueRegister = lithiumScratchReg;
+static const DoubleRegister kSavedDoubleValueRegister = lithiumScratchDouble;
+
+LGapResolver::LGapResolver(LCodeGen* owner)
+ : cgen_(owner),
+ moves_(32),
+ root_index_(0),
+ in_cycle_(false),
+ saved_destination_(NULL) {}
+
+
+void LGapResolver::Resolve(LParallelMove* parallel_move) {
+ ASSERT(moves_.is_empty());
+ // Build up a worklist of moves.
+ BuildInitialMoveList(parallel_move);
+
+ for (int i = 0; i < moves_.length(); ++i) {
+ LMoveOperands move = moves_[i];
+ // Skip constants to perform them last. They don't block other moves
+ // and skipping such moves with register destinations keeps those
+ // registers free for the whole algorithm.
+ if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
+ root_index_ = i; // A cycle is found by reaching this move again.
+ PerformMove(i);
+ if (in_cycle_) {
+ RestoreValue();
+ }
+ }
+ }
+
+ // Perform the moves with constant sources.
+ for (int i = 0; i < moves_.length(); ++i) {
+ if (!moves_[i].IsEliminated()) {
+ ASSERT(moves_[i].source()->IsConstantOperand());
+ EmitMove(i);
+ }
+ }
+
+ moves_.Rewind(0);
+}
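+
+// Illustrative trace: for the parallel move {a0 -> a1, a1 -> a0},
+// PerformMove(0) recurses into move 1, which is blocked by the pending
+// root, so BreakCycle saves a1 into kSavedValueRegister and eliminates
+// move 1. EmitMove(0) then copies a0 into a1, and RestoreValue writes the
+// saved value into a0, completing the swap.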
+
+
+void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
+ // Perform a linear sweep of the moves to add them to the initial list of
+ // moves to perform, ignoring any move that is redundant (the source is
+ // the same as the destination, the destination is ignored and
+ // unallocated, or the move was already eliminated).
+ const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
+ for (int i = 0; i < moves->length(); ++i) {
+ LMoveOperands move = moves->at(i);
+ if (!move.IsRedundant()) moves_.Add(move);
+ }
+ Verify();
+}
+
+
+void LGapResolver::PerformMove(int index) {
+ // Each call to this function performs a move and deletes it from the move
+ // graph. We first recursively perform any move blocking this one. We
+ // mark a move as "pending" on entry to PerformMove in order to detect
+ // cycles in the move graph.
+
+ // We can only find a cycle, when doing a depth-first traversal of moves,
+ // by encountering the starting move again. So by spilling the source of
+ // the starting move, we break the cycle. All moves are then unblocked,
+ // and the starting move is completed by writing the spilled value to
+ // its destination. All other moves from the spilled source have been
+ // completed prior to breaking the cycle.
+ // An additional complication is that moves to MemOperands with large
+ // offsets (more than 1K or 4K) require us to spill the saved value to
+ // the stack in order to free up the register.
+ ASSERT(!moves_[index].IsPending());
+ ASSERT(!moves_[index].IsRedundant());
+
+ // Clear this move's destination to indicate a pending move. The actual
+ // destination is saved in a stack allocated local. Multiple moves can
+ // be pending because this function is recursive.
+ ASSERT(moves_[index].source() != NULL); // Or else it will look eliminated.
+ LOperand* destination = moves_[index].destination();
+ moves_[index].set_destination(NULL);
+
+ // Perform a depth-first traversal of the move graph to resolve
+ // dependencies. Any unperformed, unpending move with a source the same
+ // as this one's destination blocks this one so recursively perform all
+ // such moves.
+ for (int i = 0; i < moves_.length(); ++i) {
+ LMoveOperands other_move = moves_[i];
+ if (other_move.Blocks(destination) && !other_move.IsPending()) {
+ PerformMove(i);
+ // If there is a blocking, pending move, it must be moves_[root_index_],
+ // and all other moves with the same source as moves_[root_index_] are
+ // successfully executed (because they are cycle-free) by this loop.
+ }
+ }
+
+ // We are about to resolve this move and don't need it marked as
+ // pending, so restore its destination.
+ moves_[index].set_destination(destination);
+
+ // The move may be blocked on a pending move, which must be the starting move.
+ // In this case, we have a cycle, and we save the source of this move to
+ // a scratch register to break it.
+ LMoveOperands other_move = moves_[root_index_];
+ if (other_move.Blocks(destination)) {
+ ASSERT(other_move.IsPending());
+ BreakCycle(index);
+ return;
+ }
+
+ // This move is no longer blocked.
+ EmitMove(index);
+}
+
+
+void LGapResolver::Verify() {
+#ifdef ENABLE_SLOW_ASSERTS
+ // No operand should be the destination for more than one move.
+ for (int i = 0; i < moves_.length(); ++i) {
+ LOperand* destination = moves_[i].destination();
+ for (int j = i + 1; j < moves_.length(); ++j) {
+ SLOW_ASSERT(!destination->Equals(moves_[j].destination()));
+ }
+ }
+#endif
+}
+
+#define __ ACCESS_MASM(cgen_->masm())
+
+void LGapResolver::BreakCycle(int index) {
+ // We save in a register the value that should end up in the source of
+ // moves_[root_index]. After performing all moves in the tree rooted
+ // in that move, we save the value to that source.
+ ASSERT(moves_[index].destination()->Equals(moves_[root_index_].source()));
+ ASSERT(!in_cycle_);
+ in_cycle_ = true;
+ LOperand* source = moves_[index].source();
+ saved_destination_ = moves_[index].destination();
+ if (source->IsRegister()) {
+ __ mov(kSavedValueRegister, cgen_->ToRegister(source));
+ } else if (source->IsStackSlot()) {
+ __ lw(kSavedValueRegister, cgen_->ToMemOperand(source));
+ } else if (source->IsDoubleRegister()) {
+ __ mov_d(kSavedDoubleValueRegister, cgen_->ToDoubleRegister(source));
+ } else if (source->IsDoubleStackSlot()) {
+ __ ldc1(kSavedDoubleValueRegister, cgen_->ToMemOperand(source));
+ } else {
+ UNREACHABLE();
+ }
+ // This move will be done by restoring the saved value to the destination.
+ moves_[index].Eliminate();
+}
+
+
+void LGapResolver::RestoreValue() {
+ ASSERT(in_cycle_);
+ ASSERT(saved_destination_ != NULL);
+
+ // Spilled value is in kSavedValueRegister or kSavedDoubleValueRegister.
+ if (saved_destination_->IsRegister()) {
+ __ mov(cgen_->ToRegister(saved_destination_), kSavedValueRegister);
+ } else if (saved_destination_->IsStackSlot()) {
+ __ sw(kSavedValueRegister, cgen_->ToMemOperand(saved_destination_));
+ } else if (saved_destination_->IsDoubleRegister()) {
+ __ mov_d(cgen_->ToDoubleRegister(saved_destination_),
+ kSavedDoubleValueRegister);
+ } else if (saved_destination_->IsDoubleStackSlot()) {
+ __ sdc1(kSavedDoubleValueRegister,
+ cgen_->ToMemOperand(saved_destination_));
+ } else {
+ UNREACHABLE();
+ }
+
+ in_cycle_ = false;
+ saved_destination_ = NULL;
+}
+
+
+void LGapResolver::EmitMove(int index) {
+ LOperand* source = moves_[index].source();
+ LOperand* destination = moves_[index].destination();
+
+ // Dispatch on the source and destination operand kinds. Not all
+ // combinations are possible.
+
+ if (source->IsRegister()) {
+ Register source_register = cgen_->ToRegister(source);
+ if (destination->IsRegister()) {
+ __ mov(cgen_->ToRegister(destination), source_register);
+ } else {
+ ASSERT(destination->IsStackSlot());
+ __ sw(source_register, cgen_->ToMemOperand(destination));
+ }
+
+ } else if (source->IsStackSlot()) {
+ MemOperand source_operand = cgen_->ToMemOperand(source);
+ if (destination->IsRegister()) {
+ __ lw(cgen_->ToRegister(destination), source_operand);
+ } else {
+ ASSERT(destination->IsStackSlot());
+ MemOperand destination_operand = cgen_->ToMemOperand(destination);
+ if (in_cycle_) {
+ if (!destination_operand.OffsetIsInt16Encodable()) {
+ // 'at' is overwritten while saving the value to the destination.
+ // Therefore we can't use 'at'. It is OK if the read from the source
+ // destroys 'at', since that happens before the value is read.
+ // This uses only a single reg of the double reg-pair.
+ __ lwc1(kSavedDoubleValueRegister, source_operand);
+ __ swc1(kSavedDoubleValueRegister, destination_operand);
+ } else {
+ __ lw(at, source_operand);
+ __ sw(at, destination_operand);
+ }
+ } else {
+ __ lw(kSavedValueRegister, source_operand);
+ __ sw(kSavedValueRegister, destination_operand);
+ }
+ }
+
+ } else if (source->IsConstantOperand()) {
+ Operand source_operand = cgen_->ToOperand(source);
+ if (destination->IsRegister()) {
+ __ li(cgen_->ToRegister(destination), source_operand);
+ } else {
+ ASSERT(destination->IsStackSlot());
+ ASSERT(!in_cycle_); // Constant moves happen after all cycles are gone.
+ MemOperand destination_operand = cgen_->ToMemOperand(destination);
+ __ li(kSavedValueRegister, source_operand);
+ __ sw(kSavedValueRegister, destination_operand);
+ }
+
+ } else if (source->IsDoubleRegister()) {
+ DoubleRegister source_register = cgen_->ToDoubleRegister(source);
+ if (destination->IsDoubleRegister()) {
+ __ mov_d(cgen_->ToDoubleRegister(destination), source_register);
+ } else {
+ ASSERT(destination->IsDoubleStackSlot());
+ MemOperand destination_operand = cgen_->ToMemOperand(destination);
+ __ sdc1(source_register, destination_operand);
+ }
+
+ } else if (source->IsDoubleStackSlot()) {
+ MemOperand source_operand = cgen_->ToMemOperand(source);
+ if (destination->IsDoubleRegister()) {
+ __ ldc1(cgen_->ToDoubleRegister(destination), source_operand);
+ } else {
+ ASSERT(destination->IsDoubleStackSlot());
+ MemOperand destination_operand = cgen_->ToMemOperand(destination);
+ if (in_cycle_) {
+ // kSavedDoubleValueRegister was used to break the cycle,
+ // but kSavedValueRegister is free.
+ MemOperand source_high_operand =
+ cgen_->ToHighMemOperand(source);
+ MemOperand destination_high_operand =
+ cgen_->ToHighMemOperand(destination);
+ __ lw(kSavedValueRegister, source_operand);
+ __ sw(kSavedValueRegister, destination_operand);
+ __ lw(kSavedValueRegister, source_high_operand);
+ __ sw(kSavedValueRegister, destination_high_operand);
+ } else {
+ __ ldc1(kSavedDoubleValueRegister, source_operand);
+ __ sdc1(kSavedDoubleValueRegister, destination_operand);
+ }
+ }
+ } else {
+ UNREACHABLE();
+ }
+
+ moves_[index].Eliminate();
+}
+
+
+#undef __
+
+} } // namespace v8::internal
diff --git a/deps/v8/src/mips/lithium-gap-resolver-mips.h b/deps/v8/src/mips/lithium-gap-resolver-mips.h
new file mode 100644
index 000000000..2506e38c3
--- /dev/null
+++ b/deps/v8/src/mips/lithium-gap-resolver-mips.h
@@ -0,0 +1,83 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_MIPS_LITHIUM_GAP_RESOLVER_MIPS_H_
+#define V8_MIPS_LITHIUM_GAP_RESOLVER_MIPS_H_
+
+#include "v8.h"
+
+#include "lithium.h"
+
+namespace v8 {
+namespace internal {
+
+class LCodeGen;
+class LGapResolver;
+
+class LGapResolver BASE_EMBEDDED {
+ public:
+ explicit LGapResolver(LCodeGen* owner);
+
+ // Resolve a set of parallel moves, emitting assembler instructions.
+ void Resolve(LParallelMove* parallel_move);
+
+ private:
+ // Build the initial list of moves.
+ void BuildInitialMoveList(LParallelMove* parallel_move);
+
+ // Perform the move at the moves_ index in question (possibly requiring
+ // other moves to satisfy dependencies).
+ void PerformMove(int index);
+
+ // If a cycle is found in the series of moves, save the blocking value to
+ // a scratch register. The cycle must be found by hitting the root of the
+ // depth-first search.
+ void BreakCycle(int index);
+
+ // After a cycle has been resolved, restore the value from the scratch
+ // register to its proper destination.
+ void RestoreValue();
+
+ // Emit a move and remove it from the move graph.
+ void EmitMove(int index);
+
+ // Verify the move list before performing moves.
+ void Verify();
+
+ LCodeGen* cgen_;
+
+ // List of moves not yet resolved.
+ ZoneList<LMoveOperands> moves_;
+
+ int root_index_;
+ bool in_cycle_;
+ LOperand* saved_destination_;
+};
+
+} } // namespace v8::internal
+
+#endif // V8_MIPS_LITHIUM_GAP_RESOLVER_MIPS_H_
diff --git a/deps/v8/src/mips/lithium-mips.cc b/deps/v8/src/mips/lithium-mips.cc
new file mode 100644
index 000000000..81a193a62
--- /dev/null
+++ b/deps/v8/src/mips/lithium-mips.cc
@@ -0,0 +1,2237 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "lithium-allocator-inl.h"
+#include "mips/lithium-mips.h"
+#include "mips/lithium-codegen-mips.h"
+
+namespace v8 {
+namespace internal {
+
+#define DEFINE_COMPILE(type) \
+ void L##type::CompileToNative(LCodeGen* generator) { \
+ generator->Do##type(this); \
+ }
+LITHIUM_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE)
+#undef DEFINE_COMPILE
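+
+// For example, DEFINE_COMPILE(Goto) expands to:
+//
+//   void LGoto::CompileToNative(LCodeGen* generator) {
+//     generator->DoGoto(this);
+//   }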
+
+LOsrEntry::LOsrEntry() {
+ for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) {
+ register_spills_[i] = NULL;
+ }
+ for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; ++i) {
+ double_register_spills_[i] = NULL;
+ }
+}
+
+
+void LOsrEntry::MarkSpilledRegister(int allocation_index,
+ LOperand* spill_operand) {
+ ASSERT(spill_operand->IsStackSlot());
+ ASSERT(register_spills_[allocation_index] == NULL);
+ register_spills_[allocation_index] = spill_operand;
+}
+
+
+#ifdef DEBUG
+void LInstruction::VerifyCall() {
+ // Call instructions can use only fixed registers as temporaries and
+ // outputs because all registers are blocked by the calling convention.
+ // Input operands must use a fixed register, a use-at-start policy, or
+ // a non-register policy.
+ ASSERT(Output() == NULL ||
+ LUnallocated::cast(Output())->HasFixedPolicy() ||
+ !LUnallocated::cast(Output())->HasRegisterPolicy());
+ for (UseIterator it(this); !it.Done(); it.Advance()) {
+ LUnallocated* operand = LUnallocated::cast(it.Current());
+ ASSERT(operand->HasFixedPolicy() ||
+ operand->IsUsedAtStart());
+ }
+ for (TempIterator it(this); !it.Done(); it.Advance()) {
+ LUnallocated* operand = LUnallocated::cast(it.Current());
+ ASSERT(operand->HasFixedPolicy() || !operand->HasRegisterPolicy());
+ }
+}
+#endif
+
+
+void LOsrEntry::MarkSpilledDoubleRegister(int allocation_index,
+ LOperand* spill_operand) {
+ ASSERT(spill_operand->IsDoubleStackSlot());
+ ASSERT(double_register_spills_[allocation_index] == NULL);
+ double_register_spills_[allocation_index] = spill_operand;
+}
+
+
+void LInstruction::PrintTo(StringStream* stream) {
+ stream->Add("%s ", this->Mnemonic());
+
+ PrintOutputOperandTo(stream);
+
+ PrintDataTo(stream);
+
+ if (HasEnvironment()) {
+ stream->Add(" ");
+ environment()->PrintTo(stream);
+ }
+
+ if (HasPointerMap()) {
+ stream->Add(" ");
+ pointer_map()->PrintTo(stream);
+ }
+}
+
+
+template<int R, int I, int T>
+void LTemplateInstruction<R, I, T>::PrintDataTo(StringStream* stream) {
+ stream->Add("= ");
+ for (int i = 0; i < inputs_.length(); i++) {
+ if (i > 0) stream->Add(" ");
+ inputs_[i]->PrintTo(stream);
+ }
+}
+
+
+template<int R, int I, int T>
+void LTemplateInstruction<R, I, T>::PrintOutputOperandTo(StringStream* stream) {
+ for (int i = 0; i < results_.length(); i++) {
+ if (i > 0) stream->Add(" ");
+ results_[i]->PrintTo(stream);
+ }
+}
+
+
+void LLabel::PrintDataTo(StringStream* stream) {
+ LGap::PrintDataTo(stream);
+ LLabel* rep = replacement();
+ if (rep != NULL) {
+ stream->Add(" Dead block replaced with B%d", rep->block_id());
+ }
+}
+
+
+bool LGap::IsRedundant() const {
+ for (int i = 0; i < 4; i++) {
+ if (parallel_moves_[i] != NULL && !parallel_moves_[i]->IsRedundant()) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+
+void LGap::PrintDataTo(StringStream* stream) {
+ for (int i = 0; i < 4; i++) {
+ stream->Add("(");
+ if (parallel_moves_[i] != NULL) {
+ parallel_moves_[i]->PrintDataTo(stream);
+ }
+ stream->Add(") ");
+ }
+}
+
+
+const char* LArithmeticD::Mnemonic() const {
+ switch (op()) {
+ case Token::ADD: return "add-d";
+ case Token::SUB: return "sub-d";
+ case Token::MUL: return "mul-d";
+ case Token::DIV: return "div-d";
+ case Token::MOD: return "mod-d";
+ default:
+ UNREACHABLE();
+ return NULL;
+ }
+}
+
+
+const char* LArithmeticT::Mnemonic() const {
+ switch (op()) {
+ case Token::ADD: return "add-t";
+ case Token::SUB: return "sub-t";
+ case Token::MUL: return "mul-t";
+ case Token::MOD: return "mod-t";
+ case Token::DIV: return "div-t";
+ case Token::BIT_AND: return "bit-and-t";
+ case Token::BIT_OR: return "bit-or-t";
+ case Token::BIT_XOR: return "bit-xor-t";
+ case Token::SHL: return "sll-t";
+ case Token::SAR: return "sra-t";
+ case Token::SHR: return "srl-t";
+ default:
+ UNREACHABLE();
+ return NULL;
+ }
+}
+
+
+void LGoto::PrintDataTo(StringStream* stream) {
+ stream->Add("B%d", block_id());
+}
+
+
+void LBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("B%d | B%d on ", true_block_id(), false_block_id());
+ InputAt(0)->PrintTo(stream);
+}
+
+
+void LCmpIDAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if ");
+ InputAt(0)->PrintTo(stream);
+ stream->Add(" %s ", Token::String(op()));
+ InputAt(1)->PrintTo(stream);
+ stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LIsNilAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if ");
+ InputAt(0)->PrintTo(stream);
+ stream->Add(kind() == kStrictEquality ? " === " : " == ");
+ stream->Add(nil() == kNullValue ? "null" : "undefined");
+ stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LIsObjectAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if is_object(");
+ InputAt(0)->PrintTo(stream);
+ stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LIsStringAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if is_string(");
+ InputAt(0)->PrintTo(stream);
+ stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LIsSmiAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if is_smi(");
+ InputAt(0)->PrintTo(stream);
+ stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LIsUndetectableAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if is_undetectable(");
+ InputAt(0)->PrintTo(stream);
+ stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LStringCompareAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if string_compare(");
+ InputAt(0)->PrintTo(stream);
+ InputAt(1)->PrintTo(stream);
+ stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if has_instance_type(");
+ InputAt(0)->PrintTo(stream);
+ stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LHasCachedArrayIndexAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if has_cached_array_index(");
+ InputAt(0)->PrintTo(stream);
+ stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if class_of_test(");
+ InputAt(0)->PrintTo(stream);
+ stream->Add(", \"%o\") then B%d else B%d",
+ *hydrogen()->class_name(),
+ true_block_id(),
+ false_block_id());
+}
+
+
+void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if typeof ");
+ InputAt(0)->PrintTo(stream);
+ stream->Add(" == \"%s\" then B%d else B%d",
+ *hydrogen()->type_literal()->ToCString(),
+ true_block_id(), false_block_id());
+}
+
+
+void LCallConstantFunction::PrintDataTo(StringStream* stream) {
+ stream->Add("#%d / ", arity());
+}
+
+
+void LUnaryMathOperation::PrintDataTo(StringStream* stream) {
+ stream->Add("/%s ", hydrogen()->OpName());
+ InputAt(0)->PrintTo(stream);
+}
+
+
+void LLoadContextSlot::PrintDataTo(StringStream* stream) {
+ InputAt(0)->PrintTo(stream);
+ stream->Add("[%d]", slot_index());
+}
+
+
+void LStoreContextSlot::PrintDataTo(StringStream* stream) {
+ InputAt(0)->PrintTo(stream);
+ stream->Add("[%d] <- ", slot_index());
+ InputAt(1)->PrintTo(stream);
+}
+
+
+void LInvokeFunction::PrintDataTo(StringStream* stream) {
+ stream->Add("= ");
+ InputAt(0)->PrintTo(stream);
+ stream->Add(" #%d / ", arity());
+}
+
+
+void LCallKeyed::PrintDataTo(StringStream* stream) {
+ stream->Add("[a2] #%d / ", arity());
+}
+
+
+void LCallNamed::PrintDataTo(StringStream* stream) {
+ SmartArrayPointer<char> name_string = name()->ToCString();
+ stream->Add("%s #%d / ", *name_string, arity());
+}
+
+
+void LCallGlobal::PrintDataTo(StringStream* stream) {
+ SmartArrayPointer<char> name_string = name()->ToCString();
+ stream->Add("%s #%d / ", *name_string, arity());
+}
+
+
+void LCallKnownGlobal::PrintDataTo(StringStream* stream) {
+ stream->Add("#%d / ", arity());
+}
+
+
+void LCallNew::PrintDataTo(StringStream* stream) {
+ stream->Add("= ");
+ InputAt(0)->PrintTo(stream);
+ stream->Add(" #%d / ", arity());
+}
+
+
+void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
+ arguments()->PrintTo(stream);
+
+ stream->Add(" length ");
+ length()->PrintTo(stream);
+
+ stream->Add(" index ");
+ index()->PrintTo(stream);
+}
+
+
+void LStoreNamedField::PrintDataTo(StringStream* stream) {
+ object()->PrintTo(stream);
+ stream->Add(".");
+ stream->Add(*String::cast(*name())->ToCString());
+ stream->Add(" <- ");
+ value()->PrintTo(stream);
+}
+
+
+void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
+ object()->PrintTo(stream);
+ stream->Add(".");
+ stream->Add(*String::cast(*name())->ToCString());
+ stream->Add(" <- ");
+ value()->PrintTo(stream);
+}
+
+
+void LStoreKeyedFastElement::PrintDataTo(StringStream* stream) {
+ object()->PrintTo(stream);
+ stream->Add("[");
+ key()->PrintTo(stream);
+ stream->Add("] <- ");
+ value()->PrintTo(stream);
+}
+
+
+void LStoreKeyedFastDoubleElement::PrintDataTo(StringStream* stream) {
+ elements()->PrintTo(stream);
+ stream->Add("[");
+ key()->PrintTo(stream);
+ stream->Add("] <- ");
+ value()->PrintTo(stream);
+}
+
+
+void LStoreKeyedGeneric::PrintDataTo(StringStream* stream) {
+ object()->PrintTo(stream);
+ stream->Add("[");
+ key()->PrintTo(stream);
+ stream->Add("] <- ");
+ value()->PrintTo(stream);
+}
+
+
+void LTransitionElementsKind::PrintDataTo(StringStream* stream) {
+ object()->PrintTo(stream);
+ stream->Add(" %p -> %p", *original_map(), *transitioned_map());
+}
+
+
+LChunk::LChunk(CompilationInfo* info, HGraph* graph)
+ : spill_slot_count_(0),
+ info_(info),
+ graph_(graph),
+ instructions_(32),
+ pointer_maps_(8),
+ inlined_closures_(1) {
+}
+
+
+int LChunk::GetNextSpillIndex(bool is_double) {
+ // Double-width slots occupy two consecutive indexes, so skip one extra.
+ if (is_double) spill_slot_count_++;
+ return spill_slot_count_++;
+}
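+
+// For example, with spill_slot_count_ at 3, a double-width request consumes
+// slots 3 and 4, returns index 4, and leaves the count at 5.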
+
+
+LOperand* LChunk::GetNextSpillSlot(bool is_double) {
+ int index = GetNextSpillIndex(is_double);
+ if (is_double) {
+ return LDoubleStackSlot::Create(index);
+ } else {
+ return LStackSlot::Create(index);
+ }
+}
+
+
+void LChunk::MarkEmptyBlocks() {
+ HPhase phase("Mark empty blocks", this);
+ for (int i = 0; i < graph()->blocks()->length(); ++i) {
+ HBasicBlock* block = graph()->blocks()->at(i);
+ int first = block->first_instruction_index();
+ int last = block->last_instruction_index();
+ LInstruction* first_instr = instructions()->at(first);
+ LInstruction* last_instr = instructions()->at(last);
+
+ LLabel* label = LLabel::cast(first_instr);
+ if (last_instr->IsGoto()) {
+ LGoto* goto_instr = LGoto::cast(last_instr);
+ if (label->IsRedundant() &&
+ !label->is_loop_header()) {
+ bool can_eliminate = true;
+ for (int j = first + 1; j < last && can_eliminate; ++j) {
+ LInstruction* cur = instructions()->at(j);
+ if (cur->IsGap()) {
+ LGap* gap = LGap::cast(cur);
+ if (!gap->IsRedundant()) {
+ can_eliminate = false;
+ }
+ } else {
+ can_eliminate = false;
+ }
+ }
+
+ if (can_eliminate) {
+ label->set_replacement(GetLabel(goto_instr->block_id()));
+ }
+ }
+ }
+ }
+}
+
+
+void LChunk::AddInstruction(LInstruction* instr, HBasicBlock* block) {
+ LInstructionGap* gap = new LInstructionGap(block);
+ int index = -1;
+ if (instr->IsControl()) {
+ instructions_.Add(gap);
+ index = instructions_.length();
+ instructions_.Add(instr);
+ } else {
+ index = instructions_.length();
+ instructions_.Add(instr);
+ instructions_.Add(gap);
+ }
+ if (instr->HasPointerMap()) {
+ pointer_maps_.Add(instr->pointer_map());
+ instr->pointer_map()->set_lithium_position(index);
+ }
+}
+
+
+LConstantOperand* LChunk::DefineConstantOperand(HConstant* constant) {
+ return LConstantOperand::Create(constant->id());
+}
+
+
+int LChunk::GetParameterStackSlot(int index) const {
+ // The receiver is at index 0, the first parameter at index 1, so we
+ // shift all parameter indexes down by the number of parameters, and
+ // make sure they end up negative so they are distinguishable from
+ // spill slots.
+ int result = index - info()->scope()->num_parameters() - 1;
+ ASSERT(result < 0);
+ return result;
+}
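+
+// For example, with two parameters the receiver (index 0) maps to slot -3
+// and the last parameter (index 2) maps to slot -1.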
+
+// A parameter relative to the frame pointer in the arguments stub.
+int LChunk::ParameterAt(int index) {
+ ASSERT(-1 <= index); // -1 is the receiver.
+ return (1 + info()->scope()->num_parameters() - index) *
+ kPointerSize;
+}
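+
+// For example, with two parameters the receiver (index -1) is found at
+// 4 * kPointerSize and the last parameter (index 2) at 1 * kPointerSize.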
+
+
+LGap* LChunk::GetGapAt(int index) const {
+ return LGap::cast(instructions_[index]);
+}
+
+
+bool LChunk::IsGapAt(int index) const {
+ return instructions_[index]->IsGap();
+}
+
+
+int LChunk::NearestGapPos(int index) const {
+ while (!IsGapAt(index)) index--;
+ return index;
+}
+
+
+void LChunk::AddGapMove(int index, LOperand* from, LOperand* to) {
+ GetGapAt(index)->GetOrCreateParallelMove(LGap::START)->AddMove(from, to);
+}
+
+
+Handle<Object> LChunk::LookupLiteral(LConstantOperand* operand) const {
+ return HConstant::cast(graph_->LookupValue(operand->index()))->handle();
+}
+
+
+Representation LChunk::LookupLiteralRepresentation(
+ LConstantOperand* operand) const {
+ return graph_->LookupValue(operand->index())->representation();
+}
+
+
+LChunk* LChunkBuilder::Build() {
+ ASSERT(is_unused());
+ chunk_ = new LChunk(info(), graph());
+ HPhase phase("Building chunk", chunk_);
+ status_ = BUILDING;
+ const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
+ for (int i = 0; i < blocks->length(); i++) {
+ HBasicBlock* next = NULL;
+ if (i < blocks->length() - 1) next = blocks->at(i + 1);
+ DoBasicBlock(blocks->at(i), next);
+ if (is_aborted()) return NULL;
+ }
+ status_ = DONE;
+ return chunk_;
+}
+
+
+void LChunkBuilder::Abort(const char* format, ...) {
+ if (FLAG_trace_bailout) {
+ SmartArrayPointer<char> name(
+ info()->shared_info()->DebugName()->ToCString());
+ PrintF("Aborting LChunk building in @\"%s\": ", *name);
+ va_list arguments;
+ va_start(arguments, format);
+ OS::VPrint(format, arguments);
+ va_end(arguments);
+ PrintF("\n");
+ }
+ status_ = ABORTED;
+}
+
+
+LRegister* LChunkBuilder::ToOperand(Register reg) {
+ return LRegister::Create(Register::ToAllocationIndex(reg));
+}
+
+
+LUnallocated* LChunkBuilder::ToUnallocated(Register reg) {
+ return new LUnallocated(LUnallocated::FIXED_REGISTER,
+ Register::ToAllocationIndex(reg));
+}
+
+
+LUnallocated* LChunkBuilder::ToUnallocated(DoubleRegister reg) {
+ return new LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER,
+ DoubleRegister::ToAllocationIndex(reg));
+}
+
+
+LOperand* LChunkBuilder::UseFixed(HValue* value, Register fixed_register) {
+ return Use(value, ToUnallocated(fixed_register));
+}
+
+
+LOperand* LChunkBuilder::UseFixedDouble(HValue* value, DoubleRegister reg) {
+ return Use(value, ToUnallocated(reg));
+}
+
+
+LOperand* LChunkBuilder::UseRegister(HValue* value) {
+ return Use(value, new LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
+}
+
+
+LOperand* LChunkBuilder::UseRegisterAtStart(HValue* value) {
+ return Use(value,
+ new LUnallocated(LUnallocated::MUST_HAVE_REGISTER,
+ LUnallocated::USED_AT_START));
+}
+
+
+LOperand* LChunkBuilder::UseTempRegister(HValue* value) {
+ return Use(value, new LUnallocated(LUnallocated::WRITABLE_REGISTER));
+}
+
+
+LOperand* LChunkBuilder::Use(HValue* value) {
+ return Use(value, new LUnallocated(LUnallocated::NONE));
+}
+
+
+LOperand* LChunkBuilder::UseAtStart(HValue* value) {
+ return Use(value, new LUnallocated(LUnallocated::NONE,
+ LUnallocated::USED_AT_START));
+}
+
+
+LOperand* LChunkBuilder::UseOrConstant(HValue* value) {
+ return value->IsConstant()
+ ? chunk_->DefineConstantOperand(HConstant::cast(value))
+ : Use(value);
+}
+
+
+LOperand* LChunkBuilder::UseOrConstantAtStart(HValue* value) {
+ return value->IsConstant()
+ ? chunk_->DefineConstantOperand(HConstant::cast(value))
+ : UseAtStart(value);
+}
+
+
+LOperand* LChunkBuilder::UseRegisterOrConstant(HValue* value) {
+ return value->IsConstant()
+ ? chunk_->DefineConstantOperand(HConstant::cast(value))
+ : UseRegister(value);
+}
+
+
+LOperand* LChunkBuilder::UseRegisterOrConstantAtStart(HValue* value) {
+ return value->IsConstant()
+ ? chunk_->DefineConstantOperand(HConstant::cast(value))
+ : UseRegisterAtStart(value);
+}
+
+
+LOperand* LChunkBuilder::UseAny(HValue* value) {
+ return value->IsConstant()
+ ? chunk_->DefineConstantOperand(HConstant::cast(value))
+ : Use(value, new LUnallocated(LUnallocated::ANY));
+}
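+
+// To summarize the helpers above: Use places no register constraint on the
+// operand, UseRegister requires a register, UseTempRegister requires a
+// writable register, UseFixed pins a specific register, the *AtStart
+// variants mark the value as live only at the start of the instruction so
+// its register may be reused for the result, and the *OrConstant variants
+// let constants bypass operand allocation entirely.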
+
+
+LOperand* LChunkBuilder::Use(HValue* value, LUnallocated* operand) {
+ if (value->EmitAtUses()) {
+ HInstruction* instr = HInstruction::cast(value);
+ VisitInstruction(instr);
+ }
+ allocator_->RecordUse(value, operand);
+ return operand;
+}
+
+
+template<int I, int T>
+LInstruction* LChunkBuilder::Define(LTemplateInstruction<1, I, T>* instr,
+ LUnallocated* result) {
+ allocator_->RecordDefinition(current_instruction_, result);
+ instr->set_result(result);
+ return instr;
+}
+
+
+template<int I, int T>
+LInstruction* LChunkBuilder::Define(LTemplateInstruction<1, I, T>* instr) {
+ return Define(instr, new LUnallocated(LUnallocated::NONE));
+}
+
+
+template<int I, int T>
+LInstruction* LChunkBuilder::DefineAsRegister(
+ LTemplateInstruction<1, I, T>* instr) {
+ return Define(instr, new LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
+}
+
+
+template<int I, int T>
+LInstruction* LChunkBuilder::DefineAsSpilled(
+ LTemplateInstruction<1, I, T>* instr, int index) {
+ return Define(instr, new LUnallocated(LUnallocated::FIXED_SLOT, index));
+}
+
+
+template<int I, int T>
+LInstruction* LChunkBuilder::DefineSameAsFirst(
+ LTemplateInstruction<1, I, T>* instr) {
+ return Define(instr, new LUnallocated(LUnallocated::SAME_AS_FIRST_INPUT));
+}
+
+
+template<int I, int T>
+LInstruction* LChunkBuilder::DefineFixed(
+ LTemplateInstruction<1, I, T>* instr, Register reg) {
+ return Define(instr, ToUnallocated(reg));
+}
+
+
+template<int I, int T>
+LInstruction* LChunkBuilder::DefineFixedDouble(
+ LTemplateInstruction<1, I, T>* instr, DoubleRegister reg) {
+ return Define(instr, ToUnallocated(reg));
+}
+
+
+LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
+ HEnvironment* hydrogen_env = current_block_->last_environment();
+ int argument_index_accumulator = 0;
+ instr->set_environment(CreateEnvironment(hydrogen_env,
+ &argument_index_accumulator));
+ return instr;
+}
+
+
+LInstruction* LChunkBuilder::SetInstructionPendingDeoptimizationEnvironment(
+ LInstruction* instr, int ast_id) {
+ ASSERT(instruction_pending_deoptimization_environment_ == NULL);
+ ASSERT(pending_deoptimization_ast_id_ == AstNode::kNoNumber);
+ instruction_pending_deoptimization_environment_ = instr;
+ pending_deoptimization_ast_id_ = ast_id;
+ return instr;
+}
+
+
+void LChunkBuilder::ClearInstructionPendingDeoptimizationEnvironment() {
+ instruction_pending_deoptimization_environment_ = NULL;
+ pending_deoptimization_ast_id_ = AstNode::kNoNumber;
+}
+
+
+LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
+ HInstruction* hinstr,
+ CanDeoptimize can_deoptimize) {
+#ifdef DEBUG
+ instr->VerifyCall();
+#endif
+ instr->MarkAsCall();
+ instr = AssignPointerMap(instr);
+
+ if (hinstr->HasObservableSideEffects()) {
+ ASSERT(hinstr->next()->IsSimulate());
+ HSimulate* sim = HSimulate::cast(hinstr->next());
+ instr = SetInstructionPendingDeoptimizationEnvironment(
+ instr, sim->ast_id());
+ }
+
+ // If the instruction does not have observable side effects, lazy
+ // deoptimization after the call will try to deoptimize to the point
+ // before the call. Thus we still need to attach an environment to this
+ // call even if the call sequence cannot deoptimize eagerly.
+ bool needs_environment =
+ (can_deoptimize == CAN_DEOPTIMIZE_EAGERLY) ||
+ !hinstr->HasObservableSideEffects();
+ if (needs_environment && !instr->HasEnvironment()) {
+ instr = AssignEnvironment(instr);
+ }
+
+ return instr;
+}
+
+
+LInstruction* LChunkBuilder::MarkAsSaveDoubles(LInstruction* instr) {
+ instr->MarkAsSaveDoubles();
+ return instr;
+}
+
+
+LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
+ ASSERT(!instr->HasPointerMap());
+ instr->set_pointer_map(new LPointerMap(position_));
+ return instr;
+}
+
+
+LUnallocated* LChunkBuilder::TempRegister() {
+ LUnallocated* operand = new LUnallocated(LUnallocated::MUST_HAVE_REGISTER);
+ allocator_->RecordTemporary(operand);
+ return operand;
+}
+
+
+LOperand* LChunkBuilder::FixedTemp(Register reg) {
+ LUnallocated* operand = ToUnallocated(reg);
+ allocator_->RecordTemporary(operand);
+ return operand;
+}
+
+
+LOperand* LChunkBuilder::FixedTemp(DoubleRegister reg) {
+ LUnallocated* operand = ToUnallocated(reg);
+ allocator_->RecordTemporary(operand);
+ return operand;
+}
+
+
+LInstruction* LChunkBuilder::DoBlockEntry(HBlockEntry* instr) {
+ return new LLabel(instr->block());
+}
+
+
+LInstruction* LChunkBuilder::DoSoftDeoptimize(HSoftDeoptimize* instr) {
+ return AssignEnvironment(new LDeoptimize);
+}
+
+
+LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
+ return AssignEnvironment(new LDeoptimize);
+}
+
+
+LInstruction* LChunkBuilder::DoShift(Token::Value op,
+ HBitwiseBinaryOperation* instr) {
+ if (instr->representation().IsTagged()) {
+ ASSERT(instr->left()->representation().IsTagged());
+ ASSERT(instr->right()->representation().IsTagged());
+
+ LOperand* left = UseFixed(instr->left(), a1);
+ LOperand* right = UseFixed(instr->right(), a0);
+ LArithmeticT* result = new LArithmeticT(op, left, right);
+ return MarkAsCall(DefineFixed(result, v0), instr);
+ }
+
+ ASSERT(instr->representation().IsInteger32());
+ ASSERT(instr->left()->representation().IsInteger32());
+ ASSERT(instr->right()->representation().IsInteger32());
+ LOperand* left = UseRegisterAtStart(instr->left());
+
+ HValue* right_value = instr->right();
+ LOperand* right = NULL;
+ int constant_value = 0;
+ if (right_value->IsConstant()) {
+ HConstant* constant = HConstant::cast(right_value);
+ right = chunk_->DefineConstantOperand(constant);
+ constant_value = constant->Integer32Value() & 0x1f;
+ } else {
+ right = UseRegisterAtStart(right_value);
+ }
+
+ // Shift operations can only deoptimize if we do a logical shift
+ // by 0 and the result cannot be truncated to int32.
+ bool may_deopt = (op == Token::SHR && constant_value == 0);
+ bool does_deopt = false;
+ if (may_deopt) {
+ for (HUseIterator it(instr->uses()); !it.Done(); it.Advance()) {
+ if (!it.value()->CheckFlag(HValue::kTruncatingToInt32)) {
+ does_deopt = true;
+ break;
+ }
+ }
+ }
+
+ LInstruction* result =
+ DefineAsRegister(new LShiftI(op, left, right, does_deopt));
+ return does_deopt ? AssignEnvironment(result) : result;
+}
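+
+// For example, x >>> 0 turns a negative int32 input into an unsigned value
+// above kMaxInt; unless every use truncates the result back to int32, the
+// shift must be able to deoptimize, which is why does_deopt is computed
+// above.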
+
+
+LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op,
+ HArithmeticBinaryOperation* instr) {
+ ASSERT(instr->representation().IsDouble());
+ ASSERT(instr->left()->representation().IsDouble());
+ ASSERT(instr->right()->representation().IsDouble());
+ ASSERT(op != Token::MOD);
+ LOperand* left = UseRegisterAtStart(instr->left());
+ LOperand* right = UseRegisterAtStart(instr->right());
+ LArithmeticD* result = new LArithmeticD(op, left, right);
+ return DefineAsRegister(result);
+}
+
+
+LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op,
+ HArithmeticBinaryOperation* instr) {
+ ASSERT(op == Token::ADD ||
+ op == Token::DIV ||
+ op == Token::MOD ||
+ op == Token::MUL ||
+ op == Token::SUB);
+ HValue* left = instr->left();
+ HValue* right = instr->right();
+ ASSERT(left->representation().IsTagged());
+ ASSERT(right->representation().IsTagged());
+ LOperand* left_operand = UseFixed(left, a1);
+ LOperand* right_operand = UseFixed(right, a0);
+ LArithmeticT* result = new LArithmeticT(op, left_operand, right_operand);
+ return MarkAsCall(DefineFixed(result, v0), instr);
+}
+
+
+void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) {
+ ASSERT(is_building());
+ current_block_ = block;
+ next_block_ = next_block;
+ if (block->IsStartBlock()) {
+ block->UpdateEnvironment(graph_->start_environment());
+ argument_count_ = 0;
+ } else if (block->predecessors()->length() == 1) {
+ // We have a single predecessor => copy environment and outgoing
+ // argument count from the predecessor.
+ ASSERT(block->phis()->length() == 0);
+ HBasicBlock* pred = block->predecessors()->at(0);
+ HEnvironment* last_environment = pred->last_environment();
+ ASSERT(last_environment != NULL);
+ // Only copy the environment if it is used again later.
+ if (pred->end()->SecondSuccessor() == NULL) {
+ ASSERT(pred->end()->FirstSuccessor() == block);
+ } else {
+ if (pred->end()->FirstSuccessor()->block_id() > block->block_id() ||
+ pred->end()->SecondSuccessor()->block_id() > block->block_id()) {
+ last_environment = last_environment->Copy();
+ }
+ }
+ block->UpdateEnvironment(last_environment);
+ ASSERT(pred->argument_count() >= 0);
+ argument_count_ = pred->argument_count();
+ } else {
+ // We are at a state join => process phis.
+ HBasicBlock* pred = block->predecessors()->at(0);
+ // No need to copy the environment, it cannot be used later.
+ HEnvironment* last_environment = pred->last_environment();
+ for (int i = 0; i < block->phis()->length(); ++i) {
+ HPhi* phi = block->phis()->at(i);
+ last_environment->SetValueAt(phi->merged_index(), phi);
+ }
+ for (int i = 0; i < block->deleted_phis()->length(); ++i) {
+ last_environment->SetValueAt(block->deleted_phis()->at(i),
+ graph_->GetConstantUndefined());
+ }
+ block->UpdateEnvironment(last_environment);
+ // Pick up the outgoing argument count of one of the predecessors.
+ argument_count_ = pred->argument_count();
+ }
+ HInstruction* current = block->first();
+ int start = chunk_->instructions()->length();
+ while (current != NULL && !is_aborted()) {
+ // Code for constants in registers is generated lazily.
+ if (!current->EmitAtUses()) {
+ VisitInstruction(current);
+ }
+ current = current->next();
+ }
+ int end = chunk_->instructions()->length() - 1;
+ if (end >= start) {
+ block->set_first_instruction_index(start);
+ block->set_last_instruction_index(end);
+ }
+ block->set_argument_count(argument_count_);
+ next_block_ = NULL;
+ current_block_ = NULL;
+}
+
+
+void LChunkBuilder::VisitInstruction(HInstruction* current) {
+ HInstruction* old_current = current_instruction_;
+ current_instruction_ = current;
+ if (current->has_position()) position_ = current->position();
+ LInstruction* instr = current->CompileToLithium(this);
+
+ if (instr != NULL) {
+ if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) {
+ instr = AssignPointerMap(instr);
+ }
+ if (FLAG_stress_environments && !instr->HasEnvironment()) {
+ instr = AssignEnvironment(instr);
+ }
+ instr->set_hydrogen_value(current);
+ chunk_->AddInstruction(instr, current_block_);
+ }
+ current_instruction_ = old_current;
+}
+
+
+LEnvironment* LChunkBuilder::CreateEnvironment(
+ HEnvironment* hydrogen_env,
+ int* argument_index_accumulator) {
+ if (hydrogen_env == NULL) return NULL;
+
+ LEnvironment* outer =
+ CreateEnvironment(hydrogen_env->outer(), argument_index_accumulator);
+ int ast_id = hydrogen_env->ast_id();
+ ASSERT(ast_id != AstNode::kNoNumber);
+ int value_count = hydrogen_env->length();
+ LEnvironment* result = new LEnvironment(hydrogen_env->closure(),
+ ast_id,
+ hydrogen_env->parameter_count(),
+ argument_count_,
+ value_count,
+ outer);
+ for (int i = 0; i < value_count; ++i) {
+ if (hydrogen_env->is_special_index(i)) continue;
+
+ HValue* value = hydrogen_env->values()->at(i);
+ LOperand* op = NULL;
+ if (value->IsArgumentsObject()) {
+ op = NULL;
+ } else if (value->IsPushArgument()) {
+ op = new LArgument((*argument_index_accumulator)++);
+ } else {
+ op = UseAny(value);
+ }
+ result->AddValue(op, value->representation());
+ }
+
+ return result;
+}
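+
+// Note that the recursion above rebuilds the chain of environments for
+// inlined functions: the outermost frame is created first, so each
+// LEnvironment receives its outer environment ready-made.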
+
+
+LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
+ return new LGoto(instr->FirstSuccessor()->block_id());
+}
+
+
+LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
+ HValue* v = instr->value();
+ if (v->EmitAtUses()) {
+ HBasicBlock* successor = HConstant::cast(v)->ToBoolean()
+ ? instr->FirstSuccessor()
+ : instr->SecondSuccessor();
+ return new LGoto(successor->block_id());
+ }
+ return AssignEnvironment(new LBranch(UseRegister(v)));
+}
+
+
+LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) {
+ ASSERT(instr->value()->representation().IsTagged());
+ LOperand* value = UseRegisterAtStart(instr->value());
+ LOperand* temp = TempRegister();
+ return new LCmpMapAndBranch(value, temp);
+}
+
+
+LInstruction* LChunkBuilder::DoArgumentsLength(HArgumentsLength* length) {
+ return DefineAsRegister(new LArgumentsLength(UseRegister(length->value())));
+}
+
+
+LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) {
+ return DefineAsRegister(new LArgumentsElements);
+}
+
+
+LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
+ LInstanceOf* result =
+ new LInstanceOf(UseFixed(instr->left(), a0),
+ UseFixed(instr->right(), a1));
+ return MarkAsCall(DefineFixed(result, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
+ HInstanceOfKnownGlobal* instr) {
+ LInstanceOfKnownGlobal* result =
+ new LInstanceOfKnownGlobal(UseFixed(instr->left(), a0), FixedTemp(t0));
+ return MarkAsCall(DefineFixed(result, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) {
+ LOperand* function = UseFixed(instr->function(), a1);
+ LOperand* receiver = UseFixed(instr->receiver(), a0);
+ LOperand* length = UseFixed(instr->length(), a2);
+ LOperand* elements = UseFixed(instr->elements(), a3);
+ LApplyArguments* result = new LApplyArguments(function,
+ receiver,
+ length,
+ elements);
+ return MarkAsCall(DefineFixed(result, v0), instr, CAN_DEOPTIMIZE_EAGERLY);
+}
+
+
+LInstruction* LChunkBuilder::DoPushArgument(HPushArgument* instr) {
+ ++argument_count_;
+ LOperand* argument = Use(instr->argument());
+ return new LPushArgument(argument);
+}
+
+
+LInstruction* LChunkBuilder::DoThisFunction(HThisFunction* instr) {
+ return instr->HasNoUses() ? NULL : DefineAsRegister(new LThisFunction);
+}
+
+
+LInstruction* LChunkBuilder::DoContext(HContext* instr) {
+ return instr->HasNoUses() ? NULL : DefineAsRegister(new LContext);
+}
+
+
+LInstruction* LChunkBuilder::DoOuterContext(HOuterContext* instr) {
+ LOperand* context = UseRegisterAtStart(instr->value());
+ return DefineAsRegister(new LOuterContext(context));
+}
+
+
+LInstruction* LChunkBuilder::DoGlobalObject(HGlobalObject* instr) {
+ LOperand* context = UseRegisterAtStart(instr->value());
+ return DefineAsRegister(new LGlobalObject(context));
+}
+
+
+LInstruction* LChunkBuilder::DoGlobalReceiver(HGlobalReceiver* instr) {
+ LOperand* global_object = UseRegisterAtStart(instr->value());
+ return DefineAsRegister(new LGlobalReceiver(global_object));
+}
+
+
+LInstruction* LChunkBuilder::DoCallConstantFunction(
+ HCallConstantFunction* instr) {
+ argument_count_ -= instr->argument_count();
+ return MarkAsCall(DefineFixed(new LCallConstantFunction, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
+ LOperand* function = UseFixed(instr->function(), a1);
+ argument_count_ -= instr->argument_count();
+ LInvokeFunction* result = new LInvokeFunction(function);
+ return MarkAsCall(DefineFixed(result, v0), instr, CANNOT_DEOPTIMIZE_EAGERLY);
+}
+
+
+LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
+ BuiltinFunctionId op = instr->op();
+ if (op == kMathLog || op == kMathSin || op == kMathCos) {
+ LOperand* input = UseFixedDouble(instr->value(), f4);
+ LUnaryMathOperation* result = new LUnaryMathOperation(input, NULL);
+ return MarkAsCall(DefineFixedDouble(result, f4), instr);
+ } else {
+ LOperand* input = UseRegisterAtStart(instr->value());
+ LOperand* temp = (op == kMathFloor) ? TempRegister() : NULL;
+ LUnaryMathOperation* result = new LUnaryMathOperation(input, temp);
+ switch (op) {
+ case kMathAbs:
+ return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
+ case kMathFloor:
+ return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
+ case kMathSqrt:
+ return DefineAsRegister(result);
+ case kMathRound:
+ return AssignEnvironment(DefineAsRegister(result));
+ case kMathPowHalf:
+ return DefineAsRegister(result);
+ default:
+ UNREACHABLE();
+ return NULL;
+ }
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoCallKeyed(HCallKeyed* instr) {
+ ASSERT(instr->key()->representation().IsTagged());
+ argument_count_ -= instr->argument_count();
+ LOperand* key = UseFixed(instr->key(), a2);
+ return MarkAsCall(DefineFixed(new LCallKeyed(key), v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallNamed(HCallNamed* instr) {
+ argument_count_ -= instr->argument_count();
+ return MarkAsCall(DefineFixed(new LCallNamed, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallGlobal(HCallGlobal* instr) {
+ argument_count_ -= instr->argument_count();
+ return MarkAsCall(DefineFixed(new LCallGlobal, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallKnownGlobal(HCallKnownGlobal* instr) {
+ argument_count_ -= instr->argument_count();
+ return MarkAsCall(DefineFixed(new LCallKnownGlobal, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
+ LOperand* constructor = UseFixed(instr->constructor(), a1);
+ argument_count_ -= instr->argument_count();
+ LCallNew* result = new LCallNew(constructor);
+ return MarkAsCall(DefineFixed(result, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
+ LOperand* function = UseFixed(instr->function(), a1);
+ argument_count_ -= instr->argument_count();
+ return MarkAsCall(DefineFixed(new LCallFunction(function), v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
+ argument_count_ -= instr->argument_count();
+ return MarkAsCall(DefineFixed(new LCallRuntime, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoShr(HShr* instr) {
+ return DoShift(Token::SHR, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoSar(HSar* instr) {
+ return DoShift(Token::SAR, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoShl(HShl* instr) {
+ return DoShift(Token::SHL, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
+ if (instr->representation().IsInteger32()) {
+ ASSERT(instr->left()->representation().IsInteger32());
+ ASSERT(instr->right()->representation().IsInteger32());
+
+ LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
+ LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
+ return DefineAsRegister(new LBitI(left, right));
+ } else {
+ ASSERT(instr->representation().IsTagged());
+ ASSERT(instr->left()->representation().IsTagged());
+ ASSERT(instr->right()->representation().IsTagged());
+
+ LOperand* left = UseFixed(instr->left(), a1);
+ LOperand* right = UseFixed(instr->right(), a0);
+ LArithmeticT* result = new LArithmeticT(instr->op(), left, right);
+ return MarkAsCall(DefineFixed(result, v0), instr);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoBitNot(HBitNot* instr) {
+ ASSERT(instr->value()->representation().IsInteger32());
+ ASSERT(instr->representation().IsInteger32());
+ return DefineAsRegister(new LBitNotI(UseRegisterAtStart(instr->value())));
+}
+
+
+LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
+ if (instr->representation().IsDouble()) {
+ return DoArithmeticD(Token::DIV, instr);
+ } else if (instr->representation().IsInteger32()) {
+ // TODO(1042): The fixed register allocation is needed because we call
+ // TypeRecordingBinaryOpStub from the generated code, which requires
+ // registers a0 and a1 to be used. We should remove that when we
+ // provide a native implementation.
+ LOperand* dividend = UseFixed(instr->left(), a0);
+ LOperand* divisor = UseFixed(instr->right(), a1);
+ return AssignEnvironment(AssignPointerMap(
+ DefineFixed(new LDivI(dividend, divisor), v0)));
+ } else {
+ return DoArithmeticT(Token::DIV, instr);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoMod(HMod* instr) {
+ if (instr->representation().IsInteger32()) {
+ ASSERT(instr->left()->representation().IsInteger32());
+ ASSERT(instr->right()->representation().IsInteger32());
+
+ LModI* mod;
+ if (instr->HasPowerOf2Divisor()) {
+ ASSERT(!instr->CheckFlag(HValue::kCanBeDivByZero));
+ LOperand* value = UseRegisterAtStart(instr->left());
+ mod = new LModI(value, UseOrConstant(instr->right()));
+ } else {
+ LOperand* dividend = UseRegister(instr->left());
+ LOperand* divisor = UseRegister(instr->right());
+ mod = new LModI(dividend,
+ divisor,
+ TempRegister(),
+ FixedTemp(f20),
+ FixedTemp(f22));
+ }
+
+ if (instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
+ instr->CheckFlag(HValue::kCanBeDivByZero)) {
+ return AssignEnvironment(DefineAsRegister(mod));
+ } else {
+ return DefineAsRegister(mod);
+ }
+ } else if (instr->representation().IsTagged()) {
+ return DoArithmeticT(Token::MOD, instr);
+ } else {
+ ASSERT(instr->representation().IsDouble());
+ // We call a C function for double modulo. It can't trigger a GC.
+ // We need to use a fixed result register for the call.
+ // TODO(fschneider): Allow any register as input registers.
+ LOperand* left = UseFixedDouble(instr->left(), f2);
+ LOperand* right = UseFixedDouble(instr->right(), f4);
+ LArithmeticD* result = new LArithmeticD(Token::MOD, left, right);
+ return MarkAsCall(DefineFixedDouble(result, f2), instr);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoMul(HMul* instr) {
+ if (instr->representation().IsInteger32()) {
+ ASSERT(instr->left()->representation().IsInteger32());
+ ASSERT(instr->right()->representation().IsInteger32());
+ LOperand* left;
+ LOperand* right = UseOrConstant(instr->MostConstantOperand());
+ LOperand* temp = NULL;
+ if (instr->CheckFlag(HValue::kBailoutOnMinusZero) &&
+ (instr->CheckFlag(HValue::kCanOverflow) ||
+ !right->IsConstantOperand())) {
+ left = UseRegister(instr->LeastConstantOperand());
+ temp = TempRegister();
+ } else {
+ left = UseRegisterAtStart(instr->LeastConstantOperand());
+ }
+ return AssignEnvironment(DefineAsRegister(new LMulI(left, right, temp)));
+
+ } else if (instr->representation().IsDouble()) {
+ return DoArithmeticD(Token::MUL, instr);
+
+ } else {
+ return DoArithmeticT(Token::MUL, instr);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoSub(HSub* instr) {
+ if (instr->representation().IsInteger32()) {
+ ASSERT(instr->left()->representation().IsInteger32());
+ ASSERT(instr->right()->representation().IsInteger32());
+ LOperand* left = UseRegisterAtStart(instr->left());
+ LOperand* right = UseOrConstantAtStart(instr->right());
+ LSubI* sub = new LSubI(left, right);
+ LInstruction* result = DefineAsRegister(sub);
+ if (instr->CheckFlag(HValue::kCanOverflow)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+ } else if (instr->representation().IsDouble()) {
+ return DoArithmeticD(Token::SUB, instr);
+ } else {
+ return DoArithmeticT(Token::SUB, instr);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
+ if (instr->representation().IsInteger32()) {
+ ASSERT(instr->left()->representation().IsInteger32());
+ ASSERT(instr->right()->representation().IsInteger32());
+ LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
+ LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
+ LAddI* add = new LAddI(left, right);
+ LInstruction* result = DefineAsRegister(add);
+ if (instr->CheckFlag(HValue::kCanOverflow)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+ } else if (instr->representation().IsDouble()) {
+ return DoArithmeticD(Token::ADD, instr);
+ } else {
+ ASSERT(instr->representation().IsTagged());
+ return DoArithmeticT(Token::ADD, instr);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoPower(HPower* instr) {
+ ASSERT(instr->representation().IsDouble());
+ // We call a C function for double power. It can't trigger a GC.
+ // We need to use a fixed result register for the call.
+ Representation exponent_type = instr->right()->representation();
+ ASSERT(instr->left()->representation().IsDouble());
+ LOperand* left = UseFixedDouble(instr->left(), f2);
+ LOperand* right = exponent_type.IsDouble() ?
+ UseFixedDouble(instr->right(), f4) :
+ UseFixed(instr->right(), a0);
+ LPower* result = new LPower(left, right);
+ return MarkAsCall(DefineFixedDouble(result, f6),
+ instr,
+ CAN_DEOPTIMIZE_EAGERLY);
+}
+
+
+LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
+ ASSERT(instr->left()->representation().IsTagged());
+ ASSERT(instr->right()->representation().IsTagged());
+ LOperand* left = UseFixed(instr->left(), a1);
+ LOperand* right = UseFixed(instr->right(), a0);
+ LCmpT* result = new LCmpT(left, right);
+ return MarkAsCall(DefineFixed(result, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCompareIDAndBranch(
+ HCompareIDAndBranch* instr) {
+ Representation r = instr->GetInputRepresentation();
+ if (r.IsInteger32()) {
+ ASSERT(instr->left()->representation().IsInteger32());
+ ASSERT(instr->right()->representation().IsInteger32());
+ LOperand* left = UseRegisterOrConstantAtStart(instr->left());
+ LOperand* right = UseRegisterOrConstantAtStart(instr->right());
+ return new LCmpIDAndBranch(left, right);
+ } else {
+ ASSERT(r.IsDouble());
+ ASSERT(instr->left()->representation().IsDouble());
+ ASSERT(instr->right()->representation().IsDouble());
+ LOperand* left = UseRegisterAtStart(instr->left());
+ LOperand* right = UseRegisterAtStart(instr->right());
+ return new LCmpIDAndBranch(left, right);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch(
+ HCompareObjectEqAndBranch* instr) {
+ LOperand* left = UseRegisterAtStart(instr->left());
+ LOperand* right = UseRegisterAtStart(instr->right());
+ return new LCmpObjectEqAndBranch(left, right);
+}
+
+
+LInstruction* LChunkBuilder::DoCompareConstantEqAndBranch(
+ HCompareConstantEqAndBranch* instr) {
+ return new LCmpConstantEqAndBranch(UseRegisterAtStart(instr->value()));
+}
+
+
+LInstruction* LChunkBuilder::DoIsNilAndBranch(HIsNilAndBranch* instr) {
+ ASSERT(instr->value()->representation().IsTagged());
+ return new LIsNilAndBranch(UseRegisterAtStart(instr->value()));
+}
+
+
+LInstruction* LChunkBuilder::DoIsObjectAndBranch(HIsObjectAndBranch* instr) {
+ ASSERT(instr->value()->representation().IsTagged());
+ LOperand* temp = TempRegister();
+ return new LIsObjectAndBranch(UseRegisterAtStart(instr->value()), temp);
+}
+
+
+LInstruction* LChunkBuilder::DoIsStringAndBranch(HIsStringAndBranch* instr) {
+ ASSERT(instr->value()->representation().IsTagged());
+ LOperand* temp = TempRegister();
+ return new LIsStringAndBranch(UseRegisterAtStart(instr->value()), temp);
+}
+
+
+LInstruction* LChunkBuilder::DoIsSmiAndBranch(HIsSmiAndBranch* instr) {
+ ASSERT(instr->value()->representation().IsTagged());
+ return new LIsSmiAndBranch(Use(instr->value()));
+}
+
+
+LInstruction* LChunkBuilder::DoIsUndetectableAndBranch(
+ HIsUndetectableAndBranch* instr) {
+ ASSERT(instr->value()->representation().IsTagged());
+ return new LIsUndetectableAndBranch(UseRegisterAtStart(instr->value()),
+ TempRegister());
+}
+
+
+LInstruction* LChunkBuilder::DoStringCompareAndBranch(
+ HStringCompareAndBranch* instr) {
+ ASSERT(instr->left()->representation().IsTagged());
+ ASSERT(instr->right()->representation().IsTagged());
+ LOperand* left = UseFixed(instr->left(), a1);
+ LOperand* right = UseFixed(instr->right(), a0);
+ LStringCompareAndBranch* result = new LStringCompareAndBranch(left, right);
+ return MarkAsCall(result, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoHasInstanceTypeAndBranch(
+ HHasInstanceTypeAndBranch* instr) {
+ ASSERT(instr->value()->representation().IsTagged());
+ return new LHasInstanceTypeAndBranch(UseRegisterAtStart(instr->value()));
+}
+
+
+LInstruction* LChunkBuilder::DoGetCachedArrayIndex(
+ HGetCachedArrayIndex* instr) {
+ ASSERT(instr->value()->representation().IsTagged());
+ LOperand* value = UseRegisterAtStart(instr->value());
+
+ return DefineAsRegister(new LGetCachedArrayIndex(value));
+}
+
+
+LInstruction* LChunkBuilder::DoHasCachedArrayIndexAndBranch(
+ HHasCachedArrayIndexAndBranch* instr) {
+ ASSERT(instr->value()->representation().IsTagged());
+ return new LHasCachedArrayIndexAndBranch(
+ UseRegisterAtStart(instr->value()));
+}
+
+
+LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
+ HClassOfTestAndBranch* instr) {
+ ASSERT(instr->value()->representation().IsTagged());
+ return new LClassOfTestAndBranch(UseTempRegister(instr->value()),
+ TempRegister());
+}
+
+
+LInstruction* LChunkBuilder::DoJSArrayLength(HJSArrayLength* instr) {
+ LOperand* array = UseRegisterAtStart(instr->value());
+ return DefineAsRegister(new LJSArrayLength(array));
+}
+
+
+LInstruction* LChunkBuilder::DoFixedArrayBaseLength(
+ HFixedArrayBaseLength* instr) {
+ LOperand* array = UseRegisterAtStart(instr->value());
+ return DefineAsRegister(new LFixedArrayBaseLength(array));
+}
+
+
+LInstruction* LChunkBuilder::DoElementsKind(HElementsKind* instr) {
+ LOperand* object = UseRegisterAtStart(instr->value());
+ return DefineAsRegister(new LElementsKind(object));
+}
+
+
+LInstruction* LChunkBuilder::DoValueOf(HValueOf* instr) {
+ LOperand* object = UseRegister(instr->value());
+ LValueOf* result = new LValueOf(object, TempRegister());
+ return AssignEnvironment(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
+ return AssignEnvironment(new LBoundsCheck(UseRegisterAtStart(instr->index()),
+ UseRegister(instr->length())));
+}
+
+
+LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) {
+ // The control instruction marking the end of a block that completed
+ // abruptly (e.g., threw an exception). There is nothing specific to do.
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoThrow(HThrow* instr) {
+ LOperand* value = UseFixed(instr->value(), a0);
+ return MarkAsCall(new LThrow(value), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoUseConst(HUseConst* instr) {
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoForceRepresentation(HForceRepresentation* bad) {
+ // All HForceRepresentation instructions should be eliminated in the
+ // representation change phase of Hydrogen.
+ UNREACHABLE();
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoChange(HChange* instr) {
+ Representation from = instr->from();
+ Representation to = instr->to();
+ if (from.IsTagged()) {
+ if (to.IsDouble()) {
+ LOperand* value = UseRegister(instr->value());
+ LNumberUntagD* res = new LNumberUntagD(value);
+ return AssignEnvironment(DefineAsRegister(res));
+ } else {
+ ASSERT(to.IsInteger32());
+ LOperand* value = UseRegister(instr->value());
+ bool needs_check = !instr->value()->type().IsSmi();
+ LInstruction* res = NULL;
+ if (!needs_check) {
+ res = DefineSameAsFirst(new LSmiUntag(value, needs_check));
+ } else {
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = instr->CanTruncateToInt32() ? TempRegister()
+ : NULL;
+ LOperand* temp3 = instr->CanTruncateToInt32() ? FixedTemp(f22)
+ : NULL;
+ res = DefineSameAsFirst(new LTaggedToI(value, temp1, temp2, temp3));
+ res = AssignEnvironment(res);
+ }
+ return res;
+ }
+ } else if (from.IsDouble()) {
+ if (to.IsTagged()) {
+ LOperand* value = UseRegister(instr->value());
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = TempRegister();
+
+ // Make sure that the temp and result_temp registers are
+ // different.
+ LUnallocated* result_temp = TempRegister();
+ LNumberTagD* result = new LNumberTagD(value, temp1, temp2);
+ Define(result, result_temp);
+ return AssignPointerMap(result);
+ } else {
+ ASSERT(to.IsInteger32());
+ LOperand* value = UseRegister(instr->value());
+ LDoubleToI* res =
+ new LDoubleToI(value,
+ TempRegister(),
+ instr->CanTruncateToInt32() ? TempRegister() : NULL);
+ return AssignEnvironment(DefineAsRegister(res));
+ }
+ } else if (from.IsInteger32()) {
+ if (to.IsTagged()) {
+ HValue* val = instr->value();
+ LOperand* value = UseRegister(val);
+ if (val->HasRange() && val->range()->IsInSmiRange()) {
+ return DefineSameAsFirst(new LSmiTag(value));
+ } else {
+ LNumberTagI* result = new LNumberTagI(value);
+ return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
+ }
+ } else {
+ ASSERT(to.IsDouble());
+ LOperand* value = Use(instr->value());
+ return DefineAsRegister(new LInteger32ToDouble(value));
+ }
+ }
+ UNREACHABLE();
+ return NULL;
+}
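+
+
+// Editorial summary (not part of the upstream patch) of the conversions
+// DoChange selects above:
+//
+//   tagged -> double : LNumberUntagD (may deopt)
+//   tagged -> int32  : LSmiUntag, or LTaggedToI plus environment
+//   double -> tagged : LNumberTagD (allocates, so it needs a pointer map)
+//   double -> int32  : LDoubleToI (may deopt)
+//   int32  -> tagged : LSmiTag, or LNumberTagI plus environment
+//   int32  -> double : LInteger32ToDouble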
+
+
+LInstruction* LChunkBuilder::DoCheckNonSmi(HCheckNonSmi* instr) {
+ LOperand* value = UseRegisterAtStart(instr->value());
+ return AssignEnvironment(new LCheckNonSmi(value));
+}
+
+
+LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
+ LOperand* value = UseRegisterAtStart(instr->value());
+ LInstruction* result = new LCheckInstanceType(value);
+ return AssignEnvironment(result);
+}
+
+
+LInstruction* LChunkBuilder::DoCheckPrototypeMaps(HCheckPrototypeMaps* instr) {
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = TempRegister();
+ LInstruction* result = new LCheckPrototypeMaps(temp1, temp2);
+ return AssignEnvironment(result);
+}
+
+
+LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
+ LOperand* value = UseRegisterAtStart(instr->value());
+ return AssignEnvironment(new LCheckSmi(value));
+}
+
+
+LInstruction* LChunkBuilder::DoCheckFunction(HCheckFunction* instr) {
+ LOperand* value = UseRegisterAtStart(instr->value());
+ return AssignEnvironment(new LCheckFunction(value));
+}
+
+
+LInstruction* LChunkBuilder::DoCheckMap(HCheckMap* instr) {
+ LOperand* value = UseRegisterAtStart(instr->value());
+ LInstruction* result = new LCheckMap(value);
+ return AssignEnvironment(result);
+}
+
+
+LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
+ HValue* value = instr->value();
+ Representation input_rep = value->representation();
+ LOperand* reg = UseRegister(value);
+ if (input_rep.IsDouble()) {
+ // Revisit this decision; it applies here and to the tagged case below.
+ return DefineAsRegister(new LClampDToUint8(reg, FixedTemp(f22)));
+ } else if (input_rep.IsInteger32()) {
+ return DefineAsRegister(new LClampIToUint8(reg));
+ } else {
+ ASSERT(input_rep.IsTagged());
+ // Register allocator doesn't (yet) support allocation of double
+ // temps. Reserve f22 explicitly.
+ LClampTToUint8* result = new LClampTToUint8(reg, FixedTemp(f22));
+ return AssignEnvironment(DefineAsRegister(result));
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoToInt32(HToInt32* instr) {
+ HValue* value = instr->value();
+ Representation input_rep = value->representation();
+ LOperand* reg = UseRegister(value);
+ if (input_rep.IsDouble()) {
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = TempRegister();
+ LDoubleToI* res = new LDoubleToI(reg, temp1, temp2);
+ return AssignEnvironment(DefineAsRegister(res));
+ } else if (input_rep.IsInteger32()) {
+ // Canonicalization should already have removed the hydrogen instruction in
+ // this case, since it is a no-op.
+ UNREACHABLE();
+ return NULL;
+ } else {
+ ASSERT(input_rep.IsTagged());
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = TempRegister();
+ LOperand* temp3 = FixedTemp(f22);
+ LTaggedToI* res = new LTaggedToI(reg, temp1, temp2, temp3);
+ return AssignEnvironment(DefineSameAsFirst(res));
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
+ return new LReturn(UseFixed(instr->value(), v0));
+}
+
+
+LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
+ Representation r = instr->representation();
+ if (r.IsInteger32()) {
+ return DefineAsRegister(new LConstantI);
+ } else if (r.IsDouble()) {
+ return DefineAsRegister(new LConstantD);
+ } else if (r.IsTagged()) {
+ return DefineAsRegister(new LConstantT);
+ } else {
+ UNREACHABLE();
+ return NULL;
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) {
+ LLoadGlobalCell* result = new LLoadGlobalCell;
+ return instr->RequiresHoleCheck()
+ ? AssignEnvironment(DefineAsRegister(result))
+ : DefineAsRegister(result);
+}
+
+
+LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
+ LOperand* global_object = UseFixed(instr->global_object(), a0);
+ LLoadGlobalGeneric* result = new LLoadGlobalGeneric(global_object);
+ return MarkAsCall(DefineFixed(result, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) {
+ LOperand* temp = TempRegister();
+ LOperand* value = UseTempRegister(instr->value());
+ LInstruction* result = new LStoreGlobalCell(value, temp);
+ if (instr->RequiresHoleCheck()) result = AssignEnvironment(result);
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoStoreGlobalGeneric(HStoreGlobalGeneric* instr) {
+ LOperand* global_object = UseFixed(instr->global_object(), a1);
+ LOperand* value = UseFixed(instr->value(), a0);
+ LStoreGlobalGeneric* result =
+ new LStoreGlobalGeneric(global_object, value);
+ return MarkAsCall(result, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
+ LOperand* context = UseRegisterAtStart(instr->value());
+ return DefineAsRegister(new LLoadContextSlot(context));
+}
+
+
+LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) {
+ LOperand* context;
+ LOperand* value;
+ if (instr->NeedsWriteBarrier()) {
+ context = UseTempRegister(instr->context());
+ value = UseTempRegister(instr->value());
+ } else {
+ context = UseRegister(instr->context());
+ value = UseRegister(instr->value());
+ }
+ return new LStoreContextSlot(context, value);
+}
+
+
+LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
+ return DefineAsRegister(
+ new LLoadNamedField(UseRegisterAtStart(instr->object())));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadNamedFieldPolymorphic(
+ HLoadNamedFieldPolymorphic* instr) {
+ ASSERT(instr->representation().IsTagged());
+ if (instr->need_generic()) {
+ LOperand* obj = UseFixed(instr->object(), a0);
+ LLoadNamedFieldPolymorphic* result = new LLoadNamedFieldPolymorphic(obj);
+ return MarkAsCall(DefineFixed(result, v0), instr);
+ } else {
+ LOperand* obj = UseRegisterAtStart(instr->object());
+ LLoadNamedFieldPolymorphic* result = new LLoadNamedFieldPolymorphic(obj);
+ return AssignEnvironment(DefineAsRegister(result));
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
+ LOperand* object = UseFixed(instr->object(), a0);
+ LInstruction* result = DefineFixed(new LLoadNamedGeneric(object), v0);
+ return MarkAsCall(result, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoLoadFunctionPrototype(
+ HLoadFunctionPrototype* instr) {
+ return AssignEnvironment(DefineAsRegister(
+ new LLoadFunctionPrototype(UseRegister(instr->function()))));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadElements(HLoadElements* instr) {
+ LOperand* input = UseRegisterAtStart(instr->value());
+ return DefineAsRegister(new LLoadElements(input));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadExternalArrayPointer(
+ HLoadExternalArrayPointer* instr) {
+ LOperand* input = UseRegisterAtStart(instr->value());
+ return DefineAsRegister(new LLoadExternalArrayPointer(input));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadKeyedFastElement(
+ HLoadKeyedFastElement* instr) {
+ ASSERT(instr->representation().IsTagged());
+ ASSERT(instr->key()->representation().IsInteger32());
+ LOperand* obj = UseRegisterAtStart(instr->object());
+ LOperand* key = UseRegisterAtStart(instr->key());
+ LLoadKeyedFastElement* result = new LLoadKeyedFastElement(obj, key);
+ return AssignEnvironment(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadKeyedFastDoubleElement(
+ HLoadKeyedFastDoubleElement* instr) {
+ ASSERT(instr->representation().IsDouble());
+ ASSERT(instr->key()->representation().IsInteger32());
+ LOperand* elements = UseTempRegister(instr->elements());
+ LOperand* key = UseRegisterOrConstantAtStart(instr->key());
+ LLoadKeyedFastDoubleElement* result =
+ new LLoadKeyedFastDoubleElement(elements, key);
+ return AssignEnvironment(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadKeyedSpecializedArrayElement(
+ HLoadKeyedSpecializedArrayElement* instr) {
+ ElementsKind elements_kind = instr->elements_kind();
+ Representation representation(instr->representation());
+ ASSERT(
+ (representation.IsInteger32() &&
+ (elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
+ (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
+ (representation.IsDouble() &&
+ ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
+ (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
+ ASSERT(instr->key()->representation().IsInteger32());
+ LOperand* external_pointer = UseRegister(instr->external_pointer());
+ LOperand* key = UseRegisterOrConstant(instr->key());
+ LLoadKeyedSpecializedArrayElement* result =
+ new LLoadKeyedSpecializedArrayElement(external_pointer, key);
+ LInstruction* load_instr = DefineAsRegister(result);
+ // An unsigned int array load might overflow and cause a deopt; make sure
+ // it has an environment.
+ return (elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) ?
+ AssignEnvironment(load_instr) : load_instr;
+}
+
+
+LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
+ LOperand* object = UseFixed(instr->object(), a1);
+ LOperand* key = UseFixed(instr->key(), a0);
+
+ LInstruction* result =
+ DefineFixed(new LLoadKeyedGeneric(object, key), v0);
+ return MarkAsCall(result, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreKeyedFastElement(
+ HStoreKeyedFastElement* instr) {
+ bool needs_write_barrier = instr->NeedsWriteBarrier();
+ ASSERT(instr->value()->representation().IsTagged());
+ ASSERT(instr->object()->representation().IsTagged());
+ ASSERT(instr->key()->representation().IsInteger32());
+
+ LOperand* obj = UseTempRegister(instr->object());
+ LOperand* val = needs_write_barrier
+ ? UseTempRegister(instr->value())
+ : UseRegisterAtStart(instr->value());
+ LOperand* key = needs_write_barrier
+ ? UseTempRegister(instr->key())
+ : UseRegisterOrConstantAtStart(instr->key());
+
+ return AssignEnvironment(new LStoreKeyedFastElement(obj, key, val));
+}
+
+
+LInstruction* LChunkBuilder::DoStoreKeyedFastDoubleElement(
+ HStoreKeyedFastDoubleElement* instr) {
+ ASSERT(instr->value()->representation().IsDouble());
+ ASSERT(instr->elements()->representation().IsTagged());
+ ASSERT(instr->key()->representation().IsInteger32());
+
+ LOperand* elements = UseRegisterAtStart(instr->elements());
+ LOperand* val = UseTempRegister(instr->value());
+ LOperand* key = UseRegisterOrConstantAtStart(instr->key());
+
+ return new LStoreKeyedFastDoubleElement(elements, key, val);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreKeyedSpecializedArrayElement(
+ HStoreKeyedSpecializedArrayElement* instr) {
+ Representation representation(instr->value()->representation());
+ ElementsKind elements_kind = instr->elements_kind();
+ ASSERT(
+ (representation.IsInteger32() &&
+ (elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
+ (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
+ (representation.IsDouble() &&
+ ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
+ (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
+ ASSERT(instr->external_pointer()->representation().IsExternal());
+ ASSERT(instr->key()->representation().IsInteger32());
+
+ LOperand* external_pointer = UseRegister(instr->external_pointer());
+ bool val_is_temp_register =
+ elements_kind == EXTERNAL_PIXEL_ELEMENTS ||
+ elements_kind == EXTERNAL_FLOAT_ELEMENTS;
+ LOperand* val = val_is_temp_register
+ ? UseTempRegister(instr->value())
+ : UseRegister(instr->value());
+ LOperand* key = UseRegisterOrConstant(instr->key());
+
+ return new LStoreKeyedSpecializedArrayElement(external_pointer,
+ key,
+ val);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
+ LOperand* obj = UseFixed(instr->object(), a2);
+ LOperand* key = UseFixed(instr->key(), a1);
+ LOperand* val = UseFixed(instr->value(), a0);
+
+ ASSERT(instr->object()->representation().IsTagged());
+ ASSERT(instr->key()->representation().IsTagged());
+ ASSERT(instr->value()->representation().IsTagged());
+
+ return MarkAsCall(new LStoreKeyedGeneric(obj, key, val), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoTransitionElementsKind(
+ HTransitionElementsKind* instr) {
+ if (instr->original_map()->elements_kind() == FAST_SMI_ONLY_ELEMENTS &&
+ instr->transitioned_map()->elements_kind() == FAST_ELEMENTS) {
+ LOperand* object = UseRegister(instr->object());
+ LOperand* new_map_reg = TempRegister();
+ LTransitionElementsKind* result =
+ new LTransitionElementsKind(object, new_map_reg, NULL);
+ return DefineSameAsFirst(result);
+ } else {
+ LOperand* object = UseFixed(instr->object(), a0);
+ LOperand* fixed_object_reg = FixedTemp(a2);
+ LOperand* new_map_reg = FixedTemp(a3);
+ LTransitionElementsKind* result =
+ new LTransitionElementsKind(object, new_map_reg, fixed_object_reg);
+ return MarkAsCall(DefineFixed(result, v0), instr);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
+ bool needs_write_barrier = instr->NeedsWriteBarrier();
+
+ LOperand* obj = needs_write_barrier
+ ? UseTempRegister(instr->object())
+ : UseRegisterAtStart(instr->object());
+
+ LOperand* val = needs_write_barrier
+ ? UseTempRegister(instr->value())
+ : UseRegister(instr->value());
+
+ return new LStoreNamedField(obj, val);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
+ LOperand* obj = UseFixed(instr->object(), a1);
+ LOperand* val = UseFixed(instr->value(), a0);
+
+ LInstruction* result = new LStoreNamedGeneric(obj, val);
+ return MarkAsCall(result, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
+ LOperand* left = UseRegisterAtStart(instr->left());
+ LOperand* right = UseRegisterAtStart(instr->right());
+ return MarkAsCall(DefineFixed(new LStringAdd(left, right), v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
+ LOperand* string = UseTempRegister(instr->string());
+ LOperand* index = UseTempRegister(instr->index());
+ LStringCharCodeAt* result = new LStringCharCodeAt(string, index);
+ return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
+}
+
+
+LInstruction* LChunkBuilder::DoStringCharFromCode(HStringCharFromCode* instr) {
+ LOperand* char_code = UseRegister(instr->value());
+ LStringCharFromCode* result = new LStringCharFromCode(char_code);
+ return AssignPointerMap(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoStringLength(HStringLength* instr) {
+ LOperand* string = UseRegisterAtStart(instr->value());
+ return DefineAsRegister(new LStringLength(string));
+}
+
+
+LInstruction* LChunkBuilder::DoArrayLiteral(HArrayLiteral* instr) {
+ return MarkAsCall(DefineFixed(new LArrayLiteral, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoObjectLiteral(HObjectLiteral* instr) {
+ return MarkAsCall(DefineFixed(new LObjectLiteral, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoRegExpLiteral(HRegExpLiteral* instr) {
+ return MarkAsCall(DefineFixed(new LRegExpLiteral, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoFunctionLiteral(HFunctionLiteral* instr) {
+ return MarkAsCall(DefineFixed(new LFunctionLiteral, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoDeleteProperty(HDeleteProperty* instr) {
+ LOperand* object = UseFixed(instr->object(), a0);
+ LOperand* key = UseFixed(instr->key(), a1);
+ LDeleteProperty* result = new LDeleteProperty(object, key);
+ return MarkAsCall(DefineFixed(result, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
+ allocator_->MarkAsOsrEntry();
+ current_block_->last_environment()->set_ast_id(instr->ast_id());
+ return AssignEnvironment(new LOsrEntry);
+}
+
+
+LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
+ int spill_index = chunk()->GetParameterStackSlot(instr->index());
+ return DefineAsSpilled(new LParameter, spill_index);
+}
+
+
+LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
+ int spill_index = chunk()->GetNextSpillIndex(false); // Not double-width.
+ if (spill_index > LUnallocated::kMaxFixedIndex) {
+ Abort("Too many spill slots needed for OSR");
+ spill_index = 0;
+ }
+ return DefineAsSpilled(new LUnknownOSRValue, spill_index);
+}
+
+
+LInstruction* LChunkBuilder::DoCallStub(HCallStub* instr) {
+ argument_count_ -= instr->argument_count();
+ return MarkAsCall(DefineFixed(new LCallStub, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) {
+ // There are no real uses of the arguments object.
+ // arguments.length and element access are supported directly on
+ // stack arguments, and any real arguments object use causes a bailout.
+ // So this value is never used.
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
+ LOperand* arguments = UseRegister(instr->arguments());
+ LOperand* length = UseTempRegister(instr->length());
+ LOperand* index = UseRegister(instr->index());
+ LAccessArgumentsAt* result = new LAccessArgumentsAt(arguments, length, index);
+ return AssignEnvironment(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoToFastProperties(HToFastProperties* instr) {
+ LOperand* object = UseFixed(instr->value(), a0);
+ LToFastProperties* result = new LToFastProperties(object);
+ return MarkAsCall(DefineFixed(result, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
+ LTypeof* result = new LTypeof(UseFixed(instr->value(), a0));
+ return MarkAsCall(DefineFixed(result, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) {
+ return new LTypeofIsAndBranch(UseTempRegister(instr->value()));
+}
+
+
+LInstruction* LChunkBuilder::DoIsConstructCallAndBranch(
+ HIsConstructCallAndBranch* instr) {
+ return new LIsConstructCallAndBranch(TempRegister());
+}
+
+
+LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
+ HEnvironment* env = current_block_->last_environment();
+ ASSERT(env != NULL);
+
+ env->set_ast_id(instr->ast_id());
+
+ env->Drop(instr->pop_count());
+ for (int i = 0; i < instr->values()->length(); ++i) {
+ HValue* value = instr->values()->at(i);
+ if (instr->HasAssignedIndexAt(i)) {
+ env->Bind(instr->GetAssignedIndexAt(i), value);
+ } else {
+ env->Push(value);
+ }
+ }
+
+ // If there is an instruction pending a deoptimization environment, create
+ // a lazy bailout instruction to capture the environment.
+ if (pending_deoptimization_ast_id_ == instr->ast_id()) {
+ LInstruction* result = new LLazyBailout;
+ result = AssignEnvironment(result);
+ instruction_pending_deoptimization_environment_->
+ set_deoptimization_environment(result->environment());
+ ClearInstructionPendingDeoptimizationEnvironment();
+ return result;
+ }
+
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
+ if (instr->is_function_entry()) {
+ return MarkAsCall(new LStackCheck, instr);
+ } else {
+ ASSERT(instr->is_backwards_branch());
+ return AssignEnvironment(AssignPointerMap(new LStackCheck));
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
+ HEnvironment* outer = current_block_->last_environment();
+ HConstant* undefined = graph()->GetConstantUndefined();
+ HEnvironment* inner = outer->CopyForInlining(instr->closure(),
+ instr->function(),
+ undefined,
+ instr->call_kind());
+ current_block_->UpdateEnvironment(inner);
+ chunk_->AddInlinedClosure(instr->closure());
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
+ HEnvironment* outer = current_block_->last_environment()->outer();
+ current_block_->UpdateEnvironment(outer);
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoIn(HIn* instr) {
+ LOperand* key = UseRegisterAtStart(instr->key());
+ LOperand* object = UseRegisterAtStart(instr->object());
+ LIn* result = new LIn(key, object);
+ return MarkAsCall(DefineFixed(result, v0), instr);
+}
+
+
+} } // namespace v8::internal
diff --git a/deps/v8/src/mips/lithium-mips.h b/deps/v8/src/mips/lithium-mips.h
index ebc1e43bf..40f3f7a54 100644
--- a/deps/v8/src/mips/lithium-mips.h
+++ b/deps/v8/src/mips/lithium-mips.h
@@ -32,275 +32,2225 @@
#include "lithium-allocator.h"
#include "lithium.h"
#include "safepoint-table.h"
-
-// Note: this file was taken from the X64 version. ARM has a partially working
-// lithium implementation, but for now it is not ported to mips.
+#include "utils.h"
namespace v8 {
namespace internal {
// Forward declarations.
class LCodeGen;
-class LEnvironment;
-class Translation;
+
+#define LITHIUM_ALL_INSTRUCTION_LIST(V) \
+ V(ControlInstruction) \
+ V(Call) \
+ LITHIUM_CONCRETE_INSTRUCTION_LIST(V)
+
+
+#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V) \
+ V(AccessArgumentsAt) \
+ V(AddI) \
+ V(ApplyArguments) \
+ V(ArgumentsElements) \
+ V(ArgumentsLength) \
+ V(ArithmeticD) \
+ V(ArithmeticT) \
+ V(ArrayLiteral) \
+ V(BitI) \
+ V(BitNotI) \
+ V(BoundsCheck) \
+ V(Branch) \
+ V(CallConstantFunction) \
+ V(CallFunction) \
+ V(CallGlobal) \
+ V(CallKeyed) \
+ V(CallKnownGlobal) \
+ V(CallNamed) \
+ V(CallNew) \
+ V(CallRuntime) \
+ V(CallStub) \
+ V(CheckFunction) \
+ V(CheckInstanceType) \
+ V(CheckMap) \
+ V(CheckNonSmi) \
+ V(CheckPrototypeMaps) \
+ V(CheckSmi) \
+ V(ClampDToUint8) \
+ V(ClampIToUint8) \
+ V(ClampTToUint8) \
+ V(ClassOfTestAndBranch) \
+ V(CmpConstantEqAndBranch) \
+ V(CmpIDAndBranch) \
+ V(CmpMapAndBranch) \
+ V(CmpObjectEqAndBranch) \
+ V(CmpT) \
+ V(ConstantD) \
+ V(ConstantI) \
+ V(ConstantT) \
+ V(Context) \
+ V(DeleteProperty) \
+ V(Deoptimize) \
+ V(DivI) \
+ V(DoubleToI) \
+ V(ElementsKind) \
+ V(FixedArrayBaseLength) \
+ V(FunctionLiteral) \
+ V(GetCachedArrayIndex) \
+ V(GlobalObject) \
+ V(GlobalReceiver) \
+ V(Goto) \
+ V(HasCachedArrayIndexAndBranch) \
+ V(HasInstanceTypeAndBranch) \
+ V(In) \
+ V(InstanceOf) \
+ V(InstanceOfKnownGlobal) \
+ V(InstructionGap) \
+ V(Integer32ToDouble) \
+ V(InvokeFunction) \
+ V(IsConstructCallAndBranch) \
+ V(IsNilAndBranch) \
+ V(IsObjectAndBranch) \
+ V(IsSmiAndBranch) \
+ V(IsStringAndBranch) \
+ V(IsUndetectableAndBranch) \
+ V(JSArrayLength) \
+ V(Label) \
+ V(LazyBailout) \
+ V(LoadContextSlot) \
+ V(LoadElements) \
+ V(LoadExternalArrayPointer) \
+ V(LoadFunctionPrototype) \
+ V(LoadGlobalCell) \
+ V(LoadGlobalGeneric) \
+ V(LoadKeyedFastDoubleElement) \
+ V(LoadKeyedFastElement) \
+ V(LoadKeyedGeneric) \
+ V(LoadKeyedSpecializedArrayElement) \
+ V(LoadNamedField) \
+ V(LoadNamedFieldPolymorphic) \
+ V(LoadNamedGeneric) \
+ V(ModI) \
+ V(MulI) \
+ V(NumberTagD) \
+ V(NumberTagI) \
+ V(NumberUntagD) \
+ V(ObjectLiteral) \
+ V(OsrEntry) \
+ V(OuterContext) \
+ V(Parameter) \
+ V(Power) \
+ V(PushArgument) \
+ V(RegExpLiteral) \
+ V(Return) \
+ V(ShiftI) \
+ V(SmiTag) \
+ V(SmiUntag) \
+ V(StackCheck) \
+ V(StoreContextSlot) \
+ V(StoreGlobalCell) \
+ V(StoreGlobalGeneric) \
+ V(StoreKeyedFastDoubleElement) \
+ V(StoreKeyedFastElement) \
+ V(StoreKeyedGeneric) \
+ V(StoreKeyedSpecializedArrayElement) \
+ V(StoreNamedField) \
+ V(StoreNamedGeneric) \
+ V(StringAdd) \
+ V(StringCharCodeAt) \
+ V(StringCharFromCode) \
+ V(StringCompareAndBranch) \
+ V(StringLength) \
+ V(SubI) \
+ V(TaggedToI) \
+ V(ThisFunction) \
+ V(Throw) \
+ V(ToFastProperties) \
+ V(TransitionElementsKind) \
+ V(Typeof) \
+ V(TypeofIsAndBranch) \
+ V(UnaryMathOperation) \
+ V(UnknownOSRValue) \
+ V(ValueOf)
+
+
+#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
+ virtual Opcode opcode() const { return LInstruction::k##type; } \
+ virtual void CompileToNative(LCodeGen* generator); \
+ virtual const char* Mnemonic() const { return mnemonic; } \
+ static L##type* cast(LInstruction* instr) { \
+ ASSERT(instr->Is##type()); \
+ return reinterpret_cast<L##type*>(instr); \
+ }
+
+
+#define DECLARE_HYDROGEN_ACCESSOR(type) \
+ H##type* hydrogen() const { \
+ return H##type::cast(hydrogen_value()); \
+ }
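+
+// Editorial sketch, not part of the upstream patch: inside class LAddI,
+// DECLARE_CONCRETE_INSTRUCTION(AddI, "add-i") expands to
+//
+//   virtual Opcode opcode() const { return LInstruction::kAddI; }
+//   virtual void CompileToNative(LCodeGen* generator);
+//   virtual const char* Mnemonic() const { return "add-i"; }
+//   static LAddI* cast(LInstruction* instr) {
+//     ASSERT(instr->IsAddI());
+//     return reinterpret_cast<LAddI*>(instr);
+//   }
+//
+// and DECLARE_HYDROGEN_ACCESSOR(Add) expands to
+//
+//   HAdd* hydrogen() const { return HAdd::cast(hydrogen_value()); }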
+
class LInstruction: public ZoneObject {
public:
- LInstruction() { }
+ LInstruction()
+ : environment_(NULL),
+ hydrogen_value_(NULL),
+ is_call_(false),
+ is_save_doubles_(false) { }
virtual ~LInstruction() { }
- // Predicates should be generated by macro as in lithium-ia32.h.
- virtual bool IsLabel() const {
- UNIMPLEMENTED();
- return false;
+ virtual void CompileToNative(LCodeGen* generator) = 0;
+ virtual const char* Mnemonic() const = 0;
+ virtual void PrintTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) = 0;
+ virtual void PrintOutputOperandTo(StringStream* stream) = 0;
+
+ enum Opcode {
+ // Declare a unique enum value for each instruction.
+#define DECLARE_OPCODE(type) k##type,
+ LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_OPCODE)
+ kNumberOfInstructions
+#undef DECLARE_OPCODE
+ };
+
+ virtual Opcode opcode() const = 0;
+
+ // Declare non-virtual type testers for all leaf IR classes.
+#define DECLARE_PREDICATE(type) \
+ bool Is##type() const { return opcode() == k##type; }
+ LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_PREDICATE)
+#undef DECLARE_PREDICATE
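+
+ // Editorial sketch: for the list entry V(AddI), the two macro expansions
+ // above generate an opcode enum value and a matching type tester:
+ //
+ //   kAddI,                                             // in enum Opcode
+ //   bool IsAddI() const { return opcode() == kAddI; }  // predicate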
+
+ // Declare virtual predicates for instructions that don't have
+ // an opcode.
+ virtual bool IsGap() const { return false; }
+
+ virtual bool IsControl() const { return false; }
+
+ void set_environment(LEnvironment* env) { environment_ = env; }
+ LEnvironment* environment() const { return environment_; }
+ bool HasEnvironment() const { return environment_ != NULL; }
+
+ void set_pointer_map(LPointerMap* p) { pointer_map_.set(p); }
+ LPointerMap* pointer_map() const { return pointer_map_.get(); }
+ bool HasPointerMap() const { return pointer_map_.is_set(); }
+
+ void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
+ HValue* hydrogen_value() const { return hydrogen_value_; }
+
+ void set_deoptimization_environment(LEnvironment* env) {
+ deoptimization_environment_.set(env);
}
- virtual bool IsOsrEntry() const {
- UNIMPLEMENTED();
- return false;
+ LEnvironment* deoptimization_environment() const {
+ return deoptimization_environment_.get();
}
+ bool HasDeoptimizationEnvironment() const {
+ return deoptimization_environment_.is_set();
+ }
+
+ void MarkAsCall() { is_call_ = true; }
+ void MarkAsSaveDoubles() { is_save_doubles_ = true; }
+
+ // Interface to the register allocator and iterators.
+ bool IsMarkedAsCall() const { return is_call_; }
+ bool IsMarkedAsSaveDoubles() const { return is_save_doubles_; }
+
+ virtual bool HasResult() const = 0;
+ virtual LOperand* result() = 0;
+
+ virtual int InputCount() = 0;
+ virtual LOperand* InputAt(int i) = 0;
+ virtual int TempCount() = 0;
+ virtual LOperand* TempAt(int i) = 0;
- LPointerMap* pointer_map() const {
- UNIMPLEMENTED();
- return NULL;
+ LOperand* FirstInput() { return InputAt(0); }
+ LOperand* Output() { return HasResult() ? result() : NULL; }
+
+#ifdef DEBUG
+ void VerifyCall();
+#endif
+
+ private:
+ LEnvironment* environment_;
+ SetOncePointer<LPointerMap> pointer_map_;
+ HValue* hydrogen_value_;
+ SetOncePointer<LEnvironment> deoptimization_environment_;
+ bool is_call_;
+ bool is_save_doubles_;
+};
+
+
+// R = number of result operands (0 or 1).
+// I = number of input operands.
+// T = number of temporary operands.
+template<int R, int I, int T>
+class LTemplateInstruction: public LInstruction {
+ public:
+ // Allow 0 or 1 output operands.
+ STATIC_ASSERT(R == 0 || R == 1);
+ virtual bool HasResult() const { return R != 0; }
+ void set_result(LOperand* operand) { results_[0] = operand; }
+ LOperand* result() { return results_[0]; }
+
+ int InputCount() { return I; }
+ LOperand* InputAt(int i) { return inputs_[i]; }
+
+ int TempCount() { return T; }
+ LOperand* TempAt(int i) { return temps_[i]; }
+
+ virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintOutputOperandTo(StringStream* stream);
+
+ protected:
+ EmbeddedContainer<LOperand*, R> results_;
+ EmbeddedContainer<LOperand*, I> inputs_;
+ EmbeddedContainer<LOperand*, T> temps_;
+};
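+
+
+// Editorial sketch of a typical instantiation: an instruction with one
+// result, two inputs and no temps derives from LTemplateInstruction<1, 2, 0>
+// and fills inputs_ in its constructor, exactly as LSubI does further below:
+//
+//   class LSubI: public LTemplateInstruction<1, 2, 0> {
+//    public:
+//     LSubI(LOperand* left, LOperand* right) {
+//       inputs_[0] = left;
+//       inputs_[1] = right;
+//     }
+//     ...
+//   };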
+
+
+class LGap: public LTemplateInstruction<0, 0, 0> {
+ public:
+ explicit LGap(HBasicBlock* block)
+ : block_(block) {
+ parallel_moves_[BEFORE] = NULL;
+ parallel_moves_[START] = NULL;
+ parallel_moves_[END] = NULL;
+ parallel_moves_[AFTER] = NULL;
}
- bool HasPointerMap() const {
- UNIMPLEMENTED();
- return false;
+ // Can't use the DECLARE macro here because of subclasses.
+ virtual bool IsGap() const { return true; }
+ virtual void PrintDataTo(StringStream* stream);
+ static LGap* cast(LInstruction* instr) {
+ ASSERT(instr->IsGap());
+ return reinterpret_cast<LGap*>(instr);
}
- void set_environment(LEnvironment* env) { UNIMPLEMENTED(); }
+ bool IsRedundant() const;
- LEnvironment* environment() const {
- UNIMPLEMENTED();
- return NULL;
+ HBasicBlock* block() const { return block_; }
+
+ enum InnerPosition {
+ BEFORE,
+ START,
+ END,
+ AFTER,
+ FIRST_INNER_POSITION = BEFORE,
+ LAST_INNER_POSITION = AFTER
+ };
+
+ LParallelMove* GetOrCreateParallelMove(InnerPosition pos) {
+ if (parallel_moves_[pos] == NULL) parallel_moves_[pos] = new LParallelMove;
+ return parallel_moves_[pos];
}
- bool HasEnvironment() const {
- UNIMPLEMENTED();
- return false;
+ LParallelMove* GetParallelMove(InnerPosition pos) {
+ return parallel_moves_[pos];
}
- virtual void PrintTo(StringStream* stream) const { UNIMPLEMENTED(); }
+ private:
+ LParallelMove* parallel_moves_[LAST_INNER_POSITION + 1];
+ HBasicBlock* block_;
+};
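+
+
+// Editorial usage sketch, assuming LParallelMove::AddMove from lithium.h:
+// callers append a move to a gap at a given inner position, creating the
+// parallel move on demand:
+//
+//   gap->GetOrCreateParallelMove(LGap::START)->AddMove(from, to);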
+
+
+class LInstructionGap: public LGap {
+ public:
+ explicit LInstructionGap(HBasicBlock* block) : LGap(block) { }
+
+ DECLARE_CONCRETE_INSTRUCTION(InstructionGap, "gap")
+};
+
+
+class LGoto: public LTemplateInstruction<0, 0, 0> {
+ public:
+ explicit LGoto(int block_id) : block_id_(block_id) { }
+
+ DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
+ virtual void PrintDataTo(StringStream* stream);
+ virtual bool IsControl() const { return true; }
+
+ int block_id() const { return block_id_; }
+
+ private:
+ int block_id_;
+};
+
- virtual bool IsControl() const {
- UNIMPLEMENTED();
- return false;
+class LLazyBailout: public LTemplateInstruction<0, 0, 0> {
+ public:
+ LLazyBailout() : gap_instructions_size_(0) { }
+
+ DECLARE_CONCRETE_INSTRUCTION(LazyBailout, "lazy-bailout")
+
+ void set_gap_instructions_size(int gap_instructions_size) {
+ gap_instructions_size_ = gap_instructions_size;
}
+ int gap_instructions_size() { return gap_instructions_size_; }
- void MarkAsCall() { UNIMPLEMENTED(); }
- void MarkAsSaveDoubles() { UNIMPLEMENTED(); }
+ private:
+ int gap_instructions_size_;
+};
- // Interface to the register allocator and iterators.
- bool IsMarkedAsCall() const {
- UNIMPLEMENTED();
- return false;
+
+class LDeoptimize: public LTemplateInstruction<0, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
+};
+
+
+class LLabel: public LGap {
+ public:
+ explicit LLabel(HBasicBlock* block)
+ : LGap(block), replacement_(NULL) { }
+
+ DECLARE_CONCRETE_INSTRUCTION(Label, "label")
+
+ virtual void PrintDataTo(StringStream* stream);
+
+ int block_id() const { return block()->block_id(); }
+ bool is_loop_header() const { return block()->IsLoopHeader(); }
+ Label* label() { return &label_; }
+ LLabel* replacement() const { return replacement_; }
+ void set_replacement(LLabel* label) { replacement_ = label; }
+ bool HasReplacement() const { return replacement_ != NULL; }
+
+ private:
+ Label label_;
+ LLabel* replacement_;
+};
+
+
+class LParameter: public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter")
+};
+
+
+class LCallStub: public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(CallStub, "call-stub")
+ DECLARE_HYDROGEN_ACCESSOR(CallStub)
+
+ TranscendentalCache::Type transcendental_type() {
+ return hydrogen()->transcendental_type();
+ }
+};
+
+
+class LUnknownOSRValue: public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown-osr-value")
+};
+
+
+template<int I, int T>
+class LControlInstruction: public LTemplateInstruction<0, I, T> {
+ public:
+ virtual bool IsControl() const { return true; }
+
+ int SuccessorCount() { return hydrogen()->SuccessorCount(); }
+ HBasicBlock* SuccessorAt(int i) { return hydrogen()->SuccessorAt(i); }
+ int true_block_id() { return hydrogen()->SuccessorAt(0)->block_id(); }
+ int false_block_id() { return hydrogen()->SuccessorAt(1)->block_id(); }
+
+ private:
+ HControlInstruction* hydrogen() {
+ return HControlInstruction::cast(this->hydrogen_value());
+ }
+};
+
+
+class LApplyArguments: public LTemplateInstruction<1, 4, 0> {
+ public:
+ LApplyArguments(LOperand* function,
+ LOperand* receiver,
+ LOperand* length,
+ LOperand* elements) {
+ inputs_[0] = function;
+ inputs_[1] = receiver;
+ inputs_[2] = length;
+ inputs_[3] = elements;
}
- bool IsMarkedAsSaveDoubles() const {
- UNIMPLEMENTED();
- return false;
+ DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply-arguments")
+
+ LOperand* function() { return inputs_[0]; }
+ LOperand* receiver() { return inputs_[1]; }
+ LOperand* length() { return inputs_[2]; }
+ LOperand* elements() { return inputs_[3]; }
+};
+
+
+class LAccessArgumentsAt: public LTemplateInstruction<1, 3, 0> {
+ public:
+ LAccessArgumentsAt(LOperand* arguments, LOperand* length, LOperand* index) {
+ inputs_[0] = arguments;
+ inputs_[1] = length;
+ inputs_[2] = index;
}
- virtual bool HasResult() const {
- UNIMPLEMENTED();
- return false;
+ DECLARE_CONCRETE_INSTRUCTION(AccessArgumentsAt, "access-arguments-at")
+
+ LOperand* arguments() { return inputs_[0]; }
+ LOperand* length() { return inputs_[1]; }
+ LOperand* index() { return inputs_[2]; }
+
+ virtual void PrintDataTo(StringStream* stream);
+};
+
+
+class LArgumentsLength: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LArgumentsLength(LOperand* elements) {
+ inputs_[0] = elements;
}
- virtual LOperand* result() {
- UNIMPLEMENTED();
- return NULL;
+ DECLARE_CONCRETE_INSTRUCTION(ArgumentsLength, "arguments-length")
+};
+
+
+class LArgumentsElements: public LTemplateInstruction<1, 0, 0> {
+ public:
+ LArgumentsElements() { }
+
+ DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments-elements")
+};
+
+
+class LModI: public LTemplateInstruction<1, 2, 3> {
+ public:
+ // Used when the right-hand side is a constant power of 2.
+ LModI(LOperand* left,
+ LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ temps_[0] = NULL;
+ temps_[1] = NULL;
+ temps_[2] = NULL;
}
- virtual int InputCount() {
- UNIMPLEMENTED();
- return 0;
+ // Used for the standard case.
+ LModI(LOperand* left,
+ LOperand* right,
+ LOperand* temp1,
+ LOperand* temp2,
+ LOperand* temp3) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ temps_[2] = temp3;
}
- virtual LOperand* InputAt(int i) {
- UNIMPLEMENTED();
- return NULL;
+ DECLARE_CONCRETE_INSTRUCTION(ModI, "mod-i")
+ DECLARE_HYDROGEN_ACCESSOR(Mod)
+};
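+
+
+// Editorial note: LModI reserves three temp slots (T = 3), but the
+// power-of-2 fast path needs none of them, so its constructor leaves all
+// three NULL; only the standard case (see DoMod in lithium-mips.cc) passes
+// real temps.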
+
+
+class LDivI: public LTemplateInstruction<1, 2, 0> {
+ public:
+ LDivI(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
}
- virtual int TempCount() {
- UNIMPLEMENTED();
- return 0;
+ DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i")
+ DECLARE_HYDROGEN_ACCESSOR(Div)
+};
+
+
+class LMulI: public LTemplateInstruction<1, 2, 1> {
+ public:
+ LMulI(LOperand* left, LOperand* right, LOperand* temp) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ temps_[0] = temp;
}
- virtual LOperand* TempAt(int i) {
- UNIMPLEMENTED();
- return NULL;
+ DECLARE_CONCRETE_INSTRUCTION(MulI, "mul-i")
+ DECLARE_HYDROGEN_ACCESSOR(Mul)
+};
+
+
+class LCmpIDAndBranch: public LControlInstruction<2, 0> {
+ public:
+ LCmpIDAndBranch(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
}
- LOperand* FirstInput() {
- UNIMPLEMENTED();
- return NULL;
+ DECLARE_CONCRETE_INSTRUCTION(CmpIDAndBranch, "cmp-id-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(CompareIDAndBranch)
+
+ Token::Value op() const { return hydrogen()->token(); }
+ bool is_double() const {
+ return hydrogen()->GetInputRepresentation().IsDouble();
}
- LOperand* Output() {
- UNIMPLEMENTED();
- return NULL;
+ virtual void PrintDataTo(StringStream* stream);
+};
+
+
+class LUnaryMathOperation: public LTemplateInstruction<1, 1, 1> {
+ public:
+ LUnaryMathOperation(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
}
-#ifdef DEBUG
- void VerifyCall() { UNIMPLEMENTED(); }
-#endif
+ DECLARE_CONCRETE_INSTRUCTION(UnaryMathOperation, "unary-math-operation")
+ DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
+
+ virtual void PrintDataTo(StringStream* stream);
+ BuiltinFunctionId op() const { return hydrogen()->op(); }
};
-class LGap: public LInstruction {
+class LCmpObjectEqAndBranch: public LControlInstruction<2, 0> {
public:
- explicit LGap(HBasicBlock* block) { }
+ LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(CmpObjectEqAndBranch,
+ "cmp-object-eq-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(CompareObjectEqAndBranch)
+};
- HBasicBlock* block() const {
- UNIMPLEMENTED();
- return NULL;
+
+class LCmpConstantEqAndBranch: public LControlInstruction<1, 0> {
+ public:
+ explicit LCmpConstantEqAndBranch(LOperand* left) {
+ inputs_[0] = left;
}
- enum InnerPosition {
- BEFORE,
- START,
- END,
- AFTER,
- FIRST_INNER_POSITION = BEFORE,
- LAST_INNER_POSITION = AFTER
- };
+ DECLARE_CONCRETE_INSTRUCTION(CmpConstantEqAndBranch,
+ "cmp-constant-eq-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(CompareConstantEqAndBranch)
+};
- LParallelMove* GetOrCreateParallelMove(InnerPosition pos) {
- UNIMPLEMENTED();
- return NULL;
+
+class LIsNilAndBranch: public LControlInstruction<1, 0> {
+ public:
+ explicit LIsNilAndBranch(LOperand* value) {
+ inputs_[0] = value;
}
- LParallelMove* GetParallelMove(InnerPosition pos) {
- UNIMPLEMENTED();
- return NULL;
+ DECLARE_CONCRETE_INSTRUCTION(IsNilAndBranch, "is-nil-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(IsNilAndBranch)
+
+ EqualityKind kind() const { return hydrogen()->kind(); }
+ NilValue nil() const { return hydrogen()->nil(); }
+
+ virtual void PrintDataTo(StringStream* stream);
+};
+
+
+class LIsObjectAndBranch: public LControlInstruction<1, 1> {
+ public:
+ LIsObjectAndBranch(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
}
+
+ DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(IsObjectAndBranch)
+
+ virtual void PrintDataTo(StringStream* stream);
};
-class LLabel: public LGap {
+class LIsStringAndBranch: public LControlInstruction<1, 1> {
public:
- explicit LLabel(HBasicBlock* block) : LGap(block) { }
+ LIsStringAndBranch(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch, "is-string-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(IsStringAndBranch)
+
+ virtual void PrintDataTo(StringStream* stream);
};
-class LOsrEntry: public LInstruction {
+class LIsSmiAndBranch: public LControlInstruction<1, 0> {
public:
- // Function could be generated by a macro as in lithium-ia32.h.
- static LOsrEntry* cast(LInstruction* instr) {
- UNIMPLEMENTED();
- return NULL;
+ explicit LIsSmiAndBranch(LOperand* value) {
+ inputs_[0] = value;
}
- LOperand** SpilledRegisterArray() {
- UNIMPLEMENTED();
- return NULL;
+ DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(IsSmiAndBranch)
+
+ virtual void PrintDataTo(StringStream* stream);
+};
+
+
+class LIsUndetectableAndBranch: public LControlInstruction<1, 1> {
+ public:
+ LIsUndetectableAndBranch(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
}
- LOperand** SpilledDoubleRegisterArray() {
- UNIMPLEMENTED();
- return NULL;
+
+ DECLARE_CONCRETE_INSTRUCTION(IsUndetectableAndBranch,
+ "is-undetectable-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(IsUndetectableAndBranch)
+
+ virtual void PrintDataTo(StringStream* stream);
+};
+
+
+class LStringCompareAndBranch: public LControlInstruction<2, 0> {
+ public:
+ LStringCompareAndBranch(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
}
- void MarkSpilledRegister(int allocation_index, LOperand* spill_operand) {
- UNIMPLEMENTED();
+ DECLARE_CONCRETE_INSTRUCTION(StringCompareAndBranch,
+ "string-compare-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(StringCompareAndBranch)
+
+ Token::Value op() const { return hydrogen()->token(); }
+
+ virtual void PrintDataTo(StringStream* stream);
+};
+
+
+class LHasInstanceTypeAndBranch: public LControlInstruction<1, 0> {
+ public:
+ explicit LHasInstanceTypeAndBranch(LOperand* value) {
+ inputs_[0] = value;
}
- void MarkSpilledDoubleRegister(int allocation_index,
- LOperand* spill_operand) {
- UNIMPLEMENTED();
+
+ DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch,
+ "has-instance-type-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(HasInstanceTypeAndBranch)
+
+ virtual void PrintDataTo(StringStream* stream);
+};
+
+
+class LGetCachedArrayIndex: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LGetCachedArrayIndex(LOperand* value) {
+ inputs_[0] = value;
}
+
+ DECLARE_CONCRETE_INSTRUCTION(GetCachedArrayIndex, "get-cached-array-index")
+ DECLARE_HYDROGEN_ACCESSOR(GetCachedArrayIndex)
};
-class LChunk: public ZoneObject {
+class LHasCachedArrayIndexAndBranch: public LControlInstruction<1, 0> {
+ public:
+ explicit LHasCachedArrayIndexAndBranch(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndexAndBranch,
+ "has-cached-array-index-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndexAndBranch)
+
+ virtual void PrintDataTo(StringStream* stream);
+};
+
+
+class LClassOfTestAndBranch: public LControlInstruction<1, 1> {
+ public:
+ LClassOfTestAndBranch(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch,
+ "class-of-test-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(ClassOfTestAndBranch)
+
+ virtual void PrintDataTo(StringStream* stream);
+};
+
+
+class LCmpT: public LTemplateInstruction<1, 2, 0> {
public:
- explicit LChunk(HGraph* graph) { }
+ LCmpT(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
+ DECLARE_HYDROGEN_ACCESSOR(CompareGeneric)
+
+ Token::Value op() const { return hydrogen()->token(); }
+};
+
- HGraph* graph() const {
- UNIMPLEMENTED();
- return NULL;
+class LInstanceOf: public LTemplateInstruction<1, 2, 0> {
+ public:
+ LInstanceOf(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
}
- const ZoneList<LPointerMap*>* pointer_maps() const {
- UNIMPLEMENTED();
- return NULL;
+ DECLARE_CONCRETE_INSTRUCTION(InstanceOf, "instance-of")
+};
+
+
+class LInstanceOfKnownGlobal: public LTemplateInstruction<1, 1, 1> {
+ public:
+ LInstanceOfKnownGlobal(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
}
- LOperand* GetNextSpillSlot(bool double_slot) {
- UNIMPLEMENTED();
- return NULL;
+ DECLARE_CONCRETE_INSTRUCTION(InstanceOfKnownGlobal,
+ "instance-of-known-global")
+ DECLARE_HYDROGEN_ACCESSOR(InstanceOfKnownGlobal)
+
+ Handle<JSFunction> function() const { return hydrogen()->function(); }
+};
+
+
+class LBoundsCheck: public LTemplateInstruction<0, 2, 0> {
+ public:
+ LBoundsCheck(LOperand* index, LOperand* length) {
+ inputs_[0] = index;
+ inputs_[1] = length;
}
- LConstantOperand* DefineConstantOperand(HConstant* constant) {
- UNIMPLEMENTED();
- return NULL;
+ LOperand* index() { return inputs_[0]; }
+ LOperand* length() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(BoundsCheck, "bounds-check")
+};
+
+
+class LBitI: public LTemplateInstruction<1, 2, 0> {
+ public:
+ LBitI(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
}
- LLabel* GetLabel(int block_id) const {
- UNIMPLEMENTED();
- return NULL;
+ Token::Value op() const { return hydrogen()->op(); }
+
+ DECLARE_CONCRETE_INSTRUCTION(BitI, "bit-i")
+ DECLARE_HYDROGEN_ACCESSOR(Bitwise)
+};
+
+
+class LShiftI: public LTemplateInstruction<1, 2, 0> {
+ public:
+ LShiftI(Token::Value op, LOperand* left, LOperand* right, bool can_deopt)
+ : op_(op), can_deopt_(can_deopt) {
+ inputs_[0] = left;
+ inputs_[1] = right;
}
- const ZoneList<LInstruction*>* instructions() const {
- UNIMPLEMENTED();
- return NULL;
+ Token::Value op() const { return op_; }
+
+ bool can_deopt() const { return can_deopt_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ShiftI, "shift-i")
+
+ private:
+ Token::Value op_;
+ bool can_deopt_;
+};
+
+
+class LSubI: public LTemplateInstruction<1, 2, 0> {
+ public:
+ LSubI(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
}
- int GetParameterStackSlot(int index) const {
- UNIMPLEMENTED();
- return 0;
+ DECLARE_CONCRETE_INSTRUCTION(SubI, "sub-i")
+ DECLARE_HYDROGEN_ACCESSOR(Sub)
+};
+
+
+class LConstantI: public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(ConstantI, "constant-i")
+ DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+ int32_t value() const { return hydrogen()->Integer32Value(); }
+};
+
+
+class LConstantD: public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(ConstantD, "constant-d")
+ DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+ double value() const { return hydrogen()->DoubleValue(); }
+};
+
+
+class LConstantT: public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(ConstantT, "constant-t")
+ DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+ Handle<Object> value() const { return hydrogen()->handle(); }
+};
+
+
+class LBranch: public LControlInstruction<1, 0> {
+ public:
+ explicit LBranch(LOperand* value) {
+ inputs_[0] = value;
}
- void AddGapMove(int index, LOperand* from, LOperand* to) { UNIMPLEMENTED(); }
+ DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
+ DECLARE_HYDROGEN_ACCESSOR(Branch)
+
+ virtual void PrintDataTo(StringStream* stream);
+};
- LGap* GetGapAt(int index) const {
- UNIMPLEMENTED();
- return NULL;
+
+class LCmpMapAndBranch: public LTemplateInstruction<0, 1, 1> {
+ public:
+ LCmpMapAndBranch(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
}
- bool IsGapAt(int index) const {
- UNIMPLEMENTED();
- return false;
+ DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(CompareMap)
+
+ virtual bool IsControl() const { return true; }
+
+ Handle<Map> map() const { return hydrogen()->map(); }
+ int true_block_id() const {
+ return hydrogen()->FirstSuccessor()->block_id();
+ }
+ int false_block_id() const {
+ return hydrogen()->SecondSuccessor()->block_id();
}
+};
+
- int NearestGapPos(int index) const {
- UNIMPLEMENTED();
- return 0;
+class LJSArrayLength: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LJSArrayLength(LOperand* value) {
+ inputs_[0] = value;
}
- void MarkEmptyBlocks() { UNIMPLEMENTED(); }
+ DECLARE_CONCRETE_INSTRUCTION(JSArrayLength, "js-array-length")
+ DECLARE_HYDROGEN_ACCESSOR(JSArrayLength)
+};
+
- CompilationInfo* info() const {
- UNIMPLEMENTED();
- return NULL;
+class LFixedArrayBaseLength: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LFixedArrayBaseLength(LOperand* value) {
+ inputs_[0] = value;
}
-#ifdef DEBUG
- void Verify() { UNIMPLEMENTED(); }
-#endif
+ DECLARE_CONCRETE_INSTRUCTION(FixedArrayBaseLength,
+ "fixed-array-base-length")
+ DECLARE_HYDROGEN_ACCESSOR(FixedArrayBaseLength)
+};
+
+
+class LElementsKind: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LElementsKind(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(ElementsKind, "elements-kind")
+ DECLARE_HYDROGEN_ACCESSOR(ElementsKind)
+};
+
+
+class LValueOf: public LTemplateInstruction<1, 1, 1> {
+ public:
+ LValueOf(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(ValueOf, "value-of")
+ DECLARE_HYDROGEN_ACCESSOR(ValueOf)
+};
+
+
+class LThrow: public LTemplateInstruction<0, 1, 0> {
+ public:
+ explicit LThrow(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(Throw, "throw")
+};
+
+
+class LBitNotI: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LBitNotI(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(BitNotI, "bit-not-i")
+};
+
+
+class LAddI: public LTemplateInstruction<1, 2, 0> {
+ public:
+ LAddI(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(AddI, "add-i")
+ DECLARE_HYDROGEN_ACCESSOR(Add)
+};
+
+
+class LPower: public LTemplateInstruction<1, 2, 0> {
+ public:
+ LPower(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(Power, "power")
+ DECLARE_HYDROGEN_ACCESSOR(Power)
+};
+
+
+class LArithmeticD: public LTemplateInstruction<1, 2, 0> {
+ public:
+ LArithmeticD(Token::Value op, LOperand* left, LOperand* right)
+ : op_(op) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ Token::Value op() const { return op_; }
+
+ virtual Opcode opcode() const { return LInstruction::kArithmeticD; }
+ virtual void CompileToNative(LCodeGen* generator);
+ virtual const char* Mnemonic() const;
+
+ private:
+ Token::Value op_;
+};
+
+
+class LArithmeticT: public LTemplateInstruction<1, 2, 0> {
+ public:
+ LArithmeticT(Token::Value op, LOperand* left, LOperand* right)
+ : op_(op) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ virtual Opcode opcode() const { return LInstruction::kArithmeticT; }
+ virtual void CompileToNative(LCodeGen* generator);
+ virtual const char* Mnemonic() const;
+
+ Token::Value op() const { return op_; }
+
+ private:
+ Token::Value op_;
+};
+
+
+class LReturn: public LTemplateInstruction<0, 1, 0> {
+ public:
+ explicit LReturn(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(Return, "return")
+};
+
+
+class LLoadNamedField: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LLoadNamedField(LOperand* object) {
+ inputs_[0] = object;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field")
+ DECLARE_HYDROGEN_ACCESSOR(LoadNamedField)
+};
+
+
+class LLoadNamedFieldPolymorphic: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LLoadNamedFieldPolymorphic(LOperand* object) {
+ inputs_[0] = object;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadNamedFieldPolymorphic,
+ "load-named-field-polymorphic")
+ DECLARE_HYDROGEN_ACCESSOR(LoadNamedFieldPolymorphic)
+
+ LOperand* object() { return inputs_[0]; }
+};
+
+
+class LLoadNamedGeneric: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LLoadNamedGeneric(LOperand* object) {
+ inputs_[0] = object;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric, "load-named-generic")
+ DECLARE_HYDROGEN_ACCESSOR(LoadNamedGeneric)
+
+ LOperand* object() { return inputs_[0]; }
+ Handle<Object> name() const { return hydrogen()->name(); }
+};
+
+
+class LLoadFunctionPrototype: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LLoadFunctionPrototype(LOperand* function) {
+ inputs_[0] = function;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadFunctionPrototype, "load-function-prototype")
+ DECLARE_HYDROGEN_ACCESSOR(LoadFunctionPrototype)
+
+ LOperand* function() { return inputs_[0]; }
+};
+
+
+class LLoadElements: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LLoadElements(LOperand* object) {
+ inputs_[0] = object;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadElements, "load-elements")
+};
+
+
+class LLoadExternalArrayPointer: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LLoadExternalArrayPointer(LOperand* object) {
+ inputs_[0] = object;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadExternalArrayPointer,
+ "load-external-array-pointer")
+};
+
+
+class LLoadKeyedFastElement: public LTemplateInstruction<1, 2, 0> {
+ public:
+ LLoadKeyedFastElement(LOperand* elements, LOperand* key) {
+ inputs_[0] = elements;
+ inputs_[1] = key;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastElement, "load-keyed-fast-element")
+ DECLARE_HYDROGEN_ACCESSOR(LoadKeyedFastElement)
+
+ LOperand* elements() { return inputs_[0]; }
+ LOperand* key() { return inputs_[1]; }
+};
+
+
+class LLoadKeyedFastDoubleElement: public LTemplateInstruction<1, 2, 0> {
+ public:
+ LLoadKeyedFastDoubleElement(LOperand* elements, LOperand* key) {
+ inputs_[0] = elements;
+ inputs_[1] = key;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastDoubleElement,
+ "load-keyed-fast-double-element")
+ DECLARE_HYDROGEN_ACCESSOR(LoadKeyedFastDoubleElement)
+
+ LOperand* elements() { return inputs_[0]; }
+ LOperand* key() { return inputs_[1]; }
+};
+
+
+class LLoadKeyedSpecializedArrayElement: public LTemplateInstruction<1, 2, 0> {
+ public:
+ LLoadKeyedSpecializedArrayElement(LOperand* external_pointer,
+ LOperand* key) {
+ inputs_[0] = external_pointer;
+ inputs_[1] = key;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadKeyedSpecializedArrayElement,
+ "load-keyed-specialized-array-element")
+ DECLARE_HYDROGEN_ACCESSOR(LoadKeyedSpecializedArrayElement)
+
+ LOperand* external_pointer() { return inputs_[0]; }
+ LOperand* key() { return inputs_[1]; }
+ ElementsKind elements_kind() const {
+ return hydrogen()->elements_kind();
+ }
+};
+
+
+class LLoadKeyedGeneric: public LTemplateInstruction<1, 2, 0> {
+ public:
+ LLoadKeyedGeneric(LOperand* obj, LOperand* key) {
+ inputs_[0] = obj;
+ inputs_[1] = key;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load-keyed-generic")
+
+ LOperand* object() { return inputs_[0]; }
+ LOperand* key() { return inputs_[1]; }
+};
+
+
+class LLoadGlobalCell: public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(LoadGlobalCell, "load-global-cell")
+ DECLARE_HYDROGEN_ACCESSOR(LoadGlobalCell)
+};
+
+
+class LLoadGlobalGeneric: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LLoadGlobalGeneric(LOperand* global_object) {
+ inputs_[0] = global_object;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic")
+ DECLARE_HYDROGEN_ACCESSOR(LoadGlobalGeneric)
+
+ LOperand* global_object() { return inputs_[0]; }
+ Handle<Object> name() const { return hydrogen()->name(); }
+ bool for_typeof() const { return hydrogen()->for_typeof(); }
+};
+
+
+class LStoreGlobalCell: public LTemplateInstruction<0, 1, 1> {
+ public:
+ LStoreGlobalCell(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreGlobalCell, "store-global-cell")
+ DECLARE_HYDROGEN_ACCESSOR(StoreGlobalCell)
+};
+
+
+class LStoreGlobalGeneric: public LTemplateInstruction<0, 2, 0> {
+ public:
+ LStoreGlobalGeneric(LOperand* global_object, LOperand* value) {
+ inputs_[0] = global_object;
+ inputs_[1] = value;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreGlobalGeneric, "store-global-generic")
+ DECLARE_HYDROGEN_ACCESSOR(StoreGlobalGeneric)
+
+ LOperand* global_object() { return InputAt(0); }
+ Handle<Object> name() const { return hydrogen()->name(); }
+ LOperand* value() { return InputAt(1); }
+ StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
+};
+
+
+class LLoadContextSlot: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LLoadContextSlot(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot, "load-context-slot")
+ DECLARE_HYDROGEN_ACCESSOR(LoadContextSlot)
+
+ LOperand* context() { return InputAt(0); }
+ int slot_index() { return hydrogen()->slot_index(); }
+
+ virtual void PrintDataTo(StringStream* stream);
+};
+
+
+class LStoreContextSlot: public LTemplateInstruction<0, 2, 0> {
+ public:
+ LStoreContextSlot(LOperand* context, LOperand* value) {
+ inputs_[0] = context;
+ inputs_[1] = value;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreContextSlot, "store-context-slot")
+ DECLARE_HYDROGEN_ACCESSOR(StoreContextSlot)
+
+ LOperand* context() { return InputAt(0); }
+ LOperand* value() { return InputAt(1); }
+ int slot_index() { return hydrogen()->slot_index(); }
+
+ virtual void PrintDataTo(StringStream* stream);
+};
+
+
+class LPushArgument: public LTemplateInstruction<0, 1, 0> {
+ public:
+ explicit LPushArgument(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(PushArgument, "push-argument")
+};
+
+
+class LThisFunction: public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function")
+ DECLARE_HYDROGEN_ACCESSOR(ThisFunction)
+};
+
+
+class LContext: public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(Context, "context")
+};
+
+
+class LOuterContext: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LOuterContext(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(OuterContext, "outer-context")
+
+ LOperand* context() { return InputAt(0); }
+};
+
+
+class LGlobalObject: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LGlobalObject(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(GlobalObject, "global-object")
+
+ LOperand* context() { return InputAt(0); }
+};
+
+
+class LGlobalReceiver: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LGlobalReceiver(LOperand* global_object) {
+ inputs_[0] = global_object;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(GlobalReceiver, "global-receiver")
+
+ LOperand* global() { return InputAt(0); }
+};
+
+
+class LCallConstantFunction: public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(CallConstantFunction, "call-constant-function")
+ DECLARE_HYDROGEN_ACCESSOR(CallConstantFunction)
+
+ virtual void PrintDataTo(StringStream* stream);
+
+ Handle<JSFunction> function() { return hydrogen()->function(); }
+ int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LInvokeFunction: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LInvokeFunction(LOperand* function) {
+ inputs_[0] = function;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
+ DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
+
+ LOperand* function() { return inputs_[0]; }
+
+ virtual void PrintDataTo(StringStream* stream);
+
+ int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallKeyed: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LCallKeyed(LOperand* key) {
+ inputs_[0] = key;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(CallKeyed, "call-keyed")
+ DECLARE_HYDROGEN_ACCESSOR(CallKeyed)
+
+ virtual void PrintDataTo(StringStream* stream);
+
+ int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallNamed: public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(CallNamed, "call-named")
+ DECLARE_HYDROGEN_ACCESSOR(CallNamed)
+
+ virtual void PrintDataTo(StringStream* stream);
+
+ Handle<String> name() const { return hydrogen()->name(); }
+ int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallFunction: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LCallFunction(LOperand* function) {
+ inputs_[0] = function;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function")
+ DECLARE_HYDROGEN_ACCESSOR(CallFunction)
+
+ LOperand* function() { return inputs_[0]; }
+ int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallGlobal: public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(CallGlobal, "call-global")
+ DECLARE_HYDROGEN_ACCESSOR(CallGlobal)
+
+ virtual void PrintDataTo(StringStream* stream);
+
+ Handle<String> name() const { return hydrogen()->name(); }
+ int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallKnownGlobal: public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(CallKnownGlobal, "call-known-global")
+ DECLARE_HYDROGEN_ACCESSOR(CallKnownGlobal)
+
+ virtual void PrintDataTo(StringStream* stream);
+
+ Handle<JSFunction> target() const { return hydrogen()->target(); }
+ int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallNew: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LCallNew(LOperand* constructor) {
+ inputs_[0] = constructor;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(CallNew, "call-new")
+ DECLARE_HYDROGEN_ACCESSOR(CallNew)
+
+ virtual void PrintDataTo(StringStream* stream);
+
+ int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallRuntime: public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
+ DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
+
+ const Runtime::Function* function() const { return hydrogen()->function(); }
+ int arity() const { return hydrogen()->argument_count(); }
+};
+
+
+class LInteger32ToDouble: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LInteger32ToDouble(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(Integer32ToDouble, "int32-to-double")
+};
+
+
+class LNumberTagI: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LNumberTagI(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(NumberTagI, "number-tag-i")
+};
+
+
+class LNumberTagD: public LTemplateInstruction<1, 1, 2> {
+ public:
+ LNumberTagD(LOperand* value, LOperand* temp1, LOperand* temp2) {
+ inputs_[0] = value;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(NumberTagD, "number-tag-d")
+};
+
+
+// Sometimes truncating conversion from a double value to an int32.
+class LDoubleToI: public LTemplateInstruction<1, 1, 2> {
+ public:
+ LDoubleToI(LOperand* value, LOperand* temp1, LOperand* temp2) {
+ inputs_[0] = value;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(DoubleToI, "double-to-i")
+ DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
+
+ bool truncating() { return hydrogen()->CanTruncateToInt32(); }
+};
+
+
+// Truncating conversion from a tagged value to an int32.
+class LTaggedToI: public LTemplateInstruction<1, 1, 3> {
+ public:
+ LTaggedToI(LOperand* value,
+ LOperand* temp1,
+ LOperand* temp2,
+ LOperand* temp3) {
+ inputs_[0] = value;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ temps_[2] = temp3;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i")
+ DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
+
+ bool truncating() { return hydrogen()->CanTruncateToInt32(); }
+};
+
+
+class LSmiTag: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LSmiTag(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(SmiTag, "smi-tag")
+};
+
+
+class LNumberUntagD: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LNumberUntagD(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(NumberUntagD, "double-untag")
+ DECLARE_HYDROGEN_ACCESSOR(Change)
+};
+
+
+class LSmiUntag: public LTemplateInstruction<1, 1, 0> {
+ public:
+ LSmiUntag(LOperand* value, bool needs_check)
+ : needs_check_(needs_check) {
+ inputs_[0] = value;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(SmiUntag, "smi-untag")
+
+ bool needs_check() const { return needs_check_; }
+
+ private:
+ bool needs_check_;
+};
+
+
+class LStoreNamedField: public LTemplateInstruction<0, 2, 0> {
+ public:
+ LStoreNamedField(LOperand* obj, LOperand* val) {
+ inputs_[0] = obj;
+ inputs_[1] = val;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
+ DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
+
+ virtual void PrintDataTo(StringStream* stream);
+
+ LOperand* object() { return inputs_[0]; }
+ LOperand* value() { return inputs_[1]; }
+
+ Handle<Object> name() const { return hydrogen()->name(); }
+ bool is_in_object() { return hydrogen()->is_in_object(); }
+ int offset() { return hydrogen()->offset(); }
+ Handle<Map> transition() const { return hydrogen()->transition(); }
+};
+
+
+class LStoreNamedGeneric: public LTemplateInstruction<0, 2, 0> {
+ public:
+ LStoreNamedGeneric(LOperand* obj, LOperand* val) {
+ inputs_[0] = obj;
+ inputs_[1] = val;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
+ DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
+
+ virtual void PrintDataTo(StringStream* stream);
+
+ LOperand* object() { return inputs_[0]; }
+ LOperand* value() { return inputs_[1]; }
+ Handle<Object> name() const { return hydrogen()->name(); }
+ StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
+};
+
+
+class LStoreKeyedFastElement: public LTemplateInstruction<0, 3, 0> {
+ public:
+ LStoreKeyedFastElement(LOperand* obj, LOperand* key, LOperand* val) {
+ inputs_[0] = obj;
+ inputs_[1] = key;
+ inputs_[2] = val;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastElement,
+ "store-keyed-fast-element")
+ DECLARE_HYDROGEN_ACCESSOR(StoreKeyedFastElement)
+
+ virtual void PrintDataTo(StringStream* stream);
+
+ LOperand* object() { return inputs_[0]; }
+ LOperand* key() { return inputs_[1]; }
+ LOperand* value() { return inputs_[2]; }
+};
+
+
+class LStoreKeyedFastDoubleElement: public LTemplateInstruction<0, 3, 0> {
+ public:
+ LStoreKeyedFastDoubleElement(LOperand* elements,
+ LOperand* key,
+ LOperand* val) {
+ inputs_[0] = elements;
+ inputs_[1] = key;
+ inputs_[2] = val;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastDoubleElement,
+ "store-keyed-fast-double-element")
+ DECLARE_HYDROGEN_ACCESSOR(StoreKeyedFastDoubleElement)
+
+ virtual void PrintDataTo(StringStream* stream);
+
+ LOperand* elements() { return inputs_[0]; }
+ LOperand* key() { return inputs_[1]; }
+ LOperand* value() { return inputs_[2]; }
+};
+
+
+class LStoreKeyedGeneric: public LTemplateInstruction<0, 3, 0> {
+ public:
+ LStoreKeyedGeneric(LOperand* obj, LOperand* key, LOperand* val) {
+ inputs_[0] = obj;
+ inputs_[1] = key;
+ inputs_[2] = val;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
+ DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
+
+ virtual void PrintDataTo(StringStream* stream);
+
+ LOperand* object() { return inputs_[0]; }
+ LOperand* key() { return inputs_[1]; }
+ LOperand* value() { return inputs_[2]; }
+ StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
+};
+
+
+class LStoreKeyedSpecializedArrayElement: public LTemplateInstruction<0, 3, 0> {
+ public:
+ LStoreKeyedSpecializedArrayElement(LOperand* external_pointer,
+ LOperand* key,
+ LOperand* val) {
+ inputs_[0] = external_pointer;
+ inputs_[1] = key;
+ inputs_[2] = val;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreKeyedSpecializedArrayElement,
+ "store-keyed-specialized-array-element")
+ DECLARE_HYDROGEN_ACCESSOR(StoreKeyedSpecializedArrayElement)
+
+ LOperand* external_pointer() { return inputs_[0]; }
+ LOperand* key() { return inputs_[1]; }
+ LOperand* value() { return inputs_[2]; }
+ ElementsKind elements_kind() const {
+ return hydrogen()->elements_kind();
+ }
+};
+
+
+class LTransitionElementsKind: public LTemplateInstruction<1, 1, 2> {
+ public:
+ LTransitionElementsKind(LOperand* object,
+ LOperand* new_map_temp,
+ LOperand* temp_reg) {
+ inputs_[0] = object;
+ temps_[0] = new_map_temp;
+ temps_[1] = temp_reg;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(TransitionElementsKind,
+ "transition-elements-kind")
+ DECLARE_HYDROGEN_ACCESSOR(TransitionElementsKind)
+
+ virtual void PrintDataTo(StringStream* stream);
+
+ LOperand* object() { return inputs_[0]; }
+ LOperand* new_map_reg() { return temps_[0]; }
+ LOperand* temp_reg() { return temps_[1]; }
+ Handle<Map> original_map() { return hydrogen()->original_map(); }
+ Handle<Map> transitioned_map() { return hydrogen()->transitioned_map(); }
+};
+
+
+class LStringAdd: public LTemplateInstruction<1, 2, 0> {
+ public:
+ LStringAdd(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(StringAdd, "string-add")
+ DECLARE_HYDROGEN_ACCESSOR(StringAdd)
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+};
+
+
+class LStringCharCodeAt: public LTemplateInstruction<1, 2, 0> {
+ public:
+ LStringCharCodeAt(LOperand* string, LOperand* index) {
+ inputs_[0] = string;
+ inputs_[1] = index;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt, "string-char-code-at")
+ DECLARE_HYDROGEN_ACCESSOR(StringCharCodeAt)
+
+ LOperand* string() { return inputs_[0]; }
+ LOperand* index() { return inputs_[1]; }
+};
+
+
+class LStringCharFromCode: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LStringCharFromCode(LOperand* char_code) {
+ inputs_[0] = char_code;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(StringCharFromCode, "string-char-from-code")
+ DECLARE_HYDROGEN_ACCESSOR(StringCharFromCode)
+
+ LOperand* char_code() { return inputs_[0]; }
+};
+
+
+class LStringLength: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LStringLength(LOperand* string) {
+ inputs_[0] = string;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(StringLength, "string-length")
+ DECLARE_HYDROGEN_ACCESSOR(StringLength)
+
+ LOperand* string() { return inputs_[0]; }
+};
+
+
+class LCheckFunction: public LTemplateInstruction<0, 1, 0> {
+ public:
+ explicit LCheckFunction(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(CheckFunction, "check-function")
+ DECLARE_HYDROGEN_ACCESSOR(CheckFunction)
+};
+
+
+class LCheckInstanceType: public LTemplateInstruction<0, 1, 0> {
+ public:
+ explicit LCheckInstanceType(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(CheckInstanceType, "check-instance-type")
+ DECLARE_HYDROGEN_ACCESSOR(CheckInstanceType)
+};
+
+
+class LCheckMap: public LTemplateInstruction<0, 1, 0> {
+ public:
+ explicit LCheckMap(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(CheckMap, "check-map")
+ DECLARE_HYDROGEN_ACCESSOR(CheckMap)
+};
+
+
+class LCheckPrototypeMaps: public LTemplateInstruction<0, 0, 2> {
+ public:
+ LCheckPrototypeMaps(LOperand* temp1, LOperand* temp2) {
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(CheckPrototypeMaps, "check-prototype-maps")
+ DECLARE_HYDROGEN_ACCESSOR(CheckPrototypeMaps)
+
+ Handle<JSObject> prototype() const { return hydrogen()->prototype(); }
+ Handle<JSObject> holder() const { return hydrogen()->holder(); }
+};
+
+
+class LCheckSmi: public LTemplateInstruction<0, 1, 0> {
+ public:
+ explicit LCheckSmi(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(CheckSmi, "check-smi")
+};
+
+
+class LCheckNonSmi: public LTemplateInstruction<0, 1, 0> {
+ public:
+ explicit LCheckNonSmi(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(CheckNonSmi, "check-non-smi")
+};
+
+
+class LClampDToUint8: public LTemplateInstruction<1, 1, 1> {
+ public:
+ LClampDToUint8(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* unclamped() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ClampDToUint8, "clamp-d-to-uint8")
+};
+
+
+class LClampIToUint8: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LClampIToUint8(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* unclamped() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ClampIToUint8, "clamp-i-to-uint8")
+};
+
+
+class LClampTToUint8: public LTemplateInstruction<1, 1, 1> {
+ public:
+ LClampTToUint8(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* unclamped() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ClampTToUint8, "clamp-t-to-uint8")
+};
+
+
+class LArrayLiteral: public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(ArrayLiteral, "array-literal")
+ DECLARE_HYDROGEN_ACCESSOR(ArrayLiteral)
+};
+
+
+class LObjectLiteral: public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(ObjectLiteral, "object-literal")
+ DECLARE_HYDROGEN_ACCESSOR(ObjectLiteral)
+};
+
+
+class LRegExpLiteral: public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(RegExpLiteral, "regexp-literal")
+ DECLARE_HYDROGEN_ACCESSOR(RegExpLiteral)
+};
+
+
+class LFunctionLiteral: public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(FunctionLiteral, "function-literal")
+ DECLARE_HYDROGEN_ACCESSOR(FunctionLiteral)
+
+ Handle<SharedFunctionInfo> shared_info() { return hydrogen()->shared_info(); }
+};
+
+
+class LToFastProperties: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LToFastProperties(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(ToFastProperties, "to-fast-properties")
+ DECLARE_HYDROGEN_ACCESSOR(ToFastProperties)
+};
+
+
+class LTypeof: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LTypeof(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(Typeof, "typeof")
+};
+
+
+class LTypeofIsAndBranch: public LControlInstruction<1, 0> {
+ public:
+ explicit LTypeofIsAndBranch(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch, "typeof-is-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(TypeofIsAndBranch)
+
+ Handle<String> type_literal() { return hydrogen()->type_literal(); }
+
+ virtual void PrintDataTo(StringStream* stream);
+};
+
+
+class LIsConstructCallAndBranch: public LControlInstruction<0, 1> {
+ public:
+ explicit LIsConstructCallAndBranch(LOperand* temp) {
+ temps_[0] = temp;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(IsConstructCallAndBranch,
+ "is-construct-call-and-branch")
+};
+
+
+class LDeleteProperty: public LTemplateInstruction<1, 2, 0> {
+ public:
+ LDeleteProperty(LOperand* obj, LOperand* key) {
+ inputs_[0] = obj;
+ inputs_[1] = key;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(DeleteProperty, "delete-property")
+
+ LOperand* object() { return inputs_[0]; }
+ LOperand* key() { return inputs_[1]; }
+};
+
+
+class LOsrEntry: public LTemplateInstruction<0, 0, 0> {
+ public:
+ LOsrEntry();
+
+ DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry")
+
+ LOperand** SpilledRegisterArray() { return register_spills_; }
+ LOperand** SpilledDoubleRegisterArray() { return double_register_spills_; }
+
+ void MarkSpilledRegister(int allocation_index, LOperand* spill_operand);
+ void MarkSpilledDoubleRegister(int allocation_index,
+ LOperand* spill_operand);
+
+ private:
+ // Arrays of spill slot operands for registers with an assigned spill
+ // slot, i.e., that must also be restored to the spill slot on OSR entry.
+ // NULL if the register has no assigned spill slot. Indexed by allocation
+ // index.
+ LOperand* register_spills_[Register::kNumAllocatableRegisters];
+ LOperand* double_register_spills_[DoubleRegister::kNumAllocatableRegisters];
+};
+
+
+class LStackCheck: public LTemplateInstruction<0, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(StackCheck, "stack-check")
+ DECLARE_HYDROGEN_ACCESSOR(StackCheck)
+
+ Label* done_label() { return &done_label_; }
+
+ private:
+ Label done_label_;
+};
+
+
+class LIn: public LTemplateInstruction<1, 2, 0> {
+ public:
+ LIn(LOperand* key, LOperand* object) {
+ inputs_[0] = key;
+ inputs_[1] = object;
+ }
+
+ LOperand* key() { return inputs_[0]; }
+ LOperand* object() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(In, "in")
+};
+
+
+class LChunkBuilder;
+class LChunk: public ZoneObject {
+ public:
+ explicit LChunk(CompilationInfo* info, HGraph* graph);
+
+ void AddInstruction(LInstruction* instruction, HBasicBlock* block);
+ LConstantOperand* DefineConstantOperand(HConstant* constant);
+ Handle<Object> LookupLiteral(LConstantOperand* operand) const;
+ Representation LookupLiteralRepresentation(LConstantOperand* operand) const;
+
+ int GetNextSpillIndex(bool is_double);
+ LOperand* GetNextSpillSlot(bool is_double);
+
+ int ParameterAt(int index);
+ int GetParameterStackSlot(int index) const;
+ int spill_slot_count() const { return spill_slot_count_; }
+ CompilationInfo* info() const { return info_; }
+ HGraph* graph() const { return graph_; }
+ const ZoneList<LInstruction*>* instructions() const { return &instructions_; }
+ void AddGapMove(int index, LOperand* from, LOperand* to);
+ LGap* GetGapAt(int index) const;
+ bool IsGapAt(int index) const;
+ int NearestGapPos(int index) const;
+ void MarkEmptyBlocks();
+ const ZoneList<LPointerMap*>* pointer_maps() const { return &pointer_maps_; }
+ LLabel* GetLabel(int block_id) const {
+ HBasicBlock* block = graph_->blocks()->at(block_id);
+ int first_instruction = block->first_instruction_index();
+ return LLabel::cast(instructions_[first_instruction]);
+ }
+ int LookupDestination(int block_id) const {
+ LLabel* cur = GetLabel(block_id);
+ while (cur->replacement() != NULL) {
+ cur = cur->replacement();
+ }
+ return cur->block_id();
+ }
+ Label* GetAssemblyLabel(int block_id) const {
+ LLabel* label = GetLabel(block_id);
+ ASSERT(!label->HasReplacement());
+ return label->label();
+ }
+
+ const ZoneList<Handle<JSFunction> >* inlined_closures() const {
+ return &inlined_closures_;
+ }
+
+ void AddInlinedClosure(Handle<JSFunction> closure) {
+ inlined_closures_.Add(closure);
+ }
+
+ private:
+ int spill_slot_count_;
+ CompilationInfo* info_;
+ HGraph* const graph_;
+ ZoneList<LInstruction*> instructions_;
+ ZoneList<LPointerMap*> pointer_maps_;
+ ZoneList<Handle<JSFunction> > inlined_closures_;
};
class LChunkBuilder BASE_EMBEDDED {
public:
- LChunkBuilder(CompilationInfo*&, HGraph* graph, LAllocator* allocator) { }
+ LChunkBuilder(CompilationInfo* info, HGraph* graph, LAllocator* allocator)
+ : chunk_(NULL),
+ info_(info),
+ graph_(graph),
+ status_(UNUSED),
+ current_instruction_(NULL),
+ current_block_(NULL),
+ next_block_(NULL),
+ argument_count_(0),
+ allocator_(allocator),
+ position_(RelocInfo::kNoPosition),
+ instruction_pending_deoptimization_environment_(NULL),
+ pending_deoptimization_ast_id_(AstNode::kNoNumber) { }
// Build the sequence for the graph.
- LChunk* Build() {
- UNIMPLEMENTED();
- return NULL;
- };
+ LChunk* Build();
// Declare methods that deal with the individual node types.
-#define DECLARE_DO(type) LInstruction* Do##type(H##type* node) { \
- UNIMPLEMENTED(); \
- return NULL; \
- }
+#define DECLARE_DO(type) LInstruction* Do##type(H##type* node);
HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
#undef DECLARE_DO
+ private:
+ enum Status {
+ UNUSED,
+ BUILDING,
+ DONE,
+ ABORTED
+ };
+
+ LChunk* chunk() const { return chunk_; }
+ CompilationInfo* info() const { return info_; }
+ HGraph* graph() const { return graph_; }
+
+ bool is_unused() const { return status_ == UNUSED; }
+ bool is_building() const { return status_ == BUILDING; }
+ bool is_done() const { return status_ == DONE; }
+ bool is_aborted() const { return status_ == ABORTED; }
+
+ void Abort(const char* format, ...);
+
+ // Methods for getting operands for Use / Define / Temp.
+ LRegister* ToOperand(Register reg);
+ LUnallocated* ToUnallocated(Register reg);
+ LUnallocated* ToUnallocated(DoubleRegister reg);
+
+ // Methods for setting up define-use relationships.
+ MUST_USE_RESULT LOperand* Use(HValue* value, LUnallocated* operand);
+ MUST_USE_RESULT LOperand* UseFixed(HValue* value, Register fixed_register);
+ MUST_USE_RESULT LOperand* UseFixedDouble(HValue* value,
+ DoubleRegister fixed_register);
+
+ // A value that is guaranteed to be allocated to a register.
+ // An operand created by UseRegister is guaranteed to be live until the end
+ // of the instruction. This means that the register allocator will not reuse
+ // its register for any other operand inside the instruction.
+ // An operand created by UseRegisterAtStart is guaranteed to be live only at
+ // the instruction start. The register allocator is free to assign the same
+ // register to some other operand used inside the instruction (i.e. a
+ // temporary or an output).
+ MUST_USE_RESULT LOperand* UseRegister(HValue* value);
+ MUST_USE_RESULT LOperand* UseRegisterAtStart(HValue* value);
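+
+ // A hedged illustration only (not code from this file; 'instr' stands for
+ // the hydrogen instruction being lowered): a typical Do* method pairs
+ // these operand helpers with a Define* helper below, e.g.
+ //
+ //   LOperand* left = UseRegisterAtStart(instr->left());
+ //   LOperand* right = UseRegister(instr->right());
+ //   return DefineAsRegister(new LAddI(left, right));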
+
+ // An input operand in a register that may be trashed.
+ MUST_USE_RESULT LOperand* UseTempRegister(HValue* value);
+
+ // An input operand in a register or stack slot.
+ MUST_USE_RESULT LOperand* Use(HValue* value);
+ MUST_USE_RESULT LOperand* UseAtStart(HValue* value);
+
+ // An input operand in a register, stack slot or a constant operand.
+ MUST_USE_RESULT LOperand* UseOrConstant(HValue* value);
+ MUST_USE_RESULT LOperand* UseOrConstantAtStart(HValue* value);
+
+ // An input operand in a register or a constant operand.
+ MUST_USE_RESULT LOperand* UseRegisterOrConstant(HValue* value);
+ MUST_USE_RESULT LOperand* UseRegisterOrConstantAtStart(HValue* value);
+
+ // An input operand in register, stack slot or a constant operand.
+ // Will not be moved to a register even if one is freely available.
+ MUST_USE_RESULT LOperand* UseAny(HValue* value);
+
+ // Temporary operand that must be in a register.
+ MUST_USE_RESULT LUnallocated* TempRegister();
+ MUST_USE_RESULT LOperand* FixedTemp(Register reg);
+ MUST_USE_RESULT LOperand* FixedTemp(DoubleRegister reg);
+
+ // Methods for setting up define-use relationships.
+ // Return the same instruction that they are passed.
+ template<int I, int T>
+ LInstruction* Define(LTemplateInstruction<1, I, T>* instr,
+ LUnallocated* result);
+ template<int I, int T>
+ LInstruction* Define(LTemplateInstruction<1, I, T>* instr);
+ template<int I, int T>
+ LInstruction* DefineAsRegister(LTemplateInstruction<1, I, T>* instr);
+ template<int I, int T>
+ LInstruction* DefineAsSpilled(LTemplateInstruction<1, I, T>* instr,
+ int index);
+ template<int I, int T>
+ LInstruction* DefineSameAsFirst(LTemplateInstruction<1, I, T>* instr);
+ template<int I, int T>
+ LInstruction* DefineFixed(LTemplateInstruction<1, I, T>* instr,
+ Register reg);
+ template<int I, int T>
+ LInstruction* DefineFixedDouble(LTemplateInstruction<1, I, T>* instr,
+ DoubleRegister reg);
+ LInstruction* AssignEnvironment(LInstruction* instr);
+ LInstruction* AssignPointerMap(LInstruction* instr);
+
+ enum CanDeoptimize { CAN_DEOPTIMIZE_EAGERLY, CANNOT_DEOPTIMIZE_EAGERLY };
+
+ // By default we assume that instruction sequences generated for calls
+ // cannot deoptimize eagerly and we do not attach environment to this
+ // instruction.
+ LInstruction* MarkAsCall(
+ LInstruction* instr,
+ HInstruction* hinstr,
+ CanDeoptimize can_deoptimize = CANNOT_DEOPTIMIZE_EAGERLY);
+ LInstruction* MarkAsSaveDoubles(LInstruction* instr);
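+
+ // Illustrative sketch only (it mirrors the usual platform-builder pattern;
+ // LCallNamed and the fixed return register v0 are stand-ins for whatever
+ // the concrete Do* method targets):
+ //
+ //   return MarkAsCall(DefineFixed(new LCallNamed, v0), instr);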
+
+ LInstruction* SetInstructionPendingDeoptimizationEnvironment(
+ LInstruction* instr, int ast_id);
+ void ClearInstructionPendingDeoptimizationEnvironment();
+
+ LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env,
+ int* argument_index_accumulator);
+
+ void VisitInstruction(HInstruction* current);
+
+ void DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block);
+ LInstruction* DoBit(Token::Value op, HBitwiseBinaryOperation* instr);
+ LInstruction* DoShift(Token::Value op, HBitwiseBinaryOperation* instr);
+ LInstruction* DoArithmeticD(Token::Value op,
+ HArithmeticBinaryOperation* instr);
+ LInstruction* DoArithmeticT(Token::Value op,
+ HArithmeticBinaryOperation* instr);
+
+ LChunk* chunk_;
+ CompilationInfo* info_;
+ HGraph* const graph_;
+ Status status_;
+ HInstruction* current_instruction_;
+ HBasicBlock* current_block_;
+ HBasicBlock* next_block_;
+ int argument_count_;
+ LAllocator* allocator_;
+ int position_;
+ LInstruction* instruction_pending_deoptimization_environment_;
+ int pending_deoptimization_ast_id_;
+
DISALLOW_COPY_AND_ASSIGN(LChunkBuilder);
};
+#undef DECLARE_HYDROGEN_ACCESSOR
+#undef DECLARE_CONCRETE_INSTRUCTION
} } // namespace v8::internal
diff --git a/deps/v8/src/mips/macro-assembler-mips.cc b/deps/v8/src/mips/macro-assembler-mips.cc
index 4c48ef183..c1161d73d 100644
--- a/deps/v8/src/mips/macro-assembler-mips.cc
+++ b/deps/v8/src/mips/macro-assembler-mips.cc
@@ -42,7 +42,8 @@ namespace internal {
MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
: Assembler(arg_isolate, buffer, size),
generating_stub_(false),
- allow_stub_calls_(true) {
+ allow_stub_calls_(true),
+ has_frame_(false) {
if (isolate() != NULL) {
code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
isolate());
@@ -80,46 +81,15 @@ void MacroAssembler::StoreRoot(Register source,
}
-void MacroAssembler::RecordWriteHelper(Register object,
- Register address,
- Register scratch) {
- if (emit_debug_code()) {
- // Check that the object is not in new space.
- Label not_in_new_space;
- InNewSpace(object, scratch, ne, &not_in_new_space);
- Abort("new-space object passed to RecordWriteHelper");
- bind(&not_in_new_space);
- }
-
- // Calculate page address: Clear bits from 0 to kPageSizeBits.
- if (mips32r2) {
- Ins(object, zero_reg, 0, kPageSizeBits);
- } else {
- // The Ins macro is slow on r1, so use shifts instead.
- srl(object, object, kPageSizeBits);
- sll(object, object, kPageSizeBits);
- }
-
- // Calculate region number.
- Ext(address, address, Page::kRegionSizeLog2,
- kPageSizeBits - Page::kRegionSizeLog2);
-
- // Mark region dirty.
- lw(scratch, MemOperand(object, Page::kDirtyFlagOffset));
- li(at, Operand(1));
- sllv(at, at, address);
- or_(scratch, scratch, at);
- sw(scratch, MemOperand(object, Page::kDirtyFlagOffset));
-}
-
-
// Push and pop all registers that can hold pointers.
void MacroAssembler::PushSafepointRegisters() {
// Safepoints expect a block of kNumSafepointRegisters values on the
// stack, so adjust the stack for unsaved registers.
const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
ASSERT(num_unsaved >= 0);
- Subu(sp, sp, Operand(num_unsaved * kPointerSize));
+ if (num_unsaved > 0) {
+ Subu(sp, sp, Operand(num_unsaved * kPointerSize));
+ }
MultiPush(kSafepointSavedRegisters);
}
@@ -127,7 +97,9 @@ void MacroAssembler::PushSafepointRegisters() {
void MacroAssembler::PopSafepointRegisters() {
const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
MultiPop(kSafepointSavedRegisters);
- Addu(sp, sp, Operand(num_unsaved * kPointerSize));
+ if (num_unsaved > 0) {
+ Addu(sp, sp, Operand(num_unsaved * kPointerSize));
+ }
}
@@ -180,6 +152,7 @@ MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
+ UNIMPLEMENTED_MIPS();
// General purpose registers are pushed last on the stack.
int doubles_size = FPURegister::kNumAllocatableRegisters * kDoubleSize;
int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
@@ -187,8 +160,6 @@ MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
}
-
-
void MacroAssembler::InNewSpace(Register object,
Register scratch,
Condition cc,
@@ -200,38 +171,53 @@ void MacroAssembler::InNewSpace(Register object,
}
-// Will clobber 4 registers: object, scratch0, scratch1, at. The
-// register 'object' contains a heap object pointer. The heap object
-// tag is shifted away.
-void MacroAssembler::RecordWrite(Register object,
- Operand offset,
- Register scratch0,
- Register scratch1) {
- // The compiled code assumes that record write doesn't change the
- // context register, so we check that none of the clobbered
- // registers are cp.
- ASSERT(!object.is(cp) && !scratch0.is(cp) && !scratch1.is(cp));
-
+void MacroAssembler::RecordWriteField(
+ Register object,
+ int offset,
+ Register value,
+ Register dst,
+ RAStatus ra_status,
+ SaveFPRegsMode save_fp,
+ RememberedSetAction remembered_set_action,
+ SmiCheck smi_check) {
+ ASSERT(!AreAliased(value, dst, t8, object));
+ // First, check if a write barrier is even needed. The tests below
+ // catch stores of Smis.
Label done;
- // First, test that the object is not in the new space. We cannot set
- // region marks for new space pages.
- InNewSpace(object, scratch0, eq, &done);
+ // Skip barrier if writing a smi.
+ if (smi_check == INLINE_SMI_CHECK) {
+ JumpIfSmi(value, &done);
+ }
- // Add offset into the object.
- Addu(scratch0, object, offset);
+ // Although the object register is tagged, the offset is relative to the
+ // start of the object, so the offset must be a multiple of kPointerSize.
+ ASSERT(IsAligned(offset, kPointerSize));
- // Record the actual write.
- RecordWriteHelper(object, scratch0, scratch1);
+ Addu(dst, object, Operand(offset - kHeapObjectTag));
+ if (emit_debug_code()) {
+ Label ok;
+ And(t8, dst, Operand((1 << kPointerSizeLog2) - 1));
+ Branch(&ok, eq, t8, Operand(zero_reg));
+ stop("Unaligned cell in write barrier");
+ bind(&ok);
+ }
+
+ RecordWrite(object,
+ dst,
+ value,
+ ra_status,
+ save_fp,
+ remembered_set_action,
+ OMIT_SMI_CHECK);
bind(&done);
- // Clobber all input registers when running with the debug-code flag
+ // Clobber clobbered input registers when running with the debug-code flag
// turned on to provoke errors.
if (emit_debug_code()) {
- li(object, Operand(BitCast<int32_t>(kZapValue)));
- li(scratch0, Operand(BitCast<int32_t>(kZapValue)));
- li(scratch1, Operand(BitCast<int32_t>(kZapValue)));
+ li(value, Operand(BitCast<int32_t>(kZapValue + 4)));
+ li(dst, Operand(BitCast<int32_t>(kZapValue + 8)));
}
}
@@ -241,29 +227,96 @@ void MacroAssembler::RecordWrite(Register object,
// tag is shifted away.
void MacroAssembler::RecordWrite(Register object,
Register address,
- Register scratch) {
+ Register value,
+ RAStatus ra_status,
+ SaveFPRegsMode fp_mode,
+ RememberedSetAction remembered_set_action,
+ SmiCheck smi_check) {
+ ASSERT(!AreAliased(object, address, value, t8));
+ ASSERT(!AreAliased(object, address, value, t9));
// The compiled code assumes that record write doesn't change the
// context register, so we check that none of the clobbered
// registers are cp.
- ASSERT(!object.is(cp) && !address.is(cp) && !scratch.is(cp));
+ ASSERT(!address.is(cp) && !value.is(cp));
Label done;
- // First, test that the object is not in the new space. We cannot set
- // region marks for new space pages.
- InNewSpace(object, scratch, eq, &done);
+ if (smi_check == INLINE_SMI_CHECK) {
+ ASSERT_EQ(0, kSmiTag);
+ JumpIfSmi(value, &done);
+ }
+
+ CheckPageFlag(value,
+ value, // Used as scratch.
+ MemoryChunk::kPointersToHereAreInterestingMask,
+ eq,
+ &done);
+ CheckPageFlag(object,
+ value, // Used as scratch.
+ MemoryChunk::kPointersFromHereAreInterestingMask,
+ eq,
+ &done);
// Record the actual write.
- RecordWriteHelper(object, address, scratch);
+ if (ra_status == kRAHasNotBeenSaved) {
+ push(ra);
+ }
+ RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode);
+ CallStub(&stub);
+ if (ra_status == kRAHasNotBeenSaved) {
+ pop(ra);
+ }
bind(&done);
- // Clobber all input registers when running with the debug-code flag
+ // Clobber clobbered registers when running with the debug-code flag
// turned on to provoke errors.
if (emit_debug_code()) {
- li(object, Operand(BitCast<int32_t>(kZapValue)));
- li(address, Operand(BitCast<int32_t>(kZapValue)));
- li(scratch, Operand(BitCast<int32_t>(kZapValue)));
+ li(address, Operand(BitCast<int32_t>(kZapValue + 12)));
+ li(value, Operand(BitCast<int32_t>(kZapValue + 16)));
+ }
+}
+
+
+void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
+ Register address,
+ Register scratch,
+ SaveFPRegsMode fp_mode,
+ RememberedSetFinalAction and_then) {
+ Label done;
+ if (FLAG_debug_code) {
+ Label ok;
+ JumpIfNotInNewSpace(object, scratch, &ok);
+ stop("Remembered set pointer is in new space");
+ bind(&ok);
+ }
+ // Load store buffer top.
+ ExternalReference store_buffer =
+ ExternalReference::store_buffer_top(isolate());
+ li(t8, Operand(store_buffer));
+ lw(scratch, MemOperand(t8));
+ // Store pointer to buffer and increment buffer top.
+ sw(address, MemOperand(scratch));
+ Addu(scratch, scratch, kPointerSize);
+ // Write back new top of buffer.
+ sw(scratch, MemOperand(t8));
+ // Check whether the end of the buffer has been reached; if so, fall
+ // through to the store buffer overflow stub below.
+ And(t8, scratch, Operand(StoreBuffer::kStoreBufferOverflowBit));
+ if (and_then == kFallThroughAtEnd) {
+ Branch(&done, eq, t8, Operand(zero_reg));
+ } else {
+ ASSERT(and_then == kReturnAtEnd);
+ Ret(eq, t8, Operand(zero_reg));
+ }
+ push(ra);
+ StoreBufferOverflowStub store_buffer_overflow =
+ StoreBufferOverflowStub(fp_mode);
+ CallStub(&store_buffer_overflow);
+ pop(ra);
+ bind(&done);
+ if (and_then == kReturnAtEnd) {
+ Ret();
}
}
@@ -707,7 +760,7 @@ void MacroAssembler::MultiPush(RegList regs) {
int16_t stack_offset = num_to_push * kPointerSize;
Subu(sp, sp, Operand(stack_offset));
- for (int16_t i = kNumRegisters; i > 0; i--) {
+ for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
if ((regs & (1 << i)) != 0) {
stack_offset -= kPointerSize;
sw(ToRegister(i), MemOperand(sp, stack_offset));
@@ -746,7 +799,7 @@ void MacroAssembler::MultiPop(RegList regs) {
void MacroAssembler::MultiPopReversed(RegList regs) {
int16_t stack_offset = 0;
- for (int16_t i = kNumRegisters; i > 0; i--) {
+ for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
if ((regs & (1 << i)) != 0) {
lw(ToRegister(i), MemOperand(sp, stack_offset));
stack_offset += kPointerSize;
@@ -762,7 +815,7 @@ void MacroAssembler::MultiPushFPU(RegList regs) {
int16_t stack_offset = num_to_push * kDoubleSize;
Subu(sp, sp, Operand(stack_offset));
- for (int16_t i = kNumRegisters; i > 0; i--) {
+ for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
if ((regs & (1 << i)) != 0) {
stack_offset -= kDoubleSize;
sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
@@ -804,7 +857,7 @@ void MacroAssembler::MultiPopReversedFPU(RegList regs) {
CpuFeatures::Scope scope(FPU);
int16_t stack_offset = 0;
- for (int16_t i = kNumRegisters; i > 0; i--) {
+ for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
if ((regs & (1 << i)) != 0) {
ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
stack_offset += kDoubleSize;
@@ -814,6 +867,21 @@ void MacroAssembler::MultiPopReversedFPU(RegList regs) {
}
+void MacroAssembler::FlushICache(Register address, unsigned instructions) {
+ RegList saved_regs = kJSCallerSaved | ra.bit();
+ MultiPush(saved_regs);
+ AllowExternalCallThatCantCauseGC scope(this);
+
+ // Save to a0 in case address == t0.
+ Move(a0, address);
+ PrepareCallCFunction(2, t0);
+
+ li(a1, instructions * kInstrSize);
+ CallCFunction(ExternalReference::flush_icache_function(isolate()), 2);
+ MultiPop(saved_regs);
+}
+
+
void MacroAssembler::Ext(Register rt,
Register rs,
uint16_t pos,
@@ -842,34 +910,21 @@ void MacroAssembler::Ins(Register rt,
uint16_t pos,
uint16_t size) {
ASSERT(pos < 32);
- ASSERT(pos + size < 32);
+ ASSERT(pos + size <= 32);
+ ASSERT(size != 0);
if (mips32r2) {
ins_(rt, rs, pos, size);
} else {
ASSERT(!rt.is(t8) && !rs.is(t8));
-
- srl(t8, rt, pos + size);
- // The left chunk from rt that needs to
- // be saved is on the right side of t8.
- sll(at, t8, pos + size);
- // The 'at' register now contains the left chunk on
- // the left (proper position) and zeroes.
- sll(t8, rt, 32 - pos);
- // t8 now contains the right chunk on the left and zeroes.
- srl(t8, t8, 32 - pos);
- // t8 now contains the right chunk on
- // the right (proper position) and zeroes.
- or_(rt, at, t8);
- // rt now contains the left and right chunks from the original rt
- // in their proper position and zeroes in the middle.
- sll(t8, rs, 32 - size);
- // t8 now contains the chunk from rs on the left and zeroes.
- srl(t8, t8, 32 - size - pos);
- // t8 now contains the original chunk from rs in
- // the middle (proper position).
- or_(rt, rt, t8);
- // rt now contains the result of the ins instruction in R2 mode.
+ Subu(at, zero_reg, Operand(1));
+ srl(at, at, 32 - size);
+ and_(t8, rs, at);
+ sll(t8, t8, pos);
+ sll(at, at, pos);
+ nor(at, at, zero_reg);
+ and_(at, rt, at);
+ or_(rt, t8, at);
}
}
@@ -940,11 +995,9 @@ void MacroAssembler::Trunc_uw_d(FPURegister fd,
mtc1(at, FPURegister::from_code(scratch.code() + 1));
mtc1(zero_reg, scratch);
// Test if scratch > fd.
- c(OLT, D, fd, scratch);
-
- Label simple_convert;
// If fd < 2^31 we can convert it normally.
- bc1t(&simple_convert);
+ Label simple_convert;
+ BranchF(&simple_convert, NULL, lt, fd, scratch);
// First we subtract 2^31 from fd, then trunc it to rs
// and add 2^31 to rs.
@@ -964,6 +1017,102 @@ void MacroAssembler::Trunc_uw_d(FPURegister fd,
}
+void MacroAssembler::BranchF(Label* target,
+ Label* nan,
+ Condition cc,
+ FPURegister cmp1,
+ FPURegister cmp2,
+ BranchDelaySlot bd) {
+ if (cc == al) {
+ Branch(bd, target);
+ return;
+ }
+
+ ASSERT(nan || target);
+ // Check for unordered (NaN) cases.
+ if (nan) {
+ c(UN, D, cmp1, cmp2);
+ bc1t(nan);
+ }
+
+ if (target) {
+ // Here NaN cases were either handled by this function or are assumed to
+ // have been handled by the caller.
+ // Unsigned conditions are treated as their signed counterpart.
+ switch (cc) {
+ case Uless:
+ case less:
+ c(OLT, D, cmp1, cmp2);
+ bc1t(target);
+ break;
+ case Ugreater:
+ case greater:
+ c(ULE, D, cmp1, cmp2);
+ bc1f(target);
+ break;
+ case Ugreater_equal:
+ case greater_equal:
+ c(ULT, D, cmp1, cmp2);
+ bc1f(target);
+ break;
+ case Uless_equal:
+ case less_equal:
+ c(OLE, D, cmp1, cmp2);
+ bc1t(target);
+ break;
+ case eq:
+ c(EQ, D, cmp1, cmp2);
+ bc1t(target);
+ break;
+ case ne:
+ c(EQ, D, cmp1, cmp2);
+ bc1f(target);
+ break;
+ default:
+ CHECK(0);
+ };
+ }
+
+ if (bd == PROTECT) {
+ nop();
+ }
+}
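+
+// Usage sketch (register and label names here are illustrative, not from
+// this file): branch to 'done' when f12 < f14, sending unordered (NaN)
+// inputs to 'is_nan':
+//
+//   BranchF(&done, &is_nan, lt, f12, f14);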
+
+
+void MacroAssembler::Move(FPURegister dst, double imm) {
+ ASSERT(CpuFeatures::IsEnabled(FPU));
+ static const DoubleRepresentation minus_zero(-0.0);
+ static const DoubleRepresentation zero(0.0);
+ DoubleRepresentation value(imm);
+ // Handle special values first.
+ bool force_load = dst.is(kDoubleRegZero);
+ if (value.bits == zero.bits && !force_load) {
+ mov_d(dst, kDoubleRegZero);
+ } else if (value.bits == minus_zero.bits && !force_load) {
+ neg_d(dst, kDoubleRegZero);
+ } else {
+ uint32_t lo, hi;
+ DoubleAsTwoUInt32(imm, &lo, &hi);
+ // Move the low part of the double into the lower half of the
+ // corresponding FPU register pair.
+ if (lo != 0) {
+ li(at, Operand(lo));
+ mtc1(at, dst);
+ } else {
+ mtc1(zero_reg, dst);
+ }
+ // Move the high part of the double into the upper half of the
+ // corresponding FPU register pair.
+ if (hi != 0) {
+ li(at, Operand(hi));
+ mtc1(at, dst.high());
+ } else {
+ mtc1(zero_reg, dst.high());
+ }
+ }
+}
+
+
// Tries to get a signed int32 out of a double precision floating point heap
// number. Rounds towards 0. Branch to 'not_int32' if the double is out of the
// 32bits signed integer range.
@@ -1062,6 +1211,53 @@ void MacroAssembler::ConvertToInt32(Register source,
}
+void MacroAssembler::EmitFPUTruncate(FPURoundingMode rounding_mode,
+ FPURegister result,
+ DoubleRegister double_input,
+ Register scratch1,
+ Register except_flag,
+ CheckForInexactConversion check_inexact) {
+ ASSERT(CpuFeatures::IsSupported(FPU));
+ CpuFeatures::Scope scope(FPU);
+
+ int32_t except_mask = kFCSRFlagMask; // Assume interested in all exceptions.
+
+ if (check_inexact == kDontCheckForInexactConversion) {
+    // Ignore inexact exceptions.
+ except_mask &= ~kFCSRInexactFlagMask;
+ }
+
+ // Save FCSR.
+ cfc1(scratch1, FCSR);
+ // Disable FPU exceptions.
+ ctc1(zero_reg, FCSR);
+
+ // Do operation based on rounding mode.
+ switch (rounding_mode) {
+ case kRoundToNearest:
+ round_w_d(result, double_input);
+ break;
+ case kRoundToZero:
+ trunc_w_d(result, double_input);
+ break;
+ case kRoundToPlusInf:
+ ceil_w_d(result, double_input);
+ break;
+ case kRoundToMinusInf:
+ floor_w_d(result, double_input);
+ break;
+ } // End of switch-statement.
+
+ // Retrieve FCSR.
+ cfc1(except_flag, FCSR);
+ // Restore FCSR.
+ ctc1(scratch1, FCSR);
+
+ // Check for fpu exceptions.
+ And(except_flag, except_flag, Operand(except_mask));
+}
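+
+// A typical caller might then bail out when any unmasked FPU exception was
+// raised, e.g. (sketch):
+//   Branch(&conversion_failed, ne, except_flag, Operand(zero_reg));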
+
+
void MacroAssembler::EmitOutOfInt32RangeTruncate(Register result,
Register input_high,
Register input_low,
@@ -1148,22 +1344,21 @@ void MacroAssembler::EmitECMATruncate(Register result,
FPURegister double_input,
FPURegister single_scratch,
Register scratch,
- Register input_high,
- Register input_low) {
+ Register scratch2,
+ Register scratch3) {
CpuFeatures::Scope scope(FPU);
- ASSERT(!input_high.is(result));
- ASSERT(!input_low.is(result));
- ASSERT(!input_low.is(input_high));
+ ASSERT(!scratch2.is(result));
+ ASSERT(!scratch3.is(result));
+ ASSERT(!scratch3.is(scratch2));
ASSERT(!scratch.is(result) &&
- !scratch.is(input_high) &&
- !scratch.is(input_low));
+ !scratch.is(scratch2) &&
+ !scratch.is(scratch3));
ASSERT(!single_scratch.is(double_input));
Label done;
Label manual;
// Clear cumulative exception flags and save the FCSR.
- Register scratch2 = input_high;
cfc1(scratch2, FCSR);
ctc1(zero_reg, FCSR);
// Try a conversion to a signed integer.
@@ -1180,6 +1375,8 @@ void MacroAssembler::EmitECMATruncate(Register result,
Branch(&done, eq, scratch, Operand(zero_reg));
// Load the double value and perform a manual truncation.
+ Register input_high = scratch2;
+ Register input_low = scratch3;
Move(input_low, input_high, double_input);
EmitOutOfInt32RangeTruncate(result,
input_high,
@@ -1211,15 +1408,6 @@ void MacroAssembler::GetLeastBitsFromInt32(Register dst,
(cond != cc_always && (!rs.is(zero_reg) || !rt.rm().is(zero_reg))))
-bool MacroAssembler::UseAbsoluteCodePointers() {
- if (is_trampoline_emitted()) {
- return true;
- } else {
- return false;
- }
-}
-
-
void MacroAssembler::Branch(int16_t offset, BranchDelaySlot bdslot) {
BranchShort(offset, bdslot);
}
@@ -1233,11 +1421,18 @@ void MacroAssembler::Branch(int16_t offset, Condition cond, Register rs,
void MacroAssembler::Branch(Label* L, BranchDelaySlot bdslot) {
- bool is_label_near = is_near(L);
- if (UseAbsoluteCodePointers() && !is_label_near) {
- Jr(L, bdslot);
+ if (L->is_bound()) {
+ if (is_near(L)) {
+ BranchShort(L, bdslot);
+ } else {
+ Jr(L, bdslot);
+ }
} else {
- BranchShort(L, bdslot);
+ if (is_trampoline_emitted()) {
+ Jr(L, bdslot);
+ } else {
+ BranchShort(L, bdslot);
+ }
}
}
@@ -1245,15 +1440,26 @@ void MacroAssembler::Branch(Label* L, BranchDelaySlot bdslot) {
void MacroAssembler::Branch(Label* L, Condition cond, Register rs,
const Operand& rt,
BranchDelaySlot bdslot) {
- bool is_label_near = is_near(L);
- if (UseAbsoluteCodePointers() && !is_label_near) {
- Label skip;
- Condition neg_cond = NegateCondition(cond);
- BranchShort(&skip, neg_cond, rs, rt);
- Jr(L, bdslot);
- bind(&skip);
+ if (L->is_bound()) {
+ if (is_near(L)) {
+ BranchShort(L, cond, rs, rt, bdslot);
+ } else {
+ Label skip;
+ Condition neg_cond = NegateCondition(cond);
+ BranchShort(&skip, neg_cond, rs, rt);
+ Jr(L, bdslot);
+ bind(&skip);
+ }
} else {
- BranchShort(L, cond, rs, rt, bdslot);
+ if (is_trampoline_emitted()) {
+ Label skip;
+ Condition neg_cond = NegateCondition(cond);
+ BranchShort(&skip, neg_cond, rs, rt);
+ Jr(L, bdslot);
+ bind(&skip);
+ } else {
+ BranchShort(L, cond, rs, rt, bdslot);
+ }
}
}
@@ -1276,8 +1482,8 @@ void MacroAssembler::BranchShort(int16_t offset, Condition cond, Register rs,
Register scratch = at;
if (rt.is_reg()) {
- // We don't want any other register but scratch clobbered.
- ASSERT(!scratch.is(rs) && !scratch.is(rt.rm_));
+ // NOTE: 'at' can be clobbered by Branch but it is legal to use it as rs or
+ // rt.
r2 = rt.rm_;
switch (cond) {
case cc_always:
@@ -1779,11 +1985,18 @@ void MacroAssembler::BranchAndLink(int16_t offset, Condition cond, Register rs,
void MacroAssembler::BranchAndLink(Label* L, BranchDelaySlot bdslot) {
- bool is_label_near = is_near(L);
- if (UseAbsoluteCodePointers() && !is_label_near) {
- Jalr(L, bdslot);
+ if (L->is_bound()) {
+ if (is_near(L)) {
+ BranchAndLinkShort(L, bdslot);
+ } else {
+ Jalr(L, bdslot);
+ }
} else {
- BranchAndLinkShort(L, bdslot);
+ if (is_trampoline_emitted()) {
+ Jalr(L, bdslot);
+ } else {
+ BranchAndLinkShort(L, bdslot);
+ }
}
}
@@ -1791,15 +2004,26 @@ void MacroAssembler::BranchAndLink(Label* L, BranchDelaySlot bdslot) {
void MacroAssembler::BranchAndLink(Label* L, Condition cond, Register rs,
const Operand& rt,
BranchDelaySlot bdslot) {
- bool is_label_near = is_near(L);
- if (UseAbsoluteCodePointers() && !is_label_near) {
- Label skip;
- Condition neg_cond = NegateCondition(cond);
- BranchShort(&skip, neg_cond, rs, rt);
- Jalr(L, bdslot);
- bind(&skip);
+ if (L->is_bound()) {
+ if (is_near(L)) {
+ BranchAndLinkShort(L, cond, rs, rt, bdslot);
+ } else {
+ Label skip;
+ Condition neg_cond = NegateCondition(cond);
+ BranchShort(&skip, neg_cond, rs, rt);
+ Jalr(L, bdslot);
+ bind(&skip);
+ }
} else {
- BranchAndLinkShort(L, cond, rs, rt, bdslot);
+ if (is_trampoline_emitted()) {
+ Label skip;
+ Condition neg_cond = NegateCondition(cond);
+ BranchShort(&skip, neg_cond, rs, rt);
+ Jalr(L, bdslot);
+ bind(&skip);
+ } else {
+ BranchAndLinkShort(L, cond, rs, rt, bdslot);
+ }
}
}
@@ -2306,10 +2530,10 @@ void MacroAssembler::Push(Handle<Object> handle) {
#ifdef ENABLE_DEBUGGER_SUPPORT
void MacroAssembler::DebugBreak() {
- ASSERT(allow_stub_calls());
mov(a0, zero_reg);
li(a1, Operand(ExternalReference(Runtime::kDebugBreak, isolate())));
CEntryStub ces(1);
+ ASSERT(AllowThisStubCall(&ces));
Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
}
@@ -2320,60 +2544,50 @@ void MacroAssembler::DebugBreak() {
// Exception handling.
void MacroAssembler::PushTryHandler(CodeLocation try_location,
- HandlerType type) {
+ HandlerType type,
+ int handler_index) {
// Adjust this code if not the case.
STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kStateOffset == 1 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kContextOffset == 2 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kFPOffset == 3 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kPCOffset == 4 * kPointerSize);
-
- // The return address is passed in register ra.
+ STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
+
+ // For the JSEntry handler, we must preserve a0-a3 and s0.
+ // t1-t3 are available. We will build up the handler from the bottom by
+ // pushing on the stack. First compute the state.
+ unsigned state = StackHandler::OffsetField::encode(handler_index);
if (try_location == IN_JAVASCRIPT) {
- if (type == TRY_CATCH_HANDLER) {
- li(t0, Operand(StackHandler::TRY_CATCH));
- } else {
- li(t0, Operand(StackHandler::TRY_FINALLY));
- }
- // Save the current handler as the next handler.
- li(t2, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
- lw(t1, MemOperand(t2));
-
- addiu(sp, sp, -StackHandlerConstants::kSize);
- sw(ra, MemOperand(sp, StackHandlerConstants::kPCOffset));
- sw(fp, MemOperand(sp, StackHandlerConstants::kFPOffset));
- sw(cp, MemOperand(sp, StackHandlerConstants::kContextOffset));
- sw(t0, MemOperand(sp, StackHandlerConstants::kStateOffset));
- sw(t1, MemOperand(sp, StackHandlerConstants::kNextOffset));
-
- // Link this handler as the new current one.
- sw(sp, MemOperand(t2));
-
+ state |= (type == TRY_CATCH_HANDLER)
+ ? StackHandler::KindField::encode(StackHandler::TRY_CATCH)
+ : StackHandler::KindField::encode(StackHandler::TRY_FINALLY);
} else {
- // Must preserve a0-a3, and s0 (argv).
ASSERT(try_location == IN_JS_ENTRY);
- // The frame pointer does not point to a JS frame so we save NULL
- // for fp. We expect the code throwing an exception to check fp
- // before dereferencing it to restore the context.
- li(t0, Operand(StackHandler::ENTRY));
-
- // Save the current handler as the next handler.
- li(t2, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
- lw(t1, MemOperand(t2));
-
- ASSERT(Smi::FromInt(0) == 0); // Used for no context.
+ state |= StackHandler::KindField::encode(StackHandler::ENTRY);
+ }
- addiu(sp, sp, -StackHandlerConstants::kSize);
- sw(ra, MemOperand(sp, StackHandlerConstants::kPCOffset));
- sw(zero_reg, MemOperand(sp, StackHandlerConstants::kFPOffset));
- sw(zero_reg, MemOperand(sp, StackHandlerConstants::kContextOffset));
- sw(t0, MemOperand(sp, StackHandlerConstants::kStateOffset));
- sw(t1, MemOperand(sp, StackHandlerConstants::kNextOffset));
+ // Set up the code object (t1) and the state (t2) for pushing.
+ li(t1, Operand(CodeObject()));
+ li(t2, Operand(state));
- // Link this handler as the new current one.
- sw(sp, MemOperand(t2));
+ // Push the frame pointer, context, state, and code object.
+ if (try_location == IN_JAVASCRIPT) {
+ MultiPush(t1.bit() | t2.bit() | cp.bit() | fp.bit());
+ } else {
+ ASSERT_EQ(Smi::FromInt(0), 0);
+    // The first zero_reg is the NULL frame pointer.
+    // The second zero_reg indicates no context.
+    // The operands are reversed to match the order of MultiPush/Pop.
+ Push(zero_reg, zero_reg, t2, t1);
}
+
+ // Link the current handler as the next handler.
+ li(t2, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
+ lw(t1, MemOperand(t2));
+ push(t1);
+ // Set this new handler as the current one.
+ sw(sp, MemOperand(t2));
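+  // The resulting handler frame, from lower to higher addresses (with
+  // kPointerSize == 4), matches the STATIC_ASSERTs above:
+  //   sp + 0:  next handler   (kNextOffset)
+  //   sp + 4:  code object    (kCodeOffset)
+  //   sp + 8:  state          (kStateOffset)
+  //   sp + 12: context        (kContextOffset)
+  //   sp + 16: frame pointer  (kFPOffset)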
}
@@ -2386,19 +2600,36 @@ void MacroAssembler::PopTryHandler() {
}
-void MacroAssembler::Throw(Register value) {
- // v0 is expected to hold the exception.
- Move(v0, value);
+void MacroAssembler::JumpToHandlerEntry() {
+ // Compute the handler entry address and jump to it. The handler table is
+ // a fixed array of (smi-tagged) code offsets.
+ // v0 = exception, a1 = code object, a2 = state.
+ lw(a3, FieldMemOperand(a1, Code::kHandlerTableOffset)); // Handler table.
+ Addu(a3, a3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ srl(a2, a2, StackHandler::kKindWidth); // Handler index.
+ sll(a2, a2, kPointerSizeLog2);
+ Addu(a2, a3, a2);
+ lw(a2, MemOperand(a2)); // Smi-tagged offset.
+ Addu(a1, a1, Operand(Code::kHeaderSize - kHeapObjectTag)); // Code start.
+ sra(t9, a2, kSmiTagSize);
+ Addu(t9, t9, a1);
+ Jump(t9); // Jump.
+}
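+
+// In effect: t9 = code_start + handler_table[state >> kKindWidth], where the
+// table entry is a smi-tagged byte offset into the code object, hence the
+// sra by kSmiTagSize before the final Addu.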
+
+void MacroAssembler::Throw(Register value) {
// Adjust this code if not the case.
STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kStateOffset == 1 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kContextOffset == 2 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kFPOffset == 3 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kPCOffset == 4 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
+ STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
+
+ // The exception is expected in v0.
+ Move(v0, value);
- // Drop the sp to the top of the handler.
+ // Drop the stack pointer to the top of the top handler.
li(a3, Operand(ExternalReference(Isolate::kHandlerAddress,
isolate())));
lw(sp, MemOperand(a3));
@@ -2407,44 +2638,19 @@ void MacroAssembler::Throw(Register value) {
pop(a2);
sw(a2, MemOperand(a3));
- // Restore context and frame pointer, discard state (a3).
- MultiPop(a3.bit() | cp.bit() | fp.bit());
+ // Get the code object (a1) and state (a2). Restore the context and frame
+ // pointer.
+ MultiPop(a1.bit() | a2.bit() | cp.bit() | fp.bit());
// If the handler is a JS frame, restore the context to the frame.
- // (a3 == ENTRY) == (fp == 0) == (cp == 0), so we could test any
- // of them.
+ // (kind == ENTRY) == (fp == 0) == (cp == 0), so we could test either fp
+ // or cp.
Label done;
- Branch(&done, eq, fp, Operand(zero_reg));
+ Branch(&done, eq, cp, Operand(zero_reg));
sw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
bind(&done);
-#ifdef DEBUG
- // When emitting debug_code, set ra as return address for the jump.
- // 5 instructions: add: 1, pop: 2, jump: 2.
- const int kOffsetRaInstructions = 5;
- Label find_ra;
-
- if (emit_debug_code()) {
- // Compute ra for the Jump(t9).
- const int kOffsetRaBytes = kOffsetRaInstructions * Assembler::kInstrSize;
-
- // This branch-and-link sequence is needed to get the current PC on mips,
- // saved to the ra register. Then adjusted for instruction count.
- bal(&find_ra); // bal exposes branch-delay.
- nop(); // Branch delay slot nop.
- bind(&find_ra);
- addiu(ra, ra, kOffsetRaBytes);
- }
-#endif
-
- pop(t9); // 2 instructions: lw, add sp.
- Jump(t9); // 2 instructions: jr, nop (in delay slot).
-
- if (emit_debug_code()) {
- // Make sure that the expected number of instructions were generated.
- ASSERT_EQ(kOffsetRaInstructions,
- InstructionsGeneratedSince(&find_ra));
- }
+ JumpToHandlerEntry();
}
@@ -2453,39 +2659,16 @@ void MacroAssembler::ThrowUncatchable(UncatchableExceptionType type,
// Adjust this code if not the case.
STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kStateOffset == 1 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kContextOffset == 2 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kFPOffset == 3 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kPCOffset == 4 * kPointerSize);
-
- // v0 is expected to hold the exception.
- Move(v0, value);
-
- // Drop sp to the top stack handler.
- li(a3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
- lw(sp, MemOperand(a3));
-
- // Unwind the handlers until the ENTRY handler is found.
- Label loop, done;
- bind(&loop);
- // Load the type of the current stack handler.
- const int kStateOffset = StackHandlerConstants::kStateOffset;
- lw(a2, MemOperand(sp, kStateOffset));
- Branch(&done, eq, a2, Operand(StackHandler::ENTRY));
- // Fetch the next handler in the list.
- const int kNextOffset = StackHandlerConstants::kNextOffset;
- lw(sp, MemOperand(sp, kNextOffset));
- jmp(&loop);
- bind(&done);
-
- // Set the top handler address to next handler past the current ENTRY handler.
- pop(a2);
- sw(a2, MemOperand(a3));
+ STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
+ // The exception is expected in v0.
if (type == OUT_OF_MEMORY) {
// Set external caught exception to false.
- ExternalReference external_caught(
- Isolate::kExternalCaughtExceptionAddress, isolate());
+ ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress,
+ isolate());
li(a0, Operand(false, RelocInfo::NONE));
li(a2, Operand(external_caught));
sw(a0, MemOperand(a2));
@@ -2494,45 +2677,37 @@ void MacroAssembler::ThrowUncatchable(UncatchableExceptionType type,
Failure* out_of_memory = Failure::OutOfMemoryException();
li(v0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
li(a2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
- isolate())));
+ isolate())));
sw(v0, MemOperand(a2));
+ } else if (!value.is(v0)) {
+ mov(v0, value);
}
- // Stack layout at this point. See also StackHandlerConstants.
- // sp -> state (ENTRY)
- // cp
- // fp
- // ra
-
- // Restore context and frame pointer, discard state (r2).
- MultiPop(a2.bit() | cp.bit() | fp.bit());
-
-#ifdef DEBUG
- // When emitting debug_code, set ra as return address for the jump.
- // 5 instructions: add: 1, pop: 2, jump: 2.
- const int kOffsetRaInstructions = 5;
- Label find_ra;
+ // Drop the stack pointer to the top of the top stack handler.
+ li(a3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
+ lw(sp, MemOperand(a3));
- if (emit_debug_code()) {
- // Compute ra for the Jump(t9).
- const int kOffsetRaBytes = kOffsetRaInstructions * Assembler::kInstrSize;
+ // Unwind the handlers until the ENTRY handler is found.
+ Label fetch_next, check_kind;
+ jmp(&check_kind);
+ bind(&fetch_next);
+ lw(sp, MemOperand(sp, StackHandlerConstants::kNextOffset));
+
+ bind(&check_kind);
+ STATIC_ASSERT(StackHandler::ENTRY == 0);
+ lw(a2, MemOperand(sp, StackHandlerConstants::kStateOffset));
+ And(a2, a2, Operand(StackHandler::KindField::kMask));
+ Branch(&fetch_next, ne, a2, Operand(zero_reg));
+
+ // Set the top handler address to next handler past the top ENTRY handler.
+ pop(a2);
+ sw(a2, MemOperand(a3));
- // This branch-and-link sequence is needed to get the current PC on mips,
- // saved to the ra register. Then adjusted for instruction count.
- bal(&find_ra); // bal exposes branch-delay slot.
- nop(); // Branch delay slot nop.
- bind(&find_ra);
- addiu(ra, ra, kOffsetRaBytes);
- }
-#endif
- pop(t9); // 2 instructions: lw, add sp.
- Jump(t9); // 2 instructions: jr, nop (in delay slot).
+ // Get the code object (a1) and state (a2). Clear the context and frame
+ // pointer (0 was saved in the handler).
+ MultiPop(a1.bit() | a2.bit() | cp.bit() | fp.bit());
- if (emit_debug_code()) {
- // Make sure that the expected number of instructions were generated.
- ASSERT_EQ(kOffsetRaInstructions,
- InstructionsGeneratedSince(&find_ra));
- }
+ JumpToHandlerEntry();
}
@@ -2635,6 +2810,7 @@ void MacroAssembler::AllocateInNewSpace(Register object_size,
ASSERT(!result.is(scratch1));
ASSERT(!result.is(scratch2));
ASSERT(!scratch1.is(scratch2));
+ ASSERT(!object_size.is(t9));
ASSERT(!scratch1.is(t9) && !scratch2.is(t9) && !result.is(t9));
// Check relative positions of allocation top and limit addresses.
@@ -2972,15 +3148,140 @@ void MacroAssembler::CopyBytes(Register src,
}
+void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
+ Register end_offset,
+ Register filler) {
+ Label loop, entry;
+ Branch(&entry);
+ bind(&loop);
+ sw(filler, MemOperand(start_offset));
+ Addu(start_offset, start_offset, kPointerSize);
+ bind(&entry);
+ Branch(&loop, lt, start_offset, Operand(end_offset));
+}
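+
+// Typical use (sketch): zap the in-object fields of a freshly allocated
+// object with the undefined value, e.g.
+//   LoadRoot(filler, Heap::kUndefinedValueRootIndex);
+//   InitializeFieldsWithFiller(start_offset, end_offset, filler);
+// The loop stores one word per iteration while start_offset < end_offset.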
+
+
void MacroAssembler::CheckFastElements(Register map,
Register scratch,
Label* fail) {
- STATIC_ASSERT(FAST_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_ELEMENTS == 1);
lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
Branch(fail, hi, scratch, Operand(Map::kMaximumBitField2FastElementValue));
}
+void MacroAssembler::CheckFastObjectElements(Register map,
+ Register scratch,
+ Label* fail) {
+ STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_ELEMENTS == 1);
+ lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
+ Branch(fail, ls, scratch,
+ Operand(Map::kMaximumBitField2FastSmiOnlyElementValue));
+ Branch(fail, hi, scratch,
+ Operand(Map::kMaximumBitField2FastElementValue));
+}
+
+
+void MacroAssembler::CheckFastSmiOnlyElements(Register map,
+ Register scratch,
+ Label* fail) {
+ STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
+ lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
+ Branch(fail, hi, scratch,
+ Operand(Map::kMaximumBitField2FastSmiOnlyElementValue));
+}
+
+
+void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
+ Register key_reg,
+ Register receiver_reg,
+ Register elements_reg,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4,
+ Label* fail) {
+ Label smi_value, maybe_nan, have_double_value, is_nan, done;
+ Register mantissa_reg = scratch2;
+ Register exponent_reg = scratch3;
+
+ // Handle smi values specially.
+ JumpIfSmi(value_reg, &smi_value);
+
+  // Ensure that the object is a heap number.
+ CheckMap(value_reg,
+ scratch1,
+ isolate()->factory()->heap_number_map(),
+ fail,
+ DONT_DO_SMI_CHECK);
+
+  // Check for NaN or Infinity: both have an upper (exponent) word that is
+  // greater than or equal (signed) to 0x7ff00000.
+ li(scratch1, Operand(kNaNOrInfinityLowerBoundUpper32));
+ lw(exponent_reg, FieldMemOperand(value_reg, HeapNumber::kExponentOffset));
+ Branch(&maybe_nan, ge, exponent_reg, Operand(scratch1));
+
+ lw(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
+
+ bind(&have_double_value);
+ sll(scratch1, key_reg, kDoubleSizeLog2 - kSmiTagSize);
+ Addu(scratch1, scratch1, elements_reg);
+ sw(mantissa_reg, FieldMemOperand(scratch1, FixedDoubleArray::kHeaderSize));
+ uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
+ sw(exponent_reg, FieldMemOperand(scratch1, offset));
+ jmp(&done);
+
+ bind(&maybe_nan);
+ // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
+ // it's an Infinity, and the non-NaN code path applies.
+ Branch(&is_nan, gt, exponent_reg, Operand(scratch1));
+ lw(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
+ Branch(&have_double_value, eq, mantissa_reg, Operand(zero_reg));
+ bind(&is_nan);
+ // Load canonical NaN for storing into the double array.
+ uint64_t nan_int64 = BitCast<uint64_t>(
+ FixedDoubleArray::canonical_not_the_hole_nan_as_double());
+ li(mantissa_reg, Operand(static_cast<uint32_t>(nan_int64)));
+ li(exponent_reg, Operand(static_cast<uint32_t>(nan_int64 >> 32)));
+ jmp(&have_double_value);
+
+ bind(&smi_value);
+ Addu(scratch1, elements_reg,
+ Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
+ sll(scratch2, key_reg, kDoubleSizeLog2 - kSmiTagSize);
+ Addu(scratch1, scratch1, scratch2);
+  // scratch1 is now the effective address of the double element.
+
+ FloatingPointHelper::Destination destination;
+ if (CpuFeatures::IsSupported(FPU)) {
+ destination = FloatingPointHelper::kFPURegisters;
+ } else {
+ destination = FloatingPointHelper::kCoreRegisters;
+ }
+
+ Register untagged_value = receiver_reg;
+ SmiUntag(untagged_value, value_reg);
+ FloatingPointHelper::ConvertIntToDouble(this,
+ untagged_value,
+ destination,
+ f0,
+ mantissa_reg,
+ exponent_reg,
+ scratch4,
+ f2);
+ if (destination == FloatingPointHelper::kFPURegisters) {
+ CpuFeatures::Scope scope(FPU);
+ sdc1(f0, MemOperand(scratch1, 0));
+ } else {
+ sw(mantissa_reg, MemOperand(scratch1, 0));
+ sw(exponent_reg, MemOperand(scratch1, Register::kSizeInBytes));
+ }
+ bind(&done);
+}
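+
+// NaNs are canonicalized before the store because FixedDoubleArray encodes
+// the hole as one particular NaN bit pattern; storing an arbitrary NaN could
+// otherwise be misread as a hole.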
+
+
void MacroAssembler::CheckMap(Register obj,
Register scratch,
Handle<Map> map,
@@ -3171,13 +3472,18 @@ void MacroAssembler::InvokeCode(Register code,
InvokeFlag flag,
const CallWrapper& call_wrapper,
CallKind call_kind) {
+ // You can't call a function without a valid frame.
+ ASSERT(flag == JUMP_FUNCTION || has_frame());
+
Label done;
InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag,
call_wrapper, call_kind);
if (flag == CALL_FUNCTION) {
+ call_wrapper.BeforeCall(CallSize(code));
SetCallKind(t1, call_kind);
Call(code);
+ call_wrapper.AfterCall();
} else {
ASSERT(flag == JUMP_FUNCTION);
SetCallKind(t1, call_kind);
@@ -3195,6 +3501,9 @@ void MacroAssembler::InvokeCode(Handle<Code> code,
RelocInfo::Mode rmode,
InvokeFlag flag,
CallKind call_kind) {
+ // You can't call a function without a valid frame.
+ ASSERT(flag == JUMP_FUNCTION || has_frame());
+
Label done;
InvokePrologue(expected, actual, code, no_reg, &done, flag,
@@ -3217,6 +3526,9 @@ void MacroAssembler::InvokeFunction(Register function,
InvokeFlag flag,
const CallWrapper& call_wrapper,
CallKind call_kind) {
+ // You can't call a function without a valid frame.
+ ASSERT(flag == JUMP_FUNCTION || has_frame());
+
// Contract with called JS functions requires that function is passed in a1.
ASSERT(function.is(a1));
Register expected_reg = a2;
@@ -3235,24 +3547,23 @@ void MacroAssembler::InvokeFunction(Register function,
}
-void MacroAssembler::InvokeFunction(JSFunction* function,
+void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
const ParameterCount& actual,
InvokeFlag flag,
CallKind call_kind) {
- ASSERT(function->is_compiled());
+ // You can't call a function without a valid frame.
+ ASSERT(flag == JUMP_FUNCTION || has_frame());
// Get the function and setup the context.
- li(a1, Operand(Handle<JSFunction>(function)));
+ li(a1, Operand(function));
lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
- // Invoke the cached code.
- Handle<Code> code(function->code());
ParameterCount expected(function->shared()->formal_parameter_count());
- if (V8::UseCrankshaft()) {
- UNIMPLEMENTED_MIPS();
- } else {
- InvokeCode(code, expected, actual, RelocInfo::CODE_TARGET, flag, call_kind);
- }
+ // We call indirectly through the code field in the function to
+ // allow recompilation to take effect without changing any of the
+ // call sites.
+ lw(a3, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
+ InvokeCode(a3, expected, actual, flag, NullCallWrapper(), call_kind);
}
@@ -3293,7 +3604,8 @@ void MacroAssembler::IsObjectJSStringType(Register object,
void MacroAssembler::TryGetFunctionPrototype(Register function,
Register result,
Register scratch,
- Label* miss) {
+ Label* miss,
+ bool miss_on_bound_function) {
// Check that the receiver isn't a smi.
JumpIfSmi(function, miss);
@@ -3301,6 +3613,16 @@ void MacroAssembler::TryGetFunctionPrototype(Register function,
GetObjectType(function, result, scratch);
Branch(miss, ne, scratch, Operand(JS_FUNCTION_TYPE));
+ if (miss_on_bound_function) {
+ lw(scratch,
+ FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
+ lw(scratch,
+ FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
+ And(scratch, scratch,
+ Operand(Smi::FromInt(1 << SharedFunctionInfo::kBoundFunction)));
+ Branch(miss, ne, scratch, Operand(zero_reg));
+ }
+
// Make sure that the function has an instance prototype.
Label non_instance;
lbu(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
@@ -3349,51 +3671,24 @@ void MacroAssembler::GetObjectType(Register object,
void MacroAssembler::CallStub(CodeStub* stub, Condition cond,
Register r1, const Operand& r2) {
- ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs.
+ ASSERT(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
Call(stub->GetCode(), RelocInfo::CODE_TARGET, kNoASTId, cond, r1, r2);
}
-MaybeObject* MacroAssembler::TryCallStub(CodeStub* stub, Condition cond,
- Register r1, const Operand& r2) {
- ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs.
- Object* result;
- { MaybeObject* maybe_result = stub->TryGetCode();
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- Call(Handle<Code>(Code::cast(result)), RelocInfo::CODE_TARGET,
- kNoASTId, cond, r1, r2);
- return result;
-}
-
-
void MacroAssembler::TailCallStub(CodeStub* stub) {
- ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs.
+ ASSERT(allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe());
Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
}
-MaybeObject* MacroAssembler::TryTailCallStub(CodeStub* stub,
- Condition cond,
- Register r1,
- const Operand& r2) {
- ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs.
- Object* result;
- { MaybeObject* maybe_result = stub->TryGetCode();
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- Jump(Handle<Code>(Code::cast(result)), RelocInfo::CODE_TARGET, cond, r1, r2);
- return result;
-}
-
-
static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
return ref0.address() - ref1.address();
}
-MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn(
- ExternalReference function, int stack_space) {
+void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function,
+ int stack_space) {
ExternalReference next_address =
ExternalReference::handle_scope_next_address();
const int kNextOffset = 0;
@@ -3464,11 +3759,10 @@ MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn(
Ret();
bind(&promote_scheduled_exception);
- MaybeObject* result = TryTailCallExternalReference(
- ExternalReference(Runtime::kPromoteScheduledException, isolate()), 0, 1);
- if (result->IsFailure()) {
- return result;
- }
+ TailCallExternalReference(
+ ExternalReference(Runtime::kPromoteScheduledException, isolate()),
+ 0,
+ 1);
// HandleScope limit has changed. Delete allocated extensions.
bind(&delete_allocated_handles);
@@ -3481,8 +3775,12 @@ MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn(
1);
mov(v0, s0);
jmp(&leave_exit_frame);
+}
+
- return result;
+bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
+ if (!has_frame_ && stub->SometimesSetsUpAFrame()) return false;
+ return allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe();
}
@@ -3566,7 +3864,16 @@ void MacroAssembler::AdduAndCheckForOverflow(Register dst,
ASSERT(!overflow_dst.is(scratch));
ASSERT(!overflow_dst.is(left));
ASSERT(!overflow_dst.is(right));
- ASSERT(!left.is(right));
+
+ if (left.is(right) && dst.is(left)) {
+ ASSERT(!dst.is(t9));
+ ASSERT(!scratch.is(t9));
+ ASSERT(!left.is(t9));
+ ASSERT(!right.is(t9));
+ ASSERT(!overflow_dst.is(t9));
+ mov(t9, right);
+ right = t9;
+ }
if (dst.is(left)) {
mov(scratch, left); // Preserve left.
@@ -3599,10 +3906,17 @@ void MacroAssembler::SubuAndCheckForOverflow(Register dst,
ASSERT(!overflow_dst.is(scratch));
ASSERT(!overflow_dst.is(left));
ASSERT(!overflow_dst.is(right));
- ASSERT(!left.is(right));
ASSERT(!scratch.is(left));
ASSERT(!scratch.is(right));
+ // This happens with some crankshaft code. Since Subu works fine if
+ // left == right, let's not make that restriction here.
+ if (left.is(right)) {
+ mov(dst, zero_reg);
+ mov(overflow_dst, zero_reg);
+ return;
+ }
+
if (dst.is(left)) {
mov(scratch, left); // Preserve left.
subu(dst, left, right); // Left is overwritten.
@@ -3651,8 +3965,7 @@ void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
const Runtime::Function* function = Runtime::FunctionForId(id);
li(a0, Operand(function->nargs));
li(a1, Operand(ExternalReference(function, isolate())));
- CEntryStub stub(1);
- stub.SaveDoubles();
+ CEntryStub stub(1, kSaveFPRegs);
CallStub(&stub);
}
@@ -3684,17 +3997,6 @@ void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
}
-MaybeObject* MacroAssembler::TryTailCallExternalReference(
- const ExternalReference& ext, int num_arguments, int result_size) {
- // TODO(1236192): Most runtime routines don't need the number of
- // arguments passed in because it is constant. At some point we
- // should remove this need and make the runtime routine entry code
- // smarter.
- li(a0, num_arguments);
- return TryJumpToExternalReference(ext);
-}
-
-
void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
int num_arguments,
int result_size) {
@@ -3711,17 +4013,12 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) {
}
-MaybeObject* MacroAssembler::TryJumpToExternalReference(
- const ExternalReference& builtin) {
- li(a1, Operand(builtin));
- CEntryStub stub(1);
- return TryTailCallStub(&stub);
-}
-
-
void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
InvokeFlag flag,
const CallWrapper& call_wrapper) {
+ // You can't call a builtin without a valid frame.
+ ASSERT(flag == JUMP_FUNCTION || has_frame());
+
GetBuiltinEntry(t9, id);
if (flag == CALL_FUNCTION) {
call_wrapper.BeforeCall(CallSize(t9));
@@ -3854,14 +4151,20 @@ void MacroAssembler::Abort(const char* msg) {
RecordComment(msg);
}
#endif
- // Disable stub call restrictions to always allow calls to abort.
- AllowStubCallsScope allow_scope(this, true);
li(a0, Operand(p0));
push(a0);
li(a0, Operand(Smi::FromInt(p1 - p0)));
push(a0);
- CallRuntime(Runtime::kAbort, 2);
+ // Disable stub call restrictions to always allow calls to abort.
+ if (!has_frame_) {
+ // We don't actually want to generate a pile of code for this, so just
+ // claim there is a stack frame, without generating one.
+ FrameScope scope(this, StackFrame::NONE);
+ CallRuntime(Runtime::kAbort, 2);
+ } else {
+ CallRuntime(Runtime::kAbort, 2);
+ }
// Will not return here.
if (is_trampoline_pool_blocked()) {
// If the calling code cares about the exact number of
@@ -4114,8 +4417,7 @@ void MacroAssembler::JumpIfNotBothSmi(Register reg1,
STATIC_ASSERT(kSmiTag == 0);
ASSERT_EQ(1, kSmiTagMask);
or_(at, reg1, reg2);
- andi(at, at, kSmiTagMask);
- Branch(on_not_both_smi, ne, at, Operand(zero_reg));
+ JumpIfNotSmi(at, on_not_both_smi);
}
@@ -4126,8 +4428,7 @@ void MacroAssembler::JumpIfEitherSmi(Register reg1,
ASSERT_EQ(1, kSmiTagMask);
// Both Smi tags must be 1 (not Smi).
and_(at, reg1, reg2);
- andi(at, at, kSmiTagMask);
- Branch(on_either_smi, eq, at, Operand(zero_reg));
+ JumpIfSmi(at, on_either_smi);
}
@@ -4205,8 +4506,7 @@ void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first,
// Check that neither is a smi.
STATIC_ASSERT(kSmiTag == 0);
And(scratch1, first, Operand(second));
- And(scratch1, scratch1, Operand(kSmiTagMask));
- Branch(failure, eq, scratch1, Operand(zero_reg));
+ JumpIfSmi(scratch1, failure);
JumpIfNonSmisNotBothSequentialAsciiStrings(first,
second,
scratch1,
@@ -4245,7 +4545,23 @@ void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(Register type,
static const int kRegisterPassedArguments = 4;
-void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
+int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
+ int num_double_arguments) {
+ int stack_passed_words = 0;
+ num_reg_arguments += 2 * num_double_arguments;
+
+ // Up to four simple arguments are passed in registers a0..a3.
+ if (num_reg_arguments > kRegisterPassedArguments) {
+ stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
+ }
+ stack_passed_words += kCArgSlotCount;
+ return stack_passed_words;
+}
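+
+// Example: two integer and two double arguments give 2 + 2 * 2 = 6 register
+// slots; 6 - 4 = 2 words spill to the stack, which together with the
+// kCArgSlotCount reserved slots (4 under O32) makes 6 stack words in total.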
+
+
+void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
+ int num_double_arguments,
+ Register scratch) {
int frame_alignment = ActivationFrameAlignment();
// Up to four simple arguments are passed in registers a0..a3.
@@ -4253,9 +4569,8 @@ void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
// mips, even though those argument slots are not normally used.
// Remaining arguments are pushed on the stack, above (higher address than)
// the argument slots.
- int stack_passed_arguments = ((num_arguments <= kRegisterPassedArguments) ?
- 0 : num_arguments - kRegisterPassedArguments) +
- kCArgSlotCount;
+ int stack_passed_arguments = CalculateStackPassedWords(
+ num_reg_arguments, num_double_arguments);
if (frame_alignment > kPointerSize) {
// Make stack end at alignment and make room for num_arguments - 4 words
// and the original value of sp.
@@ -4270,26 +4585,43 @@ void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
}
+void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
+ Register scratch) {
+ PrepareCallCFunction(num_reg_arguments, 0, scratch);
+}
+
+
+void MacroAssembler::CallCFunction(ExternalReference function,
+ int num_reg_arguments,
+ int num_double_arguments) {
+ li(t8, Operand(function));
+ CallCFunctionHelper(t8, num_reg_arguments, num_double_arguments);
+}
+
+
+void MacroAssembler::CallCFunction(Register function,
+ int num_reg_arguments,
+ int num_double_arguments) {
+ CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
+}
+
+
void MacroAssembler::CallCFunction(ExternalReference function,
int num_arguments) {
- CallCFunctionHelper(no_reg, function, t8, num_arguments);
+ CallCFunction(function, num_arguments, 0);
}
void MacroAssembler::CallCFunction(Register function,
- Register scratch,
int num_arguments) {
- CallCFunctionHelper(function,
- ExternalReference::the_hole_value_location(isolate()),
- scratch,
- num_arguments);
+ CallCFunction(function, num_arguments, 0);
}
void MacroAssembler::CallCFunctionHelper(Register function,
- ExternalReference function_reference,
- Register scratch,
- int num_arguments) {
+ int num_reg_arguments,
+ int num_double_arguments) {
+ ASSERT(has_frame());
// Make sure that the stack is aligned before calling a C function unless
// running in the simulator. The simulator has its own alignment check which
// provides more information.
@@ -4317,19 +4649,15 @@ void MacroAssembler::CallCFunctionHelper(Register function,
// allow preemption, so the return address in the link register
// stays correct.
- if (function.is(no_reg)) {
- function = t9;
- li(function, Operand(function_reference));
- } else if (!function.is(t9)) {
+ if (!function.is(t9)) {
mov(t9, function);
function = t9;
}
Call(function);
- int stack_passed_arguments = ((num_arguments <= kRegisterPassedArguments) ?
- 0 : num_arguments - kRegisterPassedArguments) +
- kCArgSlotCount;
+ int stack_passed_arguments = CalculateStackPassedWords(
+ num_reg_arguments, num_double_arguments);
if (OS::ActivationFrameAlignment() > kPointerSize) {
lw(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
@@ -4342,6 +4670,235 @@ void MacroAssembler::CallCFunctionHelper(Register function,
#undef BRANCH_ARGS_CHECK
+void MacroAssembler::PatchRelocatedValue(Register li_location,
+ Register scratch,
+ Register new_value) {
+ lw(scratch, MemOperand(li_location));
+ // At this point scratch is a lui(at, ...) instruction.
+ if (emit_debug_code()) {
+ And(scratch, scratch, kOpcodeMask);
+ Check(eq, "The instruction to patch should be a lui.",
+ scratch, Operand(LUI));
+ lw(scratch, MemOperand(li_location));
+ }
+ srl(t9, new_value, kImm16Bits);
+ Ins(scratch, t9, 0, kImm16Bits);
+ sw(scratch, MemOperand(li_location));
+
+ lw(scratch, MemOperand(li_location, kInstrSize));
+ // scratch is now ori(at, ...).
+ if (emit_debug_code()) {
+ And(scratch, scratch, kOpcodeMask);
+ Check(eq, "The instruction to patch should be an ori.",
+ scratch, Operand(ORI));
+ lw(scratch, MemOperand(li_location, kInstrSize));
+ }
+ Ins(scratch, new_value, 0, kImm16Bits);
+ sw(scratch, MemOperand(li_location, kInstrSize));
+
+ // Update the I-cache so the new lui and ori can be executed.
+ FlushICache(li_location, 2);
+}
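+
+// A 32-bit li(at, value) is emitted as a lui/ori pair, so the code above
+// rewrites the 16-bit immediate of each instruction with the corresponding
+// half of new_value and then flushes both patched instructions from the
+// I-cache.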
+
+
+void MacroAssembler::CheckPageFlag(
+ Register object,
+ Register scratch,
+ int mask,
+ Condition cc,
+ Label* condition_met) {
+ And(scratch, object, Operand(~Page::kPageAlignmentMask));
+ lw(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
+ And(scratch, scratch, Operand(mask));
+ Branch(condition_met, cc, scratch, Operand(zero_reg));
+}
+
+
+void MacroAssembler::JumpIfBlack(Register object,
+ Register scratch0,
+ Register scratch1,
+ Label* on_black) {
+ HasColor(object, scratch0, scratch1, on_black, 1, 0); // kBlackBitPattern.
+ ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
+}
+
+
+void MacroAssembler::HasColor(Register object,
+ Register bitmap_scratch,
+ Register mask_scratch,
+ Label* has_color,
+ int first_bit,
+ int second_bit) {
+ ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, t8));
+ ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, t9));
+
+ GetMarkBits(object, bitmap_scratch, mask_scratch);
+
+ Label other_color, word_boundary;
+ lw(t9, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
+ And(t8, t9, Operand(mask_scratch));
+ Branch(&other_color, first_bit == 1 ? eq : ne, t8, Operand(zero_reg));
+ // Shift left 1 by adding.
+ Addu(mask_scratch, mask_scratch, Operand(mask_scratch));
+ Branch(&word_boundary, eq, mask_scratch, Operand(zero_reg));
+ And(t8, t9, Operand(mask_scratch));
+ Branch(has_color, second_bit == 1 ? ne : eq, t8, Operand(zero_reg));
+ jmp(&other_color);
+
+ bind(&word_boundary);
+ lw(t9, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize));
+ And(t9, t9, Operand(1));
+ Branch(has_color, second_bit == 1 ? ne : eq, t9, Operand(zero_reg));
+ bind(&other_color);
+}
+
+
+// Detect some, but not all, common pointer-free objects. This is used by the
+// incremental write barrier which doesn't care about oddballs (they are always
+// marked black immediately so this code is not hit).
+void MacroAssembler::JumpIfDataObject(Register value,
+ Register scratch,
+ Label* not_data_object) {
+ ASSERT(!AreAliased(value, scratch, t8, no_reg));
+ Label is_data_object;
+ lw(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
+ LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
+ Branch(&is_data_object, eq, t8, Operand(scratch));
+ ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
+ ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
+ // If it's a string and it's not a cons string then it's an object containing
+ // no GC pointers.
+ lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
+ And(t8, scratch, Operand(kIsIndirectStringMask | kIsNotStringMask));
+ Branch(not_data_object, ne, t8, Operand(zero_reg));
+ bind(&is_data_object);
+}
+
+
+void MacroAssembler::GetMarkBits(Register addr_reg,
+ Register bitmap_reg,
+ Register mask_reg) {
+ ASSERT(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg));
+ And(bitmap_reg, addr_reg, Operand(~Page::kPageAlignmentMask));
+ Ext(mask_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2);
+ const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
+ Ext(t8, addr_reg, kLowBits, kPageSizeBits - kLowBits);
+ sll(t8, t8, kPointerSizeLog2);
+ Addu(bitmap_reg, bitmap_reg, t8);
+ li(t8, Operand(1));
+ sllv(mask_reg, t8, mask_reg);
+}
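+
+// With 4-byte pointers and 32-bit bitmap cells (Bitmap::kBitsPerCellLog2 == 5)
+// this computes mask_reg = 1 << ((addr >> 2) & 31) and leaves bitmap_reg
+// pointing at the page plus the byte offset of the cell; callers add
+// MemoryChunk::kHeaderSize when loading the cell itself.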
+
+
+void MacroAssembler::EnsureNotWhite(
+ Register value,
+ Register bitmap_scratch,
+ Register mask_scratch,
+ Register load_scratch,
+ Label* value_is_white_and_not_data) {
+ ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, t8));
+ GetMarkBits(value, bitmap_scratch, mask_scratch);
+
+ // If the value is black or grey we don't need to do anything.
+ ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
+ ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
+ ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
+ ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
+
+ Label done;
+
+ // Since both black and grey have a 1 in the first position and white does
+ // not have a 1 there we only need to check one bit.
+ lw(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
+ And(t8, mask_scratch, load_scratch);
+ Branch(&done, ne, t8, Operand(zero_reg));
+
+ if (FLAG_debug_code) {
+ // Check for impossible bit pattern.
+ Label ok;
+ // sll may overflow, making the check conservative.
+ sll(t8, mask_scratch, 1);
+ And(t8, load_scratch, t8);
+ Branch(&ok, eq, t8, Operand(zero_reg));
+ stop("Impossible marking bit pattern");
+ bind(&ok);
+ }
+
+ // Value is white. We check whether it is data that doesn't need scanning.
+ // Currently only checks for HeapNumber and non-cons strings.
+ Register map = load_scratch; // Holds map while checking type.
+ Register length = load_scratch; // Holds length of object after testing type.
+ Label is_data_object;
+
+ // Check for heap-number
+ lw(map, FieldMemOperand(value, HeapObject::kMapOffset));
+ LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
+ {
+ Label skip;
+ Branch(&skip, ne, t8, Operand(map));
+ li(length, HeapNumber::kSize);
+ Branch(&is_data_object);
+ bind(&skip);
+ }
+
+ // Check for strings.
+ ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
+ ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
+ // If it's a string and it's not a cons string then it's an object containing
+ // no GC pointers.
+ Register instance_type = load_scratch;
+ lbu(instance_type, FieldMemOperand(map, Map::kInstanceTypeOffset));
+ And(t8, instance_type, Operand(kIsIndirectStringMask | kIsNotStringMask));
+ Branch(value_is_white_and_not_data, ne, t8, Operand(zero_reg));
+ // It's a non-indirect (non-cons and non-slice) string.
+ // If it's external, the length is just ExternalString::kSize.
+ // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
+ // External strings are the only ones with the kExternalStringTag bit
+ // set.
+ ASSERT_EQ(0, kSeqStringTag & kExternalStringTag);
+ ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
+ And(t8, instance_type, Operand(kExternalStringTag));
+ {
+ Label skip;
+ Branch(&skip, eq, t8, Operand(zero_reg));
+ li(length, ExternalString::kSize);
+ Branch(&is_data_object);
+ bind(&skip);
+ }
+
+ // Sequential string, either ASCII or UC16.
+ // For ASCII (char-size of 1) we shift the smi tag away to get the length.
+ // For UC16 (char-size of 2) we just leave the smi tag in place, thereby
+ // getting the length multiplied by 2.
+ ASSERT(kAsciiStringTag == 4 && kStringEncodingMask == 4);
+ ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
+ lw(t9, FieldMemOperand(value, String::kLengthOffset));
+ And(t8, instance_type, Operand(kStringEncodingMask));
+ {
+ Label skip;
+ Branch(&skip, eq, t8, Operand(zero_reg));
+ srl(t9, t9, 1);
+ bind(&skip);
+ }
+ Addu(length, t9, Operand(SeqString::kHeaderSize + kObjectAlignmentMask));
+ And(length, length, Operand(~kObjectAlignmentMask));
+
+ bind(&is_data_object);
+ // Value is a data object, and it is white. Mark it black. Since we know
+ // that the object is white we can make it black by flipping one bit.
+ lw(t8, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
+ Or(t8, t8, Operand(mask_scratch));
+ sw(t8, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
+
+ And(bitmap_scratch, bitmap_scratch, Operand(~Page::kPageAlignmentMask));
+ lw(t8, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
+ Addu(t8, t8, Operand(length));
+ sw(t8, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
+
+ bind(&done);
+}
+
+
void MacroAssembler::LoadInstanceDescriptors(Register map,
Register descriptors) {
lw(descriptors,
@@ -4353,6 +4910,60 @@ void MacroAssembler::LoadInstanceDescriptors(Register map,
}
+void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
+ ASSERT(!output_reg.is(input_reg));
+ Label done;
+ li(output_reg, Operand(255));
+ // Normal branch: nop in delay slot.
+ Branch(&done, gt, input_reg, Operand(output_reg));
+ // Use delay slot in this branch.
+ Branch(USE_DELAY_SLOT, &done, lt, input_reg, Operand(zero_reg));
+ mov(output_reg, zero_reg); // In delay slot.
+ mov(output_reg, input_reg); // Value is in range 0..255.
+ bind(&done);
+}
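+
+// Note that the delay-slot mov(output_reg, zero_reg) executes whether or not
+// the branch is taken; when it is taken the result stays 0, otherwise the
+// following mov overwrites it with the in-range value.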
+
+
+void MacroAssembler::ClampDoubleToUint8(Register result_reg,
+ DoubleRegister input_reg,
+ DoubleRegister temp_double_reg) {
+ Label above_zero;
+ Label done;
+ Label in_bounds;
+
+ Move(temp_double_reg, 0.0);
+ BranchF(&above_zero, NULL, gt, input_reg, temp_double_reg);
+
+ // Double value is less than zero, NaN or Inf, return 0.
+ mov(result_reg, zero_reg);
+ Branch(&done);
+
+ // Double value is >= 255, return 255.
+ bind(&above_zero);
+ Move(temp_double_reg, 255.0);
+ BranchF(&in_bounds, NULL, le, input_reg, temp_double_reg);
+ li(result_reg, Operand(255));
+ Branch(&done);
+
+ // In 0-255 range, round and truncate.
+ bind(&in_bounds);
+ round_w_d(temp_double_reg, input_reg);
+ mfc1(result_reg, temp_double_reg);
+ bind(&done);
+}
+
+
+bool AreAliased(Register r1, Register r2, Register r3, Register r4) {
+ if (r1.is(r2)) return true;
+ if (r1.is(r3)) return true;
+ if (r1.is(r4)) return true;
+ if (r2.is(r3)) return true;
+ if (r2.is(r4)) return true;
+ if (r3.is(r4)) return true;
+ return false;
+}
+
+
CodePatcher::CodePatcher(byte* address, int instructions)
: address_(address),
instructions_(instructions),
diff --git a/deps/v8/src/mips/macro-assembler-mips.h b/deps/v8/src/mips/macro-assembler-mips.h
index 5dd012e93..6b2a5511e 100644
--- a/deps/v8/src/mips/macro-assembler-mips.h
+++ b/deps/v8/src/mips/macro-assembler-mips.h
@@ -50,15 +50,16 @@ class JumpTarget;
// trying to update gp register for position-independent-code. Whenever
// MIPS generated code calls C code, it must be via t9 register.
-// Registers aliases
+
+// Register aliases.
// cp is assumed to be a callee saved register.
+const Register lithiumScratchReg = s3; // Scratch register.
+const Register lithiumScratchReg2 = s4; // Scratch register.
+const Register condReg = s5; // Simulated (partial) condition code for mips.
const Register roots = s6; // Roots array pointer.
const Register cp = s7; // JavaScript context pointer.
const Register fp = s8_fp; // Alias for fp.
-// Registers used for condition evaluation.
-const Register condReg1 = s4;
-const Register condReg2 = s5;
-
+const DoubleRegister lithiumScratchDouble = f30; // Double scratch register.
// Flags used for the AllocateInNewSpace functions.
enum AllocationFlags {
@@ -90,6 +91,43 @@ enum BranchDelaySlot {
PROTECT
};
+
+enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
+enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
+enum RAStatus { kRAHasNotBeenSaved, kRAHasBeenSaved };
+
+bool AreAliased(Register r1, Register r2, Register r3, Register r4);
+
+
+// -----------------------------------------------------------------------------
+// Static helper functions.
+
+inline MemOperand ContextOperand(Register context, int index) {
+ return MemOperand(context, Context::SlotOffset(index));
+}
+
+
+inline MemOperand GlobalObjectOperand() {
+ return ContextOperand(cp, Context::GLOBAL_INDEX);
+}
+
+
+// Generate a MemOperand for loading a field from an object.
+inline MemOperand FieldMemOperand(Register object, int offset) {
+ return MemOperand(object, offset - kHeapObjectTag);
+}
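+
+// For example, FieldMemOperand(a0, HeapNumber::kValueOffset) yields
+// MemOperand(a0, kValueOffset - 1), compensating for the heap-object tag.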
+
+
+// Generate a MemOperand for storing arguments 5..N on the stack
+// when calling CallCFunction().
+inline MemOperand CFunctionArgumentOperand(int index) {
+ ASSERT(index > kCArgSlotCount);
+ // Argument 5 takes the slot just past the four Arg-slots.
+ int offset = (index - 5) * kPointerSize + kCArgsSlotsSize;
+ return MemOperand(sp, offset);
+}
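+
+// Example: assuming four reserved arg slots (kCArgsSlotsSize == 16 with
+// 4-byte pointers), argument 5 resolves to MemOperand(sp, 16) and argument 6
+// to MemOperand(sp, 20).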
+
+
// MacroAssembler implements a collection of frequently used macros.
class MacroAssembler: public Assembler {
public:
@@ -138,21 +176,22 @@ class MacroAssembler: public Assembler {
void Jump(intptr_t target, RelocInfo::Mode rmode, COND_ARGS);
void Jump(Address target, RelocInfo::Mode rmode, COND_ARGS);
void Jump(Handle<Code> code, RelocInfo::Mode rmode, COND_ARGS);
- int CallSize(Register target, COND_ARGS);
+ static int CallSize(Register target, COND_ARGS);
void Call(Register target, COND_ARGS);
- int CallSize(Address target, RelocInfo::Mode rmode, COND_ARGS);
+ static int CallSize(Address target, RelocInfo::Mode rmode, COND_ARGS);
void Call(Address target, RelocInfo::Mode rmode, COND_ARGS);
- int CallSize(Handle<Code> code,
- RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
- unsigned ast_id = kNoASTId,
- COND_ARGS);
+ static int CallSize(Handle<Code> code,
+ RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
+ unsigned ast_id = kNoASTId,
+ COND_ARGS);
void Call(Handle<Code> code,
RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
unsigned ast_id = kNoASTId,
COND_ARGS);
void Ret(COND_ARGS);
- inline void Ret(BranchDelaySlot bd) {
- Ret(al, zero_reg, Operand(zero_reg), bd);
+ inline void Ret(BranchDelaySlot bd, Condition cond = al,
+ Register rs = zero_reg, const Operand& rt = Operand(zero_reg)) {
+ Ret(cond, rs, rt, bd);
}
#undef COND_ARGS
@@ -197,6 +236,8 @@ class MacroAssembler: public Assembler {
mtc1(src_high, FPURegister::from_code(dst.code() + 1));
}
+ void Move(FPURegister dst, double imm);
+
// Jump unconditionally to given label.
// We NEED a nop in the branch delay slot, as it used by v8, for example in
// CodeGenerator::ProcessDeferred().
@@ -206,6 +247,7 @@ class MacroAssembler: public Assembler {
Branch(L);
}
+
// Load an object from the root table.
void LoadRoot(Register destination,
Heap::RootListIndex index);
@@ -221,39 +263,127 @@ class MacroAssembler: public Assembler {
Condition cond, Register src1, const Operand& src2);
- // Check if object is in new space.
- // scratch can be object itself, but it will be clobbered.
- void InNewSpace(Register object,
- Register scratch,
- Condition cc, // eq for new space, ne otherwise.
- Label* branch);
+ // ---------------------------------------------------------------------------
+ // GC Support
+
+ void IncrementalMarkingRecordWriteHelper(Register object,
+ Register value,
+ Register address);
+
+ enum RememberedSetFinalAction {
+ kReturnAtEnd,
+ kFallThroughAtEnd
+ };
+
+ // Record in the remembered set the fact that we have a pointer to new space
+ // at the address pointed to by the addr register. Only works if addr is not
+ // in new space.
+ void RememberedSetHelper(Register object, // Used for debug code.
+ Register addr,
+ Register scratch,
+ SaveFPRegsMode save_fp,
+ RememberedSetFinalAction and_then);
+
+ void CheckPageFlag(Register object,
+ Register scratch,
+ int mask,
+ Condition cc,
+ Label* condition_met);
+
+ // Check if object is in new space. Jumps if the object is not in new space.
+ // The register scratch can be object itself, but it will be clobbered.
+ void JumpIfNotInNewSpace(Register object,
+ Register scratch,
+ Label* branch) {
+ InNewSpace(object, scratch, ne, branch);
+ }
+
+ // Check if object is in new space. Jumps if the object is in new space.
+ // The register scratch can be object itself, but scratch will be clobbered.
+ void JumpIfInNewSpace(Register object,
+ Register scratch,
+ Label* branch) {
+ InNewSpace(object, scratch, eq, branch);
+ }
- // For the page containing |object| mark the region covering [address]
- // dirty. The object address must be in the first 8K of an allocated page.
- void RecordWriteHelper(Register object,
- Register address,
- Register scratch);
-
- // For the page containing |object| mark the region covering
- // [object+offset] dirty. The object address must be in the first 8K
- // of an allocated page. The 'scratch' registers are used in the
- // implementation and all 3 registers are clobbered by the
- // operation, as well as the 'at' register. RecordWrite updates the
- // write barrier even when storing smis.
- void RecordWrite(Register object,
- Operand offset,
+ // Check if an object has a given incremental marking color.
+ void HasColor(Register object,
+ Register scratch0,
+ Register scratch1,
+ Label* has_color,
+ int first_bit,
+ int second_bit);
+
+ void JumpIfBlack(Register object,
Register scratch0,
- Register scratch1);
+ Register scratch1,
+ Label* on_black);
+
+ // Checks the color of an object. If the object is already grey or black
+ // then we just fall through, since it is already live. If it is white and
+ // we can determine that it doesn't need to be scanned, then we just mark it
+ // black and fall through. For the rest we jump to the label so the
+ // incremental marker can fix its assumptions.
+ void EnsureNotWhite(Register object,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* object_is_white_and_not_data);
- // For the page containing |object| mark the region covering
- // [address] dirty. The object address must be in the first 8K of an
- // allocated page. All 3 registers are clobbered by the operation,
- // as well as the ip register. RecordWrite updates the write barrier
- // even when storing smis.
- void RecordWrite(Register object,
- Register address,
- Register scratch);
+  // Detects conservatively whether an object is data-only, i.e. it does not
+  // need to be scanned by the garbage collector.
+ void JumpIfDataObject(Register value,
+ Register scratch,
+ Label* not_data_object);
+
+ // Notify the garbage collector that we wrote a pointer into an object.
+ // |object| is the object being stored into, |value| is the object being
+  // stored. The value and scratch registers are clobbered by the operation.
+ // The offset is the offset from the start of the object, not the offset from
+ // the tagged HeapObject pointer. For use with FieldOperand(reg, off).
+ void RecordWriteField(
+ Register object,
+ int offset,
+ Register value,
+ Register scratch,
+ RAStatus ra_status,
+ SaveFPRegsMode save_fp,
+ RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
+ SmiCheck smi_check = INLINE_SMI_CHECK);
+
+ // As above, but the offset has the tag presubtracted. For use with
+ // MemOperand(reg, off).
+ inline void RecordWriteContextSlot(
+ Register context,
+ int offset,
+ Register value,
+ Register scratch,
+ RAStatus ra_status,
+ SaveFPRegsMode save_fp,
+ RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
+ SmiCheck smi_check = INLINE_SMI_CHECK) {
+ RecordWriteField(context,
+ offset + kHeapObjectTag,
+ value,
+ scratch,
+ ra_status,
+ save_fp,
+ remembered_set_action,
+ smi_check);
+ }
+
+ // For a given |object| notify the garbage collector that the slot |address|
+ // has been written. |value| is the object being stored. The value and
+ // address registers are clobbered by the operation.
+ void RecordWrite(
+ Register object,
+ Register address,
+ Register value,
+ RAStatus ra_status,
+ SaveFPRegsMode save_fp,
+ RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
+ SmiCheck smi_check = INLINE_SMI_CHECK);
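+
+ // Usage sketch (illustrative): when the slot address has already been
+ // computed, e.g. for an element store:
+ //   sw(value, MemOperand(address));
+ //   RecordWrite(object, address, value,
+ //               kRAHasNotBeenSaved, kDontSaveFPRegs);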
// ---------------------------------------------------------------------------
@@ -517,6 +647,14 @@ class MacroAssembler: public Assembler {
Addu(sp, sp, 2 * kPointerSize);
}
+ // Pop three registers. Pops rightmost register first (from lower address).
+ void Pop(Register src1, Register src2, Register src3) {
+ lw(src3, MemOperand(sp, 0 * kPointerSize));
+ lw(src2, MemOperand(sp, 1 * kPointerSize));
+ lw(src1, MemOperand(sp, 2 * kPointerSize));
+ Addu(sp, sp, 3 * kPointerSize);
+ }
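+
+ // Usage sketch (illustrative): this mirrors the three-register Push, so
+ // the pair round-trips the registers:
+ //   Push(a1, a2, a3);  // a1 is pushed first and ends up highest.
+ //   Pop(a1, a2, a3);   // Restores all three in one go.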
+
void Pop(uint32_t count = 1) {
Addu(sp, sp, Operand(count * kPointerSize));
}
@@ -535,10 +673,17 @@ class MacroAssembler: public Assembler {
// into register dst.
void LoadFromSafepointRegisterSlot(Register dst, Register src);
+ // Flush the I-cache from asm code. You should use CPU::FlushICache from C.
+ // Does not handle errors.
+ void FlushICache(Register address, unsigned instructions);
+
// MIPS32 R2 instruction macro.
void Ins(Register rt, Register rs, uint16_t pos, uint16_t size);
void Ext(Register rt, Register rs, uint16_t pos, uint16_t size);
+ // ---------------------------------------------------------------------------
+ // FPU macros. These do not handle special cases like NaN or +/- infinity.
+
// Convert unsigned word to double.
void Cvt_d_uw(FPURegister fd, FPURegister fs, FPURegister scratch);
void Cvt_d_uw(FPURegister fd, Register rs, FPURegister scratch);
@@ -547,6 +692,24 @@ class MacroAssembler: public Assembler {
void Trunc_uw_d(FPURegister fd, FPURegister fs, FPURegister scratch);
void Trunc_uw_d(FPURegister fd, Register rs, FPURegister scratch);
+ // Wrapper function for the different cmp/branch types.
+ void BranchF(Label* target,
+ Label* nan,
+ Condition cc,
+ FPURegister cmp1,
+ FPURegister cmp2,
+ BranchDelaySlot bd = PROTECT);
+
+ // Alternate (inline) version for better readability with USE_DELAY_SLOT.
+ inline void BranchF(BranchDelaySlot bd,
+ Label* target,
+ Label* nan,
+ Condition cc,
+ FPURegister cmp1,
+ FPURegister cmp2) {
+ BranchF(target, nan, cc, cmp1, cmp2, bd);
+ }
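+
+ // Usage sketch (illustrative): branch to |less| if f0 < f2, filling the
+ // branch delay slot, and divert unordered (NaN) comparisons:
+ //   Label less, is_nan;
+ //   BranchF(USE_DELAY_SLOT, &less, &is_nan, lt, f0, f2);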
+
// Convert the HeapNumber pointed to by source to a 32-bit signed integer
// dest. If the HeapNumber does not fit into a 32-bit signed integer branch
// to the not_int32 label. If FPU is available double_scratch is used but not
@@ -558,6 +721,18 @@ class MacroAssembler: public Assembler {
FPURegister double_scratch,
Label *not_int32);
+ // Truncates a double using a specific rounding mode.
+ // The except_flag will contain any exceptions caused by the instruction.
+ // If check_inexact is kDontCheckForInexactConversion, then the inexact
+ // exception is masked.
+ void EmitFPUTruncate(FPURoundingMode rounding_mode,
+ FPURegister result,
+ DoubleRegister double_input,
+ Register scratch1,
+ Register except_flag,
+ CheckForInexactConversion check_inexact
+ = kDontCheckForInexactConversion);
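+
+ // Usage sketch (illustrative): truncate toward zero, then branch to a
+ // bailout if the conversion was inexact or out of range:
+ //   EmitFPUTruncate(kRoundToZero, single_scratch, double_input,
+ //                   scratch1, except_flag, kCheckForInexactConversion);
+ //   Branch(&bailout, ne, except_flag, Operand(zero_reg));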
+
// Helper for EmitECMATruncate.
// This will truncate a floating-point value outside of the signed 32-bit
// integer range to a 32-bit signed integer.
@@ -579,15 +754,6 @@ class MacroAssembler: public Assembler {
Register scratch2,
Register scratch3);
- // -------------------------------------------------------------------------
- // Activation frames.
-
- void EnterInternalFrame() { EnterFrame(StackFrame::INTERNAL); }
- void LeaveInternalFrame() { LeaveFrame(StackFrame::INTERNAL); }
-
- void EnterConstructFrame() { EnterFrame(StackFrame::CONSTRUCT); }
- void LeaveConstructFrame() { LeaveFrame(StackFrame::CONSTRUCT); }
-
// Enter exit frame.
// argc - argument count to be dropped by LeaveExitFrame.
// save_doubles - saves FPU registers on stack, currently disabled.
@@ -614,6 +780,7 @@ class MacroAssembler: public Assembler {
Register map,
Register scratch);
+
// -------------------------------------------------------------------------
// JavaScript invokes.
@@ -645,7 +812,7 @@ class MacroAssembler: public Assembler {
const CallWrapper& call_wrapper,
CallKind call_kind);
- void InvokeFunction(JSFunction* function,
+ void InvokeFunction(Handle<JSFunction> function,
const ParameterCount& actual,
InvokeFlag flag,
CallKind call_kind);
@@ -676,9 +843,9 @@ class MacroAssembler: public Assembler {
// Exception handling.
// Push a new try handler and link into try handler chain.
- // The return address must be passed in register ra.
- // Clobber t0, t1, t2.
- void PushTryHandler(CodeLocation try_location, HandlerType type);
+ void PushTryHandler(CodeLocation try_location,
+ HandlerType type,
+ int handler_index);
// Unlink the stack handler on top of the stack from the try handler chain.
// Must preserve the result register.
@@ -702,6 +869,13 @@ class MacroAssembler: public Assembler {
Register length,
Register scratch);
+ // Initialize fields with filler values. Fields starting at |start_offset|
+ // up to but not including |end_offset| are overwritten with the value in
+ // |filler|. At the end of the loop, |start_offset| takes the value of
+ // |end_offset|.
+ void InitializeFieldsWithFiller(Register start_offset,
+ Register end_offset,
+ Register filler);
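+
+ // Usage sketch (illustrative): fill a freshly allocated object's fields
+ // with undefined, as object initialization typically does:
+ //   LoadRoot(filler, Heap::kUndefinedValueRootIndex);
+ //   InitializeFieldsWithFiller(start_offset, end_offset, filler);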
+
// -------------------------------------------------------------------------
// Support functions.
@@ -713,7 +887,8 @@ class MacroAssembler: public Assembler {
void TryGetFunctionPrototype(Register function,
Register result,
Register scratch,
- Label* miss);
+ Label* miss,
+ bool miss_on_bound_function = false);
void GetObjectType(Register function,
Register map,
@@ -725,6 +900,31 @@ class MacroAssembler: public Assembler {
Register scratch,
Label* fail);
+ // Check if a map for a JSObject indicates that the object can have both smi
+ // and HeapObject elements. Jump to the specified label if it does not.
+ void CheckFastObjectElements(Register map,
+ Register scratch,
+ Label* fail);
+
+ // Check if a map for a JSObject indicates that the object has fast smi only
+ // elements. Jump to the specified label if it does not.
+ void CheckFastSmiOnlyElements(Register map,
+ Register scratch,
+ Label* fail);
+
+ // Check to see if maybe_number can be stored as a double in
+ // FastDoubleElements. If it can, store it at the index specified by key in
+ // the FastDoubleElements array elements, otherwise jump to fail.
+ void StoreNumberToDoubleElements(Register value_reg,
+ Register key_reg,
+ Register receiver_reg,
+ Register elements_reg,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4,
+ Label* fail);
+
// Check if the map of an object is equal to a specified map (either
// given directly or as an index into the root list) and branch to
// label if not. Skip the smi check if not required (object is known
@@ -754,6 +954,21 @@ class MacroAssembler: public Assembler {
// occurred.
void IllegalOperation(int num_arguments);
+
+ // Load and check the instance type of an object for being a string.
+ // Loads the type into the second argument register.
+ // Returns a condition that is satisfied if the object was a string.
+ Condition IsObjectStringType(Register obj,
+ Register type,
+ Register result) {
+ lw(type, FieldMemOperand(obj, HeapObject::kMapOffset));
+ lbu(type, FieldMemOperand(type, Map::kInstanceTypeOffset));
+ And(type, type, Operand(kIsNotStringMask));
+ ASSERT_EQ(0, kStringTag);
+ return eq;
+ }
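+
+ // Usage sketch (illustrative): the returned condition is applied to the
+ // |type| register, which is zero exactly when the object is a string:
+ //   Condition is_string = IsObjectStringType(a0, t0, t1);
+ //   Branch(&handle_string, is_string, t0, Operand(zero_reg));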
+
+
// Picks out an array index from the hash field.
// Register use:
// hash - holds the index's hash. Clobbered.
@@ -827,27 +1042,9 @@ class MacroAssembler: public Assembler {
void CallStub(CodeStub* stub, Condition cond = cc_always,
Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
- // Call a code stub and return the code object called. Try to generate
- // the code if necessary. Do not perform a GC but instead return a retry
- // after GC failure.
- MUST_USE_RESULT MaybeObject* TryCallStub(CodeStub* stub,
- Condition cond = cc_always,
- Register r1 = zero_reg,
- const Operand& r2 =
- Operand(zero_reg));
-
// Tail call a code stub (jump).
void TailCallStub(CodeStub* stub);
- // Tail call a code stub (jump) and return the code object called. Try to
- // generate the code if necessary. Do not perform a GC but instead return
- // a retry after GC failure.
- MUST_USE_RESULT MaybeObject* TryTailCallStub(CodeStub* stub,
- Condition cond = cc_always,
- Register r1 = zero_reg,
- const Operand& r2 =
- Operand(zero_reg));
-
void CallJSExitStub(CodeStub* stub);
// Call a runtime routine.
@@ -868,17 +1065,14 @@ class MacroAssembler: public Assembler {
int num_arguments,
int result_size);
- // Tail call of a runtime routine (jump). Try to generate the code if
- // necessary. Do not perform a GC but instead return a retry after GC
- // failure.
- MUST_USE_RESULT MaybeObject* TryTailCallExternalReference(
- const ExternalReference& ext, int num_arguments, int result_size);
-
// Convenience function: tail call a runtime routine (jump).
void TailCallRuntime(Runtime::FunctionId fid,
int num_arguments,
int result_size);
+ int CalculateStackPassedWords(int num_reg_arguments,
+ int num_double_arguments);
+
// Before calling a C-function from generated code, align arguments on stack
// and add space for the four mips argument slots.
// After aligning the frame, non-register arguments must be stored on the
@@ -888,7 +1082,11 @@ class MacroAssembler: public Assembler {
// C++ code.
// Needs a scratch register to do some arithmetic. This register will be
// trashed.
- void PrepareCallCFunction(int num_arguments, Register scratch);
+ void PrepareCallCFunction(int num_reg_arguments,
+ int num_double_registers,
+ Register scratch);
+ void PrepareCallCFunction(int num_reg_arguments,
+ Register scratch);
// Arguments 1-4 are placed in registers a0 thru a3 respectively.
// Arguments 5..n are stored on the stack using the following:
@@ -900,7 +1098,13 @@ class MacroAssembler: public Assembler {
// return address (unless this is somehow accounted for by the called
// function).
void CallCFunction(ExternalReference function, int num_arguments);
- void CallCFunction(Register function, Register scratch, int num_arguments);
+ void CallCFunction(Register function, int num_arguments);
+ void CallCFunction(ExternalReference function,
+ int num_reg_arguments,
+ int num_double_arguments);
+ void CallCFunction(Register function,
+ int num_reg_arguments,
+ int num_double_arguments);
void GetCFunctionDoubleResult(const DoubleRegister dst);
// There are two ways of passing double arguments on MIPS, depending on
@@ -911,16 +1115,15 @@ class MacroAssembler: public Assembler {
void SetCallCDoubleArguments(DoubleRegister dreg1, DoubleRegister dreg2);
void SetCallCDoubleArguments(DoubleRegister dreg, Register reg);
- // Calls an API function. Allocates HandleScope, extracts returned value
- // from handle and propagates exceptions. Restores context.
- MaybeObject* TryCallApiFunctionAndReturn(ExternalReference function,
- int stack_space);
+ // Calls an API function. Allocates HandleScope, extracts returned value
+ // from handle and propagates exceptions. Restores context. stack_space is
+ // the space to be unwound on exit (it includes the JS call arguments space
+ // and the additional space allocated for the fast call).
+ void CallApiFunctionAndReturn(ExternalReference function, int stack_space);
// Jump to the builtin routine.
void JumpToExternalReference(const ExternalReference& builtin);
- MaybeObject* TryJumpToExternalReference(const ExternalReference& ext);
-
// Invoke specified builtin JavaScript function. Adds an entry to
// the unresolved list if the name does not resolve.
void InvokeBuiltin(Builtins::JavaScript id,
@@ -976,6 +1179,9 @@ class MacroAssembler: public Assembler {
bool generating_stub() { return generating_stub_; }
void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
bool allow_stub_calls() { return allow_stub_calls_; }
+ void set_has_frame(bool value) { has_frame_ = value; }
+ bool has_frame() { return has_frame_; }
+ inline bool AllowThisStubCall(CodeStub* stub);
// ---------------------------------------------------------------------------
// Number utilities.
@@ -1003,6 +1209,13 @@ class MacroAssembler: public Assembler {
Addu(reg, reg, reg);
}
+ // Test for overflow < 0: use BranchOnOverflow() or BranchOnNoOverflow().
+ void SmiTagCheckOverflow(Register reg, Register overflow) {
+ mov(overflow, reg); // Save original value.
+ addu(reg, reg, reg);
+ xor_(overflow, overflow, reg); // Overflow if (value ^ 2 * value) < 0.
+ }
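+
+ // Worked example (illustrative): tagging reg = 0x40000000 doubles it to
+ // 0x80000000, so overflow = 0x40000000 ^ 0x80000000 = 0xc0000000, which is
+ // negative, signalling overflow; callers then branch on the sign:
+ //   SmiTagCheckOverflow(a0, t0);
+ //   BranchOnOverflow(&not_a_smi, t0);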
+
void SmiTag(Register dst, Register src) {
Addu(dst, src, src);
}
@@ -1017,10 +1230,11 @@ class MacroAssembler: public Assembler {
// Jump if the register contains a smi.
inline void JumpIfSmi(Register value, Label* smi_label,
- Register scratch = at) {
+ Register scratch = at,
+ BranchDelaySlot bd = PROTECT) {
ASSERT_EQ(0, kSmiTag);
andi(scratch, value, kSmiTagMask);
- Branch(smi_label, eq, scratch, Operand(zero_reg));
+ Branch(bd, smi_label, eq, scratch, Operand(zero_reg));
}
// Jump if the register contains a non-smi.
@@ -1090,13 +1304,29 @@ class MacroAssembler: public Assembler {
Register scratch2,
Label* failure);
+ void ClampUint8(Register output_reg, Register input_reg);
+
+ void ClampDoubleToUint8(Register result_reg,
+ DoubleRegister input_reg,
+ DoubleRegister temp_double_reg);
+
+
void LoadInstanceDescriptors(Register map, Register descriptors);
+
+ // Activation support.
+ void EnterFrame(StackFrame::Type type);
+ void LeaveFrame(StackFrame::Type type);
+
+ // Patch the relocated value (lui/ori pair).
+ void PatchRelocatedValue(Register li_location,
+ Register scratch,
+ Register new_value);
+
private:
void CallCFunctionHelper(Register function,
- ExternalReference function_reference,
- Register scratch,
- int num_arguments);
+ int num_reg_arguments,
+ int num_double_arguments);
void BranchShort(int16_t offset, BranchDelaySlot bdslot = PROTECT);
void BranchShort(int16_t offset, Condition cond, Register rs,
@@ -1132,25 +1362,37 @@ class MacroAssembler: public Assembler {
// the function in the 'resolved' flag.
Handle<Code> ResolveBuiltin(Builtins::JavaScript id, bool* resolved);
- // Activation support.
- void EnterFrame(StackFrame::Type type);
- void LeaveFrame(StackFrame::Type type);
-
void InitializeNewString(Register string,
Register length,
Heap::RootListIndex map_index,
Register scratch1,
Register scratch2);
+ // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
+ void InNewSpace(Register object,
+ Register scratch,
+ Condition cond, // eq for new space, ne otherwise.
+ Label* branch);
+
+ // Helper for finding the mark bits for an address. Afterwards, the
+ // bitmap register points at the word with the mark bits and the mask
+ // register holds the position of the first bit. Leaves addr_reg
+ // unchanged.
+ inline void GetMarkBits(Register addr_reg,
+ Register bitmap_reg,
+ Register mask_reg);
+
+ // Helper for throwing exceptions. Compute a handler address and jump to
+ // it. See the implementation for register usage.
+ void JumpToHandlerEntry();
+
// Compute memory operands for safepoint stack slots.
static int SafepointRegisterStackIndex(int reg_code);
MemOperand SafepointRegisterSlot(Register reg);
MemOperand SafepointRegistersAndDoublesSlot(Register reg);
- bool UseAbsoluteCodePointers();
-
bool generating_stub_;
bool allow_stub_calls_;
+ bool has_frame_;
// This handle will be patched with the code object on installation.
Handle<Object> code_object_;
@@ -1191,34 +1433,6 @@ class CodePatcher {
};
-// -----------------------------------------------------------------------------
-// Static helper functions.
-
-static MemOperand ContextOperand(Register context, int index) {
- return MemOperand(context, Context::SlotOffset(index));
-}
-
-
-static inline MemOperand GlobalObjectOperand() {
- return ContextOperand(cp, Context::GLOBAL_INDEX);
-}
-
-
-// Generate a MemOperand for loading a field from an object.
-static inline MemOperand FieldMemOperand(Register object, int offset) {
- return MemOperand(object, offset - kHeapObjectTag);
-}
-
-
-// Generate a MemOperand for storing arguments 5..N on the stack
-// when calling CallCFunction().
-static inline MemOperand CFunctionArgumentOperand(int index) {
- ASSERT(index > kCArgSlotCount);
- // Argument 5 takes the slot just past the four Arg-slots.
- int offset = (index - 5) * kPointerSize + kCArgsSlotsSize;
- return MemOperand(sp, offset);
-}
-
#ifdef GENERATED_CODE_COVERAGE
#define CODE_COVERAGE_STRINGIFY(x) #x
diff --git a/deps/v8/src/mips/regexp-macro-assembler-mips.cc b/deps/v8/src/mips/regexp-macro-assembler-mips.cc
index 63e836f22..cb210fed0 100644
--- a/deps/v8/src/mips/regexp-macro-assembler-mips.cc
+++ b/deps/v8/src/mips/regexp-macro-assembler-mips.cc
@@ -377,9 +377,12 @@ void RegExpMacroAssemblerMIPS::CheckNotBackReferenceIgnoreCase(
// Isolate.
__ li(a3, Operand(ExternalReference::isolate_address()));
- ExternalReference function =
- ExternalReference::re_case_insensitive_compare_uc16(masm_->isolate());
- __ CallCFunction(function, argument_count);
+ {
+ AllowExternalCallThatCantCauseGC scope(masm_);
+ ExternalReference function =
+ ExternalReference::re_case_insensitive_compare_uc16(masm_->isolate());
+ __ CallCFunction(function, argument_count);
+ }
// Restore regexp engine registers.
__ MultiPop(regexp_registers_to_retain);
@@ -607,6 +610,12 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
// Entry code:
__ bind(&entry_label_);
+
+ // Tell the system that we have a stack frame. Because the type is MANUAL,
+ // no code is generated.
+ FrameScope scope(masm_, StackFrame::MANUAL);
+
+ // Actually emit code to start a new stack frame.
// Push arguments
// Save callee-save registers.
// Start new stack frame.
@@ -1103,6 +1112,11 @@ int RegExpMacroAssemblerMIPS::CheckStackGuardState(Address* return_address,
frame_entry<const String*>(re_frame, kInputString) = *subject;
frame_entry<const byte*>(re_frame, kInputStart) = new_address;
frame_entry<const byte*>(re_frame, kInputEnd) = new_address + byte_length;
+ } else if (frame_entry<const String*>(re_frame, kInputString) != *subject) {
+ // Subject string might have been a ConsString that underwent
+ // short-circuiting during GC. That will not change start_address but
+ // will change the pointer inside the subject handle.
+ frame_entry<const String*>(re_frame, kInputString) = *subject;
}
return 0;
@@ -1244,13 +1258,14 @@ void RegExpCEntryStub::Generate(MacroAssembler* masm_) {
if (stack_alignment < kPointerSize) stack_alignment = kPointerSize;
// Stack is already aligned for call, so decrement by alignment
// to make room for storing the return address.
- __ Subu(sp, sp, Operand(stack_alignment));
- __ sw(ra, MemOperand(sp, 0));
- __ mov(a0, sp);
+ __ Subu(sp, sp, Operand(stack_alignment + kCArgsSlotsSize));
+ const int return_address_offset = kCArgsSlotsSize;
+ __ Addu(a0, sp, return_address_offset);
+ __ sw(ra, MemOperand(a0, 0));
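+ // Stack layout sketch at this point (illustrative; under the O32 ABI
+ // kCArgsSlotsSize is four words):
+ //   [sp + kCArgsSlotsSize]       saved ra; a0 points here for the callee.
+ //   [sp + 0 .. kCArgsSlotsSize)  the four O32 C argument slots.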
__ mov(t9, t1);
__ Call(t9);
- __ lw(ra, MemOperand(sp, 0));
- __ Addu(sp, sp, Operand(stack_alignment));
+ __ lw(ra, MemOperand(sp, return_address_offset));
+ __ Addu(sp, sp, Operand(stack_alignment + kCArgsSlotsSize));
__ Jump(ra);
}
diff --git a/deps/v8/src/mips/simulator-mips.cc b/deps/v8/src/mips/simulator-mips.cc
index 17c18977c..f70775d86 100644
--- a/deps/v8/src/mips/simulator-mips.cc
+++ b/deps/v8/src/mips/simulator-mips.cc
@@ -72,7 +72,7 @@ uint32_t get_fcsr_condition_bit(uint32_t cc) {
// code.
class MipsDebugger {
public:
- explicit MipsDebugger(Simulator* sim);
+ explicit MipsDebugger(Simulator* sim) : sim_(sim) { }
~MipsDebugger();
void Stop(Instruction* instr);
@@ -105,10 +105,6 @@ class MipsDebugger {
void RedoBreakpoints();
};
-MipsDebugger::MipsDebugger(Simulator* sim) {
- sim_ = sim;
-}
-
MipsDebugger::~MipsDebugger() {
}
@@ -391,6 +387,13 @@ void MipsDebugger::Debug() {
if (line == NULL) {
break;
} else {
+ char* last_input = sim_->last_debugger_input();
+ if (strcmp(line, "\n") == 0 && last_input != NULL) {
+ line = last_input;
+ } else {
+ // Ownership is transferred to sim_.
+ sim_->set_last_debugger_input(line);
+ }
// Use sscanf to parse the individual parts of the command line. At the
// moment no command expects more than two parameters.
int argc = SScanF(line,
@@ -757,7 +760,6 @@ void MipsDebugger::Debug() {
PrintF("Unknown command: %s\n", cmd);
}
}
- DeleteArray(line);
}
// Add all the breakpoints back to stop execution and enter the debugger
@@ -791,6 +793,12 @@ static bool AllOnOnePage(uintptr_t start, int size) {
}
+void Simulator::set_last_debugger_input(char* input) {
+ DeleteArray(last_debugger_input_);
+ last_debugger_input_ = input;
+}
+
+
void Simulator::FlushICache(v8::internal::HashMap* i_cache,
void* start_addr,
size_t size) {
@@ -911,6 +919,8 @@ Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
for (int i = 0; i < kNumExceptions; i++) {
exceptions[i] = 0;
}
+
+ last_debugger_input_ = NULL;
}
@@ -1359,9 +1369,9 @@ void Simulator::WriteB(int32_t addr, int8_t value) {
// Returns the limit of the stack area to enable checking for stack overflows.
uintptr_t Simulator::StackLimit() const {
- // Leave a safety margin of 256 bytes to prevent overrunning the stack when
+ // Leave a safety margin of 512 bytes to prevent overrunning the stack when
// pushing values.
- return reinterpret_cast<uintptr_t>(stack_) + 256;
+ return reinterpret_cast<uintptr_t>(stack_) + 512;
}
diff --git a/deps/v8/src/mips/simulator-mips.h b/deps/v8/src/mips/simulator-mips.h
index 69dddfad3..ba625f45b 100644
--- a/deps/v8/src/mips/simulator-mips.h
+++ b/deps/v8/src/mips/simulator-mips.h
@@ -221,6 +221,10 @@ class Simulator {
// Pop an address from the JS stack.
uintptr_t PopAddress();
+ // Debugger input.
+ void set_last_debugger_input(char* input);
+ char* last_debugger_input() { return last_debugger_input_; }
+
// ICache checking.
static void FlushICache(v8::internal::HashMap* i_cache, void* start,
size_t size);
@@ -358,6 +362,9 @@ class Simulator {
int icount_;
int break_count_;
+ // Debugger input.
+ char* last_debugger_input_;
+
// Icache simulation.
v8::internal::HashMap* i_cache_;
diff --git a/deps/v8/src/mips/stub-cache-mips.cc b/deps/v8/src/mips/stub-cache-mips.cc
index 5b949734f..76452f0c9 100644
--- a/deps/v8/src/mips/stub-cache-mips.cc
+++ b/deps/v8/src/mips/stub-cache-mips.cc
@@ -99,13 +99,12 @@ static void ProbeTable(Isolate* isolate,
// must always call a backup property check that is complete.
// This function is safe to call if the receiver has fast properties.
// Name must be a symbol and receiver must be a heap object.
-MUST_USE_RESULT static MaybeObject* GenerateDictionaryNegativeLookup(
- MacroAssembler* masm,
- Label* miss_label,
- Register receiver,
- String* name,
- Register scratch0,
- Register scratch1) {
+static void GenerateDictionaryNegativeLookup(MacroAssembler* masm,
+ Label* miss_label,
+ Register receiver,
+ Handle<String> name,
+ Register scratch0,
+ Register scratch1) {
ASSERT(name->IsSymbol());
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->negative_lookups(), 1, scratch0, scratch1);
@@ -120,9 +119,8 @@ MUST_USE_RESULT static MaybeObject* GenerateDictionaryNegativeLookup(
Register map = scratch1;
__ lw(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ lbu(scratch0, FieldMemOperand(map, Map::kBitFieldOffset));
- __ And(at, scratch0, Operand(kInterceptorOrAccessCheckNeededMask));
- __ Branch(miss_label, ne, at, Operand(zero_reg));
-
+ __ And(scratch0, scratch0, Operand(kInterceptorOrAccessCheckNeededMask));
+ __ Branch(miss_label, ne, scratch0, Operand(zero_reg));
// Check that receiver is a JSObject.
__ lbu(scratch0, FieldMemOperand(map, Map::kInstanceTypeOffset));
@@ -140,20 +138,16 @@ MUST_USE_RESULT static MaybeObject* GenerateDictionaryNegativeLookup(
// Restore the temporarily used register.
__ lw(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
- MaybeObject* result = StringDictionaryLookupStub::GenerateNegativeLookup(
- masm,
- miss_label,
- &done,
- receiver,
- properties,
- name,
- scratch1);
- if (result->IsFailure()) return result;
+ StringDictionaryLookupStub::GenerateNegativeLookup(masm,
+ miss_label,
+ &done,
+ receiver,
+ properties,
+ name,
+ scratch1);
__ bind(&done);
__ DecrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
-
- return result;
}
@@ -240,7 +234,10 @@ void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
- MacroAssembler* masm, int index, Register prototype, Label* miss) {
+ MacroAssembler* masm,
+ int index,
+ Register prototype,
+ Label* miss) {
Isolate* isolate = masm->isolate();
// Check we're still in the same context.
__ lw(prototype, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
@@ -248,8 +245,8 @@ void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
__ li(at, isolate->global());
__ Branch(miss, ne, prototype, Operand(at));
// Get the global function with the given index.
- JSFunction* function =
- JSFunction::cast(isolate->global_context()->get(index));
+ Handle<JSFunction> function(
+ JSFunction::cast(isolate->global_context()->get(index)));
// Load its initial map. The global functions all have initial maps.
__ li(prototype, Handle<Map>(function->initial_map()));
// Load the prototype from the initial map.
@@ -261,8 +258,10 @@ void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
// are loaded directly otherwise the property is loaded from the properties
// fixed array.
void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
- Register dst, Register src,
- JSObject* holder, int index) {
+ Register dst,
+ Register src,
+ Handle<JSObject> holder,
+ int index) {
// Adjust for the number of properties stored in the holder.
index -= holder->map()->inobject_properties();
if (index < 0) {
@@ -283,8 +282,7 @@ void StubCompiler::GenerateLoadArrayLength(MacroAssembler* masm,
Register scratch,
Label* miss_label) {
// Check that the receiver isn't a smi.
- __ And(scratch, receiver, Operand(kSmiTagMask));
- __ Branch(miss_label, eq, scratch, Operand(zero_reg));
+ __ JumpIfSmi(receiver, miss_label);
// Check that the object is a JS array.
__ GetObjectType(receiver, scratch, scratch);
@@ -370,9 +368,9 @@ void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm,
// After executing generated code, the receiver_reg and name_reg
// may be clobbered.
void StubCompiler::GenerateStoreField(MacroAssembler* masm,
- JSObject* object,
+ Handle<JSObject> object,
int index,
- Map* transition,
+ Handle<Map> transition,
Register receiver_reg,
Register name_reg,
Register scratch,
@@ -397,11 +395,11 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
// Perform map transition for the receiver if necessary.
- if ((transition != NULL) && (object->map()->unused_property_fields() == 0)) {
+ if (!transition.is_null() && (object->map()->unused_property_fields() == 0)) {
// The properties must be extended before we can store the value.
// We jump to a runtime call that extends the properties array.
__ push(receiver_reg);
- __ li(a2, Operand(Handle<Map>(transition)));
+ __ li(a2, Operand(transition));
__ Push(a2, a0);
__ TailCallExternalReference(
ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage),
@@ -410,10 +408,10 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
return;
}
- if (transition != NULL) {
+ if (!transition.is_null()) {
// Update the map of the object; no write barrier updating is
// needed because the map is never in new space.
- __ li(t0, Operand(Handle<Map>(transition)));
+ __ li(t0, Operand(transition));
__ sw(t0, FieldMemOperand(receiver_reg, HeapObject::kMapOffset));
}
@@ -432,7 +430,13 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
// Update the write barrier for the array address.
// Pass the now unused name_reg as a scratch register.
- __ RecordWrite(receiver_reg, Operand(offset), name_reg, scratch);
+ __ mov(name_reg, a0);
+ __ RecordWriteField(receiver_reg,
+ offset,
+ name_reg,
+ scratch,
+ kRAHasNotBeenSaved,
+ kDontSaveFPRegs);
} else {
// Write to the properties array.
int offset = index * kPointerSize + FixedArray::kHeaderSize;
@@ -445,7 +449,13 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
// Update the write barrier for the array address.
// Ok to clobber receiver_reg and name_reg, since we return.
- __ RecordWrite(scratch, Operand(offset), name_reg, receiver_reg);
+ __ mov(name_reg, a0);
+ __ RecordWriteField(scratch,
+ offset,
+ name_reg,
+ receiver_reg,
+ kRAHasNotBeenSaved,
+ kDontSaveFPRegs);
}
// Return the value (register v0).
@@ -457,20 +467,15 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
void StubCompiler::GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind) {
ASSERT(kind == Code::LOAD_IC || kind == Code::KEYED_LOAD_IC);
- Code* code = NULL;
- if (kind == Code::LOAD_IC) {
- code = masm->isolate()->builtins()->builtin(Builtins::kLoadIC_Miss);
- } else {
- code = masm->isolate()->builtins()->builtin(Builtins::kKeyedLoadIC_Miss);
- }
-
- Handle<Code> ic(code);
- __ Jump(ic, RelocInfo::CODE_TARGET);
+ Handle<Code> code = (kind == Code::LOAD_IC)
+ ? masm->isolate()->builtins()->LoadIC_Miss()
+ : masm->isolate()->builtins()->KeyedLoadIC_Miss();
+ __ Jump(code, RelocInfo::CODE_TARGET);
}
static void GenerateCallFunction(MacroAssembler* masm,
- Object* object,
+ Handle<Object> object,
const ParameterCount& arguments,
Label* miss,
Code::ExtraICState extra_ic_state) {
@@ -502,23 +507,24 @@ static void PushInterceptorArguments(MacroAssembler* masm,
Register receiver,
Register holder,
Register name,
- JSObject* holder_obj) {
+ Handle<JSObject> holder_obj) {
__ push(name);
- InterceptorInfo* interceptor = holder_obj->GetNamedInterceptor();
- ASSERT(!masm->isolate()->heap()->InNewSpace(interceptor));
+ Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor());
+ ASSERT(!masm->isolate()->heap()->InNewSpace(*interceptor));
Register scratch = name;
- __ li(scratch, Operand(Handle<Object>(interceptor)));
+ __ li(scratch, Operand(interceptor));
__ Push(scratch, receiver, holder);
__ lw(scratch, FieldMemOperand(scratch, InterceptorInfo::kDataOffset));
__ push(scratch);
}
-static void CompileCallLoadPropertyWithInterceptor(MacroAssembler* masm,
- Register receiver,
- Register holder,
- Register name,
- JSObject* holder_obj) {
+static void CompileCallLoadPropertyWithInterceptor(
+ MacroAssembler* masm,
+ Register receiver,
+ Register holder,
+ Register name,
+ Handle<JSObject> holder_obj) {
PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
ExternalReference ref =
@@ -554,7 +560,7 @@ static void FreeSpaceForFastApiCall(MacroAssembler* masm) {
}
-static MaybeObject* GenerateFastApiDirectCall(MacroAssembler* masm,
+static void GenerateFastApiDirectCall(MacroAssembler* masm,
const CallOptimization& optimization,
int argc) {
// ----------- S t a t e -------------
@@ -567,18 +573,18 @@ static MaybeObject* GenerateFastApiDirectCall(MacroAssembler* masm,
// -- sp[(argc + 4) * 4] : receiver
// -----------------------------------
// Get the function and setup the context.
- JSFunction* function = optimization.constant_function();
- __ li(t1, Operand(Handle<JSFunction>(function)));
+ Handle<JSFunction> function = optimization.constant_function();
+ __ li(t1, Operand(function));
__ lw(cp, FieldMemOperand(t1, JSFunction::kContextOffset));
// Pass the additional arguments FastHandleApiCall expects.
- Object* call_data = optimization.api_call_info()->data();
- Handle<CallHandlerInfo> api_call_info_handle(optimization.api_call_info());
- if (masm->isolate()->heap()->InNewSpace(call_data)) {
- __ li(a0, api_call_info_handle);
+ Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
+ Handle<Object> call_data(api_call_info->data());
+ if (masm->isolate()->heap()->InNewSpace(*call_data)) {
+ __ li(a0, api_call_info);
__ lw(t2, FieldMemOperand(a0, CallHandlerInfo::kDataOffset));
} else {
- __ li(t2, Operand(Handle<Object>(call_data)));
+ __ li(t2, call_data);
}
// Store js function and call data.
@@ -589,12 +595,9 @@ static MaybeObject* GenerateFastApiDirectCall(MacroAssembler* masm,
// (refer to layout above).
__ Addu(a2, sp, Operand(2 * kPointerSize));
- Object* callback = optimization.api_call_info()->callback();
- Address api_function_address = v8::ToCData<Address>(callback);
- ApiFunction fun(api_function_address);
-
const int kApiStackSpace = 4;
+ FrameScope frame_scope(masm, StackFrame::MANUAL);
__ EnterExitFrame(false, kApiStackSpace);
// NOTE: the O32 abi requires a0 to hold a special pointer when returning a
@@ -617,16 +620,15 @@ static MaybeObject* GenerateFastApiDirectCall(MacroAssembler* masm,
// v8::Arguments::is_construct_call = 0
__ sw(zero_reg, MemOperand(a1, 3 * kPointerSize));
- // Emitting a stub call may try to allocate (if the code is not
- // already generated). Do not allow the assembler to perform a
- // garbage collection but instead return the allocation failure
- // object.
const int kStackUnwindSpace = argc + kFastApiCallArguments + 1;
+ Address function_address = v8::ToCData<Address>(api_call_info->callback());
+ ApiFunction fun(function_address);
ExternalReference ref =
ExternalReference(&fun,
ExternalReference::DIRECT_API_CALL,
masm->isolate());
- return masm->TryCallApiFunctionAndReturn(ref, kStackUnwindSpace);
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ CallApiFunctionAndReturn(ref, kStackUnwindSpace);
}
class CallInterceptorCompiler BASE_EMBEDDED {
@@ -640,86 +642,63 @@ class CallInterceptorCompiler BASE_EMBEDDED {
name_(name),
extra_ic_state_(extra_ic_state) {}
- MaybeObject* Compile(MacroAssembler* masm,
- JSObject* object,
- JSObject* holder,
- String* name,
- LookupResult* lookup,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* miss) {
+ void Compile(MacroAssembler* masm,
+ Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<String> name,
+ LookupResult* lookup,
+ Register receiver,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* miss) {
ASSERT(holder->HasNamedInterceptor());
ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
// Check that the receiver isn't a smi.
__ JumpIfSmi(receiver, miss);
-
CallOptimization optimization(lookup);
-
if (optimization.is_constant_call()) {
- return CompileCacheable(masm,
- object,
- receiver,
- scratch1,
- scratch2,
- scratch3,
- holder,
- lookup,
- name,
- optimization,
- miss);
+ CompileCacheable(masm, object, receiver, scratch1, scratch2, scratch3,
+ holder, lookup, name, optimization, miss);
} else {
- CompileRegular(masm,
- object,
- receiver,
- scratch1,
- scratch2,
- scratch3,
- name,
- holder,
- miss);
- return masm->isolate()->heap()->undefined_value();
+ CompileRegular(masm, object, receiver, scratch1, scratch2, scratch3,
+ name, holder, miss);
}
}
private:
- MaybeObject* CompileCacheable(MacroAssembler* masm,
- JSObject* object,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- JSObject* interceptor_holder,
- LookupResult* lookup,
- String* name,
- const CallOptimization& optimization,
- Label* miss_label) {
+ void CompileCacheable(MacroAssembler* masm,
+ Handle<JSObject> object,
+ Register receiver,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Handle<JSObject> interceptor_holder,
+ LookupResult* lookup,
+ Handle<String> name,
+ const CallOptimization& optimization,
+ Label* miss_label) {
ASSERT(optimization.is_constant_call());
ASSERT(!lookup->holder()->IsGlobalObject());
-
Counters* counters = masm->isolate()->counters();
-
int depth1 = kInvalidProtoDepth;
int depth2 = kInvalidProtoDepth;
bool can_do_fast_api_call = false;
if (optimization.is_simple_api_call() &&
- !lookup->holder()->IsGlobalObject()) {
- depth1 =
- optimization.GetPrototypeDepthOfExpectedType(object,
- interceptor_holder);
+ !lookup->holder()->IsGlobalObject()) {
+ depth1 = optimization.GetPrototypeDepthOfExpectedType(
+ object, interceptor_holder);
if (depth1 == kInvalidProtoDepth) {
- depth2 =
- optimization.GetPrototypeDepthOfExpectedType(interceptor_holder,
- lookup->holder());
+ depth2 = optimization.GetPrototypeDepthOfExpectedType(
+ interceptor_holder, Handle<JSObject>(lookup->holder()));
}
- can_do_fast_api_call = (depth1 != kInvalidProtoDepth) ||
- (depth2 != kInvalidProtoDepth);
+ can_do_fast_api_call =
+ depth1 != kInvalidProtoDepth || depth2 != kInvalidProtoDepth;
}
__ IncrementCounter(counters->call_const_interceptor(), 1,
- scratch1, scratch2);
+ scratch1, scratch2);
if (can_do_fast_api_call) {
__ IncrementCounter(counters->call_const_interceptor_fast_api(), 1,
@@ -732,9 +711,9 @@ class CallInterceptorCompiler BASE_EMBEDDED {
Label miss_cleanup;
Label* miss = can_do_fast_api_call ? &miss_cleanup : miss_label;
Register holder =
- stub_compiler_->CheckPrototypes(object, receiver,
- interceptor_holder, scratch1,
- scratch2, scratch3, name, depth1, miss);
+ stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
+ scratch1, scratch2, scratch3,
+ name, depth1, miss);
// Invoke an interceptor and if it provides a value,
// branch to |regular_invoke|.
@@ -747,10 +726,11 @@ class CallInterceptorCompiler BASE_EMBEDDED {
// Check that the maps from interceptor's holder to constant function's
// holder haven't changed and thus we can use cached constant function.
- if (interceptor_holder != lookup->holder()) {
+ if (*interceptor_holder != lookup->holder()) {
stub_compiler_->CheckPrototypes(interceptor_holder, receiver,
- lookup->holder(), scratch1,
- scratch2, scratch3, name, depth2, miss);
+ Handle<JSObject>(lookup->holder()),
+ scratch1, scratch2, scratch3,
+ name, depth2, miss);
} else {
// CheckPrototypes has a side effect of fetching a 'holder'
// for API (object which is instanceof for the signature). It's
@@ -761,10 +741,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
// Invoke function.
if (can_do_fast_api_call) {
- MaybeObject* result = GenerateFastApiDirectCall(masm,
- optimization,
- arguments_.immediate());
- if (result->IsFailure()) return result;
+ GenerateFastApiDirectCall(masm, optimization, arguments_.immediate());
} else {
CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
? CALL_AS_FUNCTION
@@ -785,66 +762,57 @@ class CallInterceptorCompiler BASE_EMBEDDED {
if (can_do_fast_api_call) {
FreeSpaceForFastApiCall(masm);
}
-
- return masm->isolate()->heap()->undefined_value();
}
void CompileRegular(MacroAssembler* masm,
- JSObject* object,
+ Handle<JSObject> object,
Register receiver,
Register scratch1,
Register scratch2,
Register scratch3,
- String* name,
- JSObject* interceptor_holder,
+ Handle<String> name,
+ Handle<JSObject> interceptor_holder,
Label* miss_label) {
Register holder =
stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
- scratch1, scratch2, scratch3, name,
- miss_label);
+ scratch1, scratch2, scratch3,
+ name, miss_label);
// Call a runtime function to load the interceptor property.
- __ EnterInternalFrame();
+ FrameScope scope(masm, StackFrame::INTERNAL);
// Save the name_ register across the call.
__ push(name_);
- PushInterceptorArguments(masm,
- receiver,
- holder,
- name_,
- interceptor_holder);
+ PushInterceptorArguments(masm, receiver, holder, name_, interceptor_holder);
__ CallExternalReference(
ExternalReference(
IC_Utility(IC::kLoadPropertyWithInterceptorForCall),
masm->isolate()),
5);
-
// Restore the name_ register.
__ pop(name_);
- __ LeaveInternalFrame();
+ // Leave the internal frame.
}
void LoadWithInterceptor(MacroAssembler* masm,
Register receiver,
Register holder,
- JSObject* holder_obj,
+ Handle<JSObject> holder_obj,
Register scratch,
Label* interceptor_succeeded) {
- __ EnterInternalFrame();
-
- __ Push(holder, name_);
-
- CompileCallLoadPropertyWithInterceptor(masm,
- receiver,
- holder,
- name_,
- holder_obj);
-
- __ pop(name_); // Restore the name.
- __ pop(receiver); // Restore the holder.
- __ LeaveInternalFrame();
-
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ __ Push(holder, name_);
+ CompileCallLoadPropertyWithInterceptor(masm,
+ receiver,
+ holder,
+ name_,
+ holder_obj);
+ __ pop(name_); // Restore the name.
+ __ pop(receiver); // Restore the holder.
+ }
// If interceptor returns no-result sentinel, call the constant function.
__ LoadRoot(scratch, Heap::kNoInterceptorResultSentinelRootIndex);
__ Branch(interceptor_succeeded, ne, v0, Operand(scratch));
@@ -861,52 +829,41 @@ class CallInterceptorCompiler BASE_EMBEDDED {
// Generate code to check that a global property cell is empty. Create
// the property cell at compilation time if no cell exists for the
// property.
-MUST_USE_RESULT static MaybeObject* GenerateCheckPropertyCell(
- MacroAssembler* masm,
- GlobalObject* global,
- String* name,
- Register scratch,
- Label* miss) {
- Object* probe;
- { MaybeObject* maybe_probe = global->EnsurePropertyCell(name);
- if (!maybe_probe->ToObject(&probe)) return maybe_probe;
- }
- JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(probe);
+static void GenerateCheckPropertyCell(MacroAssembler* masm,
+ Handle<GlobalObject> global,
+ Handle<String> name,
+ Register scratch,
+ Label* miss) {
+ Handle<JSGlobalPropertyCell> cell =
+ GlobalObject::EnsurePropertyCell(global, name);
ASSERT(cell->value()->IsTheHole());
- __ li(scratch, Operand(Handle<Object>(cell)));
+ __ li(scratch, Operand(cell));
__ lw(scratch,
FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
__ Branch(miss, ne, scratch, Operand(at));
- return cell;
}
// Calls GenerateCheckPropertyCell for each global object in the prototype chain
// from object to (but not including) holder.
-MUST_USE_RESULT static MaybeObject* GenerateCheckPropertyCells(
- MacroAssembler* masm,
- JSObject* object,
- JSObject* holder,
- String* name,
- Register scratch,
- Label* miss) {
- JSObject* current = object;
- while (current != holder) {
+static void GenerateCheckPropertyCells(MacroAssembler* masm,
+ Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<String> name,
+ Register scratch,
+ Label* miss) {
+ Handle<JSObject> current = object;
+ while (!current.is_identical_to(holder)) {
if (current->IsGlobalObject()) {
- // Returns a cell or a failure.
- MaybeObject* result = GenerateCheckPropertyCell(
- masm,
- GlobalObject::cast(current),
- name,
- scratch,
- miss);
- if (result->IsFailure()) return result;
+ GenerateCheckPropertyCell(masm,
+ Handle<GlobalObject>::cast(current),
+ name,
+ scratch,
+ miss);
}
- ASSERT(current->IsJSObject());
- current = JSObject::cast(current->GetPrototype());
+ current = Handle<JSObject>(JSObject::cast(current->GetPrototype()));
}
- return NULL;
}
@@ -1030,13 +987,13 @@ static void GenerateUInt2Double(MacroAssembler* masm,
#define __ ACCESS_MASM(masm())
-Register StubCompiler::CheckPrototypes(JSObject* object,
+Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
Register object_reg,
- JSObject* holder,
+ Handle<JSObject> holder,
Register holder_reg,
Register scratch1,
Register scratch2,
- String* name,
+ Handle<String> name,
int save_at_depth,
Label* miss) {
// Make sure there's no overlap between holder and object registers.
@@ -1054,81 +1011,51 @@ Register StubCompiler::CheckPrototypes(JSObject* object,
// Check the maps in the prototype chain.
// Traverse the prototype chain from the object and do map checks.
- JSObject* current = object;
- while (current != holder) {
- depth++;
+ Handle<JSObject> current = object;
+ while (!current.is_identical_to(holder)) {
+ ++depth;
// Only global objects and objects that do not require access
// checks are allowed in stubs.
ASSERT(current->IsJSGlobalProxy() || !current->IsAccessCheckNeeded());
- ASSERT(current->GetPrototype()->IsJSObject());
- JSObject* prototype = JSObject::cast(current->GetPrototype());
+ Handle<JSObject> prototype(JSObject::cast(current->GetPrototype()));
if (!current->HasFastProperties() &&
!current->IsJSGlobalObject() &&
!current->IsJSGlobalProxy()) {
if (!name->IsSymbol()) {
- MaybeObject* maybe_lookup_result = heap()->LookupSymbol(name);
- Object* lookup_result = NULL; // Initialization to please compiler.
- if (!maybe_lookup_result->ToObject(&lookup_result)) {
- set_failure(Failure::cast(maybe_lookup_result));
- return reg;
- }
- name = String::cast(lookup_result);
+ name = factory()->LookupSymbol(name);
}
- ASSERT(current->property_dictionary()->FindEntry(name) ==
+ ASSERT(current->property_dictionary()->FindEntry(*name) ==
StringDictionary::kNotFound);
- MaybeObject* negative_lookup = GenerateDictionaryNegativeLookup(masm(),
- miss,
- reg,
- name,
- scratch1,
- scratch2);
- if (negative_lookup->IsFailure()) {
- set_failure(Failure::cast(negative_lookup));
- return reg;
- }
+ GenerateDictionaryNegativeLookup(masm(), miss, reg, name,
+ scratch1, scratch2);
__ lw(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
- reg = holder_reg; // From now the object is in holder_reg.
+ reg = holder_reg; // From now on the object will be in holder_reg.
__ lw(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
- } else if (heap()->InNewSpace(prototype)) {
- // Get the map of the current object.
+ } else {
+ Handle<Map> current_map(current->map());
__ lw(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
-
// Branch on the result of the map check.
- __ Branch(miss, ne, scratch1, Operand(Handle<Map>(current->map())));
-
- // Check access rights to the global object. This has to happen
- // after the map check so that we know that the object is
- // actually a global object.
+ __ Branch(miss, ne, scratch1, Operand(current_map));
+ // Check access rights to the global object. This has to happen after
+ // the map check so that we know that the object is actually a global
+ // object.
if (current->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(reg, scratch1, miss);
- // Restore scratch register to be the map of the object. In the
- // new space case below, we load the prototype from the map in
- // the scratch register.
- __ lw(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
+ __ CheckAccessGlobalProxy(reg, scratch2, miss);
}
+ reg = holder_reg; // From now on the object will be in holder_reg.
- reg = holder_reg; // From now the object is in holder_reg.
- // The prototype is in new space; we cannot store a reference
- // to it in the code. Load it from the map.
- __ lw(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
- } else {
- // Check the map of the current object.
- __ lw(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
- // Branch on the result of the map check.
- __ Branch(miss, ne, scratch1, Operand(Handle<Map>(current->map())));
- // Check access rights to the global object. This has to happen
- // after the map check so that we know that the object is
- // actually a global object.
- if (current->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(reg, scratch1, miss);
+ if (heap()->InNewSpace(*prototype)) {
+ // The prototype is in new space; we cannot store a reference to it
+ // in the code. Load it from the map.
+ __ lw(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
+ } else {
+ // The prototype is in old space; load it directly.
+ __ li(reg, Operand(prototype));
}
- // The prototype is in old space; load it directly.
- reg = holder_reg; // From now the object is in holder_reg.
- __ li(reg, Operand(Handle<JSObject>(prototype)));
}
if (save_at_depth == depth) {
@@ -1139,65 +1066,57 @@ Register StubCompiler::CheckPrototypes(JSObject* object,
current = prototype;
}
+ // Log the check depth.
+ LOG(masm()->isolate(), IntEvent("check-maps-depth", depth + 1));
+
// Check the holder map.
__ lw(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
__ Branch(miss, ne, scratch1, Operand(Handle<Map>(current->map())));
- // Log the check depth.
- LOG(masm()->isolate(), IntEvent("check-maps-depth", depth + 1));
// Perform security check for access to the global object.
ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
if (holder->IsJSGlobalProxy()) {
__ CheckAccessGlobalProxy(reg, scratch1, miss);
- };
-
- // If we've skipped any global objects, it's not enough to verify
- // that their maps haven't changed. We also need to check that the
- // property cell for the property is still empty.
+ }
- MaybeObject* result = GenerateCheckPropertyCells(masm(),
- object,
- holder,
- name,
- scratch1,
- miss);
- if (result->IsFailure()) set_failure(Failure::cast(result));
+ // If we've skipped any global objects, it's not enough to verify that
+ // their maps haven't changed. We also need to check that the property
+ // cell for the property is still empty.
+ GenerateCheckPropertyCells(masm(), object, holder, name, scratch1, miss);
// Return the register containing the holder.
return reg;
}
-void StubCompiler::GenerateLoadField(JSObject* object,
- JSObject* holder,
+void StubCompiler::GenerateLoadField(Handle<JSObject> object,
+ Handle<JSObject> holder,
Register receiver,
Register scratch1,
Register scratch2,
Register scratch3,
int index,
- String* name,
+ Handle<String> name,
Label* miss) {
// Check that the receiver isn't a smi.
- __ And(scratch1, receiver, Operand(kSmiTagMask));
- __ Branch(miss, eq, scratch1, Operand(zero_reg));
+ __ JumpIfSmi(receiver, miss);
// Check that the maps haven't changed.
- Register reg =
- CheckPrototypes(object, receiver, holder, scratch1, scratch2, scratch3,
- name, miss);
+ Register reg = CheckPrototypes(
+ object, receiver, holder, scratch1, scratch2, scratch3, name, miss);
GenerateFastPropertyLoad(masm(), v0, reg, holder, index);
__ Ret();
}
-void StubCompiler::GenerateLoadConstant(JSObject* object,
- JSObject* holder,
+void StubCompiler::GenerateLoadConstant(Handle<JSObject> object,
+ Handle<JSObject> holder,
Register receiver,
Register scratch1,
Register scratch2,
Register scratch3,
- Object* value,
- String* name,
+ Handle<Object> value,
+ Handle<String> name,
Label* miss) {
// Check that the receiver isn't a smi.
__ JumpIfSmi(receiver, miss, scratch1);
@@ -1208,83 +1127,77 @@ void StubCompiler::GenerateLoadConstant(JSObject* object,
scratch1, scratch2, scratch3, name, miss);
// Return the constant value.
- __ li(v0, Operand(Handle<Object>(value)));
+ __ li(v0, Operand(value));
__ Ret();
}
-MaybeObject* StubCompiler::GenerateLoadCallback(JSObject* object,
- JSObject* holder,
- Register receiver,
- Register name_reg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- AccessorInfo* callback,
- String* name,
- Label* miss) {
+void StubCompiler::GenerateLoadCallback(Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Register receiver,
+ Register name_reg,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Handle<AccessorInfo> callback,
+ Handle<String> name,
+ Label* miss) {
// Check that the receiver isn't a smi.
__ JumpIfSmi(receiver, miss, scratch1);
// Check that the maps haven't changed.
- Register reg =
- CheckPrototypes(object, receiver, holder, scratch1, scratch2, scratch3,
- name, miss);
+ Register reg = CheckPrototypes(object, receiver, holder, scratch1,
+ scratch2, scratch3, name, miss);
// Build AccessorInfo::args_ list on the stack and push property name below
// the exit frame to make GC aware of them and store pointers to them.
__ push(receiver);
__ mov(scratch2, sp); // scratch2 = AccessorInfo::args_
- Handle<AccessorInfo> callback_handle(callback);
- if (heap()->InNewSpace(callback_handle->data())) {
- __ li(scratch3, callback_handle);
+ if (heap()->InNewSpace(callback->data())) {
+ __ li(scratch3, callback);
__ lw(scratch3, FieldMemOperand(scratch3, AccessorInfo::kDataOffset));
} else {
- __ li(scratch3, Handle<Object>(callback_handle->data()));
+ __ li(scratch3, Handle<Object>(callback->data()));
}
__ Push(reg, scratch3, name_reg);
__ mov(a2, scratch2); // Saved in case scratch2 == a1.
__ mov(a1, sp); // a1 (first argument - see note below) = Handle<String>
- Address getter_address = v8::ToCData<Address>(callback->getter());
- ApiFunction fun(getter_address);
-
// NOTE: the O32 abi requires a0 to hold a special pointer when returning a
// struct from the function (which is currently the case). This means we pass
// the arguments in a1-a2 instead of a0-a1. TryCallApiFunctionAndReturn
// will handle setting up a0.
const int kApiStackSpace = 1;
-
+ FrameScope frame_scope(masm(), StackFrame::MANUAL);
__ EnterExitFrame(false, kApiStackSpace);
+
// Create AccessorInfo instance on the stack above the exit frame with
// scratch2 (internal::Object **args_) as the data.
__ sw(a2, MemOperand(sp, kPointerSize));
// a2 (second argument - see note above) = AccessorInfo&
__ Addu(a2, sp, kPointerSize);
- // Emitting a stub call may try to allocate (if the code is not
- // already generated). Do not allow the assembler to perform a
- // garbage collection but instead return the allocation failure
- // object.
+ const int kStackUnwindSpace = 4;
+ Address getter_address = v8::ToCData<Address>(callback->getter());
+ ApiFunction fun(getter_address);
ExternalReference ref =
ExternalReference(&fun,
ExternalReference::DIRECT_GETTER_CALL,
masm()->isolate());
- // 4 args - will be freed later by LeaveExitFrame.
- return masm()->TryCallApiFunctionAndReturn(ref, 4);
+ __ CallApiFunctionAndReturn(ref, kStackUnwindSpace);
}
-void StubCompiler::GenerateLoadInterceptor(JSObject* object,
- JSObject* interceptor_holder,
+void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
+ Handle<JSObject> interceptor_holder,
LookupResult* lookup,
Register receiver,
Register name_reg,
Register scratch1,
Register scratch2,
Register scratch3,
- String* name,
+ Handle<String> name,
Label* miss) {
ASSERT(interceptor_holder->HasNamedInterceptor());
ASSERT(!interceptor_holder->GetNamedInterceptor()->getter()->IsUndefined());
@@ -1300,9 +1213,9 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object,
if (lookup->type() == FIELD) {
compile_followup_inline = true;
} else if (lookup->type() == CALLBACKS &&
- lookup->GetCallbackObject()->IsAccessorInfo() &&
- AccessorInfo::cast(lookup->GetCallbackObject())->getter() != NULL) {
- compile_followup_inline = true;
+ lookup->GetCallbackObject()->IsAccessorInfo()) {
+ compile_followup_inline =
+ AccessorInfo::cast(lookup->GetCallbackObject())->getter() != NULL;
}
}
@@ -1317,47 +1230,44 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object,
// Save necessary data before invoking an interceptor.
// Requires a frame to make GC aware of pushed pointers.
- __ EnterInternalFrame();
-
- if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
- // CALLBACKS case needs a receiver to be passed into C++ callback.
- __ Push(receiver, holder_reg, name_reg);
- } else {
- __ Push(holder_reg, name_reg);
- }
-
- // Invoke an interceptor. Note: map checks from receiver to
- // interceptor's holder has been compiled before (see a caller
- // of this method).
- CompileCallLoadPropertyWithInterceptor(masm(),
- receiver,
- holder_reg,
- name_reg,
- interceptor_holder);
-
- // Check if interceptor provided a value for property. If it's
- // the case, return immediately.
- Label interceptor_failed;
- __ LoadRoot(scratch1, Heap::kNoInterceptorResultSentinelRootIndex);
- __ Branch(&interceptor_failed, eq, v0, Operand(scratch1));
- __ LeaveInternalFrame();
- __ Ret();
+ {
+ FrameScope frame_scope(masm(), StackFrame::INTERNAL);
+ if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
+ // CALLBACKS case needs a receiver to be passed into C++ callback.
+ __ Push(receiver, holder_reg, name_reg);
+ } else {
+ __ Push(holder_reg, name_reg);
+ }
+ // Invoke an interceptor. Note: map checks from receiver to
+ // interceptor's holder have been compiled before (see a caller
+ // of this method).
+ CompileCallLoadPropertyWithInterceptor(masm(),
+ receiver,
+ holder_reg,
+ name_reg,
+ interceptor_holder);
+ // Check if interceptor provided a value for property. If it's
+ // the case, return immediately.
+ Label interceptor_failed;
+ __ LoadRoot(scratch1, Heap::kNoInterceptorResultSentinelRootIndex);
+ __ Branch(&interceptor_failed, eq, v0, Operand(scratch1));
+ frame_scope.GenerateLeaveFrame();
+ __ Ret();
- __ bind(&interceptor_failed);
- __ pop(name_reg);
- __ pop(holder_reg);
- if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
- __ pop(receiver);
+ __ bind(&interceptor_failed);
+ __ pop(name_reg);
+ __ pop(holder_reg);
+ if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
+ __ pop(receiver);
+ }
+ // Leave the internal frame.
}
-
- __ LeaveInternalFrame();
-
// Check that the maps from interceptor's holder to lookup's holder
// haven't changed. And load lookup's holder into |holder| register.
- if (interceptor_holder != lookup->holder()) {
+ if (*interceptor_holder != lookup->holder()) {
holder_reg = CheckPrototypes(interceptor_holder,
holder_reg,
- lookup->holder(),
+ Handle<JSObject>(lookup->holder()),
scratch1,
scratch2,
scratch3,
@@ -1369,21 +1279,21 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object,
// We found FIELD property in prototype chain of interceptor's holder.
// Retrieve a field from field's holder.
GenerateFastPropertyLoad(masm(), v0, holder_reg,
- lookup->holder(), lookup->GetFieldIndex());
+ Handle<JSObject>(lookup->holder()),
+ lookup->GetFieldIndex());
__ Ret();
} else {
// We found CALLBACKS property in prototype chain of interceptor's
// holder.
ASSERT(lookup->type() == CALLBACKS);
- ASSERT(lookup->GetCallbackObject()->IsAccessorInfo());
- AccessorInfo* callback = AccessorInfo::cast(lookup->GetCallbackObject());
- ASSERT(callback != NULL);
+ Handle<AccessorInfo> callback(
+ AccessorInfo::cast(lookup->GetCallbackObject()));
ASSERT(callback->getter() != NULL);
// Tail call to runtime.
// Important invariant in the CALLBACKS case: the code above must be
// structured to never clobber the |receiver| register.
- __ li(scratch2, Handle<AccessorInfo>(callback));
+ __ li(scratch2, callback);
// holder_reg is either receiver or scratch1.
if (!receiver.is(holder_reg)) {
ASSERT(scratch1.is(holder_reg));
@@ -1419,16 +1329,16 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object,
}
-void CallStubCompiler::GenerateNameCheck(String* name, Label* miss) {
+void CallStubCompiler::GenerateNameCheck(Handle<String> name, Label* miss) {
if (kind_ == Code::KEYED_CALL_IC) {
- __ Branch(miss, ne, a2, Operand(Handle<String>(name)));
+ __ Branch(miss, ne, a2, Operand(name));
}
}
-void CallStubCompiler::GenerateGlobalReceiverCheck(JSObject* object,
- JSObject* holder,
- String* name,
+void CallStubCompiler::GenerateGlobalReceiverCheck(Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<String> name,
Label* miss) {
ASSERT(holder->IsGlobalObject());
@@ -1441,7 +1351,7 @@ void CallStubCompiler::GenerateGlobalReceiverCheck(JSObject* object,
// If the object is the holder then we know that it's a global
// object which can only happen for contextual calls. In this case,
// the receiver cannot be a smi.
- if (object != holder) {
+ if (!object.is_identical_to(holder)) {
__ JumpIfSmi(a0, miss);
}
@@ -1450,15 +1360,16 @@ void CallStubCompiler::GenerateGlobalReceiverCheck(JSObject* object,
}
-void CallStubCompiler::GenerateLoadFunctionFromCell(JSGlobalPropertyCell* cell,
- JSFunction* function,
- Label* miss) {
+void CallStubCompiler::GenerateLoadFunctionFromCell(
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<JSFunction> function,
+ Label* miss) {
// Get the value from the cell.
- __ li(a3, Operand(Handle<JSGlobalPropertyCell>(cell)));
+ __ li(a3, Operand(cell));
__ lw(a1, FieldMemOperand(a3, JSGlobalPropertyCell::kValueOffset));
// Check that the cell contains the same function.
- if (heap()->InNewSpace(function)) {
+ if (heap()->InNewSpace(*function)) {
// We can't embed a pointer to a function in new space so we have
// to verify that the shared function info is unchanged. This has
// the nice side effect that multiple closures based on the same
@@ -1473,27 +1384,24 @@ void CallStubCompiler::GenerateLoadFunctionFromCell(JSGlobalPropertyCell* cell,
__ lw(t0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
__ Branch(miss, ne, t0, Operand(a3));
} else {
- __ Branch(miss, ne, a1, Operand(Handle<JSFunction>(function)));
+ __ Branch(miss, ne, a1, Operand(function));
}
}
-MaybeObject* CallStubCompiler::GenerateMissBranch() {
- MaybeObject* maybe_obj =
+void CallStubCompiler::GenerateMissBranch() {
+ Handle<Code> code =
isolate()->stub_cache()->ComputeCallMiss(arguments().immediate(),
kind_,
- extra_ic_state_);
- Object* obj;
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- __ Jump(Handle<Code>(Code::cast(obj)), RelocInfo::CODE_TARGET);
- return obj;
+ extra_state_);
+ __ Jump(code, RelocInfo::CODE_TARGET);
}
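
// With handlified stubs, "bail out to the regular compiler" is signalled
// by an empty handle instead of heap()->undefined_value(). A sketch of
// the calling convention the Compile*Call methods below rely on:
//
//   Handle<Code> code = CompileCustomCall(object, holder, cell,
//                                         function, name);
//   if (!code.is_null()) return code;   // a custom stub was compiled
//   // ... otherwise fall through to the generic call stub ...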
-MaybeObject* CallStubCompiler::CompileCallField(JSObject* object,
- JSObject* holder,
+Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object,
+ Handle<JSObject> holder,
int index,
- String* name) {
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- a2 : name
// -- ra : return address
@@ -1513,23 +1421,23 @@ MaybeObject* CallStubCompiler::CompileCallField(JSObject* object,
Register reg = CheckPrototypes(object, a0, holder, a1, a3, t0, name, &miss);
GenerateFastPropertyLoad(masm(), a1, reg, holder, index);
- GenerateCallFunction(masm(), object, arguments(), &miss, extra_ic_state_);
+ GenerateCallFunction(masm(), object, arguments(), &miss, extra_state_);
// Handle call cache miss.
__ bind(&miss);
- MaybeObject* maybe_result = GenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
+ GenerateMissBranch();
// Return the generated code.
return GetCode(FIELD, name);
}
-MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
- JSObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
+Handle<Code> CallStubCompiler::CompileArrayPushCall(
+ Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<JSFunction> function,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- a2 : name
// -- ra : return address
@@ -1539,7 +1447,7 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
// -----------------------------------
// If object is not an array, bail out to regular call.
- if (!object->IsJSArray() || cell != NULL) return heap()->undefined_value();
+ if (!object->IsJSArray() || !cell.is_null()) return Handle<Code>::null();
Label miss;
@@ -1555,8 +1463,8 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
__ JumpIfSmi(receiver, &miss);
// Check that the maps haven't changed.
- CheckPrototypes(JSObject::cast(object), receiver,
- holder, a3, v0, t0, name, &miss);
+ CheckPrototypes(Handle<JSObject>::cast(object), receiver, holder, a3, v0, t0,
+ name, &miss);
if (argc == 0) {
// Nothing to do, just return the length.
@@ -1565,10 +1473,8 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
__ Ret();
} else {
Label call_builtin;
-
Register elements = a3;
Register end_elements = t1;
-
// Get the elements array of the object.
__ lw(elements, FieldMemOperand(receiver, JSArray::kElementsOffset));
@@ -1580,7 +1486,7 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
DONT_DO_SMI_CHECK);
if (argc == 1) { // Otherwise fall through to call the builtin.
- Label exit, with_write_barrier, attempt_to_grow_elements;
+ Label attempt_to_grow_elements;
// Get the array's length into v0 and calculate new length.
__ lw(v0, FieldMemOperand(receiver, JSArray::kLengthOffset));
@@ -1594,29 +1500,51 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
// Check if we could survive without allocation.
__ Branch(&attempt_to_grow_elements, gt, v0, Operand(t0));
+ // Check if value is a smi.
+ Label with_write_barrier;
+ __ lw(t0, MemOperand(sp, (argc - 1) * kPointerSize));
+ __ JumpIfNotSmi(t0, &with_write_barrier);
+
// Save new length.
__ sw(v0, FieldMemOperand(receiver, JSArray::kLengthOffset));
// Push the element.
- __ lw(t0, MemOperand(sp, (argc - 1) * kPointerSize));
// We may need a register containing the address end_elements below,
// so write back the value in end_elements.
__ sll(end_elements, v0, kPointerSizeLog2 - kSmiTagSize);
__ Addu(end_elements, elements, end_elements);
const int kEndElementsOffset =
FixedArray::kHeaderSize - kHeapObjectTag - argc * kPointerSize;
- __ sw(t0, MemOperand(end_elements, kEndElementsOffset));
- __ Addu(end_elements, end_elements, kPointerSize);
+ __ Addu(end_elements, end_elements, kEndElementsOffset);
+ __ sw(t0, MemOperand(end_elements));
// Check for a smi.
- __ JumpIfNotSmi(t0, &with_write_barrier);
- __ bind(&exit);
__ Drop(argc + 1);
__ Ret();
__ bind(&with_write_barrier);
- __ InNewSpace(elements, t0, eq, &exit);
- __ RecordWriteHelper(elements, end_elements, t0);
+
+ __ lw(t2, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ CheckFastObjectElements(t2, t2, &call_builtin);
+
+ // Save new length.
+ __ sw(v0, FieldMemOperand(receiver, JSArray::kLengthOffset));
+
+ // Push the element.
+ // We may need a register containing the address end_elements below,
+ // so write back the value in end_elements.
+ __ sll(end_elements, v0, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(end_elements, elements, end_elements);
+ __ Addu(end_elements, end_elements, kEndElementsOffset);
+ __ sw(t0, MemOperand(end_elements));
+
+ __ RecordWrite(elements,
+ end_elements,
+ t0,
+ kRAHasNotBeenSaved,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
__ Drop(argc + 1);
__ Ret();
@@ -1628,6 +1556,15 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
__ Branch(&call_builtin);
}
+ __ lw(a2, MemOperand(sp, (argc - 1) * kPointerSize));
+ // Growing elements that are SMI-only requires special handling in case
+ // the new element is non-Smi. For now, delegate to the builtin.
+ Label no_fast_elements_check;
+ __ JumpIfSmi(a2, &no_fast_elements_check);
+ __ lw(t3, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ CheckFastObjectElements(t3, t3, &call_builtin);
+ __ bind(&no_fast_elements_check);
+
ExternalReference new_space_allocation_top =
ExternalReference::new_space_allocation_top_address(
masm()->isolate());
@@ -1653,8 +1590,7 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
// Update new_space_allocation_top.
__ sw(t2, MemOperand(t3));
// Push the argument.
- __ lw(t2, MemOperand(sp, (argc - 1) * kPointerSize));
- __ sw(t2, MemOperand(end_elements));
+ __ sw(a2, MemOperand(end_elements));
// Fill the rest with holes.
__ LoadRoot(t2, Heap::kTheHoleValueRootIndex);
for (int i = 1; i < kAllocationDelta; i++) {
@@ -1679,19 +1615,19 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
// Handle call cache miss.
__ bind(&miss);
- MaybeObject* maybe_result = GenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
+ GenerateMissBranch();
// Return the generated code.
return GetCode(function);
}
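
// The push fast path above now classifies the value before storing it.
// Smis never need a write barrier, while heap objects must update the
// remembered set and inform incremental marking. In outline:
//
//   __ JumpIfNotSmi(t0, &with_write_barrier);
//   __ sw(t0, MemOperand(end_elements));   // smi: plain store, no barrier
//   ...
//   __ bind(&with_write_barrier);
//   __ sw(t0, MemOperand(end_elements));   // heap object: store, then...
//   __ RecordWrite(elements, end_elements, t0,
//                  kRAHasNotBeenSaved, kDontSaveFPRegs,
//                  EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
//
// OMIT_SMI_CHECK is safe only because the smi case was already filtered
// out by the branch; RecordWrite replaces the old RecordWriteHelper,
// which predates incremental marking.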
-MaybeObject* CallStubCompiler::CompileArrayPopCall(Object* object,
- JSObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
+Handle<Code> CallStubCompiler::CompileArrayPopCall(
+ Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<JSFunction> function,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- a2 : name
// -- ra : return address
@@ -1701,25 +1637,22 @@ MaybeObject* CallStubCompiler::CompileArrayPopCall(Object* object,
// -----------------------------------
// If object is not an array, bail out to regular call.
- if (!object->IsJSArray() || cell != NULL) return heap()->undefined_value();
+ if (!object->IsJSArray() || !cell.is_null()) return Handle<Code>::null();
Label miss, return_undefined, call_builtin;
-
Register receiver = a1;
Register elements = a3;
-
GenerateNameCheck(name, &miss);
// Get the receiver from the stack.
const int argc = arguments().immediate();
__ lw(receiver, MemOperand(sp, argc * kPointerSize));
-
// Check that the receiver isn't a smi.
__ JumpIfSmi(receiver, &miss);
// Check that the maps haven't changed.
- CheckPrototypes(JSObject::cast(object),
- receiver, holder, elements, t0, v0, name, &miss);
+ CheckPrototypes(Handle<JSObject>::cast(object), receiver, holder, elements,
+ t0, v0, name, &miss);
// Get the elements array of the object.
__ lw(elements, FieldMemOperand(receiver, JSArray::kElementsOffset));
@@ -1768,20 +1701,19 @@ MaybeObject* CallStubCompiler::CompileArrayPopCall(Object* object,
// Handle call cache miss.
__ bind(&miss);
- MaybeObject* maybe_result = GenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
+ GenerateMissBranch();
// Return the generated code.
return GetCode(function);
}
-MaybeObject* CallStubCompiler::CompileStringCharCodeAtCall(
- Object* object,
- JSObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
+Handle<Code> CallStubCompiler::CompileStringCharCodeAtCall(
+ Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<JSFunction> function,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- a2 : function name
// -- ra : return address
@@ -1791,10 +1723,9 @@ MaybeObject* CallStubCompiler::CompileStringCharCodeAtCall(
// -----------------------------------
// If object is not a string, bail out to regular call.
- if (!object->IsString() || cell != NULL) return heap()->undefined_value();
+ if (!object->IsString() || !cell.is_null()) return Handle<Code>::null();
const int argc = arguments().immediate();
-
Label miss;
Label name_miss;
Label index_out_of_range;
@@ -1802,7 +1733,7 @@ MaybeObject* CallStubCompiler::CompileStringCharCodeAtCall(
Label* index_out_of_range_label = &index_out_of_range;
if (kind_ == Code::CALL_IC &&
- (CallICBase::StringStubState::decode(extra_ic_state_) ==
+ (CallICBase::StringStubState::decode(extra_state_) ==
DEFAULT_STRING_STUB)) {
index_out_of_range_label = &miss;
}
@@ -1814,13 +1745,12 @@ MaybeObject* CallStubCompiler::CompileStringCharCodeAtCall(
Context::STRING_FUNCTION_INDEX,
v0,
&miss);
- ASSERT(object != holder);
- CheckPrototypes(JSObject::cast(object->GetPrototype()), v0, holder,
- a1, a3, t0, name, &miss);
+ ASSERT(!object.is_identical_to(holder));
+ CheckPrototypes(Handle<JSObject>(JSObject::cast(object->GetPrototype())),
+ v0, holder, a1, a3, t0, name, &miss);
Register receiver = a1;
Register index = t1;
- Register scratch = a3;
Register result = v0;
__ lw(receiver, MemOperand(sp, argc * kPointerSize));
if (argc > 0) {
@@ -1829,20 +1759,19 @@ MaybeObject* CallStubCompiler::CompileStringCharCodeAtCall(
__ LoadRoot(index, Heap::kUndefinedValueRootIndex);
}
- StringCharCodeAtGenerator char_code_at_generator(receiver,
- index,
- scratch,
- result,
- &miss, // When not a string.
- &miss, // When not a number.
- index_out_of_range_label,
- STRING_INDEX_IS_NUMBER);
- char_code_at_generator.GenerateFast(masm());
+ StringCharCodeAtGenerator generator(receiver,
+ index,
+ result,
+ &miss, // When not a string.
+ &miss, // When not a number.
+ index_out_of_range_label,
+ STRING_INDEX_IS_NUMBER);
+ generator.GenerateFast(masm());
__ Drop(argc + 1);
__ Ret();
StubRuntimeCallHelper call_helper;
- char_code_at_generator.GenerateSlow(masm(), call_helper);
+ generator.GenerateSlow(masm(), call_helper);
if (index_out_of_range.is_linked()) {
__ bind(&index_out_of_range);
@@ -1853,22 +1782,21 @@ MaybeObject* CallStubCompiler::CompileStringCharCodeAtCall(
__ bind(&miss);
// Restore function name in a2.
- __ li(a2, Handle<String>(name));
+ __ li(a2, name);
__ bind(&name_miss);
- MaybeObject* maybe_result = GenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
+ GenerateMissBranch();
// Return the generated code.
return GetCode(function);
}
-MaybeObject* CallStubCompiler::CompileStringCharAtCall(
- Object* object,
- JSObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
+Handle<Code> CallStubCompiler::CompileStringCharAtCall(
+ Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<JSFunction> function,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- a2 : function name
// -- ra : return address
@@ -1878,21 +1806,18 @@ MaybeObject* CallStubCompiler::CompileStringCharAtCall(
// -----------------------------------
// If object is not a string, bail out to regular call.
- if (!object->IsString() || cell != NULL) return heap()->undefined_value();
+ if (!object->IsString() || !cell.is_null()) return Handle<Code>::null();
const int argc = arguments().immediate();
-
Label miss;
Label name_miss;
Label index_out_of_range;
Label* index_out_of_range_label = &index_out_of_range;
-
if (kind_ == Code::CALL_IC &&
- (CallICBase::StringStubState::decode(extra_ic_state_) ==
+ (CallICBase::StringStubState::decode(extra_state_) ==
DEFAULT_STRING_STUB)) {
index_out_of_range_label = &miss;
}
-
GenerateNameCheck(name, &name_miss);
// Check that the maps starting from the prototype haven't changed.
@@ -1900,14 +1825,13 @@ MaybeObject* CallStubCompiler::CompileStringCharAtCall(
Context::STRING_FUNCTION_INDEX,
v0,
&miss);
- ASSERT(object != holder);
- CheckPrototypes(JSObject::cast(object->GetPrototype()), v0, holder,
- a1, a3, t0, name, &miss);
+ ASSERT(!object.is_identical_to(holder));
+ CheckPrototypes(Handle<JSObject>(JSObject::cast(object->GetPrototype())),
+ v0, holder, a1, a3, t0, name, &miss);
Register receiver = v0;
Register index = t1;
- Register scratch1 = a1;
- Register scratch2 = a3;
+ Register scratch = a3;
Register result = v0;
__ lw(receiver, MemOperand(sp, argc * kPointerSize));
if (argc > 0) {
@@ -1916,21 +1840,20 @@ MaybeObject* CallStubCompiler::CompileStringCharAtCall(
__ LoadRoot(index, Heap::kUndefinedValueRootIndex);
}
- StringCharAtGenerator char_at_generator(receiver,
- index,
- scratch1,
- scratch2,
- result,
- &miss, // When not a string.
- &miss, // When not a number.
- index_out_of_range_label,
- STRING_INDEX_IS_NUMBER);
- char_at_generator.GenerateFast(masm());
+ StringCharAtGenerator generator(receiver,
+ index,
+ scratch,
+ result,
+ &miss, // When not a string.
+ &miss, // When not a number.
+ index_out_of_range_label,
+ STRING_INDEX_IS_NUMBER);
+ generator.GenerateFast(masm());
__ Drop(argc + 1);
__ Ret();
StubRuntimeCallHelper call_helper;
- char_at_generator.GenerateSlow(masm(), call_helper);
+ generator.GenerateSlow(masm(), call_helper);
if (index_out_of_range.is_linked()) {
__ bind(&index_out_of_range);
@@ -1941,22 +1864,21 @@ MaybeObject* CallStubCompiler::CompileStringCharAtCall(
__ bind(&miss);
// Restore function name in a2.
- __ li(a2, Handle<String>(name));
+ __ li(a2, name);
__ bind(&name_miss);
- MaybeObject* maybe_result = GenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
+ GenerateMissBranch();
// Return the generated code.
return GetCode(function);
}
-MaybeObject* CallStubCompiler::CompileStringFromCharCodeCall(
- Object* object,
- JSObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
+Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
+ Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<JSFunction> function,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- a2 : function name
// -- ra : return address
@@ -1969,22 +1891,23 @@ MaybeObject* CallStubCompiler::CompileStringFromCharCodeCall(
// If the object is not a JSObject or we got an unexpected number of
// arguments, bail out to the regular call.
- if (!object->IsJSObject() || argc != 1) return heap()->undefined_value();
+ if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
Label miss;
GenerateNameCheck(name, &miss);
- if (cell == NULL) {
+ if (cell.is_null()) {
__ lw(a1, MemOperand(sp, 1 * kPointerSize));
STATIC_ASSERT(kSmiTag == 0);
__ JumpIfSmi(a1, &miss);
- CheckPrototypes(JSObject::cast(object), a1, holder, v0, a3, t0, name,
- &miss);
+ CheckPrototypes(Handle<JSObject>::cast(object), a1, holder, v0, a3, t0,
+ name, &miss);
} else {
- ASSERT(cell->value() == function);
- GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss);
+ ASSERT(cell->value() == *function);
+ GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
+ &miss);
GenerateLoadFunctionFromCell(cell, function, &miss);
}
@@ -2000,13 +1923,13 @@ MaybeObject* CallStubCompiler::CompileStringFromCharCodeCall(
// Convert the smi code to uint16.
__ And(code, code, Operand(Smi::FromInt(0xffff)));
- StringCharFromCodeGenerator char_from_code_generator(code, v0);
- char_from_code_generator.GenerateFast(masm());
+ StringCharFromCodeGenerator generator(code, v0);
+ generator.GenerateFast(masm());
__ Drop(argc + 1);
__ Ret();
StubRuntimeCallHelper call_helper;
- char_from_code_generator.GenerateSlow(masm(), call_helper);
+ generator.GenerateSlow(masm(), call_helper);
// Tail call the full function. We do not have to patch the receiver
// because the function makes no use of it.
@@ -2015,19 +1938,19 @@ MaybeObject* CallStubCompiler::CompileStringFromCharCodeCall(
__ bind(&miss);
// a2: function name.
- MaybeObject* maybe_result = GenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
+ GenerateMissBranch();
// Return the generated code.
- return (cell == NULL) ? GetCode(function) : GetCode(NORMAL, name);
+ return cell.is_null() ? GetCode(function) : GetCode(NORMAL, name);
}
-MaybeObject* CallStubCompiler::CompileMathFloorCall(Object* object,
- JSObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
+Handle<Code> CallStubCompiler::CompileMathFloorCall(
+ Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<JSFunction> function,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- a2 : function name
// -- ra : return address
@@ -2036,30 +1959,29 @@ MaybeObject* CallStubCompiler::CompileMathFloorCall(Object* object,
// -- sp[argc * 4] : receiver
// -----------------------------------
- if (!CpuFeatures::IsSupported(FPU))
- return heap()->undefined_value();
- CpuFeatures::Scope scope_fpu(FPU);
+ if (!CpuFeatures::IsSupported(FPU)) {
+ return Handle<Code>::null();
+ }
+ CpuFeatures::Scope scope_fpu(FPU);
const int argc = arguments().immediate();
-
// If the object is not a JSObject or we got an unexpected number of
// arguments, bail out to the regular call.
- if (!object->IsJSObject() || argc != 1) return heap()->undefined_value();
+ if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
Label miss, slow;
GenerateNameCheck(name, &miss);
- if (cell == NULL) {
+ if (cell.is_null()) {
__ lw(a1, MemOperand(sp, 1 * kPointerSize));
-
STATIC_ASSERT(kSmiTag == 0);
__ JumpIfSmi(a1, &miss);
-
- CheckPrototypes(JSObject::cast(object), a1, holder, a0, a3, t0, name,
- &miss);
+ CheckPrototypes(Handle<JSObject>::cast(object), a1, holder, a0, a3, t0,
+ name, &miss);
} else {
- ASSERT(cell->value() == function);
- GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss);
+ ASSERT(cell->value() == *function);
+ GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
+ &miss);
GenerateLoadFunctionFromCell(cell, function, &miss);
}
@@ -2149,19 +2071,19 @@ MaybeObject* CallStubCompiler::CompileMathFloorCall(Object* object,
__ bind(&miss);
// a2: function name.
- MaybeObject* obj = GenerateMissBranch();
- if (obj->IsFailure()) return obj;
+ GenerateMissBranch();
// Return the generated code.
- return (cell == NULL) ? GetCode(function) : GetCode(NORMAL, name);
+ return cell.is_null() ? GetCode(function) : GetCode(NORMAL, name);
}
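
// The FPU guard above follows the same null-handle convention: on
// FPU-less MIPS cores the custom Math.floor stub is simply not compiled
// and the generic call path is used instead, while CpuFeatures::Scope
// lets debug builds assert that FPU instructions are only emitted
// inside a guarded region:
//
//   if (!CpuFeatures::IsSupported(FPU)) {
//     return Handle<Code>::null();   // bail out to the regular call
//   }
//   CpuFeatures::Scope scope_fpu(FPU);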
-MaybeObject* CallStubCompiler::CompileMathAbsCall(Object* object,
- JSObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
+Handle<Code> CallStubCompiler::CompileMathAbsCall(
+ Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<JSFunction> function,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- a2 : function name
// -- ra : return address
@@ -2171,25 +2093,23 @@ MaybeObject* CallStubCompiler::CompileMathAbsCall(Object* object,
// -----------------------------------
const int argc = arguments().immediate();
-
// If the object is not a JSObject or we got an unexpected number of
// arguments, bail out to the regular call.
- if (!object->IsJSObject() || argc != 1) return heap()->undefined_value();
+ if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
Label miss;
- GenerateNameCheck(name, &miss);
- if (cell == NULL) {
+ GenerateNameCheck(name, &miss);
+ if (cell.is_null()) {
__ lw(a1, MemOperand(sp, 1 * kPointerSize));
-
STATIC_ASSERT(kSmiTag == 0);
__ JumpIfSmi(a1, &miss);
-
- CheckPrototypes(JSObject::cast(object), a1, holder, v0, a3, t0, name,
- &miss);
+ CheckPrototypes(Handle<JSObject>::cast(object), a1, holder, v0, a3, t0,
+ name, &miss);
} else {
- ASSERT(cell->value() == function);
- GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss);
+ ASSERT(cell->value() == *function);
+ GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
+ &miss);
GenerateLoadFunctionFromCell(cell, function, &miss);
}
@@ -2251,33 +2171,32 @@ MaybeObject* CallStubCompiler::CompileMathAbsCall(Object* object,
__ bind(&miss);
// a2: function name.
- MaybeObject* maybe_result = GenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
+ GenerateMissBranch();
// Return the generated code.
- return (cell == NULL) ? GetCode(function) : GetCode(NORMAL, name);
+ return cell.is_null() ? GetCode(function) : GetCode(NORMAL, name);
}
-MaybeObject* CallStubCompiler::CompileFastApiCall(
+Handle<Code> CallStubCompiler::CompileFastApiCall(
const CallOptimization& optimization,
- Object* object,
- JSObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
+ Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<JSFunction> function,
+ Handle<String> name) {
Counters* counters = isolate()->counters();
ASSERT(optimization.is_simple_api_call());
// Bail out if the object is a global object, as we don't want to
// repatch it to the global receiver.
- if (object->IsGlobalObject()) return heap()->undefined_value();
- if (cell != NULL) return heap()->undefined_value();
- if (!object->IsJSObject()) return heap()->undefined_value();
+ if (object->IsGlobalObject()) return Handle<Code>::null();
+ if (!cell.is_null()) return Handle<Code>::null();
+ if (!object->IsJSObject()) return Handle<Code>::null();
int depth = optimization.GetPrototypeDepthOfExpectedType(
- JSObject::cast(object), holder);
- if (depth == kInvalidProtoDepth) return heap()->undefined_value();
+ Handle<JSObject>::cast(object), holder);
+ if (depth == kInvalidProtoDepth) return Handle<Code>::null();
Label miss, miss_before_stack_reserved;
@@ -2296,40 +2215,37 @@ MaybeObject* CallStubCompiler::CompileFastApiCall(
ReserveSpaceForFastApiCall(masm(), a0);
// Check that the maps haven't changed and find a Holder as a side effect.
- CheckPrototypes(JSObject::cast(object), a1, holder, a0, a3, t0, name,
+ CheckPrototypes(Handle<JSObject>::cast(object), a1, holder, a0, a3, t0, name,
depth, &miss);
- MaybeObject* result = GenerateFastApiDirectCall(masm(), optimization, argc);
- if (result->IsFailure()) return result;
+ GenerateFastApiDirectCall(masm(), optimization, argc);
__ bind(&miss);
FreeSpaceForFastApiCall(masm());
__ bind(&miss_before_stack_reserved);
- MaybeObject* maybe_result = GenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
+ GenerateMissBranch();
// Return the generated code.
return GetCode(function);
}
-MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
- JSObject* holder,
- JSFunction* function,
- String* name,
+Handle<Code> CallStubCompiler::CompileCallConstant(Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<JSFunction> function,
+ Handle<String> name,
CheckType check) {
// ----------- S t a t e -------------
// -- a2 : name
// -- ra : return address
// -----------------------------------
if (HasCustomCallGenerator(function)) {
- MaybeObject* maybe_result = CompileCustomCall(
- object, holder, NULL, function, name);
- Object* result;
- if (!maybe_result->ToObject(&result)) return maybe_result;
- // Undefined means bail out to regular compiler.
- if (!result->IsUndefined()) return result;
+ Handle<Code> code = CompileCustomCall(object, holder,
+ Handle<JSGlobalPropertyCell>::null(),
+ function, name);
+ // A null handle means bail out to the regular compiler code below.
+ if (!code.is_null()) return code;
}
Label miss;
@@ -2342,23 +2258,20 @@ MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
// Check that the receiver isn't a smi.
if (check != NUMBER_CHECK) {
- __ And(t1, a1, Operand(kSmiTagMask));
- __ Branch(&miss, eq, t1, Operand(zero_reg));
+ __ JumpIfSmi(a1, &miss);
}
// Make sure that it's okay not to patch the on-stack receiver
// unless we're doing a receiver map check.
ASSERT(!object->IsGlobalObject() || check == RECEIVER_MAP_CHECK);
-
- SharedFunctionInfo* function_info = function->shared();
switch (check) {
case RECEIVER_MAP_CHECK:
__ IncrementCounter(masm()->isolate()->counters()->call_const(),
1, a0, a3);
// Check that the maps haven't changed.
- CheckPrototypes(JSObject::cast(object), a1, holder, a0, a3, t0, name,
- &miss);
+ CheckPrototypes(Handle<JSObject>::cast(object), a1, holder, a0, a3, t0,
+ name, &miss);
// Patch the receiver on the stack with the global proxy if
// necessary.
@@ -2369,50 +2282,46 @@ MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
break;
case STRING_CHECK:
- if (!function->IsBuiltin() && !function_info->strict_mode()) {
- // Calling non-strict non-builtins with a value as the receiver
- // requires boxing.
- __ jmp(&miss);
- } else {
+ if (function->IsBuiltin() || !function->shared()->is_classic_mode()) {
// Check that the object is a two-byte string or a symbol.
__ GetObjectType(a1, a3, a3);
__ Branch(&miss, Ugreater_equal, a3, Operand(FIRST_NONSTRING_TYPE));
// Check that the maps starting from the prototype haven't changed.
GenerateDirectLoadGlobalFunctionPrototype(
masm(), Context::STRING_FUNCTION_INDEX, a0, &miss);
- CheckPrototypes(JSObject::cast(object->GetPrototype()), a0, holder, a3,
- a1, t0, name, &miss);
- }
- break;
-
- case NUMBER_CHECK: {
- if (!function->IsBuiltin() && !function_info->strict_mode()) {
+ CheckPrototypes(
+ Handle<JSObject>(JSObject::cast(object->GetPrototype())),
+ a0, holder, a3, a1, t0, name, &miss);
+ } else {
// Calling non-strict non-builtins with a value as the receiver
// requires boxing.
__ jmp(&miss);
- } else {
+ }
+ break;
+
+ case NUMBER_CHECK:
+ if (function->IsBuiltin() || !function->shared()->is_classic_mode()) {
Label fast;
// Check that the object is a smi or a heap number.
- __ And(t1, a1, Operand(kSmiTagMask));
- __ Branch(&fast, eq, t1, Operand(zero_reg));
+ __ JumpIfSmi(a1, &fast);
__ GetObjectType(a1, a0, a0);
__ Branch(&miss, ne, a0, Operand(HEAP_NUMBER_TYPE));
__ bind(&fast);
// Check that the maps starting from the prototype haven't changed.
GenerateDirectLoadGlobalFunctionPrototype(
masm(), Context::NUMBER_FUNCTION_INDEX, a0, &miss);
- CheckPrototypes(JSObject::cast(object->GetPrototype()), a0, holder, a3,
- a1, t0, name, &miss);
- }
- break;
- }
-
- case BOOLEAN_CHECK: {
- if (!function->IsBuiltin() && !function_info->strict_mode()) {
+ CheckPrototypes(
+ Handle<JSObject>(JSObject::cast(object->GetPrototype())),
+ a0, holder, a3, a1, t0, name, &miss);
+ } else {
// Calling non-strict non-builtins with a value as the receiver
// requires boxing.
__ jmp(&miss);
- } else {
+ }
+ break;
+
+ case BOOLEAN_CHECK:
+ if (function->IsBuiltin() || !function->shared()->is_classic_mode()) {
Label fast;
// Check that the object is a boolean.
__ LoadRoot(t0, Heap::kTrueValueRootIndex);
@@ -2423,17 +2332,18 @@ MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
// Check that the maps starting from the prototype haven't changed.
GenerateDirectLoadGlobalFunctionPrototype(
masm(), Context::BOOLEAN_FUNCTION_INDEX, a0, &miss);
- CheckPrototypes(JSObject::cast(object->GetPrototype()), a0, holder, a3,
- a1, t0, name, &miss);
+ CheckPrototypes(
+ Handle<JSObject>(JSObject::cast(object->GetPrototype())),
+ a0, holder, a3, a1, t0, name, &miss);
+ } else {
+ // Calling non-strict non-builtins with a value as the receiver
+ // requires boxing.
+ __ jmp(&miss);
}
break;
}
- default:
- UNREACHABLE();
- }
-
- CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+ CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
? CALL_AS_FUNCTION
: CALL_AS_METHOD;
__ InvokeFunction(function, arguments(), JUMP_FUNCTION, call_kind);
@@ -2441,17 +2351,16 @@ MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
// Handle call cache miss.
__ bind(&miss);
- MaybeObject* maybe_result = GenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
+ GenerateMissBranch();
// Return the generated code.
return GetCode(function);
}
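
// The receiver checks above were rewritten from the old strict_mode()
// flag to its successor. Assuming the V8 3.7 language-mode split
// (classic, strict, extended), is_classic_mode() is true exactly when
// the function is neither strict nor extended, so the guard
//
//   if (function->IsBuiltin() || !function->shared()->is_classic_mode())
//
// admits builtins and strict/extended-mode functions, which may be
// called with an unboxed primitive receiver; classic-mode user
// functions jump to the miss path because their receiver would first
// need boxing.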
-MaybeObject* CallStubCompiler::CompileCallInterceptor(JSObject* object,
- JSObject* holder,
- String* name) {
+Handle<Code> CallStubCompiler::CompileCallInterceptor(Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- a2 : name
// -- ra : return address
@@ -2463,71 +2372,54 @@ MaybeObject* CallStubCompiler::CompileCallInterceptor(JSObject* object,
// Get the number of arguments.
const int argc = arguments().immediate();
-
- LookupResult lookup;
+ LookupResult lookup(isolate());
LookupPostInterceptor(holder, name, &lookup);
// Get the receiver from the stack.
__ lw(a1, MemOperand(sp, argc * kPointerSize));
- CallInterceptorCompiler compiler(this, arguments(), a2, extra_ic_state_);
- MaybeObject* result = compiler.Compile(masm(),
- object,
- holder,
- name,
- &lookup,
- a1,
- a3,
- t0,
- a0,
- &miss);
- if (result->IsFailure()) {
- return result;
- }
+ CallInterceptorCompiler compiler(this, arguments(), a2, extra_state_);
+ compiler.Compile(masm(), object, holder, name, &lookup, a1, a3, t0, a0,
+ &miss);
// Move returned value, the function to call, to a1.
__ mov(a1, v0);
// Restore receiver.
__ lw(a0, MemOperand(sp, argc * kPointerSize));
- GenerateCallFunction(masm(), object, arguments(), &miss, extra_ic_state_);
+ GenerateCallFunction(masm(), object, arguments(), &miss, extra_state_);
// Handle call cache miss.
__ bind(&miss);
- MaybeObject* maybe_result = GenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
+ GenerateMissBranch();
// Return the generated code.
return GetCode(INTERCEPTOR, name);
}
-MaybeObject* CallStubCompiler::CompileCallGlobal(JSObject* object,
- GlobalObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
+Handle<Code> CallStubCompiler::CompileCallGlobal(
+ Handle<JSObject> object,
+ Handle<GlobalObject> holder,
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<JSFunction> function,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- a2 : name
// -- ra : return address
// -----------------------------------
if (HasCustomCallGenerator(function)) {
- MaybeObject* maybe_result = CompileCustomCall(
- object, holder, cell, function, name);
- Object* result;
- if (!maybe_result->ToObject(&result)) return maybe_result;
- // Undefined means bail out to regular compiler.
- if (!result->IsUndefined()) return result;
+ Handle<Code> code = CompileCustomCall(object, holder, cell, function, name);
+ // A null handle means bail out to the regular compiler code below.
+ if (!code.is_null()) return code;
}
Label miss;
-
GenerateNameCheck(name, &miss);
// Get the number of arguments.
const int argc = arguments().immediate();
-
GenerateGlobalReceiverCheck(object, holder, name, &miss);
GenerateLoadFunctionFromCell(cell, function, &miss);
@@ -2544,34 +2436,31 @@ MaybeObject* CallStubCompiler::CompileCallGlobal(JSObject* object,
// Jump to the cached code (tail call).
Counters* counters = masm()->isolate()->counters();
__ IncrementCounter(counters->call_global_inline(), 1, a3, t0);
- ASSERT(function->is_compiled());
- Handle<Code> code(function->code());
ParameterCount expected(function->shared()->formal_parameter_count());
- CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+ CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
? CALL_AS_FUNCTION
: CALL_AS_METHOD;
- if (V8::UseCrankshaft()) {
- UNIMPLEMENTED_MIPS();
- } else {
- __ InvokeCode(code, expected, arguments(), RelocInfo::CODE_TARGET,
- JUMP_FUNCTION, call_kind);
- }
+ // We call indirectly through the code field in the function to
+ // allow recompilation to take effect without changing any of the
+ // call sites.
+ __ lw(a3, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
+ __ InvokeCode(a3, expected, arguments(), JUMP_FUNCTION,
+ NullCallWrapper(), call_kind);
// Handle call cache miss.
__ bind(&miss);
__ IncrementCounter(counters->call_global_inline_miss(), 1, a1, a3);
- MaybeObject* maybe_result = GenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
+ GenerateMissBranch();
// Return the generated code.
return GetCode(NORMAL, name);
}
-MaybeObject* StoreStubCompiler::CompileStoreField(JSObject* object,
+Handle<Code> StoreStubCompiler::CompileStoreField(Handle<JSObject> object,
int index,
- Map* transition,
- String* name) {
+ Handle<Map> transition,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- a0 : value
// -- a1 : receiver
@@ -2581,25 +2470,21 @@ MaybeObject* StoreStubCompiler::CompileStoreField(JSObject* object,
Label miss;
// Name register might be clobbered.
- GenerateStoreField(masm(),
- object,
- index,
- transition,
- a1, a2, a3,
- &miss);
+ GenerateStoreField(masm(), object, index, transition, a1, a2, a3, &miss);
__ bind(&miss);
__ li(a2, Operand(Handle<String>(name))); // Restore name.
Handle<Code> ic = masm()->isolate()->builtins()->Builtins::StoreIC_Miss();
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
- return GetCode(transition == NULL ? FIELD : MAP_TRANSITION, name);
+ return GetCode(transition.is_null() ? FIELD : MAP_TRANSITION, name);
}
-MaybeObject* StoreStubCompiler::CompileStoreCallback(JSObject* object,
- AccessorInfo* callback,
- String* name) {
+Handle<Code> StoreStubCompiler::CompileStoreCallback(
+ Handle<JSObject> object,
+ Handle<AccessorInfo> callback,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- a0 : value
// -- a1 : receiver
@@ -2625,7 +2510,7 @@ MaybeObject* StoreStubCompiler::CompileStoreCallback(JSObject* object,
ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
__ push(a1); // Receiver.
- __ li(a3, Operand(Handle<AccessorInfo>(callback))); // Callback info.
+ __ li(a3, Operand(callback)); // Callback info.
__ Push(a3, a2, a0);
// Do tail-call to the runtime system.
@@ -2644,8 +2529,9 @@ MaybeObject* StoreStubCompiler::CompileStoreCallback(JSObject* object,
}
-MaybeObject* StoreStubCompiler::CompileStoreInterceptor(JSObject* receiver,
- String* name) {
+Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
+ Handle<JSObject> receiver,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- a0 : value
// -- a1 : receiver
@@ -2691,9 +2577,10 @@ MaybeObject* StoreStubCompiler::CompileStoreInterceptor(JSObject* receiver,
}
-MaybeObject* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object,
- JSGlobalPropertyCell* cell,
- String* name) {
+Handle<Code> StoreStubCompiler::CompileStoreGlobal(
+ Handle<GlobalObject> object,
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- a0 : value
// -- a1 : receiver
@@ -2710,7 +2597,7 @@ MaybeObject* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object,
// cell could have been deleted and reintroducing the global needs
// to update the property details in the property dictionary of the
// global object. We bail out to the runtime system to do that.
- __ li(t0, Operand(Handle<JSGlobalPropertyCell>(cell)));
+ __ li(t0, Operand(cell));
__ LoadRoot(t1, Heap::kTheHoleValueRootIndex);
__ lw(t2, FieldMemOperand(t0, JSGlobalPropertyCell::kValueOffset));
__ Branch(&miss, eq, t1, Operand(t2));
@@ -2718,6 +2605,16 @@ MaybeObject* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object,
// Store the value in the cell.
__ sw(a0, FieldMemOperand(t0, JSGlobalPropertyCell::kValueOffset));
__ mov(v0, a0); // Stored value must be returned in v0.
+
+ // This trashes a0 but the value is returned in v0 anyway.
+ __ RecordWriteField(t0,
+ JSGlobalPropertyCell::kValueOffset,
+ a0,
+ a2,
+ kRAHasNotBeenSaved,
+ kDontSaveFPRegs,
+ OMIT_REMEMBERED_SET);
+
Counters* counters = masm()->isolate()->counters();
__ IncrementCounter(counters->named_store_global_inline(), 1, a1, a3);
__ Ret();
@@ -2733,9 +2630,9 @@ MaybeObject* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object,
}
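
// Storing into a global property cell is a heap-to-heap pointer write,
// so it now runs the incremental-marking write barrier as well. A
// sketch of the shape used above (flags as in this patch; the
// remembered-set half of the barrier is deliberately skipped):
//
//   __ sw(a0, FieldMemOperand(t0, JSGlobalPropertyCell::kValueOffset));
//   __ mov(v0, a0);                     // stored value is the result
//   __ RecordWriteField(t0, JSGlobalPropertyCell::kValueOffset,
//                       a0, a2,         // a0 is clobbered, v0 survives
//                       kRAHasNotBeenSaved, kDontSaveFPRegs,
//                       OMIT_REMEMBERED_SET);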
-MaybeObject* LoadStubCompiler::CompileLoadNonexistent(String* name,
- JSObject* object,
- JSObject* last) {
+Handle<Code> LoadStubCompiler::CompileLoadNonexistent(Handle<String> name,
+ Handle<JSObject> object,
+ Handle<JSObject> last) {
// ----------- S t a t e -------------
// -- a0 : receiver
// -- ra : return address
@@ -2751,15 +2648,8 @@ MaybeObject* LoadStubCompiler::CompileLoadNonexistent(String* name,
// If the last object in the prototype chain is a global object,
// check that the global property cell is empty.
if (last->IsGlobalObject()) {
- MaybeObject* cell = GenerateCheckPropertyCell(masm(),
- GlobalObject::cast(last),
- name,
- a1,
- &miss);
- if (cell->IsFailure()) {
- miss.Unuse();
- return cell;
- }
+ GenerateCheckPropertyCell(
+ masm(), Handle<GlobalObject>::cast(last), name, a1, &miss);
}
// Return undefined if the maps of the full prototype chain are still the same.
@@ -2770,14 +2660,14 @@ MaybeObject* LoadStubCompiler::CompileLoadNonexistent(String* name,
GenerateLoadMiss(masm(), Code::LOAD_IC);
// Return the generated code.
- return GetCode(NONEXISTENT, heap()->empty_string());
+ return GetCode(NONEXISTENT, factory()->empty_string());
}
-MaybeObject* LoadStubCompiler::CompileLoadField(JSObject* object,
- JSObject* holder,
+Handle<Code> LoadStubCompiler::CompileLoadField(Handle<JSObject> object,
+ Handle<JSObject> holder,
int index,
- String* name) {
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- a0 : receiver
// -- a2 : name
@@ -2796,24 +2686,19 @@ MaybeObject* LoadStubCompiler::CompileLoadField(JSObject* object,
}
-MaybeObject* LoadStubCompiler::CompileLoadCallback(String* name,
- JSObject* object,
- JSObject* holder,
- AccessorInfo* callback) {
+Handle<Code> LoadStubCompiler::CompileLoadCallback(
+ Handle<String> name,
+ Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<AccessorInfo> callback) {
// ----------- S t a t e -------------
// -- a0 : receiver
// -- a2 : name
// -- ra : return address
// -----------------------------------
Label miss;
-
- MaybeObject* result = GenerateLoadCallback(object, holder, a0, a2, a3, a1, t0,
- callback, name, &miss);
- if (result->IsFailure()) {
- miss.Unuse();
- return result;
- }
-
+ GenerateLoadCallback(object, holder, a0, a2, a3, a1, t0, callback, name,
+ &miss);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::LOAD_IC);
@@ -2822,10 +2707,10 @@ MaybeObject* LoadStubCompiler::CompileLoadCallback(String* name,
}
-MaybeObject* LoadStubCompiler::CompileLoadConstant(JSObject* object,
- JSObject* holder,
- Object* value,
- String* name) {
+Handle<Code> LoadStubCompiler::CompileLoadConstant(Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<Object> value,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- a0 : receiver
// -- a2 : name
@@ -2842,9 +2727,9 @@ MaybeObject* LoadStubCompiler::CompileLoadConstant(JSObject* object,
}
-MaybeObject* LoadStubCompiler::CompileLoadInterceptor(JSObject* object,
- JSObject* holder,
- String* name) {
+Handle<Code> LoadStubCompiler::CompileLoadInterceptor(Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- a0 : receiver
// -- a2 : name
@@ -2853,17 +2738,9 @@ MaybeObject* LoadStubCompiler::CompileLoadInterceptor(JSObject* object,
// -----------------------------------
Label miss;
- LookupResult lookup;
+ LookupResult lookup(isolate());
LookupPostInterceptor(holder, name, &lookup);
- GenerateLoadInterceptor(object,
- holder,
- &lookup,
- a0,
- a2,
- a3,
- a1,
- t0,
- name,
+ GenerateLoadInterceptor(object, holder, &lookup, a0, a2, a3, a1, t0, name,
&miss);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::LOAD_IC);
@@ -2873,11 +2750,12 @@ MaybeObject* LoadStubCompiler::CompileLoadInterceptor(JSObject* object,
}
-MaybeObject* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
- GlobalObject* holder,
- JSGlobalPropertyCell* cell,
- String* name,
- bool is_dont_delete) {
+Handle<Code> LoadStubCompiler::CompileLoadGlobal(
+ Handle<JSObject> object,
+ Handle<GlobalObject> holder,
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<String> name,
+ bool is_dont_delete) {
// ----------- S t a t e -------------
// -- a0 : receiver
// -- a2 : name
@@ -2888,16 +2766,15 @@ MaybeObject* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
// If the object is the holder then we know that it's a global
// object which can only happen for contextual calls. In this case,
// the receiver cannot be a smi.
- if (object != holder) {
- __ And(t0, a0, Operand(kSmiTagMask));
- __ Branch(&miss, eq, t0, Operand(zero_reg));
+ if (!object.is_identical_to(holder)) {
+ __ JumpIfSmi(a0, &miss);
}
// Check that the map of the global has not changed.
CheckPrototypes(object, a0, holder, a3, t0, a1, name, &miss);
// Get the value from the cell.
- __ li(a3, Operand(Handle<JSGlobalPropertyCell>(cell)));
+ __ li(a3, Operand(cell));
__ lw(t0, FieldMemOperand(a3, JSGlobalPropertyCell::kValueOffset));
// Check for a deleted property if the property can actually be deleted.
@@ -2920,9 +2797,9 @@ MaybeObject* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
}
-MaybeObject* KeyedLoadStubCompiler::CompileLoadField(String* name,
- JSObject* receiver,
- JSObject* holder,
+Handle<Code> KeyedLoadStubCompiler::CompileLoadField(Handle<String> name,
+ Handle<JSObject> receiver,
+ Handle<JSObject> holder,
int index) {
// ----------- S t a t e -------------
// -- ra : return address
@@ -2932,7 +2809,7 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadField(String* name,
Label miss;
// Check the key is the cached one.
- __ Branch(&miss, ne, a0, Operand(Handle<String>(name)));
+ __ Branch(&miss, ne, a0, Operand(name));
GenerateLoadField(receiver, holder, a1, a2, a3, t0, index, name, &miss);
__ bind(&miss);
@@ -2942,11 +2819,11 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadField(String* name,
}
-MaybeObject* KeyedLoadStubCompiler::CompileLoadCallback(
- String* name,
- JSObject* receiver,
- JSObject* holder,
- AccessorInfo* callback) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadCallback(
+ Handle<String> name,
+ Handle<JSObject> receiver,
+ Handle<JSObject> holder,
+ Handle<AccessorInfo> callback) {
// ----------- S t a t e -------------
// -- ra : return address
// -- a0 : key
@@ -2955,15 +2832,10 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadCallback(
Label miss;
// Check the key is the cached one.
- __ Branch(&miss, ne, a0, Operand(Handle<String>(name)));
-
- MaybeObject* result = GenerateLoadCallback(receiver, holder, a1, a0, a2, a3,
- t0, callback, name, &miss);
- if (result->IsFailure()) {
- miss.Unuse();
- return result;
- }
+ __ Branch(&miss, ne, a0, Operand(name));
+ GenerateLoadCallback(receiver, holder, a1, a0, a2, a3, t0, callback, name,
+ &miss);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
@@ -2971,10 +2843,11 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadCallback(
}
-MaybeObject* KeyedLoadStubCompiler::CompileLoadConstant(String* name,
- JSObject* receiver,
- JSObject* holder,
- Object* value) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadConstant(
+ Handle<String> name,
+ Handle<JSObject> receiver,
+ Handle<JSObject> holder,
+ Handle<Object> value) {
// ----------- S t a t e -------------
// -- ra : return address
// -- a0 : key
@@ -2983,7 +2856,7 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadConstant(String* name,
Label miss;
// Check the key is the cached one.
- __ Branch(&miss, ne, a0, Operand(Handle<String>(name)));
+ __ Branch(&miss, ne, a0, Operand(name));
GenerateLoadConstant(receiver, holder, a1, a2, a3, t0, value, name, &miss);
__ bind(&miss);
@@ -2994,9 +2867,10 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadConstant(String* name,
}
-MaybeObject* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
- JSObject* holder,
- String* name) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadInterceptor(
+ Handle<JSObject> receiver,
+ Handle<JSObject> holder,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- ra : return address
// -- a0 : key
@@ -3005,19 +2879,11 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
Label miss;
// Check the key is the cached one.
- __ Branch(&miss, ne, a0, Operand(Handle<String>(name)));
+ __ Branch(&miss, ne, a0, Operand(name));
- LookupResult lookup;
+ LookupResult lookup(isolate());
LookupPostInterceptor(holder, name, &lookup);
- GenerateLoadInterceptor(receiver,
- holder,
- &lookup,
- a1,
- a0,
- a2,
- a3,
- t0,
- name,
+ GenerateLoadInterceptor(receiver, holder, &lookup, a1, a0, a2, a3, t0, name,
&miss);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
@@ -3026,7 +2892,8 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
}
-MaybeObject* KeyedLoadStubCompiler::CompileLoadArrayLength(String* name) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadArrayLength(
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- ra : return address
// -- a0 : key
@@ -3035,7 +2902,7 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadArrayLength(String* name) {
Label miss;
// Check the key is the cached one.
- __ Branch(&miss, ne, a0, Operand(Handle<String>(name)));
+ __ Branch(&miss, ne, a0, Operand(name));
GenerateLoadArrayLength(masm(), a1, a2, &miss);
__ bind(&miss);
@@ -3045,7 +2912,8 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadArrayLength(String* name) {
}
-MaybeObject* KeyedLoadStubCompiler::CompileLoadStringLength(String* name) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadStringLength(
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- ra : return address
// -- a0 : key
@@ -3057,7 +2925,7 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadStringLength(String* name) {
__ IncrementCounter(counters->keyed_load_string_length(), 1, a2, a3);
// Check the key is the cached one.
- __ Branch(&miss, ne, a0, Operand(Handle<String>(name)));
+ __ Branch(&miss, ne, a0, Operand(name));
GenerateLoadStringLength(masm(), a1, a2, a3, &miss, true);
__ bind(&miss);
@@ -3069,7 +2937,8 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadStringLength(String* name) {
}
-MaybeObject* KeyedLoadStubCompiler::CompileLoadFunctionPrototype(String* name) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadFunctionPrototype(
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- ra : return address
// -- a0 : key
@@ -3081,7 +2950,7 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadFunctionPrototype(String* name) {
__ IncrementCounter(counters->keyed_load_function_prototype(), 1, a2, a3);
// Check the name hasn't changed.
- __ Branch(&miss, ne, a0, Operand(Handle<String>(name)));
+ __ Branch(&miss, ne, a0, Operand(name));
GenerateLoadFunctionPrototype(masm(), a1, a2, a3, &miss);
__ bind(&miss);
@@ -3092,33 +2961,29 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadFunctionPrototype(String* name) {
}
-MaybeObject* KeyedLoadStubCompiler::CompileLoadElement(Map* receiver_map) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadElement(
+ Handle<Map> receiver_map) {
// ----------- S t a t e -------------
// -- ra : return address
// -- a0 : key
// -- a1 : receiver
// -----------------------------------
- Code* stub;
ElementsKind elements_kind = receiver_map->elements_kind();
- MaybeObject* maybe_stub = KeyedLoadElementStub(elements_kind).TryGetCode();
- if (!maybe_stub->To(&stub)) return maybe_stub;
- __ DispatchMap(a1,
- a2,
- Handle<Map>(receiver_map),
- Handle<Code>(stub),
- DO_SMI_CHECK);
+ Handle<Code> stub = KeyedLoadElementStub(elements_kind).GetCode();
+
+ __ DispatchMap(a1, a2, receiver_map, stub, DO_SMI_CHECK);
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Miss();
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
- return GetCode(NORMAL, NULL);
+ return GetCode(NORMAL, factory()->empty_string());
}
-MaybeObject* KeyedLoadStubCompiler::CompileLoadMegamorphic(
- MapList* receiver_maps,
- CodeList* handler_ics) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadPolymorphic(
+ MapHandleList* receiver_maps,
+ CodeHandleList* handler_ics) {
// ----------- S t a t e -------------
// -- ra : return address
// -- a0 : key
@@ -3130,9 +2995,8 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadMegamorphic(
int receiver_count = receiver_maps->length();
__ lw(a2, FieldMemOperand(a1, HeapObject::kMapOffset));
for (int current = 0; current < receiver_count; ++current) {
- Handle<Map> map(receiver_maps->at(current));
- Handle<Code> code(handler_ics->at(current));
- __ Jump(code, RelocInfo::CODE_TARGET, eq, a2, Operand(map));
+ __ Jump(handler_ics->at(current), RelocInfo::CODE_TARGET,
+ eq, a2, Operand(receiver_maps->at(current)));
}
__ bind(&miss);
@@ -3140,14 +3004,14 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadMegamorphic(
__ Jump(miss_ic, RelocInfo::CODE_TARGET);
// Return the generated code.
- return GetCode(NORMAL, NULL, MEGAMORPHIC);
+ return GetCode(NORMAL, factory()->empty_string(), MEGAMORPHIC);
}
-MaybeObject* KeyedStoreStubCompiler::CompileStoreField(JSObject* object,
+Handle<Code> KeyedStoreStubCompiler::CompileStoreField(Handle<JSObject> object,
int index,
- Map* transition,
- String* name) {
+ Handle<Map> transition,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- a0 : value
// -- a1 : key
@@ -3161,16 +3025,11 @@ MaybeObject* KeyedStoreStubCompiler::CompileStoreField(JSObject* object,
__ IncrementCounter(counters->keyed_store_field(), 1, a3, t0);
// Check that the name has not changed.
- __ Branch(&miss, ne, a1, Operand(Handle<String>(name)));
+ __ Branch(&miss, ne, a1, Operand(name));
// a3 is used as a scratch register. a1 and a2 keep their values if a jump
// to the miss label is generated.
- GenerateStoreField(masm(),
- object,
- index,
- transition,
- a2, a1, a3,
- &miss);
+ GenerateStoreField(masm(), object, index, transition, a2, a1, a3, &miss);
__ bind(&miss);
__ DecrementCounter(counters->keyed_store_field(), 1, a3, t0);
@@ -3178,11 +3037,12 @@ MaybeObject* KeyedStoreStubCompiler::CompileStoreField(JSObject* object,
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
- return GetCode(transition == NULL ? FIELD : MAP_TRANSITION, name);
+ return GetCode(transition.is_null() ? FIELD : MAP_TRANSITION, name);
}
-MaybeObject* KeyedStoreStubCompiler::CompileStoreElement(Map* receiver_map) {
+Handle<Code> KeyedStoreStubCompiler::CompileStoreElement(
+ Handle<Map> receiver_map) {
// ----------- S t a t e -------------
// -- a0 : value
// -- a1 : key
@@ -3190,29 +3050,25 @@ MaybeObject* KeyedStoreStubCompiler::CompileStoreElement(Map* receiver_map) {
// -- ra : return address
// -- a3 : scratch
// -----------------------------------
- Code* stub;
ElementsKind elements_kind = receiver_map->elements_kind();
bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
- MaybeObject* maybe_stub =
- KeyedStoreElementStub(is_js_array, elements_kind).TryGetCode();
- if (!maybe_stub->To(&stub)) return maybe_stub;
- __ DispatchMap(a2,
- a3,
- Handle<Map>(receiver_map),
- Handle<Code>(stub),
- DO_SMI_CHECK);
+ Handle<Code> stub =
+ KeyedStoreElementStub(is_js_array, elements_kind).GetCode();
+
+ __ DispatchMap(a2, a3, receiver_map, stub, DO_SMI_CHECK);
Handle<Code> ic = isolate()->builtins()->KeyedStoreIC_Miss();
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
- return GetCode(NORMAL, NULL);
+ return GetCode(NORMAL, factory()->empty_string());
}
-MaybeObject* KeyedStoreStubCompiler::CompileStoreMegamorphic(
- MapList* receiver_maps,
- CodeList* handler_ics) {
+Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic(
+ MapHandleList* receiver_maps,
+ CodeHandleList* handler_stubs,
+ MapHandleList* transitioned_maps) {
// ----------- S t a t e -------------
// -- a0 : value
// -- a1 : key
@@ -3225,10 +3081,17 @@ MaybeObject* KeyedStoreStubCompiler::CompileStoreMegamorphic(
int receiver_count = receiver_maps->length();
__ lw(a3, FieldMemOperand(a2, HeapObject::kMapOffset));
- for (int current = 0; current < receiver_count; ++current) {
- Handle<Map> map(receiver_maps->at(current));
- Handle<Code> code(handler_ics->at(current));
- __ Jump(code, RelocInfo::CODE_TARGET, eq, a3, Operand(map));
+ for (int i = 0; i < receiver_count; ++i) {
+ if (transitioned_maps->at(i).is_null()) {
+ __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET, eq,
+ a3, Operand(receiver_maps->at(i)));
+ } else {
+ Label next_map;
+ __ Branch(&next_map, ne, a3, Operand(receiver_maps->at(i)));
+ __ li(a3, Operand(transitioned_maps->at(i)));
+ __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET);
+ __ bind(&next_map);
+ }
}
__ bind(&miss);
@@ -3236,11 +3099,12 @@ MaybeObject* KeyedStoreStubCompiler::CompileStoreMegamorphic(
__ Jump(miss_ic, RelocInfo::CODE_TARGET);
// Return the generated code.
- return GetCode(NORMAL, NULL, MEGAMORPHIC);
+ return GetCode(NORMAL, factory()->empty_string(), MEGAMORPHIC);
}
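
The loop above is the substance of the CompileStoreMegamorphic to CompileStorePolymorphic rename: each receiver map now carries an optional transition target alongside its handler stub. A hypothetical C++ model of the emitted dispatch, with plain pointers standing in for registers and labels:

    // Model only; the real dispatch is the emitted MIPS sequence above.
    struct Map {};
    struct Code { void (*entry)(Map* transition_target); };

    void DispatchPolymorphicStore(Map* receiver_map,
                                  Map* maps[], Code* handlers[],
                                  Map* transitions[], int count,
                                  Code* miss) {
      for (int i = 0; i < count; ++i) {
        if (receiver_map != maps[i]) continue;  // Branch(&next_map, ne, ...)
        // A non-null transition target is loaded into a3 so the handler
        // can install the new map while performing the store.
        handlers[i]->entry(transitions[i]);     // Jump(handler_stubs->at(i))
        return;
      }
      miss->entry(nullptr);                     // KeyedStoreIC_Miss
    }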
-MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) {
+Handle<Code> ConstructStubCompiler::CompileConstructStub(
+ Handle<JSFunction> function) {
// a0 : argc
// a1 : constructor
// ra : return address
@@ -3263,8 +3127,7 @@ MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) {
// a1: constructor function
// t7: undefined
__ lw(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
- __ And(t0, a2, Operand(kSmiTagMask));
- __ Branch(&generic_stub_call, eq, t0, Operand(zero_reg));
+ __ JumpIfSmi(a2, &generic_stub_call);
__ GetObjectType(a2, a3, t0);
__ Branch(&generic_stub_call, ne, t0, Operand(MAP_TYPE));
@@ -3285,12 +3148,7 @@ MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) {
// a2: initial map
// t7: undefined
__ lbu(a3, FieldMemOperand(a2, Map::kInstanceSizeOffset));
- __ AllocateInNewSpace(a3,
- t4,
- t5,
- t6,
- &generic_stub_call,
- SIZE_IN_WORDS);
+ __ AllocateInNewSpace(a3, t4, t5, t6, &generic_stub_call, SIZE_IN_WORDS);
// Allocated the JSObject, now initialize the fields. Map is set to initial
// map and properties and elements are set to empty fixed array.
@@ -3325,7 +3183,7 @@ MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) {
// t7: undefined
// Fill the initialized properties with a constant value or a passed argument
// depending on the this.x = ...; assignment in the function.
- SharedFunctionInfo* shared = function->shared();
+ Handle<SharedFunctionInfo> shared(function->shared());
for (int i = 0; i < shared->this_property_assignments_count(); i++) {
if (shared->IsThisPropertyAssignmentArgument(i)) {
Label not_passed, next;
@@ -3457,6 +3315,7 @@ static bool IsElementTypeSigned(ElementsKind elements_kind) {
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
+ case FAST_SMI_ONLY_ELEMENTS:
case FAST_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
@@ -3553,6 +3412,7 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray(
}
break;
case FAST_ELEMENTS:
+ case FAST_SMI_ONLY_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
@@ -3795,9 +3655,9 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray(
__ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
__ bind(&miss_force_generic);
- Code* stub = masm->isolate()->builtins()->builtin(
- Builtins::kKeyedLoadIC_MissForceGeneric);
- __ Jump(Handle<Code>(stub), RelocInfo::CODE_TARGET);
+ Handle<Code> stub =
+ masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
+ __ Jump(stub, RelocInfo::CODE_TARGET);
}
@@ -3828,7 +3688,6 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
__ lw(a3, FieldMemOperand(receiver, JSObject::kElementsOffset));
// Check that the index is in range.
- __ SmiUntag(t0, key);
__ lw(t1, FieldMemOperand(a3, ExternalArray::kLengthOffset));
// Unsigned comparison catches both negative and too-large values.
__ Branch(&miss_force_generic, Ugreater_equal, key, Operand(t1));
@@ -3836,7 +3695,6 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
// Handle both smis and HeapNumbers in the fast path. Go to the
// runtime for all other kinds of values.
// a3: external array.
- // t0: key (integer).
if (elements_kind == EXTERNAL_PIXEL_ELEMENTS) {
// Double to pixel conversion is only implemented in the runtime for now.
@@ -3848,7 +3706,6 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
__ lw(a3, FieldMemOperand(a3, ExternalArray::kExternalPointerOffset));
// a3: base pointer of external storage.
- // t0: key (integer).
// t1: value (integer).
switch (elements_kind) {
@@ -3865,33 +3722,36 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
__ mov(v0, t1); // Value is in range 0..255.
__ bind(&done);
__ mov(t1, v0);
- __ addu(t8, a3, t0);
+
+ __ srl(t8, key, 1);
+ __ addu(t8, a3, t8);
__ sb(t1, MemOperand(t8, 0));
}
break;
case EXTERNAL_BYTE_ELEMENTS:
case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- __ addu(t8, a3, t0);
+ __ srl(t8, key, 1);
+ __ addu(t8, a3, t8);
__ sb(t1, MemOperand(t8, 0));
break;
case EXTERNAL_SHORT_ELEMENTS:
case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- __ sll(t8, t0, 1);
- __ addu(t8, a3, t8);
+ __ addu(t8, a3, key);
__ sh(t1, MemOperand(t8, 0));
break;
case EXTERNAL_INT_ELEMENTS:
case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- __ sll(t8, t0, 2);
+ __ sll(t8, key, 1);
__ addu(t8, a3, t8);
__ sw(t1, MemOperand(t8, 0));
break;
case EXTERNAL_FLOAT_ELEMENTS:
// Perform int-to-float conversion and store to memory.
+ __ SmiUntag(t0, key);
StoreIntAsFloat(masm, a3, t0, t1, t2, t3, t4);
break;
case EXTERNAL_DOUBLE_ELEMENTS:
- __ sll(t8, t0, 3);
+ __ sll(t8, key, 2);
__ addu(a3, a3, t8);
// a3: effective address of the double element
FloatingPointHelper::Destination destination;
@@ -3913,6 +3773,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
}
break;
case FAST_ELEMENTS:
+ case FAST_SMI_ONLY_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
@@ -3921,12 +3782,11 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
}
// Entry registers are intact, a0 holds the value which is the return value.
- __ mov(v0, value);
+ __ mov(v0, a0);
__ Ret();
if (elements_kind != EXTERNAL_PIXEL_ELEMENTS) {
// a3: external array.
- // t0: index (integer).
__ bind(&check_heap_number);
__ GetObjectType(value, t1, t2);
__ Branch(&slow, ne, t2, Operand(HEAP_NUMBER_TYPE));
@@ -3934,7 +3794,6 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
__ lw(a3, FieldMemOperand(a3, ExternalArray::kExternalPointerOffset));
// a3: base pointer of external storage.
- // t0: key (integer).
// The WebGL specification leaves the behavior of storing NaN and
// +/-Infinity into integer arrays basically undefined. For more
@@ -3947,11 +3806,11 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
__ cvt_s_d(f0, f0);
- __ sll(t8, t0, 2);
+ __ sll(t8, key, 1);
__ addu(t8, a3, t8);
__ swc1(f0, MemOperand(t8, 0));
} else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
- __ sll(t8, t0, 3);
+ __ sll(t8, key, 2);
__ addu(t8, a3, t8);
__ sdc1(f0, MemOperand(t8, 0));
} else {
@@ -3960,18 +3819,18 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
switch (elements_kind) {
case EXTERNAL_BYTE_ELEMENTS:
case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- __ addu(t8, a3, t0);
+ __ srl(t8, key, 1);
+ __ addu(t8, a3, t8);
__ sb(t3, MemOperand(t8, 0));
break;
case EXTERNAL_SHORT_ELEMENTS:
case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- __ sll(t8, t0, 1);
- __ addu(t8, a3, t8);
+ __ addu(t8, a3, key);
__ sh(t3, MemOperand(t8, 0));
break;
case EXTERNAL_INT_ELEMENTS:
case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- __ sll(t8, t0, 2);
+ __ sll(t8, key, 1);
__ addu(t8, a3, t8);
__ sw(t3, MemOperand(t8, 0));
break;
@@ -3979,6 +3838,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
+ case FAST_SMI_ONLY_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
@@ -3989,7 +3849,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
// Entry registers are intact, a0 holds the value
// which is the return value.
- __ mov(v0, value);
+ __ mov(v0, a0);
__ Ret();
} else {
// FPU is not available, do manual conversions.
@@ -4044,13 +3904,13 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
__ or_(t3, t7, t6);
__ bind(&done);
- __ sll(t9, a1, 2);
+ __ sll(t9, key, 1);
__ addu(t9, a2, t9);
__ sw(t3, MemOperand(t9, 0));
// Entry registers are intact, a0 holds the value which is the return
// value.
- __ mov(v0, value);
+ __ mov(v0, a0);
__ Ret();
__ bind(&nan_or_infinity_or_zero);
@@ -4068,6 +3928,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
// t8: effective address of destination element.
__ sw(t4, MemOperand(t8, 0));
__ sw(t3, MemOperand(t8, Register::kSizeInBytes));
+ __ mov(v0, a0);
__ Ret();
} else {
bool is_signed_type = IsElementTypeSigned(elements_kind);
@@ -4130,18 +3991,18 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
switch (elements_kind) {
case EXTERNAL_BYTE_ELEMENTS:
case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- __ addu(t8, a3, t0);
+ __ srl(t8, key, 1);
+ __ addu(t8, a3, t8);
__ sb(t3, MemOperand(t8, 0));
break;
case EXTERNAL_SHORT_ELEMENTS:
case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- __ sll(t8, t0, 1);
- __ addu(t8, a3, t8);
+ __ addu(t8, a3, key);
__ sh(t3, MemOperand(t8, 0));
break;
case EXTERNAL_INT_ELEMENTS:
case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- __ sll(t8, t0, 2);
+ __ sll(t8, key, 1);
__ addu(t8, a3, t8);
__ sw(t3, MemOperand(t8, 0));
break;
@@ -4149,6 +4010,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
+ case FAST_SMI_ONLY_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
@@ -4223,9 +4085,9 @@ void KeyedLoadStubCompiler::GenerateLoadFastElement(MacroAssembler* masm) {
__ Ret();
__ bind(&miss_force_generic);
- Code* stub = masm->isolate()->builtins()->builtin(
- Builtins::kKeyedLoadIC_MissForceGeneric);
- __ Jump(Handle<Code>(stub), RelocInfo::CODE_TARGET);
+ Handle<Code> stub =
+ masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
+ __ Jump(stub, RelocInfo::CODE_TARGET);
}
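
Across the GenerateStoreExternalArray hunks above, the SmiUntag of the key was removed, so the key register keeps its tagged smi form (the element index shifted left by one) and the untagging folds into each element-size scale; that is why every shift amount changed by one. The arithmetic, checked standalone:

    #include <cassert>
    #include <cstdint>

    int main() {
      int32_t index = 7;
      int32_t key = index << 1;            // tagged smi, kSmiTagSize == 1

      assert((key >> 1) == index * 1);     // bytes:   srl t8, key, 1
      assert( key       == index * 2);     // shorts:  addu t8, base, key
      assert((key << 1) == index * 4);     // words:   sll t8, key, 1
      assert((key << 2) == index * 8);     // doubles: sll t8, key, 2
      return 0;
    }

Only the float path still untags explicitly (the new SmiUntag(t0, key) before StoreIntAsFloat), since that helper needs the raw integer index.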
@@ -4298,8 +4160,10 @@ void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(
}
-void KeyedStoreStubCompiler::GenerateStoreFastElement(MacroAssembler* masm,
- bool is_js_array) {
+void KeyedStoreStubCompiler::GenerateStoreFastElement(
+ MacroAssembler* masm,
+ bool is_js_array,
+ ElementsKind elements_kind) {
// ----------- S t a t e -------------
// -- a0 : value
// -- a1 : key
@@ -4308,7 +4172,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(MacroAssembler* masm,
// -- a3 : scratch
// -- a4 : scratch (elements)
// -----------------------------------
- Label miss_force_generic;
+ Label miss_force_generic, transition_elements_kind;
Register value_reg = a0;
Register key_reg = a1;
@@ -4342,14 +4206,32 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(MacroAssembler* masm,
// Compare smis.
__ Branch(&miss_force_generic, hs, key_reg, Operand(scratch));
- __ Addu(scratch,
- elements_reg, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
- __ sll(scratch2, key_reg, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(scratch3, scratch2, scratch);
- __ sw(value_reg, MemOperand(scratch3));
- __ RecordWrite(scratch, Operand(scratch2), receiver_reg , elements_reg);
-
+ if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
+ __ JumpIfNotSmi(value_reg, &transition_elements_kind);
+ __ Addu(scratch,
+ elements_reg,
+ Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
+ __ sll(scratch2, key_reg, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(scratch, scratch, scratch2);
+ __ sw(value_reg, MemOperand(scratch));
+ } else {
+ ASSERT(elements_kind == FAST_ELEMENTS);
+ __ Addu(scratch,
+ elements_reg,
+ Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
+ __ sll(scratch2, key_reg, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(scratch, scratch, scratch2);
+ __ sw(value_reg, MemOperand(scratch));
+ __ mov(receiver_reg, value_reg);
+ ASSERT(elements_kind == FAST_ELEMENTS);
+ __ RecordWrite(elements_reg, // Object.
+ scratch, // Address.
+ receiver_reg, // Value.
+ kRAHasNotBeenSaved,
+ kDontSaveFPRegs);
+ }
// value_reg (a0) is preserved.
// Done.
__ Ret();
@@ -4358,6 +4240,10 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(MacroAssembler* masm,
Handle<Code> ic =
masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
__ Jump(ic, RelocInfo::CODE_TARGET);
+
+ __ bind(&transition_elements_kind);
+ Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss();
+ __ Jump(ic_miss, RelocInfo::CODE_TARGET);
}
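
The split above is the point of the new elements_kind parameter: a FAST_SMI_ONLY_ELEMENTS backing store holds only immediates, so the store needs no write barrier, while a non-smi value bails out to an elements-kind transition. A standalone model of that policy, with the tagging scheme simplified to the low bit:

    #include <cassert>
    #include <cstdint>

    typedef intptr_t Object;                              // one tagged word
    inline bool IsSmi(Object v) { return (v & 1) == 0; }  // kSmiTag == 0

    enum ElementsKind { FAST_SMI_ONLY_ELEMENTS, FAST_ELEMENTS };

    static int barrier_hits = 0;
    static void RecordWrite(Object* slot) { ++barrier_hits; }

    // Returns false on the &transition_elements_kind path above.
    bool StoreFastElement(ElementsKind kind, Object* slot, Object value) {
      if (kind == FAST_SMI_ONLY_ELEMENTS) {
        if (!IsSmi(value)) return false;    // JumpIfNotSmi(value_reg, ...)
        *slot = value;                      // plain store, no barrier
        return true;
      }
      *slot = value;
      RecordWrite(slot);                    // RecordWrite(elements_reg, ...)
      return true;
    }

    int main() {
      Object slot = 0;
      assert(StoreFastElement(FAST_SMI_ONLY_ELEMENTS, &slot, 42 << 1));
      assert(barrier_hits == 0);            // smi store skips the barrier
      assert(StoreFastElement(FAST_ELEMENTS, &slot, 0x1001));
      assert(barrier_hits == 1);            // pointer store records it
      return 0;
    }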
@@ -4375,15 +4261,15 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
// -- t2 : scratch (exponent_reg)
// -- t3 : scratch4
// -----------------------------------
- Label miss_force_generic, smi_value, is_nan, maybe_nan, have_double_value;
+ Label miss_force_generic, transition_elements_kind;
Register value_reg = a0;
Register key_reg = a1;
Register receiver_reg = a2;
- Register scratch = a3;
- Register elements_reg = t0;
- Register mantissa_reg = t1;
- Register exponent_reg = t2;
+ Register elements_reg = a3;
+ Register scratch1 = t0;
+ Register scratch2 = t1;
+ Register scratch3 = t2;
Register scratch4 = t3;
// This stub is meant to be tail-jumped to, the receiver must already
@@ -4395,90 +4281,25 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
// Check that the key is within bounds.
if (is_js_array) {
- __ lw(scratch, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
+ __ lw(scratch1, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
} else {
- __ lw(scratch,
+ __ lw(scratch1,
FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
}
// Compare smis, unsigned compare catches both negative and out-of-bound
// indexes.
- __ Branch(&miss_force_generic, hs, key_reg, Operand(scratch));
-
- // Handle smi values specially.
- __ JumpIfSmi(value_reg, &smi_value);
-
- // Ensure that the object is a heap number
- __ CheckMap(value_reg,
- scratch,
- masm->isolate()->factory()->heap_number_map(),
- &miss_force_generic,
- DONT_DO_SMI_CHECK);
+ __ Branch(&miss_force_generic, hs, key_reg, Operand(scratch1));
+
+ __ StoreNumberToDoubleElements(value_reg,
+ key_reg,
+ receiver_reg,
+ elements_reg,
+ scratch1,
+ scratch2,
+ scratch3,
+ scratch4,
+ &transition_elements_kind);
- // Check for nan: all NaN values have a value greater (signed) than 0x7ff00000
- // in the exponent.
- __ li(scratch, Operand(kNaNOrInfinityLowerBoundUpper32));
- __ lw(exponent_reg, FieldMemOperand(value_reg, HeapNumber::kExponentOffset));
- __ Branch(&maybe_nan, ge, exponent_reg, Operand(scratch));
-
- __ lw(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
-
- __ bind(&have_double_value);
- __ sll(scratch4, key_reg, kDoubleSizeLog2 - kSmiTagSize);
- __ Addu(scratch, elements_reg, Operand(scratch4));
- __ sw(mantissa_reg, FieldMemOperand(scratch, FixedDoubleArray::kHeaderSize));
- uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
- __ sw(exponent_reg, FieldMemOperand(scratch, offset));
- __ Ret(USE_DELAY_SLOT);
- __ mov(v0, value_reg); // In delay slot.
-
- __ bind(&maybe_nan);
- // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
- // it's an Infinity, and the non-NaN code path applies.
- __ li(scratch, Operand(kNaNOrInfinityLowerBoundUpper32));
- __ Branch(&is_nan, gt, exponent_reg, Operand(scratch));
- __ lw(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
- __ Branch(&have_double_value, eq, mantissa_reg, Operand(zero_reg));
-
- __ bind(&is_nan);
- // Load canonical NaN for storing into the double array.
- uint64_t nan_int64 = BitCast<uint64_t>(
- FixedDoubleArray::canonical_not_the_hole_nan_as_double());
- __ li(mantissa_reg, Operand(static_cast<uint32_t>(nan_int64)));
- __ li(exponent_reg, Operand(static_cast<uint32_t>(nan_int64 >> 32)));
- __ jmp(&have_double_value);
-
- __ bind(&smi_value);
- __ Addu(scratch, elements_reg,
- Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
- __ sll(scratch4, key_reg, kDoubleSizeLog2 - kSmiTagSize);
- __ Addu(scratch, scratch, scratch4);
- // scratch is now effective address of the double element
-
- FloatingPointHelper::Destination destination;
- if (CpuFeatures::IsSupported(FPU)) {
- destination = FloatingPointHelper::kFPURegisters;
- } else {
- destination = FloatingPointHelper::kCoreRegisters;
- }
-
- Register untagged_value = receiver_reg;
- __ SmiUntag(untagged_value, value_reg);
- FloatingPointHelper::ConvertIntToDouble(
- masm,
- untagged_value,
- destination,
- f0,
- mantissa_reg,
- exponent_reg,
- scratch4,
- f2);
- if (destination == FloatingPointHelper::kFPURegisters) {
- CpuFeatures::Scope scope(FPU);
- __ sdc1(f0, MemOperand(scratch, 0));
- } else {
- __ sw(mantissa_reg, MemOperand(scratch, 0));
- __ sw(exponent_reg, MemOperand(scratch, Register::kSizeInBytes));
- }
__ Ret(USE_DELAY_SLOT);
__ mov(v0, value_reg); // In delay slot.
@@ -4487,6 +4308,10 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
Handle<Code> ic =
masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
__ Jump(ic, RelocInfo::CODE_TARGET);
+
+ __ bind(&transition_elements_kind);
+ Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss();
+ __ Jump(ic_miss, RelocInfo::CODE_TARGET);
}
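
The large deletion above moves the smi, heap-number, and NaN handling into the shared MacroAssembler::StoreNumberToDoubleElements helper, plus the new transition_elements_kind bailout. The invariant the deleted code was preserving is worth keeping in view: every NaN written into a double array is rewritten to one canonical bit pattern, so a different NaN encoding can safely represent the hole. A standalone sketch of that canonicalization (the specific bit pattern here is illustrative):

    #include <cassert>
    #include <cstdint>
    #include <cstring>
    #include <limits>

    // A NaN has all exponent bits set and a nonzero mantissa.
    static bool IsNaNBits(uint64_t bits) {
      const uint64_t kExponent = 0x7FF0000000000000ULL;
      const uint64_t kMantissa = 0x000FFFFFFFFFFFFFULL;
      return (bits & kExponent) == kExponent && (bits & kMantissa) != 0;
    }

    // Mirrors the deleted is_nan path: collapse every NaN to one pattern,
    // leaving other NaN encodings free to represent the hole.
    static uint64_t CanonicalizeForDoubleArray(double value) {
      const uint64_t kCanonicalNaN = 0x7FF8000000000000ULL;  // a quiet NaN
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof(bits));
      return IsNaNBits(bits) ? kCanonicalNaN : bits;
    }

    int main() {
      double nan = std::numeric_limits<double>::quiet_NaN();
      assert(CanonicalizeForDoubleArray(nan) == 0x7FF8000000000000ULL);
      assert(CanonicalizeForDoubleArray(1.5) != 0x7FF8000000000000ULL);
      return 0;
    }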
diff --git a/deps/v8/src/mirror-debugger.js b/deps/v8/src/mirror-debugger.js
index e3f3c48bb..0944b719f 100644
--- a/deps/v8/src/mirror-debugger.js
+++ b/deps/v8/src/mirror-debugger.js
@@ -225,7 +225,7 @@ ScopeType = { Global: 0,
*/
function Mirror(type) {
this.type_ = type;
-};
+}
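
This and the long run of one-character hunks that follow apply a single lint rule: function Mirror(type) {...} is a function declaration, which takes no trailing semicolon, while Mirror.prototype.foo = function() {...} is an assignment statement whose function-expression right-hand side does need one. Hence "};" becomes "}" after declarations and "}" becomes "};" after prototype assignments throughout this file.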
Mirror.prototype.type = function() {
@@ -239,7 +239,7 @@ Mirror.prototype.type = function() {
*/
Mirror.prototype.isValue = function() {
return this instanceof ValueMirror;
-}
+};
/**
@@ -248,7 +248,7 @@ Mirror.prototype.isValue = function() {
*/
Mirror.prototype.isUndefined = function() {
return this instanceof UndefinedMirror;
-}
+};
/**
@@ -257,7 +257,7 @@ Mirror.prototype.isUndefined = function() {
*/
Mirror.prototype.isNull = function() {
return this instanceof NullMirror;
-}
+};
/**
@@ -266,7 +266,7 @@ Mirror.prototype.isNull = function() {
*/
Mirror.prototype.isBoolean = function() {
return this instanceof BooleanMirror;
-}
+};
/**
@@ -275,7 +275,7 @@ Mirror.prototype.isBoolean = function() {
*/
Mirror.prototype.isNumber = function() {
return this instanceof NumberMirror;
-}
+};
/**
@@ -284,7 +284,7 @@ Mirror.prototype.isNumber = function() {
*/
Mirror.prototype.isString = function() {
return this instanceof StringMirror;
-}
+};
/**
@@ -293,7 +293,7 @@ Mirror.prototype.isString = function() {
*/
Mirror.prototype.isObject = function() {
return this instanceof ObjectMirror;
-}
+};
/**
@@ -302,7 +302,7 @@ Mirror.prototype.isObject = function() {
*/
Mirror.prototype.isFunction = function() {
return this instanceof FunctionMirror;
-}
+};
/**
@@ -311,7 +311,7 @@ Mirror.prototype.isFunction = function() {
*/
Mirror.prototype.isUnresolvedFunction = function() {
return this instanceof UnresolvedFunctionMirror;
-}
+};
/**
@@ -320,7 +320,7 @@ Mirror.prototype.isUnresolvedFunction = function() {
*/
Mirror.prototype.isArray = function() {
return this instanceof ArrayMirror;
-}
+};
/**
@@ -329,7 +329,7 @@ Mirror.prototype.isArray = function() {
*/
Mirror.prototype.isDate = function() {
return this instanceof DateMirror;
-}
+};
/**
@@ -338,7 +338,7 @@ Mirror.prototype.isDate = function() {
*/
Mirror.prototype.isRegExp = function() {
return this instanceof RegExpMirror;
-}
+};
/**
@@ -347,7 +347,7 @@ Mirror.prototype.isRegExp = function() {
*/
Mirror.prototype.isError = function() {
return this instanceof ErrorMirror;
-}
+};
/**
@@ -356,7 +356,7 @@ Mirror.prototype.isError = function() {
*/
Mirror.prototype.isProperty = function() {
return this instanceof PropertyMirror;
-}
+};
/**
@@ -365,7 +365,7 @@ Mirror.prototype.isProperty = function() {
*/
Mirror.prototype.isFrame = function() {
return this instanceof FrameMirror;
-}
+};
/**
@@ -374,7 +374,7 @@ Mirror.prototype.isFrame = function() {
*/
Mirror.prototype.isScript = function() {
return this instanceof ScriptMirror;
-}
+};
/**
@@ -383,7 +383,7 @@ Mirror.prototype.isScript = function() {
*/
Mirror.prototype.isContext = function() {
return this instanceof ContextMirror;
-}
+};
/**
@@ -392,7 +392,7 @@ Mirror.prototype.isContext = function() {
*/
Mirror.prototype.isScope = function() {
return this instanceof ScopeMirror;
-}
+};
/**
@@ -400,7 +400,7 @@ Mirror.prototype.isScope = function() {
*/
Mirror.prototype.allocateHandle_ = function() {
this.handle_ = next_handle_++;
-}
+};
/**
@@ -409,13 +409,13 @@ Mirror.prototype.allocateHandle_ = function() {
*/
Mirror.prototype.allocateTransientHandle_ = function() {
this.handle_ = next_transient_handle_--;
-}
+};
Mirror.prototype.toText = function() {
// Simple toText which is used when there is no specialization in a subclass.
return "#<" + this.constructor.name + ">";
-}
+};
/**
@@ -480,7 +480,7 @@ inherits(UndefinedMirror, ValueMirror);
UndefinedMirror.prototype.toText = function() {
return 'undefined';
-}
+};
/**
@@ -496,7 +496,7 @@ inherits(NullMirror, ValueMirror);
NullMirror.prototype.toText = function() {
return 'null';
-}
+};
/**
@@ -513,7 +513,7 @@ inherits(BooleanMirror, ValueMirror);
BooleanMirror.prototype.toText = function() {
return this.value_ ? 'true' : 'false';
-}
+};
/**
@@ -530,7 +530,7 @@ inherits(NumberMirror, ValueMirror);
NumberMirror.prototype.toText = function() {
return %NumberToString(this.value_);
-}
+};
/**
@@ -555,11 +555,11 @@ StringMirror.prototype.getTruncatedValue = function(maxLength) {
'... (length: ' + this.length() + ')';
}
return this.value_;
-}
+};
StringMirror.prototype.toText = function() {
return this.getTruncatedValue(kMaxProtocolStringLength);
-}
+};
/**
@@ -898,7 +898,7 @@ FunctionMirror.prototype.constructedBy = function(opt_max_instances) {
FunctionMirror.prototype.toText = function() {
return this.source();
-}
+};
/**
@@ -951,7 +951,7 @@ UnresolvedFunctionMirror.prototype.inferredName = function() {
UnresolvedFunctionMirror.prototype.propertyNames = function(kind, limit) {
return [];
-}
+};
/**
@@ -971,7 +971,8 @@ ArrayMirror.prototype.length = function() {
};
-ArrayMirror.prototype.indexedPropertiesFromRange = function(opt_from_index, opt_to_index) {
+ArrayMirror.prototype.indexedPropertiesFromRange = function(opt_from_index,
+ opt_to_index) {
var from_index = opt_from_index || 0;
var to_index = opt_to_index || this.length() - 1;
if (from_index > to_index) return new Array();
@@ -987,7 +988,7 @@ ArrayMirror.prototype.indexedPropertiesFromRange = function(opt_from_index, opt_
values[i - from_index] = value;
}
return values;
-}
+};
/**
@@ -1005,7 +1006,7 @@ inherits(DateMirror, ObjectMirror);
DateMirror.prototype.toText = function() {
var s = JSON.stringify(this.value_);
return s.substring(1, s.length - 1); // cut quotes
-}
+};
/**
@@ -1059,7 +1060,7 @@ RegExpMirror.prototype.multiline = function() {
RegExpMirror.prototype.toText = function() {
// Simple toText which is used when there is no specialization in a subclass.
return "/" + this.source() + "/";
-}
+};
/**
@@ -1087,12 +1088,12 @@ ErrorMirror.prototype.toText = function() {
// Use the same text representation as in messages.js.
var str;
try {
- str = %_CallFunction(this.value_, builtins.errorToString);
+ str = %_CallFunction(this.value_, builtins.ErrorToString);
} catch (e) {
str = '#<Error>';
}
return str;
-}
+};
/**
@@ -1110,7 +1111,7 @@ function PropertyMirror(mirror, name, details) {
this.value_ = details[0];
this.details_ = details[1];
if (details.length > 2) {
- this.exception_ = details[2]
+ this.exception_ = details[2];
this.getter_ = details[3];
this.setter_ = details[4];
}
@@ -1120,22 +1121,22 @@ inherits(PropertyMirror, Mirror);
PropertyMirror.prototype.isReadOnly = function() {
return (this.attributes() & PropertyAttribute.ReadOnly) != 0;
-}
+};
PropertyMirror.prototype.isEnum = function() {
return (this.attributes() & PropertyAttribute.DontEnum) == 0;
-}
+};
PropertyMirror.prototype.canDelete = function() {
return (this.attributes() & PropertyAttribute.DontDelete) == 0;
-}
+};
PropertyMirror.prototype.name = function() {
return this.name_;
-}
+};
PropertyMirror.prototype.isIndexed = function() {
@@ -1145,12 +1146,12 @@ PropertyMirror.prototype.isIndexed = function() {
}
}
return true;
-}
+};
PropertyMirror.prototype.value = function() {
return MakeMirror(this.value_, false);
-}
+};
/**
@@ -1159,22 +1160,22 @@ PropertyMirror.prototype.value = function() {
*/
PropertyMirror.prototype.isException = function() {
return this.exception_ ? true : false;
-}
+};
PropertyMirror.prototype.attributes = function() {
return %DebugPropertyAttributesFromDetails(this.details_);
-}
+};
PropertyMirror.prototype.propertyType = function() {
return %DebugPropertyTypeFromDetails(this.details_);
-}
+};
PropertyMirror.prototype.insertionIndex = function() {
return %DebugPropertyIndexFromDetails(this.details_);
-}
+};
/**
@@ -1183,7 +1184,7 @@ PropertyMirror.prototype.insertionIndex = function() {
*/
PropertyMirror.prototype.hasGetter = function() {
return this.getter_ ? true : false;
-}
+};
/**
@@ -1192,7 +1193,7 @@ PropertyMirror.prototype.hasGetter = function() {
*/
PropertyMirror.prototype.hasSetter = function() {
return this.setter_ ? true : false;
-}
+};
/**
@@ -1206,7 +1207,7 @@ PropertyMirror.prototype.getter = function() {
} else {
return GetUndefinedMirror();
}
-}
+};
/**
@@ -1220,7 +1221,7 @@ PropertyMirror.prototype.setter = function() {
} else {
return GetUndefinedMirror();
}
-}
+};
/**
@@ -1233,7 +1234,7 @@ PropertyMirror.prototype.isNative = function() {
return (this.propertyType() == PropertyType.Interceptor) ||
((this.propertyType() == PropertyType.Callbacks) &&
!this.hasGetter() && !this.hasSetter());
-}
+};
const kFrameDetailsFrameIdIndex = 0;
@@ -1284,63 +1285,63 @@ function FrameDetails(break_id, index) {
FrameDetails.prototype.frameId = function() {
%CheckExecutionState(this.break_id_);
return this.details_[kFrameDetailsFrameIdIndex];
-}
+};
FrameDetails.prototype.receiver = function() {
%CheckExecutionState(this.break_id_);
return this.details_[kFrameDetailsReceiverIndex];
-}
+};
FrameDetails.prototype.func = function() {
%CheckExecutionState(this.break_id_);
return this.details_[kFrameDetailsFunctionIndex];
-}
+};
FrameDetails.prototype.isConstructCall = function() {
%CheckExecutionState(this.break_id_);
return this.details_[kFrameDetailsConstructCallIndex];
-}
+};
FrameDetails.prototype.isAtReturn = function() {
%CheckExecutionState(this.break_id_);
return this.details_[kFrameDetailsAtReturnIndex];
-}
+};
FrameDetails.prototype.isDebuggerFrame = function() {
%CheckExecutionState(this.break_id_);
var f = kFrameDetailsFlagDebuggerFrameMask;
return (this.details_[kFrameDetailsFlagsIndex] & f) == f;
-}
+};
FrameDetails.prototype.isOptimizedFrame = function() {
%CheckExecutionState(this.break_id_);
var f = kFrameDetailsFlagOptimizedFrameMask;
return (this.details_[kFrameDetailsFlagsIndex] & f) == f;
-}
+};
FrameDetails.prototype.isInlinedFrame = function() {
return this.inlinedFrameIndex() > 0;
-}
+};
FrameDetails.prototype.inlinedFrameIndex = function() {
%CheckExecutionState(this.break_id_);
var f = kFrameDetailsFlagInlinedFrameIndexMask;
- return (this.details_[kFrameDetailsFlagsIndex] & f) >> 2
-}
+ return (this.details_[kFrameDetailsFlagsIndex] & f) >> 2;
+};
FrameDetails.prototype.argumentCount = function() {
%CheckExecutionState(this.break_id_);
return this.details_[kFrameDetailsArgumentCountIndex];
-}
+};
FrameDetails.prototype.argumentName = function(index) {
@@ -1348,9 +1349,9 @@ FrameDetails.prototype.argumentName = function(index) {
if (index >= 0 && index < this.argumentCount()) {
return this.details_[kFrameDetailsFirstDynamicIndex +
index * kFrameDetailsNameValueSize +
- kFrameDetailsNameIndex]
+ kFrameDetailsNameIndex];
}
-}
+};
FrameDetails.prototype.argumentValue = function(index) {
@@ -1358,45 +1359,45 @@ FrameDetails.prototype.argumentValue = function(index) {
if (index >= 0 && index < this.argumentCount()) {
return this.details_[kFrameDetailsFirstDynamicIndex +
index * kFrameDetailsNameValueSize +
- kFrameDetailsValueIndex]
+ kFrameDetailsValueIndex];
}
-}
+};
FrameDetails.prototype.localCount = function() {
%CheckExecutionState(this.break_id_);
return this.details_[kFrameDetailsLocalCountIndex];
-}
+};
FrameDetails.prototype.sourcePosition = function() {
%CheckExecutionState(this.break_id_);
return this.details_[kFrameDetailsSourcePositionIndex];
-}
+};
FrameDetails.prototype.localName = function(index) {
%CheckExecutionState(this.break_id_);
if (index >= 0 && index < this.localCount()) {
var locals_offset = kFrameDetailsFirstDynamicIndex +
- this.argumentCount() * kFrameDetailsNameValueSize
+ this.argumentCount() * kFrameDetailsNameValueSize;
return this.details_[locals_offset +
index * kFrameDetailsNameValueSize +
- kFrameDetailsNameIndex]
+ kFrameDetailsNameIndex];
}
-}
+};
FrameDetails.prototype.localValue = function(index) {
%CheckExecutionState(this.break_id_);
if (index >= 0 && index < this.localCount()) {
var locals_offset = kFrameDetailsFirstDynamicIndex +
- this.argumentCount() * kFrameDetailsNameValueSize
+ this.argumentCount() * kFrameDetailsNameValueSize;
return this.details_[locals_offset +
index * kFrameDetailsNameValueSize +
- kFrameDetailsValueIndex]
+ kFrameDetailsValueIndex];
}
-}
+};
FrameDetails.prototype.returnValue = function() {
@@ -1407,12 +1408,12 @@ FrameDetails.prototype.returnValue = function() {
if (this.details_[kFrameDetailsAtReturnIndex]) {
return this.details_[return_value_offset];
}
-}
+};
FrameDetails.prototype.scopeCount = function() {
return %GetScopeCount(this.break_id_, this.frameId());
-}
+};
/**
@@ -1575,7 +1576,8 @@ FrameMirror.prototype.scope = function(index) {
};
-FrameMirror.prototype.evaluate = function(source, disable_break, opt_context_object) {
+FrameMirror.prototype.evaluate = function(source, disable_break,
+ opt_context_object) {
var result = %DebugEvaluate(this.break_id_,
this.details_.frameId(),
this.details_.inlinedFrameIndex(),
@@ -1599,7 +1601,8 @@ FrameMirror.prototype.invocationText = function() {
result += '[debugger]';
} else {
// If the receiver has a className which is 'global' don't display it.
- var display_receiver = !receiver.className || receiver.className() != 'global';
+ var display_receiver =
+ !receiver.className || (receiver.className() != 'global');
if (display_receiver) {
result += receiver.toText();
}
@@ -1661,7 +1664,7 @@ FrameMirror.prototype.invocationText = function() {
}
return result;
-}
+};
FrameMirror.prototype.sourceAndPositionText = function() {
@@ -1693,13 +1696,13 @@ FrameMirror.prototype.sourceAndPositionText = function() {
}
return result;
-}
+};
FrameMirror.prototype.localsText = function() {
// Format local variables.
var result = '';
- var locals_count = this.localCount()
+ var locals_count = this.localCount();
if (locals_count > 0) {
for (var i = 0; i < locals_count; ++i) {
result += ' var ';
@@ -1711,7 +1714,7 @@ FrameMirror.prototype.localsText = function() {
}
return result;
-}
+};
FrameMirror.prototype.toText = function(opt_locals) {
@@ -1726,7 +1729,7 @@ FrameMirror.prototype.toText = function(opt_locals) {
result += this.localsText();
}
return result;
-}
+};
const kScopeDetailsTypeIndex = 0;
@@ -1744,13 +1747,13 @@ function ScopeDetails(frame, index) {
ScopeDetails.prototype.type = function() {
%CheckExecutionState(this.break_id_);
return this.details_[kScopeDetailsTypeIndex];
-}
+};
ScopeDetails.prototype.object = function() {
%CheckExecutionState(this.break_id_);
return this.details_[kScopeDetailsObjectIndex];
-}
+};
/**
@@ -1862,12 +1865,12 @@ ScriptMirror.prototype.lineCount = function() {
ScriptMirror.prototype.locationFromPosition = function(
position, include_resource_offset) {
return this.script_.locationFromPosition(position, include_resource_offset);
-}
+};
ScriptMirror.prototype.sourceSlice = function (opt_from_line, opt_to_line) {
return this.script_.sourceSlice(opt_from_line, opt_to_line);
-}
+};
ScriptMirror.prototype.context = function() {
@@ -1907,7 +1910,7 @@ ScriptMirror.prototype.toText = function() {
}
result += ')';
return result;
-}
+};
/**
@@ -1965,7 +1968,7 @@ function JSONProtocolSerializer(details, options) {
*/
JSONProtocolSerializer.prototype.serializeReference = function(mirror) {
return this.serialize_(mirror, true, true);
-}
+};
/**
@@ -1978,7 +1981,7 @@ JSONProtocolSerializer.prototype.serializeReference = function(mirror) {
JSONProtocolSerializer.prototype.serializeValue = function(mirror) {
var json = this.serialize_(mirror, false, true);
return json;
-}
+};
/**
@@ -2000,17 +2003,17 @@ JSONProtocolSerializer.prototype.serializeReferencedObjects = function() {
}
return content;
-}
+};
JSONProtocolSerializer.prototype.includeSource_ = function() {
return this.options_ && this.options_.includeSource;
-}
+};
JSONProtocolSerializer.prototype.inlineRefs_ = function() {
return this.options_ && this.options_.inlineRefs;
-}
+};
JSONProtocolSerializer.prototype.maxStringLength_ = function() {
@@ -2019,7 +2022,7 @@ JSONProtocolSerializer.prototype.maxStringLength_ = function() {
return kMaxProtocolStringLength;
}
return this.options_.maxStringLength;
-}
+};
JSONProtocolSerializer.prototype.add_ = function(mirror) {
@@ -2032,7 +2035,7 @@ JSONProtocolSerializer.prototype.add_ = function(mirror) {
// Add the mirror to the list of mirrors to be serialized.
this.mirrors_.push(mirror);
-}
+};
/**
@@ -2139,7 +2142,7 @@ JSONProtocolSerializer.prototype.serialize_ = function(mirror, reference,
break;
case PROPERTY_TYPE:
- throw new Error('PropertyMirror cannot be serialized independeltly')
+      throw new Error('PropertyMirror cannot be serialized independently');
break;
case FRAME_TYPE:
@@ -2179,7 +2182,7 @@ JSONProtocolSerializer.prototype.serialize_ = function(mirror, reference,
mirror.evalFromScript()) {
content.evalFromScript =
this.serializeReference(mirror.evalFromScript());
- var evalFromLocation = mirror.evalFromLocation()
+ var evalFromLocation = mirror.evalFromLocation();
if (evalFromLocation) {
content.evalFromLocation = { line: evalFromLocation.line,
column: evalFromLocation.column };
@@ -2203,7 +2206,7 @@ JSONProtocolSerializer.prototype.serialize_ = function(mirror, reference,
// Create and return the JSON string.
return content;
-}
+};
/**
@@ -2278,7 +2281,7 @@ JSONProtocolSerializer.prototype.serializeObject_ = function(mirror, content,
}
}
content.properties = p;
-}
+};
/**
@@ -2342,7 +2345,7 @@ JSONProtocolSerializer.prototype.serializeProperty_ = function(propertyMirror) {
result.ref = propertyValue.handle();
}
return result;
-}
+};
JSONProtocolSerializer.prototype.serializeFrame_ = function(mirror, content) {
@@ -2362,7 +2365,7 @@ JSONProtocolSerializer.prototype.serializeFrame_ = function(mirror, content) {
var x = new Array(mirror.argumentCount());
for (var i = 0; i < mirror.argumentCount(); i++) {
var arg = {};
- var argument_name = mirror.argumentName(i)
+ var argument_name = mirror.argumentName(i);
if (argument_name) {
arg.name = argument_name;
}
@@ -2392,7 +2395,7 @@ JSONProtocolSerializer.prototype.serializeFrame_ = function(mirror, content) {
index: i
});
}
-}
+};
JSONProtocolSerializer.prototype.serializeScope_ = function(mirror, content) {
@@ -2402,7 +2405,7 @@ JSONProtocolSerializer.prototype.serializeScope_ = function(mirror, content) {
content.object = this.inlineRefs_() ?
this.serializeValue(mirror.scopeObject()) :
this.serializeReference(mirror.scopeObject());
-}
+};
/**
diff --git a/deps/v8/src/mksnapshot.cc b/deps/v8/src/mksnapshot.cc
index a791dbba2..bc0c2fc5b 100644
--- a/deps/v8/src/mksnapshot.cc
+++ b/deps/v8/src/mksnapshot.cc
@@ -312,7 +312,7 @@ int main(int argc, char** argv) {
}
// If we don't do this then we end up with a stray root pointing at the
// context even after we have disposed of the context.
- HEAP->CollectAllGarbage(true);
+ HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
i::Object* raw_context = *(v8::Utils::OpenHandle(*context));
context.Dispose();
CppByteSink sink(argv[1]);
diff --git a/deps/v8/src/objects-debug.cc b/deps/v8/src/objects-debug.cc
index 8de7162ab..64bda9473 100644
--- a/deps/v8/src/objects-debug.cc
+++ b/deps/v8/src/objects-debug.cc
@@ -94,6 +94,9 @@ void HeapObject::HeapObjectVerify() {
case BYTE_ARRAY_TYPE:
ByteArray::cast(this)->ByteArrayVerify();
break;
+ case FREE_SPACE_TYPE:
+ FreeSpace::cast(this)->FreeSpaceVerify();
+ break;
case EXTERNAL_PIXEL_ARRAY_TYPE:
ExternalPixelArray::cast(this)->ExternalPixelArrayVerify();
break;
@@ -153,6 +156,12 @@ void HeapObject::HeapObjectVerify() {
case JS_ARRAY_TYPE:
JSArray::cast(this)->JSArrayVerify();
break;
+ case JS_SET_TYPE:
+ JSSet::cast(this)->JSSetVerify();
+ break;
+ case JS_MAP_TYPE:
+ JSMap::cast(this)->JSMapVerify();
+ break;
case JS_WEAK_MAP_TYPE:
JSWeakMap::cast(this)->JSWeakMapVerify();
break;
@@ -207,6 +216,11 @@ void ByteArray::ByteArrayVerify() {
}
+void FreeSpace::FreeSpaceVerify() {
+ ASSERT(IsFreeSpace());
+}
+
+
void ExternalPixelArray::ExternalPixelArrayVerify() {
ASSERT(IsExternalPixelArray());
}
@@ -255,12 +269,18 @@ void ExternalDoubleArray::ExternalDoubleArrayVerify() {
void JSObject::JSObjectVerify() {
VerifyHeapPointer(properties());
VerifyHeapPointer(elements());
+
+ if (GetElementsKind() == NON_STRICT_ARGUMENTS_ELEMENTS) {
+ ASSERT(this->elements()->IsFixedArray());
+ ASSERT(this->elements()->length() >= 2);
+ }
+
if (HasFastProperties()) {
CHECK_EQ(map()->unused_property_fields(),
(map()->inobject_properties() + properties()->length() -
map()->NextFreePropertyIndex()));
}
- ASSERT_EQ(map()->has_fast_elements(),
+ ASSERT_EQ((map()->has_fast_elements() || map()->has_fast_smi_only_elements()),
(elements()->map() == GetHeap()->fixed_array_map() ||
elements()->map() == GetHeap()->fixed_cow_array_map()));
ASSERT(map()->has_fast_elements() == HasFastElements());
@@ -322,7 +342,8 @@ void FixedDoubleArray::FixedDoubleArrayVerify() {
double value = get_scalar(i);
ASSERT(!isnan(value) ||
(BitCast<uint64_t>(value) ==
- BitCast<uint64_t>(canonical_not_the_hole_nan_as_double())));
+ BitCast<uint64_t>(canonical_not_the_hole_nan_as_double())) ||
+ ((BitCast<uint64_t>(value) & Double::kSignMask) != 0));
}
}
}
@@ -387,6 +408,7 @@ void JSFunction::JSFunctionVerify() {
CHECK(IsJSFunction());
VerifyObjectField(kPrototypeOrInitialMapOffset);
VerifyObjectField(kNextFunctionLinkOffset);
+ CHECK(code()->IsCode());
CHECK(next_function_link()->IsUndefined() ||
next_function_link()->IsJSFunction());
}
@@ -446,9 +468,8 @@ void Oddball::OddballVerify() {
} else {
ASSERT(number->IsSmi());
int value = Smi::cast(number)->value();
- // Hidden oddballs have negative smis.
- const int kLeastHiddenOddballNumber = -4;
ASSERT(value <= 1);
+ // Hidden oddballs have negative smis.
ASSERT(value >= kLeastHiddenOddballNumber);
}
}
@@ -463,6 +484,7 @@ void JSGlobalPropertyCell::JSGlobalPropertyCellVerify() {
void Code::CodeVerify() {
CHECK(IsAligned(reinterpret_cast<intptr_t>(instruction_start()),
kCodeAlignment));
+ relocation_info()->Verify();
Address last_gc_pc = NULL;
for (RelocIterator it(this); !it.done(); it.next()) {
it.rinfo()->Verify();
@@ -484,11 +506,27 @@ void JSArray::JSArrayVerify() {
}
+void JSSet::JSSetVerify() {
+ CHECK(IsJSSet());
+ JSObjectVerify();
+ VerifyHeapPointer(table());
+ ASSERT(table()->IsHashTable() || table()->IsUndefined());
+}
+
+
+void JSMap::JSMapVerify() {
+ CHECK(IsJSMap());
+ JSObjectVerify();
+ VerifyHeapPointer(table());
+ ASSERT(table()->IsHashTable() || table()->IsUndefined());
+}
+
+
void JSWeakMap::JSWeakMapVerify() {
CHECK(IsJSWeakMap());
JSObjectVerify();
VerifyHeapPointer(table());
- ASSERT(table()->IsHashTable());
+ ASSERT(table()->IsHashTable() || table()->IsUndefined());
}
@@ -535,13 +573,14 @@ void JSRegExp::JSRegExpVerify() {
void JSProxy::JSProxyVerify() {
- ASSERT(IsJSProxy());
+ CHECK(IsJSProxy());
VerifyPointer(handler());
+ ASSERT(hash()->IsSmi() || hash()->IsUndefined());
}
void JSFunctionProxy::JSFunctionProxyVerify() {
- ASSERT(IsJSFunctionProxy());
+ CHECK(IsJSFunctionProxy());
JSProxyVerify();
VerifyPointer(call_trap());
VerifyPointer(construct_trap());
diff --git a/deps/v8/src/objects-inl.h b/deps/v8/src/objects-inl.h
index 8796865c2..39d6e0413 100644
--- a/deps/v8/src/objects-inl.h
+++ b/deps/v8/src/objects-inl.h
@@ -43,8 +43,11 @@
#include "isolate.h"
#include "property.h"
#include "spaces.h"
+#include "store-buffer.h"
#include "v8memory.h"
+#include "incremental-marking.h"
+
namespace v8 {
namespace internal {
@@ -64,6 +67,13 @@ PropertyDetails PropertyDetails::AsDeleted() {
}
+#define TYPE_CHECKER(type, instancetype) \
+ bool Object::Is##type() { \
+ return Object::IsHeapObject() && \
+ HeapObject::cast(this)->map()->instance_type() == instancetype; \
+ }
+
+
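Each TYPE_CHECKER line replaces one of the hand-written predicates deleted further down (IsByteArray, IsCode, IsOddball, and the rest). A self-contained mock of what the macro expands to, with hypothetical types rather than V8's:

    #include <cassert>

    enum InstanceType { BYTE_ARRAY_TYPE, HEAP_NUMBER_TYPE };

    struct Map { InstanceType instance_type; };
    struct HeapObject { Map* map; };

    struct Object {
      HeapObject* heap_object;               // nullptr models a smi here
      bool IsHeapObject() const { return heap_object != nullptr; }

    #define TYPE_CHECKER(type, instancetype)                      \
      bool Is##type() const {                                     \
        return IsHeapObject() &&                                  \
               heap_object->map->instance_type == instancetype;   \
      }

      TYPE_CHECKER(ByteArray, BYTE_ARRAY_TYPE)
      TYPE_CHECKER(HeapNumber, HEAP_NUMBER_TYPE)
    #undef TYPE_CHECKER
    };

    int main() {
      Map map = { BYTE_ARRAY_TYPE };
      HeapObject heap_object = { &map };
      Object object = { &heap_object };
      assert(object.IsByteArray() && !object.IsHeapNumber());
      return 0;
    }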
#define CAST_ACCESSOR(type) \
type* type::cast(Object* object) { \
ASSERT(object->Is##type()); \
@@ -80,16 +90,7 @@ PropertyDetails PropertyDetails::AsDeleted() {
type* holder::name() { return type::cast(READ_FIELD(this, offset)); } \
void holder::set_##name(type* value, WriteBarrierMode mode) { \
WRITE_FIELD(this, offset, value); \
- CONDITIONAL_WRITE_BARRIER(GetHeap(), this, offset, mode); \
- }
-
-
-// GC-safe accessors do not use HeapObject::GetHeap(), but access TLS instead.
-#define ACCESSORS_GCSAFE(holder, name, type, offset) \
- type* holder::name() { return type::cast(READ_FIELD(this, offset)); } \
- void holder::set_##name(type* value, WriteBarrierMode mode) { \
- WRITE_FIELD(this, offset, value); \
- CONDITIONAL_WRITE_BARRIER(HEAP, this, offset, mode); \
+ CONDITIONAL_WRITE_BARRIER(GetHeap(), this, offset, value, mode); \
}
@@ -118,6 +119,23 @@ PropertyDetails PropertyDetails::AsDeleted() {
}
+bool IsMoreGeneralElementsKindTransition(ElementsKind from_kind,
+ ElementsKind to_kind) {
+ if (to_kind == FAST_ELEMENTS) {
+ return from_kind == FAST_SMI_ONLY_ELEMENTS ||
+ from_kind == FAST_DOUBLE_ELEMENTS;
+ } else {
+ return to_kind == FAST_DOUBLE_ELEMENTS &&
+ from_kind == FAST_SMI_ONLY_ELEMENTS;
+ }
+}
+
+
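IsMoreGeneralElementsKindTransition above encodes the one-way generalization order FAST_SMI_ONLY_ELEMENTS -> FAST_DOUBLE_ELEMENTS -> FAST_ELEMENTS. The same logic compiled standalone, with the allowed and forbidden pairs checked:

    #include <cassert>

    enum ElementsKind {
      FAST_SMI_ONLY_ELEMENTS, FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS
    };

    bool IsMoreGeneral(ElementsKind from, ElementsKind to) {
      if (to == FAST_ELEMENTS) {
        return from == FAST_SMI_ONLY_ELEMENTS ||
               from == FAST_DOUBLE_ELEMENTS;
      }
      return to == FAST_DOUBLE_ELEMENTS && from == FAST_SMI_ONLY_ELEMENTS;
    }

    int main() {
      assert(IsMoreGeneral(FAST_SMI_ONLY_ELEMENTS, FAST_DOUBLE_ELEMENTS));
      assert(IsMoreGeneral(FAST_SMI_ONLY_ELEMENTS, FAST_ELEMENTS));
      assert(IsMoreGeneral(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS));
      assert(!IsMoreGeneral(FAST_ELEMENTS, FAST_DOUBLE_ELEMENTS));
      assert(!IsMoreGeneral(FAST_DOUBLE_ELEMENTS, FAST_SMI_ONLY_ELEMENTS));
      return 0;
    }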
+bool Object::IsFixedArrayBase() {
+ return IsFixedArray() || IsFixedDoubleArray();
+}
+
+
bool Object::IsInstanceOf(FunctionTemplateInfo* expected) {
// There is a constraint on the object; check.
if (!this->IsJSObject()) return false;
@@ -147,12 +165,15 @@ bool Object::IsHeapObject() {
}
-bool Object::IsHeapNumber() {
- return Object::IsHeapObject()
- && HeapObject::cast(this)->map()->instance_type() == HEAP_NUMBER_TYPE;
+bool Object::NonFailureIsHeapObject() {
+ ASSERT(!this->IsFailure());
+ return (reinterpret_cast<intptr_t>(this) & kSmiTagMask) != 0;
}
+TYPE_CHECKER(HeapNumber, HEAP_NUMBER_TYPE)
+
+
bool Object::IsString() {
return Object::IsHeapObject()
&& HeapObject::cast(this)->map()->instance_type() < FIRST_NONSTRING_TYPE;
@@ -165,6 +186,13 @@ bool Object::IsSpecObject() {
}
+bool Object::IsSpecFunction() {
+ if (!Object::IsHeapObject()) return false;
+ InstanceType type = HeapObject::cast(this)->map()->instance_type();
+ return type == JS_FUNCTION_TYPE || type == JS_FUNCTION_PROXY_TYPE;
+}
+
+
bool Object::IsSymbol() {
if (!this->IsHeapObject()) return false;
uint32_t type = HeapObject::cast(this)->map()->instance_type();
@@ -396,19 +424,20 @@ bool Object::IsNumber() {
}
-bool Object::IsByteArray() {
- return Object::IsHeapObject()
- && HeapObject::cast(this)->map()->instance_type() == BYTE_ARRAY_TYPE;
-}
+TYPE_CHECKER(ByteArray, BYTE_ARRAY_TYPE)
+TYPE_CHECKER(FreeSpace, FREE_SPACE_TYPE)
-bool Object::IsExternalPixelArray() {
- return Object::IsHeapObject() &&
- HeapObject::cast(this)->map()->instance_type() ==
- EXTERNAL_PIXEL_ARRAY_TYPE;
+bool Object::IsFiller() {
+ if (!Object::IsHeapObject()) return false;
+ InstanceType instance_type = HeapObject::cast(this)->map()->instance_type();
+ return instance_type == FREE_SPACE_TYPE || instance_type == FILLER_TYPE;
}
+TYPE_CHECKER(ExternalPixelArray, EXTERNAL_PIXEL_ARRAY_TYPE)
+
+
bool Object::IsExternalArray() {
if (!Object::IsHeapObject())
return false;
@@ -419,60 +448,14 @@ bool Object::IsExternalArray() {
}
-bool Object::IsExternalByteArray() {
- return Object::IsHeapObject() &&
- HeapObject::cast(this)->map()->instance_type() ==
- EXTERNAL_BYTE_ARRAY_TYPE;
-}
-
-
-bool Object::IsExternalUnsignedByteArray() {
- return Object::IsHeapObject() &&
- HeapObject::cast(this)->map()->instance_type() ==
- EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE;
-}
-
-
-bool Object::IsExternalShortArray() {
- return Object::IsHeapObject() &&
- HeapObject::cast(this)->map()->instance_type() ==
- EXTERNAL_SHORT_ARRAY_TYPE;
-}
-
-
-bool Object::IsExternalUnsignedShortArray() {
- return Object::IsHeapObject() &&
- HeapObject::cast(this)->map()->instance_type() ==
- EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE;
-}
-
-
-bool Object::IsExternalIntArray() {
- return Object::IsHeapObject() &&
- HeapObject::cast(this)->map()->instance_type() ==
- EXTERNAL_INT_ARRAY_TYPE;
-}
-
-
-bool Object::IsExternalUnsignedIntArray() {
- return Object::IsHeapObject() &&
- HeapObject::cast(this)->map()->instance_type() ==
- EXTERNAL_UNSIGNED_INT_ARRAY_TYPE;
-}
-
-
-bool Object::IsExternalFloatArray() {
- return Object::IsHeapObject() &&
- HeapObject::cast(this)->map()->instance_type() ==
- EXTERNAL_FLOAT_ARRAY_TYPE;
-}
-
-
-bool Object::IsExternalDoubleArray() {
- return Object::IsHeapObject() &&
- HeapObject::cast(this)->map()->instance_type() ==
- EXTERNAL_DOUBLE_ARRAY_TYPE;
-}
+TYPE_CHECKER(ExternalByteArray, EXTERNAL_BYTE_ARRAY_TYPE)
+TYPE_CHECKER(ExternalUnsignedByteArray, EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE)
+TYPE_CHECKER(ExternalShortArray, EXTERNAL_SHORT_ARRAY_TYPE)
+TYPE_CHECKER(ExternalUnsignedShortArray, EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE)
+TYPE_CHECKER(ExternalIntArray, EXTERNAL_INT_ARRAY_TYPE)
+TYPE_CHECKER(ExternalUnsignedIntArray, EXTERNAL_UNSIGNED_INT_ARRAY_TYPE)
+TYPE_CHECKER(ExternalFloatArray, EXTERNAL_FLOAT_ARRAY_TYPE)
+TYPE_CHECKER(ExternalDoubleArray, EXTERNAL_DOUBLE_ARRAY_TYPE)
bool MaybeObject::IsFailure() {
@@ -509,59 +492,34 @@ Failure* Failure::cast(MaybeObject* obj) {
bool Object::IsJSReceiver() {
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
return IsHeapObject() &&
HeapObject::cast(this)->map()->instance_type() >= FIRST_JS_RECEIVER_TYPE;
}
bool Object::IsJSObject() {
- return IsJSReceiver() && !IsJSProxy();
+ STATIC_ASSERT(LAST_JS_OBJECT_TYPE == LAST_TYPE);
+ return IsHeapObject() &&
+ HeapObject::cast(this)->map()->instance_type() >= FIRST_JS_OBJECT_TYPE;
}
bool Object::IsJSProxy() {
- return Object::IsHeapObject() &&
- (HeapObject::cast(this)->map()->instance_type() == JS_PROXY_TYPE ||
- HeapObject::cast(this)->map()->instance_type() == JS_FUNCTION_PROXY_TYPE);
-}
-
-
-bool Object::IsJSFunctionProxy() {
- return Object::IsHeapObject() &&
- HeapObject::cast(this)->map()->instance_type() == JS_FUNCTION_PROXY_TYPE;
-}
-
-
-bool Object::IsJSWeakMap() {
- return Object::IsJSObject() &&
- HeapObject::cast(this)->map()->instance_type() == JS_WEAK_MAP_TYPE;
-}
-
-
-bool Object::IsJSContextExtensionObject() {
- return IsHeapObject()
- && (HeapObject::cast(this)->map()->instance_type() ==
- JS_CONTEXT_EXTENSION_OBJECT_TYPE);
-}
-
-
-bool Object::IsMap() {
- return Object::IsHeapObject()
- && HeapObject::cast(this)->map()->instance_type() == MAP_TYPE;
-}
-
-
-bool Object::IsFixedArray() {
- return Object::IsHeapObject()
- && HeapObject::cast(this)->map()->instance_type() == FIXED_ARRAY_TYPE;
+ if (!Object::IsHeapObject()) return false;
+ InstanceType type = HeapObject::cast(this)->map()->instance_type();
+ return FIRST_JS_PROXY_TYPE <= type && type <= LAST_JS_PROXY_TYPE;
}
-bool Object::IsFixedDoubleArray() {
- return Object::IsHeapObject()
- && HeapObject::cast(this)->map()->instance_type() ==
- FIXED_DOUBLE_ARRAY_TYPE;
-}
+TYPE_CHECKER(JSFunctionProxy, JS_FUNCTION_PROXY_TYPE)
+TYPE_CHECKER(JSSet, JS_SET_TYPE)
+TYPE_CHECKER(JSMap, JS_MAP_TYPE)
+TYPE_CHECKER(JSWeakMap, JS_WEAK_MAP_TYPE)
+TYPE_CHECKER(JSContextExtensionObject, JS_CONTEXT_EXTENSION_OBJECT_TYPE)
+TYPE_CHECKER(Map, MAP_TYPE)
+TYPE_CHECKER(FixedArray, FIXED_ARRAY_TYPE)
+TYPE_CHECKER(FixedDoubleArray, FIXED_DOUBLE_ARRAY_TYPE)
bool Object::IsDescriptorArray() {
@@ -617,17 +575,14 @@ bool Object::IsGlobalContext() {
}
-bool Object::IsSerializedScopeInfo() {
+bool Object::IsScopeInfo() {
return Object::IsHeapObject() &&
HeapObject::cast(this)->map() ==
- HeapObject::cast(this)->GetHeap()->serialized_scope_info_map();
+ HeapObject::cast(this)->GetHeap()->scope_info_map();
}
-bool Object::IsJSFunction() {
- return Object::IsHeapObject()
- && HeapObject::cast(this)->map()->instance_type() == JS_FUNCTION_TYPE;
-}
+TYPE_CHECKER(JSFunction, JS_FUNCTION_TYPE)
template <> inline bool Is<JSFunction>(Object* obj) {
@@ -635,44 +590,12 @@ template <> inline bool Is<JSFunction>(Object* obj) {
}
-bool Object::IsCode() {
- return Object::IsHeapObject()
- && HeapObject::cast(this)->map()->instance_type() == CODE_TYPE;
-}
-
-
-bool Object::IsOddball() {
- ASSERT(HEAP->is_safe_to_read_maps());
- return Object::IsHeapObject()
- && HeapObject::cast(this)->map()->instance_type() == ODDBALL_TYPE;
-}
-
-
-bool Object::IsJSGlobalPropertyCell() {
- return Object::IsHeapObject()
- && HeapObject::cast(this)->map()->instance_type()
- == JS_GLOBAL_PROPERTY_CELL_TYPE;
-}
-
-
-bool Object::IsSharedFunctionInfo() {
- return Object::IsHeapObject() &&
- (HeapObject::cast(this)->map()->instance_type() ==
- SHARED_FUNCTION_INFO_TYPE);
-}
-
-
-bool Object::IsJSValue() {
- return Object::IsHeapObject()
- && HeapObject::cast(this)->map()->instance_type() == JS_VALUE_TYPE;
-}
-
-
-bool Object::IsJSMessageObject() {
- return Object::IsHeapObject()
- && (HeapObject::cast(this)->map()->instance_type() ==
- JS_MESSAGE_OBJECT_TYPE);
-}
+TYPE_CHECKER(Code, CODE_TYPE)
+TYPE_CHECKER(Oddball, ODDBALL_TYPE)
+TYPE_CHECKER(JSGlobalPropertyCell, JS_GLOBAL_PROPERTY_CELL_TYPE)
+TYPE_CHECKER(SharedFunctionInfo, SHARED_FUNCTION_INFO_TYPE)
+TYPE_CHECKER(JSValue, JS_VALUE_TYPE)
+TYPE_CHECKER(JSMessageObject, JS_MESSAGE_OBJECT_TYPE)
bool Object::IsStringWrapper() {
@@ -680,10 +603,7 @@ bool Object::IsStringWrapper() {
}
-bool Object::IsForeign() {
- return Object::IsHeapObject()
- && HeapObject::cast(this)->map()->instance_type() == FOREIGN_TYPE;
-}
+TYPE_CHECKER(Foreign, FOREIGN_TYPE)
bool Object::IsBoolean() {
@@ -692,16 +612,8 @@ bool Object::IsBoolean() {
}
-bool Object::IsJSArray() {
- return Object::IsHeapObject()
- && HeapObject::cast(this)->map()->instance_type() == JS_ARRAY_TYPE;
-}
-
-
-bool Object::IsJSRegExp() {
- return Object::IsHeapObject()
- && HeapObject::cast(this)->map()->instance_type() == JS_REGEXP_TYPE;
-}
+TYPE_CHECKER(JSArray, JS_ARRAY_TYPE)
+TYPE_CHECKER(JSRegExp, JS_REGEXP_TYPE)
template <> inline bool Is<JSArray>(Object* obj) {
@@ -738,7 +650,10 @@ bool Object::IsJSFunctionResultCache() {
return false;
}
#ifdef DEBUG
- reinterpret_cast<JSFunctionResultCache*>(this)->JSFunctionResultCacheVerify();
+ if (FLAG_verify_heap) {
+ reinterpret_cast<JSFunctionResultCache*>(this)->
+ JSFunctionResultCacheVerify();
+ }
#endif
return true;
}
@@ -750,7 +665,9 @@ bool Object::IsNormalizedMapCache() {
return false;
}
#ifdef DEBUG
- reinterpret_cast<NormalizedMapCache*>(this)->NormalizedMapCacheVerify();
+ if (FLAG_verify_heap) {
+ reinterpret_cast<NormalizedMapCache*>(this)->NormalizedMapCacheVerify();
+ }
#endif
return true;
}
@@ -799,18 +716,8 @@ bool Object::IsGlobalObject() {
}
-bool Object::IsJSGlobalObject() {
- return IsHeapObject() &&
- (HeapObject::cast(this)->map()->instance_type() ==
- JS_GLOBAL_OBJECT_TYPE);
-}
-
-
-bool Object::IsJSBuiltinsObject() {
- return IsHeapObject() &&
- (HeapObject::cast(this)->map()->instance_type() ==
- JS_BUILTINS_OBJECT_TYPE);
-}
+TYPE_CHECKER(JSGlobalObject, JS_GLOBAL_OBJECT_TYPE)
+TYPE_CHECKER(JSBuiltinsObject, JS_BUILTINS_OBJECT_TYPE)
bool Object::IsUndetectableObject() {
@@ -939,21 +846,20 @@ MaybeObject* Object::GetProperty(String* key, PropertyAttributes* attributes) {
#define WRITE_FIELD(p, offset, value) \
(*reinterpret_cast<Object**>(FIELD_ADDR(p, offset)) = value)
-// TODO(isolates): Pass heap in to these macros.
-#define WRITE_BARRIER(object, offset) \
- object->GetHeap()->RecordWrite(object->address(), offset);
-
-// CONDITIONAL_WRITE_BARRIER must be issued after the actual
-// write due to the assert validating the written value.
-#define CONDITIONAL_WRITE_BARRIER(heap, object, offset, mode) \
- if (mode == UPDATE_WRITE_BARRIER) { \
- heap->RecordWrite(object->address(), offset); \
- } else { \
- ASSERT(mode == SKIP_WRITE_BARRIER); \
- ASSERT(heap->InNewSpace(object) || \
- !heap->InNewSpace(READ_FIELD(object, offset)) || \
- Page::FromAddress(object->address())-> \
- IsRegionDirty(object->address() + offset)); \
+#define WRITE_BARRIER(heap, object, offset, value) \
+ heap->incremental_marking()->RecordWrite( \
+ object, HeapObject::RawField(object, offset), value); \
+ if (heap->InNewSpace(value)) { \
+ heap->RecordWrite(object->address(), offset); \
+ }
+
+#define CONDITIONAL_WRITE_BARRIER(heap, object, offset, value, mode) \
+ if (mode == UPDATE_WRITE_BARRIER) { \
+ heap->incremental_marking()->RecordWrite( \
+ object, HeapObject::RawField(object, offset), value); \
+ if (heap->InNewSpace(value)) { \
+ heap->RecordWrite(object->address(), offset); \
+ } \
}
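
Both macros above now do two jobs per recorded store: notify the incremental marker about the new edge (so a black object pointing at a white one re-greys it) and, for pointers into new space, add a store-buffer entry for the next scavenge. A minimal mock of that shape, with invented types and signatures:

    #include <cassert>

    struct Object {};

    struct IncrementalMarking {
      int recorded;
      void RecordWrite(Object* host, Object** slot, Object* value) {
        ++recorded;
      }
    };

    struct Heap {
      IncrementalMarking marking;
      int store_buffer_entries;
      Object* new_space_object;              // mock: the one "young" object
      IncrementalMarking* incremental_marking() { return &marking; }
      bool InNewSpace(Object* value) { return value == new_space_object; }
      void RecordWrite(Object* host, Object** slot) {
        ++store_buffer_entries;
      }
    };

    // Same two steps as WRITE_BARRIER: marker first, store buffer second.
    void WriteField(Heap* heap, Object* host, Object** slot, Object* value) {
      *slot = value;
      heap->incremental_marking()->RecordWrite(host, slot, value);
      if (heap->InNewSpace(value)) {
        heap->RecordWrite(host, slot);
      }
    }

    int main() {
      Heap heap = {};
      Object host, old_value, young_value;
      heap.new_space_object = &young_value;

      Object* field = nullptr;
      WriteField(&heap, &host, &field, &old_value);    // marker only
      WriteField(&heap, &host, &field, &young_value);  // marker + buffer
      assert(heap.marking.recorded == 2);
      assert(heap.store_buffer_entries == 1);
      return 0;
    }

Note the old barrier took (heap, object, offset, mode) with no value argument; the incremental marker needs the stored value, which is why every CONDITIONAL_WRITE_BARRIER call site in this patch gains a value parameter.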
#ifndef V8_TARGET_ARCH_MIPS
@@ -974,7 +880,6 @@ MaybeObject* Object::GetProperty(String* key, PropertyAttributes* attributes) {
#define READ_DOUBLE_FIELD(p, offset) read_double_field(p, offset)
#endif // V8_TARGET_ARCH_MIPS
-
#ifndef V8_TARGET_ARCH_MIPS
#define WRITE_DOUBLE_FIELD(p, offset, value) \
(*reinterpret_cast<double*>(FIELD_ADDR(p, offset)) = value)
@@ -1169,91 +1074,6 @@ HeapObject* MapWord::ToForwardingAddress() {
}
-bool MapWord::IsMarked() {
- return (value_ & kMarkingMask) == 0;
-}
-
-
-void MapWord::SetMark() {
- value_ &= ~kMarkingMask;
-}
-
-
-void MapWord::ClearMark() {
- value_ |= kMarkingMask;
-}
-
-
-bool MapWord::IsOverflowed() {
- return (value_ & kOverflowMask) != 0;
-}
-
-
-void MapWord::SetOverflow() {
- value_ |= kOverflowMask;
-}
-
-
-void MapWord::ClearOverflow() {
- value_ &= ~kOverflowMask;
-}
-
-
-MapWord MapWord::EncodeAddress(Address map_address, int offset) {
- // Offset is the distance in live bytes from the first live object in the
- // same page. The offset between two objects in the same page should not
- // exceed the object area size of a page.
- ASSERT(0 <= offset && offset < Page::kObjectAreaSize);
-
- uintptr_t compact_offset = offset >> kObjectAlignmentBits;
- ASSERT(compact_offset < (1 << kForwardingOffsetBits));
-
- Page* map_page = Page::FromAddress(map_address);
- ASSERT_MAP_PAGE_INDEX(map_page->mc_page_index);
-
- uintptr_t map_page_offset =
- map_page->Offset(map_address) >> kMapAlignmentBits;
-
- uintptr_t encoding =
- (compact_offset << kForwardingOffsetShift) |
- (map_page_offset << kMapPageOffsetShift) |
- (map_page->mc_page_index << kMapPageIndexShift);
- return MapWord(encoding);
-}
-
-
-Address MapWord::DecodeMapAddress(MapSpace* map_space) {
- int map_page_index =
- static_cast<int>((value_ & kMapPageIndexMask) >> kMapPageIndexShift);
- ASSERT_MAP_PAGE_INDEX(map_page_index);
-
- int map_page_offset = static_cast<int>(
- ((value_ & kMapPageOffsetMask) >> kMapPageOffsetShift) <<
- kMapAlignmentBits);
-
- return (map_space->PageAddress(map_page_index) + map_page_offset);
-}
-
-
-int MapWord::DecodeOffset() {
- // The offset field is represented in the kForwardingOffsetBits
- // most-significant bits.
- uintptr_t offset = (value_ >> kForwardingOffsetShift) << kObjectAlignmentBits;
- ASSERT(offset < static_cast<uintptr_t>(Page::kObjectAreaSize));
- return static_cast<int>(offset);
-}
-
-
-MapWord MapWord::FromEncodedAddress(Address address) {
- return MapWord(reinterpret_cast<uintptr_t>(address));
-}
-
-
-Address MapWord::ToEncodedAddress() {
- return reinterpret_cast<Address>(value_);
-}
-
-
#ifdef DEBUG
void HeapObject::VerifyObjectField(int offset) {
VerifyPointer(READ_FIELD(this, offset));
@@ -1266,12 +1086,11 @@ void HeapObject::VerifySmiField(int offset) {
Heap* HeapObject::GetHeap() {
- // During GC, the map pointer in HeapObject is used in various ways that
- // prevent us from retrieving Heap from the map.
- // Assert that we are not in GC, implement GC code in a way that it doesn't
- // pull heap from the map.
- ASSERT(HEAP->is_safe_to_read_maps());
- return map()->heap();
+ Heap* heap =
+ MemoryChunk::FromAddress(reinterpret_cast<Address>(this))->heap();
+ ASSERT(heap != NULL);
+ ASSERT(heap->isolate() == Isolate::Current());
+ return heap;
}
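
The new GetHeap() above works because heap pages are allocated at a fixed power-of-two alignment, so masking any interior object address down to that boundary lands on the MemoryChunk header, which records its owning heap. A minimal mock of the masking trick (the 1 MB alignment and the header layout here are invented; aligned_alloc needs C++17):

    #include <cassert>
    #include <cstdint>
    #include <cstdlib>

    struct Heap;

    struct MemoryChunk {
      Heap* heap_;                               // first header field (mock)
      static constexpr uintptr_t kAlignment = 1 << 20;
      static MemoryChunk* FromAddress(void* address) {
        return reinterpret_cast<MemoryChunk*>(
            reinterpret_cast<uintptr_t>(address) & ~(kAlignment - 1));
      }
    };

    int main() {
      void* page = std::aligned_alloc(MemoryChunk::kAlignment,
                                      MemoryChunk::kAlignment);
      MemoryChunk* chunk = static_cast<MemoryChunk*>(page);
      chunk->heap_ = reinterpret_cast<Heap*>(0x1234);  // pretend heap

      // Any interior pointer masks back to its chunk header.
      void* interior = static_cast<char*>(page) + 4096;
      assert(MemoryChunk::FromAddress(interior) == chunk);
      assert(MemoryChunk::FromAddress(interior)->heap_ == chunk->heap_);
      std::free(page);
      return 0;
    }

This is what retires the old is_safe_to_read_maps assertion: the heap is found without dereferencing the map pointer, which a concurrent GC may have mangled.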
@@ -1287,6 +1106,17 @@ Map* HeapObject::map() {
void HeapObject::set_map(Map* value) {
set_map_word(MapWord::FromMap(value));
+ if (value != NULL) {
+    // TODO(1600) We are passing NULL as a slot because maps can never be
+    // on an evacuation candidate.
+ value->GetHeap()->incremental_marking()->RecordWrite(this, NULL, value);
+ }
+}
+
+
+// Unsafe accessor omitting write barrier.
+void HeapObject::set_map_unsafe(Map* value) {
+ set_map_word(MapWord::FromMap(value));
}
@@ -1329,47 +1159,6 @@ void HeapObject::IteratePointer(ObjectVisitor* v, int offset) {
}
-bool HeapObject::IsMarked() {
- return map_word().IsMarked();
-}
-
-
-void HeapObject::SetMark() {
- ASSERT(!IsMarked());
- MapWord first_word = map_word();
- first_word.SetMark();
- set_map_word(first_word);
-}
-
-
-void HeapObject::ClearMark() {
- ASSERT(IsMarked());
- MapWord first_word = map_word();
- first_word.ClearMark();
- set_map_word(first_word);
-}
-
-
-bool HeapObject::IsOverflowed() {
- return map_word().IsOverflowed();
-}
-
-
-void HeapObject::SetOverflow() {
- MapWord first_word = map_word();
- first_word.SetOverflow();
- set_map_word(first_word);
-}
-
-
-void HeapObject::ClearOverflow() {
- ASSERT(IsOverflowed());
- MapWord first_word = map_word();
- first_word.ClearOverflow();
- set_map_word(first_word);
-}
-
-
double HeapNumber::value() {
return READ_DOUBLE_FIELD(this, kValueOffset);
}
@@ -1396,20 +1185,80 @@ ACCESSORS(JSObject, properties, FixedArray, kPropertiesOffset)
FixedArrayBase* JSObject::elements() {
Object* array = READ_FIELD(this, kElementsOffset);
- ASSERT(array->HasValidElements());
return static_cast<FixedArrayBase*>(array);
}
+void JSObject::ValidateSmiOnlyElements() {
+#if DEBUG
+ if (map()->elements_kind() == FAST_SMI_ONLY_ELEMENTS) {
+ Heap* heap = GetHeap();
+ // Don't use elements, since integrity checks will fail if there
+ // are filler pointers in the array.
+ FixedArray* fixed_array =
+ reinterpret_cast<FixedArray*>(READ_FIELD(this, kElementsOffset));
+ Map* map = fixed_array->map();
+ // Arrays that have been shifted in place can't be verified.
+ if (map != heap->raw_unchecked_one_pointer_filler_map() &&
+ map != heap->raw_unchecked_two_pointer_filler_map() &&
+ map != heap->free_space_map()) {
+ for (int i = 0; i < fixed_array->length(); i++) {
+ Object* current = fixed_array->get(i);
+ ASSERT(current->IsSmi() || current == heap->the_hole_value());
+ }
+ }
+ }
+#endif
+}
+
+
+MaybeObject* JSObject::EnsureCanContainNonSmiElements() {
+#if DEBUG
+ ValidateSmiOnlyElements();
+#endif
+ if (map()->elements_kind() == FAST_SMI_ONLY_ELEMENTS) {
+ Object* obj;
+ MaybeObject* maybe_obj = GetElementsTransitionMap(FAST_ELEMENTS);
+ if (!maybe_obj->ToObject(&obj)) return maybe_obj;
+ set_map(Map::cast(obj));
+ }
+ return this;
+}
+
+
+MaybeObject* JSObject::EnsureCanContainElements(Object** objects,
+ uint32_t count) {
+ if (map()->elements_kind() == FAST_SMI_ONLY_ELEMENTS) {
+ for (uint32_t i = 0; i < count; ++i) {
+ Object* current = *objects++;
+ if (!current->IsSmi() && current != GetHeap()->the_hole_value()) {
+ return EnsureCanContainNonSmiElements();
+ }
+ }
+ }
+ return this;
+}
+
+
+MaybeObject* JSObject::EnsureCanContainElements(FixedArray* elements) {
+ Object** objects = reinterpret_cast<Object**>(
+ FIELD_ADDR(elements, elements->OffsetOfElementAt(0)));
+ return EnsureCanContainElements(objects, elements->length());
+}
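
These helpers implement the one-way smi-only transition: an array keeps the FAST_SMI_ONLY_ELEMENTS kind until the first store of something that is neither a smi nor the hole, at which point it widens to FAST_ELEMENTS and never narrows back. A standalone sketch of that classification (a single boolean stands in for the smi-or-hole test; types are illustrative):

// Sketch: one-way elements-kind widening on the first non-smi store.
#include <cassert>

enum ElementsKind { FAST_SMI_ONLY_ELEMENTS, FAST_ELEMENTS };

struct Value { bool is_smi_or_hole; };

struct ArrayState {
  ElementsKind kind = FAST_SMI_ONLY_ELEMENTS;
  void EnsureCanContain(const Value& v) {
    // Transitions only widen: smi-only -> generic, never back.
    if (kind == FAST_SMI_ONLY_ELEMENTS && !v.is_smi_or_hole)
      kind = FAST_ELEMENTS;
  }
};

int main() {
  ArrayState a;
  a.EnsureCanContain(Value{true});   // smi store: kind unchanged
  assert(a.kind == FAST_SMI_ONLY_ELEMENTS);
  a.EnsureCanContain(Value{false});  // heap-object store: widen
  assert(a.kind == FAST_ELEMENTS);
  return 0;
}
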
+
void JSObject::set_elements(FixedArrayBase* value, WriteBarrierMode mode) {
- ASSERT(map()->has_fast_elements() ==
+ ASSERT((map()->has_fast_elements() ||
+ map()->has_fast_smi_only_elements()) ==
(value->map() == GetHeap()->fixed_array_map() ||
value->map() == GetHeap()->fixed_cow_array_map()));
ASSERT(map()->has_fast_double_elements() ==
value->IsFixedDoubleArray());
ASSERT(value->HasValidElements());
+#ifdef DEBUG
+ ValidateSmiOnlyElements();
+#endif
WRITE_FIELD(this, kElementsOffset, value);
- CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kElementsOffset, mode);
+ CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kElementsOffset, value, mode);
}
@@ -1420,7 +1269,7 @@ void JSObject::initialize_properties() {
void JSObject::initialize_elements() {
- ASSERT(map()->has_fast_elements());
+ ASSERT(map()->has_fast_elements() || map()->has_fast_smi_only_elements());
ASSERT(!GetHeap()->InNewSpace(GetHeap()->empty_fixed_array()));
WRITE_FIELD(this, kElementsOffset, GetHeap()->empty_fixed_array());
}
@@ -1428,9 +1277,11 @@ void JSObject::initialize_elements() {
MaybeObject* JSObject::ResetElements() {
Object* obj;
- { MaybeObject* maybe_obj = map()->GetFastElementsMap();
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
+ ElementsKind elements_kind = FLAG_smi_only_arrays
+ ? FAST_SMI_ONLY_ELEMENTS
+ : FAST_ELEMENTS;
+ MaybeObject* maybe_obj = GetElementsTransitionMap(elements_kind);
+ if (!maybe_obj->ToObject(&obj)) return maybe_obj;
set_map(Map::cast(obj));
initialize_elements();
return this;
@@ -1442,12 +1293,12 @@ ACCESSORS(Oddball, to_number, Object, kToNumberOffset)
byte Oddball::kind() {
- return READ_BYTE_FIELD(this, kKindOffset);
+ return Smi::cast(READ_FIELD(this, kKindOffset))->value();
}
void Oddball::set_kind(byte value) {
- WRITE_BYTE_FIELD(this, kKindOffset, value);
+ WRITE_FIELD(this, kKindOffset, Smi::FromInt(value));
}
@@ -1460,6 +1311,8 @@ void JSGlobalPropertyCell::set_value(Object* val, WriteBarrierMode ignored) {
// The generational write barrier is not used for global property cells,
// but the incremental marker still has to be notified of the store.
ASSERT(!val->IsJSGlobalPropertyCell());
WRITE_FIELD(this, kValueOffset, val);
+ GetHeap()->incremental_marking()->RecordWrite(
+ this, HeapObject::RawField(this, kValueOffset), val);
}
@@ -1528,7 +1381,17 @@ void JSObject::SetInternalField(int index, Object* value) {
// to adjust the index here.
int offset = GetHeaderSize() + (kPointerSize * index);
WRITE_FIELD(this, offset, value);
- WRITE_BARRIER(this, offset);
+ WRITE_BARRIER(GetHeap(), this, offset, value);
+}
+
+
+void JSObject::SetInternalField(int index, Smi* value) {
+ ASSERT(index < GetInternalFieldCount() && index >= 0);
+ // Internal fields are stored immediately after the header, whereas
+ // in-object properties are at the end of the object. Therefore there
+ // is no need to adjust the index here.
+ int offset = GetHeaderSize() + (kPointerSize * index);
+ WRITE_FIELD(this, offset, value);
}
@@ -1554,7 +1417,7 @@ Object* JSObject::FastPropertyAtPut(int index, Object* value) {
if (index < 0) {
int offset = map()->instance_size() + (index * kPointerSize);
WRITE_FIELD(this, offset, value);
- WRITE_BARRIER(this, offset);
+ WRITE_BARRIER(GetHeap(), this, offset, value);
} else {
ASSERT(index < properties()->length());
properties()->set(index, value);
@@ -1588,16 +1451,32 @@ Object* JSObject::InObjectPropertyAtPut(int index,
ASSERT(index < 0);
int offset = map()->instance_size() + (index * kPointerSize);
WRITE_FIELD(this, offset, value);
- CONDITIONAL_WRITE_BARRIER(GetHeap(), this, offset, mode);
+ CONDITIONAL_WRITE_BARRIER(GetHeap(), this, offset, value, mode);
return value;
}
-void JSObject::InitializeBody(int object_size, Object* value) {
- ASSERT(!value->IsHeapObject() || !GetHeap()->InNewSpace(value));
- for (int offset = kHeaderSize; offset < object_size; offset += kPointerSize) {
- WRITE_FIELD(this, offset, value);
+void JSObject::InitializeBody(Map* map,
+ Object* pre_allocated_value,
+ Object* filler_value) {
+ ASSERT(!filler_value->IsHeapObject() ||
+ !GetHeap()->InNewSpace(filler_value));
+ ASSERT(!pre_allocated_value->IsHeapObject() ||
+ !GetHeap()->InNewSpace(pre_allocated_value));
+ int size = map->instance_size();
+ int offset = kHeaderSize;
+ if (filler_value != pre_allocated_value) {
+ int pre_allocated = map->pre_allocated_property_fields();
+ ASSERT(pre_allocated * kPointerSize + kHeaderSize <= size);
+ for (int i = 0; i < pre_allocated; i++) {
+ WRITE_FIELD(this, offset, pre_allocated_value);
+ offset += kPointerSize;
+ }
+ }
+ while (offset < size) {
+ WRITE_FIELD(this, offset, filler_value);
+ offset += kPointerSize;
}
}
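
The new InitializeBody fills two regions instead of one: the map's pre-allocated in-object property slots get pre_allocated_value, and the rest of the instance up to its size gets filler_value. The same two-phase fill over a plain word array, as a sketch:

// Sketch: header, then pre-allocated property slots, then filler.
#include <cassert>
#include <vector>

void InitializeBody(std::vector<int>& words, int header_words,
                    int pre_allocated, int pre_value, int filler) {
  int size = static_cast<int>(words.size());
  int offset = header_words;
  for (int i = 0; i < pre_allocated; i++) words[offset++] = pre_value;
  while (offset < size) words[offset++] = filler;
}

int main() {
  std::vector<int> object(8, -1);  // 8-word instance, 2-word header
  InitializeBody(object, 2, 3, /*pre_value=*/7, /*filler=*/0);
  assert(object[2] == 7 && object[4] == 7);  // pre-allocated slots
  assert(object[5] == 0 && object[7] == 0);  // filler tail
  return 0;
}
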
@@ -1683,7 +1562,7 @@ void FixedArray::set(int index, Object* value) {
ASSERT(index >= 0 && index < this->length());
int offset = kHeaderSize + index * kPointerSize;
WRITE_FIELD(this, offset, value);
- WRITE_BARRIER(this, offset);
+ WRITE_BARRIER(GetHeap(), this, offset, value);
}
@@ -1772,7 +1651,7 @@ void FixedDoubleArray::Initialize(FixedDoubleArray* from) {
void FixedDoubleArray::Initialize(FixedArray* from) {
int old_length = from->length();
- ASSERT(old_length < length());
+ ASSERT(old_length <= length());
for (int i = 0; i < old_length; i++) {
Object* hole_or_object = from->get(i);
if (hole_or_object->IsTheHole()) {
@@ -1806,7 +1685,9 @@ void FixedDoubleArray::Initialize(NumberDictionary* from) {
WriteBarrierMode HeapObject::GetWriteBarrierMode(const AssertNoAllocation&) {
- if (GetHeap()->InNewSpace(this)) return SKIP_WRITE_BARRIER;
+ Heap* heap = GetHeap();
+ if (heap->incremental_marking()->IsMarking()) return UPDATE_WRITE_BARRIER;
+ if (heap->InNewSpace(this)) return SKIP_WRITE_BARRIER;
return UPDATE_WRITE_BARRIER;
}
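
The added first branch is the point of this hunk: while incremental marking is active, even new-space stores must take the barrier, so the old new-space shortcut only applies when marking is off. Reduced to a sketch:

// Sketch: the barrier-mode decision after the change above.
#include <cassert>

enum WriteBarrierMode { SKIP_WRITE_BARRIER, UPDATE_WRITE_BARRIER };

WriteBarrierMode GetMode(bool marking_active, bool in_new_space) {
  if (marking_active) return UPDATE_WRITE_BARRIER;
  if (in_new_space) return SKIP_WRITE_BARRIER;
  return UPDATE_WRITE_BARRIER;
}

int main() {
  // New-space stores may skip the barrier only while marking is off.
  assert(GetMode(false, true) == SKIP_WRITE_BARRIER);
  assert(GetMode(true, true) == UPDATE_WRITE_BARRIER);
  return 0;
}
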
@@ -1818,11 +1699,13 @@ void FixedArray::set(int index,
ASSERT(index >= 0 && index < this->length());
int offset = kHeaderSize + index * kPointerSize;
WRITE_FIELD(this, offset, value);
- CONDITIONAL_WRITE_BARRIER(GetHeap(), this, offset, mode);
+ CONDITIONAL_WRITE_BARRIER(GetHeap(), this, offset, value, mode);
}
-void FixedArray::fast_set(FixedArray* array, int index, Object* value) {
+void FixedArray::NoWriteBarrierSet(FixedArray* array,
+ int index,
+ Object* value) {
ASSERT(array->map() != HEAP->raw_unchecked_fixed_cow_array_map());
ASSERT(index >= 0 && index < array->length());
ASSERT(!HEAP->InNewSpace(value));
@@ -1879,7 +1762,7 @@ void FixedArray::set_unchecked(Heap* heap,
WriteBarrierMode mode) {
int offset = kHeaderSize + index * kPointerSize;
WRITE_FIELD(this, offset, value);
- CONDITIONAL_WRITE_BARRIER(heap, this, offset, mode);
+ CONDITIONAL_WRITE_BARRIER(heap, this, offset, value, mode);
}
@@ -1914,10 +1797,12 @@ void DescriptorArray::set_bit_field3_storage(int value) {
}
-void DescriptorArray::fast_swap(FixedArray* array, int first, int second) {
+void DescriptorArray::NoWriteBarrierSwap(FixedArray* array,
+ int first,
+ int second) {
Object* tmp = array->get(first);
- fast_set(array, first, array->get(second));
- fast_set(array, second, tmp);
+ NoWriteBarrierSet(array, first, array->get(second));
+ NoWriteBarrierSet(array, second, tmp);
}
@@ -1992,19 +1877,17 @@ Object* DescriptorArray::GetCallbacksObject(int descriptor_number) {
AccessorDescriptor* DescriptorArray::GetCallbacks(int descriptor_number) {
ASSERT(GetType(descriptor_number) == CALLBACKS);
Foreign* p = Foreign::cast(GetCallbacksObject(descriptor_number));
- return reinterpret_cast<AccessorDescriptor*>(p->address());
+ return reinterpret_cast<AccessorDescriptor*>(p->foreign_address());
}
bool DescriptorArray::IsProperty(int descriptor_number) {
- return GetType(descriptor_number) < FIRST_PHANTOM_PROPERTY_TYPE;
+ return IsRealProperty(GetType(descriptor_number));
}
bool DescriptorArray::IsTransition(int descriptor_number) {
- PropertyType t = GetType(descriptor_number);
- return t == MAP_TRANSITION || t == CONSTANT_TRANSITION ||
- t == ELEMENTS_TRANSITION;
+ return IsTransitionType(GetType(descriptor_number));
}
@@ -2025,7 +1908,9 @@ void DescriptorArray::Get(int descriptor_number, Descriptor* desc) {
}
-void DescriptorArray::Set(int descriptor_number, Descriptor* desc) {
+void DescriptorArray::Set(int descriptor_number,
+ Descriptor* desc,
+ const WhitenessWitness&) {
// Range check.
ASSERT(descriptor_number < number_of_descriptors());
@@ -2033,26 +1918,53 @@ void DescriptorArray::Set(int descriptor_number, Descriptor* desc) {
ASSERT(!HEAP->InNewSpace(desc->GetKey()));
ASSERT(!HEAP->InNewSpace(desc->GetValue()));
- fast_set(this, ToKeyIndex(descriptor_number), desc->GetKey());
+ NoWriteBarrierSet(this,
+ ToKeyIndex(descriptor_number),
+ desc->GetKey());
FixedArray* content_array = GetContentArray();
- fast_set(content_array, ToValueIndex(descriptor_number), desc->GetValue());
- fast_set(content_array, ToDetailsIndex(descriptor_number),
- desc->GetDetails().AsSmi());
+ NoWriteBarrierSet(content_array,
+ ToValueIndex(descriptor_number),
+ desc->GetValue());
+ NoWriteBarrierSet(content_array,
+ ToDetailsIndex(descriptor_number),
+ desc->GetDetails().AsSmi());
}
-void DescriptorArray::CopyFrom(int index, DescriptorArray* src, int src_index) {
+void DescriptorArray::CopyFrom(int index,
+ DescriptorArray* src,
+ int src_index,
+ const WhitenessWitness& witness) {
Descriptor desc;
src->Get(src_index, &desc);
- Set(index, &desc);
+ Set(index, &desc, witness);
}
-void DescriptorArray::Swap(int first, int second) {
- fast_swap(this, ToKeyIndex(first), ToKeyIndex(second));
+void DescriptorArray::NoWriteBarrierSwapDescriptors(int first, int second) {
+ NoWriteBarrierSwap(this, ToKeyIndex(first), ToKeyIndex(second));
FixedArray* content_array = GetContentArray();
- fast_swap(content_array, ToValueIndex(first), ToValueIndex(second));
- fast_swap(content_array, ToDetailsIndex(first), ToDetailsIndex(second));
+ NoWriteBarrierSwap(content_array,
+ ToValueIndex(first),
+ ToValueIndex(second));
+ NoWriteBarrierSwap(content_array,
+ ToDetailsIndex(first),
+ ToDetailsIndex(second));
+}
+
+
+DescriptorArray::WhitenessWitness::WhitenessWitness(DescriptorArray* array)
+ : marking_(array->GetHeap()->incremental_marking()) {
+ marking_->EnterNoMarkingScope();
+ if (array->number_of_descriptors() > 0) {
+ ASSERT(Marking::Color(array) == Marking::WHITE_OBJECT);
+ ASSERT(Marking::Color(array->GetContentArray()) == Marking::WHITE_OBJECT);
+ }
+}
+
+
+DescriptorArray::WhitenessWitness::~WhitenessWitness() {
+ marking_->LeaveNoMarkingScope();
}
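
WhitenessWitness is an RAII guard: the constructor enters a no-marking scope, after asserting the descriptor arrays are still white, which is what makes the barrier-free NoWriteBarrierSet calls above safe, and the destructor leaves the scope on every exit path. The bare pattern, as a sketch with illustrative types:

// Sketch: RAII pairing of Enter/Leave so no path forgets to leave.
#include <cassert>

struct Marking {
  int no_marking_depth = 0;
  void EnterNoMarkingScope() { ++no_marking_depth; }
  void LeaveNoMarkingScope() { --no_marking_depth; }
};

class NoMarkingScope {
 public:
  explicit NoMarkingScope(Marking* marking) : marking_(marking) {
    marking_->EnterNoMarkingScope();
  }
  ~NoMarkingScope() { marking_->LeaveNoMarkingScope(); }
 private:
  Marking* marking_;
};

int main() {
  Marking marking;
  {
    NoMarkingScope witness(&marking);
    assert(marking.no_marking_depth == 1);  // barrier-free writes here
  }
  assert(marking.no_marking_depth == 0);    // left even on early exit
  return 0;
}
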
@@ -2083,7 +1995,7 @@ int HashTable<Shape, Key>::FindEntry(Isolate* isolate, Key key) {
while (true) {
Object* element = KeyAt(entry);
if (element == isolate->heap()->undefined_value()) break; // Empty entry.
- if (element != isolate->heap()->null_value() &&
+ if (element != isolate->heap()->the_hole_value() &&
Shape::IsMatch(key, element)) return entry;
entry = NextProbe(entry, count++, capacity);
}
@@ -2123,6 +2035,7 @@ CAST_ACCESSOR(DeoptimizationOutputData)
CAST_ACCESSOR(SymbolTable)
CAST_ACCESSOR(JSFunctionResultCache)
CAST_ACCESSOR(NormalizedMapCache)
+CAST_ACCESSOR(ScopeInfo)
CAST_ACCESSOR(CompilationCacheTable)
CAST_ACCESSOR(CodeCacheHashTable)
CAST_ACCESSOR(PolymorphicCodeCacheHashTable)
@@ -2155,9 +2068,12 @@ CAST_ACCESSOR(JSArray)
CAST_ACCESSOR(JSRegExp)
CAST_ACCESSOR(JSProxy)
CAST_ACCESSOR(JSFunctionProxy)
+CAST_ACCESSOR(JSSet)
+CAST_ACCESSOR(JSMap)
CAST_ACCESSOR(JSWeakMap)
CAST_ACCESSOR(Foreign)
CAST_ACCESSOR(ByteArray)
+CAST_ACCESSOR(FreeSpace)
CAST_ACCESSOR(ExternalArray)
CAST_ACCESSOR(ExternalByteArray)
CAST_ACCESSOR(ExternalUnsignedByteArray)
@@ -2184,6 +2100,7 @@ HashTable<Shape, Key>* HashTable<Shape, Key>::cast(Object* obj) {
SMI_ACCESSORS(FixedArrayBase, length, kLengthOffset)
+SMI_ACCESSORS(FreeSpace, size, kSizeOffset)
SMI_ACCESSORS(String, length, kLengthOffset)
@@ -2340,7 +2257,7 @@ String* SlicedString::parent() {
void SlicedString::set_parent(String* parent) {
- ASSERT(parent->IsSeqString());
+ ASSERT(parent->IsSeqString() || parent->IsExternalString());
WRITE_FIELD(this, kParentOffset, parent);
}
@@ -2360,7 +2277,7 @@ Object* ConsString::unchecked_first() {
void ConsString::set_first(String* value, WriteBarrierMode mode) {
WRITE_FIELD(this, kFirstOffset, value);
- CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kFirstOffset, mode);
+ CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kFirstOffset, value, mode);
}
@@ -2376,29 +2293,83 @@ Object* ConsString::unchecked_second() {
void ConsString::set_second(String* value, WriteBarrierMode mode) {
WRITE_FIELD(this, kSecondOffset, value);
- CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kSecondOffset, mode);
+ CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kSecondOffset, value, mode);
+}
+
+
+bool ExternalString::is_short() {
+ InstanceType type = map()->instance_type();
+ return (type & kShortExternalStringMask) == kShortExternalStringTag;
}
-ExternalAsciiString::Resource* ExternalAsciiString::resource() {
+const ExternalAsciiString::Resource* ExternalAsciiString::resource() {
return *reinterpret_cast<Resource**>(FIELD_ADDR(this, kResourceOffset));
}
+void ExternalAsciiString::update_data_cache() {
+ if (is_short()) return;
+ const char** data_field =
+ reinterpret_cast<const char**>(FIELD_ADDR(this, kResourceDataOffset));
+ *data_field = resource()->data();
+}
+
+
void ExternalAsciiString::set_resource(
- ExternalAsciiString::Resource* resource) {
- *reinterpret_cast<Resource**>(FIELD_ADDR(this, kResourceOffset)) = resource;
+ const ExternalAsciiString::Resource* resource) {
+ *reinterpret_cast<const Resource**>(
+ FIELD_ADDR(this, kResourceOffset)) = resource;
+ if (resource != NULL) update_data_cache();
+}
+
+
+const char* ExternalAsciiString::GetChars() {
+ return resource()->data();
}
-ExternalTwoByteString::Resource* ExternalTwoByteString::resource() {
+uint16_t ExternalAsciiString::ExternalAsciiStringGet(int index) {
+ ASSERT(index >= 0 && index < length());
+ return GetChars()[index];
+}
+
+
+const ExternalTwoByteString::Resource* ExternalTwoByteString::resource() {
return *reinterpret_cast<Resource**>(FIELD_ADDR(this, kResourceOffset));
}
+void ExternalTwoByteString::update_data_cache() {
+ if (is_short()) return;
+ const uint16_t** data_field =
+ reinterpret_cast<const uint16_t**>(FIELD_ADDR(this, kResourceDataOffset));
+ *data_field = resource()->data();
+}
+
+
void ExternalTwoByteString::set_resource(
- ExternalTwoByteString::Resource* resource) {
- *reinterpret_cast<Resource**>(FIELD_ADDR(this, kResourceOffset)) = resource;
+ const ExternalTwoByteString::Resource* resource) {
+ *reinterpret_cast<const Resource**>(
+ FIELD_ADDR(this, kResourceOffset)) = resource;
+ if (resource != NULL) update_data_cache();
+}
+
+
+const uint16_t* ExternalTwoByteString::GetChars() {
+ return resource()->data();
+}
+
+
+uint16_t ExternalTwoByteString::ExternalTwoByteStringGet(int index) {
+ ASSERT(index >= 0 && index < length());
+ return GetChars()[index];
+}
+
+
+const uint16_t* ExternalTwoByteString::ExternalTwoByteStringGetData(
+ unsigned start) {
+ return GetChars() + start;
}
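
update_data_cache copies the resource's data pointer into a field on the string itself, so character reads avoid an indirect call through the resource; short external strings have no cache field and skip the update. A sketch of the caching idea with illustrative types (the accessors above are the real version):

// Sketch: caching an external resource's data pointer in the object.
#include <cassert>

struct Resource {
  const char* payload;
  const char* data() const { return payload; }
};

struct ExternalString {
  const Resource* resource = nullptr;
  const char* cached_data = nullptr;  // refreshed on set_resource
  void set_resource(const Resource* r) {
    resource = r;
    if (r != nullptr) cached_data = r->data();
  }
  char Get(int i) const { return cached_data[i]; }
};

int main() {
  Resource r = {"hello"};
  ExternalString s;
  s.set_resource(&r);
  assert(s.Get(1) == 'e');  // read goes through the cached pointer
  return 0;
}
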
@@ -2698,6 +2669,9 @@ int HeapObject::SizeFromMap(Map* map) {
if (instance_type == BYTE_ARRAY_TYPE) {
return reinterpret_cast<ByteArray*>(this)->ByteArraySize();
}
+ if (instance_type == FREE_SPACE_TYPE) {
+ return reinterpret_cast<FreeSpace*>(this)->size();
+ }
if (instance_type == STRING_TYPE) {
return SeqTwoByteString::SizeFor(
reinterpret_cast<SeqTwoByteString*>(this)->length());
@@ -2859,12 +2833,6 @@ JSFunction* Map::unchecked_constructor() {
}
-FixedArray* Map::unchecked_prototype_transitions() {
- return reinterpret_cast<FixedArray*>(
- READ_FIELD(this, kPrototypeTransitionsOffset));
-}
-
-
Code::Flags Code::flags() {
return static_cast<Flags>(READ_INT_FIELD(this, kFlagsOffset));
}
@@ -2936,6 +2904,19 @@ void Code::set_major_key(int major) {
}
+bool Code::is_pregenerated() {
+ return kind() == STUB && IsPregeneratedField::decode(flags());
+}
+
+
+void Code::set_is_pregenerated(bool value) {
+ ASSERT(kind() == STUB);
+ Flags f = flags();
+ f = static_cast<Flags>(IsPregeneratedField::update(f, value));
+ set_flags(f);
+}
+
+
bool Code::optimizable() {
ASSERT(kind() == FUNCTION);
return READ_BYTE_FIELD(this, kOptimizableOffset) == 1;
@@ -2978,6 +2959,21 @@ void Code::set_has_debug_break_slots(bool value) {
}
+bool Code::is_compiled_optimizable() {
+ ASSERT(kind() == FUNCTION);
+ byte flags = READ_BYTE_FIELD(this, kFullCodeFlags);
+ return FullCodeFlagsIsCompiledOptimizable::decode(flags);
+}
+
+
+void Code::set_compiled_optimizable(bool value) {
+ ASSERT(kind() == FUNCTION);
+ byte flags = READ_BYTE_FIELD(this, kFullCodeFlags);
+ flags = FullCodeFlagsIsCompiledOptimizable::update(flags, value);
+ WRITE_BYTE_FIELD(this, kFullCodeFlags, flags);
+}
+
+
int Code::allow_osr_at_loop_nesting_level() {
ASSERT(kind() == FUNCTION);
return READ_BYTE_FIELD(this, kAllowOSRAtLoopNestingLevelOffset);
@@ -3101,6 +3097,19 @@ void Code::set_to_boolean_state(byte value) {
WRITE_BYTE_FIELD(this, kToBooleanTypeOffset, value);
}
+
+bool Code::has_function_cache() {
+ ASSERT(kind() == STUB);
+ return READ_BYTE_FIELD(this, kHasFunctionCacheOffset) != 0;
+}
+
+
+void Code::set_has_function_cache(bool flag) {
+ ASSERT(kind() == STUB);
+ WRITE_BYTE_FIELD(this, kHasFunctionCacheOffset, flag);
+}
+
+
bool Code::is_inline_cache_stub() {
Kind kind = this->kind();
return kind >= FIRST_IC_KIND && kind <= LAST_IC_KIND;
@@ -3186,48 +3195,6 @@ Code* Code::GetCodeFromTargetAddress(Address address) {
}
-Isolate* Map::isolate() {
- return heap()->isolate();
-}
-
-
-Heap* Map::heap() {
- // NOTE: address() helper is not used to save one instruction.
- Heap* heap = Page::FromAddress(reinterpret_cast<Address>(this))->heap_;
- ASSERT(heap != NULL);
- ASSERT(heap->isolate() == Isolate::Current());
- return heap;
-}
-
-
-Heap* Code::heap() {
- // NOTE: address() helper is not used to save one instruction.
- Heap* heap = Page::FromAddress(reinterpret_cast<Address>(this))->heap_;
- ASSERT(heap != NULL);
- ASSERT(heap->isolate() == Isolate::Current());
- return heap;
-}
-
-
-Isolate* Code::isolate() {
- return heap()->isolate();
-}
-
-
-Heap* JSGlobalPropertyCell::heap() {
- // NOTE: address() helper is not used to save one instruction.
- Heap* heap = Page::FromAddress(reinterpret_cast<Address>(this))->heap_;
- ASSERT(heap != NULL);
- ASSERT(heap->isolate() == Isolate::Current());
- return heap;
-}
-
-
-Isolate* JSGlobalPropertyCell::isolate() {
- return heap()->isolate();
-}
-
-
Object* Code::GetObjectFromEntryAddress(Address location_of_address) {
return HeapObject::
FromAddress(Memory::Address_at(location_of_address) - Code::kHeaderSize);
@@ -3242,46 +3209,7 @@ Object* Map::prototype() {
void Map::set_prototype(Object* value, WriteBarrierMode mode) {
ASSERT(value->IsNull() || value->IsJSReceiver());
WRITE_FIELD(this, kPrototypeOffset, value);
- CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kPrototypeOffset, mode);
-}
-
-
-MaybeObject* Map::GetFastElementsMap() {
- if (has_fast_elements()) return this;
- Object* obj;
- { MaybeObject* maybe_obj = CopyDropTransitions();
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- Map* new_map = Map::cast(obj);
- new_map->set_elements_kind(FAST_ELEMENTS);
- isolate()->counters()->map_to_fast_elements()->Increment();
- return new_map;
-}
-
-
-MaybeObject* Map::GetFastDoubleElementsMap() {
- if (has_fast_double_elements()) return this;
- Object* obj;
- { MaybeObject* maybe_obj = CopyDropTransitions();
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- Map* new_map = Map::cast(obj);
- new_map->set_elements_kind(FAST_DOUBLE_ELEMENTS);
- isolate()->counters()->map_to_fast_double_elements()->Increment();
- return new_map;
-}
-
-
-MaybeObject* Map::GetSlowElementsMap() {
- if (!has_fast_elements() && !has_fast_double_elements()) return this;
- Object* obj;
- { MaybeObject* maybe_obj = CopyDropTransitions();
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- Map* new_map = Map::cast(obj);
- new_map->set_elements_kind(DICTIONARY_ELEMENTS);
- isolate()->counters()->map_to_slow_elements()->Increment();
- return new_map;
+ CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kPrototypeOffset, value, mode);
}
@@ -3316,7 +3244,8 @@ void Map::set_instance_descriptors(DescriptorArray* value,
WriteBarrierMode mode) {
Object* object = READ_FIELD(this,
kInstanceDescriptorsOrBitField3Offset);
- if (value == isolate()->heap()->empty_descriptor_array()) {
+ Heap* heap = GetHeap();
+ if (value == heap->empty_descriptor_array()) {
clear_instance_descriptors();
return;
} else {
@@ -3329,10 +3258,8 @@ void Map::set_instance_descriptors(DescriptorArray* value,
}
ASSERT(!is_shared());
WRITE_FIELD(this, kInstanceDescriptorsOrBitField3Offset, value);
- CONDITIONAL_WRITE_BARRIER(GetHeap(),
- this,
- kInstanceDescriptorsOrBitField3Offset,
- mode);
+ CONDITIONAL_WRITE_BARRIER(
+ heap, this, kInstanceDescriptorsOrBitField3Offset, value, mode);
}
@@ -3361,14 +3288,22 @@ void Map::set_bit_field3(int value) {
}
+FixedArray* Map::unchecked_prototype_transitions() {
+ return reinterpret_cast<FixedArray*>(
+ READ_FIELD(this, kPrototypeTransitionsOffset));
+}
+
+
ACCESSORS(Map, code_cache, Object, kCodeCacheOffset)
ACCESSORS(Map, prototype_transitions, FixedArray, kPrototypeTransitionsOffset)
ACCESSORS(Map, constructor, Object, kConstructorOffset)
ACCESSORS(JSFunction, shared, SharedFunctionInfo, kSharedFunctionInfoOffset)
-ACCESSORS(JSFunction, literals, FixedArray, kLiteralsOffset)
-ACCESSORS_GCSAFE(JSFunction, next_function_link, Object,
- kNextFunctionLinkOffset)
+ACCESSORS(JSFunction, literals_or_bindings, FixedArray, kLiteralsOffset)
+ACCESSORS(JSFunction,
+ next_function_link,
+ Object,
+ kNextFunctionLinkOffset)
ACCESSORS(GlobalObject, builtins, JSBuiltinsObject, kBuiltinsOffset)
ACCESSORS(GlobalObject, global_context, Context, kGlobalContextOffset)
@@ -3457,8 +3392,8 @@ ACCESSORS(BreakPointInfo, break_point_objects, Object, kBreakPointObjectsIndex)
#endif
ACCESSORS(SharedFunctionInfo, name, Object, kNameOffset)
-ACCESSORS_GCSAFE(SharedFunctionInfo, construct_stub, Code, kConstructStubOffset)
-ACCESSORS_GCSAFE(SharedFunctionInfo, initial_map, Object, kInitialMapOffset)
+ACCESSORS(SharedFunctionInfo, construct_stub, Code, kConstructStubOffset)
+ACCESSORS(SharedFunctionInfo, initial_map, Object, kInitialMapOffset)
ACCESSORS(SharedFunctionInfo, instance_class_name, Object,
kInstanceClassNameOffset)
ACCESSORS(SharedFunctionInfo, function_data, Object, kFunctionDataOffset)
@@ -3607,8 +3542,39 @@ void SharedFunctionInfo::set_optimization_disabled(bool disable) {
}
-BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, strict_mode,
- kStrictModeFunction)
+LanguageMode SharedFunctionInfo::language_mode() {
+ int hints = compiler_hints();
+ if (BooleanBit::get(hints, kExtendedModeFunction)) {
+ ASSERT(BooleanBit::get(hints, kStrictModeFunction));
+ return EXTENDED_MODE;
+ }
+ return BooleanBit::get(hints, kStrictModeFunction)
+ ? STRICT_MODE : CLASSIC_MODE;
+}
+
+
+void SharedFunctionInfo::set_language_mode(LanguageMode language_mode) {
+ // We only allow language mode transitions that set the same language
+ // mode again or go up in the chain:
+ // CLASSIC_MODE -> STRICT_MODE -> EXTENDED_MODE.
+ ASSERT(this->language_mode() == CLASSIC_MODE ||
+ this->language_mode() == language_mode ||
+ language_mode == EXTENDED_MODE);
+ int hints = compiler_hints();
+ hints = BooleanBit::set(
+ hints, kStrictModeFunction, language_mode != CLASSIC_MODE);
+ hints = BooleanBit::set(
+ hints, kExtendedModeFunction, language_mode == EXTENDED_MODE);
+ set_compiler_hints(hints);
+}
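
Two boolean compiler hints encode three language modes, with the invariant that extended mode implies strict mode (asserted in language_mode above). A sketch of the same packing with assumed bit positions:

// Sketch: three modes in two bits, where EXTENDED implies STRICT.
#include <cassert>
#include <initializer_list>

enum LanguageMode { CLASSIC_MODE, STRICT_MODE, EXTENDED_MODE };
const int kStrictBit = 1 << 0;
const int kExtendedBit = 1 << 1;

LanguageMode Decode(int hints) {
  if (hints & kExtendedBit) {
    assert(hints & kStrictBit);  // extended mode is also strict
    return EXTENDED_MODE;
  }
  return (hints & kStrictBit) ? STRICT_MODE : CLASSIC_MODE;
}

int Encode(LanguageMode mode) {
  int hints = 0;
  if (mode != CLASSIC_MODE) hints |= kStrictBit;
  if (mode == EXTENDED_MODE) hints |= kExtendedBit;
  return hints;
}

int main() {
  for (LanguageMode m : {CLASSIC_MODE, STRICT_MODE, EXTENDED_MODE})
    assert(Decode(Encode(m)) == m);  // round-trips for all three modes
  return 0;
}
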
+
+
+bool SharedFunctionInfo::is_classic_mode() {
+ return !BooleanBit::get(compiler_hints(), kStrictModeFunction);
+}
+
+BOOL_GETTER(SharedFunctionInfo, compiler_hints, is_extended_mode,
+            kExtendedModeFunction)
BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, native, kNative)
BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints,
name_should_print_as_anonymous,
@@ -3664,20 +3630,23 @@ Code* SharedFunctionInfo::unchecked_code() {
void SharedFunctionInfo::set_code(Code* value, WriteBarrierMode mode) {
WRITE_FIELD(this, kCodeOffset, value);
- ASSERT(!Isolate::Current()->heap()->InNewSpace(value));
+ CONDITIONAL_WRITE_BARRIER(value->GetHeap(), this, kCodeOffset, value, mode);
}
-SerializedScopeInfo* SharedFunctionInfo::scope_info() {
- return reinterpret_cast<SerializedScopeInfo*>(
- READ_FIELD(this, kScopeInfoOffset));
+ScopeInfo* SharedFunctionInfo::scope_info() {
+ return reinterpret_cast<ScopeInfo*>(READ_FIELD(this, kScopeInfoOffset));
}
-void SharedFunctionInfo::set_scope_info(SerializedScopeInfo* value,
+void SharedFunctionInfo::set_scope_info(ScopeInfo* value,
WriteBarrierMode mode) {
WRITE_FIELD(this, kScopeInfoOffset, reinterpret_cast<Object*>(value));
- CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kScopeInfoOffset, mode);
+ CONDITIONAL_WRITE_BARRIER(GetHeap(),
+ this,
+ kScopeInfoOffset,
+ reinterpret_cast<Object*>(value),
+ mode);
}
@@ -3725,8 +3694,8 @@ int SharedFunctionInfo::code_age() {
void SharedFunctionInfo::set_code_age(int code_age) {
- set_compiler_hints(compiler_hints() |
- ((code_age & kCodeAgeMask) << kCodeAgeShift));
+ int hints = compiler_hints() & ~(kCodeAgeMask << kCodeAgeShift);
+ set_compiler_hints(hints | ((code_age & kCodeAgeMask) << kCodeAgeShift));
}
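
The old set_code_age OR'ed the new age into compiler_hints without clearing the field first, so stale bits persisted and a smaller age could never replace a larger one; the fix masks the field out before OR-ing. The general read-modify-write idiom, with assumed field constants:

// Sketch: a packed bitfield must be cleared before OR-ing in a new
// value; OR alone can only set bits, never reset them.
#include <cassert>

const int kAgeShift = 4;
const int kAgeMask = 0x7;  // assumed 3-bit field

int SetAgeBuggy(int hints, int age) {
  return hints | ((age & kAgeMask) << kAgeShift);   // old behaviour
}

int SetAgeFixed(int hints, int age) {
  hints &= ~(kAgeMask << kAgeShift);                // clear field first
  return hints | ((age & kAgeMask) << kAgeShift);
}

int main() {
  int h = SetAgeFixed(0, 3);
  assert(SetAgeBuggy(h, 4) != SetAgeFixed(h, 4));   // 3|4 == 7, not 4
  h = SetAgeFixed(h, 4);
  assert(((h >> kAgeShift) & kAgeMask) == 4);       // cleanly overwritten
  return 0;
}
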
@@ -3774,10 +3743,13 @@ Code* JSFunction::unchecked_code() {
void JSFunction::set_code(Code* value) {
- // Skip the write barrier because code is never in new space.
ASSERT(!HEAP->InNewSpace(value));
Address entry = value->entry();
WRITE_INTPTR_FIELD(this, kCodeEntryOffset, reinterpret_cast<intptr_t>(entry));
+ GetHeap()->incremental_marking()->RecordWriteOfCodeEntry(
+ this,
+ HeapObject::RawField(this, kCodeEntryOffset),
+ value);
}
@@ -3817,7 +3789,7 @@ SharedFunctionInfo* JSFunction::unchecked_shared() {
void JSFunction::set_context(Object* value) {
ASSERT(value->IsUndefined() || value->IsContext());
WRITE_FIELD(this, kContextOffset, value);
- WRITE_BARRIER(this, kContextOffset);
+ WRITE_BARRIER(GetHeap(), this, kContextOffset, value);
}
ACCESSORS(JSFunction, prototype_or_initial_map, Object,
@@ -3876,7 +3848,36 @@ bool JSFunction::is_compiled() {
}
+FixedArray* JSFunction::literals() {
+ ASSERT(!shared()->bound());
+ return literals_or_bindings();
+}
+
+
+void JSFunction::set_literals(FixedArray* literals) {
+ ASSERT(!shared()->bound());
+ set_literals_or_bindings(literals);
+}
+
+
+FixedArray* JSFunction::function_bindings() {
+ ASSERT(shared()->bound());
+ return literals_or_bindings();
+}
+
+
+void JSFunction::set_function_bindings(FixedArray* bindings) {
+ ASSERT(shared()->bound());
+ // The bindings of a bound function may be initialized to the empty
+ // fixed array before the actual bindings are set.
+ ASSERT(bindings == GetHeap()->empty_fixed_array() ||
+ bindings->map() == GetHeap()->fixed_cow_array_map());
+ set_literals_or_bindings(bindings);
+}
+
+
int JSFunction::NumberOfLiterals() {
+ ASSERT(!shared()->bound());
return literals()->length();
}
@@ -3891,7 +3892,7 @@ void JSBuiltinsObject::set_javascript_builtin(Builtins::JavaScript id,
Object* value) {
ASSERT(id < kJSBuiltinsCount); // id is unsigned.
WRITE_FIELD(this, OffsetOfFunctionWithId(id), value);
- WRITE_BARRIER(this, OffsetOfFunctionWithId(id));
+ WRITE_BARRIER(GetHeap(), this, OffsetOfFunctionWithId(id), value);
}
@@ -3910,6 +3911,7 @@ void JSBuiltinsObject::set_javascript_builtin_code(Builtins::JavaScript id,
ACCESSORS(JSProxy, handler, Object, kHandlerOffset)
+ACCESSORS(JSProxy, hash, Object, kHashOffset)
ACCESSORS(JSFunctionProxy, call_trap, Object, kCallTrapOffset)
ACCESSORS(JSFunctionProxy, construct_trap, Object, kConstructTrapOffset)
@@ -3922,22 +3924,19 @@ void JSProxy::InitializeBody(int object_size, Object* value) {
}
-ACCESSORS(JSWeakMap, table, ObjectHashTable, kTableOffset)
-ACCESSORS_GCSAFE(JSWeakMap, next, Object, kNextOffset)
-
-
-ObjectHashTable* JSWeakMap::unchecked_table() {
- return reinterpret_cast<ObjectHashTable*>(READ_FIELD(this, kTableOffset));
-}
+ACCESSORS(JSSet, table, Object, kTableOffset)
+ACCESSORS(JSMap, table, Object, kTableOffset)
+ACCESSORS(JSWeakMap, table, Object, kTableOffset)
+ACCESSORS(JSWeakMap, next, Object, kNextOffset)
-Address Foreign::address() {
- return AddressFrom<Address>(READ_INTPTR_FIELD(this, kAddressOffset));
+Address Foreign::foreign_address() {
+ return AddressFrom<Address>(READ_INTPTR_FIELD(this, kForeignAddressOffset));
}
-void Foreign::set_address(Address value) {
- WRITE_INTPTR_FIELD(this, kAddressOffset, OffsetFrom(value));
+void Foreign::set_foreign_address(Address value) {
+ WRITE_INTPTR_FIELD(this, kForeignAddressOffset, OffsetFrom(value));
}
@@ -3969,6 +3968,7 @@ JSMessageObject* JSMessageObject::cast(Object* obj) {
INT_ACCESSORS(Code, instruction_size, kInstructionSizeOffset)
ACCESSORS(Code, relocation_info, ByteArray, kRelocationInfoOffset)
+ACCESSORS(Code, handler_table, FixedArray, kHandlerTableOffset)
ACCESSORS(Code, deoptimization_data, FixedArray, kDeoptimizationDataOffset)
ACCESSORS(Code, next_code_flushing_candidate,
Object, kNextCodeFlushingCandidateOffset)
@@ -4015,9 +4015,8 @@ byte* Code::entry() {
}
-bool Code::contains(byte* pc) {
- return (instruction_start() <= pc) &&
- (pc <= instruction_start() + instruction_size());
+bool Code::contains(byte* inner_pointer) {
+ return (address() <= inner_pointer) && (inner_pointer <= address() + Size());
}
@@ -4096,6 +4095,7 @@ void JSRegExp::SetDataAtUnchecked(int index, Object* value, Heap* heap) {
if (value->IsSmi()) {
fa->set_unchecked(index, Smi::cast(value));
} else {
+ // We only do this during GC, so we don't need to notify the write barrier.
fa->set_unchecked(heap, index, value, SKIP_WRITE_BARRIER);
}
}
@@ -4103,15 +4103,22 @@ void JSRegExp::SetDataAtUnchecked(int index, Object* value, Heap* heap) {
ElementsKind JSObject::GetElementsKind() {
ElementsKind kind = map()->elements_kind();
- ASSERT((kind == FAST_ELEMENTS &&
- (elements()->map() == GetHeap()->fixed_array_map() ||
- elements()->map() == GetHeap()->fixed_cow_array_map())) ||
- (kind == FAST_DOUBLE_ELEMENTS &&
- elements()->IsFixedDoubleArray()) ||
- (kind == DICTIONARY_ELEMENTS &&
- elements()->IsFixedArray() &&
- elements()->IsDictionary()) ||
- (kind > DICTIONARY_ELEMENTS));
+#if DEBUG
+ FixedArrayBase* fixed_array =
+ reinterpret_cast<FixedArrayBase*>(READ_FIELD(this, kElementsOffset));
+ Map* map = fixed_array->map();
+ ASSERT(((kind == FAST_ELEMENTS || kind == FAST_SMI_ONLY_ELEMENTS) &&
+ (map == GetHeap()->fixed_array_map() ||
+ map == GetHeap()->fixed_cow_array_map())) ||
+ (kind == FAST_DOUBLE_ELEMENTS &&
+ fixed_array->IsFixedDoubleArray()) ||
+ (kind == DICTIONARY_ELEMENTS &&
+ fixed_array->IsFixedArray() &&
+ fixed_array->IsDictionary()) ||
+ (kind > DICTIONARY_ELEMENTS));
+ ASSERT((kind != NON_STRICT_ARGUMENTS_ELEMENTS) ||
+ (elements()->IsFixedArray() && elements()->length() >= 2));
+#endif
return kind;
}
@@ -4126,6 +4133,18 @@ bool JSObject::HasFastElements() {
}
+bool JSObject::HasFastSmiOnlyElements() {
+ return GetElementsKind() == FAST_SMI_ONLY_ELEMENTS;
+}
+
+
+bool JSObject::HasFastTypeElements() {
+ ElementsKind elements_kind = GetElementsKind();
+ return elements_kind == FAST_SMI_ONLY_ELEMENTS ||
+ elements_kind == FAST_ELEMENTS;
+}
+
+
bool JSObject::HasFastDoubleElements() {
return GetElementsKind() == FAST_DOUBLE_ELEMENTS;
}
@@ -4136,6 +4155,11 @@ bool JSObject::HasDictionaryElements() {
}
+bool JSObject::HasNonStrictArgumentsElements() {
+ return GetElementsKind() == NON_STRICT_ARGUMENTS_ELEMENTS;
+}
+
+
bool JSObject::HasExternalArrayElements() {
HeapObject* array = elements();
ASSERT(array != NULL);
@@ -4187,7 +4211,7 @@ bool JSObject::AllowsSetElementsLength() {
MaybeObject* JSObject::EnsureWritableFastElements() {
- ASSERT(HasFastElements());
+ ASSERT(HasFastTypeElements());
FixedArray* elems = FixedArray::cast(elements());
Isolate* isolate = GetIsolate();
if (elems->map() != isolate->heap()->fixed_cow_array_map()) return elems;
@@ -4363,44 +4387,18 @@ Object* JSObject::BypassGlobalProxy() {
}
-bool JSObject::HasHiddenPropertiesObject() {
- ASSERT(!IsJSGlobalProxy());
- return GetPropertyAttributePostInterceptor(this,
- GetHeap()->hidden_symbol(),
- false) != ABSENT;
+MaybeObject* JSReceiver::GetIdentityHash(CreationFlag flag) {
+ return IsJSProxy()
+ ? JSProxy::cast(this)->GetIdentityHash(flag)
+ : JSObject::cast(this)->GetIdentityHash(flag);
}
-Object* JSObject::GetHiddenPropertiesObject() {
- ASSERT(!IsJSGlobalProxy());
- PropertyAttributes attributes;
- // You can't install a getter on a property indexed by the hidden symbol,
- // so we can be sure that GetLocalPropertyPostInterceptor returns a real
- // object.
- Object* result =
- GetLocalPropertyPostInterceptor(this,
- GetHeap()->hidden_symbol(),
- &attributes)->ToObjectUnchecked();
- return result;
-}
-
-
-MaybeObject* JSObject::SetHiddenPropertiesObject(Object* hidden_obj) {
- ASSERT(!IsJSGlobalProxy());
- return SetPropertyPostInterceptor(GetHeap()->hidden_symbol(),
- hidden_obj,
- DONT_ENUM,
- kNonStrictMode);
-}
-
-
-bool JSObject::HasHiddenProperties() {
- return !GetHiddenProperties(OMIT_CREATION)->ToObjectChecked()->IsUndefined();
-}
-
-
-bool JSObject::HasElement(uint32_t index) {
- return HasElementWithReceiver(this, index);
+bool JSReceiver::HasElement(uint32_t index) {
+ if (IsJSProxy()) {
+ return JSProxy::cast(this)->HasElementWithHandler(index);
+ }
+ return JSObject::cast(this)->HasElementWithReceiver(this, index);
}
@@ -4463,7 +4461,7 @@ void Dictionary<Shape, Key>::SetEntry(int entry,
WriteBarrierMode mode = FixedArray::GetWriteBarrierMode(no_gc);
FixedArray::set(index, key, mode);
FixedArray::set(index+1, value, mode);
- FixedArray::fast_set(this, index+2, details.AsSmi());
+ FixedArray::set(index+2, details.AsSmi());
}
@@ -4512,36 +4510,33 @@ MaybeObject* StringDictionaryShape::AsObject(String* key) {
}
-bool ObjectHashTableShape::IsMatch(JSObject* key, Object* other) {
- return key == JSObject::cast(other);
+template <int entrysize>
+bool ObjectHashTableShape<entrysize>::IsMatch(Object* key, Object* other) {
+ return key->SameValue(other);
}
-uint32_t ObjectHashTableShape::Hash(JSObject* key) {
- MaybeObject* maybe_hash = key->GetIdentityHash(JSObject::OMIT_CREATION);
- ASSERT(!maybe_hash->IsFailure());
- return Smi::cast(maybe_hash->ToObjectUnchecked())->value();
+template <int entrysize>
+uint32_t ObjectHashTableShape<entrysize>::Hash(Object* key) {
+ MaybeObject* maybe_hash = key->GetHash(OMIT_CREATION);
+ return Smi::cast(maybe_hash->ToObjectChecked())->value();
}
-uint32_t ObjectHashTableShape::HashForObject(JSObject* key, Object* other) {
- MaybeObject* maybe_hash = JSObject::cast(other)->GetIdentityHash(
- JSObject::OMIT_CREATION);
- ASSERT(!maybe_hash->IsFailure());
- return Smi::cast(maybe_hash->ToObjectUnchecked())->value();
+template <int entrysize>
+uint32_t ObjectHashTableShape<entrysize>::HashForObject(Object* key,
+ Object* other) {
+ MaybeObject* maybe_hash = other->GetHash(OMIT_CREATION);
+ return Smi::cast(maybe_hash->ToObjectChecked())->value();
}
-MaybeObject* ObjectHashTableShape::AsObject(JSObject* key) {
+template <int entrysize>
+MaybeObject* ObjectHashTableShape<entrysize>::AsObject(Object* key) {
return key;
}
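
The templated shape lets the table key on arbitrary objects: matching uses SameValue and the hash comes from the key's identity hash. A loose sketch against the standard library, reducing SameValue to pointer identity for brevity (V8's hash table and lazy identity hashes work differently in detail):

// Sketch: an identity-hash keyed table over object pointers.
#include <cassert>
#include <cstddef>
#include <unordered_map>

struct ObjStub {
  int identity_hash;  // lazily created in V8; fixed here for brevity
};

struct IdentityHash {
  std::size_t operator()(const ObjStub* o) const { return o->identity_hash; }
};

int main() {
  std::unordered_map<const ObjStub*, int, IdentityHash> table;
  ObjStub a = {7}, b = {7};    // equal hashes, distinct identities
  table[&a] = 1;
  table[&b] = 2;
  assert(table.size() == 2);   // identity keeps the entries apart
  return 0;
}
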
-void ObjectHashTable::RemoveEntry(int entry) {
- RemoveEntry(entry, GetHeap());
-}
-
-
void Map::ClearCodeCache(Heap* heap) {
// No write barrier is needed since empty_fixed_array is not in new space.
// Please note this function is used during marking:
@@ -4552,7 +4547,7 @@ void Map::ClearCodeCache(Heap* heap) {
void JSArray::EnsureSize(int required_size) {
- ASSERT(HasFastElements());
+ ASSERT(HasFastTypeElements());
FixedArray* elts = FixedArray::cast(elements());
const int kArraySizeThatFitsComfortablyInNewSpace = 128;
if (elts->length() < required_size) {
@@ -4570,13 +4565,17 @@ void JSArray::EnsureSize(int required_size) {
void JSArray::set_length(Smi* length) {
+ // Don't need a write barrier for a Smi.
set_length(static_cast<Object*>(length), SKIP_WRITE_BARRIER);
}
-void JSArray::SetContent(FixedArray* storage) {
+MaybeObject* JSArray::SetContent(FixedArray* storage) {
+ MaybeObject* maybe_object = EnsureCanContainElements(storage);
+ if (maybe_object->IsFailure()) return maybe_object;
set_length(Smi::FromInt(storage->length()));
set_elements(storage);
+ return this;
}
@@ -4586,6 +4585,12 @@ MaybeObject* FixedArray::Copy() {
}
+MaybeObject* FixedDoubleArray::Copy() {
+ if (length() == 0) return this;
+ return GetHeap()->CopyFixedDoubleArray(this);
+}
+
+
Relocatable::Relocatable(Isolate* isolate) {
ASSERT(isolate == Isolate::Current());
isolate_ = isolate;
@@ -4608,14 +4613,14 @@ int JSObject::BodyDescriptor::SizeOf(Map* map, HeapObject* object) {
void Foreign::ForeignIterateBody(ObjectVisitor* v) {
v->VisitExternalReference(
- reinterpret_cast<Address *>(FIELD_ADDR(this, kAddressOffset)));
+ reinterpret_cast<Address*>(FIELD_ADDR(this, kForeignAddressOffset)));
}
template<typename StaticVisitor>
void Foreign::ForeignIterateBody() {
StaticVisitor::VisitExternalReference(
- reinterpret_cast<Address *>(FIELD_ADDR(this, kAddressOffset)));
+ reinterpret_cast<Address*>(FIELD_ADDR(this, kForeignAddressOffset)));
}
diff --git a/deps/v8/src/objects-printer.cc b/deps/v8/src/objects-printer.cc
index 0398572f9..4b5d049f5 100644
--- a/deps/v8/src/objects-printer.cc
+++ b/deps/v8/src/objects-printer.cc
@@ -82,12 +82,18 @@ void HeapObject::HeapObjectPrint(FILE* out) {
case HEAP_NUMBER_TYPE:
HeapNumber::cast(this)->HeapNumberPrint(out);
break;
+ case FIXED_DOUBLE_ARRAY_TYPE:
+ FixedDoubleArray::cast(this)->FixedDoubleArrayPrint(out);
+ break;
case FIXED_ARRAY_TYPE:
FixedArray::cast(this)->FixedArrayPrint(out);
break;
case BYTE_ARRAY_TYPE:
ByteArray::cast(this)->ByteArrayPrint(out);
break;
+ case FREE_SPACE_TYPE:
+ FreeSpace::cast(this)->FreeSpacePrint(out);
+ break;
case EXTERNAL_PIXEL_ARRAY_TYPE:
ExternalPixelArray::cast(this)->ExternalPixelArrayPrint(out);
break;
@@ -189,6 +195,11 @@ void ByteArray::ByteArrayPrint(FILE* out) {
}
+void FreeSpace::FreeSpacePrint(FILE* out) {
+ PrintF(out, "free space, size %d", Size());
+}
+
+
void ExternalPixelArray::ExternalPixelArrayPrint(FILE* out) {
PrintF(out, "external pixel array");
}
@@ -256,16 +267,37 @@ void JSObject::PrintProperties(FILE* out) {
descs->GetCallbacksObject(i)->ShortPrint(out);
PrintF(out, " (callback)\n");
break;
+ case ELEMENTS_TRANSITION: {
+ PrintF(out, "(elements transition to ");
+ Object* descriptor_contents = descs->GetValue(i);
+ if (descriptor_contents->IsMap()) {
+ Map* map = Map::cast(descriptor_contents);
+ PrintElementsKind(out, map->elements_kind());
+ } else {
+ FixedArray* map_array = FixedArray::cast(descriptor_contents);
+ for (int i = 0; i < map_array->length(); ++i) {
+ Map* map = Map::cast(map_array->get(i));
+ if (i != 0) {
+ PrintF(out, ", ");
+ }
+ PrintElementsKind(out, map->elements_kind());
+ }
+ }
+ PrintF(out, ")\n");
+ break;
+ }
case MAP_TRANSITION:
- PrintF(out, " (map transition)\n");
+ PrintF(out, "(map transition)\n");
break;
case CONSTANT_TRANSITION:
- PrintF(out, " (constant transition)\n");
+ PrintF(out, "(constant transition)\n");
break;
case NULL_DESCRIPTOR:
- PrintF(out, " (null descriptor)\n");
+ PrintF(out, "(null descriptor)\n");
break;
- default:
+ case NORMAL: // only in slow mode
+ case HANDLER: // only in lookup results, not in descriptors
+ case INTERCEPTOR: // only in lookup results, not in descriptors
UNREACHABLE();
break;
}
@@ -277,7 +309,10 @@ void JSObject::PrintProperties(FILE* out) {
void JSObject::PrintElements(FILE* out) {
- switch (GetElementsKind()) {
+ // Don't call GetElementsKind; its validation code can cause the
+ // printer to fail when debugging.
+ switch (map()->elements_kind()) {
+ case FAST_SMI_ONLY_ELEMENTS:
case FAST_ELEMENTS: {
// Print in array notation for non-sparse arrays.
FixedArray* p = FixedArray::cast(elements());
@@ -385,8 +420,13 @@ void JSObject::PrintElements(FILE* out) {
void JSObject::JSObjectPrint(FILE* out) {
PrintF(out, "%p: [JSObject]\n", reinterpret_cast<void*>(this));
- PrintF(out, " - map = %p\n", reinterpret_cast<void*>(map()));
- PrintF(out, " - prototype = %p\n", reinterpret_cast<void*>(GetPrototype()));
+ PrintF(out, " - map = %p [", reinterpret_cast<void*>(map()));
+ // Don't call GetElementsKind; its validation code can cause the
+ // printer to fail when debugging.
+ PrintElementsKind(out, this->map()->elements_kind());
+ PrintF(out,
+ "]\n - prototype = %p\n",
+ reinterpret_cast<void*>(GetPrototype()));
PrintF(out, " {\n");
PrintProperties(out);
PrintElements(out);
@@ -406,6 +446,9 @@ static const char* TypeToString(InstanceType type) {
case EXTERNAL_ASCII_SYMBOL_TYPE:
case EXTERNAL_SYMBOL_WITH_ASCII_DATA_TYPE:
case EXTERNAL_SYMBOL_TYPE: return "EXTERNAL_SYMBOL";
+ case SHORT_EXTERNAL_ASCII_SYMBOL_TYPE:
+ case SHORT_EXTERNAL_SYMBOL_WITH_ASCII_DATA_TYPE:
+ case SHORT_EXTERNAL_SYMBOL_TYPE: return "SHORT_EXTERNAL_SYMBOL";
case ASCII_STRING_TYPE: return "ASCII_STRING";
case STRING_TYPE: return "TWO_BYTE_STRING";
case CONS_STRING_TYPE:
@@ -413,8 +456,12 @@ static const char* TypeToString(InstanceType type) {
case EXTERNAL_ASCII_STRING_TYPE:
case EXTERNAL_STRING_WITH_ASCII_DATA_TYPE:
case EXTERNAL_STRING_TYPE: return "EXTERNAL_STRING";
+ case SHORT_EXTERNAL_ASCII_STRING_TYPE:
+ case SHORT_EXTERNAL_STRING_WITH_ASCII_DATA_TYPE:
+ case SHORT_EXTERNAL_STRING_TYPE: return "SHORT_EXTERNAL_STRING";
case FIXED_ARRAY_TYPE: return "FIXED_ARRAY";
case BYTE_ARRAY_TYPE: return "BYTE_ARRAY";
+ case FREE_SPACE_TYPE: return "FREE_SPACE";
case EXTERNAL_PIXEL_ARRAY_TYPE: return "EXTERNAL_PIXEL_ARRAY";
case EXTERNAL_BYTE_ARRAY_TYPE: return "EXTERNAL_BYTE_ARRAY";
case EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE:
@@ -458,7 +505,9 @@ void Map::MapPrint(FILE* out) {
PrintF(out, " - type: %s\n", TypeToString(instance_type()));
PrintF(out, " - instance size: %d\n", instance_size());
PrintF(out, " - inobject properties: %d\n", inobject_properties());
- PrintF(out, " - pre-allocated property fields: %d\n",
+ PrintF(out, " - elements kind: ");
+ PrintElementsKind(out, elements_kind());
+ PrintF(out, "\n - pre-allocated property fields: %d\n",
pre_allocated_property_fields());
PrintF(out, " - unused property fields: %d\n", unused_property_fields());
if (is_hidden_prototype()) {
@@ -516,6 +565,16 @@ void FixedArray::FixedArrayPrint(FILE* out) {
}
+void FixedDoubleArray::FixedDoubleArrayPrint(FILE* out) {
+ HeapObject::PrintHeader(out, "FixedDoubleArray");
+ PrintF(out, " - length: %d", length());
+ for (int i = 0; i < length(); i++) {
+ PrintF(out, "\n [%d]: %g", i, get_scalar(i));
+ }
+ PrintF(out, "\n");
+}
+
+
void JSValue::JSValuePrint(FILE* out) {
HeapObject::PrintHeader(out, "ValueObject");
value()->Print(out);
@@ -587,6 +646,8 @@ void JSProxy::JSProxyPrint(FILE* out) {
PrintF(out, " - map = 0x%p\n", reinterpret_cast<void*>(map()));
PrintF(out, " - handler = ");
handler()->Print(out);
+ PrintF(out, " - hash = ");
+ hash()->Print(out);
PrintF(out, "\n");
}
@@ -607,7 +668,6 @@ void JSFunctionProxy::JSFunctionProxyPrint(FILE* out) {
void JSWeakMap::JSWeakMapPrint(FILE* out) {
HeapObject::PrintHeader(out, "JSWeakMap");
PrintF(out, " - map = 0x%p\n", reinterpret_cast<void*>(map()));
- PrintF(out, " - number of elements = %d\n", table()->NumberOfElements());
PrintF(out, " - table = ");
table()->ShortPrint(out);
PrintF(out, "\n");
@@ -707,7 +767,7 @@ void Code::CodePrint(FILE* out) {
void Foreign::ForeignPrint(FILE* out) {
- PrintF(out, "foreign address : %p", address());
+ PrintF(out, "foreign address : %p", foreign_address());
}
@@ -802,10 +862,15 @@ void FunctionTemplateInfo::FunctionTemplateInfoPrint(FILE* out) {
void ObjectTemplateInfo::ObjectTemplateInfoPrint(FILE* out) {
HeapObject::PrintHeader(out, "ObjectTemplateInfo");
+ PrintF(out, " - tag: ");
+ tag()->ShortPrint(out);
+ PrintF(out, "\n - property_list: ");
+ property_list()->ShortPrint(out);
PrintF(out, "\n - constructor: ");
constructor()->ShortPrint(out);
PrintF(out, "\n - internal_field_count: ");
internal_field_count()->ShortPrint(out);
+ PrintF(out, "\n");
}
diff --git a/deps/v8/src/objects-visiting-inl.h b/deps/v8/src/objects-visiting-inl.h
new file mode 100644
index 000000000..12b044ca9
--- /dev/null
+++ b/deps/v8/src/objects-visiting-inl.h
@@ -0,0 +1,151 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_OBJECTS_VISITING_INL_H_
+#define V8_OBJECTS_VISITING_INL_H_
+
+
+namespace v8 {
+namespace internal {
+
+template<typename StaticVisitor>
+void StaticNewSpaceVisitor<StaticVisitor>::Initialize() {
+ table_.Register(kVisitShortcutCandidate,
+ &FixedBodyVisitor<StaticVisitor,
+ ConsString::BodyDescriptor,
+ int>::Visit);
+
+ table_.Register(kVisitConsString,
+ &FixedBodyVisitor<StaticVisitor,
+ ConsString::BodyDescriptor,
+ int>::Visit);
+
+ table_.Register(kVisitSlicedString,
+ &FixedBodyVisitor<StaticVisitor,
+ SlicedString::BodyDescriptor,
+ int>::Visit);
+
+ table_.Register(kVisitFixedArray,
+ &FlexibleBodyVisitor<StaticVisitor,
+ FixedArray::BodyDescriptor,
+ int>::Visit);
+
+ table_.Register(kVisitFixedDoubleArray, &VisitFixedDoubleArray);
+
+ table_.Register(kVisitGlobalContext,
+ &FixedBodyVisitor<StaticVisitor,
+ Context::ScavengeBodyDescriptor,
+ int>::Visit);
+
+ table_.Register(kVisitByteArray, &VisitByteArray);
+
+ table_.Register(kVisitSharedFunctionInfo,
+ &FixedBodyVisitor<StaticVisitor,
+ SharedFunctionInfo::BodyDescriptor,
+ int>::Visit);
+
+ table_.Register(kVisitSeqAsciiString, &VisitSeqAsciiString);
+
+ table_.Register(kVisitSeqTwoByteString, &VisitSeqTwoByteString);
+
+ table_.Register(kVisitJSFunction,
+ &JSObjectVisitor::
+ template VisitSpecialized<JSFunction::kSize>);
+
+ table_.Register(kVisitFreeSpace, &VisitFreeSpace);
+
+ table_.Register(kVisitJSWeakMap, &JSObjectVisitor::Visit);
+
+ table_.Register(kVisitJSRegExp, &JSObjectVisitor::Visit);
+
+ table_.template RegisterSpecializations<DataObjectVisitor,
+ kVisitDataObject,
+ kVisitDataObjectGeneric>();
+
+ table_.template RegisterSpecializations<JSObjectVisitor,
+ kVisitJSObject,
+ kVisitJSObjectGeneric>();
+ table_.template RegisterSpecializations<StructVisitor,
+ kVisitStruct,
+ kVisitStructGeneric>();
+}
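
Initialize populates a function-pointer table indexed by visitor id; IterateBody then dispatches through it with no virtual calls. The shape of that table, reduced to a sketch with illustrative types:

// Sketch: a static visitor dispatch table, id -> callback.
#include <cassert>

enum VisitorId { kVisitByteArray, kVisitFixedArray, kNumVisitorIds };

struct HeapObjectStub { VisitorId id; int size; };

typedef int (*Callback)(HeapObjectStub*);

static Callback table[kNumVisitorIds];

static int VisitByteArray(HeapObjectStub* o) { return o->size; }
static int VisitFixedArray(HeapObjectStub* o) { return o->size; }

void Initialize() {
  table[kVisitByteArray] = &VisitByteArray;
  table[kVisitFixedArray] = &VisitFixedArray;
}

int IterateBody(HeapObjectStub* o) {
  return table[o->id](o);  // one indexed load, one indirect call
}

int main() {
  Initialize();
  HeapObjectStub a = {kVisitFixedArray, 32};
  assert(IterateBody(&a) == 32);
  return 0;
}
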
+
+
+void Code::CodeIterateBody(ObjectVisitor* v) {
+ int mode_mask = RelocInfo::kCodeTargetMask |
+ RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
+ RelocInfo::ModeMask(RelocInfo::GLOBAL_PROPERTY_CELL) |
+ RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
+ RelocInfo::ModeMask(RelocInfo::JS_RETURN) |
+ RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT) |
+ RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
+
+ // There are two places where we iterate code bodies: here and the
+ // templated CodeIterateBody (below). They should be kept in sync.
+ IteratePointer(v, kRelocationInfoOffset);
+ IteratePointer(v, kHandlerTableOffset);
+ IteratePointer(v, kDeoptimizationDataOffset);
+
+ RelocIterator it(this, mode_mask);
+ for (; !it.done(); it.next()) {
+ it.rinfo()->Visit(v);
+ }
+}
+
+
+template<typename StaticVisitor>
+void Code::CodeIterateBody(Heap* heap) {
+ int mode_mask = RelocInfo::kCodeTargetMask |
+ RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
+ RelocInfo::ModeMask(RelocInfo::GLOBAL_PROPERTY_CELL) |
+ RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
+ RelocInfo::ModeMask(RelocInfo::JS_RETURN) |
+ RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT) |
+ RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
+
+ // There are two places where we iterate code bodies: here and the
+ // non-templated CodeIterateBody (above). They should be kept in sync.
+ StaticVisitor::VisitPointer(
+ heap,
+ reinterpret_cast<Object**>(this->address() + kRelocationInfoOffset));
+ StaticVisitor::VisitPointer(
+ heap,
+ reinterpret_cast<Object**>(this->address() + kHandlerTableOffset));
+ StaticVisitor::VisitPointer(
+ heap,
+ reinterpret_cast<Object**>(this->address() + kDeoptimizationDataOffset));
+
+ RelocIterator it(this, mode_mask);
+ for (; !it.done(); it.next()) {
+ it.rinfo()->template Visit<StaticVisitor>(heap);
+ }
+}
+
+
+} } // namespace v8::internal
+
+#endif // V8_OBJECTS_VISITING_INL_H_
diff --git a/deps/v8/src/objects-visiting.cc b/deps/v8/src/objects-visiting.cc
index 0aa21dd6e..9ca102b2f 100644
--- a/deps/v8/src/objects-visiting.cc
+++ b/deps/v8/src/objects-visiting.cc
@@ -64,7 +64,7 @@ StaticVisitorBase::VisitorId StaticVisitorBase::GetVisitorId(
case kExternalStringTag:
return GetVisitorIdForSize(kVisitDataObject,
kVisitDataObjectGeneric,
- ExternalString::kSize);
+ instance_size);
}
UNREACHABLE();
}
@@ -73,6 +73,9 @@ StaticVisitorBase::VisitorId StaticVisitorBase::GetVisitorId(
case BYTE_ARRAY_TYPE:
return kVisitByteArray;
+ case FREE_SPACE_TYPE:
+ return kVisitFreeSpace;
+
case FIXED_ARRAY_TYPE:
return kVisitFixedArray;
@@ -91,6 +94,16 @@ StaticVisitorBase::VisitorId StaticVisitorBase::GetVisitorId(
case JS_GLOBAL_PROPERTY_CELL_TYPE:
return kVisitPropertyCell;
+ case JS_SET_TYPE:
+ return GetVisitorIdForSize(kVisitStruct,
+ kVisitStructGeneric,
+ JSSet::kSize);
+
+ case JS_MAP_TYPE:
+ return GetVisitorIdForSize(kVisitStruct,
+ kVisitStructGeneric,
+ JSMap::kSize);
+
case JS_WEAK_MAP_TYPE:
return kVisitJSWeakMap;
diff --git a/deps/v8/src/objects-visiting.h b/deps/v8/src/objects-visiting.h
index 4ce1bd077..e6ddfed4a 100644
--- a/deps/v8/src/objects-visiting.h
+++ b/deps/v8/src/objects-visiting.h
@@ -30,22 +30,6 @@
#include "allocation.h"
-#if V8_TARGET_ARCH_IA32
-#include "ia32/assembler-ia32.h"
-#include "ia32/assembler-ia32-inl.h"
-#elif V8_TARGET_ARCH_X64
-#include "x64/assembler-x64.h"
-#include "x64/assembler-x64-inl.h"
-#elif V8_TARGET_ARCH_ARM
-#include "arm/assembler-arm.h"
-#include "arm/assembler-arm-inl.h"
-#elif V8_TARGET_ARCH_MIPS
-#include "mips/assembler-mips.h"
-#include "mips/assembler-mips-inl.h"
-#else
-#error Unsupported target architecture.
-#endif
-
// This file provides base classes and auxiliary methods for defining
// static object visitors used during GC.
// Visiting HeapObject body with a normal ObjectVisitor requires performing
@@ -67,6 +51,7 @@ class StaticVisitorBase : public AllStatic {
kVisitSeqTwoByteString,
kVisitShortcutCandidate,
kVisitByteArray,
+ kVisitFreeSpace,
kVisitFixedArray,
kVisitFixedDoubleArray,
kVisitGlobalContext,
@@ -172,6 +157,10 @@ class VisitorDispatchTable {
}
}
+ inline Callback GetVisitorById(StaticVisitorBase::VisitorId id) {
+ return reinterpret_cast<Callback>(callbacks_[id]);
+ }
+
inline Callback GetVisitor(Map* map) {
return reinterpret_cast<Callback>(callbacks_[map->visitor_id()]);
}
@@ -236,7 +225,7 @@ class FlexibleBodyVisitor : public BodyVisitorBase<StaticVisitor> {
static inline ReturnType Visit(Map* map, HeapObject* object) {
int object_size = BodyDescriptor::SizeOf(map, object);
BodyVisitorBase<StaticVisitor>::IteratePointers(
- map->heap(),
+ map->GetHeap(),
object,
BodyDescriptor::kStartOffset,
object_size);
@@ -247,7 +236,7 @@ class FlexibleBodyVisitor : public BodyVisitorBase<StaticVisitor> {
static inline ReturnType VisitSpecialized(Map* map, HeapObject* object) {
ASSERT(BodyDescriptor::SizeOf(map, object) == object_size);
BodyVisitorBase<StaticVisitor>::IteratePointers(
- map->heap(),
+ map->GetHeap(),
object,
BodyDescriptor::kStartOffset,
object_size);
@@ -261,7 +250,7 @@ class FixedBodyVisitor : public BodyVisitorBase<StaticVisitor> {
public:
static inline ReturnType Visit(Map* map, HeapObject* object) {
BodyVisitorBase<StaticVisitor>::IteratePointers(
- map->heap(),
+ map->GetHeap(),
object,
BodyDescriptor::kStartOffset,
BodyDescriptor::kEndOffset);
@@ -289,63 +278,7 @@ class FixedBodyVisitor : public BodyVisitorBase<StaticVisitor> {
template<typename StaticVisitor>
class StaticNewSpaceVisitor : public StaticVisitorBase {
public:
- static void Initialize() {
- table_.Register(kVisitShortcutCandidate,
- &FixedBodyVisitor<StaticVisitor,
- ConsString::BodyDescriptor,
- int>::Visit);
-
- table_.Register(kVisitConsString,
- &FixedBodyVisitor<StaticVisitor,
- ConsString::BodyDescriptor,
- int>::Visit);
-
- table_.Register(kVisitSlicedString,
- &FixedBodyVisitor<StaticVisitor,
- SlicedString::BodyDescriptor,
- int>::Visit);
-
- table_.Register(kVisitFixedArray,
- &FlexibleBodyVisitor<StaticVisitor,
- FixedArray::BodyDescriptor,
- int>::Visit);
-
- table_.Register(kVisitFixedDoubleArray, &VisitFixedDoubleArray);
-
- table_.Register(kVisitGlobalContext,
- &FixedBodyVisitor<StaticVisitor,
- Context::ScavengeBodyDescriptor,
- int>::Visit);
-
- table_.Register(kVisitByteArray, &VisitByteArray);
-
- table_.Register(kVisitSharedFunctionInfo,
- &FixedBodyVisitor<StaticVisitor,
- SharedFunctionInfo::BodyDescriptor,
- int>::Visit);
-
- table_.Register(kVisitJSWeakMap, &VisitJSObject);
-
- table_.Register(kVisitJSRegExp, &VisitJSObject);
-
- table_.Register(kVisitSeqAsciiString, &VisitSeqAsciiString);
-
- table_.Register(kVisitSeqTwoByteString, &VisitSeqTwoByteString);
-
- table_.Register(kVisitJSFunction,
- &JSObjectVisitor::
- template VisitSpecialized<JSFunction::kSize>);
-
- table_.RegisterSpecializations<DataObjectVisitor,
- kVisitDataObject,
- kVisitDataObjectGeneric>();
- table_.RegisterSpecializations<JSObjectVisitor,
- kVisitJSObject,
- kVisitJSObjectGeneric>();
- table_.RegisterSpecializations<StructVisitor,
- kVisitStruct,
- kVisitStructGeneric>();
- }
+ static void Initialize();
static inline int IterateBody(Map* map, HeapObject* obj) {
return table_.GetVisitor(map)(map, obj);
@@ -379,6 +312,10 @@ class StaticNewSpaceVisitor : public StaticVisitorBase {
SeqTwoByteStringSize(map->instance_type());
}
+ static inline int VisitFreeSpace(Map* map, HeapObject* object) {
+ return FreeSpace::cast(object)->Size();
+ }
+
class DataObjectVisitor {
public:
template<int object_size>
@@ -410,55 +347,6 @@ VisitorDispatchTable<typename StaticNewSpaceVisitor<StaticVisitor>::Callback>
StaticNewSpaceVisitor<StaticVisitor>::table_;
-void Code::CodeIterateBody(ObjectVisitor* v) {
- int mode_mask = RelocInfo::kCodeTargetMask |
- RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
- RelocInfo::ModeMask(RelocInfo::GLOBAL_PROPERTY_CELL) |
- RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
- RelocInfo::ModeMask(RelocInfo::JS_RETURN) |
- RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT) |
- RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
-
- // Use the relocation info pointer before it is visited by
- // the heap compaction in the next statement.
- RelocIterator it(this, mode_mask);
-
- IteratePointer(v, kRelocationInfoOffset);
- IteratePointer(v, kDeoptimizationDataOffset);
-
- for (; !it.done(); it.next()) {
- it.rinfo()->Visit(v);
- }
-}
-
-
-template<typename StaticVisitor>
-void Code::CodeIterateBody(Heap* heap) {
- int mode_mask = RelocInfo::kCodeTargetMask |
- RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
- RelocInfo::ModeMask(RelocInfo::GLOBAL_PROPERTY_CELL) |
- RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
- RelocInfo::ModeMask(RelocInfo::JS_RETURN) |
- RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT) |
- RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
-
- // Use the relocation info pointer before it is visited by
- // the heap compaction in the next statement.
- RelocIterator it(this, mode_mask);
-
- StaticVisitor::VisitPointer(
- heap,
- reinterpret_cast<Object**>(this->address() + kRelocationInfoOffset));
- StaticVisitor::VisitPointer(
- heap,
- reinterpret_cast<Object**>(this->address() + kDeoptimizationDataOffset));
-
- for (; !it.done(); it.next()) {
- it.rinfo()->template Visit<StaticVisitor>(heap);
- }
-}
-
-
} } // namespace v8::internal
#endif // V8_OBJECTS_VISITING_H_
diff --git a/deps/v8/src/objects.cc b/deps/v8/src/objects.cc
index 6085b4ef2..1565504c2 100644
--- a/deps/v8/src/objects.cc
+++ b/deps/v8/src/objects.cc
@@ -39,7 +39,9 @@
#include "hydrogen.h"
#include "objects-inl.h"
#include "objects-visiting.h"
+#include "objects-visiting-inl.h"
#include "macro-assembler.h"
+#include "mark-compact.h"
#include "safepoint-table.h"
#include "string-stream.h"
#include "utils.h"
@@ -53,10 +55,53 @@
namespace v8 {
namespace internal {
-// Getters and setters are stored in a fixed array property. These are
-// constants for their indices.
-const int kGetterIndex = 0;
-const int kSetterIndex = 1;
+void PrintElementsKind(FILE* out, ElementsKind kind) {
+ switch (kind) {
+ case FAST_SMI_ONLY_ELEMENTS:
+ PrintF(out, "FAST_SMI_ONLY_ELEMENTS");
+ break;
+ case FAST_ELEMENTS:
+ PrintF(out, "FAST_ELEMENTS");
+ break;
+ case FAST_DOUBLE_ELEMENTS:
+ PrintF(out, "FAST_DOUBLE_ELEMENTS");
+ break;
+ case DICTIONARY_ELEMENTS:
+ PrintF(out, "DICTIONARY_ELEMENTS");
+ break;
+ case NON_STRICT_ARGUMENTS_ELEMENTS:
+ PrintF(out, "NON_STRICT_ARGUMENTS_ELEMENTS");
+ break;
+ case EXTERNAL_BYTE_ELEMENTS:
+ PrintF(out, "EXTERNAL_BYTE_ELEMENTS");
+ break;
+ case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+ PrintF(out, "EXTERNAL_UNSIGNED_BYTE_ELEMENTS");
+ break;
+ case EXTERNAL_SHORT_ELEMENTS:
+ PrintF(out, "EXTERNAL_SHORT_ELEMENTS");
+ break;
+ case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+ PrintF(out, "EXTERNAL_UNSIGNED_SHORT_ELEMENTS");
+ break;
+ case EXTERNAL_INT_ELEMENTS:
+ PrintF(out, "EXTERNAL_INT_ELEMENTS");
+ break;
+ case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+ PrintF(out, "EXTERNAL_UNSIGNED_INT_ELEMENTS");
+ break;
+ case EXTERNAL_FLOAT_ELEMENTS:
+ PrintF(out, "EXTERNAL_FLOAT_ELEMENTS");
+ break;
+ case EXTERNAL_DOUBLE_ELEMENTS:
+ PrintF(out, "EXTERNAL_DOUBLE_ELEMENTS");
+ break;
+ case EXTERNAL_PIXEL_ELEMENTS:
+ PrintF(out, "EXTERNAL_DOUBLE_ELEMENTS");
+ break;
+ }
+}
+
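The new PrintElementsKind helper writes each ElementsKind's name to the given
stream. A hypothetical call, with the stream and kind chosen for illustration:

    PrintElementsKind(stdout, FAST_DOUBLE_ELEMENTS);
    // prints "FAST_DOUBLE_ELEMENTS"
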
MUST_USE_RESULT static MaybeObject* CreateJSValue(JSFunction* constructor,
Object* value) {
@@ -132,34 +177,27 @@ Object* Object::ToBoolean() {
void Object::Lookup(String* name, LookupResult* result) {
Object* holder = NULL;
- if (IsSmi()) {
- Context* global_context = Isolate::Current()->context()->global_context();
- holder = global_context->number_function()->instance_prototype();
+ if (IsJSReceiver()) {
+ holder = this;
} else {
- HeapObject* heap_object = HeapObject::cast(this);
- if (heap_object->IsJSObject()) {
- return JSObject::cast(this)->Lookup(name, result);
- } else if (heap_object->IsJSProxy()) {
- return result->HandlerResult();
- }
Context* global_context = Isolate::Current()->context()->global_context();
- if (heap_object->IsString()) {
- holder = global_context->string_function()->instance_prototype();
- } else if (heap_object->IsHeapNumber()) {
+ if (IsNumber()) {
holder = global_context->number_function()->instance_prototype();
- } else if (heap_object->IsBoolean()) {
+ } else if (IsString()) {
+ holder = global_context->string_function()->instance_prototype();
+ } else if (IsBoolean()) {
holder = global_context->boolean_function()->instance_prototype();
}
}
ASSERT(holder != NULL); // Cannot handle null or undefined.
- JSObject::cast(holder)->Lookup(name, result);
+ JSReceiver::cast(holder)->Lookup(name, result);
}
MaybeObject* Object::GetPropertyWithReceiver(Object* receiver,
String* name,
PropertyAttributes* attributes) {
- LookupResult result;
+ LookupResult result(name->GetIsolate());
Lookup(name, &result);
MaybeObject* value = GetProperty(receiver, &result, name, attributes);
ASSERT(*attributes <= ABSENT);
@@ -167,10 +205,9 @@ MaybeObject* Object::GetPropertyWithReceiver(Object* receiver,
}
-MaybeObject* Object::GetPropertyWithCallback(Object* receiver,
- Object* structure,
- String* name,
- Object* holder) {
+MaybeObject* JSObject::GetPropertyWithCallback(Object* receiver,
+ Object* structure,
+ String* name) {
Isolate* isolate = name->GetIsolate();
// To accommodate both the old and the new api we switch on the
// data structure used to store the callbacks. Eventually foreign
@@ -178,7 +215,7 @@ MaybeObject* Object::GetPropertyWithCallback(Object* receiver,
if (structure->IsForeign()) {
AccessorDescriptor* callback =
reinterpret_cast<AccessorDescriptor*>(
- Foreign::cast(structure)->address());
+ Foreign::cast(structure)->foreign_address());
MaybeObject* value = (callback->getter)(receiver, callback->data);
RETURN_IF_SCHEDULED_EXCEPTION(isolate);
return value;
@@ -191,10 +228,9 @@ MaybeObject* Object::GetPropertyWithCallback(Object* receiver,
v8::AccessorGetter call_fun = v8::ToCData<v8::AccessorGetter>(fun_obj);
HandleScope scope(isolate);
JSObject* self = JSObject::cast(receiver);
- JSObject* holder_handle = JSObject::cast(holder);
Handle<String> key(name);
LOG(isolate, ApiNamedPropertyAccess("load", self, name));
- CustomArguments args(isolate, data->data(), self, holder_handle);
+ CustomArguments args(isolate, data->data(), self, this);
v8::AccessorInfo info(args.end());
v8::Handle<v8::Value> result;
{
@@ -212,9 +248,9 @@ MaybeObject* Object::GetPropertyWithCallback(Object* receiver,
// __defineGetter__ callback
if (structure->IsFixedArray()) {
Object* getter = FixedArray::cast(structure)->get(kGetterIndex);
- if (getter->IsJSFunction()) {
- return Object::GetPropertyWithDefinedGetter(receiver,
- JSFunction::cast(getter));
+ if (getter->IsSpecFunction()) {
+ // TODO(rossberg): nicer would be to cast to some JSCallable here...
+ return GetPropertyWithDefinedGetter(receiver, JSReceiver::cast(getter));
}
// Getter is not a function.
return isolate->heap()->undefined_value();
@@ -225,47 +261,72 @@ MaybeObject* Object::GetPropertyWithCallback(Object* receiver,
}
-MaybeObject* Object::GetPropertyWithHandler(Object* receiver_raw,
- String* name_raw,
- Object* handler_raw) {
- Isolate* isolate = name_raw->GetIsolate();
+MaybeObject* JSProxy::GetPropertyWithHandler(Object* receiver_raw,
+ String* name_raw) {
+ Isolate* isolate = GetIsolate();
HandleScope scope(isolate);
Handle<Object> receiver(receiver_raw);
Handle<Object> name(name_raw);
- Handle<Object> handler(handler_raw);
- // Extract trap function.
- Handle<String> trap_name = isolate->factory()->LookupAsciiSymbol("get");
- Handle<Object> trap(v8::internal::GetProperty(handler, trap_name));
+ Handle<Object> args[] = { receiver, name };
+ Handle<Object> result = CallTrap(
+ "get", isolate->derived_get_trap(), ARRAY_SIZE(args), args);
if (isolate->has_pending_exception()) return Failure::Exception();
- if (trap->IsUndefined()) {
- // Get the derived `get' property.
- trap = isolate->derived_get_trap();
- }
-
- // Call trap function.
- Object** args[] = { receiver.location(), name.location() };
- bool has_exception;
- Handle<Object> result =
- Execution::Call(trap, handler, ARRAY_SIZE(args), args, &has_exception);
- if (has_exception) return Failure::Exception();
return *result;
}
+Handle<Object> Object::GetElement(Handle<Object> object, uint32_t index) {
+ Isolate* isolate = object->IsHeapObject()
+ ? Handle<HeapObject>::cast(object)->GetIsolate()
+ : Isolate::Current();
+ CALL_HEAP_FUNCTION(isolate, object->GetElement(index), Object);
+}
+
+
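The handle-based Object::GetElement wrapper relies on CALL_HEAP_FUNCTION, which
retries the raw MaybeObject* operation after a GC when it fails to allocate and
wraps the final result in a Handle. A rough sketch of that idiom, not the
actual macro body; the helper name is invented, retry limits and non-retryable
failures are omitted, and the GC call and flag are approximations:

    Handle<Object> GetElementSketch(Isolate* isolate,
                                    Handle<Object> object,
                                    uint32_t index) {
      for (;;) {
        MaybeObject* maybe = object->GetElement(index);  // may fail to allocate
        Object* result;
        if (maybe->ToObject(&result)) return Handle<Object>(result, isolate);
        // On an allocation failure, free some memory and try again.
        isolate->heap()->CollectAllGarbage(Heap::kNoGCFlags);
      }
    }
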
+MaybeObject* JSProxy::GetElementWithHandler(Object* receiver,
+ uint32_t index) {
+ String* name;
+ MaybeObject* maybe = GetHeap()->Uint32ToString(index);
+ if (!maybe->To<String>(&name)) return maybe;
+ return GetPropertyWithHandler(receiver, name);
+}
+
+
+MaybeObject* JSProxy::SetElementWithHandler(uint32_t index,
+ Object* value,
+ StrictModeFlag strict_mode) {
+ String* name;
+ MaybeObject* maybe = GetHeap()->Uint32ToString(index);
+ if (!maybe->To<String>(&name)) return maybe;
+ return SetPropertyWithHandler(name, value, NONE, strict_mode);
+}
+
+
+bool JSProxy::HasElementWithHandler(uint32_t index) {
+ String* name;
+ MaybeObject* maybe = GetHeap()->Uint32ToString(index);
+ if (!maybe->To<String>(&name)) return maybe;
+ return HasPropertyWithHandler(name);
+}
+
+
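All three element-with-handler helpers above reduce an indexed access to a
named one: the index is converted to a string with Uint32ToString, and the
named-property handler path does the rest. The recurring shape, with the
allocation-failure propagation spelled out:

    String* name;
    MaybeObject* maybe = GetHeap()->Uint32ToString(index);
    if (!maybe->To<String>(&name)) return maybe;  // propagate allocation failure
    return GetPropertyWithHandler(receiver, name);  // or Set/Has, respectively
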
MaybeObject* Object::GetPropertyWithDefinedGetter(Object* receiver,
- JSFunction* getter) {
+ JSReceiver* getter) {
HandleScope scope;
- Handle<JSFunction> fun(JSFunction::cast(getter));
+ Handle<JSReceiver> fun(getter);
Handle<Object> self(receiver);
#ifdef ENABLE_DEBUGGER_SUPPORT
Debug* debug = fun->GetHeap()->isolate()->debug();
// Handle stepping into a getter if step into is active.
- if (debug->StepInActive()) {
- debug->HandleStepIn(fun, Handle<Object>::null(), 0, false);
+ // TODO(rossberg): should this apply to getters that are function proxies?
+ if (debug->StepInActive() && fun->IsJSFunction()) {
+ debug->HandleStepIn(
+ Handle<JSFunction>::cast(fun), Handle<Object>::null(), 0, false);
}
#endif
+
bool has_pending_exception;
Handle<Object> result =
Execution::Call(fun, self, 0, NULL, &has_pending_exception);
@@ -290,10 +351,8 @@ MaybeObject* JSObject::GetPropertyWithFailedAccessCheck(
AccessorInfo* info = AccessorInfo::cast(obj);
if (info->all_can_read()) {
*attributes = result->GetAttributes();
- return GetPropertyWithCallback(receiver,
- result->GetCallbackObject(),
- name,
- result->holder());
+ return result->holder()->GetPropertyWithCallback(
+ receiver, result->GetCallbackObject(), name);
}
}
break;
@@ -302,7 +361,7 @@ MaybeObject* JSObject::GetPropertyWithFailedAccessCheck(
case FIELD:
case CONSTANT_FUNCTION: {
// Search ALL_CAN_READ accessors in prototype chain.
- LookupResult r;
+ LookupResult r(GetIsolate());
result->holder()->LookupRealNamedPropertyInPrototypes(name, &r);
if (r.IsProperty()) {
return GetPropertyWithFailedAccessCheck(receiver,
@@ -315,7 +374,7 @@ MaybeObject* JSObject::GetPropertyWithFailedAccessCheck(
case INTERCEPTOR: {
// If the object has an interceptor, try real named properties.
// No access check in GetPropertyAttributeWithInterceptor.
- LookupResult r;
+ LookupResult r(GetIsolate());
result->holder()->LookupRealNamedProperty(name, &r);
if (r.IsProperty()) {
return GetPropertyWithFailedAccessCheck(receiver,
@@ -362,7 +421,7 @@ PropertyAttributes JSObject::GetPropertyAttributeWithFailedAccessCheck(
case CONSTANT_FUNCTION: {
if (!continue_search) break;
// Search ALL_CAN_READ accessors in prototype chain.
- LookupResult r;
+ LookupResult r(GetIsolate());
result->holder()->LookupRealNamedPropertyInPrototypes(name, &r);
if (r.IsProperty()) {
return GetPropertyAttributeWithFailedAccessCheck(receiver,
@@ -376,7 +435,7 @@ PropertyAttributes JSObject::GetPropertyAttributeWithFailedAccessCheck(
case INTERCEPTOR: {
// If the object has an interceptor, try real named properties.
// No access check in GetPropertyAttributeWithInterceptor.
- LookupResult r;
+ LookupResult r(GetIsolate());
if (continue_search) {
result->holder()->LookupRealNamedProperty(name, &r);
} else {
@@ -396,7 +455,7 @@ PropertyAttributes JSObject::GetPropertyAttributeWithFailedAccessCheck(
}
}
- GetHeap()->isolate()->ReportFailedAccessCheck(this, v8::ACCESS_HAS);
+ GetIsolate()->ReportFailedAccessCheck(this, v8::ACCESS_HAS);
return ABSENT;
}
@@ -486,7 +545,7 @@ MaybeObject* JSObject::DeleteNormalizedProperty(String* name, DeleteMode mode) {
}
JSGlobalPropertyCell* cell =
JSGlobalPropertyCell::cast(dictionary->ValueAt(entry));
- cell->set_value(cell->heap()->the_hole_value());
+ cell->set_value(cell->GetHeap()->the_hole_value());
dictionary->DetailsAtPut(entry, details.AsDeleted());
} else {
Object* deleted = dictionary->DeleteProperty(entry, mode);
@@ -520,6 +579,21 @@ bool JSObject::IsDirty() {
}
+Handle<Object> Object::GetProperty(Handle<Object> object,
+ Handle<Object> receiver,
+ LookupResult* result,
+ Handle<String> key,
+ PropertyAttributes* attributes) {
+ Isolate* isolate = object->IsHeapObject()
+ ? Handle<HeapObject>::cast(object)->GetIsolate()
+ : Isolate::Current();
+ CALL_HEAP_FUNCTION(
+ isolate,
+ object->GetProperty(*receiver, result, *key, attributes),
+ Object);
+}
+
+
MaybeObject* Object::GetProperty(Object* receiver,
LookupResult* result,
String* name,
@@ -537,7 +611,9 @@ MaybeObject* Object::GetProperty(Object* receiver,
// holder in the prototype chain.
// Proxy handlers do not use the proxy's prototype, so we can skip this.
if (!result->IsHandler()) {
- Object* last = result->IsProperty() ? result->holder() : heap->null_value();
+ Object* last = result->IsProperty()
+ ? result->holder()
+ : Object::cast(heap->null_value());
ASSERT(this != this->GetPrototype());
for (Object* current = this; true; current = current->GetPrototype()) {
if (current->IsAccessCheckNeeded()) {
@@ -566,30 +642,26 @@ MaybeObject* Object::GetProperty(Object* receiver,
}
*attributes = result->GetAttributes();
Object* value;
- JSObject* holder = result->holder();
switch (result->type()) {
case NORMAL:
- value = holder->GetNormalizedProperty(result);
+ value = result->holder()->GetNormalizedProperty(result);
ASSERT(!value->IsTheHole() || result->IsReadOnly());
return value->IsTheHole() ? heap->undefined_value() : value;
case FIELD:
- value = holder->FastPropertyAt(result->GetFieldIndex());
+ value = result->holder()->FastPropertyAt(result->GetFieldIndex());
ASSERT(!value->IsTheHole() || result->IsReadOnly());
return value->IsTheHole() ? heap->undefined_value() : value;
case CONSTANT_FUNCTION:
return result->GetConstantFunction();
case CALLBACKS:
- return GetPropertyWithCallback(receiver,
- result->GetCallbackObject(),
- name,
- holder);
- case HANDLER: {
- JSProxy* proxy = JSProxy::cast(this);
- return GetPropertyWithHandler(receiver, name, proxy->handler());
- }
+ return result->holder()->GetPropertyWithCallback(
+ receiver, result->GetCallbackObject(), name);
+ case HANDLER:
+ return result->proxy()->GetPropertyWithHandler(receiver, name);
case INTERCEPTOR: {
JSObject* recvr = JSObject::cast(receiver);
- return holder->GetPropertyWithInterceptor(recvr, name, attributes);
+ return result->holder()->GetPropertyWithInterceptor(
+ recvr, name, attributes);
}
case MAP_TRANSITION:
case ELEMENTS_TRANSITION:
@@ -613,28 +685,21 @@ MaybeObject* Object::GetElementWithReceiver(Object* receiver, uint32_t index) {
for (holder = this;
holder != heap->null_value();
holder = holder->GetPrototype()) {
- if (holder->IsSmi()) {
- Context* global_context = Isolate::Current()->context()->global_context();
- holder = global_context->number_function()->instance_prototype();
- } else {
- HeapObject* heap_object = HeapObject::cast(holder);
- if (!heap_object->IsJSObject()) {
- Isolate* isolate = heap->isolate();
- Context* global_context = isolate->context()->global_context();
- if (heap_object->IsString()) {
- holder = global_context->string_function()->instance_prototype();
- } else if (heap_object->IsHeapNumber()) {
- holder = global_context->number_function()->instance_prototype();
- } else if (heap_object->IsBoolean()) {
- holder = global_context->boolean_function()->instance_prototype();
- } else if (heap_object->IsJSProxy()) {
- // TODO(rossberg): do something
- return heap->undefined_value(); // For now...
- } else {
- // Undefined and null have no indexed properties.
- ASSERT(heap_object->IsUndefined() || heap_object->IsNull());
- return heap->undefined_value();
- }
+ if (!holder->IsJSObject()) {
+ Isolate* isolate = heap->isolate();
+ Context* global_context = isolate->context()->global_context();
+ if (holder->IsNumber()) {
+ holder = global_context->number_function()->instance_prototype();
+ } else if (holder->IsString()) {
+ holder = global_context->string_function()->instance_prototype();
+ } else if (holder->IsBoolean()) {
+ holder = global_context->boolean_function()->instance_prototype();
+ } else if (holder->IsJSProxy()) {
+ return JSProxy::cast(holder)->GetElementWithHandler(receiver, index);
+ } else {
+ // Undefined and null have no indexed properties.
+ ASSERT(holder->IsUndefined() || holder->IsNull());
+ return heap->undefined_value();
}
}
@@ -701,6 +766,49 @@ Object* Object::GetPrototype() {
}
+MaybeObject* Object::GetHash(CreationFlag flag) {
+ // The object is either a number, a string, an odd-ball,
+ // a real JS object, or a Harmony proxy.
+ if (IsNumber()) {
+ uint32_t hash = ComputeLongHash(double_to_uint64(Number()));
+ return Smi::FromInt(hash & Smi::kMaxValue);
+ }
+ if (IsString()) {
+ uint32_t hash = String::cast(this)->Hash();
+ return Smi::FromInt(hash);
+ }
+ if (IsOddball()) {
+ uint32_t hash = Oddball::cast(this)->to_string()->Hash();
+ return Smi::FromInt(hash);
+ }
+ if (IsJSReceiver()) {
+ return JSReceiver::cast(this)->GetIdentityHash(flag);
+ }
+
+ UNREACHABLE();
+ return Smi::FromInt(0);
+}
+
+
+bool Object::SameValue(Object* other) {
+ if (other == this) return true;
+ if (!IsHeapObject() || !other->IsHeapObject()) return false;
+
+ // The object is either a number, a string, an odd-ball,
+ // a real JS object, or a Harmony proxy.
+ if (IsNumber() && other->IsNumber()) {
+ double this_value = Number();
+ double other_value = other->Number();
+ return (this_value == other_value) ||
+ (isnan(this_value) && isnan(other_value));
+ }
+ if (IsString() && other->IsString()) {
+ return String::cast(this)->Equals(String::cast(other));
+ }
+ return false;
+}
+
+
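The number branch implements the Harmony SameValue comparison for NaN: unlike
C++ ==, two NaNs compare equal here. Note that the equality branch still treats
+0 and -0 as the same value. The branch in isolation, as self-contained C++:

    #include <cmath>

    bool SameValueNumbers(double a, double b) {
      return (a == b) || (std::isnan(a) && std::isnan(b));
    }
    // SameValueNumbers(NAN, NAN) == true, unlike NAN == NAN;
    // SameValueNumbers(0.0, -0.0) == true, since == ignores the sign of zero.
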
void Object::ShortPrint(FILE* out) {
HeapStringAllocator allocator;
StringStream accumulator(&allocator);
@@ -818,7 +926,7 @@ MaybeObject* String::SlowTryFlatten(PretenureFlag pretenure) {
len - first_length);
}
cs->set_first(result);
- cs->set_second(heap->empty_string());
+ cs->set_second(heap->empty_string(), SKIP_WRITE_BARRIER);
return result;
}
default:
@@ -844,39 +952,39 @@ bool String::MakeExternal(v8::String::ExternalStringResource* resource) {
#endif // DEBUG
Heap* heap = GetHeap();
int size = this->Size(); // Byte size of the original string.
- if (size < ExternalString::kSize) {
- // The string is too small to fit an external String in its place. This can
- // only happen for zero length strings.
+ if (size < ExternalString::kShortSize) {
return false;
}
- ASSERT(size >= ExternalString::kSize);
bool is_ascii = this->IsAsciiRepresentation();
bool is_symbol = this->IsSymbol();
- int length = this->length();
- int hash_field = this->hash_field();
// Morph the object to an external string by adjusting the map and
// reinitializing the fields.
- this->set_map(is_ascii ?
- heap->external_string_with_ascii_data_map() :
- heap->external_string_map());
+ if (size >= ExternalString::kSize) {
+ this->set_map(
+ is_symbol
+ ? (is_ascii ? heap->external_symbol_with_ascii_data_map()
+ : heap->external_symbol_map())
+ : (is_ascii ? heap->external_string_with_ascii_data_map()
+ : heap->external_string_map()));
+ } else {
+ this->set_map(
+ is_symbol
+ ? (is_ascii ? heap->short_external_symbol_with_ascii_data_map()
+ : heap->short_external_symbol_map())
+ : (is_ascii ? heap->short_external_string_with_ascii_data_map()
+ : heap->short_external_string_map()));
+ }
ExternalTwoByteString* self = ExternalTwoByteString::cast(this);
- self->set_length(length);
- self->set_hash_field(hash_field);
self->set_resource(resource);
- // Additionally make the object into an external symbol if the original string
- // was a symbol to start with.
- if (is_symbol) {
- self->Hash(); // Force regeneration of the hash value.
- // Now morph this external string into a external symbol.
- this->set_map(is_ascii ?
- heap->external_symbol_with_ascii_data_map() :
- heap->external_symbol_map());
- }
+ if (is_symbol) self->Hash(); // Force regeneration of the hash value.
// Fill the remainder of the string with dead wood.
int new_size = this->Size(); // Byte size of the external String object.
heap->CreateFillerObjectAt(this->address() + new_size, size - new_size);
+ if (Marking::IsBlack(Marking::MarkBitFrom(this))) {
+ MemoryChunk::IncrementLiveBytes(this->address(), new_size - size);
+ }
return true;
}
@@ -895,34 +1003,30 @@ bool String::MakeExternal(v8::String::ExternalAsciiStringResource* resource) {
#endif // DEBUG
Heap* heap = GetHeap();
int size = this->Size(); // Byte size of the original string.
- if (size < ExternalString::kSize) {
- // The string is too small to fit an external String in its place. This can
- // only happen for zero length strings.
+ if (size < ExternalString::kShortSize) {
return false;
}
- ASSERT(size >= ExternalString::kSize);
bool is_symbol = this->IsSymbol();
- int length = this->length();
- int hash_field = this->hash_field();
// Morph the object to an external string by adjusting the map and
- // reinitializing the fields.
- this->set_map(heap->external_ascii_string_map());
+ // reinitializing the fields. Use short version if space is limited.
+ if (size >= ExternalString::kSize) {
+ this->set_map(is_symbol ? heap->external_ascii_symbol_map()
+ : heap->external_ascii_string_map());
+ } else {
+ this->set_map(is_symbol ? heap->short_external_ascii_symbol_map()
+ : heap->short_external_ascii_string_map());
+ }
ExternalAsciiString* self = ExternalAsciiString::cast(this);
- self->set_length(length);
- self->set_hash_field(hash_field);
self->set_resource(resource);
- // Additionally make the object into an external symbol if the original string
- // was a symbol to start with.
- if (is_symbol) {
- self->Hash(); // Force regeneration of the hash value.
- // Now morph this external string into a external symbol.
- this->set_map(heap->external_ascii_symbol_map());
- }
+ if (is_symbol) self->Hash(); // Force regeneration of the hash value.
// Fill the remainder of the string with dead wood.
int new_size = this->Size(); // Byte size of the external String object.
heap->CreateFillerObjectAt(this->address() + new_size, size - new_size);
+ if (Marking::IsBlack(Marking::MarkBitFrom(this))) {
+ MemoryChunk::IncrementLiveBytes(this->address(), new_size - size);
+ }
return true;
}
@@ -998,8 +1102,7 @@ void JSObject::JSObjectShortPrint(StringStream* accumulator) {
break;
}
case JS_WEAK_MAP_TYPE: {
- int elements = JSWeakMap::cast(this)->table()->NumberOfElements();
- accumulator->Add("<JS WeakMap[%d]>", elements);
+ accumulator->Add("<JS WeakMap>");
break;
}
case JS_REGEXP_TYPE: {
@@ -1027,7 +1130,7 @@ void JSObject::JSObjectShortPrint(StringStream* accumulator) {
// JSGlobalProxy, JSGlobalObject, JSUndetectableObject, JSValue).
default: {
Map* map_of_this = map();
- Heap* heap = map_of_this->heap();
+ Heap* heap = GetHeap();
Object* constructor = map_of_this->constructor();
bool printed = false;
if (constructor->IsHeapObject() &&
@@ -1049,7 +1152,6 @@ void JSObject::JSObjectShortPrint(StringStream* accumulator) {
global_object ? "Global Object: " : "",
vowel ? "n" : "");
accumulator->Put(str);
- accumulator->Put('>');
printed = true;
}
}
@@ -1070,8 +1172,28 @@ void JSObject::JSObjectShortPrint(StringStream* accumulator) {
}
+void JSObject::PrintElementsTransition(
+ FILE* file, ElementsKind from_kind, FixedArrayBase* from_elements,
+ ElementsKind to_kind, FixedArrayBase* to_elements) {
+ if (from_kind != to_kind) {
+ PrintF(file, "elements transition [");
+ PrintElementsKind(file, from_kind);
+ PrintF(file, " -> ");
+ PrintElementsKind(file, to_kind);
+ PrintF(file, "] in ");
+ JavaScriptFrame::PrintTop(file, false, true);
+ PrintF(file, " for ");
+ ShortPrint(file);
+ PrintF(file, " from ");
+ from_elements->ShortPrint(file);
+ PrintF(file, " to ");
+ to_elements->ShortPrint(file);
+ PrintF(file, "\n");
+ }
+}
+
+
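When transition tracing is enabled, the output follows the PrintF calls above;
a representative line, where the parts in angle brackets are placeholders for
whatever JavaScriptFrame::PrintTop and ShortPrint emit:

    elements transition [FAST_SMI_ONLY_ELEMENTS -> FAST_DOUBLE_ELEMENTS] in <top JS frame> for <JSArray> from <FixedArray[3]> to <FixedDoubleArray[3]>
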
void HeapObject::HeapObjectShortPrint(StringStream* accumulator) {
- // if (!HEAP->InNewSpace(this)) PrintF("*", this);
Heap* heap = GetHeap();
if (!heap->Contains(this)) {
accumulator->Add("!!!INVALID POINTER!!!");
@@ -1094,14 +1216,21 @@ void HeapObject::HeapObjectShortPrint(StringStream* accumulator) {
}
switch (map()->instance_type()) {
case MAP_TYPE:
- accumulator->Add("<Map>");
+ accumulator->Add("<Map(elements=%u)>", Map::cast(this)->elements_kind());
break;
case FIXED_ARRAY_TYPE:
accumulator->Add("<FixedArray[%u]>", FixedArray::cast(this)->length());
break;
+ case FIXED_DOUBLE_ARRAY_TYPE:
+ accumulator->Add("<FixedDoubleArray[%u]>",
+ FixedDoubleArray::cast(this)->length());
+ break;
case BYTE_ARRAY_TYPE:
accumulator->Add("<ByteArray[%u]>", ByteArray::cast(this)->length());
break;
+ case FREE_SPACE_TYPE:
+ accumulator->Add("<FreeSpace[%u]>", FreeSpace::cast(this)->Size());
+ break;
case EXTERNAL_PIXEL_ARRAY_TYPE:
accumulator->Add("<ExternalPixelArray[%u]>",
ExternalPixelArray::cast(this)->length());
@@ -1241,6 +1370,8 @@ void HeapObject::IterateBody(InstanceType type, int object_size,
case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
case JS_VALUE_TYPE:
case JS_ARRAY_TYPE:
+ case JS_SET_TYPE:
+ case JS_MAP_TYPE:
case JS_WEAK_MAP_TYPE:
case JS_REGEXP_TYPE:
case JS_GLOBAL_PROXY_TYPE:
@@ -1277,6 +1408,7 @@ void HeapObject::IterateBody(InstanceType type, int object_size,
case HEAP_NUMBER_TYPE:
case FILLER_TYPE:
case BYTE_ARRAY_TYPE:
+ case FREE_SPACE_TYPE:
case EXTERNAL_PIXEL_ARRAY_TYPE:
case EXTERNAL_BYTE_ARRAY_TYPE:
case EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE:
@@ -1533,7 +1665,7 @@ MaybeObject* JSObject::AddConstantFunctionProperty(
// If the old map is the global object map (from new Object()),
// then transitions are not added to it, so we are done.
- Heap* heap = old_map->heap();
+ Heap* heap = GetHeap();
if (old_map == heap->isolate()->context()->global_context()->
object_function()->map()) {
return function;
@@ -1609,7 +1741,7 @@ MaybeObject* JSObject::AddProperty(String* name,
StrictModeFlag strict_mode) {
ASSERT(!IsJSGlobalProxy());
Map* map_of_this = map();
- Heap* heap = map_of_this->heap();
+ Heap* heap = GetHeap();
if (!map_of_this->is_extensible()) {
if (strict_mode == kNonStrictMode) {
return heap->undefined_value();
@@ -1651,13 +1783,21 @@ MaybeObject* JSObject::SetPropertyPostInterceptor(
PropertyAttributes attributes,
StrictModeFlag strict_mode) {
// Check local property, ignore interceptor.
- LookupResult result;
+ LookupResult result(GetIsolate());
LocalLookupRealNamedProperty(name, &result);
if (result.IsFound()) {
// An existing property, a map transition or a null descriptor was
// found. Use set property to handle all these cases.
return SetProperty(&result, name, value, attributes, strict_mode);
}
+ bool found = false;
+ MaybeObject* result_object;
+ result_object = SetPropertyWithCallbackSetterInPrototypes(name,
+ value,
+ attributes,
+ &found,
+ strict_mode);
+ if (found) return result_object;
// Add a new real property.
return AddProperty(name, value, attributes, strict_mode);
}
@@ -1696,7 +1836,7 @@ MaybeObject* JSObject::ConvertDescriptorToFieldAndMapTransition(
return result;
}
// Do not add transitions to the map of "new Object()".
- if (map() == old_map->heap()->isolate()->context()->global_context()->
+ if (map() == GetIsolate()->context()->global_context()->
object_function()->map()) {
return result;
}
@@ -1825,7 +1965,7 @@ MaybeObject* JSReceiver::SetProperty(String* name,
Object* value,
PropertyAttributes attributes,
StrictModeFlag strict_mode) {
- LookupResult result;
+ LookupResult result(GetIsolate());
LocalLookup(name, &result);
return SetProperty(&result, name, value, attributes, strict_mode);
}
@@ -1850,7 +1990,7 @@ MaybeObject* JSObject::SetPropertyWithCallback(Object* structure,
if (structure->IsForeign()) {
AccessorDescriptor* callback =
reinterpret_cast<AccessorDescriptor*>(
- Foreign::cast(structure)->address());
+ Foreign::cast(structure)->foreign_address());
MaybeObject* obj = (callback->setter)(this, value, callback->data);
RETURN_IF_SCHEDULED_EXCEPTION(isolate);
if (obj->IsFailure()) return obj;
@@ -1880,8 +2020,9 @@ MaybeObject* JSObject::SetPropertyWithCallback(Object* structure,
if (structure->IsFixedArray()) {
Object* setter = FixedArray::cast(structure)->get(kSetterIndex);
- if (setter->IsJSFunction()) {
- return SetPropertyWithDefinedSetter(JSFunction::cast(setter), value);
+ if (setter->IsSpecFunction()) {
+ // TODO(rossberg): nicer would be to cast to some JSCallable here...
+ return SetPropertyWithDefinedSetter(JSReceiver::cast(setter), value);
} else {
if (strict_mode == kNonStrictMode) {
return value;
@@ -1900,22 +2041,24 @@ MaybeObject* JSObject::SetPropertyWithCallback(Object* structure,
}
-MaybeObject* JSObject::SetPropertyWithDefinedSetter(JSFunction* setter,
- Object* value) {
+MaybeObject* JSReceiver::SetPropertyWithDefinedSetter(JSReceiver* setter,
+ Object* value) {
Isolate* isolate = GetIsolate();
Handle<Object> value_handle(value, isolate);
- Handle<JSFunction> fun(JSFunction::cast(setter), isolate);
- Handle<JSObject> self(this, isolate);
+ Handle<JSReceiver> fun(setter, isolate);
+ Handle<JSReceiver> self(this, isolate);
#ifdef ENABLE_DEBUGGER_SUPPORT
Debug* debug = isolate->debug();
// Handle stepping into a setter if step into is active.
- if (debug->StepInActive()) {
- debug->HandleStepIn(fun, Handle<Object>::null(), 0, false);
+  // TODO(rossberg): should this apply to setters that are function proxies?
+ if (debug->StepInActive() && fun->IsJSFunction()) {
+ debug->HandleStepIn(
+ Handle<JSFunction>::cast(fun), Handle<Object>::null(), 0, false);
}
#endif
bool has_pending_exception;
- Object** argv[] = { value_handle.location() };
- Execution::Call(fun, self, 1, argv, &has_pending_exception);
+ Handle<Object> argv[] = { value_handle };
+ Execution::Call(fun, self, ARRAY_SIZE(argv), argv, &has_pending_exception);
// Check for pending exception and return the result.
if (has_pending_exception) return Failure::Exception();
return *value_handle;
@@ -1928,6 +2071,9 @@ void JSObject::LookupCallbackSetterInPrototypes(String* name,
for (Object* pt = GetPrototype();
pt != heap->null_value();
pt = pt->GetPrototype()) {
+ if (pt->IsJSProxy()) {
+ return result->HandlerResult(JSProxy::cast(pt));
+ }
JSObject::cast(pt)->LocalLookupRealNamedProperty(name, result);
if (result->IsProperty()) {
if (result->type() == CALLBACKS && !result->IsReadOnly()) return;
@@ -1948,6 +2094,16 @@ MaybeObject* JSObject::SetElementWithCallbackSetterInPrototypes(
for (Object* pt = GetPrototype();
pt != heap->null_value();
pt = pt->GetPrototype()) {
+ if (pt->IsJSProxy()) {
+ String* name;
+ MaybeObject* maybe = GetHeap()->Uint32ToString(index);
+ if (!maybe->To<String>(&name)) {
+ *found = true; // Force abort
+ return maybe;
+ }
+ return JSProxy::cast(pt)->SetPropertyWithHandlerIfDefiningSetter(
+ name, value, NONE, strict_mode, found);
+ }
if (!JSObject::cast(pt)->HasDictionaryElements()) {
continue;
}
@@ -1969,6 +2125,48 @@ MaybeObject* JSObject::SetElementWithCallbackSetterInPrototypes(
return heap->the_hole_value();
}
+MaybeObject* JSObject::SetPropertyWithCallbackSetterInPrototypes(
+ String* name,
+ Object* value,
+ PropertyAttributes attributes,
+ bool* found,
+ StrictModeFlag strict_mode) {
+ Heap* heap = GetHeap();
+ // We could not find a local property so let's check whether there is an
+ // accessor that wants to handle the property.
+ LookupResult accessor_result(heap->isolate());
+ LookupCallbackSetterInPrototypes(name, &accessor_result);
+ if (accessor_result.IsFound()) {
+ *found = true;
+ if (accessor_result.type() == CALLBACKS) {
+ return SetPropertyWithCallback(accessor_result.GetCallbackObject(),
+ name,
+ value,
+ accessor_result.holder(),
+ strict_mode);
+ } else if (accessor_result.type() == HANDLER) {
+ // There is a proxy in the prototype chain. Invoke its
+ // getPropertyDescriptor trap.
+ bool found = false;
+ // SetPropertyWithHandlerIfDefiningSetter can cause GC,
+ // make sure to use the handlified references after calling
+ // the function.
+ Handle<JSObject> self(this);
+ Handle<String> hname(name);
+ Handle<Object> hvalue(value);
+ MaybeObject* result =
+ accessor_result.proxy()->SetPropertyWithHandlerIfDefiningSetter(
+ name, value, attributes, strict_mode, &found);
+ if (found) return result;
+ // The proxy does not define the property as an accessor.
+ // Consequently, it has no effect on setting the receiver.
+ return self->AddProperty(*hname, *hvalue, attributes, strict_mode);
+ }
+ }
+ *found = false;
+ return heap->the_hole_value();
+}
+
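Callers of SetPropertyWithCallbackSetterInPrototypes treat *found as "an
accessor or proxy on the prototype chain consumed the store". The protocol, as
used by SetPropertyPostInterceptor above:

    bool found = false;
    MaybeObject* result = SetPropertyWithCallbackSetterInPrototypes(
        name, value, attributes, &found, strict_mode);
    if (found) return result;  // a prototype accessor/proxy handled the store
    return AddProperty(name, value, attributes, strict_mode);  // plain property
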
void JSObject::LookupInDescriptor(String* name, LookupResult* result) {
DescriptorArray* descriptors = map()->instance_descriptors();
@@ -1985,7 +2183,8 @@ void Map::LookupInDescriptors(JSObject* holder,
String* name,
LookupResult* result) {
DescriptorArray* descriptors = instance_descriptors();
- DescriptorLookupCache* cache = heap()->isolate()->descriptor_lookup_cache();
+ DescriptorLookupCache* cache =
+ GetHeap()->isolate()->descriptor_lookup_cache();
int number = cache->Lookup(descriptors, name);
if (number == DescriptorLookupCache::kAbsent) {
number = descriptors->Search(name);
@@ -1999,75 +2198,293 @@ void Map::LookupInDescriptors(JSObject* holder,
}
-MaybeObject* Map::GetElementsTransitionMap(ElementsKind elements_kind,
- bool safe_to_add_transition) {
- Heap* current_heap = heap();
+static bool ContainsMap(MapHandleList* maps, Handle<Map> map) {
+ ASSERT(!map.is_null());
+ for (int i = 0; i < maps->length(); ++i) {
+ if (!maps->at(i).is_null() && maps->at(i).is_identical_to(map)) return true;
+ }
+ return false;
+}
+
+
+template <class T>
+static Handle<T> MaybeNull(T* p) {
+ if (p == NULL) return Handle<T>::null();
+ return Handle<T>(p);
+}
+
+
+Handle<Map> Map::FindTransitionedMap(MapHandleList* candidates) {
+ ElementsKind elms_kind = elements_kind();
+ if (elms_kind == FAST_DOUBLE_ELEMENTS) {
+ bool dummy = true;
+ Handle<Map> fast_map =
+ MaybeNull(LookupElementsTransitionMap(FAST_ELEMENTS, &dummy));
+ if (!fast_map.is_null() && ContainsMap(candidates, fast_map)) {
+ return fast_map;
+ }
+ return Handle<Map>::null();
+ }
+ if (elms_kind == FAST_SMI_ONLY_ELEMENTS) {
+ bool dummy = true;
+ Handle<Map> double_map =
+ MaybeNull(LookupElementsTransitionMap(FAST_DOUBLE_ELEMENTS, &dummy));
+ // In the current implementation, if the DOUBLE map doesn't exist, the
+ // FAST map can't exist either.
+ if (double_map.is_null()) return Handle<Map>::null();
+ Handle<Map> fast_map =
+ MaybeNull(double_map->LookupElementsTransitionMap(FAST_ELEMENTS,
+ &dummy));
+ if (!fast_map.is_null() && ContainsMap(candidates, fast_map)) {
+ return fast_map;
+ }
+ if (ContainsMap(candidates, double_map)) return double_map;
+ }
+ return Handle<Map>::null();
+}
+
+static Map* GetElementsTransitionMapFromDescriptor(Object* descriptor_contents,
+ ElementsKind elements_kind) {
+ if (descriptor_contents->IsMap()) {
+ Map* map = Map::cast(descriptor_contents);
+ if (map->elements_kind() == elements_kind) {
+ return map;
+ }
+ return NULL;
+ }
+
+ FixedArray* map_array = FixedArray::cast(descriptor_contents);
+ for (int i = 0; i < map_array->length(); ++i) {
+ Object* current = map_array->get(i);
+ // Skip undefined slots, they are sentinels for reclaimed maps.
+ if (!current->IsUndefined()) {
+ Map* current_map = Map::cast(map_array->get(i));
+ if (current_map->elements_kind() == elements_kind) {
+ return current_map;
+ }
+ }
+ }
+
+ return NULL;
+}
+
+
+static MaybeObject* AddElementsTransitionMapToDescriptor(
+ Object* descriptor_contents,
+ Map* new_map) {
+ // Nothing was in the descriptor for an ELEMENTS_TRANSITION,
+ // simply add the map.
+ if (descriptor_contents == NULL) {
+ return new_map;
+ }
+
+ // There was already a map in the descriptor, create a 2-element FixedArray
+ // to contain the existing map plus the new one.
+ FixedArray* new_array;
+ Heap* heap = new_map->GetHeap();
+ if (descriptor_contents->IsMap()) {
+ // Must tenure, DescriptorArray expects no new-space objects.
+ MaybeObject* maybe_new_array = heap->AllocateFixedArray(2, TENURED);
+ if (!maybe_new_array->To<FixedArray>(&new_array)) {
+ return maybe_new_array;
+ }
+ new_array->set(0, descriptor_contents);
+ new_array->set(1, new_map);
+ return new_array;
+ }
+
+ // The descriptor already contained a list of maps for different ElementKinds
+ // of ELEMENTS_TRANSITION, first check the existing array for an undefined
+ // slot, and if that's not available, create a FixedArray to hold the existing
+ // maps plus the new one and fill it in.
+ FixedArray* array = FixedArray::cast(descriptor_contents);
+ for (int i = 0; i < array->length(); ++i) {
+ if (array->get(i)->IsUndefined()) {
+ array->set(i, new_map);
+ return array;
+ }
+ }
+
+ // Must tenure, DescriptorArray expects no new-space objects.
+ MaybeObject* maybe_new_array =
+ heap->AllocateFixedArray(array->length() + 1, TENURED);
+ if (!maybe_new_array->To<FixedArray>(&new_array)) {
+ return maybe_new_array;
+ }
+ int i = 0;
+ while (i < array->length()) {
+ new_array->set(i, array->get(i));
+ ++i;
+ }
+ new_array->set(i, new_map);
+ return new_array;
+}
+
+
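AddElementsTransitionMapToDescriptor grows the value stored under the
ELEMENTS_TRANSITION descriptor through three shapes, summarized here:

    // NULL               -> store the new Map directly (first transition)
    // a single Map       -> allocate a tenured 2-element FixedArray {old, new}
    // FixedArray of Maps -> reuse an undefined slot (a reclaimed map), or
    //                       reallocate the array one element larger
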
+String* Map::elements_transition_sentinel_name() {
+ return GetHeap()->empty_symbol();
+}
+
+
+Object* Map::GetDescriptorContents(String* sentinel_name,
+ bool* safe_to_add_transition) {
+ // Get the cached index for the descriptors lookup, or find and cache it.
DescriptorArray* descriptors = instance_descriptors();
- String* elements_transition_sentinel_name = current_heap->empty_symbol();
+ DescriptorLookupCache* cache = GetIsolate()->descriptor_lookup_cache();
+ int index = cache->Lookup(descriptors, sentinel_name);
+ if (index == DescriptorLookupCache::kAbsent) {
+ index = descriptors->Search(sentinel_name);
+ cache->Update(descriptors, sentinel_name, index);
+ }
+ // If the transition already exists, return its descriptor.
+ if (index != DescriptorArray::kNotFound) {
+ PropertyDetails details(descriptors->GetDetails(index));
+ if (details.type() == ELEMENTS_TRANSITION) {
+ return descriptors->GetValue(index);
+ } else {
+ *safe_to_add_transition = false;
+ }
+ }
+ return NULL;
+}
+
+
+Map* Map::LookupElementsTransitionMap(ElementsKind elements_kind,
+ bool* safe_to_add_transition) {
+ // Special case: indirect SMI->FAST transition (cf. comment in
+ // AddElementsTransition()).
+ if (this->elements_kind() == FAST_SMI_ONLY_ELEMENTS &&
+ elements_kind == FAST_ELEMENTS) {
+ Map* double_map = this->LookupElementsTransitionMap(FAST_DOUBLE_ELEMENTS,
+ safe_to_add_transition);
+ if (double_map == NULL) return double_map;
+ return double_map->LookupElementsTransitionMap(FAST_ELEMENTS,
+ safe_to_add_transition);
+ }
+ Object* descriptor_contents = GetDescriptorContents(
+ elements_transition_sentinel_name(), safe_to_add_transition);
+ if (descriptor_contents != NULL) {
+ Map* maybe_transition_map =
+ GetElementsTransitionMapFromDescriptor(descriptor_contents,
+ elements_kind);
+ ASSERT(maybe_transition_map == NULL || maybe_transition_map->IsMap());
+ return maybe_transition_map;
+ }
+ return NULL;
+}
+
+
+MaybeObject* Map::AddElementsTransition(ElementsKind elements_kind,
+ Map* transitioned_map) {
+ // The map transition graph should be a tree, therefore the transition
+ // from SMI to FAST elements is not done directly, but by going through
+ // DOUBLE elements first.
+ if (this->elements_kind() == FAST_SMI_ONLY_ELEMENTS &&
+ elements_kind == FAST_ELEMENTS) {
+ bool safe_to_add = true;
+ Map* double_map = this->LookupElementsTransitionMap(
+ FAST_DOUBLE_ELEMENTS, &safe_to_add);
+ // This method is only called when safe_to_add_transition has been found
+ // to be true earlier.
+ ASSERT(safe_to_add);
+
+ if (double_map == NULL) {
+ MaybeObject* maybe_map = this->CopyDropTransitions();
+ if (!maybe_map->To(&double_map)) return maybe_map;
+ double_map->set_elements_kind(FAST_DOUBLE_ELEMENTS);
+ MaybeObject* maybe_double_transition = this->AddElementsTransition(
+ FAST_DOUBLE_ELEMENTS, double_map);
+ if (maybe_double_transition->IsFailure()) return maybe_double_transition;
+ }
+ return double_map->AddElementsTransition(FAST_ELEMENTS, transitioned_map);
+ }
+
+ bool safe_to_add_transition = true;
+ Object* descriptor_contents = GetDescriptorContents(
+ elements_transition_sentinel_name(), &safe_to_add_transition);
+ // This method is only called when safe_to_add_transition has been found
+ // to be true earlier.
+ ASSERT(safe_to_add_transition);
+ MaybeObject* maybe_new_contents =
+ AddElementsTransitionMapToDescriptor(descriptor_contents,
+ transitioned_map);
+ Object* new_contents;
+ if (!maybe_new_contents->ToObject(&new_contents)) {
+ return maybe_new_contents;
+ }
+
+ ElementsTransitionDescriptor desc(elements_transition_sentinel_name(),
+ new_contents);
+ Object* new_descriptors;
+ MaybeObject* maybe_new_descriptors =
+ instance_descriptors()->CopyInsert(&desc, KEEP_TRANSITIONS);
+ if (!maybe_new_descriptors->ToObject(&new_descriptors)) {
+ return maybe_new_descriptors;
+ }
+ set_instance_descriptors(DescriptorArray::cast(new_descriptors));
+ return this;
+}
+
+
+Handle<Map> JSObject::GetElementsTransitionMap(Handle<JSObject> object,
+ ElementsKind to_kind) {
+ Isolate* isolate = object->GetIsolate();
+ CALL_HEAP_FUNCTION(isolate,
+ object->GetElementsTransitionMap(to_kind),
+ Map);
+}
+
+
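The handle-based wrapper above is the GC-safe entry point for triggering a
transition. A hypothetical call site; object is assumed to be a
Handle<JSObject> in scope, and the map installation is illustrative only, since
real callers also migrate the elements backing store:

    Handle<Map> new_map =
        JSObject::GetElementsTransitionMap(object, FAST_DOUBLE_ELEMENTS);
    object->set_map(*new_map);  // illustration only
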
+MaybeObject* JSObject::GetElementsTransitionMap(ElementsKind to_kind) {
+ Map* current_map = map();
+ ElementsKind from_kind = current_map->elements_kind();
+
+ if (from_kind == to_kind) return current_map;
+
+ // Only objects with FastProperties can have DescriptorArrays and can track
+ // element-related maps. Also don't add descriptors to maps that are shared.
+ bool safe_to_add_transition = HasFastProperties() &&
+ !current_map->IsUndefined() &&
+ !current_map->is_shared();
+
+ // Prevent long chains of DICTIONARY -> FAST_ELEMENTS maps caused by objects
+ // with elements that switch back and forth between dictionary and fast
+ // element mode.
+ if (from_kind == DICTIONARY_ELEMENTS && to_kind == FAST_ELEMENTS) {
+ safe_to_add_transition = false;
+ }
if (safe_to_add_transition) {
// It's only safe to manipulate the descriptor array if it would be
// safe to add a transition.
-
- ASSERT(!is_shared()); // no transitions can be added to shared maps.
- // Check if the elements transition already exists.
- DescriptorLookupCache* cache =
- current_heap->isolate()->descriptor_lookup_cache();
- int index = cache->Lookup(descriptors, elements_transition_sentinel_name);
- if (index == DescriptorLookupCache::kAbsent) {
- index = descriptors->Search(elements_transition_sentinel_name);
- cache->Update(descriptors,
- elements_transition_sentinel_name,
- index);
- }
-
- // If the transition already exists, check the type. If there is a match,
- // return it.
- if (index != DescriptorArray::kNotFound) {
- PropertyDetails details(PropertyDetails(descriptors->GetDetails(index)));
- if (details.type() == ELEMENTS_TRANSITION &&
- details.elements_kind() == elements_kind) {
- return descriptors->GetValue(index);
- } else {
- safe_to_add_transition = false;
- }
+ Map* maybe_transition_map = current_map->LookupElementsTransitionMap(
+ to_kind, &safe_to_add_transition);
+ if (maybe_transition_map != NULL) {
+ return maybe_transition_map;
}
}
+ Map* new_map = NULL;
+
// No transition to an existing map for the given ElementsKind. Make a new
// one.
- Object* obj;
- { MaybeObject* maybe_map = CopyDropTransitions();
- if (!maybe_map->ToObject(&obj)) return maybe_map;
+ { MaybeObject* maybe_map = current_map->CopyDropTransitions();
+ if (!maybe_map->To(&new_map)) return maybe_map;
}
- Map* new_map = Map::cast(obj);
- new_map->set_elements_kind(elements_kind);
- GetIsolate()->counters()->map_to_external_array_elements()->Increment();
+ new_map->set_elements_kind(to_kind);
// Only remember the map transition if the object's map is NOT equal to the
// global object_function's map and there is not an already existing
// non-matching element transition.
- bool allow_map_transition =
- safe_to_add_transition &&
+ bool allow_map_transition = safe_to_add_transition &&
(GetIsolate()->context()->global_context()->object_function()->map() !=
map());
if (allow_map_transition) {
- // Allocate new instance descriptors for the old map with map transition.
- ElementsTransitionDescriptor desc(elements_transition_sentinel_name,
- Map::cast(new_map),
- elements_kind);
- Object* new_descriptors;
- MaybeObject* maybe_new_descriptors = descriptors->CopyInsert(
- &desc,
- KEEP_TRANSITIONS);
- if (!maybe_new_descriptors->ToObject(&new_descriptors)) {
- return maybe_new_descriptors;
- }
- descriptors = DescriptorArray::cast(new_descriptors);
- set_instance_descriptors(descriptors);
+ MaybeObject* maybe_transition =
+ current_map->AddElementsTransition(to_kind, new_map);
+ if (maybe_transition->IsFailure()) return maybe_transition;
}
-
return new_map;
}
@@ -2078,6 +2495,7 @@ void JSObject::LocalLookupRealNamedProperty(String* name,
Object* proto = GetPrototype();
if (proto->IsNull()) return result->NotFound();
ASSERT(proto->IsJSGlobalObject());
+ // A GlobalProxy's prototype should always be a proper JSObject.
return JSObject::cast(proto)->LocalLookupRealNamedProperty(name, result);
}
@@ -2172,7 +2590,7 @@ MaybeObject* JSObject::SetPropertyWithFailedAccessCheck(
case INTERCEPTOR: {
      // Try to lookup real named properties. Note that the only properties
      // that can be set are callbacks marked as ALL_CAN_WRITE on the
      // prototype chain.
- LookupResult r;
+ LookupResult r(GetIsolate());
LookupRealNamedProperty(name, &r);
if (r.IsProperty()) {
return SetPropertyWithFailedAccessCheck(&r,
@@ -2190,10 +2608,10 @@ MaybeObject* JSObject::SetPropertyWithFailedAccessCheck(
}
}
- Heap* heap = GetHeap();
- HandleScope scope(heap->isolate());
+ Isolate* isolate = GetIsolate();
+ HandleScope scope(isolate);
Handle<Object> value_handle(value);
- heap->isolate()->ReportFailedAccessCheck(this, v8::ACCESS_SET);
+ isolate->ReportFailedAccessCheck(this, v8::ACCESS_SET);
return *value_handle;
}
@@ -2204,7 +2622,7 @@ MaybeObject* JSReceiver::SetProperty(LookupResult* result,
PropertyAttributes attributes,
StrictModeFlag strict_mode) {
if (result->IsFound() && result->type() == HANDLER) {
- return JSProxy::cast(this)->SetPropertyWithHandler(
+ return result->proxy()->SetPropertyWithHandler(
key, value, attributes, strict_mode);
} else {
return JSObject::cast(this)->SetPropertyForResult(
@@ -2218,22 +2636,11 @@ bool JSProxy::HasPropertyWithHandler(String* name_raw) {
HandleScope scope(isolate);
Handle<Object> receiver(this);
Handle<Object> name(name_raw);
- Handle<Object> handler(this->handler());
- // Extract trap function.
- Handle<String> trap_name = isolate->factory()->LookupAsciiSymbol("has");
- Handle<Object> trap(v8::internal::GetProperty(handler, trap_name));
+ Handle<Object> args[] = { name };
+ Handle<Object> result = CallTrap(
+ "has", isolate->derived_has_trap(), ARRAY_SIZE(args), args);
if (isolate->has_pending_exception()) return Failure::Exception();
- if (trap->IsUndefined()) {
- trap = isolate->derived_has_trap();
- }
-
- // Call trap function.
- Object** args[] = { name.location() };
- bool has_exception;
- Handle<Object> result =
- Execution::Call(trap, handler, ARRAY_SIZE(args), args, &has_exception);
- if (has_exception) return Failure::Exception();
return result->ToBoolean()->IsTrue();
}
@@ -2249,24 +2656,85 @@ MUST_USE_RESULT MaybeObject* JSProxy::SetPropertyWithHandler(
Handle<Object> receiver(this);
Handle<Object> name(name_raw);
Handle<Object> value(value_raw);
- Handle<Object> handler(this->handler());
- // Extract trap function.
- Handle<String> trap_name = isolate->factory()->LookupAsciiSymbol("set");
- Handle<Object> trap(v8::internal::GetProperty(handler, trap_name));
+ Handle<Object> args[] = { receiver, name, value };
+ CallTrap("set", isolate->derived_set_trap(), ARRAY_SIZE(args), args);
if (isolate->has_pending_exception()) return Failure::Exception();
- if (trap->IsUndefined()) {
- trap = isolate->derived_set_trap();
- }
- // Call trap function.
- Object** args[] = {
- receiver.location(), name.location(), value.location()
- };
- bool has_exception;
- Execution::Call(trap, handler, ARRAY_SIZE(args), args, &has_exception);
- if (has_exception) return Failure::Exception();
+ return *value;
+}
+
+MUST_USE_RESULT MaybeObject* JSProxy::SetPropertyWithHandlerIfDefiningSetter(
+ String* name_raw,
+ Object* value_raw,
+ PropertyAttributes attributes,
+ StrictModeFlag strict_mode,
+ bool* found) {
+ *found = true; // except where defined otherwise...
+ Isolate* isolate = GetHeap()->isolate();
+ Handle<JSProxy> proxy(this);
+ Handle<Object> handler(this->handler()); // Trap might morph proxy.
+ Handle<String> name(name_raw);
+ Handle<Object> value(value_raw);
+ Handle<Object> args[] = { name };
+ Handle<Object> result = proxy->CallTrap(
+ "getPropertyDescriptor", Handle<Object>(), ARRAY_SIZE(args), args);
+ if (isolate->has_pending_exception()) return Failure::Exception();
+
+ if (!result->IsUndefined()) {
+ // The proxy handler cares about this property.
+ // Check whether it is virtualized as an accessor.
+ // Emulate [[GetProperty]] semantics for proxies.
+ bool has_pending_exception;
+ Handle<Object> argv[] = { result };
+ Handle<Object> desc =
+ Execution::Call(isolate->to_complete_property_descriptor(), result,
+ ARRAY_SIZE(argv), argv, &has_pending_exception);
+ if (has_pending_exception) return Failure::Exception();
+
+ Handle<String> conf_name =
+ isolate->factory()->LookupAsciiSymbol("configurable_");
+ Handle<Object> configurable(v8::internal::GetProperty(desc, conf_name));
+ ASSERT(!isolate->has_pending_exception());
+ if (configurable->IsFalse()) {
+ Handle<String> trap =
+ isolate->factory()->LookupAsciiSymbol("getPropertyDescriptor");
+ Handle<Object> args[] = { handler, trap, name };
+ Handle<Object> error = isolate->factory()->NewTypeError(
+ "proxy_prop_not_configurable", HandleVector(args, ARRAY_SIZE(args)));
+ return isolate->Throw(*error);
+ }
+ ASSERT(configurable->IsTrue());
+
+ // Check for AccessorDescriptor.
+ Handle<String> set_name = isolate->factory()->LookupAsciiSymbol("set_");
+ Handle<Object> setter(v8::internal::GetProperty(desc, set_name));
+ ASSERT(!isolate->has_pending_exception());
+ if (!setter->IsUndefined()) {
+ // We have a setter -- invoke it.
+ // TODO(rossberg): nicer would be to cast to some JSCallable here...
+ return proxy->SetPropertyWithDefinedSetter(
+ JSReceiver::cast(*setter), *value);
+ } else {
+ Handle<String> get_name = isolate->factory()->LookupAsciiSymbol("get_");
+ Handle<Object> getter(v8::internal::GetProperty(desc, get_name));
+ ASSERT(!isolate->has_pending_exception());
+ if (!getter->IsUndefined()) {
+ // We have a getter but no setter -- the property may not be
+ // written. In strict mode, throw an error.
+ if (strict_mode == kNonStrictMode) return *value;
+ Handle<Object> args[] = { name, proxy };
+ Handle<Object> error = isolate->factory()->NewTypeError(
+ "no_setter_in_callback", HandleVector(args, ARRAY_SIZE(args)));
+ return isolate->Throw(*error);
+ }
+ }
+ // Fall-through.
+ }
+
+ // The proxy does not define the property as an accessor.
+ *found = false;
return *value;
}
@@ -2277,31 +2745,16 @@ MUST_USE_RESULT MaybeObject* JSProxy::DeletePropertyWithHandler(
HandleScope scope(isolate);
Handle<Object> receiver(this);
Handle<Object> name(name_raw);
- Handle<Object> handler(this->handler());
- // Extract trap function.
- Handle<String> trap_name = isolate->factory()->LookupAsciiSymbol("delete");
- Handle<Object> trap(v8::internal::GetProperty(handler, trap_name));
+ Handle<Object> args[] = { name };
+ Handle<Object> result = CallTrap(
+ "delete", Handle<Object>(), ARRAY_SIZE(args), args);
if (isolate->has_pending_exception()) return Failure::Exception();
- if (trap->IsUndefined()) {
- Handle<Object> args[] = { handler, trap_name };
- Handle<Object> error = isolate->factory()->NewTypeError(
- "handler_trap_missing", HandleVector(args, ARRAY_SIZE(args)));
- isolate->Throw(*error);
- return Failure::Exception();
- }
-
- // Call trap function.
- Object** args[] = { name.location() };
- bool has_exception;
- Handle<Object> result =
- Execution::Call(trap, handler, ARRAY_SIZE(args), args, &has_exception);
- if (has_exception) return Failure::Exception();
Object* bool_result = result->ToBoolean();
- if (mode == STRICT_DELETION &&
- bool_result == isolate->heap()->false_value()) {
- Handle<Object> args[] = { handler, trap_name };
+ if (mode == STRICT_DELETION && bool_result == GetHeap()->false_value()) {
+ Handle<String> trap_name = isolate->factory()->LookupAsciiSymbol("delete");
+ Handle<Object> args[] = { Handle<Object>(handler()), trap_name };
Handle<Object> error = isolate->factory()->NewTypeError(
"handler_failed", HandleVector(args, ARRAY_SIZE(args)));
isolate->Throw(*error);
@@ -2311,39 +2764,76 @@ MUST_USE_RESULT MaybeObject* JSProxy::DeletePropertyWithHandler(
}
+MUST_USE_RESULT MaybeObject* JSProxy::DeleteElementWithHandler(
+ uint32_t index,
+ DeleteMode mode) {
+ Isolate* isolate = GetIsolate();
+ HandleScope scope(isolate);
+ Handle<String> name = isolate->factory()->Uint32ToString(index);
+ return JSProxy::DeletePropertyWithHandler(*name, mode);
+}
+
+
MUST_USE_RESULT PropertyAttributes JSProxy::GetPropertyAttributeWithHandler(
JSReceiver* receiver_raw,
- String* name_raw,
- bool* has_exception) {
+ String* name_raw) {
Isolate* isolate = GetIsolate();
HandleScope scope(isolate);
+ Handle<JSProxy> proxy(this);
+ Handle<Object> handler(this->handler()); // Trap might morph proxy.
Handle<JSReceiver> receiver(receiver_raw);
Handle<Object> name(name_raw);
- Handle<Object> handler(this->handler());
- // Extract trap function.
- Handle<String> trap_name =
- isolate->factory()->LookupAsciiSymbol("getPropertyDescriptor");
- Handle<Object> trap(v8::internal::GetProperty(handler, trap_name));
+ Handle<Object> args[] = { name };
+ Handle<Object> result = CallTrap(
+ "getPropertyDescriptor", Handle<Object>(), ARRAY_SIZE(args), args);
if (isolate->has_pending_exception()) return NONE;
- if (trap->IsUndefined()) {
- Handle<Object> args[] = { handler, trap_name };
+
+ if (result->IsUndefined()) return ABSENT;
+
+ bool has_pending_exception;
+ Handle<Object> argv[] = { result };
+ Handle<Object> desc =
+ Execution::Call(isolate->to_complete_property_descriptor(), result,
+ ARRAY_SIZE(argv), argv, &has_pending_exception);
+ if (has_pending_exception) return NONE;
+
+ // Convert result to PropertyAttributes.
+ Handle<String> enum_n = isolate->factory()->LookupAsciiSymbol("enumerable");
+ Handle<Object> enumerable(v8::internal::GetProperty(desc, enum_n));
+ if (isolate->has_pending_exception()) return NONE;
+ Handle<String> conf_n = isolate->factory()->LookupAsciiSymbol("configurable");
+ Handle<Object> configurable(v8::internal::GetProperty(desc, conf_n));
+ if (isolate->has_pending_exception()) return NONE;
+ Handle<String> writ_n = isolate->factory()->LookupAsciiSymbol("writable");
+ Handle<Object> writable(v8::internal::GetProperty(desc, writ_n));
+ if (isolate->has_pending_exception()) return NONE;
+
+ if (configurable->IsFalse()) {
+ Handle<String> trap =
+ isolate->factory()->LookupAsciiSymbol("getPropertyDescriptor");
+ Handle<Object> args[] = { handler, trap, name };
Handle<Object> error = isolate->factory()->NewTypeError(
- "handler_trap_missing", HandleVector(args, ARRAY_SIZE(args)));
+ "proxy_prop_not_configurable", HandleVector(args, ARRAY_SIZE(args)));
isolate->Throw(*error);
- *has_exception = true;
return NONE;
}
- // Call trap function.
- Object** args[] = { name.location() };
- Handle<Object> result =
- Execution::Call(trap, handler, ARRAY_SIZE(args), args, has_exception);
- if (has_exception) return NONE;
+ int attributes = NONE;
+ if (enumerable->ToBoolean()->IsFalse()) attributes |= DONT_ENUM;
+ if (configurable->ToBoolean()->IsFalse()) attributes |= DONT_DELETE;
+ if (writable->ToBoolean()->IsFalse()) attributes |= READ_ONLY;
+ return static_cast<PropertyAttributes>(attributes);
+}
+
- // TODO(rossberg): convert result to PropertyAttributes
- USE(result);
- return NONE;
+MUST_USE_RESULT PropertyAttributes JSProxy::GetElementAttributeWithHandler(
+ JSReceiver* receiver,
+ uint32_t index) {
+ Isolate* isolate = GetIsolate();
+ HandleScope scope(isolate);
+ Handle<String> name = isolate->factory()->Uint32ToString(index);
+ return GetPropertyAttributeWithHandler(receiver, *name);
}
@@ -2352,6 +2842,9 @@ void JSProxy::Fix() {
HandleScope scope(isolate);
Handle<JSProxy> self(this);
+ // Save identity hash.
+ MaybeObject* maybe_hash = GetIdentityHash(OMIT_CREATION);
+
if (IsJSFunctionProxy()) {
isolate->factory()->BecomeJSFunction(self);
// Code will be set on the JavaScript side.
@@ -2359,9 +2852,42 @@ void JSProxy::Fix() {
isolate->factory()->BecomeJSObject(self);
}
ASSERT(self->IsJSObject());
+
+ // Inherit identity, if it was present.
+ Object* hash;
+ if (maybe_hash->To<Object>(&hash) && hash->IsSmi()) {
+ Handle<JSObject> new_self(JSObject::cast(*self));
+ isolate->factory()->SetIdentityHash(new_self, hash);
+ }
}
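
// The save/restore above in outline: read the identity hash before the
// proxy changes representation (OMIT_CREATION: do not mint one), let the
// object become a function or plain object, then re-attach the hash only
// if one already existed. Illustrative stand-in type, not V8's:
struct FixableObject {
  int identity_hash;  // 0 means "no identity hash yet" in this sketch.
};

void FixPreservingHash(FixableObject* obj, void (*become)(FixableObject*)) {
  int saved = obj->identity_hash;       // Equivalent of the OMIT_CREATION read.
  become(obj);                          // May wipe the object's fields.
  if (saved != 0) obj->identity_hash = saved;  // Inherit only if present.
}
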
+MUST_USE_RESULT Handle<Object> JSProxy::CallTrap(const char* name,
+ Handle<Object> derived,
+ int argc,
+ Handle<Object> argv[]) {
+ Isolate* isolate = GetIsolate();
+ Handle<Object> handler(this->handler());
+
+ Handle<String> trap_name = isolate->factory()->LookupAsciiSymbol(name);
+ Handle<Object> trap(v8::internal::GetProperty(handler, trap_name));
+ if (isolate->has_pending_exception()) return trap;
+
+ if (trap->IsUndefined()) {
+ if (derived.is_null()) {
+ Handle<Object> args[] = { handler, trap_name };
+ Handle<Object> error = isolate->factory()->NewTypeError(
+ "handler_trap_missing", HandleVector(args, ARRAY_SIZE(args)));
+ isolate->Throw(*error);
+ return Handle<Object>();
+ }
+ trap = Handle<Object>(derived);
+ }
+
+ bool threw;
+ return Execution::Call(trap, handler, argc, argv, &threw);
+}
+
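// A standalone sketch of the trap-dispatch pattern CallTrap implements:
// look the trap up on the handler, fall back to a caller-supplied derived
// default when the handler omits it, and raise a "handler_trap_missing"
// error otherwise. All names below are illustrative stand-ins, not V8 API.
#include <functional>
#include <map>
#include <stdexcept>
#include <string>

typedef std::function<bool(const std::string&)> Trap;

Trap ResolveTrap(const std::map<std::string, Trap>& handler,
                 const std::string& name,
                 Trap derived) {  // 'derived' may be an empty function.
  std::map<std::string, Trap>::const_iterator it = handler.find(name);
  if (it != handler.end()) return it->second;  // Trap present on handler.
  if (!derived) {
    throw std::runtime_error("handler_trap_missing: " + name);
  }
  return derived;  // Use the derived (default) implementation.
}
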
MaybeObject* JSObject::SetPropertyForResult(LookupResult* result,
String* name,
@@ -2386,48 +2912,46 @@ MaybeObject* JSObject::SetPropertyForResult(LookupResult* result,
}
// Check access rights if needed.
- if (IsAccessCheckNeeded()
- && !heap->isolate()->MayNamedAccess(this, name, v8::ACCESS_SET)) {
- return SetPropertyWithFailedAccessCheck(result,
- name,
- value,
- true,
- strict_mode);
+ if (IsAccessCheckNeeded()) {
+ if (!heap->isolate()->MayNamedAccess(this, name, v8::ACCESS_SET)) {
+ return SetPropertyWithFailedAccessCheck(
+ result, name, value, true, strict_mode);
+ }
}
if (IsJSGlobalProxy()) {
Object* proto = GetPrototype();
if (proto->IsNull()) return value;
ASSERT(proto->IsJSGlobalObject());
- return JSObject::cast(proto)->SetProperty(
+ return JSObject::cast(proto)->SetPropertyForResult(
result, name, value, attributes, strict_mode);
}
if (!result->IsProperty() && !IsJSContextExtensionObject()) {
- // We could not find a local property so let's check whether there is an
- // accessor that wants to handle the property.
- LookupResult accessor_result;
- LookupCallbackSetterInPrototypes(name, &accessor_result);
- if (accessor_result.IsProperty()) {
- return SetPropertyWithCallback(accessor_result.GetCallbackObject(),
- name,
- value,
- accessor_result.holder(),
- strict_mode);
- }
+ bool found = false;
+ MaybeObject* result_object;
+ result_object = SetPropertyWithCallbackSetterInPrototypes(name,
+ value,
+ attributes,
+ &found,
+ strict_mode);
+ if (found) return result_object;
}
+
+ // At this point, no GC should have happened, as that would invalidate
+ // 'result', which we cannot handlify!
+
if (!result->IsFound()) {
// Neither properties nor transitions found.
return AddProperty(name, value, attributes, strict_mode);
}
if (result->IsReadOnly() && result->IsProperty()) {
if (strict_mode == kStrictMode) {
- HandleScope scope(heap->isolate());
- Handle<String> key(name);
- Handle<Object> holder(this);
- Handle<Object> args[2] = { key, holder };
+ Handle<JSObject> self(this);
+ Handle<String> hname(name);
+ Handle<Object> args[] = { hname, self };
return heap->isolate()->Throw(*heap->isolate()->factory()->NewTypeError(
- "strict_read_only_property", HandleVector(args, 2)));
+ "strict_read_only_property", HandleVector(args, ARRAY_SIZE(args))));
} else {
return value;
}
@@ -2483,10 +3007,11 @@ MaybeObject* JSObject::SetPropertyForResult(LookupResult* result,
case NULL_DESCRIPTOR:
case ELEMENTS_TRANSITION:
return ConvertDescriptorToFieldAndMapTransition(name, value, attributes);
- default:
+ case HANDLER:
UNREACHABLE();
+ return value;
}
- UNREACHABLE();
+ UNREACHABLE(); // keep the compiler happy
return value;
}
@@ -2508,12 +3033,12 @@ MaybeObject* JSObject::SetLocalPropertyIgnoreAttributes(
// Make sure that the top context does not change when doing callbacks or
// interceptor calls.
AssertNoContextChange ncc;
- LookupResult result;
+ Isolate* isolate = GetIsolate();
+ LookupResult result(isolate);
LocalLookup(name, &result);
// Check access rights if needed.
if (IsAccessCheckNeeded()) {
- Heap* heap = GetHeap();
- if (!heap->isolate()->MayNamedAccess(this, name, v8::ACCESS_SET)) {
+ if (!isolate->MayNamedAccess(this, name, v8::ACCESS_SET)) {
return SetPropertyWithFailedAccessCheck(&result,
name,
value,
@@ -2571,10 +3096,11 @@ MaybeObject* JSObject::SetLocalPropertyIgnoreAttributes(
case NULL_DESCRIPTOR:
case ELEMENTS_TRANSITION:
return ConvertDescriptorToFieldAndMapTransition(name, value, attributes);
- default:
+ case HANDLER:
UNREACHABLE();
+ return value;
}
- UNREACHABLE();
+ UNREACHABLE(); // keep the compiler happy
return value;
}
@@ -2584,7 +3110,7 @@ PropertyAttributes JSObject::GetPropertyAttributePostInterceptor(
String* name,
bool continue_search) {
// Check local property, ignore interceptor.
- LookupResult result;
+ LookupResult result(GetIsolate());
LocalLookupRealNamedProperty(name, &result);
if (result.IsProperty()) return result.GetAttributes();
@@ -2656,12 +3182,11 @@ PropertyAttributes JSReceiver::GetPropertyAttributeWithReceiver(
String* key) {
uint32_t index = 0;
if (IsJSObject() && key->AsArrayIndex(&index)) {
- if (JSObject::cast(this)->HasElementWithReceiver(receiver, index))
- return NONE;
- return ABSENT;
+ return JSObject::cast(this)->HasElementWithReceiver(receiver, index)
+ ? NONE : ABSENT;
}
// Named property.
- LookupResult result;
+ LookupResult result(GetIsolate());
Lookup(key, &result);
return GetPropertyAttribute(receiver, &result, key, true);
}
@@ -2688,10 +3213,8 @@ PropertyAttributes JSReceiver::GetPropertyAttribute(JSReceiver* receiver,
case CALLBACKS:
return result->GetAttributes();
case HANDLER: {
- // TODO(rossberg): propagate exceptions properly.
- bool has_exception = false;
- return JSProxy::cast(this)->GetPropertyAttributeWithHandler(
- receiver, name, &has_exception);
+ return JSProxy::cast(result->proxy())->GetPropertyAttributeWithHandler(
+ receiver, name);
}
case INTERCEPTOR:
return result->holder()->GetPropertyAttributeWithInterceptor(
@@ -2712,7 +3235,7 @@ PropertyAttributes JSReceiver::GetLocalPropertyAttribute(String* name) {
return ABSENT;
}
// Named property.
- LookupResult result;
+ LookupResult result(GetIsolate());
LocalLookup(name, &result);
return GetPropertyAttribute(this, &result, name, false);
}
@@ -2727,7 +3250,9 @@ MaybeObject* NormalizedMapCache::Get(JSObject* obj,
if (result->IsMap() &&
Map::cast(result)->EquivalentToForNormalization(fast, mode)) {
#ifdef DEBUG
- Map::cast(result)->SharedMapVerify();
+ if (FLAG_verify_heap) {
+ Map::cast(result)->SharedMapVerify();
+ }
if (FLAG_enable_slow_asserts) {
// The cached map should match newly created normalized map bit-by-bit.
Object* fresh;
@@ -2763,6 +3288,15 @@ void NormalizedMapCache::Clear() {
}
+void JSObject::UpdateMapCodeCache(Handle<JSObject> object,
+ Handle<String> name,
+ Handle<Code> code) {
+ Isolate* isolate = object->GetIsolate();
+ CALL_HEAP_FUNCTION_VOID(isolate,
+ object->UpdateMapCodeCache(*name, *code));
+}
+
+
MaybeObject* JSObject::UpdateMapCodeCache(String* name, Code* code) {
if (map()->is_shared()) {
// Fast case maps are never marked as shared.
@@ -2852,12 +3386,14 @@ MaybeObject* JSObject::NormalizeProperties(PropertyNormalizationMode mode,
case INTERCEPTOR:
case ELEMENTS_TRANSITION:
break;
- default:
+ case HANDLER:
+ case NORMAL:
UNREACHABLE();
+ break;
}
}
- Heap* current_heap = map_of_this->heap();
+ Heap* current_heap = GetHeap();
// Copy the next enumeration index from instance descriptor.
int index = map_of_this->instance_descriptors()->NextEnumerationIndex();
@@ -2879,6 +3415,10 @@ MaybeObject* JSObject::NormalizeProperties(PropertyNormalizationMode mode,
ASSERT(instance_size_delta >= 0);
current_heap->CreateFillerObjectAt(this->address() + new_instance_size,
instance_size_delta);
+ if (Marking::IsBlack(Marking::MarkBitFrom(this))) {
+ MemoryChunk::IncrementLiveBytes(this->address(), -instance_size_delta);
+ }
+
set_map(new_map);
new_map->clear_instance_descriptors();
@@ -2912,13 +3452,14 @@ MaybeObject* JSObject::NormalizeElements() {
FixedArrayBase* array = FixedArrayBase::cast(elements());
Map* old_map = array->map();
bool is_arguments =
- (old_map == old_map->heap()->non_strict_arguments_elements_map());
+ (old_map == old_map->GetHeap()->non_strict_arguments_elements_map());
if (is_arguments) {
array = FixedArrayBase::cast(FixedArray::cast(array)->get(1));
}
if (array->IsDictionary()) return array;
ASSERT(HasFastElements() ||
+ HasFastSmiOnlyElements() ||
HasFastDoubleElements() ||
HasFastArgumentsElements());
// Compute the effective length and allocate a new backing store.
@@ -2953,7 +3494,8 @@ MaybeObject* JSObject::NormalizeElements() {
if (!maybe_value_object->ToObject(&value)) return maybe_value_object;
}
} else {
- ASSERT(old_map->has_fast_elements());
+ ASSERT(old_map->has_fast_elements() ||
+ old_map->has_fast_smi_only_elements());
value = FixedArray::cast(array)->get(i);
}
PropertyDetails details = PropertyDetails(NONE, NORMAL);
@@ -2973,13 +3515,14 @@ MaybeObject* JSObject::NormalizeElements() {
// Set the new map first to satisfy the elements type assert in
// set_elements().
Object* new_map;
- MaybeObject* maybe = map()->GetSlowElementsMap();
+ MaybeObject* maybe = GetElementsTransitionMap(DICTIONARY_ELEMENTS);
if (!maybe->ToObject(&new_map)) return maybe;
set_map(Map::cast(new_map));
set_elements(dictionary);
}
- old_map->isolate()->counters()->elements_to_dictionary()->Increment();
+ old_map->GetHeap()->isolate()->counters()->elements_to_dictionary()->
+ Increment();
#ifdef DEBUG
if (FLAG_trace_normalization) {
@@ -2993,102 +3536,229 @@ MaybeObject* JSObject::NormalizeElements() {
}
-MaybeObject* JSObject::GetHiddenProperties(HiddenPropertiesFlag flag) {
+Smi* JSReceiver::GenerateIdentityHash() {
Isolate* isolate = GetIsolate();
- Heap* heap = isolate->heap();
- Object* holder = BypassGlobalProxy();
- if (holder->IsUndefined()) return heap->undefined_value();
- JSObject* obj = JSObject::cast(holder);
- if (obj->HasFastProperties()) {
+
+ int hash_value;
+ int attempts = 0;
+ do {
+ // Generate a random 32-bit hash value but limit range to fit
+ // within a smi.
+ hash_value = V8::RandomPrivate(isolate) & Smi::kMaxValue;
+ attempts++;
+ } while (hash_value == 0 && attempts < 30);
+ hash_value = hash_value != 0 ? hash_value : 1; // never return 0
+
+ return Smi::FromInt(hash_value);
+}
+
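// The retry loop above in isolation: draw random values masked into the
// positive smi range, retry a bounded number of times if zero comes up,
// and finally pin the result to 1, since 0 is reserved to mean "no hash".
// Minimal sketch; kSmiMax and the RNG are stand-ins, not V8's.
#include <random>
#include <stdint.h>

int GenerateNonZeroHash(std::mt19937* rng) {
  const uint32_t kSmiMax = (1u << 30) - 1;  // Stand-in for Smi::kMaxValue.
  int hash_value = 0;
  for (int attempts = 0; hash_value == 0 && attempts < 30; attempts++) {
    hash_value = static_cast<int>((*rng)() & kSmiMax);
  }
  return hash_value != 0 ? hash_value : 1;  // Never return 0.
}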
+
+MaybeObject* JSObject::SetIdentityHash(Object* hash, CreationFlag flag) {
+ MaybeObject* maybe = SetHiddenProperty(GetHeap()->identity_hash_symbol(),
+ hash);
+ if (maybe->IsFailure()) return maybe;
+ return this;
+}
+
+
+MaybeObject* JSObject::GetIdentityHash(CreationFlag flag) {
+ Object* stored_value = GetHiddenProperty(GetHeap()->identity_hash_symbol());
+ if (stored_value->IsSmi()) return stored_value;
+
+ // Do not generate a permanent identity hash code if not requested.
+ if (flag == OMIT_CREATION) return GetHeap()->undefined_value();
+
+ Smi* hash = GenerateIdentityHash();
+ MaybeObject* result = SetHiddenProperty(GetHeap()->identity_hash_symbol(),
+ hash);
+ if (result->IsFailure()) return result;
+ if (result->ToObjectUnchecked()->IsUndefined()) {
+ // Trying to get hash of detached proxy.
+ return Smi::FromInt(0);
+ }
+ return hash;
+}
+
+
+MaybeObject* JSProxy::GetIdentityHash(CreationFlag flag) {
+ Object* hash = this->hash();
+ if (!hash->IsSmi() && flag == ALLOW_CREATION) {
+ hash = GenerateIdentityHash();
+ set_hash(hash);
+ }
+ return hash;
+}
+
+
+Object* JSObject::GetHiddenProperty(String* key) {
+ if (IsJSGlobalProxy()) {
+ // For a proxy, use the prototype as target object.
+ Object* proxy_parent = GetPrototype();
+ // If the proxy is detached, return undefined.
+ if (proxy_parent->IsNull()) return GetHeap()->undefined_value();
+ ASSERT(proxy_parent->IsJSGlobalObject());
+ return JSObject::cast(proxy_parent)->GetHiddenProperty(key);
+ }
+ ASSERT(!IsJSGlobalProxy());
+ MaybeObject* hidden_lookup = GetHiddenPropertiesDictionary(false);
+ ASSERT(!hidden_lookup->IsFailure()); // No failure when passing false as arg.
+ if (hidden_lookup->ToObjectUnchecked()->IsUndefined()) {
+ return GetHeap()->undefined_value();
+ }
+ StringDictionary* dictionary =
+ StringDictionary::cast(hidden_lookup->ToObjectUnchecked());
+ int entry = dictionary->FindEntry(key);
+ if (entry == StringDictionary::kNotFound) return GetHeap()->undefined_value();
+ return dictionary->ValueAt(entry);
+}
+
+
+MaybeObject* JSObject::SetHiddenProperty(String* key, Object* value) {
+ if (IsJSGlobalProxy()) {
+ // For a proxy, use the prototype as target object.
+ Object* proxy_parent = GetPrototype();
+ // If the proxy is detached, return undefined.
+ if (proxy_parent->IsNull()) return GetHeap()->undefined_value();
+ ASSERT(proxy_parent->IsJSGlobalObject());
+ return JSObject::cast(proxy_parent)->SetHiddenProperty(key, value);
+ }
+ ASSERT(!IsJSGlobalProxy());
+ MaybeObject* hidden_lookup = GetHiddenPropertiesDictionary(true);
+ StringDictionary* dictionary;
+ if (!hidden_lookup->To<StringDictionary>(&dictionary)) return hidden_lookup;
+
+ // If it was found, check if the key is already in the dictionary.
+ int entry = dictionary->FindEntry(key);
+ if (entry != StringDictionary::kNotFound) {
+ // If key was found, just update the value.
+ dictionary->ValueAtPut(entry, value);
+ return this;
+ }
+ // Key was not already in the dictionary, so add the entry.
+ MaybeObject* insert_result = dictionary->Add(key,
+ value,
+ PropertyDetails(NONE, NORMAL));
+ StringDictionary* new_dict;
+ if (!insert_result->To<StringDictionary>(&new_dict)) return insert_result;
+ if (new_dict != dictionary) {
+ // If adding the key expanded the dictionary (i.e., Add returned a new
+ // dictionary), store it back to the object.
+ MaybeObject* store_result = SetHiddenPropertiesDictionary(new_dict);
+ if (store_result->IsFailure()) return store_result;
+ }
+ // Return this to mark success.
+ return this;
+}
+
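// The store-back contract above in miniature: an insert may reallocate the
// backing store, so Add returns the dictionary to keep using and the
// caller must write it back when it changed. Illustrative types only,
// mirroring StringDictionary::Add's calling convention.
#include <cstddef>
#include <memory>
#include <string>
#include <utility>
#include <vector>

struct Dict {
  size_t capacity;
  std::vector<std::pair<std::string, int> > entries;
};

std::shared_ptr<Dict> Add(std::shared_ptr<Dict> dict,
                          const std::string& key, int value) {
  if (dict->entries.size() == dict->capacity) {
    std::shared_ptr<Dict> grown(new Dict(*dict));  // Reallocate, larger.
    grown->capacity *= 2;
    dict = grown;  // The old dictionary is now stale.
  }
  dict->entries.push_back(std::make_pair(key, value));
  return dict;  // Caller: if the result differs, store it back.
}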
+
+void JSObject::DeleteHiddenProperty(String* key) {
+ if (IsJSGlobalProxy()) {
+ // For a proxy, use the prototype as target object.
+ Object* proxy_parent = GetPrototype();
+ // If the proxy is detached, return immediately.
+ if (proxy_parent->IsNull()) return;
+ ASSERT(proxy_parent->IsJSGlobalObject());
+ JSObject::cast(proxy_parent)->DeleteHiddenProperty(key);
+ return;
+ }
+ MaybeObject* hidden_lookup = GetHiddenPropertiesDictionary(false);
+ ASSERT(!hidden_lookup->IsFailure()); // No failure when passing false as arg.
+ if (hidden_lookup->ToObjectUnchecked()->IsUndefined()) return;
+ StringDictionary* dictionary =
+ StringDictionary::cast(hidden_lookup->ToObjectUnchecked());
+ int entry = dictionary->FindEntry(key);
+ if (entry == StringDictionary::kNotFound) {
+ // Key wasn't in dictionary. Deletion is a success.
+ return;
+ }
+ // Key was in the dictionary. Remove it.
+ dictionary->DeleteProperty(entry, JSReceiver::FORCE_DELETION);
+}
+
+
+bool JSObject::HasHiddenProperties() {
+ return GetPropertyAttributePostInterceptor(this,
+ GetHeap()->hidden_symbol(),
+ false) != ABSENT;
+}
+
+
+MaybeObject* JSObject::GetHiddenPropertiesDictionary(bool create_if_absent) {
+ ASSERT(!IsJSGlobalProxy());
+ if (HasFastProperties()) {
// If the object has fast properties, check whether the first slot
// in the descriptor array matches the hidden symbol. Since the
// hidden symbol's hash code is zero (and no other string has hash
// code zero) it will always occupy the first entry if present.
- DescriptorArray* descriptors = obj->map()->instance_descriptors();
+ DescriptorArray* descriptors = this->map()->instance_descriptors();
if ((descriptors->number_of_descriptors() > 0) &&
- (descriptors->GetKey(0) == heap->hidden_symbol()) &&
+ (descriptors->GetKey(0) == GetHeap()->hidden_symbol()) &&
descriptors->IsProperty(0)) {
ASSERT(descriptors->GetType(0) == FIELD);
- return obj->FastPropertyAt(descriptors->GetFieldIndex(0));
+ Object* hidden_store =
+ this->FastPropertyAt(descriptors->GetFieldIndex(0));
+ return StringDictionary::cast(hidden_store);
}
- }
-
- // Only attempt to find the hidden properties in the local object and not
- // in the prototype chain.
- if (!obj->HasHiddenPropertiesObject()) {
- // Hidden properties object not found. Allocate a new hidden properties
- // object if requested. Otherwise return the undefined value.
- if (flag == ALLOW_CREATION) {
- Object* hidden_obj;
- { MaybeObject* maybe_obj = heap->AllocateJSObject(
- isolate->context()->global_context()->object_function());
- if (!maybe_obj->ToObject(&hidden_obj)) return maybe_obj;
- }
- // Don't allow leakage of the hidden object through accessors
- // on Object.prototype.
- {
- MaybeObject* maybe_obj =
- JSObject::cast(hidden_obj)->SetPrototype(heap->null_value(), false);
- if (maybe_obj->IsFailure()) return maybe_obj;
- }
- return obj->SetHiddenPropertiesObject(hidden_obj);
- } else {
- return heap->undefined_value();
- }
- }
- return obj->GetHiddenPropertiesObject();
+ } else {
+ PropertyAttributes attributes;
+ // You can't install a getter on a property indexed by the hidden symbol,
+ // so we can be sure that GetLocalPropertyPostInterceptor returns a real
+ // object.
+ Object* lookup =
+ GetLocalPropertyPostInterceptor(this,
+ GetHeap()->hidden_symbol(),
+ &attributes)->ToObjectUnchecked();
+ if (!lookup->IsUndefined()) {
+ return StringDictionary::cast(lookup);
+ }
+ }
+ if (!create_if_absent) return GetHeap()->undefined_value();
+ const int kInitialSize = 5;
+ MaybeObject* dict_alloc = StringDictionary::Allocate(kInitialSize);
+ StringDictionary* dictionary;
+ if (!dict_alloc->To<StringDictionary>(&dictionary)) return dict_alloc;
+ MaybeObject* store_result =
+ SetPropertyPostInterceptor(GetHeap()->hidden_symbol(),
+ dictionary,
+ DONT_ENUM,
+ kNonStrictMode);
+ if (store_result->IsFailure()) return store_result;
+ return dictionary;
}
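
// Why checking descriptor slot 0 suffices on the fast path: descriptors
// are sorted by hash, and the hidden symbol is the only string whose hash
// is zero, so if it is present at all it must occupy the first slot. The
// same check over a plain sorted array, as an illustrative sketch:
#include <stdint.h>
#include <string>
#include <utility>
#include <vector>

bool HasHiddenSlot(
    const std::vector<std::pair<uint32_t, std::string> >& sorted_descriptors) {
  // Hash 0 is reserved for the hidden symbol in this sketch.
  return !sorted_descriptors.empty() && sorted_descriptors.front().first == 0;
}
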
-MaybeObject* JSObject::GetIdentityHash(HiddenPropertiesFlag flag) {
- Isolate* isolate = GetIsolate();
- Object* hidden_props_obj;
- { MaybeObject* maybe_obj = GetHiddenProperties(flag);
- if (!maybe_obj->ToObject(&hidden_props_obj)) return maybe_obj;
- }
- if (!hidden_props_obj->IsJSObject()) {
- // We failed to create hidden properties. That's a detached
- // global proxy.
- ASSERT(hidden_props_obj->IsUndefined());
- return Smi::FromInt(0);
- }
- JSObject* hidden_props = JSObject::cast(hidden_props_obj);
- String* hash_symbol = isolate->heap()->identity_hash_symbol();
- {
- // Note that HasLocalProperty() can cause a GC in the general case in the
- // presence of interceptors.
- AssertNoAllocation no_alloc;
- if (hidden_props->HasLocalProperty(hash_symbol)) {
- MaybeObject* hash = hidden_props->GetProperty(hash_symbol);
- return Smi::cast(hash->ToObjectChecked());
+MaybeObject* JSObject::SetHiddenPropertiesDictionary(
+ StringDictionary* dictionary) {
+ ASSERT(!IsJSGlobalProxy());
+ ASSERT(HasHiddenProperties());
+ if (HasFastProperties()) {
+ // If the object has fast properties, check whether the first slot
+ // in the descriptor array matches the hidden symbol. Since the
+ // hidden symbol's hash code is zero (and no other string has hash
+ // code zero) it will always occupy the first entry if present.
+ DescriptorArray* descriptors = this->map()->instance_descriptors();
+ if ((descriptors->number_of_descriptors() > 0) &&
+ (descriptors->GetKey(0) == GetHeap()->hidden_symbol()) &&
+ descriptors->IsProperty(0)) {
+ ASSERT(descriptors->GetType(0) == FIELD);
+ this->FastPropertyAtPut(descriptors->GetFieldIndex(0), dictionary);
+ return this;
}
}
-
- int hash_value;
- int attempts = 0;
- do {
- // Generate a random 32-bit hash value but limit range to fit
- // within a smi.
- hash_value = V8::Random(isolate) & Smi::kMaxValue;
- attempts++;
- } while (hash_value == 0 && attempts < 30);
- hash_value = hash_value != 0 ? hash_value : 1; // never return 0
-
- Smi* hash = Smi::FromInt(hash_value);
- { MaybeObject* result = hidden_props->SetLocalPropertyIgnoreAttributes(
- hash_symbol,
- hash,
- static_cast<PropertyAttributes>(None));
- if (result->IsFailure()) return result;
- }
- return hash;
+ MaybeObject* store_result =
+ SetPropertyPostInterceptor(GetHeap()->hidden_symbol(),
+ dictionary,
+ DONT_ENUM,
+ kNonStrictMode);
+ if (store_result->IsFailure()) return store_result;
+ return this;
}
MaybeObject* JSObject::DeletePropertyPostInterceptor(String* name,
DeleteMode mode) {
// Check local property, ignore interceptor.
- LookupResult result;
+ LookupResult result(GetIsolate());
LocalLookupRealNamedProperty(name, &result);
if (!result.IsProperty()) return GetHeap()->true_value();
@@ -3201,9 +3871,16 @@ MaybeObject* JSObject::DeleteElement(uint32_t index, DeleteMode mode) {
MaybeObject* JSReceiver::DeleteProperty(String* name, DeleteMode mode) {
if (IsJSProxy()) {
return JSProxy::cast(this)->DeletePropertyWithHandler(name, mode);
- } else {
- return JSObject::cast(this)->DeleteProperty(name, mode);
}
+ return JSObject::cast(this)->DeleteProperty(name, mode);
+}
+
+
+MaybeObject* JSReceiver::DeleteElement(uint32_t index, DeleteMode mode) {
+ if (IsJSProxy()) {
+ return JSProxy::cast(this)->DeleteElementWithHandler(index, mode);
+ }
+ return JSObject::cast(this)->DeleteElement(index, mode);
}
@@ -3230,7 +3907,7 @@ MaybeObject* JSObject::DeleteProperty(String* name, DeleteMode mode) {
if (name->AsArrayIndex(&index)) {
return DeleteElement(index, mode);
} else {
- LookupResult result;
+ LookupResult result(isolate);
LocalLookup(name, &result);
if (!result.IsProperty()) return isolate->heap()->true_value();
// Ignore attributes if forcing a deletion.
@@ -3267,7 +3944,8 @@ MaybeObject* JSObject::DeleteProperty(String* name, DeleteMode mode) {
bool JSObject::ReferencesObjectFromElements(FixedArray* elements,
ElementsKind kind,
Object* object) {
- ASSERT(kind == FAST_ELEMENTS || kind == DICTIONARY_ELEMENTS);
+ ASSERT(kind == FAST_ELEMENTS ||
+ kind == DICTIONARY_ELEMENTS);
if (kind == FAST_ELEMENTS) {
int length = IsJSArray()
? Smi::cast(JSArray::cast(this)->length())->value()
@@ -3287,7 +3965,7 @@ bool JSObject::ReferencesObjectFromElements(FixedArray* elements,
// Check whether this object references another object.
bool JSObject::ReferencesObject(Object* obj) {
Map* map_of_this = map();
- Heap* heap = map_of_this->heap();
+ Heap* heap = GetHeap();
AssertNoAllocation no_alloc;
// Is the object the constructor for this object?
@@ -3322,6 +4000,8 @@ bool JSObject::ReferencesObject(Object* obj) {
// Raw pixels and external arrays do not reference other
// objects.
break;
+ case FAST_SMI_ONLY_ELEMENTS:
+ break;
case FAST_ELEMENTS:
case DICTIONARY_ELEMENTS: {
FixedArray* elements = FixedArray::cast(this->elements());
@@ -3438,15 +4118,16 @@ MaybeObject* JSObject::PreventExtensions() {
// Tests for the fast common case for property enumeration:
-// - This object and all prototypes has an enum cache (which means that it has
-// no interceptors and needs no access checks).
+// - This object and all prototypes have an enum cache (which means that
+// it is not a proxy, has no interceptors and needs no access checks).
// - This object has no elements.
// - No prototype has enumerable properties/elements.
-bool JSObject::IsSimpleEnum() {
+bool JSReceiver::IsSimpleEnum() {
Heap* heap = GetHeap();
for (Object* o = this;
o != heap->null_value();
o = JSObject::cast(o)->GetPrototype()) {
+ if (!o->IsJSObject()) return false;
JSObject* curr = JSObject::cast(o);
if (!curr->map()->instance_descriptors()->HasEnumCache()) return false;
ASSERT(!curr->HasNamedInterceptor());
@@ -3509,15 +4190,6 @@ AccessorDescriptor* Map::FindAccessor(String* name) {
void JSReceiver::LocalLookup(String* name, LookupResult* result) {
- if (IsJSProxy()) {
- result->HandlerResult();
- } else {
- JSObject::cast(this)->LocalLookup(name, result);
- }
-}
-
-
-void JSObject::LocalLookup(String* name, LookupResult* result) {
ASSERT(name->IsString());
Heap* heap = GetHeap();
@@ -3526,28 +4198,36 @@ void JSObject::LocalLookup(String* name, LookupResult* result) {
Object* proto = GetPrototype();
if (proto->IsNull()) return result->NotFound();
ASSERT(proto->IsJSGlobalObject());
- return JSObject::cast(proto)->LocalLookup(name, result);
+ return JSReceiver::cast(proto)->LocalLookup(name, result);
+ }
+
+ if (IsJSProxy()) {
+ result->HandlerResult(JSProxy::cast(this));
+ return;
}
// Do not use inline caching if the object is a non-global object
// that requires access checks.
- if (!IsJSGlobalProxy() && IsAccessCheckNeeded()) {
+ if (IsAccessCheckNeeded()) {
result->DisallowCaching();
}
+ JSObject* js_object = JSObject::cast(this);
+
// Check __proto__ before interceptor.
if (name->Equals(heap->Proto_symbol()) && !IsJSContextExtensionObject()) {
- result->ConstantResult(this);
+ result->ConstantResult(js_object);
return;
}
// Check for lookup interceptor except when bootstrapping.
- if (HasNamedInterceptor() && !heap->isolate()->bootstrapper()->IsActive()) {
- result->InterceptorResult(this);
+ if (js_object->HasNamedInterceptor() &&
+ !heap->isolate()->bootstrapper()->IsActive()) {
+ result->InterceptorResult(js_object);
return;
}
- LocalLookupRealNamedProperty(name, result);
+ js_object->LocalLookupRealNamedProperty(name, result);
}
@@ -3557,7 +4237,7 @@ void JSReceiver::Lookup(String* name, LookupResult* result) {
for (Object* current = this;
current != heap->null_value();
current = JSObject::cast(current)->GetPrototype()) {
- JSObject::cast(current)->LocalLookup(name, result);
+ JSReceiver::cast(current)->LocalLookup(name, result);
if (result->IsProperty()) return;
}
result->NotFound();
@@ -3568,7 +4248,7 @@ void JSReceiver::Lookup(String* name, LookupResult* result) {
void JSObject::LookupCallback(String* name, LookupResult* result) {
Heap* heap = GetHeap();
for (Object* current = this;
- current != heap->null_value();
+ current != heap->null_value() && current->IsJSObject();
current = JSObject::cast(current)->GetPrototype()) {
JSObject::cast(current)->LocalLookupRealNamedProperty(name, result);
if (result->IsProperty() && result->type() == CALLBACKS) return;
@@ -3577,19 +4257,27 @@ void JSObject::LookupCallback(String* name, LookupResult* result) {
}
-// Search for a getter or setter in an elements dictionary. Returns either
-// undefined if the element is read-only, or the getter/setter pair (fixed
-// array) if there is an existing one, or the hole value if the element does
-// not exist or is a normal non-getter/setter data element.
-static Object* FindGetterSetterInDictionary(NumberDictionary* dictionary,
- uint32_t index,
- Heap* heap) {
+// Search for a getter or setter in an elements dictionary and update its
+// attributes. Returns either undefined if the element is read-only, or the
+// getter/setter pair (fixed array) if there is an existing one, or the hole
+// value if the element does not exist or is a normal non-getter/setter data
+// element.
+static Object* UpdateGetterSetterInDictionary(NumberDictionary* dictionary,
+ uint32_t index,
+ PropertyAttributes attributes,
+ Heap* heap) {
int entry = dictionary->FindEntry(index);
if (entry != NumberDictionary::kNotFound) {
Object* result = dictionary->ValueAt(entry);
PropertyDetails details = dictionary->DetailsAt(entry);
if (details.IsReadOnly()) return heap->undefined_value();
- if (details.type() == CALLBACKS && result->IsFixedArray()) return result;
+ if (details.type() == CALLBACKS && result->IsFixedArray()) {
+ if (details.attributes() != attributes) {
+ dictionary->DetailsAtPut(entry,
+ PropertyDetails(attributes, CALLBACKS, index));
+ }
+ return result;
+ }
}
return heap->the_hole_value();
}
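
// The three-way result above, made explicit: the real code encodes the
// states as undefined / a getter-setter FixedArray / the hole value. An
// enum and classifier carrying the same meaning, for illustration only:
enum GetterSetterLookupResult {
  LOOKUP_READ_ONLY,      // "undefined": element exists but is read-only.
  LOOKUP_EXISTING_PAIR,  // the pair: reuse it (attributes were updated).
  LOOKUP_ABSENT_OR_DATA  // "the hole": no accessor pair at this index.
};

GetterSetterLookupResult Classify(bool found, bool read_only,
                                  bool is_callbacks_pair) {
  if (!found) return LOOKUP_ABSENT_OR_DATA;        // The hole.
  if (read_only) return LOOKUP_READ_ONLY;          // Undefined: don't touch.
  if (is_callbacks_pair) return LOOKUP_EXISTING_PAIR;
  return LOOKUP_ABSENT_OR_DATA;                    // Plain data element.
}
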
@@ -3614,6 +4302,7 @@ MaybeObject* JSObject::DefineGetterSetter(String* name,
if (is_element) {
switch (GetElementsKind()) {
+ case FAST_SMI_ONLY_ELEMENTS:
case FAST_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
break;
@@ -3630,8 +4319,10 @@ MaybeObject* JSObject::DefineGetterSetter(String* name,
// elements.
return heap->undefined_value();
case DICTIONARY_ELEMENTS: {
- Object* probe =
- FindGetterSetterInDictionary(element_dictionary(), index, heap);
+ Object* probe = UpdateGetterSetterInDictionary(element_dictionary(),
+ index,
+ attributes,
+ heap);
if (!probe->IsTheHole()) return probe;
// Otherwise allow to override it.
break;
@@ -3648,7 +4339,10 @@ MaybeObject* JSObject::DefineGetterSetter(String* name,
FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
if (arguments->IsDictionary()) {
NumberDictionary* dictionary = NumberDictionary::cast(arguments);
- probe = FindGetterSetterInDictionary(dictionary, index, heap);
+ probe = UpdateGetterSetterInDictionary(dictionary,
+ index,
+ attributes,
+ heap);
if (!probe->IsTheHole()) return probe;
}
}
@@ -3657,7 +4351,7 @@ MaybeObject* JSObject::DefineGetterSetter(String* name,
}
} else {
// Lookup the name.
- LookupResult result;
+ LookupResult result(heap->isolate());
LocalLookup(name, &result);
if (result.IsProperty()) {
if (result.IsReadOnly()) return heap->undefined_value();
@@ -3687,8 +4381,8 @@ MaybeObject* JSObject::DefineGetterSetter(String* name,
bool JSObject::CanSetCallback(String* name) {
- ASSERT(!IsAccessCheckNeeded()
- || Isolate::Current()->MayNamedAccess(this, name, v8::ACCESS_SET));
+ ASSERT(!IsAccessCheckNeeded() ||
+ GetIsolate()->MayNamedAccess(this, name, v8::ACCESS_SET));
// Check if there is an API defined callback object which prohibits
// callback overwriting in this object or it's prototype chain.
@@ -3696,7 +4390,7 @@ bool JSObject::CanSetCallback(String* name) {
// certain accessors such as window.location should not be allowed
// to be overwritten because allowing overwriting could potentially
// cause security problems.
- LookupResult callback_result;
+ LookupResult callback_result(GetIsolate());
LookupCallback(name, &callback_result);
if (callback_result.IsProperty()) {
Object* obj = callback_result.GetCallbackObject();
@@ -3800,7 +4494,7 @@ MaybeObject* JSObject::DefineAccessor(String* name,
bool is_getter,
Object* fun,
PropertyAttributes attributes) {
- ASSERT(fun->IsJSFunction() || fun->IsUndefined());
+ ASSERT(fun->IsSpecFunction() || fun->IsUndefined());
Isolate* isolate = GetIsolate();
// Check access rights if needed.
if (IsAccessCheckNeeded() &&
@@ -3863,6 +4557,7 @@ MaybeObject* JSObject::DefineAccessor(AccessorInfo* info) {
// Accessors overwrite previous callbacks (cf. with getters/setters).
switch (GetElementsKind()) {
+ case FAST_SMI_ONLY_ELEMENTS:
case FAST_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
break;
@@ -3892,7 +4587,7 @@ MaybeObject* JSObject::DefineAccessor(AccessorInfo* info) {
}
} else {
// Lookup the name.
- LookupResult result;
+ LookupResult result(isolate);
LocalLookup(name, &result);
// ES5 forbids turning a property into an accessor if it's not
// configurable (that is IsDontDelete in ES3 and v8), see 8.6.1 (Table 5).
@@ -3925,7 +4620,11 @@ Object* JSObject::LookupAccessor(String* name, bool is_getter) {
}
// Make the lookup and include prototypes.
- int accessor_index = is_getter ? kGetterIndex : kSetterIndex;
+ // Introducing local constants below keeps the use of the static class
+ // constants purely static and avoids linker errors in debug builds with gcc.
+ const int getter_index = kGetterIndex;
+ const int setter_index = kSetterIndex;
+ int accessor_index = is_getter ? getter_index : setter_index;
uint32_t index = 0;
if (name->AsArrayIndex(&index)) {
for (Object* obj = this;
@@ -3950,7 +4649,7 @@ Object* JSObject::LookupAccessor(String* name, bool is_getter) {
for (Object* obj = this;
obj != heap->null_value();
obj = JSObject::cast(obj)->GetPrototype()) {
- LookupResult result;
+ LookupResult result(heap->isolate());
JSObject::cast(obj)->LocalLookup(name, &result);
if (result.IsProperty()) {
if (result.IsReadOnly()) return heap->undefined_value();
@@ -4058,7 +4757,7 @@ MaybeObject* Map::CopyNormalized(PropertyNormalizationMode mode,
Map::cast(result)->set_is_shared(sharing == SHARED_NORMALIZED_MAP);
#ifdef DEBUG
- if (Map::cast(result)->is_shared()) {
+ if (FLAG_verify_heap && Map::cast(result)->is_shared()) {
Map::cast(result)->SharedMapVerify();
}
#endif
@@ -4081,12 +4780,19 @@ MaybeObject* Map::CopyDropTransitions() {
return new_map;
}
+void Map::UpdateCodeCache(Handle<Map> map,
+ Handle<String> name,
+ Handle<Code> code) {
+ Isolate* isolate = map->GetIsolate();
+ CALL_HEAP_FUNCTION_VOID(isolate,
+ map->UpdateCodeCache(*name, *code));
+}
MaybeObject* Map::UpdateCodeCache(String* name, Code* code) {
// Allocate the code cache if not present.
if (code_cache()->IsFixedArray()) {
Object* result;
- { MaybeObject* maybe_result = code->heap()->AllocateCodeCache();
+ { MaybeObject* maybe_result = GetHeap()->AllocateCodeCache();
if (!maybe_result->ToObject(&result)) return maybe_result;
}
set_code_cache(result);
@@ -4128,7 +4834,7 @@ void Map::TraverseTransitionTree(TraverseCallback callback, void* data) {
// Traverse the transition tree without using a stack. We do this by
// reversing the pointers in the maps and descriptor arrays.
Map* current = this;
- Map* meta_map = heap()->meta_map();
+ Map* meta_map = GetHeap()->meta_map();
Object** map_or_index_field = NULL;
while (current != meta_map) {
DescriptorArray* d = reinterpret_cast<DescriptorArray*>(
@@ -4149,7 +4855,7 @@ void Map::TraverseTransitionTree(TraverseCallback callback, void* data) {
// of the next map and recording the index in the transition array in
// the map field of the array.
Map* next = Map::cast(contents->get(i));
- next->set_map(current);
+ next->set_map_unsafe(current);
*map_or_index_field = Smi::FromInt(i + 2);
current = next;
map_done = false;
@@ -4174,23 +4880,23 @@ void Map::TraverseTransitionTree(TraverseCallback callback, void* data) {
Object* perhaps_map = prototype_transitions->get(i);
if (perhaps_map->IsMap()) {
Map* next = Map::cast(perhaps_map);
- next->set_map(current);
+ next->set_map_unsafe(current);
*proto_map_or_index_field =
Smi::FromInt(i + kProtoTransitionElementsPerEntry);
current = next;
continue;
}
}
- *proto_map_or_index_field = heap()->fixed_array_map();
+ *proto_map_or_index_field = GetHeap()->fixed_array_map();
if (map_or_index_field != NULL) {
- *map_or_index_field = heap()->fixed_array_map();
+ *map_or_index_field = GetHeap()->fixed_array_map();
}
// The callback expects a map to have a real map as its map, so we save
// the map field, which is being used to track the traversal and put the
// correct map (the meta_map) in place while we do the callback.
Map* prev = current->map();
- current->set_map(meta_map);
+ current->set_map_unsafe(meta_map);
callback(current, data);
current = prev;
}
@@ -4406,7 +5112,7 @@ class CodeCacheHashTableKey : public HashTableKey {
MUST_USE_RESULT MaybeObject* AsObject() {
ASSERT(code_ != NULL);
Object* obj;
- { MaybeObject* maybe_obj = code_->heap()->AllocateFixedArray(2);
+ { MaybeObject* maybe_obj = code_->GetHeap()->AllocateFixedArray(2);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
FixedArray* pair = FixedArray::cast(obj);
@@ -4464,13 +5170,22 @@ int CodeCacheHashTable::GetIndex(String* name, Code::Flags flags) {
void CodeCacheHashTable::RemoveByIndex(int index) {
ASSERT(index >= 0);
Heap* heap = GetHeap();
- set(EntryToIndex(index), heap->null_value());
- set(EntryToIndex(index) + 1, heap->null_value());
+ set(EntryToIndex(index), heap->the_hole_value());
+ set(EntryToIndex(index) + 1, heap->the_hole_value());
ElementRemoved();
}
-MaybeObject* PolymorphicCodeCache::Update(MapList* maps,
+void PolymorphicCodeCache::Update(Handle<PolymorphicCodeCache> cache,
+ MapHandleList* maps,
+ Code::Flags flags,
+ Handle<Code> code) {
+ Isolate* isolate = cache->GetIsolate();
+ CALL_HEAP_FUNCTION_VOID(isolate, cache->Update(maps, flags, *code));
+}
+
+
+MaybeObject* PolymorphicCodeCache::Update(MapHandleList* maps,
Code::Flags flags,
Code* code) {
// Initialize cache if necessary.
@@ -4498,13 +5213,14 @@ MaybeObject* PolymorphicCodeCache::Update(MapList* maps,
}
-Object* PolymorphicCodeCache::Lookup(MapList* maps, Code::Flags flags) {
+Handle<Object> PolymorphicCodeCache::Lookup(MapHandleList* maps,
+ Code::Flags flags) {
if (!cache()->IsUndefined()) {
PolymorphicCodeCacheHashTable* hash_table =
PolymorphicCodeCacheHashTable::cast(cache());
- return hash_table->Lookup(maps, flags);
+ return Handle<Object>(hash_table->Lookup(maps, flags));
} else {
- return GetHeap()->undefined_value();
+ return GetIsolate()->factory()->undefined_value();
}
}
@@ -4515,12 +5231,12 @@ Object* PolymorphicCodeCache::Lookup(MapList* maps, Code::Flags flags) {
class PolymorphicCodeCacheHashTableKey : public HashTableKey {
public:
// Callers must ensure that |maps| outlives the newly constructed object.
- PolymorphicCodeCacheHashTableKey(MapList* maps, int code_flags)
+ PolymorphicCodeCacheHashTableKey(MapHandleList* maps, int code_flags)
: maps_(maps),
code_flags_(code_flags) {}
bool IsMatch(Object* other) {
- MapList other_maps(kDefaultListAllocationSize);
+ MapHandleList other_maps(kDefaultListAllocationSize);
int other_flags;
FromObject(other, &other_flags, &other_maps);
if (code_flags_ != other_flags) return false;
@@ -4536,7 +5252,7 @@ class PolymorphicCodeCacheHashTableKey : public HashTableKey {
for (int i = 0; i < maps_->length(); ++i) {
bool match_found = false;
for (int j = 0; j < other_maps.length(); ++j) {
- if (maps_->at(i)->EquivalentTo(other_maps.at(j))) {
+ if (maps_->at(i)->EquivalentTo(*other_maps.at(j))) {
match_found = true;
break;
}
@@ -4546,7 +5262,7 @@ class PolymorphicCodeCacheHashTableKey : public HashTableKey {
return true;
}
- static uint32_t MapsHashHelper(MapList* maps, int code_flags) {
+ static uint32_t MapsHashHelper(MapHandleList* maps, int code_flags) {
uint32_t hash = code_flags;
for (int i = 0; i < maps->length(); ++i) {
hash ^= maps->at(i)->Hash();
@@ -4559,7 +5275,7 @@ class PolymorphicCodeCacheHashTableKey : public HashTableKey {
}
uint32_t HashForObject(Object* obj) {
- MapList other_maps(kDefaultListAllocationSize);
+ MapHandleList other_maps(kDefaultListAllocationSize);
int other_flags;
FromObject(obj, &other_flags, &other_maps);
return MapsHashHelper(&other_maps, other_flags);
@@ -4577,29 +5293,32 @@ class PolymorphicCodeCacheHashTableKey : public HashTableKey {
FixedArray* list = FixedArray::cast(obj);
list->set(0, Smi::FromInt(code_flags_));
for (int i = 0; i < maps_->length(); ++i) {
- list->set(i + 1, maps_->at(i));
+ list->set(i + 1, *maps_->at(i));
}
return list;
}
private:
- static MapList* FromObject(Object* obj, int* code_flags, MapList* maps) {
+ static MapHandleList* FromObject(Object* obj,
+ int* code_flags,
+ MapHandleList* maps) {
FixedArray* list = FixedArray::cast(obj);
maps->Rewind(0);
*code_flags = Smi::cast(list->get(0))->value();
for (int i = 1; i < list->length(); ++i) {
- maps->Add(Map::cast(list->get(i)));
+ maps->Add(Handle<Map>(Map::cast(list->get(i))));
}
return maps;
}
- MapList* maps_; // weak.
+ MapHandleList* maps_; // weak.
int code_flags_;
static const int kDefaultListAllocationSize = kMaxKeyedPolymorphism + 1;
};
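
// MapsHashHelper in miniature: XOR-combining the per-map hashes makes the
// combined hash independent of map order, which is what IsMatch above
// requires, since it compares the two lists as sets rather than as
// sequences. Standalone sketch:
#include <stdint.h>
#include <vector>

uint32_t CombineMapHashes(const std::vector<uint32_t>& map_hashes,
                          uint32_t code_flags) {
  uint32_t hash = code_flags;
  for (size_t i = 0; i < map_hashes.size(); ++i) {
    hash ^= map_hashes[i];  // XOR is commutative: order does not matter.
  }
  return hash;
}
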
-Object* PolymorphicCodeCacheHashTable::Lookup(MapList* maps, int code_flags) {
+Object* PolymorphicCodeCacheHashTable::Lookup(MapHandleList* maps,
+ int code_flags) {
PolymorphicCodeCacheHashTableKey key(maps, code_flags);
int entry = FindEntry(&key);
if (entry == kNotFound) return GetHeap()->undefined_value();
@@ -4607,7 +5326,7 @@ Object* PolymorphicCodeCacheHashTable::Lookup(MapList* maps, int code_flags) {
}
-MaybeObject* PolymorphicCodeCacheHashTable::Put(MapList* maps,
+MaybeObject* PolymorphicCodeCacheHashTable::Put(MapHandleList* maps,
int code_flags,
Code* code) {
PolymorphicCodeCacheHashTableKey key(maps, code_flags);
@@ -4742,9 +5461,9 @@ void DescriptorArray::SetEnumCache(FixedArray* bridge_storage,
if (IsEmpty()) return; // Do nothing for empty descriptor array.
FixedArray::cast(bridge_storage)->
set(kEnumCacheBridgeCacheIndex, new_cache);
- fast_set(FixedArray::cast(bridge_storage),
- kEnumCacheBridgeEnumIndex,
- get(kEnumerationIndexIndex));
+ NoWriteBarrierSet(FixedArray::cast(bridge_storage),
+ kEnumCacheBridgeEnumIndex,
+ get(kEnumerationIndexIndex));
set(kEnumerationIndexIndex, bridge_storage);
}
}
@@ -4805,10 +5524,16 @@ MaybeObject* DescriptorArray::CopyInsert(Descriptor* descriptor,
++new_size;
}
}
+
+ DescriptorArray* new_descriptors;
{ MaybeObject* maybe_result = Allocate(new_size);
- if (!maybe_result->ToObject(&result)) return maybe_result;
+ if (!maybe_result->To<DescriptorArray>(&new_descriptors)) {
+ return maybe_result;
+ }
}
- DescriptorArray* new_descriptors = DescriptorArray::cast(result);
+
+ DescriptorArray::WhitenessWitness witness(new_descriptors);
+
// Set the enumeration index in the descriptors and set the enumeration index
// in the result.
int enumeration_index = NextEnumerationIndex();
@@ -4836,16 +5561,16 @@ MaybeObject* DescriptorArray::CopyInsert(Descriptor* descriptor,
}
if (IsNullDescriptor(from_index)) continue;
if (remove_transitions && IsTransition(from_index)) continue;
- new_descriptors->CopyFrom(to_index++, this, from_index);
+ new_descriptors->CopyFrom(to_index++, this, from_index, witness);
}
- new_descriptors->Set(to_index++, descriptor);
+ new_descriptors->Set(to_index++, descriptor, witness);
if (replacing) from_index++;
for (; from_index < number_of_descriptors(); from_index++) {
if (IsNullDescriptor(from_index)) continue;
if (remove_transitions && IsTransition(from_index)) continue;
- new_descriptors->CopyFrom(to_index++, this, from_index);
+ new_descriptors->CopyFrom(to_index++, this, from_index, witness);
}
ASSERT(to_index == new_descriptors->number_of_descriptors());
@@ -4867,16 +5592,21 @@ MaybeObject* DescriptorArray::RemoveTransitions() {
}
// Allocate the new descriptor array.
- Object* result;
+ DescriptorArray* new_descriptors;
{ MaybeObject* maybe_result = Allocate(number_of_descriptors() - num_removed);
- if (!maybe_result->ToObject(&result)) return maybe_result;
+ if (!maybe_result->To<DescriptorArray>(&new_descriptors)) {
+ return maybe_result;
+ }
}
- DescriptorArray* new_descriptors = DescriptorArray::cast(result);
+
+ DescriptorArray::WhitenessWitness witness(new_descriptors);
// Copy the content.
int next_descriptor = 0;
for (int i = 0; i < number_of_descriptors(); i++) {
- if (IsProperty(i)) new_descriptors->CopyFrom(next_descriptor++, this, i);
+ if (IsProperty(i)) {
+ new_descriptors->CopyFrom(next_descriptor++, this, i, witness);
+ }
}
ASSERT(next_descriptor == new_descriptors->number_of_descriptors());
@@ -4884,7 +5614,7 @@ MaybeObject* DescriptorArray::RemoveTransitions() {
}
-void DescriptorArray::SortUnchecked() {
+void DescriptorArray::SortUnchecked(const WhitenessWitness& witness) {
// In-place heap sort.
int len = number_of_descriptors();
@@ -4905,7 +5635,7 @@ void DescriptorArray::SortUnchecked() {
}
}
if (child_hash <= parent_hash) break;
- Swap(parent_index, child_index);
+ NoWriteBarrierSwapDescriptors(parent_index, child_index);
// Now element at child_index could be < its children.
parent_index = child_index; // parent_hash remains correct.
}
@@ -4914,8 +5644,8 @@ void DescriptorArray::SortUnchecked() {
// Extract elements and create sorted array.
for (int i = len - 1; i > 0; --i) {
// Put max element at the back of the array.
- Swap(0, i);
- // Sift down the new top element.
+ NoWriteBarrierSwapDescriptors(0, i);
+ // Sift down the new top element.
int parent_index = 0;
const uint32_t parent_hash = GetKey(parent_index)->Hash();
const int max_parent_index = (i / 2) - 1;
@@ -4930,15 +5660,15 @@ void DescriptorArray::SortUnchecked() {
}
}
if (child_hash <= parent_hash) break;
- Swap(parent_index, child_index);
+ NoWriteBarrierSwapDescriptors(parent_index, child_index);
parent_index = child_index;
}
}
}
-void DescriptorArray::Sort() {
- SortUnchecked();
+void DescriptorArray::Sort(const WhitenessWitness& witness) {
+ SortUnchecked(witness);
SLOW_ASSERT(IsSortedNoDuplicates());
}
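
// The in-place heapsort used above, reduced to its core on an int array:
// build a max-heap, then repeatedly move the maximum to the back and sift
// the new root down. Sketch only; the real code keys on descriptor name
// hashes and must swap without write barriers under the WhitenessWitness.
#include <algorithm>
#include <vector>

static void SiftDown(std::vector<int>* a, int parent, int end) {
  while (true) {
    int child = 2 * parent + 1;
    if (child >= end) break;
    if (child + 1 < end && (*a)[child + 1] > (*a)[child]) child++;
    if ((*a)[child] <= (*a)[parent]) break;
    std::swap((*a)[parent], (*a)[child]);
    parent = child;  // Continue sifting from the child position.
  }
}

void HeapSort(std::vector<int>* a) {
  int len = static_cast<int>(a->size());
  for (int i = len / 2 - 1; i >= 0; --i) SiftDown(a, i, len);  // Heapify.
  for (int i = len - 1; i > 0; --i) {
    std::swap((*a)[0], (*a)[i]);  // Max element to the back of the array.
    SiftDown(a, 0, i);            // Restore the heap on the prefix.
  }
}
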
@@ -5023,24 +5753,6 @@ bool String::LooksValid() {
}
-int String::Utf8Length() {
- if (IsAsciiRepresentation()) return length();
- // Attempt to flatten before accessing the string. It probably
- // doesn't make Utf8Length faster, but it is very likely that
- // the string will be accessed later (for example by WriteUtf8)
- // so it's still a good idea.
- Heap* heap = GetHeap();
- TryFlatten();
- Access<StringInputBuffer> buffer(
- heap->isolate()->objects_string_input_buffer());
- buffer->Reset(0, this);
- int result = 0;
- while (buffer->has_more())
- result += unibrow::Utf8::Length(buffer->GetNext());
- return result;
-}
-
-
String::FlatContent String::GetFlatContent() {
int length = this->length();
StringShape shape(this);
@@ -5067,7 +5779,7 @@ String::FlatContent String::GetFlatContent() {
if (shape.representation_tag() == kSeqStringTag) {
start = SeqAsciiString::cast(string)->GetChars();
} else {
- start = ExternalAsciiString::cast(string)->resource()->data();
+ start = ExternalAsciiString::cast(string)->GetChars();
}
return FlatContent(Vector<const char>(start + offset, length));
} else {
@@ -5076,7 +5788,7 @@ String::FlatContent String::GetFlatContent() {
if (shape.representation_tag() == kSeqStringTag) {
start = SeqTwoByteString::cast(string)->GetChars();
} else {
- start = ExternalTwoByteString::cast(string)->resource()->data();
+ start = ExternalTwoByteString::cast(string)->GetChars();
}
return FlatContent(Vector<const uc16>(start + offset, length));
}
@@ -5102,12 +5814,9 @@ SmartArrayPointer<char> String::ToCString(AllowNullsFlag allow_nulls,
buffer->Reset(offset, this);
int character_position = offset;
int utf8_bytes = 0;
- while (buffer->has_more()) {
+ while (buffer->has_more() && character_position++ < offset + length) {
uint16_t character = buffer->GetNext();
- if (character_position < offset + length) {
- utf8_bytes += unibrow::Utf8::Length(character);
- }
- character_position++;
+ utf8_bytes += unibrow::Utf8::Length(character);
}
if (length_return) {
@@ -5121,16 +5830,13 @@ SmartArrayPointer<char> String::ToCString(AllowNullsFlag allow_nulls,
buffer->Seek(offset);
character_position = offset;
int utf8_byte_position = 0;
- while (buffer->has_more()) {
+ while (buffer->has_more() && character_position++ < offset + length) {
uint16_t character = buffer->GetNext();
- if (character_position < offset + length) {
- if (allow_nulls == DISALLOW_NULLS && character == 0) {
- character = ' ';
- }
- utf8_byte_position +=
- unibrow::Utf8::Encode(result + utf8_byte_position, character);
+ if (allow_nulls == DISALLOW_NULLS && character == 0) {
+ character = ' ';
}
- character_position++;
+ utf8_byte_position +=
+ unibrow::Utf8::Encode(result + utf8_byte_position, character);
}
result[utf8_byte_position] = 0;
return SmartArrayPointer<char>(result);
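
// The two-pass shape of ToCString above, standalone: pass one measures the
// UTF-8 byte length, pass two encodes into an exactly-sized buffer. The
// sketch encodes each UTF-16 code unit independently (surrogate pairs and
// the DISALLOW_NULLS substitution are omitted for brevity); Utf8Len is an
// illustrative stand-in for unibrow::Utf8::Length.
#include <stdint.h>
#include <string>
#include <vector>

static size_t Utf8Len(uint16_t c) {
  return c < 0x80 ? 1 : (c < 0x800 ? 2 : 3);
}

std::string ToUtf8(const std::vector<uint16_t>& units) {
  size_t bytes = 0;
  for (size_t i = 0; i < units.size(); ++i) bytes += Utf8Len(units[i]);
  std::string out;
  out.reserve(bytes);  // Pass 1 told us the exact size.
  for (size_t i = 0; i < units.size(); ++i) {  // Pass 2: encode.
    uint16_t c = units[i];
    if (c < 0x80) {
      out.push_back(static_cast<char>(c));
    } else if (c < 0x800) {
      out.push_back(static_cast<char>(0xC0 | (c >> 6)));
      out.push_back(static_cast<char>(0x80 | (c & 0x3F)));
    } else {
      out.push_back(static_cast<char>(0xE0 | (c >> 12)));
      out.push_back(static_cast<char>(0x80 | ((c >> 6) & 0x3F)));
      out.push_back(static_cast<char>(0x80 | (c & 0x3F)));
    }
  }
  return out;
}
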
@@ -5315,44 +6021,26 @@ const unibrow::byte* ConsString::ConsStringReadBlock(ReadBlockBuffer* rbb,
}
-uint16_t ExternalAsciiString::ExternalAsciiStringGet(int index) {
- ASSERT(index >= 0 && index < length());
- return resource()->data()[index];
-}
-
-
const unibrow::byte* ExternalAsciiString::ExternalAsciiStringReadBlock(
unsigned* remaining,
unsigned* offset_ptr,
unsigned max_chars) {
// Cast const char* to unibrow::byte* (signedness difference).
const unibrow::byte* b =
- reinterpret_cast<const unibrow::byte*>(resource()->data()) + *offset_ptr;
+ reinterpret_cast<const unibrow::byte*>(GetChars()) + *offset_ptr;
*remaining = max_chars;
*offset_ptr += max_chars;
return b;
}
-const uc16* ExternalTwoByteString::ExternalTwoByteStringGetData(
- unsigned start) {
- return resource()->data() + start;
-}
-
-
-uint16_t ExternalTwoByteString::ExternalTwoByteStringGet(int index) {
- ASSERT(index >= 0 && index < length());
- return resource()->data()[index];
-}
-
-
void ExternalTwoByteString::ExternalTwoByteStringReadBlockIntoBuffer(
ReadBlockBuffer* rbb,
unsigned* offset_ptr,
unsigned max_chars) {
unsigned chars_read = 0;
unsigned offset = *offset_ptr;
- const uint16_t* data = resource()->data();
+ const uint16_t* data = GetChars();
while (chars_read < max_chars) {
uint16_t c = data[offset];
if (c <= kMaxAsciiCharCode) {
@@ -5398,9 +6086,7 @@ void ExternalAsciiString::ExternalAsciiStringReadBlockIntoBuffer(
unsigned max_chars) {
unsigned capacity = rbb->capacity - rbb->cursor;
if (max_chars > capacity) max_chars = capacity;
- memcpy(rbb->util_buffer + rbb->cursor,
- resource()->data() + *offset_ptr,
- max_chars);
+ memcpy(rbb->util_buffer + rbb->cursor, GetChars() + *offset_ptr, max_chars);
rbb->remaining += max_chars;
*offset_ptr += max_chars;
rbb->cursor += max_chars;
@@ -5464,6 +6150,73 @@ const unibrow::byte* String::ReadBlock(String* input,
}
+// This method determines the type of string involved and then computes the
+// UTF-8 length of the string. It doesn't flatten the string, and its
+// recursion depth is O(log n) for a string of length n.
+int String::Utf8Length(String* input, int from, int to) {
+ if (from == to) return 0;
+ int total = 0;
+ while (true) {
+ if (input->IsAsciiRepresentation()) return total + to - from;
+ switch (StringShape(input).representation_tag()) {
+ case kConsStringTag: {
+ ConsString* str = ConsString::cast(input);
+ String* first = str->first();
+ String* second = str->second();
+ int first_length = first->length();
+ if (first_length - from < to - first_length) {
+ if (first_length > from) {
+ // Left hand side is shorter.
+ total += Utf8Length(first, from, first_length);
+ input = second;
+ from = 0;
+ to -= first_length;
+ } else {
+ // We only need the right hand side.
+ input = second;
+ from -= first_length;
+ to -= first_length;
+ }
+ } else {
+ if (first_length <= to) {
+ // Right hand side is shorter.
+ total += Utf8Length(second, 0, to - first_length);
+ input = first;
+ to = first_length;
+ } else {
+ // We only need the left hand side.
+ input = first;
+ }
+ }
+ continue;
+ }
+ case kExternalStringTag:
+ case kSeqStringTag: {
+ Vector<const uc16> vector = input->GetFlatContent().ToUC16Vector();
+ const uc16* p = vector.start();
+ for (int i = from; i < to; i++) {
+ total += unibrow::Utf8::Length(p[i]);
+ }
+ return total;
+ }
+ case kSlicedStringTag: {
+ SlicedString* str = SlicedString::cast(input);
+ int offset = str->offset();
+ input = str->parent();
+ from += offset;
+ to += offset;
+ continue;
+ }
+ default:
+ break;
+ }
+ UNREACHABLE();
+ return 0;
+ }
+ return 0;
+}
+
+
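// The traversal above on a plain binary tree of concatenations: recurse
// into the shorter side and loop on the longer one, so the recursion depth
// stays logarithmic in the string length. Illustrative node type; leaves
// carry a per-character cost here instead of real character payloads.
struct ConsNode {
  bool is_leaf;
  int length;      // Total character length of this subtree.
  int char_cost;   // Per-character cost at a leaf (UTF-8 stand-in).
  ConsNode* first;
  ConsNode* second;
};

int CostInRange(ConsNode* node, int from, int to) {
  int total = 0;
  while (!node->is_leaf) {
    int first_length = node->first->length;
    if (first_length - from < to - first_length) {
      // The left remainder is shorter: recurse left, loop on the right.
      if (first_length > from) {
        total += CostInRange(node->first, from, first_length);
        from = 0;
      } else {
        from -= first_length;
      }
      node = node->second;
      to -= first_length;
    } else {
      // The right part is shorter: recurse right, loop on the left.
      if (first_length <= to) {
        total += CostInRange(node->second, 0, to - first_length);
        to = first_length;
      }
      node = node->first;
    }
  }
  return total + (to - from) * node->char_cost;
}
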
void Relocatable::PostGarbageCollectionProcessing() {
Isolate* isolate = Isolate::Current();
Relocatable* current = isolate->relocatable_top();
@@ -5775,13 +6528,13 @@ void String::WriteToFlat(String* src,
switch (StringShape(source).full_representation_tag()) {
case kAsciiStringTag | kExternalStringTag: {
CopyChars(sink,
- ExternalAsciiString::cast(source)->resource()->data() + from,
+ ExternalAsciiString::cast(source)->GetChars() + from,
to - from);
return;
}
case kTwoByteStringTag | kExternalStringTag: {
const uc16* data =
- ExternalTwoByteString::cast(source)->resource()->data();
+ ExternalTwoByteString::cast(source)->GetChars();
CopyChars(sink,
data + from,
to - from);
@@ -5995,7 +6748,7 @@ bool String::MarkAsUndetectable() {
if (StringShape(this).IsSymbol()) return false;
Map* map = this->map();
- Heap* heap = map->heap();
+ Heap* heap = GetHeap();
if (map == heap->string_map()) {
this->set_map(heap->undetectable_string_map());
return true;
@@ -6198,29 +6951,43 @@ void String::PrintOn(FILE* file) {
}
+void Map::CreateOneBackPointer(Map* target) {
+#ifdef DEBUG
+ // Verify target.
+ Object* source_prototype = prototype();
+ Object* target_prototype = target->prototype();
+ ASSERT(source_prototype->IsJSReceiver() ||
+ source_prototype->IsMap() ||
+ source_prototype->IsNull());
+ ASSERT(target_prototype->IsJSReceiver() ||
+ target_prototype->IsNull());
+ ASSERT(source_prototype->IsMap() ||
+ source_prototype == target_prototype);
+#endif
+ // Point target back to source. set_prototype() will not let us set
+ // the prototype to a map, as we do here.
+ *RawField(target, kPrototypeOffset) = this;
+}
+
+
void Map::CreateBackPointers() {
DescriptorArray* descriptors = instance_descriptors();
for (int i = 0; i < descriptors->number_of_descriptors(); i++) {
- if (descriptors->GetType(i) == MAP_TRANSITION ||
- descriptors->GetType(i) == ELEMENTS_TRANSITION ||
- descriptors->GetType(i) == CONSTANT_TRANSITION) {
- // Get target.
- Map* target = Map::cast(descriptors->GetValue(i));
-#ifdef DEBUG
- // Verify target.
- Object* source_prototype = prototype();
- Object* target_prototype = target->prototype();
- ASSERT(source_prototype->IsJSObject() ||
- source_prototype->IsMap() ||
- source_prototype->IsNull());
- ASSERT(target_prototype->IsJSObject() ||
- target_prototype->IsNull());
- ASSERT(source_prototype->IsMap() ||
- source_prototype == target_prototype);
-#endif
- // Point target back to source. set_prototype() will not let us set
- // the prototype to a map, as we do here.
- *RawField(target, kPrototypeOffset) = this;
+ if (descriptors->IsTransition(i)) {
+ Object* object = reinterpret_cast<Object*>(descriptors->GetValue(i));
+ if (object->IsMap()) {
+ CreateOneBackPointer(reinterpret_cast<Map*>(object));
+ } else {
+ ASSERT(object->IsFixedArray());
+ ASSERT(descriptors->GetType(i) == ELEMENTS_TRANSITION);
+ FixedArray* array = reinterpret_cast<FixedArray*>(object);
+ for (int i = 0; i < array->length(); ++i) {
+ Map* target = reinterpret_cast<Map*>(array->get(i));
+ if (!target->IsUndefined()) {
+ CreateOneBackPointer(target);
+ }
+ }
+ }
}
}
}
@@ -6244,19 +7011,47 @@ void Map::ClearNonLiveTransitions(Heap* heap, Object* real_prototype) {
// map is not reached again by following a back pointer from a
// non-live object.
PropertyDetails details(Smi::cast(contents->get(i + 1)));
- if (details.type() == MAP_TRANSITION ||
- details.type() == ELEMENTS_TRANSITION ||
- details.type() == CONSTANT_TRANSITION) {
- Map* target = reinterpret_cast<Map*>(contents->get(i));
- ASSERT(target->IsHeapObject());
- if (!target->IsMarked()) {
- ASSERT(target->IsMap());
- contents->set_unchecked(i + 1, NullDescriptorDetails);
- contents->set_null_unchecked(heap, i);
- ASSERT(target->prototype() == this ||
- target->prototype() == real_prototype);
- // Getter prototype() is read-only, set_prototype() has side effects.
- *RawField(target, Map::kPrototypeOffset) = real_prototype;
+ if (IsTransitionType(details.type())) {
+ Object* object = reinterpret_cast<Object*>(contents->get(i));
+ if (object->IsMap()) {
+ Map* target = reinterpret_cast<Map*>(object);
+ ASSERT(target->IsHeapObject());
+ MarkBit map_mark = Marking::MarkBitFrom(target);
+ if (!map_mark.Get()) {
+ ASSERT(target->IsMap());
+ contents->set_unchecked(i + 1, NullDescriptorDetails);
+ contents->set_null_unchecked(heap, i);
+ ASSERT(target->prototype() == this ||
+ target->prototype() == real_prototype);
+ // Getter prototype() is read-only, set_prototype() has side effects.
+ *RawField(target, Map::kPrototypeOffset) = real_prototype;
+ }
+ } else {
+ ASSERT(object->IsFixedArray());
+ ASSERT(details.type() == ELEMENTS_TRANSITION);
+ FixedArray* array = reinterpret_cast<FixedArray*>(object);
+ bool reachable_map_found = false;
+ for (int j = 0; j < array->length(); ++j) {
+ Map* target = reinterpret_cast<Map*>(array->get(j));
+ ASSERT(target->IsHeapObject());
+ MarkBit map_mark = Marking::MarkBitFrom(target);
+ if (!map_mark.Get()) {
+ ASSERT(target->IsMap());
+ array->set_undefined(j);
+ ASSERT(target->prototype() == this ||
+ target->prototype() == real_prototype);
+ // Getter prototype() is read-only, set_prototype() has side
+ // effects.
+ *RawField(target, Map::kPrototypeOffset) = real_prototype;
+ } else if (target->IsMap()) {
+ reachable_map_found = true;
+ }
+ }
+ // If no reachable map remains, null out the descriptor so the
+ // FixedArray gets collected as well.
+ if (!reachable_map_found) {
+ contents->set_unchecked(i + 1, NullDescriptorDetails);
+ contents->set_null_unchecked(heap, i);
+ }
}
}
}
@@ -6315,6 +7110,57 @@ void JSFunction::MarkForLazyRecompilation() {
}
+bool SharedFunctionInfo::EnsureCompiled(Handle<SharedFunctionInfo> shared,
+ ClearExceptionFlag flag) {
+ return shared->is_compiled() || CompileLazy(shared, flag);
+}
+
+
+static bool CompileLazyHelper(CompilationInfo* info,
+ ClearExceptionFlag flag) {
+ // Compile the source information to a code object.
+ ASSERT(info->IsOptimizing() || !info->shared_info()->is_compiled());
+ ASSERT(!info->isolate()->has_pending_exception());
+ bool result = Compiler::CompileLazy(info);
+ ASSERT(result != Isolate::Current()->has_pending_exception());
+ if (!result && flag == CLEAR_EXCEPTION) {
+ info->isolate()->clear_pending_exception();
+ }
+ return result;
+}
+
+
+bool SharedFunctionInfo::CompileLazy(Handle<SharedFunctionInfo> shared,
+ ClearExceptionFlag flag) {
+ CompilationInfo info(shared);
+ return CompileLazyHelper(&info, flag);
+}
+
+
+bool JSFunction::CompileLazy(Handle<JSFunction> function,
+ ClearExceptionFlag flag) {
+ bool result = true;
+ if (function->shared()->is_compiled()) {
+ function->ReplaceCode(function->shared()->code());
+ function->shared()->set_code_age(0);
+ } else {
+ CompilationInfo info(function);
+ result = CompileLazyHelper(&info, flag);
+ ASSERT(!result || function->is_compiled());
+ }
+ return result;
+}
+
+
+bool JSFunction::CompileOptimized(Handle<JSFunction> function,
+ int osr_ast_id,
+ ClearExceptionFlag flag) {
+ CompilationInfo info(function);
+ info.SetOptimizing(osr_ast_id);
+ return CompileLazyHelper(&info, flag);
+}
+
+
bool JSFunction::IsInlineable() {
if (IsBuiltin()) return false;
SharedFunctionInfo* shared_info = shared();
@@ -6362,7 +7208,7 @@ MaybeObject* JSFunction::SetPrototype(Object* value) {
if (!maybe_new_map->ToObject(&new_object)) return maybe_new_map;
}
Map* new_map = Map::cast(new_object);
- Heap* heap = new_map->heap();
+ Heap* heap = new_map->GetHeap();
set_map(new_map);
new_map->set_constructor(value);
new_map->set_non_instance_prototype(true);
@@ -6379,21 +7225,21 @@ MaybeObject* JSFunction::SetPrototype(Object* value) {
Object* JSFunction::RemovePrototype() {
Context* global_context = context()->global_context();
- Map* no_prototype_map = shared()->strict_mode()
- ? global_context->strict_mode_function_without_prototype_map()
- : global_context->function_without_prototype_map();
+ Map* no_prototype_map = shared()->is_classic_mode()
+ ? global_context->function_without_prototype_map()
+ : global_context->strict_mode_function_without_prototype_map();
if (map() == no_prototype_map) {
// Be idempotent.
return this;
}
- ASSERT(!shared()->strict_mode() ||
- map() == global_context->strict_mode_function_map());
- ASSERT(shared()->strict_mode() || map() == global_context->function_map());
+ ASSERT(map() == (shared()->is_classic_mode()
+ ? global_context->function_map()
+ : global_context->strict_mode_function_map()));
set_map(no_prototype_map);
- set_prototype_or_initial_map(no_prototype_map->heap()->the_hole_value());
+ set_prototype_or_initial_map(no_prototype_map->GetHeap()->the_hole_value());
return this;
}
@@ -6497,7 +7343,7 @@ bool SharedFunctionInfo::CanGenerateInlineConstructor(Object* prototype) {
obj = obj->GetPrototype()) {
JSObject* js_object = JSObject::cast(obj);
for (int i = 0; i < this_property_assignments_count(); i++) {
- LookupResult result;
+ LookupResult result(heap->isolate());
String* name = GetThisPropertyAssignmentName(i);
js_object->LocalLookupRealNamedProperty(name, &result);
if (result.IsProperty() && result.type() == CALLBACKS) {
@@ -6686,6 +7532,8 @@ bool SharedFunctionInfo::VerifyBailoutId(int id) {
void SharedFunctionInfo::StartInobjectSlackTracking(Map* map) {
ASSERT(!IsInobjectSlackTrackingInProgress());
+ if (!FLAG_clever_optimizations) return;
+
// Only initiate the tracking the first time.
if (live_objects_may_exist()) return;
set_live_objects_may_exist(true);
@@ -6701,7 +7549,7 @@ void SharedFunctionInfo::StartInobjectSlackTracking(Map* map) {
set_construction_count(kGenerousAllocationCount);
}
set_initial_map(map);
- Builtins* builtins = map->heap()->isolate()->builtins();
+ Builtins* builtins = map->GetHeap()->isolate()->builtins();
ASSERT_EQ(builtins->builtin(Builtins::kJSConstructStubGeneric),
construct_stub());
set_construct_stub(builtins->builtin(Builtins::kJSConstructStubCountdown));
@@ -6721,8 +7569,9 @@ void SharedFunctionInfo::DetachInitialMap() {
  // then StartInobjectSlackTracking will be called again the next time the
// constructor is called. The countdown will continue and (possibly after
// several more GCs) CompleteInobjectSlackTracking will eventually be called.
- set_initial_map(map->heap()->raw_unchecked_undefined_value());
- Builtins* builtins = map->heap()->isolate()->builtins();
+ Heap* heap = map->GetHeap();
+ set_initial_map(heap->raw_unchecked_undefined_value());
+ Builtins* builtins = heap->isolate()->builtins();
ASSERT_EQ(builtins->builtin(Builtins::kJSConstructStubCountdown),
*RawField(this, kConstructStubOffset));
set_construct_stub(builtins->builtin(Builtins::kJSConstructStubGeneric));
@@ -6738,7 +7587,7 @@ void SharedFunctionInfo::AttachInitialMap(Map* map) {
// Resume inobject slack tracking.
set_initial_map(map);
- Builtins* builtins = map->heap()->isolate()->builtins();
+ Builtins* builtins = map->GetHeap()->isolate()->builtins();
ASSERT_EQ(builtins->builtin(Builtins::kJSConstructStubGeneric),
*RawField(this, kConstructStubOffset));
set_construct_stub(builtins->builtin(Builtins::kJSConstructStubCountdown));
@@ -6770,7 +7619,7 @@ void SharedFunctionInfo::CompleteInobjectSlackTracking() {
ASSERT(live_objects_may_exist() && IsInobjectSlackTrackingInProgress());
Map* map = Map::cast(initial_map());
- Heap* heap = map->heap();
+ Heap* heap = map->GetHeap();
set_initial_map(heap->undefined_value());
Builtins* builtins = heap->isolate()->builtins();
ASSERT_EQ(builtins->builtin(Builtins::kJSConstructStubCountdown),
@@ -6832,8 +7681,18 @@ void ObjectVisitor::VisitDebugTarget(RelocInfo* rinfo) {
}
+void ObjectVisitor::VisitEmbeddedPointer(RelocInfo* rinfo) {
+ ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
+ VisitPointer(rinfo->target_object_address());
+}
+
+void ObjectVisitor::VisitExternalReference(RelocInfo* rinfo) {
+ Address* p = rinfo->target_reference_address();
+ VisitExternalReferences(p, p + 1);
+}
+
void Code::InvalidateRelocation() {
- set_relocation_info(heap()->empty_byte_array());
+ set_relocation_info(GetHeap()->empty_byte_array());
}
@@ -6846,6 +7705,8 @@ void Code::Relocate(intptr_t delta) {
void Code::CopyFrom(const CodeDesc& desc) {
+ ASSERT(Marking::Color(this) == Marking::WHITE_OBJECT);
+
// copy code
memmove(instruction_start(), desc.buffer, desc.instr_size);
@@ -6865,16 +7726,17 @@ void Code::CopyFrom(const CodeDesc& desc) {
RelocInfo::Mode mode = it.rinfo()->rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
Handle<Object> p = it.rinfo()->target_object_handle(origin);
- it.rinfo()->set_target_object(*p);
+ it.rinfo()->set_target_object(*p, SKIP_WRITE_BARRIER);
} else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
- Handle<JSGlobalPropertyCell> cell = it.rinfo()->target_cell_handle();
- it.rinfo()->set_target_cell(*cell);
+ Handle<JSGlobalPropertyCell> cell = it.rinfo()->target_cell_handle();
+ it.rinfo()->set_target_cell(*cell, SKIP_WRITE_BARRIER);
} else if (RelocInfo::IsCodeTarget(mode)) {
// rewrite code handles in inline cache targets to direct
// pointers to the first instruction in the code object
Handle<Object> p = it.rinfo()->target_object_handle(origin);
Code* code = Code::cast(*p);
- it.rinfo()->set_target_address(code->instruction_start());
+ it.rinfo()->set_target_address(code->instruction_start(),
+ SKIP_WRITE_BARRIER);
} else {
it.rinfo()->apply(delta);
}
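The SKIP_WRITE_BARRIER arguments are only safe because of the whiteness
assert added at the top of CopyFrom: a freshly allocated code object cannot
yet be black under incremental marking. A compilable sketch of the invariant
relied on here (a simplification, not V8's actual barrier logic):

    // A store needs a barrier only when it could hide a white value behind
    // an already-black host object; a still-white host never can.
    inline bool NeedsIncrementalBarrier(bool host_is_black,
                                        bool value_is_white) {
      return host_is_black && value_is_white;
    }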
@@ -6973,11 +7835,14 @@ void DeoptimizationInputData::DeoptimizationInputDataPrint(FILE* out) {
PrintF(out, "Deoptimization Input Data (deopt points = %d)\n", deopt_count);
if (0 == deopt_count) return;
- PrintF(out, "%6s %6s %6s %12s\n", "index", "ast id", "argc",
+ PrintF(out, "%6s %6s %6s %6s %12s\n", "index", "ast id", "argc", "pc",
FLAG_print_code_verbose ? "commands" : "");
for (int i = 0; i < deopt_count; i++) {
- PrintF(out, "%6d %6d %6d",
- i, AstId(i)->value(), ArgumentsStackHeight(i)->value());
+ PrintF(out, "%6d %6d %6d %6d",
+ i,
+ AstId(i)->value(),
+ ArgumentsStackHeight(i)->value(),
+ Pc(i)->value());
if (!FLAG_print_code_verbose) {
PrintF(out, "\n");
@@ -7138,7 +8003,7 @@ const char* Code::PropertyType2String(PropertyType type) {
case CONSTANT_TRANSITION: return "CONSTANT_TRANSITION";
case NULL_DESCRIPTOR: return "NULL_DESCRIPTOR";
}
- UNREACHABLE();
+ UNREACHABLE(); // keep the compiler happy
return NULL;
}
@@ -7270,8 +8135,10 @@ static void CopySlowElementsToFast(NumberDictionary* source,
}
-MaybeObject* JSObject::SetFastElementsCapacityAndLength(int capacity,
- int length) {
+MaybeObject* JSObject::SetFastElementsCapacityAndLength(
+ int capacity,
+ int length,
+ SetFastElementsCapacityMode set_capacity_mode) {
Heap* heap = GetHeap();
// We should never end in here with a pixel or external array.
ASSERT(!HasExternalArrayElements());
@@ -7288,16 +8155,27 @@ MaybeObject* JSObject::SetFastElementsCapacityAndLength(int capacity,
Map* new_map = NULL;
if (elements()->map() != heap->non_strict_arguments_elements_map()) {
Object* object;
- MaybeObject* maybe = map()->GetFastElementsMap();
+ bool has_fast_smi_only_elements =
+ (set_capacity_mode == kAllowSmiOnlyElements) &&
+ (elements()->map()->has_fast_smi_only_elements() ||
+ elements() == heap->empty_fixed_array());
+ ElementsKind elements_kind = has_fast_smi_only_elements
+ ? FAST_SMI_ONLY_ELEMENTS
+ : FAST_ELEMENTS;
+ MaybeObject* maybe = GetElementsTransitionMap(elements_kind);
if (!maybe->ToObject(&object)) return maybe;
new_map = Map::cast(object);
}
- switch (GetElementsKind()) {
+ FixedArrayBase* old_elements_raw = elements();
+ ElementsKind elements_kind = GetElementsKind();
+ switch (elements_kind) {
+ case FAST_SMI_ONLY_ELEMENTS:
case FAST_ELEMENTS: {
AssertNoAllocation no_gc;
- WriteBarrierMode mode = new_elements->GetWriteBarrierMode(no_gc);
- CopyFastElementsToFast(FixedArray::cast(elements()), new_elements, mode);
+ WriteBarrierMode mode(new_elements->GetWriteBarrierMode(no_gc));
+ CopyFastElementsToFast(FixedArray::cast(old_elements_raw),
+ new_elements, mode);
set_map(new_map);
set_elements(new_elements);
break;
@@ -7305,7 +8183,7 @@ MaybeObject* JSObject::SetFastElementsCapacityAndLength(int capacity,
case DICTIONARY_ELEMENTS: {
AssertNoAllocation no_gc;
WriteBarrierMode mode = new_elements->GetWriteBarrierMode(no_gc);
- CopySlowElementsToFast(NumberDictionary::cast(elements()),
+ CopySlowElementsToFast(NumberDictionary::cast(old_elements_raw),
new_elements,
mode);
set_map(new_map);
@@ -7317,7 +8195,7 @@ MaybeObject* JSObject::SetFastElementsCapacityAndLength(int capacity,
WriteBarrierMode mode = new_elements->GetWriteBarrierMode(no_gc);
// The object's map and the parameter map are unchanged, the unaliased
// arguments are copied to the new backing store.
- FixedArray* parameter_map = FixedArray::cast(elements());
+ FixedArray* parameter_map = FixedArray::cast(old_elements_raw);
FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
if (arguments->IsDictionary()) {
CopySlowElementsToFast(NumberDictionary::cast(arguments),
@@ -7330,7 +8208,7 @@ MaybeObject* JSObject::SetFastElementsCapacityAndLength(int capacity,
break;
}
case FAST_DOUBLE_ELEMENTS: {
- FixedDoubleArray* old_elements = FixedDoubleArray::cast(elements());
+ FixedDoubleArray* old_elements = FixedDoubleArray::cast(old_elements_raw);
uint32_t old_length = static_cast<uint32_t>(old_elements->length());
// Fill out the new array with this content and array holes.
for (uint32_t i = 0; i < old_length; i++) {
@@ -7368,6 +8246,11 @@ MaybeObject* JSObject::SetFastElementsCapacityAndLength(int capacity,
break;
}
+ if (FLAG_trace_elements_transitions) {
+ PrintElementsTransition(stdout, elements_kind, old_elements_raw,
+ FAST_ELEMENTS, new_elements);
+ }
+
// Update the length if necessary.
if (IsJSArray()) {
JSArray::cast(this)->set_length(Smi::FromInt(length));
@@ -7391,23 +8274,27 @@ MaybeObject* JSObject::SetFastDoubleElementsCapacityAndLength(
}
FixedDoubleArray* elems = FixedDoubleArray::cast(obj);
- { MaybeObject* maybe_obj = map()->GetFastDoubleElementsMap();
+ { MaybeObject* maybe_obj =
+ GetElementsTransitionMap(FAST_DOUBLE_ELEMENTS);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
Map* new_map = Map::cast(obj);
+ FixedArrayBase* old_elements = elements();
+ ElementsKind elements_kind(GetElementsKind());
AssertNoAllocation no_gc;
- switch (GetElementsKind()) {
+ switch (elements_kind) {
+ case FAST_SMI_ONLY_ELEMENTS:
case FAST_ELEMENTS: {
- elems->Initialize(FixedArray::cast(elements()));
+ elems->Initialize(FixedArray::cast(old_elements));
break;
}
case FAST_DOUBLE_ELEMENTS: {
- elems->Initialize(FixedDoubleArray::cast(elements()));
+ elems->Initialize(FixedDoubleArray::cast(old_elements));
break;
}
case DICTIONARY_ELEMENTS: {
- elems->Initialize(NumberDictionary::cast(elements()));
+ elems->Initialize(NumberDictionary::cast(old_elements));
break;
}
default:
@@ -7415,6 +8302,11 @@ MaybeObject* JSObject::SetFastDoubleElementsCapacityAndLength(
break;
}
+ if (FLAG_trace_elements_transitions) {
+ PrintElementsTransition(stdout, elements_kind, old_elements,
+ FAST_DOUBLE_ELEMENTS, elems);
+ }
+
ASSERT(new_map->has_fast_double_elements());
set_map(new_map);
ASSERT(elems->IsFixedDoubleArray());
@@ -7428,53 +8320,6 @@ MaybeObject* JSObject::SetFastDoubleElementsCapacityAndLength(
}
-MaybeObject* JSObject::SetSlowElements(Object* len) {
- // We should never end in here with a pixel or external array.
- ASSERT(!HasExternalArrayElements());
-
- uint32_t new_length = static_cast<uint32_t>(len->Number());
-
- switch (GetElementsKind()) {
- case FAST_ELEMENTS: {
- case FAST_DOUBLE_ELEMENTS:
- // Make sure we never try to shrink dense arrays into sparse arrays.
- ASSERT(static_cast<uint32_t>(
- FixedArrayBase::cast(elements())->length()) <= new_length);
- MaybeObject* result = NormalizeElements();
- if (result->IsFailure()) return result;
-
- // Update length for JSArrays.
- if (IsJSArray()) JSArray::cast(this)->set_length(len);
- break;
- }
- case DICTIONARY_ELEMENTS: {
- if (IsJSArray()) {
- uint32_t old_length =
- static_cast<uint32_t>(JSArray::cast(this)->length()->Number());
- element_dictionary()->RemoveNumberEntries(new_length, old_length),
- JSArray::cast(this)->set_length(len);
- }
- break;
- }
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- UNIMPLEMENTED();
- break;
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
- case EXTERNAL_PIXEL_ELEMENTS:
- UNREACHABLE();
- break;
- }
- return this;
-}
-
-
MaybeObject* JSArray::Initialize(int capacity) {
Heap* heap = GetHeap();
ASSERT(capacity >= 0);
@@ -7502,155 +8347,14 @@ void JSArray::Expand(int required_size) {
Handle<FixedArray> new_backing = FACTORY->NewFixedArray(new_size);
  // Can't use |this| any more because we may have had a GC!
for (int i = 0; i < old_size; i++) new_backing->set(i, old_backing->get(i));
- self->SetContent(*new_backing);
-}
-
-
-static Failure* ArrayLengthRangeError(Heap* heap) {
- HandleScope scope(heap->isolate());
- return heap->isolate()->Throw(
- *FACTORY->NewRangeError("invalid_array_length",
- HandleVector<Object>(NULL, 0)));
+ GetIsolate()->factory()->SetContent(self, new_backing);
}
MaybeObject* JSObject::SetElementsLength(Object* len) {
// We should never end in here with a pixel or external array.
ASSERT(AllowsSetElementsLength());
-
- MaybeObject* maybe_smi_length = len->ToSmi();
- Object* smi_length = Smi::FromInt(0);
- if (maybe_smi_length->ToObject(&smi_length) && smi_length->IsSmi()) {
- const int value = Smi::cast(smi_length)->value();
- if (value < 0) return ArrayLengthRangeError(GetHeap());
- ElementsKind elements_kind = GetElementsKind();
- switch (elements_kind) {
- case FAST_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS: {
- int old_capacity = FixedArrayBase::cast(elements())->length();
- if (value <= old_capacity) {
- if (IsJSArray()) {
- Object* obj;
- if (elements_kind == FAST_ELEMENTS) {
- MaybeObject* maybe_obj = EnsureWritableFastElements();
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- if (2 * value <= old_capacity) {
- // If more than half the elements won't be used, trim the array.
- if (value == 0) {
- initialize_elements();
- } else {
- Address filler_start;
- int filler_size;
- if (GetElementsKind() == FAST_ELEMENTS) {
- FixedArray* fast_elements = FixedArray::cast(elements());
- fast_elements->set_length(value);
- filler_start = fast_elements->address() +
- FixedArray::OffsetOfElementAt(value);
- filler_size = (old_capacity - value) * kPointerSize;
- } else {
- ASSERT(GetElementsKind() == FAST_DOUBLE_ELEMENTS);
- FixedDoubleArray* fast_double_elements =
- FixedDoubleArray::cast(elements());
- fast_double_elements->set_length(value);
- filler_start = fast_double_elements->address() +
- FixedDoubleArray::OffsetOfElementAt(value);
- filler_size = (old_capacity - value) * kDoubleSize;
- }
- GetHeap()->CreateFillerObjectAt(filler_start, filler_size);
- }
- } else {
- // Otherwise, fill the unused tail with holes.
- int old_length = FastD2I(JSArray::cast(this)->length()->Number());
- if (GetElementsKind() == FAST_ELEMENTS) {
- FixedArray* fast_elements = FixedArray::cast(elements());
- for (int i = value; i < old_length; i++) {
- fast_elements->set_the_hole(i);
- }
- } else {
- ASSERT(GetElementsKind() == FAST_DOUBLE_ELEMENTS);
- FixedDoubleArray* fast_double_elements =
- FixedDoubleArray::cast(elements());
- for (int i = value; i < old_length; i++) {
- fast_double_elements->set_the_hole(i);
- }
- }
- }
- JSArray::cast(this)->set_length(Smi::cast(smi_length));
- }
- return this;
- }
- int min = NewElementsCapacity(old_capacity);
- int new_capacity = value > min ? value : min;
- if (!ShouldConvertToSlowElements(new_capacity)) {
- MaybeObject* result;
- if (GetElementsKind() == FAST_ELEMENTS) {
- result = SetFastElementsCapacityAndLength(new_capacity, value);
- } else {
- ASSERT(GetElementsKind() == FAST_DOUBLE_ELEMENTS);
- result = SetFastDoubleElementsCapacityAndLength(new_capacity,
- value);
- }
- if (result->IsFailure()) return result;
- return this;
- }
- break;
- }
- case DICTIONARY_ELEMENTS: {
- if (IsJSArray()) {
- if (value == 0) {
- // If the length of a slow array is reset to zero, we clear
- // the array and flush backing storage. This has the added
- // benefit that the array returns to fast mode.
- Object* obj;
- { MaybeObject* maybe_obj = ResetElements();
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- } else {
- // Remove deleted elements.
- uint32_t old_length =
- static_cast<uint32_t>(JSArray::cast(this)->length()->Number());
- element_dictionary()->RemoveNumberEntries(value, old_length);
- }
- JSArray::cast(this)->set_length(Smi::cast(smi_length));
- }
- return this;
- }
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
- case EXTERNAL_PIXEL_ELEMENTS:
- UNREACHABLE();
- break;
- }
- }
-
- // General slow case.
- if (len->IsNumber()) {
- uint32_t length;
- if (len->ToArrayIndex(&length)) {
- return SetSlowElements(len);
- } else {
- return ArrayLengthRangeError(GetHeap());
- }
- }
-
- // len is not a number so make the array size one and
- // set only element to len.
- Object* obj;
- { MaybeObject* maybe_obj = GetHeap()->AllocateFixedArray(1);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- FixedArray::cast(obj)->set(0, len);
- if (IsJSArray()) JSArray::cast(this)->set_length(Smi::FromInt(1));
- set_elements(FixedArray::cast(obj));
- return this;
+ return GetElementsAccessor()->SetLength(this, len);
}
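The kind-specific switch that used to live here moves behind
GetElementsAccessor(), a per-ElementsKind strategy object. A rough sketch of
the shape of that refactor, with illustrative names rather than the actual
interface:

    #include <cstdint>

    struct ElementsStrategy {
      virtual ~ElementsStrategy() {}
      virtual void SetLength(uint32_t new_length) = 0;
    };

    struct FastStrategy : public ElementsStrategy {
      virtual void SetLength(uint32_t) { /* trim or grow the fast store */ }
    };

    struct DictionaryStrategy : public ElementsStrategy {
      virtual void SetLength(uint32_t) { /* drop out-of-range number keys */ }
    };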
@@ -7693,7 +8397,7 @@ MaybeObject* Map::PutPrototypeTransition(Object* prototype, Map* map) {
FixedArray* new_cache;
// Grow array by factor 2 over and above what we need.
{ MaybeObject* maybe_cache =
- heap()->AllocateFixedArray(transitions * 2 * step + header);
+ GetHeap()->AllocateFixedArray(transitions * 2 * step + header);
if (!maybe_cache->To<FixedArray>(&new_cache)) return maybe_cache;
}
@@ -7746,7 +8450,7 @@ MaybeObject* JSReceiver::SetPrototype(Object* value,
// It is sufficient to validate that the receiver is not in the new prototype
// chain.
for (Object* pt = value; pt != heap->null_value(); pt = pt->GetPrototype()) {
- if (JSObject::cast(pt) == this) {
+ if (JSReceiver::cast(pt) == this) {
// Cycle detected.
HandleScope scope(heap->isolate());
return heap->isolate()->Throw(
@@ -7761,8 +8465,8 @@ MaybeObject* JSReceiver::SetPrototype(Object* value,
// hidden and set the new prototype on that object.
Object* current_proto = real_receiver->GetPrototype();
while (current_proto->IsJSObject() &&
- JSObject::cast(current_proto)->map()->is_hidden_prototype()) {
- real_receiver = JSObject::cast(current_proto);
+ JSReceiver::cast(current_proto)->map()->is_hidden_prototype()) {
+ real_receiver = JSReceiver::cast(current_proto);
current_proto = current_proto->GetPrototype();
}
}
@@ -7795,8 +8499,21 @@ MaybeObject* JSReceiver::SetPrototype(Object* value,
}
+MaybeObject* JSObject::EnsureCanContainElements(Arguments* args,
+ uint32_t first_arg,
+ uint32_t arg_count) {
+ // Elements in |Arguments| are ordered backwards (because they're on the
+  // stack), but the method called here iterates over them in the forward
+  // direction.
+ return EnsureCanContainElements(
+ args->arguments() - first_arg - (arg_count - 1),
+ arg_count);
+}
+
+
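A worked, self-contained model of the pointer arithmetic above, assuming
argument i lives at arguments() - i (the slots grow downward): the forward
base is simply the lowest address of the requested range.

    #include <cassert>

    int main() {
      int storage[8];
      int* args_base = &storage[7];
      for (int i = 0; i < 8; ++i) *(args_base - i) = 100 + i;  // argument i
      int first_arg = 2, arg_count = 3;
      int* first = args_base - first_arg - (arg_count - 1);
      // Visits arguments 4, 3, 2; order is irrelevant to a containment
      // check, only that every requested slot is covered.
      assert(first[0] == 104 && first[1] == 103 && first[2] == 102);
      return 0;
    }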
bool JSObject::HasElementPostInterceptor(JSReceiver* receiver, uint32_t index) {
switch (GetElementsKind()) {
+ case FAST_SMI_ONLY_ELEMENTS:
case FAST_ELEMENTS: {
uint32_t length = IsJSArray() ?
static_cast<uint32_t>
@@ -7857,6 +8574,11 @@ bool JSObject::HasElementPostInterceptor(JSReceiver* receiver, uint32_t index) {
Object* pt = GetPrototype();
if (pt->IsNull()) return false;
+ if (pt->IsJSProxy()) {
+ // We need to follow the spec and simulate a call to [[GetOwnProperty]].
+ return JSProxy::cast(pt)->GetElementAttributeWithHandler(
+ receiver, index) != ABSENT;
+ }
return JSObject::cast(pt)->HasElementWithReceiver(receiver, index);
}
@@ -7933,6 +8655,7 @@ JSObject::LocalElementType JSObject::HasLocalElement(uint32_t index) {
}
switch (GetElementsKind()) {
+ case FAST_SMI_ONLY_ELEMENTS:
case FAST_ELEMENTS: {
uint32_t length = IsJSArray() ?
static_cast<uint32_t>
@@ -8047,6 +8770,7 @@ bool JSObject::HasElementWithReceiver(JSReceiver* receiver, uint32_t index) {
ElementsKind kind = GetElementsKind();
switch (kind) {
+ case FAST_SMI_ONLY_ELEMENTS:
case FAST_ELEMENTS: {
uint32_t length = IsJSArray() ?
static_cast<uint32_t>
@@ -8113,6 +8837,11 @@ bool JSObject::HasElementWithReceiver(JSReceiver* receiver, uint32_t index) {
Object* pt = GetPrototype();
if (pt->IsNull()) return false;
+ if (pt->IsJSProxy()) {
+ // We need to follow the spec and simulate a call to [[GetOwnProperty]].
+ return JSProxy::cast(pt)->GetElementAttributeWithHandler(
+ receiver, index) != ABSENT;
+ }
return JSObject::cast(pt)->HasElementWithReceiver(receiver, index);
}
@@ -8189,9 +8918,9 @@ MaybeObject* JSObject::GetElementWithCallback(Object* receiver,
// __defineGetter__ callback
if (structure->IsFixedArray()) {
Object* getter = FixedArray::cast(structure)->get(kGetterIndex);
- if (getter->IsJSFunction()) {
- return Object::GetPropertyWithDefinedGetter(receiver,
- JSFunction::cast(getter));
+ if (getter->IsSpecFunction()) {
+      // TODO(rossberg): it would be nicer to cast to some JSCallable here...
+ return GetPropertyWithDefinedGetter(receiver, JSReceiver::cast(getter));
}
// Getter is not a function.
return isolate->heap()->undefined_value();
@@ -8246,8 +8975,9 @@ MaybeObject* JSObject::SetElementWithCallback(Object* structure,
if (structure->IsFixedArray()) {
Handle<Object> setter(FixedArray::cast(structure)->get(kSetterIndex));
- if (setter->IsJSFunction()) {
- return SetPropertyWithDefinedSetter(JSFunction::cast(*setter), value);
+ if (setter->IsSpecFunction()) {
+      // TODO(rossberg): it would be nicer to cast to some JSCallable here...
+ return SetPropertyWithDefinedSetter(JSReceiver::cast(*setter), value);
} else {
if (strict_mode == kNonStrictMode) {
return value;
@@ -8297,7 +9027,8 @@ MaybeObject* JSObject::SetFastElement(uint32_t index,
Object* value,
StrictModeFlag strict_mode,
bool check_prototype) {
- ASSERT(HasFastElements() || HasFastArgumentsElements());
+ ASSERT(HasFastTypeElements() ||
+ HasFastArgumentsElements());
FixedArray* backing_store = FixedArray::cast(elements());
if (backing_store->map() == GetHeap()->non_strict_arguments_elements_map()) {
@@ -8308,10 +9039,10 @@ MaybeObject* JSObject::SetFastElement(uint32_t index,
if (!maybe->ToObject(&writable)) return maybe;
backing_store = FixedArray::cast(writable);
}
- uint32_t length = static_cast<uint32_t>(backing_store->length());
+ uint32_t capacity = static_cast<uint32_t>(backing_store->length());
if (check_prototype &&
- (index >= length || backing_store->get(index)->IsTheHole())) {
+ (index >= capacity || backing_store->get(index)->IsTheHole())) {
bool found;
MaybeObject* result = SetElementWithCallbackSetterInPrototypes(index,
value,
@@ -8320,39 +9051,75 @@ MaybeObject* JSObject::SetFastElement(uint32_t index,
if (found) return result;
}
- // Check whether there is extra space in fixed array.
- if (index < length) {
- backing_store->set(index, value);
- if (IsJSArray()) {
- // Update the length of the array if needed.
- uint32_t array_length = 0;
- CHECK(JSArray::cast(this)->length()->ToArrayIndex(&array_length));
- if (index >= array_length) {
- JSArray::cast(this)->set_length(Smi::FromInt(index + 1));
+ uint32_t new_capacity = capacity;
+ // Check if the length property of this object needs to be updated.
+ uint32_t array_length = 0;
+ bool must_update_array_length = false;
+ if (IsJSArray()) {
+ CHECK(JSArray::cast(this)->length()->ToArrayIndex(&array_length));
+ if (index >= array_length) {
+ must_update_array_length = true;
+ array_length = index + 1;
+ }
+ }
+ // Check if the capacity of the backing store needs to be increased, or if
+ // a transition to slow elements is necessary.
+ if (index >= capacity) {
+ bool convert_to_slow = true;
+ if ((index - capacity) < kMaxGap) {
+ new_capacity = NewElementsCapacity(index + 1);
+ ASSERT(new_capacity > index);
+ if (!ShouldConvertToSlowElements(new_capacity)) {
+ convert_to_slow = false;
}
}
+ if (convert_to_slow) {
+ MaybeObject* result = NormalizeElements();
+ if (result->IsFailure()) return result;
+ return SetDictionaryElement(index, value, strict_mode, check_prototype);
+ }
+ }
+ // Convert to fast double elements if appropriate.
+ if (HasFastSmiOnlyElements() && !value->IsSmi() && value->IsNumber()) {
+ MaybeObject* maybe =
+ SetFastDoubleElementsCapacityAndLength(new_capacity, array_length);
+ if (maybe->IsFailure()) return maybe;
+ FixedDoubleArray::cast(elements())->set(index, value->Number());
return value;
}
-
- // Allow gap in fast case.
- if ((index - length) < kMaxGap) {
- // Try allocating extra space.
- int new_capacity = NewElementsCapacity(index + 1);
- if (!ShouldConvertToSlowElements(new_capacity)) {
- ASSERT(static_cast<uint32_t>(new_capacity) > index);
- Object* new_elements;
- MaybeObject* maybe =
- SetFastElementsCapacityAndLength(new_capacity, index + 1);
- if (!maybe->ToObject(&new_elements)) return maybe;
- FixedArray::cast(new_elements)->set(index, value);
- return value;
- }
+ // Change elements kind from SMI_ONLY to generic FAST if necessary.
+ if (HasFastSmiOnlyElements() && !value->IsSmi()) {
+ MaybeObject* maybe_new_map = GetElementsTransitionMap(FAST_ELEMENTS);
+ Map* new_map;
+ if (!maybe_new_map->To<Map>(&new_map)) return maybe_new_map;
+ set_map(new_map);
+ if (FLAG_trace_elements_transitions) {
+ PrintElementsTransition(stdout, FAST_SMI_ONLY_ELEMENTS, elements(),
+ FAST_ELEMENTS, elements());
+ }
+ }
+  // Increase the backing store capacity if that was decided above.
+ if (new_capacity != capacity) {
+ Object* new_elements;
+ SetFastElementsCapacityMode set_capacity_mode =
+ value->IsSmi() && HasFastSmiOnlyElements()
+ ? kAllowSmiOnlyElements
+ : kDontAllowSmiOnlyElements;
+ MaybeObject* maybe =
+ SetFastElementsCapacityAndLength(new_capacity,
+ array_length,
+ set_capacity_mode);
+ if (!maybe->ToObject(&new_elements)) return maybe;
+ FixedArray::cast(new_elements)->set(index, value);
+ return value;
}
-
- // Otherwise default to slow case.
- MaybeObject* result = NormalizeElements();
- if (result->IsFailure()) return result;
- return SetDictionaryElement(index, value, strict_mode, check_prototype);
+ // Finally, set the new element and length.
+ ASSERT(elements()->IsFixedArray());
+ backing_store->set(index, value);
+ if (must_update_array_length) {
+ JSArray::cast(this)->set_length(Smi::FromInt(array_length));
+ }
+ return value;
}
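The growth policy embedded in the rewrite above, restated in isolation. The
1.5x-plus-slack factor and the gap limit are assumptions modeled on
NewElementsCapacity and kMaxGap; the real code additionally consults
ShouldConvertToSlowElements on the grown capacity.

    #include <cstdint>

    static const uint32_t kMaxGapSketch = 1024;  // assumed value

    uint32_t NewCapacityOrGoSlow(uint32_t capacity, uint32_t index,
                                 bool* convert_to_slow) {
      *convert_to_slow = false;
      if (index < capacity) return capacity;       // no growth needed
      if (index - capacity >= kMaxGapSketch) {     // gap too large: go slow
        *convert_to_slow = true;
        return capacity;
      }
      uint32_t minimum = index + 1;
      uint32_t grown = capacity + (capacity >> 1) + 16;  // ~1.5x plus slack
      return grown > minimum ? grown : minimum;
    }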
@@ -8448,7 +9215,9 @@ MaybeObject* JSObject::SetDictionaryElement(uint32_t index,
}
MaybeObject* result = CanConvertToFastDoubleElements()
? SetFastDoubleElementsCapacityAndLength(new_length, new_length)
- : SetFastElementsCapacityAndLength(new_length, new_length);
+ : SetFastElementsCapacityAndLength(new_length,
+ new_length,
+ kDontAllowSmiOnlyElements);
if (result->IsFailure()) return result;
#ifdef DEBUG
if (FLAG_trace_normalization) {
@@ -8492,10 +9261,15 @@ MUST_USE_RESULT MaybeObject* JSObject::SetFastDoubleElement(
if (IsJSArray()) {
CHECK(JSArray::cast(this)->length()->ToArrayIndex(&length));
}
- MaybeObject* maybe_obj =
- SetFastElementsCapacityAndLength(elms_length, length);
+ MaybeObject* maybe_obj = SetFastElementsCapacityAndLength(
+ elms_length,
+ length,
+ kDontAllowSmiOnlyElements);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- return SetFastElement(index, value, strict_mode, check_prototype);
+ return SetFastElement(index,
+ value,
+ strict_mode,
+ check_prototype);
}
double double_value = value_is_smi
@@ -8546,6 +9320,17 @@ MUST_USE_RESULT MaybeObject* JSObject::SetFastDoubleElement(
}
+MaybeObject* JSReceiver::SetElement(uint32_t index,
+ Object* value,
+ StrictModeFlag strict_mode,
+ bool check_proto) {
+ return IsJSProxy()
+ ? JSProxy::cast(this)->SetElementWithHandler(index, value, strict_mode)
+      : JSObject::cast(this)->SetElement(index, value, strict_mode,
+                                         check_proto);
+}
+
+
MaybeObject* JSObject::SetElement(uint32_t index,
Object* value,
StrictModeFlag strict_mode,
@@ -8592,6 +9377,7 @@ MaybeObject* JSObject::SetElementWithoutInterceptor(uint32_t index,
bool check_prototype) {
Isolate* isolate = GetIsolate();
switch (GetElementsKind()) {
+ case FAST_SMI_ONLY_ELEMENTS:
case FAST_ELEMENTS:
return SetFastElement(index, value, strict_mode, check_prototype);
case FAST_DOUBLE_ELEMENTS:
@@ -8667,6 +9453,54 @@ MaybeObject* JSObject::SetElementWithoutInterceptor(uint32_t index,
}
+MUST_USE_RESULT MaybeObject* JSObject::TransitionElementsKind(
+ ElementsKind to_kind) {
+ ElementsKind from_kind = map()->elements_kind();
+ FixedArrayBase* elms = FixedArrayBase::cast(elements());
+ uint32_t capacity = static_cast<uint32_t>(elms->length());
+ uint32_t length = capacity;
+ if (IsJSArray()) {
+ CHECK(JSArray::cast(this)->length()->ToArrayIndex(&length));
+ }
+ if (from_kind == FAST_SMI_ONLY_ELEMENTS) {
+ if (to_kind == FAST_DOUBLE_ELEMENTS) {
+ MaybeObject* maybe_result =
+ SetFastDoubleElementsCapacityAndLength(capacity, length);
+ if (maybe_result->IsFailure()) return maybe_result;
+ return this;
+ } else if (to_kind == FAST_ELEMENTS) {
+ MaybeObject* maybe_new_map = GetElementsTransitionMap(FAST_ELEMENTS);
+ Map* new_map;
+ if (!maybe_new_map->To(&new_map)) return maybe_new_map;
+ if (FLAG_trace_elements_transitions) {
+ PrintElementsTransition(stdout, from_kind, elms, FAST_ELEMENTS, elms);
+ }
+ set_map(new_map);
+ return this;
+ }
+ } else if (from_kind == FAST_DOUBLE_ELEMENTS && to_kind == FAST_ELEMENTS) {
+ MaybeObject* maybe_result = SetFastElementsCapacityAndLength(
+ capacity, length, kDontAllowSmiOnlyElements);
+ if (maybe_result->IsFailure()) return maybe_result;
+ return this;
+ }
+  // This method should never be called for any case other than the ones
+  // handled above.
+ UNREACHABLE();
+ return GetIsolate()->heap()->null_value();
+}
+
+
+// static
+bool Map::IsValidElementsTransition(ElementsKind from_kind,
+ ElementsKind to_kind) {
+ return
+ (from_kind == FAST_SMI_ONLY_ELEMENTS &&
+ (to_kind == FAST_DOUBLE_ELEMENTS || to_kind == FAST_ELEMENTS)) ||
+ (from_kind == FAST_DOUBLE_ELEMENTS && to_kind == FAST_ELEMENTS);
+}
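The same one-way lattice drives both TransitionElementsKind and this
predicate: element kinds only generalize (smi-only to double or tagged,
double to tagged), never narrow. A standalone restatement:

    enum ToyKind { SMI_ONLY, DOUBLE, TAGGED };  // toy mirror of the kinds

    bool CanTransition(ToyKind from, ToyKind to) {
      return (from == SMI_ONLY && (to == DOUBLE || to == TAGGED)) ||
             (from == DOUBLE && to == TAGGED);
    }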
+
+
MaybeObject* JSArray::JSArrayUpdateLengthFromIndex(uint32_t index,
Object* value) {
uint32_t old_len = 0;
@@ -8754,6 +9588,7 @@ void JSObject::GetElementsCapacityAndUsage(int* capacity, int* used) {
break;
}
// Fall through.
+ case FAST_SMI_ONLY_ELEMENTS:
case FAST_ELEMENTS:
backing_store = FixedArray::cast(backing_store_base);
*capacity = backing_store->length();
@@ -8932,7 +9767,7 @@ MaybeObject* JSObject::GetPropertyPostInterceptor(
String* name,
PropertyAttributes* attributes) {
// Check local property in holder, ignore interceptor.
- LookupResult result;
+ LookupResult result(GetIsolate());
LocalLookupRealNamedProperty(name, &result);
if (result.IsProperty()) {
return GetProperty(receiver, &result, name, attributes);
@@ -8950,7 +9785,7 @@ MaybeObject* JSObject::GetLocalPropertyPostInterceptor(
String* name,
PropertyAttributes* attributes) {
// Check local property in holder, ignore interceptor.
- LookupResult result;
+ LookupResult result(GetIsolate());
LocalLookupRealNamedProperty(name, &result);
if (result.IsProperty()) {
return GetProperty(receiver, &result, name, attributes);
@@ -9001,15 +9836,15 @@ MaybeObject* JSObject::GetPropertyWithInterceptor(
bool JSObject::HasRealNamedProperty(String* key) {
// Check access rights if needed.
+ Isolate* isolate = GetIsolate();
if (IsAccessCheckNeeded()) {
- Heap* heap = GetHeap();
- if (!heap->isolate()->MayNamedAccess(this, key, v8::ACCESS_HAS)) {
- heap->isolate()->ReportFailedAccessCheck(this, v8::ACCESS_HAS);
+ if (!isolate->MayNamedAccess(this, key, v8::ACCESS_HAS)) {
+ isolate->ReportFailedAccessCheck(this, v8::ACCESS_HAS);
return false;
}
}
- LookupResult result;
+ LookupResult result(isolate);
LocalLookupRealNamedProperty(key, &result);
return result.IsProperty() && (result.type() != INTERCEPTOR);
}
@@ -9029,6 +9864,7 @@ bool JSObject::HasRealElementProperty(uint32_t index) {
if (this->IsStringObjectWithCharacterAt(index)) return true;
switch (GetElementsKind()) {
+ case FAST_SMI_ONLY_ELEMENTS:
case FAST_ELEMENTS: {
uint32_t length = IsJSArray() ?
static_cast<uint32_t>(
@@ -9077,15 +9913,15 @@ bool JSObject::HasRealElementProperty(uint32_t index) {
bool JSObject::HasRealNamedCallbackProperty(String* key) {
// Check access rights if needed.
+ Isolate* isolate = GetIsolate();
if (IsAccessCheckNeeded()) {
- Heap* heap = GetHeap();
- if (!heap->isolate()->MayNamedAccess(this, key, v8::ACCESS_HAS)) {
- heap->isolate()->ReportFailedAccessCheck(this, v8::ACCESS_HAS);
+ if (!isolate->MayNamedAccess(this, key, v8::ACCESS_HAS)) {
+ isolate->ReportFailedAccessCheck(this, v8::ACCESS_HAS);
return false;
}
}
- LookupResult result;
+ LookupResult result(isolate);
LocalLookupRealNamedProperty(key, &result);
return result.IsProperty() && (result.type() == CALLBACKS);
}
@@ -9119,8 +9955,8 @@ void FixedArray::SwapPairs(FixedArray* numbers, int i, int j) {
set(j, temp);
if (this != numbers) {
temp = numbers->get(i);
- numbers->set(i, numbers->get(j));
- numbers->set(j, temp);
+ numbers->set(i, Smi::cast(numbers->get(j)));
+ numbers->set(j, Smi::cast(temp));
}
}
@@ -9268,6 +10104,7 @@ int JSObject::GetLocalElementKeys(FixedArray* storage,
PropertyAttributes filter) {
int counter = 0;
switch (GetElementsKind()) {
+ case FAST_SMI_ONLY_ELEMENTS:
case FAST_ELEMENTS: {
int length = IsJSArray() ?
Smi::cast(JSArray::cast(this)->length())->value() :
@@ -9432,70 +10269,87 @@ class StringSharedKey : public HashTableKey {
public:
StringSharedKey(String* source,
SharedFunctionInfo* shared,
- StrictModeFlag strict_mode)
+ LanguageMode language_mode,
+ int scope_position)
: source_(source),
shared_(shared),
- strict_mode_(strict_mode) { }
+ language_mode_(language_mode),
+ scope_position_(scope_position) { }
bool IsMatch(Object* other) {
if (!other->IsFixedArray()) return false;
- FixedArray* pair = FixedArray::cast(other);
- SharedFunctionInfo* shared = SharedFunctionInfo::cast(pair->get(0));
+ FixedArray* other_array = FixedArray::cast(other);
+ SharedFunctionInfo* shared = SharedFunctionInfo::cast(other_array->get(0));
if (shared != shared_) return false;
- StrictModeFlag strict_mode = static_cast<StrictModeFlag>(
- Smi::cast(pair->get(2))->value());
- if (strict_mode != strict_mode_) return false;
- String* source = String::cast(pair->get(1));
+ int language_unchecked = Smi::cast(other_array->get(2))->value();
+ ASSERT(language_unchecked == CLASSIC_MODE ||
+ language_unchecked == STRICT_MODE ||
+ language_unchecked == EXTENDED_MODE);
+ LanguageMode language_mode = static_cast<LanguageMode>(language_unchecked);
+ if (language_mode != language_mode_) return false;
+ int scope_position = Smi::cast(other_array->get(3))->value();
+ if (scope_position != scope_position_) return false;
+ String* source = String::cast(other_array->get(1));
return source->Equals(source_);
}
static uint32_t StringSharedHashHelper(String* source,
SharedFunctionInfo* shared,
- StrictModeFlag strict_mode) {
+ LanguageMode language_mode,
+ int scope_position) {
uint32_t hash = source->Hash();
if (shared->HasSourceCode()) {
// Instead of using the SharedFunctionInfo pointer in the hash
// code computation, we use a combination of the hash of the
- // script source code and the start and end positions. We do
- // this to ensure that the cache entries can survive garbage
+ // script source code and the start position of the calling scope.
+ // We do this to ensure that the cache entries can survive garbage
// collection.
Script* script = Script::cast(shared->script());
hash ^= String::cast(script->source())->Hash();
- if (strict_mode == kStrictMode) hash ^= 0x8000;
- hash += shared->start_position();
+ if (language_mode == STRICT_MODE) hash ^= 0x8000;
+ if (language_mode == EXTENDED_MODE) hash ^= 0x0080;
+ hash += scope_position;
}
return hash;
}
uint32_t Hash() {
- return StringSharedHashHelper(source_, shared_, strict_mode_);
+ return StringSharedHashHelper(
+ source_, shared_, language_mode_, scope_position_);
}
uint32_t HashForObject(Object* obj) {
- FixedArray* pair = FixedArray::cast(obj);
- SharedFunctionInfo* shared = SharedFunctionInfo::cast(pair->get(0));
- String* source = String::cast(pair->get(1));
- StrictModeFlag strict_mode = static_cast<StrictModeFlag>(
- Smi::cast(pair->get(2))->value());
- return StringSharedHashHelper(source, shared, strict_mode);
+ FixedArray* other_array = FixedArray::cast(obj);
+ SharedFunctionInfo* shared = SharedFunctionInfo::cast(other_array->get(0));
+ String* source = String::cast(other_array->get(1));
+ int language_unchecked = Smi::cast(other_array->get(2))->value();
+ ASSERT(language_unchecked == CLASSIC_MODE ||
+ language_unchecked == STRICT_MODE ||
+ language_unchecked == EXTENDED_MODE);
+ LanguageMode language_mode = static_cast<LanguageMode>(language_unchecked);
+ int scope_position = Smi::cast(other_array->get(3))->value();
+ return StringSharedHashHelper(
+ source, shared, language_mode, scope_position);
}
MUST_USE_RESULT MaybeObject* AsObject() {
Object* obj;
- { MaybeObject* maybe_obj = source_->GetHeap()->AllocateFixedArray(3);
+ { MaybeObject* maybe_obj = source_->GetHeap()->AllocateFixedArray(4);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
- FixedArray* pair = FixedArray::cast(obj);
- pair->set(0, shared_);
- pair->set(1, source_);
- pair->set(2, Smi::FromInt(strict_mode_));
- return pair;
+ FixedArray* other_array = FixedArray::cast(obj);
+ other_array->set(0, shared_);
+ other_array->set(1, source_);
+ other_array->set(2, Smi::FromInt(language_mode_));
+ other_array->set(3, Smi::FromInt(scope_position_));
+ return other_array;
}
private:
String* source_;
SharedFunctionInfo* shared_;
- StrictModeFlag strict_mode_;
+ LanguageMode language_mode_;
+ int scope_position_;
};
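The key hashes its inputs instead of the SharedFunctionInfo pointer so that
cache entries survive garbage collection. A compilable restatement, ignoring
the HasSourceCode() guard and assuming the enum values CLASSIC_MODE = 0,
STRICT_MODE = 1, EXTENDED_MODE = 2:

    #include <cstdint>

    uint32_t EvalCacheHash(uint32_t source_hash, uint32_t script_source_hash,
                           int language_mode, int scope_position) {
      uint32_t hash = source_hash ^ script_source_hash;
      if (language_mode == 1) hash ^= 0x8000;  // STRICT_MODE bit
      if (language_mode == 2) hash ^= 0x0080;  // EXTENDED_MODE bit
      return hash + static_cast<uint32_t>(scope_position);
    }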
@@ -9823,14 +10677,14 @@ int StringDictionary::FindEntry(String* key) {
if (element->IsUndefined()) break; // Empty entry.
if (key == element) return entry;
if (!element->IsSymbol() &&
- !element->IsNull() &&
+ !element->IsTheHole() &&
String::cast(element)->Equals(key)) {
// Replace a non-symbol key by the equivalent symbol for faster further
// lookups.
set(index, key);
return entry;
}
- ASSERT(element->IsNull() || !String::cast(element)->Equals(key));
+ ASSERT(element->IsTheHole() || !String::cast(element)->Equals(key));
entry = NextProbe(entry, count++, capacity);
}
return kNotFound;
@@ -9934,7 +10788,7 @@ uint32_t HashTable<Shape, Key>::FindInsertionEntry(uint32_t hash) {
// EnsureCapacity will guarantee the hash table is never full.
while (true) {
Object* element = KeyAt(entry);
- if (element->IsUndefined() || element->IsNull()) break;
+ if (element->IsUndefined() || element->IsTheHole()) break;
entry = NextProbe(entry, count++, capacity);
}
return entry;
@@ -9949,7 +10803,9 @@ template class HashTable<CompilationCacheShape, HashTableKey*>;
template class HashTable<MapCacheShape, HashTableKey*>;
-template class HashTable<ObjectHashTableShape, JSObject*>;
+template class HashTable<ObjectHashTableShape<1>, Object*>;
+
+template class HashTable<ObjectHashTableShape<2>, Object*>;
template class Dictionary<StringDictionaryShape, String*>;
@@ -10133,8 +10989,6 @@ MaybeObject* JSObject::PrepareSlowElementsForSort(uint32_t limit) {
// If the object is in dictionary mode, it is converted to fast elements
// mode.
MaybeObject* JSObject::PrepareElementsForSort(uint32_t limit) {
- ASSERT(!HasExternalArrayElements());
-
Heap* heap = GetHeap();
if (HasDictionaryElements()) {
@@ -10148,7 +11002,7 @@ MaybeObject* JSObject::PrepareElementsForSort(uint32_t limit) {
// Convert to fast elements.
Object* obj;
- { MaybeObject* maybe_obj = map()->GetFastElementsMap();
+ { MaybeObject* maybe_obj = GetElementsTransitionMap(FAST_ELEMENTS);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
Map* new_map = Map::cast(obj);
@@ -10164,13 +11018,16 @@ MaybeObject* JSObject::PrepareElementsForSort(uint32_t limit) {
set_map(new_map);
set_elements(fast_elements);
+ } else if (HasExternalArrayElements()) {
+ // External arrays cannot have holes or undefined elements.
+ return Smi::FromInt(ExternalArray::cast(elements())->length());
} else if (!HasFastDoubleElements()) {
Object* obj;
{ MaybeObject* maybe_obj = EnsureWritableFastElements();
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
}
- ASSERT(HasFastElements() || HasFastDoubleElements());
+ ASSERT(HasFastTypeElements() || HasFastDoubleElements());
// Collect holes at the end, undefined before that and the rest at the
// start, and return the number of non-hole, non-undefined values.
@@ -10439,6 +11296,16 @@ JSGlobalPropertyCell* GlobalObject::GetPropertyCell(LookupResult* result) {
}
+Handle<JSGlobalPropertyCell> GlobalObject::EnsurePropertyCell(
+ Handle<GlobalObject> global,
+ Handle<String> name) {
+ Isolate* isolate = global->GetIsolate();
+ CALL_HEAP_FUNCTION(isolate,
+ global->EnsurePropertyCell(*name),
+ JSGlobalPropertyCell);
+}
+
+
MaybeObject* GlobalObject::EnsurePropertyCell(String* name) {
ASSERT(!HasFastProperties());
int entry = property_dictionary()->FindEntry(name);
@@ -10637,8 +11504,12 @@ Object* CompilationCacheTable::Lookup(String* src) {
Object* CompilationCacheTable::LookupEval(String* src,
Context* context,
- StrictModeFlag strict_mode) {
- StringSharedKey key(src, context->closure()->shared(), strict_mode);
+ LanguageMode language_mode,
+ int scope_position) {
+ StringSharedKey key(src,
+ context->closure()->shared(),
+ language_mode,
+ scope_position);
int entry = FindEntry(&key);
if (entry == kNotFound) return GetHeap()->undefined_value();
return get(EntryToIndex(entry) + 1);
@@ -10673,10 +11544,12 @@ MaybeObject* CompilationCacheTable::Put(String* src, Object* value) {
MaybeObject* CompilationCacheTable::PutEval(String* src,
Context* context,
- SharedFunctionInfo* value) {
+ SharedFunctionInfo* value,
+ int scope_position) {
StringSharedKey key(src,
context->closure()->shared(),
- value->strict_mode() ? kStrictMode : kNonStrictMode);
+ value->language_mode(),
+ scope_position);
Object* obj;
{ MaybeObject* maybe_obj = EnsureCapacity(1, &key);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
@@ -10720,13 +11593,13 @@ MaybeObject* CompilationCacheTable::PutRegExp(String* src,
void CompilationCacheTable::Remove(Object* value) {
- Object* null_value = GetHeap()->null_value();
+ Object* the_hole_value = GetHeap()->the_hole_value();
for (int entry = 0, size = Capacity(); entry < size; entry++) {
int entry_index = EntryToIndex(entry);
int value_index = entry_index + 1;
if (get(value_index) == value) {
- fast_set(this, entry_index, null_value);
- fast_set(this, value_index, null_value);
+ NoWriteBarrierSet(this, entry_index, the_hole_value);
+ NoWriteBarrierSet(this, value_index, the_hole_value);
ElementRemoved();
}
}
@@ -10879,30 +11752,6 @@ MaybeObject* Dictionary<Shape, Key>::EnsureCapacity(int n, Key key) {
}
-void NumberDictionary::RemoveNumberEntries(uint32_t from, uint32_t to) {
- // Do nothing if the interval [from, to) is empty.
- if (from >= to) return;
-
- Heap* heap = GetHeap();
- int removed_entries = 0;
- Object* sentinel = heap->null_value();
- int capacity = Capacity();
- for (int i = 0; i < capacity; i++) {
- Object* key = KeyAt(i);
- if (key->IsNumber()) {
- uint32_t number = static_cast<uint32_t>(key->Number());
- if (from <= number && number < to) {
- SetEntry(i, sentinel, sentinel);
- removed_entries++;
- }
- }
- }
-
- // Update the number of elements.
- ElementsRemoved(removed_entries);
-}
-
-
template<typename Shape, typename Key>
Object* Dictionary<Shape, Key>::DeleteProperty(int entry,
JSReceiver::DeleteMode mode) {
@@ -10912,7 +11761,7 @@ Object* Dictionary<Shape, Key>::DeleteProperty(int entry,
if (details.IsDontDelete() && mode != JSReceiver::FORCE_DELETION) {
return heap->false_value();
}
- SetEntry(entry, heap->null_value(), heap->null_value());
+ SetEntry(entry, heap->the_hole_value(), heap->the_hole_value());
HashTable<Shape, Key>::ElementRemoved();
return heap->true_value();
}
@@ -11198,14 +12047,15 @@ MaybeObject* StringDictionary::TransformPropertiesToFastFor(
}
// Allocate the instance descriptor.
- Object* descriptors_unchecked;
- { MaybeObject* maybe_descriptors_unchecked =
+ DescriptorArray* descriptors;
+ { MaybeObject* maybe_descriptors =
DescriptorArray::Allocate(instance_descriptor_length);
- if (!maybe_descriptors_unchecked->ToObject(&descriptors_unchecked)) {
- return maybe_descriptors_unchecked;
+ if (!maybe_descriptors->To<DescriptorArray>(&descriptors)) {
+ return maybe_descriptors;
}
}
- DescriptorArray* descriptors = DescriptorArray::cast(descriptors_unchecked);
+
+ DescriptorArray::WhitenessWitness witness(descriptors);
int inobject_props = obj->map()->inobject_properties();
int number_of_allocated_fields =
@@ -11243,7 +12093,7 @@ MaybeObject* StringDictionary::TransformPropertiesToFastFor(
JSFunction::cast(value),
details.attributes(),
details.index());
- descriptors->Set(next_descriptor++, &d);
+ descriptors->Set(next_descriptor++, &d, witness);
} else if (type == NORMAL) {
if (current_offset < inobject_props) {
obj->InObjectPropertyAtPut(current_offset,
@@ -11257,13 +12107,13 @@ MaybeObject* StringDictionary::TransformPropertiesToFastFor(
current_offset++,
details.attributes(),
details.index());
- descriptors->Set(next_descriptor++, &d);
+ descriptors->Set(next_descriptor++, &d, witness);
} else if (type == CALLBACKS) {
CallbacksDescriptor d(String::cast(key),
value,
details.attributes(),
details.index());
- descriptors->Set(next_descriptor++, &d);
+ descriptors->Set(next_descriptor++, &d, witness);
} else {
UNREACHABLE();
}
@@ -11271,7 +12121,7 @@ MaybeObject* StringDictionary::TransformPropertiesToFastFor(
}
ASSERT(current_offset == number_of_fields);
- descriptors->Sort();
+ descriptors->Sort(witness);
// Allocate new map.
Object* new_map;
{ MaybeObject* maybe_new_map = obj->map()->CopyDropDescriptors();
@@ -11294,20 +12144,84 @@ MaybeObject* StringDictionary::TransformPropertiesToFastFor(
}
-Object* ObjectHashTable::Lookup(JSObject* key) {
+bool ObjectHashSet::Contains(Object* key) {
+ ASSERT(IsKey(key));
+
// If the object does not have an identity hash, it was never used as a key.
- MaybeObject* maybe_hash = key->GetIdentityHash(JSObject::OMIT_CREATION);
- if (maybe_hash->IsFailure()) return GetHeap()->undefined_value();
+ { MaybeObject* maybe_hash = key->GetHash(OMIT_CREATION);
+ if (maybe_hash->ToObjectUnchecked()->IsUndefined()) return false;
+ }
+ return (FindEntry(key) != kNotFound);
+}
+
+
+MaybeObject* ObjectHashSet::Add(Object* key) {
+ ASSERT(IsKey(key));
+
+ // Make sure the key object has an identity hash code.
+ int hash;
+ { MaybeObject* maybe_hash = key->GetHash(ALLOW_CREATION);
+ if (maybe_hash->IsFailure()) return maybe_hash;
+ hash = Smi::cast(maybe_hash->ToObjectUnchecked())->value();
+ }
+ int entry = FindEntry(key);
+
+ // Check whether key is already present.
+ if (entry != kNotFound) return this;
+
+ // Check whether the hash set should be extended and add entry.
+ Object* obj;
+ { MaybeObject* maybe_obj = EnsureCapacity(1, key);
+ if (!maybe_obj->ToObject(&obj)) return maybe_obj;
+ }
+ ObjectHashSet* table = ObjectHashSet::cast(obj);
+ entry = table->FindInsertionEntry(hash);
+ table->set(EntryToIndex(entry), key);
+ table->ElementAdded();
+ return table;
+}
+
+
+MaybeObject* ObjectHashSet::Remove(Object* key) {
+ ASSERT(IsKey(key));
+
+ // If the object does not have an identity hash, it was never used as a key.
+ { MaybeObject* maybe_hash = key->GetHash(OMIT_CREATION);
+ if (maybe_hash->ToObjectUnchecked()->IsUndefined()) return this;
+ }
+ int entry = FindEntry(key);
+
+ // Check whether key is actually present.
+ if (entry == kNotFound) return this;
+
+ // Remove entry and try to shrink this hash set.
+ set_the_hole(EntryToIndex(entry));
+ ElementRemoved();
+ return Shrink(key);
+}
+
+
+Object* ObjectHashTable::Lookup(Object* key) {
+ ASSERT(IsKey(key));
+
+ // If the object does not have an identity hash, it was never used as a key.
+ { MaybeObject* maybe_hash = key->GetHash(OMIT_CREATION);
+ if (maybe_hash->ToObjectUnchecked()->IsUndefined()) {
+ return GetHeap()->undefined_value();
+ }
+ }
int entry = FindEntry(key);
if (entry == kNotFound) return GetHeap()->undefined_value();
return get(EntryToIndex(entry) + 1);
}
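All three query-style methods above share one fast path: GetHash(OMIT_CREATION)
yields undefined for an object that was never assigned an identity hash, and
such an object can never already be a key. A toy model of the two modes:

    struct ToyKey { bool has_hash; int hash; };

    // OMIT_CREATION: a pure query that never mutates the key.
    bool MightBeInTable(const ToyKey& key) { return key.has_hash; }

    // ALLOW_CREATION: assign a hash on first use, then reuse it forever.
    int EnsureHash(ToyKey* key, int fresh_hash) {
      if (!key->has_hash) { key->hash = fresh_hash; key->has_hash = true; }
      return key->hash;
    }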
-MaybeObject* ObjectHashTable::Put(JSObject* key, Object* value) {
+MaybeObject* ObjectHashTable::Put(Object* key, Object* value) {
+ ASSERT(IsKey(key));
+
// Make sure the key object has an identity hash code.
int hash;
- { MaybeObject* maybe_hash = key->GetIdentityHash(JSObject::ALLOW_CREATION);
+ { MaybeObject* maybe_hash = key->GetHash(ALLOW_CREATION);
if (maybe_hash->IsFailure()) return maybe_hash;
hash = Smi::cast(maybe_hash->ToObjectUnchecked())->value();
}
@@ -11337,16 +12251,16 @@ MaybeObject* ObjectHashTable::Put(JSObject* key, Object* value) {
}
-void ObjectHashTable::AddEntry(int entry, JSObject* key, Object* value) {
+void ObjectHashTable::AddEntry(int entry, Object* key, Object* value) {
set(EntryToIndex(entry), key);
set(EntryToIndex(entry) + 1, value);
ElementAdded();
}
-void ObjectHashTable::RemoveEntry(int entry, Heap* heap) {
- set_null(heap, EntryToIndex(entry));
- set_null(heap, EntryToIndex(entry) + 1);
+void ObjectHashTable::RemoveEntry(int entry) {
+ set_the_hole(EntryToIndex(entry));
+ set_the_hole(EntryToIndex(entry) + 1);
ElementRemoved();
}
@@ -11601,7 +12515,7 @@ int BreakPointInfo::GetBreakPointCount() {
// Multiple break points.
return FixedArray::cast(break_point_objects())->length();
}
-#endif
+#endif // ENABLE_DEBUGGER_SUPPORT
} } // namespace v8::internal
diff --git a/deps/v8/src/objects.h b/deps/v8/src/objects.h
index d9c7a8227..6c88cc01a 100644
--- a/deps/v8/src/objects.h
+++ b/deps/v8/src/objects.h
@@ -31,6 +31,7 @@
#include "allocation.h"
#include "builtins.h"
#include "list.h"
+#include "property-details.h"
#include "smart-array-pointer.h"
#include "unicode-inl.h"
#if V8_TARGET_ARCH_ARM
@@ -38,6 +39,8 @@
#elif V8_TARGET_ARCH_MIPS
#include "mips/constants-mips.h"
#endif
+#include "v8checks.h"
+
//
// Most object types in the V8 JavaScript heap are described in this file.
@@ -51,6 +54,8 @@
// - JSReceiver (suitable for property access)
// - JSObject
// - JSArray
+// - JSSet
+// - JSMap
// - JSWeakMap
// - JSRegExp
// - JSFunction
@@ -74,7 +79,7 @@
// - MapCache
// - Context
// - JSFunctionResultCache
-// - SerializedScopeInfo
+// - ScopeInfo
// - FixedDoubleArray
// - ExternalArray
// - ExternalPixelArray
@@ -120,24 +125,17 @@
// HeapObject: [32 bit direct pointer] (4 byte aligned) | 01
// Failure: [30 bit signed int] 11
-// Ecma-262 3rd 8.6.1
-enum PropertyAttributes {
- NONE = v8::None,
- READ_ONLY = v8::ReadOnly,
- DONT_ENUM = v8::DontEnum,
- DONT_DELETE = v8::DontDelete,
- ABSENT = 16 // Used in runtime to indicate a property is absent.
- // ABSENT can never be stored in or returned from a descriptor's attributes
- // bitfield. It is only used as a return value meaning the attributes of
- // a non-existent property.
-};
-
namespace v8 {
namespace internal {
enum ElementsKind {
- // The "fast" kind for tagged values. Must be first to make it possible
- // to efficiently check maps if they have fast elements.
+ // The "fast" kind for elements that only contain SMI values. Must be first
+ // to make it possible to efficiently check maps for this kind.
+ FAST_SMI_ONLY_ELEMENTS,
+
+ // The "fast" kind for tagged values. Must be second to make it possible to
+ // efficiently check maps for this and the FAST_SMI_ONLY_ELEMENTS kind
+ // together at once.
FAST_ELEMENTS,
// The "fast" kind for unwrapped, non-tagged double values.
@@ -160,101 +158,16 @@ enum ElementsKind {
// Derived constants from ElementsKind
FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND = EXTERNAL_BYTE_ELEMENTS,
LAST_EXTERNAL_ARRAY_ELEMENTS_KIND = EXTERNAL_PIXEL_ELEMENTS,
- FIRST_ELEMENTS_KIND = FAST_ELEMENTS,
+ FIRST_ELEMENTS_KIND = FAST_SMI_ONLY_ELEMENTS,
LAST_ELEMENTS_KIND = EXTERNAL_PIXEL_ELEMENTS
};
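The ordering constraints spelled out in the comments are what make the cheap
map checks possible: with the two tagged fast kinds at values 0 and 1, a
single unsigned comparison covers both. A sketch of the predicate this
enables (helper name illustrative), mirroring the HasFastTypeElements() calls
earlier in this diff:

    inline bool IsFastTypeElementsKind(ElementsKind kind) {
      // FAST_SMI_ONLY_ELEMENTS == 0 and FAST_ELEMENTS == 1, so one compare
      // tests both tagged "fast" representations at once.
      return static_cast<unsigned>(kind) <= FAST_ELEMENTS;
    }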
-static const int kElementsKindCount =
- LAST_ELEMENTS_KIND - FIRST_ELEMENTS_KIND + 1;
-
-// PropertyDetails captures type and attributes for a property.
-// They are used both in property dictionaries and instance descriptors.
-class PropertyDetails BASE_EMBEDDED {
- public:
- PropertyDetails(PropertyAttributes attributes,
- PropertyType type,
- int index = 0) {
- ASSERT(type != ELEMENTS_TRANSITION);
- ASSERT(TypeField::is_valid(type));
- ASSERT(AttributesField::is_valid(attributes));
- ASSERT(StorageField::is_valid(index));
-
- value_ = TypeField::encode(type)
- | AttributesField::encode(attributes)
- | StorageField::encode(index);
-
- ASSERT(type == this->type());
- ASSERT(attributes == this->attributes());
- ASSERT(index == this->index());
- }
-
- PropertyDetails(PropertyAttributes attributes,
- PropertyType type,
- ElementsKind elements_kind) {
- ASSERT(type == ELEMENTS_TRANSITION);
- ASSERT(TypeField::is_valid(type));
- ASSERT(AttributesField::is_valid(attributes));
- ASSERT(StorageField::is_valid(static_cast<int>(elements_kind)));
-
- value_ = TypeField::encode(type)
- | AttributesField::encode(attributes)
- | StorageField::encode(static_cast<int>(elements_kind));
+const int kElementsKindCount = LAST_ELEMENTS_KIND - FIRST_ELEMENTS_KIND + 1;
- ASSERT(type == this->type());
- ASSERT(attributes == this->attributes());
- ASSERT(elements_kind == this->elements_kind());
- }
-
- // Conversion for storing details as Object*.
- explicit inline PropertyDetails(Smi* smi);
- inline Smi* AsSmi();
-
- PropertyType type() { return TypeField::decode(value_); }
-
- bool IsTransition() {
- PropertyType t = type();
- ASSERT(t != INTERCEPTOR);
- return t == MAP_TRANSITION || t == CONSTANT_TRANSITION ||
- t == ELEMENTS_TRANSITION;
- }
-
- bool IsProperty() {
- return type() < FIRST_PHANTOM_PROPERTY_TYPE;
- }
-
- PropertyAttributes attributes() { return AttributesField::decode(value_); }
-
- int index() { return StorageField::decode(value_); }
-
- ElementsKind elements_kind() {
- ASSERT(type() == ELEMENTS_TRANSITION);
- return static_cast<ElementsKind>(StorageField::decode(value_));
- }
-
- inline PropertyDetails AsDeleted();
-
- static bool IsValidIndex(int index) {
- return StorageField::is_valid(index);
- }
-
- bool IsReadOnly() { return (attributes() & READ_ONLY) != 0; }
- bool IsDontDelete() { return (attributes() & DONT_DELETE) != 0; }
- bool IsDontEnum() { return (attributes() & DONT_ENUM) != 0; }
- bool IsDeleted() { return DeletedField::decode(value_) != 0;}
-
- // Bit fields in value_ (type, shift, size). Must be public so the
- // constants can be embedded in generated code.
- class TypeField: public BitField<PropertyType, 0, 4> {};
- class AttributesField: public BitField<PropertyAttributes, 4, 3> {};
- class DeletedField: public BitField<uint32_t, 7, 1> {};
- class StorageField: public BitField<uint32_t, 8, 32-8> {};
-
- static const int kInitialIndex = 1;
-
- private:
- uint32_t value_;
-};
+void PrintElementsKind(FILE* out, ElementsKind kind);
+inline bool IsMoreGeneralElementsKindTransition(ElementsKind from_kind,
+ ElementsKind to_kind);
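
// --- Editor's sketch (illustration only; not part of this patch) ---
// Why the enum order above matters: with FAST_SMI_ONLY_ELEMENTS == 0 and
// FAST_ELEMENTS == 1, "either fast object kind" is a single unsigned
// comparison instead of two equality checks. Simplified local mirror of the
// enum, for illustration:
enum SketchElementsKind {
  SKETCH_FAST_SMI_ONLY_ELEMENTS,  // 0: smis only
  SKETCH_FAST_ELEMENTS,           // 1: any tagged value
  SKETCH_FAST_DOUBLE_ELEMENTS     // 2: first kind outside the fast range
};
inline bool SketchHasFastTypeElements(SketchElementsKind kind) {
  return kind <= SKETCH_FAST_ELEMENTS;  // covers both fast kinds at once
}
// --- end sketch ---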
// Setter that skips the write barrier if mode is SKIP_WRITE_BARRIER.
enum WriteBarrierMode { SKIP_WRITE_BARRIER, UPDATE_WRITE_BARRIER };
@@ -276,8 +189,15 @@ enum NormalizedMapSharingMode {
};
+// Indicates whether a get method should implicitly create the object looked up.
+enum CreationFlag {
+ ALLOW_CREATION,
+ OMIT_CREATION
+};
+
+
// Instance size sentinel for objects of variable size.
-static const int kVariableSizeSentinel = 0;
+const int kVariableSizeSentinel = 0;
// All Maps have a field instance_type containing a InstanceType.
@@ -311,6 +231,9 @@ static const int kVariableSizeSentinel = 0;
V(EXTERNAL_SYMBOL_TYPE) \
V(EXTERNAL_SYMBOL_WITH_ASCII_DATA_TYPE) \
V(EXTERNAL_ASCII_SYMBOL_TYPE) \
+ V(SHORT_EXTERNAL_SYMBOL_TYPE) \
+ V(SHORT_EXTERNAL_SYMBOL_WITH_ASCII_DATA_TYPE) \
+ V(SHORT_EXTERNAL_ASCII_SYMBOL_TYPE) \
V(STRING_TYPE) \
V(ASCII_STRING_TYPE) \
V(CONS_STRING_TYPE) \
@@ -319,6 +242,9 @@ static const int kVariableSizeSentinel = 0;
V(EXTERNAL_STRING_TYPE) \
V(EXTERNAL_STRING_WITH_ASCII_DATA_TYPE) \
V(EXTERNAL_ASCII_STRING_TYPE) \
+ V(SHORT_EXTERNAL_STRING_TYPE) \
+ V(SHORT_EXTERNAL_STRING_WITH_ASCII_DATA_TYPE) \
+ V(SHORT_EXTERNAL_ASCII_STRING_TYPE) \
V(PRIVATE_EXTERNAL_ASCII_STRING_TYPE) \
\
V(MAP_TYPE) \
@@ -329,6 +255,7 @@ static const int kVariableSizeSentinel = 0;
V(HEAP_NUMBER_TYPE) \
V(FOREIGN_TYPE) \
V(BYTE_ARRAY_TYPE) \
+ V(FREE_SPACE_TYPE) \
/* Note: the order of these external array */ \
/* types is relied upon in */ \
/* Object::IsExternalArray(). */ \
@@ -418,6 +345,18 @@ static const int kVariableSizeSentinel = 0;
ExternalAsciiString::kSize, \
external_ascii_symbol, \
ExternalAsciiSymbol) \
+ V(SHORT_EXTERNAL_SYMBOL_TYPE, \
+ ExternalTwoByteString::kShortSize, \
+ short_external_symbol, \
+ ShortExternalSymbol) \
+ V(SHORT_EXTERNAL_SYMBOL_WITH_ASCII_DATA_TYPE, \
+ ExternalTwoByteString::kShortSize, \
+ short_external_symbol_with_ascii_data, \
+ ShortExternalSymbolWithAsciiData) \
+ V(SHORT_EXTERNAL_ASCII_SYMBOL_TYPE, \
+ ExternalAsciiString::kShortSize, \
+ short_external_ascii_symbol, \
+ ShortExternalAsciiSymbol) \
V(STRING_TYPE, \
kVariableSizeSentinel, \
string, \
@@ -453,7 +392,19 @@ static const int kVariableSizeSentinel = 0;
V(EXTERNAL_ASCII_STRING_TYPE, \
ExternalAsciiString::kSize, \
external_ascii_string, \
- ExternalAsciiString)
+ ExternalAsciiString) \
+ V(SHORT_EXTERNAL_STRING_TYPE, \
+ ExternalTwoByteString::kShortSize, \
+ short_external_string, \
+ ShortExternalString) \
+ V(SHORT_EXTERNAL_STRING_WITH_ASCII_DATA_TYPE, \
+ ExternalTwoByteString::kShortSize, \
+ short_external_string_with_ascii_data, \
+ ShortExternalStringWithAsciiData) \
+ V(SHORT_EXTERNAL_ASCII_STRING_TYPE, \
+ ExternalAsciiString::kShortSize, \
+ short_external_ascii_string, \
+ ShortExternalAsciiString)
// A struct is a simple object with a set of object-valued fields. Including an
// object type in this causes the compiler to generate most of the boilerplate
@@ -537,6 +488,11 @@ STATIC_ASSERT(IS_POWER_OF_TWO(kSlicedNotConsMask) && kSlicedNotConsMask != 0);
const uint32_t kAsciiDataHintMask = 0x08;
const uint32_t kAsciiDataHintTag = 0x08;
+// If bit 7 is clear and string representation indicates an external string,
+// then bit 4 indicates whether the data pointer is cached.
+const uint32_t kShortExternalStringMask = 0x10;
+const uint32_t kShortExternalStringTag = 0x10;
+
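// --- Editor's sketch (illustration only; not part of this patch) ---
// Decoding the new bit. Only kShortExternalStringMask/Tag come from this
// header; the other mask values are assumptions based on the comment above
// (bit 7 = non-string, low bits = string representation).
inline bool SketchIsShortExternalString(uint32_t instance_type) {
  const uint32_t kSketchIsNotStringMask = 0x80;          // assumed ("bit 7")
  const uint32_t kSketchRepresentationMask = 0x03;       // assumed
  const uint32_t kSketchExternalStringTag = 0x02;        // assumed
  const uint32_t kSketchShortExternalStringMask = 0x10;  // from this header
  return (instance_type & kSketchIsNotStringMask) == 0 &&
         (instance_type & kSketchRepresentationMask) ==
             kSketchExternalStringTag &&
         (instance_type & kSketchShortExternalStringMask) != 0;
}
// --- end sketch ---
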
// A ConsString with an empty string as the right side is a candidate
// for being shortcut by the garbage collector unless it is a
@@ -556,6 +512,13 @@ enum InstanceType {
ASCII_SYMBOL_TYPE = kAsciiStringTag | kSymbolTag | kSeqStringTag,
CONS_SYMBOL_TYPE = kTwoByteStringTag | kSymbolTag | kConsStringTag,
CONS_ASCII_SYMBOL_TYPE = kAsciiStringTag | kSymbolTag | kConsStringTag,
+ SHORT_EXTERNAL_SYMBOL_TYPE = kTwoByteStringTag | kSymbolTag |
+ kExternalStringTag | kShortExternalStringTag,
+ SHORT_EXTERNAL_SYMBOL_WITH_ASCII_DATA_TYPE =
+ kTwoByteStringTag | kSymbolTag | kExternalStringTag |
+ kAsciiDataHintTag | kShortExternalStringTag,
+ SHORT_EXTERNAL_ASCII_SYMBOL_TYPE = kAsciiStringTag | kExternalStringTag |
+ kSymbolTag | kShortExternalStringTag,
EXTERNAL_SYMBOL_TYPE = kTwoByteStringTag | kSymbolTag | kExternalStringTag,
EXTERNAL_SYMBOL_WITH_ASCII_DATA_TYPE =
kTwoByteStringTag | kSymbolTag | kExternalStringTag | kAsciiDataHintTag,
@@ -567,6 +530,13 @@ enum InstanceType {
CONS_ASCII_STRING_TYPE = kAsciiStringTag | kConsStringTag,
SLICED_STRING_TYPE = kTwoByteStringTag | kSlicedStringTag,
SLICED_ASCII_STRING_TYPE = kAsciiStringTag | kSlicedStringTag,
+ SHORT_EXTERNAL_STRING_TYPE =
+ kTwoByteStringTag | kExternalStringTag | kShortExternalStringTag,
+ SHORT_EXTERNAL_STRING_WITH_ASCII_DATA_TYPE =
+ kTwoByteStringTag | kExternalStringTag |
+ kAsciiDataHintTag | kShortExternalStringTag,
+ SHORT_EXTERNAL_ASCII_STRING_TYPE =
+ kAsciiStringTag | kExternalStringTag | kShortExternalStringTag,
EXTERNAL_STRING_TYPE = kTwoByteStringTag | kExternalStringTag,
EXTERNAL_STRING_WITH_ASCII_DATA_TYPE =
kTwoByteStringTag | kExternalStringTag | kAsciiDataHintTag,
@@ -585,6 +555,7 @@ enum InstanceType {
HEAP_NUMBER_TYPE,
FOREIGN_TYPE,
BYTE_ARRAY_TYPE,
+ FREE_SPACE_TYPE,
EXTERNAL_BYTE_ARRAY_TYPE, // FIRST_EXTERNAL_ARRAY_TYPE
EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE,
EXTERNAL_SHORT_ARRAY_TYPE,
@@ -621,24 +592,32 @@ enum InstanceType {
JS_MESSAGE_OBJECT_TYPE,
- JS_VALUE_TYPE, // FIRST_NON_CALLABLE_OBJECT_TYPE, FIRST_JS_RECEIVER_TYPE
+ // All the following types are subtypes of JSReceiver, which corresponds to
+ // objects in the JS sense. The first and the last type in this range are
+ // the two forms of function. This organization enables using the same
+ // compares for checking the JS_RECEIVER/SPEC_OBJECT range and the
+ // NONCALLABLE_JS_OBJECT range.
+ JS_FUNCTION_PROXY_TYPE, // FIRST_JS_RECEIVER_TYPE, FIRST_JS_PROXY_TYPE
+ JS_PROXY_TYPE, // LAST_JS_PROXY_TYPE
+
+ JS_VALUE_TYPE, // FIRST_JS_OBJECT_TYPE
JS_OBJECT_TYPE,
JS_CONTEXT_EXTENSION_OBJECT_TYPE,
JS_GLOBAL_OBJECT_TYPE,
JS_BUILTINS_OBJECT_TYPE,
JS_GLOBAL_PROXY_TYPE,
JS_ARRAY_TYPE,
- JS_PROXY_TYPE,
+ JS_SET_TYPE,
+ JS_MAP_TYPE,
JS_WEAK_MAP_TYPE,
- JS_REGEXP_TYPE, // LAST_NONCALLABLE_SPEC_OBJECT_TYPE
+ JS_REGEXP_TYPE,
- JS_FUNCTION_TYPE, // FIRST_CALLABLE_SPEC_OBJECT_TYPE
- JS_FUNCTION_PROXY_TYPE, // LAST_CALLABLE_SPEC_OBJECT_TYPE
+ JS_FUNCTION_TYPE, // LAST_JS_OBJECT_TYPE, LAST_JS_RECEIVER_TYPE
// Pseudo-types
FIRST_TYPE = 0x0,
- LAST_TYPE = JS_FUNCTION_PROXY_TYPE,
+ LAST_TYPE = JS_FUNCTION_TYPE,
INVALID_TYPE = FIRST_TYPE - 1,
FIRST_NONSTRING_TYPE = MAP_TYPE,
// Boundaries for testing for an external array.
@@ -651,21 +630,27 @@ enum InstanceType {
// are not contiguous in this enum! The enum ranges instead reflect the
// external class names, where proxies are treated as either ordinary objects
// or functions.
- FIRST_JS_RECEIVER_TYPE = JS_VALUE_TYPE,
+ FIRST_JS_RECEIVER_TYPE = JS_FUNCTION_PROXY_TYPE,
LAST_JS_RECEIVER_TYPE = LAST_TYPE,
+ // Boundaries for testing the types represented as JSObject
+ FIRST_JS_OBJECT_TYPE = JS_VALUE_TYPE,
+ LAST_JS_OBJECT_TYPE = LAST_TYPE,
+ // Boundaries for testing the types represented as JSProxy
+ FIRST_JS_PROXY_TYPE = JS_FUNCTION_PROXY_TYPE,
+ LAST_JS_PROXY_TYPE = JS_PROXY_TYPE,
+ // Boundaries for testing whether the type is a JavaScript object.
+ FIRST_SPEC_OBJECT_TYPE = FIRST_JS_RECEIVER_TYPE,
+ LAST_SPEC_OBJECT_TYPE = LAST_JS_RECEIVER_TYPE,
// Boundaries for testing the types for which typeof is "object".
- FIRST_NONCALLABLE_SPEC_OBJECT_TYPE = JS_VALUE_TYPE,
+ FIRST_NONCALLABLE_SPEC_OBJECT_TYPE = JS_PROXY_TYPE,
LAST_NONCALLABLE_SPEC_OBJECT_TYPE = JS_REGEXP_TYPE,
- // Boundaries for testing the types for which typeof is "function".
- FIRST_CALLABLE_SPEC_OBJECT_TYPE = JS_FUNCTION_TYPE,
- LAST_CALLABLE_SPEC_OBJECT_TYPE = JS_FUNCTION_PROXY_TYPE,
- // Boundaries for testing whether the type is a JavaScript object.
- FIRST_SPEC_OBJECT_TYPE = FIRST_NONCALLABLE_SPEC_OBJECT_TYPE,
- LAST_SPEC_OBJECT_TYPE = LAST_CALLABLE_SPEC_OBJECT_TYPE
+ // Note that the types for which typeof is "function" are not contiguous.
+ // Define this so that we can put assertions on discrete checks.
+ NUM_OF_CALLABLE_SPEC_OBJECT_TYPES = 2
};
-static const int kExternalArrayTypeCount = LAST_EXTERNAL_ARRAY_TYPE -
- FIRST_EXTERNAL_ARRAY_TYPE + 1;
+const int kExternalArrayTypeCount =
+ LAST_EXTERNAL_ARRAY_TYPE - FIRST_EXTERNAL_ARRAY_TYPE + 1;
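
// --- Editor's sketch (illustration only; not part of this patch) ---
// The payoff of the reordering above: each category is now one contiguous
// run, so classification is a range compare. Local mirror of the ordering
// only (real values differ, and many non-receiver types sit below):
enum SketchInstanceType {
  SKETCH_HEAP_NUMBER_TYPE,        // stands in for all non-receiver types
  SKETCH_JS_FUNCTION_PROXY_TYPE,  // FIRST_JS_RECEIVER_TYPE, FIRST_JS_PROXY
  SKETCH_JS_PROXY_TYPE,           // LAST_JS_PROXY_TYPE
  SKETCH_JS_VALUE_TYPE,           // FIRST_JS_OBJECT_TYPE
  SKETCH_JS_FUNCTION_TYPE         // LAST_JS_OBJECT_TYPE == LAST_TYPE
};
inline bool SketchIsJSReceiver(SketchInstanceType t) {
  // Single lower-bound compare: LAST_JS_RECEIVER_TYPE == LAST_TYPE.
  return t >= SKETCH_JS_FUNCTION_PROXY_TYPE;
}
inline bool SketchIsJSProxy(SketchInstanceType t) {
  return t >= SKETCH_JS_FUNCTION_PROXY_TYPE && t <= SKETCH_JS_PROXY_TYPE;
}
// --- end sketch ---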
STATIC_CHECK(JS_OBJECT_TYPE == Internals::kJSObjectType);
STATIC_CHECK(FIRST_NONSTRING_TYPE == Internals::kFirstNonstringType);
@@ -697,6 +682,7 @@ class ElementsAccessor;
class FixedArrayBase;
class ObjectVisitor;
class StringStream;
+class Failure;
struct ValueInfo : public Malloced {
ValueInfo() : type(FIRST_TYPE), ptr(NULL), str(NULL), number(0) { }
@@ -710,7 +696,6 @@ struct ValueInfo : public Malloced {
// A template-ized version of the IsXXX functions.
template <class C> static inline bool Is(Object* obj);
-class Failure;
class MaybeObject BASE_EMBEDDED {
public:
@@ -748,7 +733,7 @@ class MaybeObject BASE_EMBEDDED {
// Prints this object with details.
inline void Print() {
Print(stdout);
- };
+ }
inline void PrintLn() {
PrintLn(stdout);
}
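
// --- Editor's sketch (illustration only; not part of this patch) ---
// The calling convention behind the MUST_USE_RESULT MaybeObject* returns in
// this header: a MaybeObject is either a real Object or a retry-after-GC
// failure. Assuming MaybeObject::ToObject(Object**) as in V8 of this era:
//
//   MaybeObject* SketchGetFirst(JSObject* receiver) {
//     MaybeObject* maybe = receiver->GetElement(0);
//     Object* element;
//     if (!maybe->ToObject(&element)) return maybe;  // propagate failure
//     return element;  // Object* upcasts to MaybeObject*
//   }
// --- end sketch ---
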
@@ -791,6 +776,7 @@ class MaybeObject BASE_EMBEDDED {
V(ExternalDoubleArray) \
V(ExternalPixelArray) \
V(ByteArray) \
+ V(FreeSpace) \
V(JSReceiver) \
V(JSObject) \
V(JSContextExtensionObject) \
@@ -802,7 +788,7 @@ class MaybeObject BASE_EMBEDDED {
V(FixedDoubleArray) \
V(Context) \
V(GlobalContext) \
- V(SerializedScopeInfo) \
+ V(ScopeInfo) \
V(JSFunction) \
V(Code) \
V(Oddball) \
@@ -815,6 +801,8 @@ class MaybeObject BASE_EMBEDDED {
V(JSArray) \
V(JSProxy) \
V(JSFunctionProxy) \
+ V(JSSet) \
+ V(JSMap) \
V(JSWeakMap) \
V(JSRegExp) \
V(HashTable) \
@@ -835,6 +823,9 @@ class MaybeObject BASE_EMBEDDED {
V(AccessCheckNeeded) \
V(JSGlobalPropertyCell) \
+
+class JSReceiver;
+
// Object is the abstract superclass for all classes in the
// object hierarchy.
// Object does not use any virtual functions to avoid the
@@ -849,6 +840,8 @@ class Object : public MaybeObject {
HEAP_OBJECT_TYPE_LIST(IS_TYPE_FUNCTION_DECL)
#undef IS_TYPE_FUNCTION_DECL
+ inline bool IsFixedArrayBase();
+
// Returns true if this object is an instance of the specified
// function template.
inline bool IsInstanceOf(FunctionTemplateInfo* type);
@@ -859,6 +852,7 @@ class Object : public MaybeObject {
#undef DECLARE_STRUCT_PREDICATE
INLINE(bool IsSpecObject());
+ INLINE(bool IsSpecFunction());
// Oddball testing.
INLINE(bool IsUndefined());
@@ -867,6 +861,10 @@ class Object : public MaybeObject {
INLINE(bool IsTrue());
INLINE(bool IsFalse());
inline bool IsArgumentsMarker();
+ inline bool NonFailureIsHeapObject();
+
+ // Filler objects (fillers and free space objects).
+ inline bool IsFiller();
// Extract the number.
inline double Number();
@@ -899,20 +897,22 @@ class Object : public MaybeObject {
Object* receiver,
String* key,
PropertyAttributes* attributes);
+
+ static Handle<Object> GetProperty(Handle<Object> object,
+ Handle<Object> receiver,
+ LookupResult* result,
+ Handle<String> key,
+ PropertyAttributes* attributes);
+
MUST_USE_RESULT MaybeObject* GetProperty(Object* receiver,
LookupResult* result,
String* key,
PropertyAttributes* attributes);
- MUST_USE_RESULT MaybeObject* GetPropertyWithCallback(Object* receiver,
- Object* structure,
- String* name,
- Object* holder);
- MUST_USE_RESULT MaybeObject* GetPropertyWithHandler(Object* receiver,
- String* name,
- Object* handler);
+
MUST_USE_RESULT MaybeObject* GetPropertyWithDefinedGetter(Object* receiver,
- JSFunction* getter);
+ JSReceiver* getter);
+ static Handle<Object> GetElement(Handle<Object> object, uint32_t index);
inline MaybeObject* GetElement(uint32_t index);
// For use when we know that no exception can be thrown.
inline Object* GetElementNoExceptionThrown(uint32_t index);
@@ -921,6 +921,16 @@ class Object : public MaybeObject {
// Return the object's prototype (might be Heap::null_value()).
Object* GetPrototype();
+ // Returns the permanent hash code associated with this object, depending on
+ // the actual object type. May return a failure if no hash has been created
+ // yet, or if creating one would require a GC.
+ MUST_USE_RESULT MaybeObject* GetHash(CreationFlag flag);
+
+ // Checks whether this object has the same value as the given one. This
+ // function is implemented according to ES5, section 9.12 and can be used
+ // to implement the Harmony "egal" function.
+ bool SameValue(Object* other);
+
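// --- Editor's sketch (illustration only; not part of this patch) ---
// SameValue per ES5 9.12 differs from C++ == only for doubles; shown
// standalone (modern <cmath> used for brevity):
//
//   #include <cmath>
//   bool SketchSameValueDouble(double x, double y) {
//     if (std::isnan(x) && std::isnan(y)) return true;  // NaN matches NaN
//     if (x == 0 && y == 0) return std::signbit(x) == std::signbit(y);
//     return x == y;  // +0 vs -0 handled above; everything else is ==
//   }
// --- end sketch ---
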
// Tries to convert an object to an array index. Returns true and sets
// the output parameter if it succeeds.
inline bool ToArrayIndex(uint32_t* index);
@@ -1095,101 +1105,13 @@ class MapWord BASE_EMBEDDED {
// View this map word as a forwarding address.
inline HeapObject* ToForwardingAddress();
- // Marking phase of full collection: the map word of live objects is
- // marked, and may be marked as overflowed (eg, the object is live, its
- // children have not been visited, and it does not fit in the marking
- // stack).
-
- // True if this map word's mark bit is set.
- inline bool IsMarked();
-
- // Return this map word but with its mark bit set.
- inline void SetMark();
-
- // Return this map word but with its mark bit cleared.
- inline void ClearMark();
-
- // True if this map word's overflow bit is set.
- inline bool IsOverflowed();
-
- // Return this map word but with its overflow bit set.
- inline void SetOverflow();
-
- // Return this map word but with its overflow bit cleared.
- inline void ClearOverflow();
-
-
- // Compacting phase of a full compacting collection: the map word of live
- // objects contains an encoding of the original map address along with the
- // forwarding address (represented as an offset from the first live object
- // in the same page as the (old) object address).
-
- // Create a map word from a map address and a forwarding address offset.
- static inline MapWord EncodeAddress(Address map_address, int offset);
-
- // Return the map address encoded in this map word.
- inline Address DecodeMapAddress(MapSpace* map_space);
-
- // Return the forwarding offset encoded in this map word.
- inline int DecodeOffset();
-
-
- // During serialization: the map word is used to hold an encoded
- // address, and possibly a mark bit (set and cleared with SetMark
- // and ClearMark).
-
- // Create a map word from an encoded address.
- static inline MapWord FromEncodedAddress(Address address);
-
- inline Address ToEncodedAddress();
-
- // Bits used by the marking phase of the garbage collector.
- //
- // The first word of a heap object is normally a map pointer. The last two
- // bits are tagged as '01' (kHeapObjectTag). We reuse the last two bits to
- // mark an object as live and/or overflowed:
- // last bit = 0, marked as alive
- // second bit = 1, overflowed
- // An object is only marked as overflowed when it is marked as live while
- // the marking stack is overflowed.
- static const int kMarkingBit = 0; // marking bit
- static const int kMarkingMask = (1 << kMarkingBit); // marking mask
- static const int kOverflowBit = 1; // overflow bit
- static const int kOverflowMask = (1 << kOverflowBit); // overflow mask
-
- // Forwarding pointers and map pointer encoding. On 32 bit all the bits are
- // used.
- // +-----------------+------------------+-----------------+
- // |forwarding offset|page offset of map|page index of map|
- // +-----------------+------------------+-----------------+
- // ^ ^ ^
- // | | |
- // | | kMapPageIndexBits
- // | kMapPageOffsetBits
- // kForwardingOffsetBits
- static const int kMapPageOffsetBits = kPageSizeBits - kMapAlignmentBits;
- static const int kForwardingOffsetBits = kPageSizeBits - kObjectAlignmentBits;
-#ifdef V8_HOST_ARCH_64_BIT
- static const int kMapPageIndexBits = 16;
-#else
- // Use all the 32-bits to encode on a 32-bit platform.
- static const int kMapPageIndexBits =
- 32 - (kMapPageOffsetBits + kForwardingOffsetBits);
-#endif
-
- static const int kMapPageIndexShift = 0;
- static const int kMapPageOffsetShift =
- kMapPageIndexShift + kMapPageIndexBits;
- static const int kForwardingOffsetShift =
- kMapPageOffsetShift + kMapPageOffsetBits;
+ static inline MapWord FromRawValue(uintptr_t value) {
+ return MapWord(value);
+ }
- // Bit masks covering the different parts the encoding.
- static const uintptr_t kMapPageIndexMask =
- (1 << kMapPageOffsetShift) - 1;
- static const uintptr_t kMapPageOffsetMask =
- ((1 << kForwardingOffsetShift) - 1) & ~kMapPageIndexMask;
- static const uintptr_t kForwardingOffsetMask =
- ~(kMapPageIndexMask | kMapPageOffsetMask);
+ inline uintptr_t ToRawValue() {
+ return value_;
+ }
private:
// HeapObject calls the private constructor and directly reads the value.
@@ -1209,6 +1131,7 @@ class HeapObject: public Object {
// information.
inline Map* map();
inline void set_map(Map* value);
+ inline void set_map_unsafe(Map* value);
// During garbage collection, the map word of a heap object does not
// necessarily contain a map pointer.
@@ -1216,8 +1139,8 @@ class HeapObject: public Object {
inline void set_map_word(MapWord map_word);
// The Heap the object was allocated in. Used also to access Isolate.
- // This method can not be used during GC, it ASSERTs this.
inline Heap* GetHeap();
+
// Convenience method to get current isolate. This method can be
// accessed only when its result is the same as
// Isolate::Current(), it ASSERTs this. See also comment for GetHeap.
@@ -1246,31 +1169,6 @@ class HeapObject: public Object {
// GC internal.
inline int SizeFromMap(Map* map);
- // Support for the marking heap objects during the marking phase of GC.
- // True if the object is marked live.
- inline bool IsMarked();
-
- // Mutate this object's map pointer to indicate that the object is live.
- inline void SetMark();
-
- // Mutate this object's map pointer to remove the indication that the
- // object is live (ie, partially restore the map pointer).
- inline void ClearMark();
-
- // True if this object is marked as overflowed. Overflowed objects have
- // been reached and marked during marking of the heap, but their children
- // have not necessarily been marked and they have not been pushed on the
- // marking stack.
- inline bool IsOverflowed();
-
- // Mutate this object's map pointer to indicate that the object is
- // overflowed.
- inline void SetOverflow();
-
- // Mutate this object's map pointer to remove the indication that the
- // object is overflowed (ie, partially restore the map pointer).
- inline void ClearOverflow();
-
// Returns the field at offset in obj, as a read/write Object* reference.
// Does no checking, and is safe to use during GC, while maps are invalid.
// Does not invoke write barrier, so should only be assigned to
@@ -1294,18 +1192,14 @@ class HeapObject: public Object {
HeapObjectPrint(stdout);
}
void HeapObjectPrint(FILE* out);
+ void PrintHeader(FILE* out, const char* id);
#endif
+
#ifdef DEBUG
void HeapObjectVerify();
inline void VerifyObjectField(int offset);
inline void VerifySmiField(int offset);
-#endif
-
-#ifdef OBJECT_PRINT
- void PrintHeader(FILE* out, const char* id);
-#endif
-#ifdef DEBUG
// Verify a pointer is a valid HeapObject pointer that points to object
// areas in the heap.
static void VerifyHeapPointer(Object* p);
@@ -1448,8 +1342,21 @@ class JSReceiver: public HeapObject {
Object* value,
PropertyAttributes attributes,
StrictModeFlag strict_mode);
+ MUST_USE_RESULT MaybeObject* SetPropertyWithDefinedSetter(JSReceiver* setter,
+ Object* value);
MUST_USE_RESULT MaybeObject* DeleteProperty(String* name, DeleteMode mode);
+ MUST_USE_RESULT MaybeObject* DeleteElement(uint32_t index, DeleteMode mode);
+
+ // Set the index'th array element.
+ // Can cause GC, or return failure if GC is required.
+ MUST_USE_RESULT MaybeObject* SetElement(uint32_t index,
+ Object* value,
+ StrictModeFlag strict_mode,
+ bool check_prototype);
+
+ // Tests for the fast common case for property enumeration.
+ bool IsSimpleEnum();
// Returns the class name ([[Class]] property in the specification).
String* class_name();
@@ -1466,6 +1373,7 @@ class JSReceiver: public HeapObject {
// Can cause a GC.
inline bool HasProperty(String* name);
inline bool HasLocalProperty(String* name);
+ inline bool HasElement(uint32_t index);
// Return the object's prototype (might be Heap::null_value()).
inline Object* GetPrototype();
@@ -1474,11 +1382,18 @@ class JSReceiver: public HeapObject {
MUST_USE_RESULT MaybeObject* SetPrototype(Object* value,
bool skip_hidden_prototypes);
+ // Retrieves a permanent object identity hash code. The undefined value
+ // might be returned if no hash has been created yet and OMIT_CREATION was
+ // used.
+ inline MUST_USE_RESULT MaybeObject* GetIdentityHash(CreationFlag flag);
+
// Lookup a property. If found, the result is valid and has
// detailed information.
void LocalLookup(String* name, LookupResult* result);
void Lookup(String* name, LookupResult* result);
+ protected:
+ Smi* GenerateIdentityHash();
+
private:
PropertyAttributes GetPropertyAttribute(JSReceiver* receiver,
LookupResult* result,
@@ -1525,8 +1440,14 @@ class JSObject: public JSReceiver {
MUST_USE_RESULT inline MaybeObject* ResetElements();
inline ElementsKind GetElementsKind();
inline ElementsAccessor* GetElementsAccessor();
+ inline bool HasFastSmiOnlyElements();
inline bool HasFastElements();
+ // Returns true if an object has either FAST_ELEMENTS or
+ // FAST_SMI_ONLY_ELEMENTS elements. TODO(danno): Rename HasFastTypeElements
+ // to HasFastElements() and HasFastElements to HasFastObjectElements.
+ inline bool HasFastTypeElements();
inline bool HasFastDoubleElements();
+ inline bool HasNonStrictArgumentsElements();
inline bool HasDictionaryElements();
inline bool HasExternalPixelElements();
inline bool HasExternalArrayElements();
@@ -1554,6 +1475,11 @@ class JSObject: public JSReceiver {
// a dictionary, and it will stay a dictionary.
MUST_USE_RESULT MaybeObject* PrepareSlowElementsForSort(uint32_t limit);
+ MUST_USE_RESULT MaybeObject* GetPropertyWithCallback(Object* receiver,
+ Object* structure,
+ String* name);
+
+ // Can cause GC.
MUST_USE_RESULT MaybeObject* SetPropertyForResult(LookupResult* result,
String* key,
Object* value,
@@ -1571,8 +1497,6 @@ class JSObject: public JSReceiver {
Object* value,
JSObject* holder,
StrictModeFlag strict_mode);
- MUST_USE_RESULT MaybeObject* SetPropertyWithDefinedSetter(JSFunction* setter,
- Object* value);
MUST_USE_RESULT MaybeObject* SetPropertyWithInterceptor(
String* name,
Object* value,
@@ -1660,43 +1584,44 @@ class JSObject: public JSReceiver {
// Accessors for hidden properties object.
//
// Hidden properties are not local properties of the object itself.
- // Instead they are stored on an auxiliary JSObject stored as a local
+ // Instead they are stored in an auxiliary structure kept as a local
// property with a special name Heap::hidden_symbol(). But if the
// receiver is a JSGlobalProxy then the auxiliary object is a property
- // of its prototype.
- //
- // Has/Get/SetHiddenPropertiesObject methods don't allow the holder to be
- // a JSGlobalProxy. Use BypassGlobalProxy method above to get to the real
- // holder.
- //
- // These accessors do not touch interceptors or accessors.
- inline bool HasHiddenPropertiesObject();
- inline Object* GetHiddenPropertiesObject();
- MUST_USE_RESULT inline MaybeObject* SetHiddenPropertiesObject(
- Object* hidden_obj);
-
- // Indicates whether the hidden properties object should be created.
- enum HiddenPropertiesFlag { ALLOW_CREATION, OMIT_CREATION };
-
- // Retrieves the hidden properties object.
- //
- // The undefined value might be returned in case no hidden properties object
- // is present and creation was omitted.
- inline bool HasHiddenProperties();
- MUST_USE_RESULT MaybeObject* GetHiddenProperties(HiddenPropertiesFlag flag);
-
- // Retrieves a permanent object identity hash code.
- //
- // The identity hash is stored as a hidden property. The undefined value might
- // be returned in case no hidden properties object is present and creation was
- // omitted.
- MUST_USE_RESULT MaybeObject* GetIdentityHash(HiddenPropertiesFlag flag);
+ // of its prototype, and a detached global proxy cannot have hidden
+ // properties at all.
+
+ // Sets a hidden property on this object. Returns this object if successful,
+ // undefined if called on a detached proxy, and a failure if a GC is
+ // required.
+ MaybeObject* SetHiddenProperty(String* key, Object* value);
+ // Gets the value of a hidden property with the given key. Returns undefined
+ // if the property doesn't exist (or if called on a detached proxy),
+ // otherwise returns the value set for the key.
+ Object* GetHiddenProperty(String* key);
+ // Deletes a hidden property. Deleting a non-existing property is
+ // considered successful.
+ void DeleteHiddenProperty(String* key);
+ // Returns true if the object has a property with the hidden symbol as name.
+ bool HasHiddenProperties();
+
+ MUST_USE_RESULT MaybeObject* GetIdentityHash(CreationFlag flag);
+ MUST_USE_RESULT MaybeObject* SetIdentityHash(Object* hash, CreationFlag flag);
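
// --- Editor's sketch (illustration only; not part of this patch) ---
// Intended use of the new hidden-property API, per the comments above;
// MaybeObject::IsFailure() is assumed from the surrounding codebase:
//
//   MaybeObject* SketchTag(JSObject* obj, String* key, Object* tag) {
//     MaybeObject* maybe = obj->SetHiddenProperty(key, tag);
//     if (maybe->IsFailure()) return maybe;  // caller retries after GC
//     return maybe;  // on success this is 'obj' itself, per the comment
//   }
// --- end sketch ---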
MUST_USE_RESULT MaybeObject* DeleteProperty(String* name, DeleteMode mode);
MUST_USE_RESULT MaybeObject* DeleteElement(uint32_t index, DeleteMode mode);
- // Tests for the fast common case for property enumeration.
- bool IsSimpleEnum();
+ inline void ValidateSmiOnlyElements();
+
+ // Makes sure that this object can contain non-smi Object as elements.
+ inline MaybeObject* EnsureCanContainNonSmiElements();
+
+ // Makes sure that this object can contain the specified elements.
+ inline MaybeObject* EnsureCanContainElements(Object** elements,
+ uint32_t count);
+ inline MaybeObject* EnsureCanContainElements(FixedArray* elements);
+ MaybeObject* EnsureCanContainElements(Arguments* arguments,
+ uint32_t first_arg,
+ uint32_t arg_count);
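
// --- Editor's sketch (illustration only; not part of this patch) ---
// The invariant these helpers maintain: a FAST_SMI_ONLY_ELEMENTS backing
// store may never hold a non-smi, so stores are guarded like this
// (MaybeObject::IsFailure() assumed from the surrounding codebase):
//
//   MaybeObject* SketchPrepareStore(JSObject* obj, Object* value) {
//     if (!value->IsSmi()) {
//       // Transitions the elements kind if needed; may fail (needs GC).
//       MaybeObject* maybe = obj->EnsureCanContainNonSmiElements();
//       if (maybe->IsFailure()) return maybe;
//     }
//     return obj;  // now safe to store 'value'
//   }
// --- end sketch ---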
// Do we want to keep the elements in fast case when increasing the
// capacity?
@@ -1711,7 +1636,6 @@ class JSObject: public JSReceiver {
bool CanConvertToFastDoubleElements();
// Tells whether the index'th element is present.
- inline bool HasElement(uint32_t index);
bool HasElementWithReceiver(JSReceiver* receiver, uint32_t index);
// Computes the new capacity when expanding the elements of a JSObject.
@@ -1747,6 +1671,7 @@ class JSObject: public JSReceiver {
Object* value,
StrictModeFlag strict_mode,
bool check_prototype);
+
MUST_USE_RESULT MaybeObject* SetDictionaryElement(uint32_t index,
Object* value,
StrictModeFlag strict_mode,
@@ -1769,15 +1694,21 @@ class JSObject: public JSReceiver {
// The undefined object if index is out of bounds.
MaybeObject* GetElementWithInterceptor(Object* receiver, uint32_t index);
+ enum SetFastElementsCapacityMode {
+ kAllowSmiOnlyElements,
+ kDontAllowSmiOnlyElements
+ };
+
// Replace the elements' backing store with fast elements of the given
// capacity. Update the length for JSArrays. Returns the new backing
// store.
- MUST_USE_RESULT MaybeObject* SetFastElementsCapacityAndLength(int capacity,
- int length);
+ MUST_USE_RESULT MaybeObject* SetFastElementsCapacityAndLength(
+ int capacity,
+ int length,
+ SetFastElementsCapacityMode set_capacity_mode);
MUST_USE_RESULT MaybeObject* SetFastDoubleElementsCapacityAndLength(
int capacity,
int length);
- MUST_USE_RESULT MaybeObject* SetSlowElements(Object* length);
// Lookup interceptors are used for handling properties controlled by host
// objects.
@@ -1800,10 +1731,7 @@ class JSObject: public JSReceiver {
inline int GetInternalFieldOffset(int index);
inline Object* GetInternalField(int index);
inline void SetInternalField(int index, Object* value);
-
- // Lookup a property. If found, the result is valid and has
- // detailed information.
- void LocalLookup(String* name, LookupResult* result);
+ inline void SetInternalField(int index, Smi* value);
// The following lookup functions skip interceptors.
void LocalLookupRealNamedProperty(String* name, LookupResult* result);
@@ -1860,6 +1788,15 @@ class JSObject: public JSReceiver {
Object* value,
PropertyAttributes attributes);
+ // Returns a new map with all transitions dropped from the object's current
+ // map and the ElementsKind set.
+ static Handle<Map> GetElementsTransitionMap(Handle<JSObject> object,
+ ElementsKind to_kind);
+ MUST_USE_RESULT MaybeObject* GetElementsTransitionMap(
+ ElementsKind elements_kind);
+
+ MUST_USE_RESULT MaybeObject* TransitionElementsKind(ElementsKind to_kind);
+
// Converts a descriptor of any other type to a real field,
// backed by the properties array. Descriptors of visible
// types, such as CONSTANT_FUNCTION, keep their enumeration order.
@@ -1906,6 +1843,10 @@ class JSObject: public JSReceiver {
// dictionary. Returns the backing after conversion.
MUST_USE_RESULT MaybeObject* NormalizeElements();
+ static void UpdateMapCodeCache(Handle<JSObject> object,
+ Handle<String> name,
+ Handle<Code> code);
+
MUST_USE_RESULT MaybeObject* UpdateMapCodeCache(String* name, Code* code);
// Transform slow named properties to fast variants.
@@ -1925,11 +1866,14 @@ class JSObject: public JSReceiver {
WriteBarrierMode mode
= UPDATE_WRITE_BARRIER);
- // initializes the body after properties slot, properties slot is
- // initialized by set_properties
- // Note: this call does not update write barrier, it is caller's
- // reponsibility to ensure that *v* can be collected without WB here.
- inline void InitializeBody(int object_size, Object* value);
+ // Initializes the body after the properties slot; the properties slot
+ // itself is initialized by set_properties. Fills the pre-allocated fields
+ // with pre_allocated_value and the rest with filler_value.
+ // Note: this call does not update the write barrier; the caller is
+ // responsible for ensuring that |filler_value| can be collected without a
+ // write barrier here.
+ inline void InitializeBody(Map* map,
+ Object* pre_allocated_value,
+ Object* filler_value);
// Check whether this object references another object
bool ReferencesObject(Object* obj);
@@ -1964,6 +1908,10 @@ class JSObject: public JSReceiver {
void PrintElements(FILE* out);
#endif
+ void PrintElementsTransition(
+ FILE* file, ElementsKind from_kind, FixedArrayBase* from_elements,
+ ElementsKind to_kind, FixedArrayBase* to_elements);
+
#ifdef DEBUG
// Structure for collecting spill information about JSObjects.
class SpillInformation {
@@ -1987,6 +1935,11 @@ class JSObject: public JSReceiver {
#endif
Object* SlowReverseLookup(Object* value);
+ // Getters and setters are stored in a fixed array property.
+ // These are constants for their indices.
+ static const int kGetterIndex = 0;
+ static const int kSetterIndex = 1;
+
// Maximal number of fast properties for the JSObject. Used to
// restrict the number of map transitions to avoid an explosion in
// the number of maps for objects used as dictionaries.
@@ -2054,6 +2007,18 @@ class JSObject: public JSReceiver {
StrictModeFlag strict_mode,
bool check_prototype);
+ // Searches the prototype chain for a callback setter and sets the property
+ // with the setter if it finds one. The '*found' flag indicates whether
+ // a setter was found or not.
+ // This function can cause GC and can return a failure result with
+ // '*found==true'.
+ MUST_USE_RESULT MaybeObject* SetPropertyWithCallbackSetterInPrototypes(
+ String* name,
+ Object* value,
+ PropertyAttributes attributes,
+ bool* found,
+ StrictModeFlag strict_mode);
+
MUST_USE_RESULT MaybeObject* DeletePropertyPostInterceptor(String* name,
DeleteMode mode);
MUST_USE_RESULT MaybeObject* DeletePropertyWithInterceptor(String* name);
@@ -2092,6 +2057,15 @@ class JSObject: public JSReceiver {
void LookupInDescriptor(String* name, LookupResult* result);
+ // Returns the hidden properties backing store object, currently
+ // a StringDictionary, stored on this object.
+ // If no hidden properties object has been put on this object,
+ // return undefined, unless create_if_absent is true, in which case
+ // a new dictionary is created, added to this object, and returned.
+ MaybeObject* GetHiddenPropertiesDictionary(bool create_if_absent);
+ // Updates the existing hidden properties dictionary.
+ MaybeObject* SetHiddenPropertiesDictionary(StringDictionary* dictionary);
+
DISALLOW_IMPLICIT_CONSTRUCTORS(JSObject);
};
@@ -2172,7 +2146,7 @@ class FixedArray: public FixedArrayBase {
// Maximal allowed size, in bytes, of a single FixedArray.
// Prevents overflowing size computations, as well as extreme memory
// consumption.
- static const int kMaxSize = 512 * MB;
+ static const int kMaxSize = 128 * MB * kPointerSize;
// Maximally allowed length of a FixedArray.
static const int kMaxLength = (kMaxSize - kHeaderSize) / kPointerSize;
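
// --- Editor's sketch (illustration only; not part of this patch) ---
// Effect of the kMaxSize change: scaling by kPointerSize keeps kMaxLength
// (a count of elements, not bytes) identical on 32-bit and 64-bit targets,
// instead of halving it on 64-bit (header size ignored for clarity):
//   32-bit: 128 * MB * 4 = 512 MB  / 4 bytes per slot = ~134M elements
//   64-bit: 128 * MB * 8 = 1024 MB / 8 bytes per slot = ~134M elements
// --- end sketch ---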
@@ -2209,7 +2183,9 @@ class FixedArray: public FixedArrayBase {
protected:
// Set operation on FixedArray without using write barriers. Can
// only be used for storing old space objects or smis.
- static inline void fast_set(FixedArray* array, int index, Object* value);
+ static inline void NoWriteBarrierSet(FixedArray* array,
+ int index,
+ Object* value);
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(FixedArray);
@@ -2232,6 +2208,9 @@ class FixedDoubleArray: public FixedArrayBase {
// Checking for the hole.
inline bool is_the_hole(int index);
+ // Copy operations
+ MUST_USE_RESULT inline MaybeObject* Copy();
+
// Garbage collection support.
inline static int SizeFor(int length) {
return kHeaderSize + length * kDoubleSize;
@@ -2271,6 +2250,9 @@ class FixedDoubleArray: public FixedArrayBase {
};
+class IncrementalMarking;
+
+
// DescriptorArrays are fixed arrays used to hold instance descriptors.
// The format of the these objects is:
// TODO(1399): It should be possible to make room for bit_field3 in the map
@@ -2312,7 +2294,7 @@ class DescriptorArray: public FixedArray {
// Set next enumeration index and flush any enum cache.
void SetNextEnumerationIndex(int value) {
if (!IsEmpty()) {
- fast_set(this, kEnumerationIndexIndex, Smi::FromInt(value));
+ set(kEnumerationIndexIndex, Smi::FromInt(value));
}
}
bool HasEnumCache() {
@@ -2349,13 +2331,27 @@ class DescriptorArray: public FixedArray {
inline bool IsNullDescriptor(int descriptor_number);
inline bool IsDontEnum(int descriptor_number);
+ class WhitenessWitness {
+ public:
+ inline explicit WhitenessWitness(DescriptorArray* array);
+ inline ~WhitenessWitness();
+
+ private:
+ IncrementalMarking* marking_;
+ };
+
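// --- Editor's sketch (illustration only; not part of this patch) ---
// How the witness is meant to be used: constructing it once records that
// this array is still white (unmarked) to the incremental marker, which is
// what makes the barrier-free Set/CopyFrom/Sort below safe. Hypothetical
// caller:
//
//   void SketchFill(DescriptorArray* array, Descriptor* descs, int n) {
//     DescriptorArray::WhitenessWitness witness(array);
//     for (int i = 0; i < n; i++) {
//       array->Set(i, &descs[i], witness);  // no write barrier needed
//     }
//     array->Sort(witness);
//   }
// --- end sketch ---
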
// Accessor for complete descriptor.
inline void Get(int descriptor_number, Descriptor* desc);
- inline void Set(int descriptor_number, Descriptor* desc);
+ inline void Set(int descriptor_number,
+ Descriptor* desc,
+ const WhitenessWitness&);
// Transfer complete descriptor from another descriptor array to
// this one.
- inline void CopyFrom(int index, DescriptorArray* src, int src_index);
+ inline void CopyFrom(int index,
+ DescriptorArray* src,
+ int src_index,
+ const WhitenessWitness&);
// Copy the descriptor array, insert a new descriptor and optionally
// remove map transitions. If the descriptor is already present, it is
@@ -2372,11 +2368,11 @@ class DescriptorArray: public FixedArray {
// Sort the instance descriptors by the hash codes of their keys.
// Does not check for duplicates.
- void SortUnchecked();
+ void SortUnchecked(const WhitenessWitness&);
// Sort the instance descriptors by the hash codes of their keys.
// Checks the result for duplicates.
- void Sort();
+ void Sort(const WhitenessWitness&);
// Search the instance descriptors for given name.
inline int Search(String* name);
@@ -2469,10 +2465,12 @@ class DescriptorArray: public FixedArray {
NULL_DESCRIPTOR;
}
// Swap operation on FixedArray without using write barriers.
- static inline void fast_swap(FixedArray* array, int first, int second);
+ static inline void NoWriteBarrierSwap(FixedArray* array,
+ int first,
+ int second);
// Swap descriptor first and second.
- inline void Swap(int first, int second);
+ inline void NoWriteBarrierSwapDescriptors(int first, int second);
FixedArray* GetContentArray() {
return FixedArray::cast(get(kContentArrayIndex));
@@ -2490,7 +2488,7 @@ class DescriptorArray: public FixedArray {
// encountered and stops when unused elements are encountered.
//
// - Elements with key == undefined have not been used yet.
-// - Elements with key == null have been deleted.
+// - Elements with key == the_hole have been deleted.
//
// The hash table class is parameterized with a Shape and a Key.
// Shape must be a class with the following interface:
@@ -2559,10 +2557,10 @@ class HashTable: public FixedArray {
// Returns the key at entry.
Object* KeyAt(int entry) { return get(EntryToIndex(entry)); }
- // Tells whether k is a real key. Null and undefined are not allowed
+ // Tells whether k is a real key. The hole and undefined are not allowed
// as keys and can be used to indicate missing or deleted elements.
bool IsKey(Object* k) {
- return !k->IsNull() && !k->IsUndefined();
+ return !k->IsTheHole() && !k->IsUndefined();
}
// Garbage collection support.
@@ -2614,12 +2612,12 @@ class HashTable: public FixedArray {
// Update the number of elements in the hash table.
void SetNumberOfElements(int nof) {
- fast_set(this, kNumberOfElementsIndex, Smi::FromInt(nof));
+ set(kNumberOfElementsIndex, Smi::FromInt(nof));
}
// Update the number of deleted elements in the hash table.
void SetNumberOfDeletedElements(int nod) {
- fast_set(this, kNumberOfDeletedElementsIndex, Smi::FromInt(nod));
+ set(kNumberOfDeletedElementsIndex, Smi::FromInt(nod));
}
// Sets the capacity of the hash table.
@@ -2629,7 +2627,7 @@ class HashTable: public FixedArray {
// and non-zero.
ASSERT(capacity > 0);
ASSERT(capacity <= kMaxCapacity);
- fast_set(this, kCapacityIndex, Smi::FromInt(capacity));
+ set(kCapacityIndex, Smi::FromInt(capacity));
}
@@ -2837,7 +2835,7 @@ class Dictionary: public HashTable<Shape, Key> {
// Accessors for next enumeration index.
void SetNextEnumerationIndex(int index) {
- this->fast_set(this, kNextEnumerationIndexIndex, Smi::FromInt(index));
+ this->set(kNextEnumerationIndexIndex, Smi::FromInt(index));
}
int NextEnumerationIndex() {
@@ -2917,7 +2915,7 @@ class StringDictionary: public Dictionary<StringDictionaryShape, String*> {
JSObject* obj,
int unused_property_fields);
- // Find entry for key otherwise return kNotFound. Optimzed version of
+ // Find entry for key, otherwise return kNotFound. Optimized version of
// HashTable::FindEntry.
int FindEntry(String* key);
};
@@ -2968,9 +2966,6 @@ class NumberDictionary: public Dictionary<NumberDictionaryShape, uint32_t> {
// requires_slow_elements returns false.
inline uint32_t max_number_key();
- // Remove all entries were key is a number and (from <= key && key < to).
- void RemoveNumberEntries(uint32_t from, uint32_t to);
-
// Bit masks.
static const int kRequiresSlowElementsMask = 1;
static const int kRequiresSlowElementsTagSize = 1;
@@ -2978,20 +2973,41 @@ class NumberDictionary: public Dictionary<NumberDictionaryShape, uint32_t> {
};
+template <int entrysize>
class ObjectHashTableShape {
public:
- static inline bool IsMatch(JSObject* key, Object* other);
- static inline uint32_t Hash(JSObject* key);
- static inline uint32_t HashForObject(JSObject* key, Object* object);
- MUST_USE_RESULT static inline MaybeObject* AsObject(JSObject* key);
+ static inline bool IsMatch(Object* key, Object* other);
+ static inline uint32_t Hash(Object* key);
+ static inline uint32_t HashForObject(Object* key, Object* object);
+ MUST_USE_RESULT static inline MaybeObject* AsObject(Object* key);
static const int kPrefixSize = 0;
- static const int kEntrySize = 2;
+ static const int kEntrySize = entrysize;
};
-// ObjectHashTable maps keys that are JavaScript objects to object values by
+// ObjectHashSet holds keys that are arbitrary objects by using the identity
+// hash of the key for hashing purposes.
+class ObjectHashSet: public HashTable<ObjectHashTableShape<1>, Object*> {
+ public:
+ static inline ObjectHashSet* cast(Object* obj) {
+ ASSERT(obj->IsHashTable());
+ return reinterpret_cast<ObjectHashSet*>(obj);
+ }
+
+ // Looks up whether the given key is part of this hash set.
+ bool Contains(Object* key);
+
+ // Adds the given key to this hash set.
+ MUST_USE_RESULT MaybeObject* Add(Object* key);
+
+ // Removes the given key from this hash set.
+ MUST_USE_RESULT MaybeObject* Remove(Object* key);
+};
+
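// --- Editor's sketch (illustration only; not part of this patch) ---
// Calling convention implied by the MaybeObject* returns above: the table
// may be reallocated on growth, so the caller must adopt the returned one.
// MaybeObject::ToObject(Object**) is assumed from the surrounding codebase.
inline MaybeObject* SketchAddKey(ObjectHashSet* set,
                                 Object* key,
                                 ObjectHashSet** out) {
  MaybeObject* maybe = set->Add(key);
  Object* result;
  if (!maybe->ToObject(&result)) return maybe;  // allocation failed
  *out = ObjectHashSet::cast(result);           // possibly a new table
  return result;
}
// --- end sketch ---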
+
+// ObjectHashTable maps keys that are arbitrary objects to object values by
// using the identity hash of the key for hashing purposes.
-class ObjectHashTable: public HashTable<ObjectHashTableShape, JSObject*> {
+class ObjectHashTable: public HashTable<ObjectHashTableShape<2>, Object*> {
public:
static inline ObjectHashTable* cast(Object* obj) {
ASSERT(obj->IsHashTable());
@@ -3000,18 +3016,17 @@ class ObjectHashTable: public HashTable<ObjectHashTableShape, JSObject*> {
// Looks up the value associated with the given key. The undefined value is
// returned in case the key is not present.
- Object* Lookup(JSObject* key);
+ Object* Lookup(Object* key);
// Adds (or overwrites) the value associated with the given key. Mapping a
// key to the undefined value causes removal of the whole entry.
- MUST_USE_RESULT MaybeObject* Put(JSObject* key, Object* value);
+ MUST_USE_RESULT MaybeObject* Put(Object* key, Object* value);
private:
friend class MarkCompactCollector;
- void AddEntry(int entry, JSObject* key, Object* value);
- void RemoveEntry(int entry, Heap* heap);
- inline void RemoveEntry(int entry);
+ void AddEntry(int entry, Object* key, Object* value);
+ void RemoveEntry(int entry);
// Returns the index to the value of an entry.
static inline int EntryToValueIndex(int entry) {
@@ -3058,6 +3073,207 @@ class JSFunctionResultCache: public FixedArray {
};
+// ScopeInfo represents information about different scopes of a source
+// program and the allocation of the scope's variables. Scope information
+// is stored in a compressed form in ScopeInfo objects and is used
+// at runtime (stack dumps, deoptimization, etc.).
+
+// This object provides quick access to scope info details for runtime
+// routines.
+class ScopeInfo : public FixedArray {
+ public:
+ static inline ScopeInfo* cast(Object* object);
+
+ // Return the type of this scope.
+ ScopeType Type();
+
+ // Does this scope call eval?
+ bool CallsEval();
+
+ // Return the language mode of this scope.
+ LanguageMode language_mode();
+
+ // Does this scope make a non-strict eval call?
+ bool CallsNonStrictEval() {
+ return CallsEval() && (language_mode() == CLASSIC_MODE);
+ }
+
+ // Return the total number of locals allocated on the stack and in the
+ // context. This includes the parameters that are allocated in the context.
+ int LocalCount();
+
+ // Return the number of stack slots for code. This number consists of two
+ // parts:
+ // 1. One stack slot per stack allocated local.
+ // 2. One stack slot for the function name if it is stack allocated.
+ int StackSlotCount();
+
+ // Return the number of context slots for code if a context is allocated. This
+ // number consists of three parts:
+ // 1. Size of fixed header for every context: Context::MIN_CONTEXT_SLOTS
+ // 2. One context slot per context allocated local.
+ // 3. One context slot for the function name if it is context allocated.
+ // Parameters allocated in the context count as context allocated locals. If
+ // no contexts are allocated for this scope ContextLength returns 0.
+ int ContextLength();
+
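// --- Editor's sketch (illustration only; not part of this patch) ---
// The arithmetic described above, with a stand-in for
// Context::MIN_CONTEXT_SLOTS (assumed constant):
//
//   int SketchContextLength(bool has_context, int context_local_count,
//                           bool function_name_in_context) {
//     const int kSketchMinContextSlots = 4;  // stand-in value
//     if (!has_context) return 0;
//     return kSketchMinContextSlots + context_local_count +
//            (function_name_in_context ? 1 : 0);
//   }
// --- end sketch ---
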
+ // Is this scope the scope of a named function expression?
+ bool HasFunctionName();
+
+ // Return true if this scope has context-allocated locals.
+ bool HasHeapAllocatedLocals();
+
+ // Return true if a context is allocated for this scope.
+ bool HasContext();
+
+ // Return the function_name if present.
+ String* FunctionName();
+
+ // Return the name of the given parameter.
+ String* ParameterName(int var);
+
+ // Return the name of the given local.
+ String* LocalName(int var);
+
+ // Return the name of the given stack local.
+ String* StackLocalName(int var);
+
+ // Return the name of the given context local.
+ String* ContextLocalName(int var);
+
+ // Return the mode of the given context local.
+ VariableMode ContextLocalMode(int var);
+
+ // Return the initialization flag of the given context local.
+ InitializationFlag ContextLocalInitFlag(int var);
+
+ // Lookup support for serialized scope info. Returns the stack slot index
+ // for a given slot name if the slot is present; otherwise returns a value
+ // < 0. The name must be a symbol (canonicalized).
+ int StackSlotIndex(String* name);
+
+ // Lookup support for serialized scope info. Returns the
+ // context slot index for a given slot name if the slot is present; otherwise
+ // returns a value < 0. The name must be a symbol (canonicalized).
+ // If the slot is present and mode != NULL, sets *mode to the corresponding
+ // mode for that variable.
+ int ContextSlotIndex(String* name,
+ VariableMode* mode,
+ InitializationFlag* init_flag);
+
+ // Lookup support for serialized scope info. Returns the
+ // parameter index for a given parameter name if the parameter is present;
+ // otherwise returns a value < 0. The name must be a symbol (canonicalized).
+ int ParameterIndex(String* name);
+
+ // Lookup support for serialized scope info. Returns the
+ // function context slot index if the function name is present (named
+ // function expressions, only), otherwise returns a value < 0. The name
+ // must be a symbol (canonicalized).
+ int FunctionContextSlotIndex(String* name, VariableMode* mode);
+
+ static Handle<ScopeInfo> Create(Scope* scope);
+
+ // Serializes empty scope info.
+ static ScopeInfo* Empty();
+
+#ifdef DEBUG
+ void Print();
+#endif
+
+ // The layout of the static part of a ScopeInfo is as follows. Each entry is
+ // numeric and occupies one array slot.
+ // 1. A set of properties of the scope
+ // 2. The number of parameters. This only applies to function scopes. For
+ // non-function scopes this is 0.
+ // 3. The number of non-parameter variables allocated on the stack.
+ // 4. The number of non-parameter and parameter variables allocated in the
+ // context.
+#define FOR_EACH_NUMERIC_FIELD(V) \
+ V(Flags) \
+ V(ParameterCount) \
+ V(StackLocalCount) \
+ V(ContextLocalCount)
+
+#define FIELD_ACCESSORS(name) \
+ void Set##name(int value) { \
+ set(k##name, Smi::FromInt(value)); \
+ } \
+ int name() { \
+ if (length() > 0) { \
+ return Smi::cast(get(k##name))->value(); \
+ } else { \
+ return 0; \
+ } \
+ }
+ FOR_EACH_NUMERIC_FIELD(FIELD_ACCESSORS)
+#undef FIELD_ACCESSORS
+
+ private:
+ enum {
+#define DECL_INDEX(name) k##name,
+ FOR_EACH_NUMERIC_FIELD(DECL_INDEX)
+#undef DECL_INDEX
+#undef FOR_EACH_NUMERIC_FIELD
+ kVariablePartIndex
+ };
+
+ // The layout of the variable part of a ScopeInfo is as follows:
+ // 1. ParameterEntries:
+ // This part stores the names of the parameters for function scopes. One
+ // slot is used per parameter, so in total this part occupies
+ // ParameterCount() slots in the array. For other scopes than function
+ // scopes ParameterCount() is 0.
+ // 2. StackLocalEntries:
+ // Contains the names of local variables that are allocated on the stack,
+ // in increasing order of the stack slot index. One slot is used per stack
+ // local, so in total this part occupies StackLocalCount() slots in the
+ // array.
+ // 3. ContextLocalNameEntries:
+ // Contains the names of local variables and parameters that are allocated
+ // in the context. They are stored in increasing order of the context slot
+ // index starting with Context::MIN_CONTEXT_SLOTS. One slot is used per
+ // context local, so in total this part occupies ContextLocalCount() slots
+ // in the array.
+ // 4. ContextLocalInfoEntries:
+ // Contains the variable modes and initialization flags corresponding to
+ // the context locals in ContextLocalNameEntries. One slot is used per
+ // context local, so in total this part occupies ContextLocalCount()
+ // slots in the array.
+ // 5. FunctionNameEntryIndex:
+ // If the scope belongs to a named function expression this part contains
+ // information about the function variable. It always occupies two array
+ // slots: a. The name of the function variable.
+ // b. The context or stack slot index for the variable.
+ int ParameterEntriesIndex();
+ int StackLocalEntriesIndex();
+ int ContextLocalNameEntriesIndex();
+ int ContextLocalInfoEntriesIndex();
+ int FunctionNameEntryIndex();
+
+ // Location of the function variable for named function expressions.
+ enum FunctionVariableInfo {
+ NONE, // No function name present.
+ STACK, // Function variable is allocated on the stack.
+ CONTEXT, // Function variable is allocated in the context.
+ UNUSED
+ };
+
+ // Properties of scopes.
+ class TypeField: public BitField<ScopeType, 0, 3> {};
+ class CallsEvalField: public BitField<bool, 3, 1> {};
+ class LanguageModeField: public BitField<LanguageMode, 4, 2> {};
+ class FunctionVariableField: public BitField<FunctionVariableInfo, 6, 2> {};
+ class FunctionVariableMode: public BitField<VariableMode, 8, 3> {};
+
+ // BitFields representing the encoded information for context locals in the
+ // ContextLocalInfoEntries part.
+ class ContextLocalMode: public BitField<VariableMode, 0, 3> {};
+ class ContextLocalInitFlag: public BitField<InitializationFlag, 3, 1> {};
+};
+
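// --- Editor's sketch (illustration only; not part of this patch) ---
// The X-macro technique used by FOR_EACH_NUMERIC_FIELD above, reduced to a
// self-contained demo: one field list drives both the slot-index enum and
// the accessors, so the two can never get out of sync.
#define SKETCH_FOR_EACH_FIELD(V) V(Flags) V(ParameterCount)

struct SketchScopeInfo {
  enum {
#define SKETCH_DECL_INDEX(name) k##name,
    SKETCH_FOR_EACH_FIELD(SKETCH_DECL_INDEX)
#undef SKETCH_DECL_INDEX
    kSketchSlotCount
  };
  int slots[kSketchSlotCount];
#define SKETCH_FIELD_ACCESSORS(name)                     \
  void Set##name(int value) { slots[k##name] = value; } \
  int name() { return slots[k##name]; }
  SKETCH_FOR_EACH_FIELD(SKETCH_FIELD_ACCESSORS)
#undef SKETCH_FIELD_ACCESSORS
};
#undef SKETCH_FOR_EACH_FIELD
// --- end sketch ---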
+
// The cache for maps used by normalized (dictionary mode) objects.
// Such maps do not have property descriptors, so a typical program
// needs very limited number of distinct normalized maps.
@@ -3079,11 +3295,12 @@ class NormalizedMapCache: public FixedArray {
};
-// ByteArray represents fixed sized byte arrays. Used by the outside world,
-// such as PCRE, and also by the memory allocator and garbage collector to
-// fill in free blocks in the heap.
+// ByteArray represents fixed sized byte arrays. Used for the relocation info
+// that is attached to code objects.
class ByteArray: public FixedArrayBase {
public:
+ inline int Size() { return RoundUp(length() + kHeaderSize, kPointerSize); }
+
// Setter and getter.
inline byte get(int index);
inline void set(int index, byte value);
@@ -3140,6 +3357,41 @@ class ByteArray: public FixedArrayBase {
};
+// FreeSpace represents fixed sized areas of the heap that are not currently in
+// use. Used by the heap and GC.
+class FreeSpace: public HeapObject {
+ public:
+ // [size]: size of the free space including the header.
+ inline int size();
+ inline void set_size(int value);
+
+ inline int Size() { return size(); }
+
+ // Casting.
+ static inline FreeSpace* cast(Object* obj);
+
+#ifdef OBJECT_PRINT
+ inline void FreeSpacePrint() {
+ FreeSpacePrint(stdout);
+ }
+ void FreeSpacePrint(FILE* out);
+#endif
+#ifdef DEBUG
+ void FreeSpaceVerify();
+#endif
+
+ // Layout description.
+ // Size is smi tagged when it is stored.
+ static const int kSizeOffset = HeapObject::kHeaderSize;
+ static const int kHeaderSize = kSizeOffset + kPointerSize;
+
+ static const int kAlignedSize = OBJECT_POINTER_ALIGN(kHeaderSize);
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(FreeSpace);
+};
+
+
// An ExternalArray represents a fixed-size array of primitive values
// which live outside the JavaScript heap. Its subclasses are used to
// implement the CanvasArray types being defined in the WebGL
@@ -3468,7 +3720,8 @@ class DeoptimizationInputData: public FixedArray {
static const int kAstIdOffset = 0;
static const int kTranslationIndexOffset = 1;
static const int kArgumentsStackHeightOffset = 2;
- static const int kDeoptEntrySize = 3;
+ static const int kPcOffset = 3;
+ static const int kDeoptEntrySize = 4;
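
// --- Editor's sketch (illustration only; not part of this patch) ---
// Each deopt entry now spans four consecutive slots instead of three,
// addressed relative to the entry base:
//   +0 AstId  +1 TranslationIndex  +2 ArgumentsStackHeight  +3 Pc (new)
// With kSketchFirstEntryIndex standing in for the fixed header slot count:
//
//   int SketchEntryBase(int deopt_index) {
//     const int kSketchFirstEntryIndex = 0;  // assumed header slot count
//     return kSketchFirstEntryIndex + deopt_index * 4 /* kDeoptEntrySize */;
//   }
// --- end sketch ---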
// Simple element accessors.
#define DEFINE_ELEMENT_ACCESSORS(name, type) \
@@ -3504,6 +3757,7 @@ class DeoptimizationInputData: public FixedArray {
DEFINE_ENTRY_ACCESSORS(AstId, Smi)
DEFINE_ENTRY_ACCESSORS(TranslationIndex, Smi)
DEFINE_ENTRY_ACCESSORS(ArgumentsStackHeight, Smi)
+ DEFINE_ENTRY_ACCESSORS(Pc, Smi)
#undef DEFINE_ENTRY_ACCESSORS
@@ -3630,6 +3884,9 @@ class Code: public HeapObject {
DECL_ACCESSORS(relocation_info, ByteArray)
void InvalidateRelocation();
+ // [handler_table]: Fixed array containing offsets of exception handlers.
+ DECL_ACCESSORS(handler_table, FixedArray)
+
// [deoptimization_data]: Array containing data for deopt.
DECL_ACCESSORS(deoptimization_data, FixedArray)
@@ -3673,6 +3930,11 @@ class Code: public HeapObject {
inline int major_key();
inline void set_major_key(int value);
+ // For stubs, tells whether they should always exist, so that they can be
+ // called from other stubs.
+ inline bool is_pregenerated();
+ inline void set_is_pregenerated(bool value);
+
// [optimizable]: For FUNCTION kind, tells if it is optimizable.
inline bool optimizable();
inline void set_optimizable(bool value);
@@ -3687,6 +3949,11 @@ class Code: public HeapObject {
inline bool has_debug_break_slots();
inline void set_has_debug_break_slots(bool value);
+ // [compiled_optimizable]: For FUNCTION kind, tells whether it has
+ // been compiled with IsOptimizing set to true.
+ inline bool is_compiled_optimizable();
+ inline void set_compiled_optimizable(bool value);
+
// [allow_osr_at_loop_nesting_level]: For FUNCTION kind, tells for
// how long the function has been marked for OSR and therefore which
// level of loop nesting we are willing to do on-stack replacement
@@ -3732,6 +3999,11 @@ class Code: public HeapObject {
inline byte to_boolean_state();
inline void set_to_boolean_state(byte value);
+ // For kind STUB, major_key == CallFunction, tells whether there is
+ // a function cache in the instruction stream.
+ inline bool has_function_cache();
+ inline void set_has_function_cache(bool flag);
+
// Get the safepoint entry for the given pc.
SafepointEntry GetSafepointEntry(Address pc);
@@ -3836,10 +4108,6 @@ class Code: public HeapObject {
void CodeVerify();
#endif
- // Returns the isolate/heap this code object belongs to.
- inline Isolate* isolate();
- inline Heap* heap();
-
// Max loop nesting marker used to postpone OSR. We don't take loop
// nesting that is deeper than 5 levels into account.
static const int kMaxLoopNestingMarker = 6;
@@ -3847,8 +4115,9 @@ class Code: public HeapObject {
// Layout description.
static const int kInstructionSizeOffset = HeapObject::kHeaderSize;
static const int kRelocationInfoOffset = kInstructionSizeOffset + kIntSize;
+ static const int kHandlerTableOffset = kRelocationInfoOffset + kPointerSize;
static const int kDeoptimizationDataOffset =
- kRelocationInfoOffset + kPointerSize;
+ kHandlerTableOffset + kPointerSize;
static const int kNextCodeFlushingCandidateOffset =
kDeoptimizationDataOffset + kPointerSize;
static const int kFlagsOffset =
@@ -3875,11 +4144,13 @@ class Code: public HeapObject {
static const int kBinaryOpTypeOffset = kStubMajorKeyOffset + 1;
static const int kCompareStateOffset = kStubMajorKeyOffset + 1;
static const int kToBooleanTypeOffset = kStubMajorKeyOffset + 1;
+ static const int kHasFunctionCacheOffset = kStubMajorKeyOffset + 1;
static const int kFullCodeFlags = kOptimizableOffset + 1;
class FullCodeFlagsHasDeoptimizationSupportField:
public BitField<bool, 0, 1> {}; // NOLINT
class FullCodeFlagsHasDebugBreakSlotsField: public BitField<bool, 1, 1> {};
+ class FullCodeFlagsIsCompiledOptimizable: public BitField<bool, 2, 1> {};
static const int kBinaryOpReturnTypeOffset = kBinaryOpTypeOffset + 1;
@@ -3894,9 +4165,10 @@ class Code: public HeapObject {
class KindField: public BitField<Kind, 7, 4> {};
class CacheHolderField: public BitField<InlineCacheHolderFlag, 11, 1> {};
class ExtraICStateField: public BitField<ExtraICState, 12, 2> {};
+ class IsPregeneratedField: public BitField<bool, 14, 1> {};
// Signed field cannot be encoded using the BitField class.
- static const int kArgumentsCountShift = 14;
+ static const int kArgumentsCountShift = 15;
static const int kArgumentsCountMask = ~((1 << kArgumentsCountShift) - 1);
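
// --- Editor's sketch (illustration only; not part of this patch) ---
// Why kArgumentsCountShift moved from 14 to 15: the flags word packs the
// BitFields above back to back, and the new IsPregeneratedField takes
// bit 14. Tail of the layout, from the declarations visible above:
//   bits  7-10  KindField
//   bit  11     CacheHolderField
//   bits 12-13  ExtraICStateField
//   bit  14     IsPregeneratedField  (new)
//   bits 15+    argument count (signed; cannot be encoded as a BitField)
// --- end sketch ---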
static const int kFlagsNotUsedInLookup =
@@ -4032,8 +4304,12 @@ class Map: public HeapObject {
(bit_field2() & kElementsKindMask) >> kElementsKindShift);
}
+ // Tells whether the instance has fast elements that are only Smis.
+ inline bool has_fast_smi_only_elements() {
+ return elements_kind() == FAST_SMI_ONLY_ELEMENTS;
+ }
+
// Tells whether the instance has fast elements.
- // Equivalent to instance->GetElementsKind() == FAST_ELEMENTS.
inline bool has_fast_elements() {
return elements_kind() == FAST_ELEMENTS;
}
@@ -4042,6 +4318,10 @@ class Map: public HeapObject {
return elements_kind() == FAST_DOUBLE_ELEMENTS;
}
+ inline bool has_non_strict_arguments_elements() {
+ return elements_kind() == NON_STRICT_ARGUMENTS_ELEMENTS;
+ }
+
inline bool has_external_array_elements() {
ElementsKind kind(elements_kind());
return kind >= FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND &&
@@ -4052,6 +4332,9 @@ class Map: public HeapObject {
return elements_kind() == DICTIONARY_ELEMENTS;
}
+ static bool IsValidElementsTransition(ElementsKind from_kind,
+ ElementsKind to_kind);
+
// Tells whether the map is attached to SharedFunctionInfo
// (for inobject slack tracking).
inline void set_attached_to_shared_function_info(bool value);
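
IsValidElementsTransition encodes the rule that an object's elements may only become more general, never more specific: smi-only elements can widen to plain fast elements, for instance, but not the other way around. A hedged sketch of that kind of ordering check, over a hypothetical subset of the kinds:

// Hypothetical, simplified subset of the elements kinds, ordered from
// most to least specific; the real V8 enum has more members.
enum ElementsKind {
  FAST_SMI_ONLY_ELEMENTS,  // every element is a smi
  FAST_ELEMENTS,           // any tagged value
  DICTIONARY_ELEMENTS      // slow, hash-based storage
};

// Sketch: allow only transitions that generalize the representation.
static bool IsValidElementsTransition(ElementsKind from, ElementsKind to) {
  return from < to;
}

int main() {
  bool widen = IsValidElementsTransition(FAST_SMI_ONLY_ELEMENTS, FAST_ELEMENTS);
  bool narrow = IsValidElementsTransition(FAST_ELEMENTS, FAST_SMI_ONLY_ELEMENTS);
  return (widen && !narrow) ? 0 : 1;
}
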
@@ -4100,6 +4383,7 @@ class Map: public HeapObject {
// 1 + 2 * i: prototype
// 2 + 2 * i: target map
DECL_ACCESSORS(prototype_transitions, FixedArray)
+
inline FixedArray* unchecked_prototype_transitions();
static const int kProtoTransitionHeaderSize = 1;
@@ -4109,14 +4393,14 @@ class Map: public HeapObject {
static const int kProtoTransitionMapOffset = 1;
inline int NumberOfProtoTransitions() {
- FixedArray* cache = unchecked_prototype_transitions();
+ FixedArray* cache = prototype_transitions();
if (cache->length() == 0) return 0;
return
Smi::cast(cache->get(kProtoTransitionNumberOfEntriesOffset))->value();
}
inline void SetNumberOfProtoTransitions(int value) {
- FixedArray* cache = unchecked_prototype_transitions();
+ FixedArray* cache = prototype_transitions();
ASSERT(cache->length() != 0);
cache->set_unchecked(kProtoTransitionNumberOfEntriesOffset,
Smi::FromInt(value));
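
Given the layout documented above (one header word, then [prototype, target map] pairs), entry i of the cache occupies flat indices 1 + 2*i and 2 + 2*i. A small sketch of that indexing arithmetic, with a plain array standing in for the FixedArray:

static const int kProtoTransitionHeaderSize = 1;
static const int kProtoTransitionElementsPerEntry = 2;

// Flat index of the prototype slot of entry i: 1 + 2 * i.
static int PrototypeSlot(int i) {
  return kProtoTransitionHeaderSize + i * kProtoTransitionElementsPerEntry;
}

// Flat index of the target-map slot of entry i: 2 + 2 * i.
static int MapSlot(int i) { return PrototypeSlot(i) + 1; }

int main() {
  // Entry 0 occupies slots 1 and 2, entry 1 occupies slots 3 and 4, ...
  return (PrototypeSlot(1) == 3 && MapSlot(1) == 4) ? 0 : 1;
}
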
@@ -4138,27 +4422,6 @@ class Map: public HeapObject {
// instance descriptors.
MUST_USE_RESULT MaybeObject* CopyDropTransitions();
- // Returns this map if it already has elements that are fast, otherwise
- // returns a copy of the map, with all transitions dropped from the
- // descriptors and the ElementsKind set to FAST_ELEMENTS.
- MUST_USE_RESULT inline MaybeObject* GetFastElementsMap();
-
- // Returns this map if it already has fast elements that are doubles,
- // otherwise returns a copy of the map, with all transitions dropped from the
- // descriptors and the ElementsKind set to FAST_DOUBLE_ELEMENTS.
- MUST_USE_RESULT inline MaybeObject* GetFastDoubleElementsMap();
-
- // Returns this map if already has dictionary elements, otherwise returns a
- // copy of the map, with all transitions dropped from the descriptors and the
- // ElementsKind set to DICTIONARY_ELEMENTS.
- MUST_USE_RESULT inline MaybeObject* GetSlowElementsMap();
-
- // Returns a new map with all transitions dropped from the descriptors and the
- // ElementsKind set.
- MUST_USE_RESULT MaybeObject* GetElementsTransitionMap(
- ElementsKind elements_kind,
- bool safe_to_add_transition);
-
// Returns the property index for name (only valid for FAST MODE).
int PropertyIndexFor(String* name);
@@ -4180,6 +4443,9 @@ class Map: public HeapObject {
inline void ClearCodeCache(Heap* heap);
// Update code cache.
+ static void UpdateCodeCache(Handle<Map> map,
+ Handle<String> name,
+ Handle<Code> code);
MUST_USE_RESULT MaybeObject* UpdateCodeCache(String* name, Code* code);
// Returns the found code or undefined if absent.
@@ -4197,6 +4463,8 @@ class Map: public HeapObject {
// This is undone in MarkCompactCollector::ClearNonLiveTransitions().
void CreateBackPointers();
+ void CreateOneBackPointer(Map* transition_target);
+
// Set all map transitions from this map to dead maps to null.
// Also, restore the original prototype on the targets of these
// transitions, so that we do not process this map again while
@@ -4218,6 +4486,31 @@ class Map: public HeapObject {
return EquivalentToForNormalization(other, KEEP_INOBJECT_PROPERTIES);
}
+ // Returns the contents of this map's descriptor array for the given string.
+ // May return NULL. |safe_to_add_transitions| is set to false and NULL
+ // is returned if adding transitions is not allowed.
+ Object* GetDescriptorContents(String* sentinel_name,
+ bool* safe_to_add_transitions);
+
+ // Returns the map that this map transitions to if its elements_kind
+ // is changed to |elements_kind|, or NULL if no such map is cached yet.
+ // |safe_to_add_transition| is set to false if adding transitions is not
+ // allowed.
+ Map* LookupElementsTransitionMap(ElementsKind elements_kind,
+ bool* safe_to_add_transition);
+
+ // Adds an entry to this map's descriptor array for a transition to
+ // |transitioned_map| when its elements_kind is changed to |elements_kind|.
+ MaybeObject* AddElementsTransition(ElementsKind elements_kind,
+ Map* transitioned_map);
+
+ // Returns the transitioned map for this map with the most generic
+ // elements_kind that's found in |candidates|, or a null handle if no match is
+ // found at all.
+ Handle<Map> FindTransitionedMap(MapHandleList* candidates);
+ Map* FindTransitionedMap(MapList* candidates);
+
+
// Dispatched behavior.
#ifdef OBJECT_PRINT
inline void MapPrint() {
@@ -4233,10 +4526,6 @@ class Map: public HeapObject {
inline int visitor_id();
inline void set_visitor_id(int visitor_id);
- // Returns the isolate/heap this map belongs to.
- inline Isolate* isolate();
- inline Heap* heap();
-
typedef void (*TraverseCallback)(Map* map, void* data);
void TraverseTransitionTree(TraverseCallback callback, void* data);
@@ -4273,7 +4562,7 @@ class Map: public HeapObject {
static const int kSize = MAP_POINTER_ALIGN(kPadStart);
// Layout of pointer fields. Heap iteration code relies on them
- // being continiously allocated.
+ // being continuously allocated.
static const int kPointerFieldsBeginOffset = Map::kPrototypeOffset;
static const int kPointerFieldsEndOffset =
Map::kPrototypeTransitionsOffset + kPointerSize;
@@ -4313,7 +4602,7 @@ class Map: public HeapObject {
static const int kStringWrapperSafeForDefaultValueOf = 2;
static const int kAttachedToSharedFunctionInfo = 3;
// No bits can be used after kElementsKindFirstBit, they are all reserved for
- // storing ElementKind. for anything other than storing the ElementKind.
+ // storing ElementKind.
static const int kElementsKindShift = 4;
static const int kElementsKindBitCount = 4;
@@ -4322,6 +4611,9 @@ class Map: public HeapObject {
((1 << (kElementsKindShift + kElementsKindBitCount)) - 1);
static const int8_t kMaximumBitField2FastElementValue = static_cast<int8_t>(
(FAST_ELEMENTS + 1) << Map::kElementsKindShift) - 1;
+ static const int8_t kMaximumBitField2FastSmiOnlyElementValue =
+ static_cast<int8_t>((FAST_SMI_ONLY_ELEMENTS + 1) <<
+ Map::kElementsKindShift) - 1;
// Bit positions for bit field 3
static const int kIsShared = 0;
@@ -4336,6 +4628,7 @@ class Map: public HeapObject {
kSize> BodyDescriptor;
private:
+ String* elements_transition_sentinel_name();
DISALLOW_IMPLICIT_CONSTRUCTORS(Map);
};
@@ -4503,7 +4796,7 @@ class SharedFunctionInfo: public HeapObject {
DECL_ACCESSORS(code, Code)
// [scope_info]: Scope info.
- DECL_ACCESSORS(scope_info, SerializedScopeInfo)
+ DECL_ACCESSORS(scope_info, ScopeInfo)
// [construct stub]: Code stub for constructing instances of this function.
DECL_ACCESSORS(construct_stub, Code)
@@ -4725,8 +5018,20 @@ class SharedFunctionInfo: public HeapObject {
// spending time attempting to optimize it again.
DECL_BOOLEAN_ACCESSORS(optimization_disabled)
- // Indicates whether the function is a strict mode function.
- DECL_BOOLEAN_ACCESSORS(strict_mode)
+ // Indicates the language mode of the function's code as defined by the
+ // current harmony drafts for the next ES language standard. Possible
+ // values are:
+ // 1. CLASSIC_MODE - Unrestricted syntax and semantics, same as in ES5.
+ // 2. STRICT_MODE - Restricted syntax and semantics, same as in ES5.
+ // 3. EXTENDED_MODE - Only available under the harmony flag, not part of ES5.
+ inline LanguageMode language_mode();
+ inline void set_language_mode(LanguageMode language_mode);
+
+ // Indicates whether the language mode of this function is CLASSIC_MODE.
+ inline bool is_classic_mode();
+
+ // Indicates whether the language mode of this function is EXTENDED_MODE.
+ inline bool is_extended_mode();
// False if the function definitely does not allocate an arguments object.
DECL_BOOLEAN_ACCESSORS(uses_arguments)
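
A sketch of the three-valued mode and the two predicates replacing the old strict_mode boolean (the enum and class here are illustrative stand-ins, not the real declarations):

// Stand-in for V8's LanguageMode; the real enum lives elsewhere in the tree.
enum LanguageMode { CLASSIC_MODE, STRICT_MODE, EXTENDED_MODE };

struct SharedFunctionInfoSketch {
  LanguageMode mode;
  bool is_classic_mode() const { return mode == CLASSIC_MODE; }
  // Extended mode is only reachable under the harmony flag and implies
  // the strict-mode restrictions as well.
  bool is_extended_mode() const { return mode == EXTENDED_MODE; }
};

int main() {
  SharedFunctionInfoSketch shared = { STRICT_MODE };
  return (!shared.is_classic_mode() && !shared.is_extended_mode()) ? 0 : 1;
}
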
@@ -4818,6 +5123,13 @@ class SharedFunctionInfo: public HeapObject {
void SharedFunctionInfoVerify();
#endif
+ // Helpers to compile the shared code. Returns true on success, false on
+ // failure (e.g., stack overflow during compilation).
+ static bool EnsureCompiled(Handle<SharedFunctionInfo> shared,
+ ClearExceptionFlag flag);
+ static bool CompileLazy(Handle<SharedFunctionInfo> shared,
+ ClearExceptionFlag flag);
+
// Casting.
static inline SharedFunctionInfo* cast(Object* obj);
@@ -4942,6 +5254,7 @@ class SharedFunctionInfo: public HeapObject {
kCodeAgeShift,
kOptimizationDisabled = kCodeAgeShift + kCodeAgeSize,
kStrictModeFunction,
+ kExtendedModeFunction,
kUsesArguments,
kHasDuplicateParameters,
kNative,
@@ -4968,22 +5281,30 @@ class SharedFunctionInfo: public HeapObject {
public:
// Constants for optimizing codegen for strict mode function and
// native tests.
- // Allows to use byte-widgh instructions.
+ // Allows the use of byte-width instructions.
static const int kStrictModeBitWithinByte =
(kStrictModeFunction + kCompilerHintsSmiTagSize) % kBitsPerByte;
+ static const int kExtendedModeBitWithinByte =
+ (kExtendedModeFunction + kCompilerHintsSmiTagSize) % kBitsPerByte;
+
static const int kNativeBitWithinByte =
(kNative + kCompilerHintsSmiTagSize) % kBitsPerByte;
#if __BYTE_ORDER == __LITTLE_ENDIAN
static const int kStrictModeByteOffset = kCompilerHintsOffset +
(kStrictModeFunction + kCompilerHintsSmiTagSize) / kBitsPerByte;
+ static const int kExtendedModeByteOffset = kCompilerHintsOffset +
+ (kExtendedModeFunction + kCompilerHintsSmiTagSize) / kBitsPerByte;
static const int kNativeByteOffset = kCompilerHintsOffset +
(kNative + kCompilerHintsSmiTagSize) / kBitsPerByte;
#elif __BYTE_ORDER == __BIG_ENDIAN
static const int kStrictModeByteOffset = kCompilerHintsOffset +
(kCompilerHintsSize - 1) -
((kStrictModeFunction + kCompilerHintsSmiTagSize) / kBitsPerByte);
+ static const int kExtendedModeByteOffset = kCompilerHintsOffset +
+ (kCompilerHintsSize - 1) -
+ ((kExtendedModeFunction + kCompilerHintsSmiTagSize) / kBitsPerByte);
static const int kNativeByteOffset = kCompilerHintsOffset +
(kCompilerHintsSize - 1) -
((kNative + kCompilerHintsSmiTagSize) / kBitsPerByte);
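
Both branches locate the byte inside the smi-tagged compiler-hints word that holds a given hint bit; only the byte numbering flips with endianness. A worked sketch of the arithmetic, with illustrative stand-in values for the field index and sizes:

#include <cstdio>

static const int kBitsPerByte = 8;
// Illustrative stand-ins: hint's bit index, smi tag width, hints field width.
static const int kStrictModeFunction = 9;
static const int kCompilerHintsSmiTagSize = 1;
static const int kCompilerHintsSize = 4;  // bytes

int main() {
  int bit = kStrictModeFunction + kCompilerHintsSmiTagSize;     // absolute bit 10
  int le_byte = bit / kBitsPerByte;                             // byte 1 on little-endian
  int be_byte = (kCompilerHintsSize - 1) - bit / kBitsPerByte;  // byte 2 on big-endian
  int bit_within_byte = bit % kBitsPerByte;                     // bit 2 within that byte
  printf("LE byte %d, BE byte %d, bit %d\n", le_byte, be_byte, bit_within_byte);
  return 0;
}
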
@@ -5039,6 +5360,14 @@ class JSFunction: public JSObject {
// recompiled the next time it is executed.
void MarkForLazyRecompilation();
+ // Helpers to compile this function. Returns true on success, false on
+ // failure (e.g., stack overflow during compilation).
+ static bool CompileLazy(Handle<JSFunction> function,
+ ClearExceptionFlag flag);
+ static bool CompileOptimized(Handle<JSFunction> function,
+ int osr_ast_id,
+ ClearExceptionFlag flag);
+
// Tells whether or not the function is already marked for lazy
// recompilation.
inline bool IsMarkedForLazyRecompilation();
@@ -5046,7 +5375,8 @@ class JSFunction: public JSObject {
// Check whether or not this function is inlineable.
bool IsInlineable();
- // [literals]: Fixed array holding the materialized literals.
+ // [literals_or_bindings]: Fixed array holding either
+ // the materialized literals or the bindings of a bound function.
//
// If the function contains object, regexp or array literals, the
// literals array prefix contains the object, regexp, and array
@@ -5055,7 +5385,17 @@ class JSFunction: public JSObject {
// or array functions. Performing a dynamic lookup, we might end up
// using the functions from a new context that we should not have
// access to.
- DECL_ACCESSORS(literals, FixedArray)
+ //
+ // On bound functions, the array is a (copy-on-write) fixed-array containing
+ // the function that was bound, the bound this-value, and any bound
+ // arguments. Bound functions never contain literals.
+ DECL_ACCESSORS(literals_or_bindings, FixedArray)
+
+ inline FixedArray* literals();
+ inline void set_literals(FixedArray* literals);
+
+ inline FixedArray* function_bindings();
+ inline void set_function_bindings(FixedArray* bindings);
// The initial map for an object created by this constructor.
inline Map* initial_map();
@@ -5143,6 +5483,11 @@ class JSFunction: public JSObject {
static const int kLiteralsPrefixSize = 1;
static const int kLiteralGlobalContextIndex = 0;
+ // Layout of the bound-function binding array.
+ static const int kBoundFunctionIndex = 0;
+ static const int kBoundThisIndex = 1;
+ static const int kBoundArgumentsStartIndex = 2;
+
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(JSFunction);
};
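
With that layout, the bindings array of a bound function reads [target function, bound this, bound arg 0, bound arg 1, ...]. A tiny sketch of unpacking it, with std::vector<std::string> standing in for the copy-on-write FixedArray:

#include <string>
#include <vector>

static const int kBoundFunctionIndex = 0;
static const int kBoundThisIndex = 1;
static const int kBoundArgumentsStartIndex = 2;

struct Bindings {
  std::string target;
  std::string bound_this;
  std::vector<std::string> bound_args;
};

// Sketch: recover the three parts of the flat bindings array.
Bindings Unpack(const std::vector<std::string>& bindings) {
  Bindings result;
  result.target = bindings[kBoundFunctionIndex];
  result.bound_this = bindings[kBoundThisIndex];
  result.bound_args.assign(bindings.begin() + kBoundArgumentsStartIndex,
                           bindings.end());
  return result;
}

int main() {
  std::vector<std::string> flat;
  flat.push_back("f");     // bound target
  flat.push_back("this");  // bound this-value
  flat.push_back("arg0");  // first bound argument
  return Unpack(flat).bound_args.size() == 1 ? 0 : 1;
}
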
@@ -5215,6 +5560,11 @@ class GlobalObject: public JSObject {
}
// Ensure that the global object has a cell for the given property name.
+ static Handle<JSGlobalPropertyCell> EnsurePropertyCell(
+ Handle<GlobalObject> global,
+ Handle<String> name);
+ // TODO(kmillikin): This function can be eliminated once the stub cache is
+ // fully handlified (and the static helper can be written directly).
MUST_USE_RESULT MaybeObject* EnsurePropertyCell(String* name);
// Casting.
@@ -5227,8 +5577,6 @@ class GlobalObject: public JSObject {
static const int kHeaderSize = kGlobalReceiverOffset + kPointerSize;
private:
- friend class AGCCVersionRequiresThisClassToHaveAFriendSoHereItIs;
-
DISALLOW_IMPLICIT_CONSTRUCTORS(GlobalObject);
};
@@ -5571,12 +5919,16 @@ class CompilationCacheTable: public HashTable<CompilationCacheShape,
public:
// Find cached value for a string key, otherwise return null.
Object* Lookup(String* src);
- Object* LookupEval(String* src, Context* context, StrictModeFlag strict_mode);
+ Object* LookupEval(String* src,
+ Context* context,
+ LanguageMode language_mode,
+ int scope_position);
Object* LookupRegExp(String* source, JSRegExp::Flags flags);
MaybeObject* Put(String* src, Object* value);
MaybeObject* PutEval(String* src,
Context* context,
- SharedFunctionInfo* value);
+ SharedFunctionInfo* value,
+ int scope_position);
MaybeObject* PutRegExp(String* src, JSRegExp::Flags flags, FixedArray* value);
// Remove given value from cache.
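
The eval cache key thus grows to include the scope position, since the same source string can resolve differently at different nesting depths. A hedged sketch of the composite-key idea (all types here are stand-ins for the real heap objects):

#include <map>
#include <string>

enum LanguageMode { CLASSIC_MODE, STRICT_MODE, EXTENDED_MODE };
typedef int ContextId;  // stand-in for the calling Context

struct EvalKey {
  std::string src;
  ContextId context;
  LanguageMode mode;
  int scope_position;
  bool operator<(const EvalKey& other) const {
    if (src != other.src) return src < other.src;
    if (context != other.context) return context < other.context;
    if (mode != other.mode) return mode < other.mode;
    return scope_position < other.scope_position;
  }
};

// Sketch: look up cached eval code by the full four-part key.
int LookupEval(const std::map<EvalKey, int>& cache, const EvalKey& key) {
  std::map<EvalKey, int>::const_iterator it = cache.find(key);
  return it == cache.end() ? -1 : it->second;  // -1 stands in for "not found"
}

int main() {
  std::map<EvalKey, int> cache;
  EvalKey key = { "x + 1", 7, STRICT_MODE, 42 };
  cache[key] = 1;
  return LookupEval(cache, key) == 1 ? 0 : 1;
}
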
@@ -5689,10 +6041,17 @@ class PolymorphicCodeCache: public Struct {
public:
DECL_ACCESSORS(cache, Object)
- MUST_USE_RESULT MaybeObject* Update(MapList* maps,
+ static void Update(Handle<PolymorphicCodeCache> cache,
+ MapHandleList* maps,
+ Code::Flags flags,
+ Handle<Code> code);
+
+ MUST_USE_RESULT MaybeObject* Update(MapHandleList* maps,
Code::Flags flags,
Code* code);
- Object* Lookup(MapList* maps, Code::Flags flags);
+
+ // Returns an undefined value if the entry is not found.
+ Handle<Object> Lookup(MapHandleList* maps, Code::Flags flags);
static inline PolymorphicCodeCache* cast(Object* obj);
@@ -5717,8 +6076,11 @@ class PolymorphicCodeCache: public Struct {
class PolymorphicCodeCacheHashTable
: public HashTable<CodeCacheHashTableShape, HashTableKey*> {
public:
- Object* Lookup(MapList* maps, int code_kind);
- MUST_USE_RESULT MaybeObject* Put(MapList* maps, int code_kind, Code* code);
+ Object* Lookup(MapHandleList* maps, int code_kind);
+
+ MUST_USE_RESULT MaybeObject* Put(MapHandleList* maps,
+ int code_kind,
+ Code* code);
static inline PolymorphicCodeCacheHashTable* cast(Object* obj);
@@ -5989,7 +6351,8 @@ class String: public HeapObject {
RobustnessFlag robustness_flag = FAST_STRING_TRAVERSAL,
int* length_output = 0);
- int Utf8Length();
+ inline int Utf8Length() { return Utf8Length(this, 0, length()); }
+ static int Utf8Length(String* input, int from, int to);
// Return a 16 bit Unicode representation of the string.
// The string should be nearly flat, otherwise the performance of
@@ -6226,6 +6589,9 @@ class SeqString: public String {
// Casting.
static inline SeqString* cast(Object* obj);
+ // Layout description.
+ static const int kHeaderSize = String::kSize;
+
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(SeqString);
};
@@ -6259,12 +6625,8 @@ class SeqAsciiString: public SeqString {
return OBJECT_POINTER_ALIGN(kHeaderSize + length * kCharSize);
}
- // Layout description.
- static const int kHeaderSize = String::kSize;
- static const int kAlignedSize = POINTER_SIZE_ALIGN(kHeaderSize);
-
// Maximal memory usage for a single sequential ASCII string.
- static const int kMaxSize = 512 * MB;
+ static const int kMaxSize = 512 * MB - 1;
// Maximal length of a single sequential ASCII string.
// Q.v. String::kMaxLength which is the maximal size of concatenated strings.
static const int kMaxLength = (kMaxSize - kHeaderSize);
@@ -6313,12 +6675,8 @@ class SeqTwoByteString: public SeqString {
return OBJECT_POINTER_ALIGN(kHeaderSize + length * kShortSize);
}
- // Layout description.
- static const int kHeaderSize = String::kSize;
- static const int kAlignedSize = POINTER_SIZE_ALIGN(kHeaderSize);
-
// Maximal memory usage for a single sequential two-byte string.
- static const int kMaxSize = 512 * MB;
+ static const int kMaxSize = 512 * MB - 1;
// Maximal length of a single sequential two-byte string.
// Q.v. String::kMaxLength which is the maximal size of concatenated strings.
static const int kMaxLength = (kMaxSize - kHeaderSize) / sizeof(uint16_t);
@@ -6462,7 +6820,12 @@ class ExternalString: public String {
// Layout description.
static const int kResourceOffset = POINTER_SIZE_ALIGN(String::kSize);
- static const int kSize = kResourceOffset + kPointerSize;
+ static const int kShortSize = kResourceOffset + kPointerSize;
+ static const int kResourceDataOffset = kResourceOffset + kPointerSize;
+ static const int kSize = kResourceDataOffset + kPointerSize;
+
+ // Returns whether the external string is short (data pointer is not cached).
+ inline bool is_short();
STATIC_CHECK(kResourceOffset == Internals::kStringResourceOffset);
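
The split into kShortSize and kSize reflects two allocation sizes: a short external string stores only the resource pointer, while a full one also caches the character-data pointer so reads can skip the resource indirection. A sketch of the offset arithmetic (the String header size is an illustrative stand-in):

#include <cstdio>

static const int kPointerSize = static_cast<int>(sizeof(void*));
static const int kStringSize = 2 * kPointerSize;  // stand-in for String::kSize

static const int kResourceOffset = kStringSize;   // assume already aligned
static const int kShortSize = kResourceOffset + kPointerSize;   // resource only
static const int kResourceDataOffset = kResourceOffset + kPointerSize;
static const int kSize = kResourceDataOffset + kPointerSize;    // + cached data

int main() {
  // is_short() amounts to: was this string allocated with kShortSize,
  // i.e. without the cached-data slot that update_data_cache() refreshes?
  printf("short: %d bytes, full: %d bytes\n", kShortSize, kSize);
  return 0;
}
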
@@ -6480,11 +6843,19 @@ class ExternalAsciiString: public ExternalString {
typedef v8::String::ExternalAsciiStringResource Resource;
// The underlying resource.
- inline Resource* resource();
- inline void set_resource(Resource* buffer);
+ inline const Resource* resource();
+ inline void set_resource(const Resource* buffer);
+
+ // Update the pointer cache to the external character array.
+ // The cached pointer is always valid, as the external character array does
+ // not move during lifetime. Deserialization is the only exception, after
+ // which the pointer cache has to be refreshed.
+ inline void update_data_cache();
+
+ inline const char* GetChars();
// Dispatched behavior.
- uint16_t ExternalAsciiStringGet(int index);
+ inline uint16_t ExternalAsciiStringGet(int index);
// Casting.
static inline ExternalAsciiString* cast(Object* obj);
@@ -6517,14 +6888,22 @@ class ExternalTwoByteString: public ExternalString {
typedef v8::String::ExternalStringResource Resource;
// The underlying string resource.
- inline Resource* resource();
- inline void set_resource(Resource* buffer);
+ inline const Resource* resource();
+ inline void set_resource(const Resource* buffer);
+
+ // Update the pointer cache to the external character array.
+ // The cached pointer is always valid, as the external character array does
+ // not move during lifetime. Deserialization is the only exception, after
+ // which the pointer cache has to be refreshed.
+ inline void update_data_cache();
+
+ inline const uint16_t* GetChars();
// Dispatched behavior.
- uint16_t ExternalTwoByteStringGet(int index);
+ inline uint16_t ExternalTwoByteStringGet(int index);
// For regexp code.
- const uint16_t* ExternalTwoByteStringGetData(unsigned start);
+ inline const uint16_t* ExternalTwoByteStringGetData(unsigned start);
// Casting.
static inline ExternalTwoByteString* cast(Object* obj);
@@ -6669,6 +7048,9 @@ class Oddball: public HeapObject {
static const byte kUndefined = 5;
static const byte kOther = 6;
+ // The ToNumber value of a hidden oddball is a negative smi.
+ static const int kLeastHiddenOddballNumber = -5;
+
typedef FixedBodyDescriptor<kToStringOffset,
kToNumberOffset + kPointerSize,
kSize> BodyDescriptor;
@@ -6704,10 +7086,6 @@ class JSGlobalPropertyCell: public HeapObject {
kValueOffset + kPointerSize,
kSize> BodyDescriptor;
- // Returns the isolate/heap this cell object belongs to.
- inline Isolate* isolate();
- inline Heap* heap();
-
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(JSGlobalPropertyCell);
};
@@ -6719,25 +7097,56 @@ class JSProxy: public JSReceiver {
// [handler]: The handler property.
DECL_ACCESSORS(handler, Object)
+ // [hash]: The hash code property (undefined if not initialized yet).
+ DECL_ACCESSORS(hash, Object)
+
// Casting.
static inline JSProxy* cast(Object* obj);
bool HasPropertyWithHandler(String* name);
+ bool HasElementWithHandler(uint32_t index);
+
+ MUST_USE_RESULT MaybeObject* GetPropertyWithHandler(
+ Object* receiver,
+ String* name);
+ MUST_USE_RESULT MaybeObject* GetElementWithHandler(
+ Object* receiver,
+ uint32_t index);
MUST_USE_RESULT MaybeObject* SetPropertyWithHandler(
String* name,
Object* value,
PropertyAttributes attributes,
StrictModeFlag strict_mode);
+ MUST_USE_RESULT MaybeObject* SetElementWithHandler(
+ uint32_t index,
+ Object* value,
+ StrictModeFlag strict_mode);
+
+ // If the handler defines an accessor property, invoke its setter
+ // (or throw if only a getter exists) and set *found to true. Otherwise,
+ // set *found to false.
+ MUST_USE_RESULT MaybeObject* SetPropertyWithHandlerIfDefiningSetter(
+ String* name,
+ Object* value,
+ PropertyAttributes attributes,
+ StrictModeFlag strict_mode,
+ bool* found);
MUST_USE_RESULT MaybeObject* DeletePropertyWithHandler(
String* name,
DeleteMode mode);
+ MUST_USE_RESULT MaybeObject* DeleteElementWithHandler(
+ uint32_t index,
+ DeleteMode mode);
MUST_USE_RESULT PropertyAttributes GetPropertyAttributeWithHandler(
JSReceiver* receiver,
- String* name,
- bool* has_exception);
+ String* name);
+ MUST_USE_RESULT PropertyAttributes GetElementAttributeWithHandler(
+ JSReceiver* receiver,
+ uint32_t index);
+
+ MUST_USE_RESULT MaybeObject* GetIdentityHash(CreationFlag flag);
// Turn this into an (empty) JSObject.
void Fix();
@@ -6745,6 +7154,13 @@ class JSProxy: public JSReceiver {
// Initializes the body after the handler slot.
inline void InitializeBody(int object_size, Object* value);
+ // Invoke a trap by name. If the trap does not exist on this proxy's handler,
+ // but derived_trap is non-NULL, invoke that instead. May cause GC.
+ Handle<Object> CallTrap(const char* name,
+ Handle<Object> derived_trap,
+ int argc,
+ Handle<Object> args[]);
+
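
CallTrap's fallback argument captures the derived-trap pattern from the Harmony proxy proposal: use the handler's own trap when present, otherwise synthesize the behavior from a more fundamental trap. A hedged sketch of that dispatch, with std::map and std::function standing in for the JS handler object:

#include <functional>
#include <map>
#include <stdexcept>
#include <string>

typedef std::function<int()> Trap;
typedef std::map<std::string, Trap> Handler;

// Sketch: prefer the handler's own trap; fall back to derived_trap if given.
int CallTrap(const Handler& handler, const std::string& name, Trap derived_trap) {
  Handler::const_iterator it = handler.find(name);
  if (it != handler.end()) return it->second();
  if (derived_trap) return derived_trap();
  throw std::runtime_error("trap not found");  // the real code throws a JS TypeError
}

int main() {
  Handler handler;
  handler["get"] = []() { return 1; };
  // "has" is missing from the handler, so the derived trap runs instead.
  return CallTrap(handler, "has", []() { return 0; });
}
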
// Dispatched behavior.
#ifdef OBJECT_PRINT
inline void JSProxyPrint() {
@@ -6760,7 +7176,8 @@ class JSProxy: public JSReceiver {
// size as a virgin JSObject. This is essential for becoming a JSObject
// upon freeze.
static const int kHandlerOffset = HeapObject::kHeaderSize;
- static const int kPaddingOffset = kHandlerOffset + kPointerSize;
+ static const int kHashOffset = kHandlerOffset + kPointerSize;
+ static const int kPaddingOffset = kHashOffset + kPointerSize;
static const int kSize = JSObject::kHeaderSize;
static const int kHeaderSize = kPaddingOffset;
static const int kPaddingSize = kSize - kPaddingOffset;
@@ -6768,7 +7185,7 @@ class JSProxy: public JSReceiver {
STATIC_CHECK(kPaddingSize >= 0);
typedef FixedBodyDescriptor<kHandlerOffset,
- kHandlerOffset + kPointerSize,
+ kPaddingOffset,
kSize> BodyDescriptor;
private:
@@ -6799,7 +7216,7 @@ class JSFunctionProxy: public JSProxy {
#endif
// Layout description.
- static const int kCallTrapOffset = kHandlerOffset + kPointerSize;
+ static const int kCallTrapOffset = JSProxy::kPaddingOffset;
static const int kConstructTrapOffset = kCallTrapOffset + kPointerSize;
static const int kPaddingOffset = kConstructTrapOffset + kPointerSize;
static const int kSize = JSFunction::kSize;
@@ -6816,18 +7233,69 @@ class JSFunctionProxy: public JSProxy {
};
+// The JSSet describes ECMAScript Harmony sets
+class JSSet: public JSObject {
+ public:
+ // [table]: the backing hash set containing keys.
+ DECL_ACCESSORS(table, Object)
+
+ // Casting.
+ static inline JSSet* cast(Object* obj);
+
+#ifdef OBJECT_PRINT
+ inline void JSSetPrint() {
+ JSSetPrint(stdout);
+ }
+ void JSSetPrint(FILE* out);
+#endif
+#ifdef DEBUG
+ void JSSetVerify();
+#endif
+
+ static const int kTableOffset = JSObject::kHeaderSize;
+ static const int kSize = kTableOffset + kPointerSize;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(JSSet);
+};
+
+
+// The JSMap describes ECMAScript Harmony maps
+class JSMap: public JSObject {
+ public:
+ // [table]: the backing hash table mapping keys to values.
+ DECL_ACCESSORS(table, Object)
+
+ // Casting.
+ static inline JSMap* cast(Object* obj);
+
+#ifdef OBJECT_PRINT
+ inline void JSMapPrint() {
+ JSMapPrint(stdout);
+ }
+ void JSMapPrint(FILE* out);
+#endif
+#ifdef DEBUG
+ void JSMapVerify();
+#endif
+
+ static const int kTableOffset = JSObject::kHeaderSize;
+ static const int kSize = kTableOffset + kPointerSize;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(JSMap);
+};
+
+
// The JSWeakMap describes ECMAScript Harmony weak maps
class JSWeakMap: public JSObject {
public:
// [table]: the backing hash table mapping keys to values.
- DECL_ACCESSORS(table, ObjectHashTable)
+ DECL_ACCESSORS(table, Object)
// [next]: linked list of encountered weak maps during GC.
DECL_ACCESSORS(next, Object)
- // Unchecked accessors to be used during GC.
- inline ObjectHashTable* unchecked_table();
-
// Casting.
static inline JSWeakMap* cast(Object* obj);
@@ -6856,8 +7324,8 @@ class JSWeakMap: public JSObject {
class Foreign: public HeapObject {
public:
// [address]: field containing the address.
- inline Address address();
- inline void set_address(Address value);
+ inline Address foreign_address();
+ inline void set_foreign_address(Address value);
// Casting.
static inline Foreign* cast(Object* obj);
@@ -6880,10 +7348,10 @@ class Foreign: public HeapObject {
// Layout description.
- static const int kAddressOffset = HeapObject::kHeaderSize;
- static const int kSize = kAddressOffset + kPointerSize;
+ static const int kForeignAddressOffset = HeapObject::kHeaderSize;
+ static const int kSize = kForeignAddressOffset + kPointerSize;
- STATIC_CHECK(kAddressOffset == Internals::kForeignAddressOffset);
+ STATIC_CHECK(kForeignAddressOffset == Internals::kForeignAddressOffset);
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(Foreign);
@@ -6913,7 +7381,7 @@ class JSArray: public JSObject {
MUST_USE_RESULT MaybeObject* Initialize(int capacity);
// Set the content of the array to the content of storage.
- inline void SetContent(FixedArray* storage);
+ inline MaybeObject* SetContent(FixedArray* storage);
// Casting.
static inline JSArray* cast(Object* obj);
@@ -7129,7 +7597,6 @@ class TemplateInfo: public Struct {
static const int kPropertyListOffset = kTagOffset + kPointerSize;
static const int kHeaderSize = kPropertyListOffset + kPointerSize;
protected:
- friend class AGCCVersionRequiresThisClassToHaveAFriendSoHereItIs;
DISALLOW_IMPLICIT_CONSTRUCTORS(TemplateInfo);
};
@@ -7433,11 +7900,16 @@ class ObjectVisitor BASE_EMBEDDED {
// Handy shorthand for visiting a single pointer.
virtual void VisitPointer(Object** p) { VisitPointers(p, p + 1); }
+ // Visit pointer embedded into a code object.
+ virtual void VisitEmbeddedPointer(RelocInfo* rinfo);
+
// Visits a contiguous array of external references (references to the C++
// heap) in the half-open range [start, end). Any or all of the values
// may be modified on return.
virtual void VisitExternalReferences(Address* start, Address* end) {}
+ virtual void VisitExternalReference(RelocInfo* rinfo);
+
inline void VisitExternalReference(Address* p) {
VisitExternalReferences(p, p + 1);
}
diff --git a/deps/v8/src/parser.cc b/deps/v8/src/parser.cc
index f9500c405..d834acb38 100644
--- a/deps/v8/src/parser.cc
+++ b/deps/v8/src/parser.cc
@@ -28,7 +28,7 @@
#include "v8.h"
#include "api.h"
-#include "ast-inl.h"
+#include "ast.h"
#include "bootstrapper.h"
#include "char-predicates-inl.h"
#include "codegen.h"
@@ -407,9 +407,9 @@ unsigned* ScriptDataImpl::ReadAddress(int position) {
}
-Scope* Parser::NewScope(Scope* parent, Scope::Type type, bool inside_with) {
+Scope* Parser::NewScope(Scope* parent, ScopeType type) {
Scope* result = new(zone()) Scope(parent, type);
- result->Initialize(inside_with);
+ result->Initialize();
return result;
}
@@ -459,26 +459,42 @@ class TargetScope BASE_EMBEDDED {
// ----------------------------------------------------------------------------
-// LexicalScope is a support class to facilitate manipulation of the
-// Parser's scope stack. The constructor sets the parser's top scope
-// to the incoming scope, and the destructor resets it.
-//
-// Additionally, it stores transient information used during parsing.
-// These scopes are not kept around after parsing or referenced by syntax
-// trees so they can be stack-allocated and hence used by the pre-parser.
+// FunctionState and BlockState together implement the parser's scope stack.
+// The parser's current scope is in top_scope_. The BlockState and
+// FunctionState constructors push on the scope stack and the destructors
+// pop. They are also used to hold the parser's per-function and per-block
+// state.
+
+class Parser::BlockState BASE_EMBEDDED {
+ public:
+ BlockState(Parser* parser, Scope* scope)
+ : parser_(parser),
+ outer_scope_(parser->top_scope_) {
+ parser->top_scope_ = scope;
+ }
+
+ ~BlockState() { parser_->top_scope_ = outer_scope_; }
+
+ private:
+ Parser* parser_;
+ Scope* outer_scope_;
+};
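
The pattern is plain RAII: each constructor swaps in the new scope and each destructor restores the old one, so the scope stack unwinds correctly even when parsing bails out early. A stripped-down, self-contained sketch of the same idea (types are illustrative stand-ins, not the real parser classes):

struct Scope {};

struct Parser {
  Scope* top_scope_ = nullptr;
};

class BlockState {
 public:
  BlockState(Parser* parser, Scope* scope)
      : parser_(parser), outer_scope_(parser->top_scope_) {
    parser->top_scope_ = scope;  // push on construction
  }
  ~BlockState() { parser_->top_scope_ = outer_scope_; }  // pop on destruction

 private:
  Parser* parser_;
  Scope* outer_scope_;
};

int main() {
  Parser parser;
  Scope block;
  {
    BlockState state(&parser, &block);
    // parser.top_scope_ == &block while the block body is being parsed.
  }
  // Restored here, even if parsing had returned early.
  return parser.top_scope_ == nullptr ? 0 : 1;
}
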
-class LexicalScope BASE_EMBEDDED {
+
+class Parser::FunctionState BASE_EMBEDDED {
public:
- LexicalScope(Parser* parser, Scope* scope, Isolate* isolate);
- ~LexicalScope();
+ FunctionState(Parser* parser, Scope* scope, Isolate* isolate);
+ ~FunctionState();
int NextMaterializedLiteralIndex() {
- int next_index =
- materialized_literal_count_ + JSFunction::kLiteralsPrefixSize;
- materialized_literal_count_++;
- return next_index;
+ return next_materialized_literal_index_++;
+ }
+ int materialized_literal_count() {
+ return next_materialized_literal_index_ - JSFunction::kLiteralsPrefixSize;
}
- int materialized_literal_count() { return materialized_literal_count_; }
+
+ int NextHandlerIndex() { return next_handler_index_++; }
+ int handler_count() { return next_handler_index_; }
void SetThisPropertyAssignmentInfo(
bool only_simple_this_property_assignments,
@@ -498,10 +514,13 @@ class LexicalScope BASE_EMBEDDED {
int expected_property_count() { return expected_property_count_; }
private:
- // Captures the number of literals that need materialization in the
- // function. Includes regexp literals, and boilerplate for object
- // and array literals.
- int materialized_literal_count_;
+ // Used to assign an index to each literal that needs materialization in
+ // the function. Includes regexp literals, and boilerplate for object and
+ // array literals.
+ int next_materialized_literal_index_;
+
+ // Used to assign a per-function index to try/catch handlers.
+ int next_handler_index_;
// Properties count estimation.
int expected_property_count_;
@@ -511,38 +530,35 @@ class LexicalScope BASE_EMBEDDED {
bool only_simple_this_property_assignments_;
Handle<FixedArray> this_property_assignments_;
- // Bookkeeping
Parser* parser_;
- // Previous values
- LexicalScope* lexical_scope_parent_;
- Scope* previous_scope_;
- int previous_with_nesting_level_;
- unsigned previous_ast_node_id_;
+ FunctionState* outer_function_state_;
+ Scope* outer_scope_;
+ unsigned saved_ast_node_id_;
};
-LexicalScope::LexicalScope(Parser* parser, Scope* scope, Isolate* isolate)
- : materialized_literal_count_(0),
- expected_property_count_(0),
- only_simple_this_property_assignments_(false),
- this_property_assignments_(isolate->factory()->empty_fixed_array()),
- parser_(parser),
- lexical_scope_parent_(parser->lexical_scope_),
- previous_scope_(parser->top_scope_),
- previous_with_nesting_level_(parser->with_nesting_level_),
- previous_ast_node_id_(isolate->ast_node_id()) {
+Parser::FunctionState::FunctionState(Parser* parser,
+ Scope* scope,
+ Isolate* isolate)
+ : next_materialized_literal_index_(JSFunction::kLiteralsPrefixSize),
+ next_handler_index_(0),
+ expected_property_count_(0),
+ only_simple_this_property_assignments_(false),
+ this_property_assignments_(isolate->factory()->empty_fixed_array()),
+ parser_(parser),
+ outer_function_state_(parser->current_function_state_),
+ outer_scope_(parser->top_scope_),
+ saved_ast_node_id_(isolate->ast_node_id()) {
parser->top_scope_ = scope;
- parser->lexical_scope_ = this;
- parser->with_nesting_level_ = 0;
+ parser->current_function_state_ = this;
isolate->set_ast_node_id(AstNode::kDeclarationsId + 1);
}
-LexicalScope::~LexicalScope() {
- parser_->top_scope_ = previous_scope_;
- parser_->lexical_scope_ = lexical_scope_parent_;
- parser_->with_nesting_level_ = previous_with_nesting_level_;
- parser_->isolate()->set_ast_node_id(previous_ast_node_id_);
+Parser::FunctionState::~FunctionState() {
+ parser_->top_scope_ = outer_scope_;
+ parser_->current_function_state_ = outer_function_state_;
+ parser_->isolate()->set_ast_node_id(saved_ast_node_id_);
}
@@ -570,34 +586,36 @@ LexicalScope::~LexicalScope() {
// Implementation of Parser
Parser::Parser(Handle<Script> script,
- bool allow_natives_syntax,
+ int parser_flags,
v8::Extension* extension,
ScriptDataImpl* pre_data)
: isolate_(script->GetIsolate()),
symbol_cache_(pre_data ? pre_data->symbol_count() : 0),
script_(script),
scanner_(isolate_->unicode_cache()),
+ reusable_preparser_(NULL),
top_scope_(NULL),
- with_nesting_level_(0),
- lexical_scope_(NULL),
+ current_function_state_(NULL),
target_stack_(NULL),
- allow_natives_syntax_(allow_natives_syntax),
extension_(extension),
pre_data_(pre_data),
fni_(NULL),
+ allow_natives_syntax_((parser_flags & kAllowNativesSyntax) != 0),
+ allow_lazy_((parser_flags & kAllowLazy) != 0),
stack_overflow_(false),
- parenthesized_function_(false),
- harmony_block_scoping_(false) {
+ parenthesized_function_(false) {
AstNode::ResetIds();
+ if ((parser_flags & kLanguageModeMask) == EXTENDED_MODE) {
+ scanner().SetHarmonyScoping(true);
+ }
}
-FunctionLiteral* Parser::ParseProgram(Handle<String> source,
- bool in_global_context,
- StrictModeFlag strict_mode) {
+FunctionLiteral* Parser::ParseProgram(CompilationInfo* info) {
ZoneScope zone_scope(isolate(), DONT_DELETE_ON_EXIT);
HistogramTimerScope timer(isolate()->counters()->parse());
+ Handle<String> source(String::cast(script_->source()));
isolate()->counters()->total_parse_size()->Increment(source->length());
fni_ = new(zone()) FuncNameInferrer(isolate());
@@ -610,47 +628,48 @@ FunctionLiteral* Parser::ParseProgram(Handle<String> source,
ExternalTwoByteStringUC16CharacterStream stream(
Handle<ExternalTwoByteString>::cast(source), 0, source->length());
scanner_.Initialize(&stream);
- return DoParseProgram(source, in_global_context, strict_mode, &zone_scope);
+ return DoParseProgram(info, source, &zone_scope);
} else {
GenericStringUC16CharacterStream stream(source, 0, source->length());
scanner_.Initialize(&stream);
- return DoParseProgram(source, in_global_context, strict_mode, &zone_scope);
+ return DoParseProgram(info, source, &zone_scope);
}
}
-FunctionLiteral* Parser::DoParseProgram(Handle<String> source,
- bool in_global_context,
- StrictModeFlag strict_mode,
+FunctionLiteral* Parser::DoParseProgram(CompilationInfo* info,
+ Handle<String> source,
ZoneScope* zone_scope) {
+ ASSERT(top_scope_ == NULL);
ASSERT(target_stack_ == NULL);
if (pre_data_ != NULL) pre_data_->Initialize();
// Compute the parsing mode.
- mode_ = FLAG_lazy ? PARSE_LAZILY : PARSE_EAGERLY;
+ mode_ = (FLAG_lazy && allow_lazy_) ? PARSE_LAZILY : PARSE_EAGERLY;
if (allow_natives_syntax_ || extension_ != NULL) mode_ = PARSE_EAGERLY;
- Scope::Type type =
- in_global_context
- ? Scope::GLOBAL_SCOPE
- : Scope::EVAL_SCOPE;
Handle<String> no_name = isolate()->factory()->empty_symbol();
FunctionLiteral* result = NULL;
- { Scope* scope = NewScope(top_scope_, type, inside_with());
- LexicalScope lexical_scope(this, scope, isolate());
- if (strict_mode == kStrictMode) {
- top_scope_->EnableStrictMode();
+ { Scope* scope = NewScope(top_scope_, GLOBAL_SCOPE);
+ info->SetGlobalScope(scope);
+ if (!info->is_global()) {
+ scope = Scope::DeserializeScopeChain(*info->calling_context(), scope);
+ scope = NewScope(scope, EVAL_SCOPE);
}
+ scope->set_start_position(0);
+ scope->set_end_position(source->length());
+ FunctionState function_state(this, scope, isolate());
+ top_scope_->SetLanguageMode(info->language_mode());
ZoneList<Statement*>* body = new(zone()) ZoneList<Statement*>(16);
bool ok = true;
int beg_loc = scanner().location().beg_pos;
ParseSourceElements(body, Token::EOS, &ok);
- if (ok && top_scope_->is_strict_mode()) {
+ if (ok && !top_scope_->is_classic_mode()) {
CheckOctalLiteral(beg_loc, scanner().location().end_pos, &ok);
}
- if (ok && harmony_block_scoping_) {
+ if (ok && is_extended_mode()) {
CheckConflictingVarDeclarations(scope, &ok);
}
@@ -660,13 +679,12 @@ FunctionLiteral* Parser::DoParseProgram(Handle<String> source,
no_name,
top_scope_,
body,
- lexical_scope.materialized_literal_count(),
- lexical_scope.expected_property_count(),
- lexical_scope.only_simple_this_property_assignments(),
- lexical_scope.this_property_assignments(),
- 0,
+ function_state.materialized_literal_count(),
+ function_state.expected_property_count(),
+ function_state.handler_count(),
+ function_state.only_simple_this_property_assignments(),
+ function_state.this_property_assignments(),
0,
- source->length(),
FunctionLiteral::ANONYMOUS_EXPRESSION,
false); // Does not have duplicate parameters.
} else if (stack_overflow_) {
@@ -714,6 +732,7 @@ FunctionLiteral* Parser::ParseLazy(CompilationInfo* info,
ZoneScope* zone_scope) {
Handle<SharedFunctionInfo> shared_info = info->shared_info();
scanner_.Initialize(source);
+ ASSERT(top_scope_ == NULL);
ASSERT(target_stack_ == NULL);
Handle<String> name(String::cast(shared_info->name()));
@@ -727,16 +746,17 @@ FunctionLiteral* Parser::ParseLazy(CompilationInfo* info,
{
// Parse the function literal.
- Scope* scope = NewScope(top_scope_, Scope::GLOBAL_SCOPE, inside_with());
+ Scope* scope = NewScope(top_scope_, GLOBAL_SCOPE);
+ info->SetGlobalScope(scope);
if (!info->closure().is_null()) {
- scope = Scope::DeserializeScopeChain(info, scope);
+ scope = Scope::DeserializeScopeChain(info->closure()->context(), scope);
}
- LexicalScope lexical_scope(this, scope, isolate());
-
- if (shared_info->strict_mode()) {
- top_scope_->EnableStrictMode();
- }
-
+ FunctionState function_state(this, scope, isolate());
+ ASSERT(scope->language_mode() != STRICT_MODE || !info->is_classic_mode());
+ ASSERT(scope->language_mode() != EXTENDED_MODE ||
+ info->is_extended_mode());
+ ASSERT(info->language_mode() == shared_info->language_mode());
+ scope->SetLanguageMode(shared_info->language_mode());
FunctionLiteral::Type type = shared_info->is_expression()
? (shared_info->is_anonymous()
? FunctionLiteral::ANONYMOUS_EXPRESSION
@@ -817,10 +837,6 @@ void Parser::ReportMessageAt(Scanner::Location source_location,
isolate()->Throw(*result, &location);
}
-void Parser::SetHarmonyBlockScoping(bool block_scoping) {
- scanner().SetHarmonyBlockScoping(block_scoping);
- harmony_block_scoping_ = block_scoping;
-}
// Base class containing common code for the different finder classes used by
// the parser.
@@ -957,17 +973,18 @@ class InitializationBlockFinder : public ParserFinder {
};
-// A ThisNamedPropertyAssigmentFinder finds and marks statements of the form
+// A ThisNamedPropertyAssignmentFinder finds and marks statements of the form
// this.x = ...;, where x is a named property. It also determines whether a
// function contains only assignments of this type.
-class ThisNamedPropertyAssigmentFinder : public ParserFinder {
+class ThisNamedPropertyAssignmentFinder : public ParserFinder {
public:
- explicit ThisNamedPropertyAssigmentFinder(Isolate* isolate)
+ explicit ThisNamedPropertyAssignmentFinder(Isolate* isolate)
: isolate_(isolate),
only_simple_this_property_assignments_(true),
- names_(NULL),
- assigned_arguments_(NULL),
- assigned_constants_(NULL) {}
+ names_(0),
+ assigned_arguments_(0),
+ assigned_constants_(0) {
+ }
void Update(Scope* scope, Statement* stat) {
// Bail out if function already has property assignment that are
@@ -994,19 +1011,17 @@ class ThisNamedPropertyAssigmentFinder : public ParserFinder {
// Returns a fixed array containing three elements for each assignment of the
// form this.x = y;
Handle<FixedArray> GetThisPropertyAssignments() {
- if (names_ == NULL) {
+ if (names_.is_empty()) {
return isolate_->factory()->empty_fixed_array();
}
- ASSERT(names_ != NULL);
- ASSERT(assigned_arguments_ != NULL);
- ASSERT_EQ(names_->length(), assigned_arguments_->length());
- ASSERT_EQ(names_->length(), assigned_constants_->length());
+ ASSERT_EQ(names_.length(), assigned_arguments_.length());
+ ASSERT_EQ(names_.length(), assigned_constants_.length());
Handle<FixedArray> assignments =
- isolate_->factory()->NewFixedArray(names_->length() * 3);
- for (int i = 0; i < names_->length(); i++) {
- assignments->set(i * 3, *names_->at(i));
- assignments->set(i * 3 + 1, Smi::FromInt(assigned_arguments_->at(i)));
- assignments->set(i * 3 + 2, *assigned_constants_->at(i));
+ isolate_->factory()->NewFixedArray(names_.length() * 3);
+ for (int i = 0; i < names_.length(); ++i) {
+ assignments->set(i * 3, *names_[i]);
+ assignments->set(i * 3 + 1, Smi::FromInt(assigned_arguments_[i]));
+ assignments->set(i * 3 + 2, *assigned_constants_[i]);
}
return assignments;
}
@@ -1063,18 +1078,37 @@ class ThisNamedPropertyAssigmentFinder : public ParserFinder {
AssignmentFromSomethingElse();
}
+
+ // We will potentially reorder the property assignments, so they must be
+ // simple enough that the ordering does not matter.
void AssignmentFromParameter(Handle<String> name, int index) {
- EnsureAllocation();
- names_->Add(name);
- assigned_arguments_->Add(index);
- assigned_constants_->Add(isolate_->factory()->undefined_value());
+ EnsureInitialized();
+ for (int i = 0; i < names_.length(); ++i) {
+ if (name->Equals(*names_[i])) {
+ assigned_arguments_[i] = index;
+ assigned_constants_[i] = isolate_->factory()->undefined_value();
+ return;
+ }
+ }
+ names_.Add(name);
+ assigned_arguments_.Add(index);
+ assigned_constants_.Add(isolate_->factory()->undefined_value());
}
void AssignmentFromConstant(Handle<String> name, Handle<Object> value) {
- EnsureAllocation();
- names_->Add(name);
- assigned_arguments_->Add(-1);
- assigned_constants_->Add(value);
+ EnsureInitialized();
+ for (int i = 0; i < names_.length(); ++i) {
+ if (name->Equals(*names_[i])) {
+ assigned_arguments_[i] = -1;
+ assigned_constants_[i] = value;
+ return;
+ }
+ }
+ names_.Add(name);
+ assigned_arguments_.Add(-1);
+ assigned_constants_.Add(value);
}
void AssignmentFromSomethingElse() {
@@ -1082,41 +1116,42 @@ class ThisNamedPropertyAssigmentFinder : public ParserFinder {
only_simple_this_property_assignments_ = false;
}
- void EnsureAllocation() {
- if (names_ == NULL) {
- ASSERT(assigned_arguments_ == NULL);
- ASSERT(assigned_constants_ == NULL);
- Zone* zone = isolate_->zone();
- names_ = new(zone) ZoneStringList(4);
- assigned_arguments_ = new(zone) ZoneList<int>(4);
- assigned_constants_ = new(zone) ZoneObjectList(4);
+ void EnsureInitialized() {
+ if (names_.capacity() == 0) {
+ ASSERT(assigned_arguments_.capacity() == 0);
+ ASSERT(assigned_constants_.capacity() == 0);
+ names_.Initialize(4);
+ assigned_arguments_.Initialize(4);
+ assigned_constants_.Initialize(4);
}
}
Isolate* isolate_;
bool only_simple_this_property_assignments_;
- ZoneStringList* names_;
- ZoneList<int>* assigned_arguments_;
- ZoneObjectList* assigned_constants_;
+ ZoneStringList names_;
+ ZoneList<int> assigned_arguments_;
+ ZoneObjectList assigned_constants_;
};
Statement* Parser::ParseSourceElement(ZoneStringList* labels,
bool* ok) {
+ // (Ecma 262 5th Edition, clause 14):
+ // SourceElement:
+ // Statement
+ // FunctionDeclaration
+ //
+ // In harmony mode we additionally allow the following productions:
+ // SourceElement:
+ // LetDeclaration
+ // ConstDeclaration
+
if (peek() == Token::FUNCTION) {
- // FunctionDeclaration is only allowed in the context of SourceElements
- // (Ecma 262 5th Edition, clause 14):
- // SourceElement:
- // Statement
- // FunctionDeclaration
- // Common language extension is to allow function declaration in place
- // of any statement. This language extension is disabled in strict mode.
return ParseFunctionDeclaration(ok);
- } else if (peek() == Token::LET) {
+ } else if (peek() == Token::LET || peek() == Token::CONST) {
return ParseVariableStatement(kSourceElement, ok);
- } else {
- return ParseStatement(labels, ok);
}
+ return ParseStatement(labels, ok);
}
@@ -1124,7 +1159,7 @@ void* Parser::ParseSourceElements(ZoneList<Statement*>* processor,
int end_token,
bool* ok) {
// SourceElements ::
- // (Statement)* <end_token>
+ // (SourceElement)* <end_token>
// Allocate a target stack to use for this set of source
// elements. This way, all scripts and functions get their own
@@ -1134,7 +1169,7 @@ void* Parser::ParseSourceElements(ZoneList<Statement*>* processor,
ASSERT(processor != NULL);
InitializationBlockFinder block_finder(top_scope_, target_stack_);
- ThisNamedPropertyAssigmentFinder this_property_assignment_finder(isolate());
+ ThisNamedPropertyAssignmentFinder this_property_assignment_finder(isolate());
bool directive_prologue = true; // Parsing directive prologue.
while (peek() != end_token) {
@@ -1160,11 +1195,13 @@ void* Parser::ParseSourceElements(ZoneList<Statement*>* processor,
Handle<String> directive = Handle<String>::cast(literal->handle());
// Check "use strict" directive (ES5 14.1).
- if (!top_scope_->is_strict_mode() &&
+ if (top_scope_->is_classic_mode() &&
directive->Equals(isolate()->heap()->use_strict()) &&
token_loc.end_pos - token_loc.beg_pos ==
isolate()->heap()->use_strict()->length() + 2) {
- top_scope_->EnableStrictMode();
+ // TODO(ES6): Fix entering extended mode, once it is specified.
+ top_scope_->SetLanguageMode(FLAG_harmony_scoping
+ ? EXTENDED_MODE : STRICT_MODE);
// "use strict" is the only directive for now.
directive_prologue = false;
}
@@ -1188,7 +1225,7 @@ void* Parser::ParseSourceElements(ZoneList<Statement*>* processor,
this_property_assignment_finder.only_simple_this_property_assignments()
&& top_scope_->declarations()->length() == 0;
if (only_simple_this_property_assignments) {
- lexical_scope_->SetThisPropertyAssignmentInfo(
+ current_function_state_->SetThisPropertyAssignmentInfo(
only_simple_this_property_assignments,
this_property_assignment_finder.GetThisPropertyAssignments());
}
@@ -1230,6 +1267,7 @@ Statement* Parser::ParseStatement(ZoneStringList* labels, bool* ok) {
return ParseBlock(labels, ok);
case Token::CONST: // fall through
+ case Token::LET:
case Token::VAR:
stmt = ParseVariableStatement(kStatement, ok);
break;
@@ -1295,9 +1333,14 @@ Statement* Parser::ParseStatement(ZoneStringList* labels, bool* ok) {
}
case Token::FUNCTION: {
- // In strict mode, FunctionDeclaration is only allowed in the context
- // of SourceElements.
- if (top_scope_->is_strict_mode()) {
+ // FunctionDeclaration is only allowed in the context of SourceElements
+ // (Ecma 262 5th Edition, clause 14):
+ // SourceElement:
+ // Statement
+ // FunctionDeclaration
+ // Common language extension is to allow function declaration in place
+ // of any statement. This language extension is disabled in strict mode.
+ if (!top_scope_->is_classic_mode()) {
ReportMessageAt(scanner().peek_location(), "strict_function",
Vector<const char*>::empty());
*ok = false;
@@ -1321,7 +1364,7 @@ Statement* Parser::ParseStatement(ZoneStringList* labels, bool* ok) {
VariableProxy* Parser::Declare(Handle<String> name,
- Variable::Mode mode,
+ VariableMode mode,
FunctionLiteral* fun,
bool resolve,
bool* ok) {
@@ -1329,6 +1372,12 @@ VariableProxy* Parser::Declare(Handle<String> name,
// If we are inside a function, a declaration of a var/const variable is a
// truly local variable, and the scope of the variable is always the function
// scope.
+ // Let/const variables in harmony mode are always added to the immediately
+ // enclosing scope.
+ Scope* declaration_scope = (mode == LET || mode == CONST_HARMONY)
+ ? top_scope_ : top_scope_->DeclarationScope();
+ InitializationFlag init_flag = (fun != NULL || mode == VAR)
+ ? kCreatedInitialized : kNeedsInitialization;
// If a function scope exists, then we can statically declare this
// variable and also set its mode. In any case, a Declaration node
@@ -1338,17 +1387,16 @@ VariableProxy* Parser::Declare(Handle<String> name,
// to the calling function context.
// Similarly, strict mode eval scope does not leak variable declarations to
// the caller's scope so we declare all locals, too.
-
- Scope* declaration_scope = mode == Variable::LET ? top_scope_
- : top_scope_->DeclarationScope();
+ // Also for block scoped let/const bindings the variable can be
+ // statically declared.
if (declaration_scope->is_function_scope() ||
- declaration_scope->is_strict_mode_eval_scope() ||
+ declaration_scope->is_strict_or_extended_eval_scope() ||
declaration_scope->is_block_scope()) {
// Declare the variable in the function scope.
var = declaration_scope->LocalLookup(name);
if (var == NULL) {
// Declare the name.
- var = declaration_scope->DeclareLocal(name, mode);
+ var = declaration_scope->DeclareLocal(name, mode, init_flag);
} else {
// The name was declared in this scope before; check for conflicting
// re-declarations. We have a conflict if either of the declarations is
@@ -1361,12 +1409,13 @@ VariableProxy* Parser::Declare(Handle<String> name,
//
// because the var declaration is hoisted to the function scope where 'x'
// is already bound.
- if ((mode != Variable::VAR) || (var->mode() != Variable::VAR)) {
+ if ((mode != VAR) || (var->mode() != VAR)) {
// We only have vars, consts and lets in declarations.
- ASSERT(var->mode() == Variable::VAR ||
- var->mode() == Variable::CONST ||
- var->mode() == Variable::LET);
- if (harmony_block_scoping_) {
+ ASSERT(var->mode() == VAR ||
+ var->mode() == CONST ||
+ var->mode() == CONST_HARMONY ||
+ var->mode() == LET);
+ if (is_extended_mode()) {
// In harmony mode we treat re-declarations as early errors. See
// ES5 16 for a definition of early errors.
SmartArrayPointer<char> c_string = name->ToCString(DISALLOW_NULLS);
@@ -1376,8 +1425,8 @@ VariableProxy* Parser::Declare(Handle<String> name,
*ok = false;
return NULL;
}
- const char* type = (var->mode() == Variable::VAR) ? "var" :
- (var->mode() == Variable::CONST) ? "const" : "let";
+ const char* type = (var->mode() == VAR)
+ ? "var" : var->is_const_mode() ? "const" : "let";
Handle<String> type_string =
isolate()->factory()->NewStringFromUtf8(CStrVector(type), TENURED);
Expression* expression =
@@ -1405,19 +1454,35 @@ VariableProxy* Parser::Declare(Handle<String> name,
// a performance issue since it may lead to repeated
// Runtime::DeclareContextSlot() calls.
VariableProxy* proxy = declaration_scope->NewUnresolved(
- name, false, scanner().location().beg_pos);
+ name, scanner().location().beg_pos);
declaration_scope->AddDeclaration(
new(zone()) Declaration(proxy, mode, fun, top_scope_));
- // For global const variables we bind the proxy to a variable.
- if (mode == Variable::CONST && declaration_scope->is_global_scope()) {
+ if ((mode == CONST || mode == CONST_HARMONY) &&
+ declaration_scope->is_global_scope()) {
+ // For global const variables we bind the proxy to a variable.
ASSERT(resolve); // should be set by all callers
Variable::Kind kind = Variable::NORMAL;
var = new(zone()) Variable(declaration_scope,
name,
- Variable::CONST,
+ mode,
true,
- kind);
+ kind,
+ kNeedsInitialization);
+ } else if (declaration_scope->is_eval_scope() &&
+ declaration_scope->is_classic_mode()) {
+ // For variable declarations in a non-strict eval scope the proxy is bound
+ // to a lookup variable to force a dynamic declaration using the
+ // DeclareContextSlot runtime function.
+ Variable::Kind kind = Variable::NORMAL;
+ var = new(zone()) Variable(declaration_scope,
+ name,
+ mode,
+ true,
+ kind,
+ init_flag);
+ var->AllocateTo(Variable::LOOKUP, -1);
+ resolve = true;
}
// If requested and we have a local variable, bind the proxy to the variable
@@ -1487,7 +1552,7 @@ Statement* Parser::ParseNativeDeclaration(bool* ok) {
Handle<Code> construct_stub = Handle<Code>(fun->shared()->construct_stub());
Handle<SharedFunctionInfo> shared =
isolate()->factory()->NewSharedFunctionInfo(name, literals, code,
- Handle<SerializedScopeInfo>(fun->shared()->scope_info()));
+ Handle<ScopeInfo>(fun->shared()->scope_info()));
shared->set_construct_stub(*construct_stub);
// Copy the function data to the shared function info.
@@ -1500,7 +1565,7 @@ Statement* Parser::ParseNativeDeclaration(bool* ok) {
// other functions are setup when entering the surrounding scope.
SharedFunctionInfoLiteral* lit =
new(zone()) SharedFunctionInfoLiteral(isolate(), shared);
- VariableProxy* var = Declare(name, Variable::VAR, NULL, true, CHECK_OK);
+ VariableProxy* var = Declare(name, VAR, NULL, true, CHECK_OK);
return new(zone()) ExpressionStatement(new(zone()) Assignment(
isolate(), Token::INIT_VAR, var, lit, RelocInfo::kNoPosition));
}
@@ -1522,14 +1587,14 @@ Statement* Parser::ParseFunctionDeclaration(bool* ok) {
// Even if we're not at the top-level of the global or a function
// scope, we treat is as such and introduce the function with it's
// initial value upon entering the corresponding scope.
- Variable::Mode mode = harmony_block_scoping_ ? Variable::LET : Variable::VAR;
+ VariableMode mode = is_extended_mode() ? LET : VAR;
Declare(name, mode, fun, true, CHECK_OK);
return EmptyStatement();
}
Block* Parser::ParseBlock(ZoneStringList* labels, bool* ok) {
- if (harmony_block_scoping_) return ParseScopedBlock(labels, ok);
+ if (top_scope_->is_extended_mode()) return ParseScopedBlock(labels, ok);
// Block ::
// '{' Statement* '}'
@@ -1555,22 +1620,21 @@ Block* Parser::ParseBlock(ZoneStringList* labels, bool* ok) {
Block* Parser::ParseScopedBlock(ZoneStringList* labels, bool* ok) {
+ // The harmony mode uses source elements instead of statements.
+ //
+ // Block ::
+ // '{' SourceElement* '}'
+
// Construct block expecting 16 statements.
Block* body = new(zone()) Block(isolate(), labels, 16, false);
- Scope* saved_scope = top_scope_;
- Scope* block_scope = NewScope(top_scope_,
- Scope::BLOCK_SCOPE,
- inside_with());
- if (top_scope_->is_strict_mode()) {
- block_scope->EnableStrictMode();
- }
- top_scope_ = block_scope;
+ Scope* block_scope = NewScope(top_scope_, BLOCK_SCOPE);
// Parse the statements and collect escaping labels.
- TargetCollector collector;
- Target target(&this->target_stack_, &collector);
Expect(Token::LBRACE, CHECK_OK);
- {
+ block_scope->set_start_position(scanner().location().beg_pos);
+ { BlockState block_state(this, block_scope);
+ TargetCollector collector;
+ Target target(&this->target_stack_, &collector);
Target target_body(&this->target_stack_, body);
InitializationBlockFinder block_finder(top_scope_, target_stack_);
@@ -1583,8 +1647,7 @@ Block* Parser::ParseScopedBlock(ZoneStringList* labels, bool* ok) {
}
}
Expect(Token::RBRACE, CHECK_OK);
- top_scope_ = saved_scope;
-
+ block_scope->set_end_position(scanner().location().end_pos);
block_scope = block_scope->FinalizeBlockScope();
body->set_block_scope(block_scope);
return body;
@@ -1598,6 +1661,7 @@ Block* Parser::ParseVariableStatement(VariableDeclarationContext var_context,
Handle<String> ignore;
Block* result = ParseVariableDeclarations(var_context,
+ NULL,
&ignore,
CHECK_OK);
ExpectSemicolon(CHECK_OK);
@@ -1616,13 +1680,25 @@ bool Parser::IsEvalOrArguments(Handle<String> string) {
// *out is untouched; in particular, it is the caller's responsibility
// to initialize it properly. This mechanism is used for the parsing
// of 'for-in' loops.
-Block* Parser::ParseVariableDeclarations(VariableDeclarationContext var_context,
- Handle<String>* out,
- bool* ok) {
+Block* Parser::ParseVariableDeclarations(
+ VariableDeclarationContext var_context,
+ VariableDeclarationProperties* decl_props,
+ Handle<String>* out,
+ bool* ok) {
// VariableDeclarations ::
- // ('var' | 'const') (Identifier ('=' AssignmentExpression)?)+[',']
-
- Variable::Mode mode = Variable::VAR;
+ // ('var' | 'const' | 'let') (Identifier ('=' AssignmentExpression)?)+[',']
+ //
+ // The ES6 Draft Rev3 specifies the following grammar for const declarations
+ //
+ // ConstDeclaration ::
+ // const ConstBinding (',' ConstBinding)* ';'
+ // ConstBinding ::
+ // Identifier '=' AssignmentExpression
+ //
+ // TODO(ES6):
+ // ConstBinding ::
+ // BindingPattern '=' AssignmentExpression
+ VariableMode mode = VAR;
// True if the binding needs initialization. 'let' and 'const' declared
// bindings are created uninitialized by their declaration nodes and
// need initialization. 'var' declared bindings are always initialized
@@ -1633,33 +1709,69 @@ Block* Parser::ParseVariableDeclarations(VariableDeclarationContext var_context,
if (peek() == Token::VAR) {
Consume(Token::VAR);
} else if (peek() == Token::CONST) {
+ // TODO(ES6): The ES6 Draft Rev4 section 12.2.2 reads:
+ //
+ // ConstDeclaration : const ConstBinding (',' ConstBinding)* ';'
+ //
+ // * It is a Syntax Error if the code that matches this production is not
+ // contained in extended code.
+ //
+ // However disallowing const in classic mode will break compatibility with
+ // existing pages. Therefore we keep allowing const with the old
+ // non-harmony semantics in classic mode.
Consume(Token::CONST);
- if (top_scope_->is_strict_mode()) {
- ReportMessage("strict_const", Vector<const char*>::empty());
- *ok = false;
- return NULL;
+ switch (top_scope_->language_mode()) {
+ case CLASSIC_MODE:
+ mode = CONST;
+ init_op = Token::INIT_CONST;
+ break;
+ case STRICT_MODE:
+ ReportMessage("strict_const", Vector<const char*>::empty());
+ *ok = false;
+ return NULL;
+ case EXTENDED_MODE:
+ if (var_context != kSourceElement &&
+ var_context != kForStatement) {
+ // In extended mode 'const' declarations are only allowed in source
+ // element positions.
+ ReportMessage("unprotected_const", Vector<const char*>::empty());
+ *ok = false;
+ return NULL;
+ }
+ mode = CONST_HARMONY;
+ init_op = Token::INIT_CONST_HARMONY;
}
- mode = Variable::CONST;
is_const = true;
needs_init = true;
- init_op = Token::INIT_CONST;
} else if (peek() == Token::LET) {
+ // ES6 Draft Rev4 section 12.2.1:
+ //
+ // LetDeclaration : let LetBindingList ;
+ //
+ // * It is a Syntax Error if the code that matches this production is not
+ // contained in extended code.
+ if (!is_extended_mode()) {
+ ReportMessage("illegal_let", Vector<const char*>::empty());
+ *ok = false;
+ return NULL;
+ }
Consume(Token::LET);
if (var_context != kSourceElement &&
var_context != kForStatement) {
+ // Let declarations are only allowed in source element positions.
ASSERT(var_context == kStatement);
ReportMessage("unprotected_let", Vector<const char*>::empty());
*ok = false;
return NULL;
}
- mode = Variable::LET;
+ mode = LET;
needs_init = true;
init_op = Token::INIT_LET;
} else {
UNREACHABLE(); // by current callers
}
- Scope* declaration_scope = mode == Variable::LET
+ Scope* declaration_scope = (mode == LET || mode == CONST_HARMONY)
? top_scope_ : top_scope_->DeclarationScope();
// The scope of a var/const declared variable anywhere inside a function
// is the entire function (ECMA-262, 3rd, 10.1.3, and 12.2). Thus we can
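The switch above gives 'const' three distinct treatments. A condensed restatement of the decision, with illustrative enums and error signaling rather than the real V8 declarations:

    // Classic mode keeps legacy const; strict mode rejects it outright;
    // extended (harmony) mode allows it only in source element positions.
    enum LanguageModeSketch { kClassicSketch, kStrictSketch, kExtendedSketch };
    enum VariableModeSketch { kConstSketch, kConstHarmonySketch };

    bool ConstModeFor(LanguageModeSketch language_mode,
                      bool at_source_element_or_for,
                      VariableModeSketch* mode_out) {
      switch (language_mode) {
        case kClassicSketch:
          *mode_out = kConstSketch;          // Legacy semantics.
          return true;
        case kStrictSketch:
          return false;                      // "strict_const" error.
        case kExtendedSketch:
          if (!at_source_element_or_for) return false;  // "unprotected_const".
          *mode_out = kConstHarmonySketch;   // Requires an initializer.
          return true;
      }
      return false;
    }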
@@ -1686,7 +1798,7 @@ Block* Parser::ParseVariableDeclarations(VariableDeclarationContext var_context,
if (fni_ != NULL) fni_->PushVariableName(name);
// Strict mode variables may not be named eval or arguments
- if (declaration_scope->is_strict_mode() && IsEvalOrArguments(name)) {
+ if (!declaration_scope->is_classic_mode() && IsEvalOrArguments(name)) {
ReportMessage("strict_var_name", Vector<const char*>::empty());
*ok = false;
return NULL;
@@ -1704,8 +1816,10 @@ Block* Parser::ParseVariableDeclarations(VariableDeclarationContext var_context,
// If we have a const declaration, in an inner scope, the proxy is always
// bound to the declared variable (independent of possibly surrounding with
// statements).
- Declare(name, mode, NULL, is_const /* always bound for CONST! */,
- CHECK_OK);
+ // For let/const declarations in harmony mode, we can also immediately
+ // pre-resolve the proxy because it resides in the same scope as the
+ // declaration.
+ VariableProxy* proxy = Declare(name, mode, NULL, mode != VAR, CHECK_OK);
nvars++;
if (declaration_scope->num_var_or_const() > kMaxNumFunctionLocals) {
ReportMessageAt(scanner().location(), "too_many_variables",
@@ -1744,7 +1858,8 @@ Block* Parser::ParseVariableDeclarations(VariableDeclarationContext var_context,
Scope* initialization_scope = is_const ? declaration_scope : top_scope_;
Expression* value = NULL;
int position = -1;
- if (peek() == Token::ASSIGN) {
+ // Harmony consts have non-optional initializers.
+ if (peek() == Token::ASSIGN || mode == CONST_HARMONY) {
Expect(Token::ASSIGN, CHECK_OK);
position = scanner().location().beg_pos;
value = ParseAssignmentExpression(var_context != kForStatement, CHECK_OK);
@@ -1756,6 +1871,12 @@ Block* Parser::ParseVariableDeclarations(VariableDeclarationContext var_context,
} else {
fni_->RemoveLastFunction();
}
+ if (decl_props != NULL) *decl_props = kHasInitializers;
+ }
+
+ // Record the end position of the initializer.
+ if (proxy->var() != NULL) {
+ proxy->var()->set_initializer_position(scanner().location().end_pos);
}
// Make sure that 'const x' and 'let x' initialize 'x' to undefined.
@@ -1782,7 +1903,6 @@ Block* Parser::ParseVariableDeclarations(VariableDeclarationContext var_context,
// declaration statement has been executed. This is important in
// browsers where the global object (window) has lots of
// properties defined in prototype objects.
-
if (initialization_scope->is_global_scope()) {
// Compute the arguments for the runtime call.
ZoneList<Expression*>* arguments = new(zone()) ZoneList<Expression*>(3);
@@ -1807,10 +1927,8 @@ Block* Parser::ParseVariableDeclarations(VariableDeclarationContext var_context,
} else {
// Add strict mode.
// We may want to pass singleton to avoid Literal allocations.
- StrictModeFlag flag = initialization_scope->is_strict_mode()
- ? kStrictMode
- : kNonStrictMode;
- arguments->Add(NewNumberLiteral(flag));
+ LanguageMode language_mode = initialization_scope->language_mode();
+ arguments->Add(NewNumberLiteral(language_mode));
// Be careful not to assign a value to the global variable if
// we're in a with. The initialization value should not
@@ -1834,30 +1952,34 @@ Block* Parser::ParseVariableDeclarations(VariableDeclarationContext var_context,
}
block->AddStatement(new(zone()) ExpressionStatement(initialize));
+ } else if (needs_init) {
+ // Constant initializations always assign to the declared constant which
+ // is always at the function scope level. This is only relevant for
+ // dynamically looked-up variables and constants (the start context for
+ // constant lookups is always the function context, while it is the top
+ // context for var declared variables). Sigh...
+ // For 'let' and 'const' declared variables in harmony mode the
+ // initialization also always assigns to the declared variable.
+ ASSERT(proxy != NULL);
+ ASSERT(proxy->var() != NULL);
+ ASSERT(value != NULL);
+ Assignment* assignment =
+ new(zone()) Assignment(isolate(), init_op, proxy, value, position);
+ block->AddStatement(new(zone()) ExpressionStatement(assignment));
+ value = NULL;
}
// Add an assignment node to the initialization statement block if we still
- // have a pending initialization value. We must distinguish between
- // different kinds of declarations: 'var' initializations are simply
- // assignments (with all the consequences if they are inside a 'with'
- // statement - they may change a 'with' object property). Constant
- // initializations always assign to the declared constant which is
- // always at the function scope level. This is only relevant for
- // dynamically looked-up variables and constants (the start context
- // for constant lookups is always the function context, while it is
- // the top context for var declared variables). Sigh...
- // For 'let' declared variables the initialization is in the same scope
- // as the declaration. Thus dynamic lookups are unnecessary even if the
- // block scope is inside a with.
+ // have a pending initialization value.
if (value != NULL) {
- bool in_with = mode == Variable::VAR ? inside_with() : false;
- VariableProxy* proxy =
- initialization_scope->NewUnresolved(name, in_with);
+ ASSERT(mode == VAR);
+ // 'var' initializations are simply assignments (with all the consequences
+ // if they are inside a 'with' statement - they may change a 'with' object
+ // property).
+ VariableProxy* proxy = initialization_scope->NewUnresolved(name);
Assignment* assignment =
new(zone()) Assignment(isolate(), init_op, proxy, value, position);
- if (block) {
- block->AddStatement(new(zone()) ExpressionStatement(assignment));
- }
+ block->AddStatement(new(zone()) ExpressionStatement(assignment));
}
if (fni_ != NULL) fni_->Leave();
@@ -2070,7 +2192,7 @@ Statement* Parser::ParseWithStatement(ZoneStringList* labels, bool* ok) {
Expect(Token::WITH, CHECK_OK);
- if (top_scope_->is_strict_mode()) {
+ if (!top_scope_->is_classic_mode()) {
ReportMessage("strict_mode_with", Vector<const char*>::empty());
*ok = false;
return NULL;
@@ -2080,10 +2202,14 @@ Statement* Parser::ParseWithStatement(ZoneStringList* labels, bool* ok) {
Expression* expr = ParseExpression(true, CHECK_OK);
Expect(Token::RPAREN, CHECK_OK);
- ++with_nesting_level_;
top_scope_->DeclarationScope()->RecordWithStatement();
- Statement* stmt = ParseStatement(labels, CHECK_OK);
- --with_nesting_level_;
+ Scope* with_scope = NewScope(top_scope_, WITH_SCOPE);
+ Statement* stmt;
+ { BlockState block_state(this, with_scope);
+ with_scope->set_start_position(scanner().peek_location().beg_pos);
+ stmt = ParseStatement(labels, CHECK_OK);
+ with_scope->set_end_position(scanner().location().end_pos);
+ }
return new(zone()) WithStatement(expr, stmt);
}
@@ -2208,9 +2334,11 @@ TryStatement* Parser::ParseTryStatement(bool* ok) {
Consume(Token::CATCH);
Expect(Token::LPAREN, CHECK_OK);
+ catch_scope = NewScope(top_scope_, CATCH_SCOPE);
+ catch_scope->set_start_position(scanner().location().beg_pos);
name = ParseIdentifier(CHECK_OK);
- if (top_scope_->is_strict_mode() && IsEvalOrArguments(name)) {
+ if (!top_scope_->is_classic_mode() && IsEvalOrArguments(name)) {
ReportMessage("strict_catch_variable", Vector<const char*>::empty());
*ok = false;
return NULL;
@@ -2220,22 +2348,16 @@ TryStatement* Parser::ParseTryStatement(bool* ok) {
if (peek() == Token::LBRACE) {
Target target(&this->target_stack_, &catch_collector);
- catch_scope = NewScope(top_scope_, Scope::CATCH_SCOPE, inside_with());
- if (top_scope_->is_strict_mode()) {
- catch_scope->EnableStrictMode();
- }
- Variable::Mode mode = harmony_block_scoping_
- ? Variable::LET : Variable::VAR;
- catch_variable = catch_scope->DeclareLocal(name, mode);
+ VariableMode mode = is_extended_mode() ? LET : VAR;
+ catch_variable =
+ catch_scope->DeclareLocal(name, mode, kCreatedInitialized);
- Scope* saved_scope = top_scope_;
- top_scope_ = catch_scope;
+ BlockState block_state(this, catch_scope);
catch_block = ParseBlock(NULL, CHECK_OK);
- top_scope_ = saved_scope;
} else {
Expect(Token::LBRACE, CHECK_OK);
}
-
+ catch_scope->set_end_position(scanner().location().end_pos);
tok = peek();
}
@@ -2253,11 +2375,12 @@ TryStatement* Parser::ParseTryStatement(bool* ok) {
if (catch_block != NULL && finally_block != NULL) {
// If we have both, create an inner try/catch.
ASSERT(catch_scope != NULL && catch_variable != NULL);
- TryCatchStatement* statement =
- new(zone()) TryCatchStatement(try_block,
- catch_scope,
- catch_variable,
- catch_block);
+ int index = current_function_state_->NextHandlerIndex();
+ TryCatchStatement* statement = new(zone()) TryCatchStatement(index,
+ try_block,
+ catch_scope,
+ catch_variable,
+ catch_block);
statement->set_escaping_targets(try_collector.targets());
try_block = new(zone()) Block(isolate(), NULL, 1, false);
try_block->AddStatement(statement);
@@ -2268,14 +2391,18 @@ TryStatement* Parser::ParseTryStatement(bool* ok) {
if (catch_block != NULL) {
ASSERT(finally_block == NULL);
ASSERT(catch_scope != NULL && catch_variable != NULL);
- result =
- new(zone()) TryCatchStatement(try_block,
- catch_scope,
- catch_variable,
- catch_block);
+ int index = current_function_state_->NextHandlerIndex();
+ result = new(zone()) TryCatchStatement(index,
+ try_block,
+ catch_scope,
+ catch_variable,
+ catch_block);
} else {
ASSERT(finally_block != NULL);
- result = new(zone()) TryFinallyStatement(try_block, finally_block);
+ int index = current_function_state_->NextHandlerIndex();
+ result = new(zone()) TryFinallyStatement(index,
+ try_block,
+ finally_block);
// Combine the jump targets of the try block and the possible catch block.
try_collector.targets()->AddAll(*catch_collector.targets());
}
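Every try/catch and try/finally now carries an index from FunctionState::NextHandlerIndex(), and the resulting per-function handler count is later stored on the FunctionLiteral. A sketch of the counter, assuming it simply numbers handlers densely from zero:

    // Illustrative counter: each TryStatement in a function receives a
    // unique, dense handler index; the final count sizes handler tables.
    class FunctionStateSketch {
     public:
      FunctionStateSketch() : next_handler_index_(0) {}
      int NextHandlerIndex() { return next_handler_index_++; }
      int handler_count() const { return next_handler_index_; }
     private:
      int next_handler_index_;
    };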
@@ -2341,16 +2468,22 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) {
Statement* init = NULL;
+ // Create an in-between scope for let-bound iteration variables.
+ Scope* saved_scope = top_scope_;
+ Scope* for_scope = NewScope(top_scope_, BLOCK_SCOPE);
+ top_scope_ = for_scope;
+
Expect(Token::FOR, CHECK_OK);
Expect(Token::LPAREN, CHECK_OK);
+ for_scope->set_start_position(scanner().location().beg_pos);
if (peek() != Token::SEMICOLON) {
if (peek() == Token::VAR || peek() == Token::CONST) {
Handle<String> name;
Block* variable_statement =
- ParseVariableDeclarations(kForStatement, &name, CHECK_OK);
+ ParseVariableDeclarations(kForStatement, NULL, &name, CHECK_OK);
if (peek() == Token::IN && !name.is_null()) {
- VariableProxy* each = top_scope_->NewUnresolved(name, inside_with());
+ VariableProxy* each = top_scope_->NewUnresolved(name);
ForInStatement* loop = new(zone()) ForInStatement(isolate(), labels);
Target target(&this->target_stack_, loop);
@@ -2363,12 +2496,73 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) {
Block* result = new(zone()) Block(isolate(), NULL, 2, false);
result->AddStatement(variable_statement);
result->AddStatement(loop);
+ top_scope_ = saved_scope;
+ for_scope->set_end_position(scanner().location().end_pos);
+ for_scope = for_scope->FinalizeBlockScope();
+ ASSERT(for_scope == NULL);
// Parsed for-in loop w/ variable/const declaration.
return result;
} else {
init = variable_statement;
}
+ } else if (peek() == Token::LET) {
+ Handle<String> name;
+ VariableDeclarationProperties decl_props = kHasNoInitializers;
+ Block* variable_statement =
+ ParseVariableDeclarations(kForStatement,
+ &decl_props,
+ &name,
+ CHECK_OK);
+ bool accept_IN = !name.is_null() && decl_props != kHasInitializers;
+ if (peek() == Token::IN && accept_IN) {
+ // Rewrite a for-in statement of the form
+ //
+ // for (let x in e) b
+ //
+ // into
+ //
+ // <let x' be a temporary variable>
+ // for (x' in e) {
+ // let x;
+ // x = x';
+ // b;
+ // }
+
+ // TODO(keuchel): Move the temporary variable to the block scope, after
+ // implementing stack allocated block scoped variables.
+ Variable* temp = top_scope_->DeclarationScope()->NewTemporary(name);
+ VariableProxy* temp_proxy = new(zone()) VariableProxy(isolate(), temp);
+ VariableProxy* each = top_scope_->NewUnresolved(name);
+ ForInStatement* loop = new(zone()) ForInStatement(isolate(), labels);
+ Target target(&this->target_stack_, loop);
+ Expect(Token::IN, CHECK_OK);
+ Expression* enumerable = ParseExpression(true, CHECK_OK);
+ Expect(Token::RPAREN, CHECK_OK);
+
+ Statement* body = ParseStatement(NULL, CHECK_OK);
+ Block* body_block = new(zone()) Block(isolate(), NULL, 3, false);
+ Assignment* assignment = new(zone()) Assignment(isolate(),
+ Token::ASSIGN,
+ each,
+ temp_proxy,
+ RelocInfo::kNoPosition);
+ Statement* assignment_statement =
+ new(zone()) ExpressionStatement(assignment);
+ body_block->AddStatement(variable_statement);
+ body_block->AddStatement(assignment_statement);
+ body_block->AddStatement(body);
+ loop->Initialize(temp_proxy, enumerable, body_block);
+ top_scope_ = saved_scope;
+ for_scope->set_end_position(scanner().location().end_pos);
+ for_scope = for_scope->FinalizeBlockScope();
+ body_block->set_block_scope(for_scope);
+ // Parsed for-in loop w/ let declaration.
+ return loop;
+
+ } else {
+ init = variable_statement;
+ }
} else {
Expression* expression = ParseExpression(false, CHECK_OK);
if (peek() == Token::IN) {
@@ -2390,6 +2584,10 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) {
Statement* body = ParseStatement(NULL, CHECK_OK);
if (loop) loop->Initialize(expression, enumerable, body);
+ top_scope_ = saved_scope;
+ for_scope->set_end_position(scanner().location().end_pos);
+ for_scope = for_scope->FinalizeBlockScope();
+ ASSERT(for_scope == NULL);
// Parsed for-in loop.
return loop;
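The let rewrite above threads each enumerated key through the hidden temporary x' and copies it into a fresh 'let x' at the top of the loop body, so closures created in the body observe per-iteration values. A rough C++ analogue of the observable behavior (the real transformation, of course, operates on V8's AST):

    #include <functional>
    #include <iostream>
    #include <vector>

    int main() {
      std::vector<std::function<int()> > closures;
      for (int temp = 0; temp < 3; ++temp) {  // 'temp' plays the role of x'.
        int x = temp;                         // 'let x; x = x'' per iteration.
        closures.push_back([x] { return x; });
      }
      for (size_t i = 0; i < closures.size(); ++i) {
        std::cout << closures[i]() << ' ';    // Prints: 0 1 2
      }
      std::cout << '\n';
      return 0;
    }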
@@ -2420,8 +2618,31 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) {
Expect(Token::RPAREN, CHECK_OK);
Statement* body = ParseStatement(NULL, CHECK_OK);
- if (loop) loop->Initialize(init, cond, next, body);
- return loop;
+ top_scope_ = saved_scope;
+ for_scope->set_end_position(scanner().location().end_pos);
+ for_scope = for_scope->FinalizeBlockScope();
+ if (for_scope != NULL) {
+ // Rewrite a for statement of the form
+ //
+ // for (let x = i; c; n) b
+ //
+ // into
+ //
+ // {
+ // let x = i;
+ // for (; c; n) b
+ // }
+ ASSERT(init != NULL);
+ Block* result = new(zone()) Block(isolate(), NULL, 2, false);
+ result->AddStatement(init);
+ result->AddStatement(loop);
+ result->set_block_scope(for_scope);
+ if (loop) loop->Initialize(NULL, cond, next, body);
+ return result;
+ } else {
+ if (loop) loop->Initialize(init, cond, next, body);
+ return loop;
+ }
}
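For a plain for statement with let bindings, FinalizeBlockScope() returns a non-NULL scope exactly when a let declaration was seen, and the loop is then wrapped in a block holding that declaration. The shape of the rewrite, expressed as C++ block structure (illustrative only):

    // for (let x = i; c; n) b   ==>   { let x = i; for (; c; n) b }
    void ForLetShape() {
      {                       // Outer block added by the rewrite.
        int x = 0;            // let x = i;
        for (; x < 3; ++x) {  // for (; c; n) b -- the init slot is now empty.
          // The body refers to the single binding declared above.
        }
      }
    }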
@@ -2468,7 +2689,7 @@ Expression* Parser::ParseAssignmentExpression(bool accept_IN, bool* ok) {
expression = NewThrowReferenceError(type);
}
- if (top_scope_->is_strict_mode()) {
+ if (!top_scope_->is_classic_mode()) {
// Assignment to eval or arguments is disallowed in strict mode.
CheckStrictModeLValue(expression, "strict_lhs_assignment", CHECK_OK);
}
@@ -2487,13 +2708,13 @@ Expression* Parser::ParseAssignmentExpression(bool accept_IN, bool* ok) {
property != NULL &&
property->obj()->AsVariableProxy() != NULL &&
property->obj()->AsVariableProxy()->is_this()) {
- lexical_scope_->AddProperty();
+ current_function_state_->AddProperty();
}
// If we assign a function literal to a property we pretenure the
// literal so it can be added as a constant function property.
if (property != NULL && right->AsFunctionLiteral() != NULL) {
- right->AsFunctionLiteral()->set_pretenure(true);
+ right->AsFunctionLiteral()->set_pretenure();
}
if (fni_ != NULL) {
@@ -2618,7 +2839,7 @@ Expression* Parser::ParseBinaryExpression(int prec, bool accept_IN, bool* ok) {
case Token::NE_STRICT: cmp = Token::EQ_STRICT; break;
default: break;
}
- x = NewCompareNode(cmp, x, y, position);
+ x = new(zone()) CompareOperation(isolate(), cmp, x, y, position);
if (cmp != op) {
// The comparison was negated - add a NOT.
x = new(zone()) UnaryOperation(isolate(), Token::NOT, x, position);
@@ -2634,27 +2855,6 @@ Expression* Parser::ParseBinaryExpression(int prec, bool accept_IN, bool* ok) {
}
-Expression* Parser::NewCompareNode(Token::Value op,
- Expression* x,
- Expression* y,
- int position) {
- ASSERT(op != Token::NE && op != Token::NE_STRICT);
- if (op == Token::EQ || op == Token::EQ_STRICT) {
- bool is_strict = (op == Token::EQ_STRICT);
- Literal* x_literal = x->AsLiteral();
- if (x_literal != NULL && x_literal->IsNull()) {
- return new(zone()) CompareToNull(isolate(), is_strict, y);
- }
-
- Literal* y_literal = y->AsLiteral();
- if (y_literal != NULL && y_literal->IsNull()) {
- return new(zone()) CompareToNull(isolate(), is_strict, x);
- }
- }
- return new(zone()) CompareOperation(isolate(), op, x, y, position);
-}
-
-
Expression* Parser::ParseUnaryExpression(bool* ok) {
// UnaryExpression ::
// PostfixExpression
@@ -2698,7 +2898,7 @@ Expression* Parser::ParseUnaryExpression(bool* ok) {
}
// "delete identifier" is a syntax error in strict mode.
- if (op == Token::DELETE && top_scope_->is_strict_mode()) {
+ if (op == Token::DELETE && !top_scope_->is_classic_mode()) {
VariableProxy* operand = expression->AsVariableProxy();
if (operand != NULL && !operand->is_this()) {
ReportMessage("strict_delete", Vector<const char*>::empty());
@@ -2722,7 +2922,7 @@ Expression* Parser::ParseUnaryExpression(bool* ok) {
expression = NewThrowReferenceError(type);
}
- if (top_scope_->is_strict_mode()) {
+ if (!top_scope_->is_classic_mode()) {
// Prefix expression operand in strict mode may not be eval or arguments.
CheckStrictModeLValue(expression, "strict_lhs_prefix", CHECK_OK);
}
@@ -2757,7 +2957,7 @@ Expression* Parser::ParsePostfixExpression(bool* ok) {
expression = NewThrowReferenceError(type);
}
- if (top_scope_->is_strict_mode()) {
+ if (!top_scope_->is_classic_mode()) {
// Postfix expression operand in strict mode may not be eval or arguments.
    CheckStrictModeLValue(expression, "strict_lhs_postfix", CHECK_OK);
}
@@ -2804,23 +3004,14 @@ Expression* Parser::ParseLeftHandSideExpression(bool* ok) {
// Keep track of eval() calls since they disable all local variable
// optimizations.
// The calls that need special treatment are the
- // direct (i.e. not aliased) eval calls. These calls are all of the
- // form eval(...) with no explicit receiver object where eval is not
- // declared in the current scope chain.
+ // direct eval calls. These calls are all of the form eval(...), with
+ // no explicit receiver.
// These calls are marked as potentially direct eval calls. Whether
// they are actually direct calls to eval is determined at run time.
- // TODO(994): In ES5, it doesn't matter if the "eval" var is declared
- // in the local scope chain. It only matters that it's called "eval",
- // is called without a receiver and it refers to the original eval
- // function.
VariableProxy* callee = result->AsVariableProxy();
if (callee != NULL &&
callee->IsVariable(isolate()->factory()->eval_symbol())) {
- Handle<String> name = callee->name();
- Variable* var = top_scope_->Lookup(name);
- if (var == NULL) {
- top_scope_->DeclarationScope()->RecordEvalCall();
- }
+ top_scope_->DeclarationScope()->RecordEvalCall();
}
result = NewCall(result, args, pos);
break;
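The simplification above drops the old scope-chain lookup: any call of the form eval(...) with no explicit receiver is flagged, and whether it is actually a direct eval is decided at run time. A condensed illustration of the purely syntactic test (hypothetical helper, not V8 code):

    #include <cstring>

    // A call is a *potential* direct eval iff the callee is a bare variable
    // reference literally named "eval"; no declaration lookup is involved.
    bool LooksLikePotentialDirectEval(const char* callee_name,
                                      bool callee_is_bare_variable) {
      return callee_is_bare_variable &&
             callee_name != NULL &&
             std::strcmp(callee_name, "eval") == 0;
    }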
@@ -2997,9 +3188,9 @@ void Parser::ReportUnexpectedToken(Token::Value token) {
return ReportMessage("unexpected_reserved",
Vector<const char*>::empty());
case Token::FUTURE_STRICT_RESERVED_WORD:
- return ReportMessage(top_scope_->is_strict_mode() ?
- "unexpected_strict_reserved" :
- "unexpected_token_identifier",
+ return ReportMessage(top_scope_->is_classic_mode() ?
+ "unexpected_token_identifier" :
+ "unexpected_strict_reserved",
Vector<const char*>::empty());
default:
const char* name = Token::String(token);
@@ -3062,9 +3253,7 @@ Expression* Parser::ParsePrimaryExpression(bool* ok) {
case Token::FUTURE_STRICT_RESERVED_WORD: {
Handle<String> name = ParseIdentifier(CHECK_OK);
if (fni_ != NULL) fni_->PushVariableName(name);
- result = top_scope_->NewUnresolved(name,
- inside_with(),
- scanner().location().beg_pos);
+ result = top_scope_->NewUnresolved(name, scanner().location().beg_pos);
break;
}
@@ -3179,11 +3368,13 @@ Expression* Parser::ParseArrayLiteral(bool* ok) {
Expect(Token::RBRACK, CHECK_OK);
// Update the scope information before the pre-parsing bailout.
- int literal_index = lexical_scope_->NextMaterializedLiteralIndex();
+ int literal_index = current_function_state_->NextMaterializedLiteralIndex();
- // Allocate a fixed array with all the literals.
- Handle<FixedArray> literals =
+ // Allocate a fixed array to hold all the object literals.
+ Handle<FixedArray> object_literals =
isolate()->factory()->NewFixedArray(values->length(), TENURED);
+ Handle<FixedDoubleArray> double_literals;
+ ElementsKind elements_kind = FAST_SMI_ONLY_ELEMENTS;
// Fill in the literals.
bool is_simple = true;
@@ -3195,19 +3386,75 @@ Expression* Parser::ParseArrayLiteral(bool* ok) {
}
Handle<Object> boilerplate_value = GetBoilerplateValue(values->at(i));
if (boilerplate_value->IsUndefined()) {
- literals->set_the_hole(i);
+ object_literals->set_the_hole(i);
+ if (elements_kind == FAST_DOUBLE_ELEMENTS) {
+ double_literals->set_the_hole(i);
+ }
is_simple = false;
} else {
- literals->set(i, *boilerplate_value);
+ // Examine each literal element, and adjust the ElementsKind if the
+ // literal element is not of a type that can be stored in the current
+ // ElementsKind. Start with FAST_SMI_ONLY_ELEMENTS, and transition to
+ // FAST_DOUBLE_ELEMENTS and FAST_ELEMENTS as necessary. Always remember
+      // the tagged value, no matter what the ElementsKind is, in case we
+ // ultimately end up in FAST_ELEMENTS.
+ object_literals->set(i, *boilerplate_value);
+ if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
+ // Smi only elements. Notice if a transition to FAST_DOUBLE_ELEMENTS or
+ // FAST_ELEMENTS is required.
+ if (!boilerplate_value->IsSmi()) {
+ if (boilerplate_value->IsNumber() && FLAG_smi_only_arrays) {
+ // Allocate a double array on the FAST_DOUBLE_ELEMENTS transition to
+ // avoid over-allocating in TENURED space.
+ double_literals = isolate()->factory()->NewFixedDoubleArray(
+ values->length(), TENURED);
+ // Copy the contents of the FAST_SMI_ONLY_ELEMENT array to the
+ // FAST_DOUBLE_ELEMENTS array so that they are in sync.
+ for (int j = 0; j < i; ++j) {
+ Object* smi_value = object_literals->get(j);
+ if (smi_value->IsTheHole()) {
+ double_literals->set_the_hole(j);
+ } else {
+ double_literals->set(j, Smi::cast(smi_value)->value());
+ }
+ }
+ double_literals->set(i, boilerplate_value->Number());
+ elements_kind = FAST_DOUBLE_ELEMENTS;
+ } else {
+ elements_kind = FAST_ELEMENTS;
+ }
+ }
+ } else if (elements_kind == FAST_DOUBLE_ELEMENTS) {
+      // Continue to store double values into FAST_DOUBLE_ELEMENTS arrays
+ // until the first value is seen that can't be stored as a double.
+ if (boilerplate_value->IsNumber()) {
+ double_literals->set(i, boilerplate_value->Number());
+ } else {
+ elements_kind = FAST_ELEMENTS;
+ }
+ }
}
}
  // Simple and shallow arrays can be lazily copied; we transform the
// elements array to a copy-on-write array.
- if (is_simple && depth == 1 && values->length() > 0) {
- literals->set_map(isolate()->heap()->fixed_cow_array_map());
+ if (is_simple && depth == 1 && values->length() > 0 &&
+ elements_kind != FAST_DOUBLE_ELEMENTS) {
+ object_literals->set_map(isolate()->heap()->fixed_cow_array_map());
}
+ Handle<FixedArrayBase> element_values = elements_kind == FAST_DOUBLE_ELEMENTS
+ ? Handle<FixedArrayBase>(double_literals)
+ : Handle<FixedArrayBase>(object_literals);
+
+ // Remember both the literal's constant values as well as the ElementsKind
+ // in a 2-element FixedArray.
+ Handle<FixedArray> literals =
+ isolate()->factory()->NewFixedArray(2, TENURED);
+
+ literals->set(0, Smi::FromInt(elements_kind));
+ literals->set(1, *element_values);
+
return new(zone()) ArrayLiteral(
isolate(), literals, values, literal_index, is_simple, depth);
}
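The element-scanning loop implements a one-way lattice: FAST_SMI_ONLY_ELEMENTS can transition to FAST_DOUBLE_ELEMENTS (when FLAG_smi_only_arrays is on) or to FAST_ELEMENTS, and FAST_DOUBLE_ELEMENTS can only widen to FAST_ELEMENTS. The transition rule in isolation, as an illustrative sketch that ignores the flag and the array copying:

    enum KindSketch { kSmiOnlySketch, kDoubleSketch, kObjectSketch };

    // Transitions only move "up" the lattice; they never narrow.
    KindSketch Transition(KindSketch current, bool is_smi, bool is_number) {
      if (current == kObjectSketch) return kObjectSketch;
      if (is_smi) return current;          // Smis fit in every kind.
      if (is_number) {                     // Non-smi number (a double).
        return current == kSmiOnlySketch ? kDoubleSketch : current;
      }
      return kObjectSketch;                // Anything else forces objects.
    }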
@@ -3289,11 +3536,11 @@ bool IsEqualNumber(void* first, void* second);
// Validation per 11.1.5 Object Initialiser
class ObjectLiteralPropertyChecker {
public:
- ObjectLiteralPropertyChecker(Parser* parser, bool strict) :
+ ObjectLiteralPropertyChecker(Parser* parser, LanguageMode language_mode) :
props(&IsEqualString),
elems(&IsEqualNumber),
parser_(parser),
- strict_(strict) {
+ language_mode_(language_mode) {
}
void CheckProperty(
@@ -3323,7 +3570,7 @@ class ObjectLiteralPropertyChecker {
HashMap props;
HashMap elems;
Parser* parser_;
- bool strict_;
+ LanguageMode language_mode_;
};
@@ -3372,8 +3619,8 @@ void ObjectLiteralPropertyChecker::CheckProperty(
intptr_t prev = reinterpret_cast<intptr_t> (entry->value);
intptr_t curr = GetPropertyKind(property);
- // Duplicate data properties are illegal in strict mode.
- if (strict_ && (curr & prev & kData) != 0) {
+ // Duplicate data properties are illegal in strict or extended mode.
+ if (language_mode_ != CLASSIC_MODE && (curr & prev & kData) != 0) {
parser_->ReportMessageAt(loc, "strict_duplicate_property",
Vector<const char*>::empty());
*ok = false;
@@ -3509,7 +3756,7 @@ Expression* Parser::ParseObjectLiteral(bool* ok) {
int number_of_boilerplate_properties = 0;
bool has_function = false;
- ObjectLiteralPropertyChecker checker(this, top_scope_->is_strict_mode());
+ ObjectLiteralPropertyChecker checker(this, top_scope_->language_mode());
Expect(Token::LBRACE, CHECK_OK);
@@ -3597,11 +3844,13 @@ Expression* Parser::ParseObjectLiteral(bool* ok) {
ObjectLiteral::Property* property =
new(zone()) ObjectLiteral::Property(key, value);
- // Mark object literals that contain function literals and pretenure the
- // literal so it can be added as a constant function property.
- if (value->AsFunctionLiteral() != NULL) {
+ // Mark top-level object literals that contain function literals and
+ // pretenure the literal so it can be added as a constant function
+ // property.
+ if (top_scope_->DeclarationScope()->is_global_scope() &&
+ value->AsFunctionLiteral() != NULL) {
has_function = true;
- value->AsFunctionLiteral()->set_pretenure(true);
+ value->AsFunctionLiteral()->set_pretenure();
}
// Count CONSTANT or COMPUTED properties to maintain the enumeration order.
@@ -3621,7 +3870,7 @@ Expression* Parser::ParseObjectLiteral(bool* ok) {
Expect(Token::RBRACE, CHECK_OK);
// Computation of literal_index must happen before pre parse bailout.
- int literal_index = lexical_scope_->NextMaterializedLiteralIndex();
+ int literal_index = current_function_state_->NextMaterializedLiteralIndex();
Handle<FixedArray> constant_properties = isolate()->factory()->NewFixedArray(
number_of_boilerplate_properties * 2, TENURED);
@@ -3653,7 +3902,7 @@ Expression* Parser::ParseRegExpLiteral(bool seen_equal, bool* ok) {
return NULL;
}
- int literal_index = lexical_scope_->NextMaterializedLiteralIndex();
+ int literal_index = current_function_state_->NextMaterializedLiteralIndex();
Handle<String> js_pattern = NextLiteralString(TENURED);
scanner().ScanRegExpFlags();
@@ -3689,6 +3938,98 @@ ZoneList<Expression*>* Parser::ParseArguments(bool* ok) {
}
+class SingletonLogger : public ParserRecorder {
+ public:
+ SingletonLogger() : has_error_(false), start_(-1), end_(-1) { }
+ ~SingletonLogger() { }
+
+ void Reset() { has_error_ = false; }
+
+ virtual void LogFunction(int start,
+ int end,
+ int literals,
+ int properties,
+ LanguageMode mode) {
+ ASSERT(!has_error_);
+ start_ = start;
+ end_ = end;
+ literals_ = literals;
+ properties_ = properties;
+ mode_ = mode;
+  }
+
+ // Logs a symbol creation of a literal or identifier.
+ virtual void LogAsciiSymbol(int start, Vector<const char> literal) { }
+ virtual void LogUC16Symbol(int start, Vector<const uc16> literal) { }
+
+ // Logs an error message and marks the log as containing an error.
+ // Further logging will be ignored, and ExtractData will return a vector
+ // representing the error only.
+ virtual void LogMessage(int start,
+ int end,
+ const char* message,
+ const char* argument_opt) {
+ has_error_ = true;
+ start_ = start;
+ end_ = end;
+ message_ = message;
+ argument_opt_ = argument_opt;
+ }
+
+ virtual int function_position() { return 0; }
+
+ virtual int symbol_position() { return 0; }
+
+ virtual int symbol_ids() { return -1; }
+
+ virtual Vector<unsigned> ExtractData() {
+ UNREACHABLE();
+ return Vector<unsigned>();
+ }
+
+ virtual void PauseRecording() { }
+
+ virtual void ResumeRecording() { }
+
+ bool has_error() { return has_error_; }
+
+ int start() { return start_; }
+ int end() { return end_; }
+ int literals() {
+ ASSERT(!has_error_);
+ return literals_;
+ }
+ int properties() {
+ ASSERT(!has_error_);
+ return properties_;
+ }
+ LanguageMode language_mode() {
+ ASSERT(!has_error_);
+ return mode_;
+ }
+ const char* message() {
+ ASSERT(has_error_);
+ return message_;
+ }
+ const char* argument_opt() {
+ ASSERT(has_error_);
+ return argument_opt_;
+ }
+
+ private:
+ bool has_error_;
+ int start_;
+ int end_;
+ // For function entries.
+ int literals_;
+ int properties_;
+ LanguageMode mode_;
+ // For error messages.
+ const char* message_;
+ const char* argument_opt_;
+};
+
+
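SingletonLogger records the outcome of preparsing exactly one function: either the metrics needed to build a lazy FunctionLiteral or a single error. A sketch of how a caller is expected to consume it, mirroring the protocol of the accessors above (ReportAt and UseMetrics are hypothetical stand-ins):

    void ReportAt(int start, int end, const char* msg, const char* arg);
    void UseMetrics(int literals, int properties, int language_mode);

    void ConsumeLog(SingletonLogger* logger) {
      if (logger->has_error()) {
        // Only the location, message and argument are valid here.
        ReportAt(logger->start(), logger->end(),
                 logger->message(), logger->argument_opt());
      } else {
        // Only the function metrics are valid here.
        UseMetrics(logger->literals(), logger->properties(),
                   static_cast<int>(logger->language_mode()));
      }
    }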
FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> function_name,
bool name_is_strict_reserved,
int function_token_position,
@@ -3711,26 +4052,24 @@ FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> function_name,
// Function declarations are function scoped in normal mode, so they are
// hoisted. In harmony block scoping mode they are block scoped, so they
// are not hoisted.
- Scope* scope = (type == FunctionLiteral::DECLARATION &&
- !harmony_block_scoping_)
- ? NewScope(top_scope_->DeclarationScope(), Scope::FUNCTION_SCOPE, false)
- : NewScope(top_scope_, Scope::FUNCTION_SCOPE, inside_with());
- ZoneList<Statement*>* body = new(zone()) ZoneList<Statement*>(8);
- int materialized_literal_count;
- int expected_property_count;
- int start_pos;
- int end_pos;
+ Scope* scope = (type == FunctionLiteral::DECLARATION && !is_extended_mode())
+ ? NewScope(top_scope_->DeclarationScope(), FUNCTION_SCOPE)
+ : NewScope(top_scope_, FUNCTION_SCOPE);
+ ZoneList<Statement*>* body = NULL;
+ int materialized_literal_count = -1;
+ int expected_property_count = -1;
+ int handler_count = 0;
bool only_simple_this_property_assignments;
Handle<FixedArray> this_property_assignments;
bool has_duplicate_parameters = false;
// Parse function body.
- { LexicalScope lexical_scope(this, scope, isolate());
+ { FunctionState function_state(this, scope, isolate());
top_scope_->SetScopeName(function_name);
// FormalParameterList ::
// '(' (Identifier)*[','] ')'
Expect(Token::LPAREN, CHECK_OK);
- start_pos = scanner().location().beg_pos;
+ scope->set_start_position(scanner().location().beg_pos);
Scanner::Location name_loc = Scanner::Location::invalid();
Scanner::Location dupe_loc = Scanner::Location::invalid();
Scanner::Location reserved_loc = Scanner::Location::invalid();
@@ -3754,10 +4093,7 @@ FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> function_name,
reserved_loc = scanner().location();
}
- top_scope_->DeclareParameter(param_name,
- harmony_block_scoping_
- ? Variable::LET
- : Variable::VAR);
+ top_scope_->DeclareParameter(param_name, is_extended_mode() ? LET : VAR);
num_parameters++;
if (num_parameters > kMaxNumFunctionParameters) {
ReportMessageAt(scanner().location(), "too_many_parameters",
@@ -3778,71 +4114,129 @@ FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> function_name,
// NOTE: We create a proxy and resolve it here so that in the
// future we can change the AST to only refer to VariableProxies
  // instead of Variables and Proxies, as is the case now.
+ Variable* fvar = NULL;
+ Token::Value fvar_init_op = Token::INIT_CONST;
if (type == FunctionLiteral::NAMED_EXPRESSION) {
- Variable* fvar = top_scope_->DeclareFunctionVar(function_name);
- VariableProxy* fproxy =
- top_scope_->NewUnresolved(function_name, inside_with());
- fproxy->BindTo(fvar);
- body->Add(new(zone()) ExpressionStatement(
- new(zone()) Assignment(isolate(),
- Token::INIT_CONST,
- fproxy,
- new(zone()) ThisFunction(isolate()),
- RelocInfo::kNoPosition)));
+ VariableMode fvar_mode;
+ if (is_extended_mode()) {
+ fvar_mode = CONST_HARMONY;
+ fvar_init_op = Token::INIT_CONST_HARMONY;
+ } else {
+ fvar_mode = CONST;
+ }
+ fvar = top_scope_->DeclareFunctionVar(function_name, fvar_mode);
}
- // Determine if the function will be lazily compiled. The mode can only
- // be PARSE_LAZILY if the --lazy flag is true. We will not lazily
- // compile if we do not have preparser data for the function.
+ // Determine whether the function will be lazily compiled.
+ // The heuristics are:
+ // - It must not have been prohibited by the caller to Parse (some callers
+ // need a full AST).
+ // - The outer scope must be trivial (only global variables in scope).
+  //  - The function must not be a function expression immediately preceded
+  //    by an open parenthesis; we take that as a hint that the function
+  //    will be called right away, and it would be a waste of time to make
+  //    it lazily compiled.
+ // These are all things we can know at this point, without looking at the
+ // function itself.
bool is_lazily_compiled = (mode() == PARSE_LAZILY &&
top_scope_->outer_scope()->is_global_scope() &&
top_scope_->HasTrivialOuterContext() &&
- !parenthesized_function_ &&
- pre_data() != NULL);
+ !parenthesized_function_);
parenthesized_function_ = false; // The bit was set for this function only.
if (is_lazily_compiled) {
int function_block_pos = scanner().location().beg_pos;
- FunctionEntry entry = pre_data()->GetFunctionEntry(function_block_pos);
- if (!entry.is_valid()) {
- // There is no preparser data for the function, we will not lazily
- // compile after all.
- is_lazily_compiled = false;
+ FunctionEntry entry;
+ if (pre_data_ != NULL) {
+ // If we have pre_data_, we use it to skip parsing the function body.
+      // The preparser data contains the information we need to construct the
+ // lazy function.
+ entry = pre_data()->GetFunctionEntry(function_block_pos);
+ if (entry.is_valid()) {
+ if (entry.end_pos() <= function_block_pos) {
+ // End position greater than end of stream is safe, and hard
+ // to check.
+ ReportInvalidPreparseData(function_name, CHECK_OK);
+ }
+ scanner().SeekForward(entry.end_pos() - 1);
+
+ scope->set_end_position(entry.end_pos());
+ Expect(Token::RBRACE, CHECK_OK);
+ isolate()->counters()->total_preparse_skipped()->Increment(
+ scope->end_position() - function_block_pos);
+ materialized_literal_count = entry.literal_count();
+ expected_property_count = entry.property_count();
+ top_scope_->SetLanguageMode(entry.language_mode());
+ only_simple_this_property_assignments = false;
+ this_property_assignments = isolate()->factory()->empty_fixed_array();
+ } else {
+ is_lazily_compiled = false;
+ }
} else {
- end_pos = entry.end_pos();
- if (end_pos <= function_block_pos) {
- // End position greater than end of stream is safe, and hard to check.
- ReportInvalidPreparseData(function_name, CHECK_OK);
+ // With no preparser data, we partially parse the function, without
+ // building an AST. This gathers the data needed to build a lazy
+ // function.
+ SingletonLogger logger;
+ preparser::PreParser::PreParseResult result =
+ LazyParseFunctionLiteral(&logger);
+ if (result == preparser::PreParser::kPreParseStackOverflow) {
+ // Propagate stack overflow.
+ stack_overflow_ = true;
+ *ok = false;
+ return NULL;
+ }
+ if (logger.has_error()) {
+ const char* arg = logger.argument_opt();
+ Vector<const char*> args;
+ if (arg != NULL) {
+ args = Vector<const char*>(&arg, 1);
+ }
+ ReportMessageAt(Scanner::Location(logger.start(), logger.end()),
+ logger.message(), args);
+ *ok = false;
+ return NULL;
}
+ scope->set_end_position(logger.end());
+ Expect(Token::RBRACE, CHECK_OK);
isolate()->counters()->total_preparse_skipped()->Increment(
- end_pos - function_block_pos);
- // Seek to position just before terminal '}'.
- scanner().SeekForward(end_pos - 1);
- materialized_literal_count = entry.literal_count();
- expected_property_count = entry.property_count();
- if (entry.strict_mode()) top_scope_->EnableStrictMode();
+ scope->end_position() - function_block_pos);
+ materialized_literal_count = logger.literals();
+ expected_property_count = logger.properties();
+ top_scope_->SetLanguageMode(logger.language_mode());
only_simple_this_property_assignments = false;
this_property_assignments = isolate()->factory()->empty_fixed_array();
- Expect(Token::RBRACE, CHECK_OK);
}
}
if (!is_lazily_compiled) {
+ body = new(zone()) ZoneList<Statement*>(8);
+ if (fvar != NULL) {
+ VariableProxy* fproxy = top_scope_->NewUnresolved(function_name);
+ fproxy->BindTo(fvar);
+ body->Add(new(zone()) ExpressionStatement(
+ new(zone()) Assignment(isolate(),
+ fvar_init_op,
+ fproxy,
+ new(zone()) ThisFunction(isolate()),
+ RelocInfo::kNoPosition)));
+ }
ParseSourceElements(body, Token::RBRACE, CHECK_OK);
- materialized_literal_count = lexical_scope.materialized_literal_count();
- expected_property_count = lexical_scope.expected_property_count();
+ materialized_literal_count = function_state.materialized_literal_count();
+ expected_property_count = function_state.expected_property_count();
+ handler_count = function_state.handler_count();
only_simple_this_property_assignments =
- lexical_scope.only_simple_this_property_assignments();
- this_property_assignments = lexical_scope.this_property_assignments();
+ function_state.only_simple_this_property_assignments();
+ this_property_assignments = function_state.this_property_assignments();
Expect(Token::RBRACE, CHECK_OK);
- end_pos = scanner().location().end_pos;
+ scope->set_end_position(scanner().location().end_pos);
}
  // Validate strict and extended mode restrictions.
- if (top_scope_->is_strict_mode()) {
+ if (!top_scope_->is_classic_mode()) {
if (IsEvalOrArguments(function_name)) {
+ int start_pos = scope->start_position();
int position = function_token_position != RelocInfo::kNoPosition
? function_token_position
: (start_pos > 0 ? start_pos - 1 : start_pos);
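The lazy-compilation heuristic at the top of this hunk no longer requires preparse data up front; restated as a predicate (illustrative only, with descriptive parameter names rather than V8's):

    bool ShouldCompileLazily(bool lazy_parsing_allowed,
                             bool outer_scope_is_global,
                             bool outer_context_is_trivial,
                             bool preceded_by_parenthesis) {
      return lazy_parsing_allowed &&
             outer_scope_is_global &&
             outer_context_is_trivial &&
             !preceded_by_parenthesis;  // '(function' hints an immediate call.
    }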
@@ -3865,6 +4259,7 @@ FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> function_name,
return NULL;
}
if (name_is_strict_reserved) {
+ int start_pos = scope->start_position();
int position = function_token_position != RelocInfo::kNoPosition
? function_token_position
: (start_pos > 0 ? start_pos - 1 : start_pos);
@@ -3880,11 +4275,13 @@ FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> function_name,
*ok = false;
return NULL;
}
- CheckOctalLiteral(start_pos, end_pos, CHECK_OK);
+ CheckOctalLiteral(scope->start_position(),
+ scope->end_position(),
+ CHECK_OK);
}
}
- if (harmony_block_scoping_) {
+ if (is_extended_mode()) {
CheckConflictingVarDeclarations(scope, CHECK_OK);
}
@@ -3895,11 +4292,10 @@ FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> function_name,
body,
materialized_literal_count,
expected_property_count,
+ handler_count,
only_simple_this_property_assignments,
this_property_assignments,
num_parameters,
- start_pos,
- end_pos,
type,
has_duplicate_parameters);
function_literal->set_function_token_position(function_token_position);
@@ -3909,6 +4305,27 @@ FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> function_name,
}
+preparser::PreParser::PreParseResult Parser::LazyParseFunctionLiteral(
+ SingletonLogger* logger) {
+ HistogramTimerScope preparse_scope(isolate()->counters()->pre_parse());
+ ASSERT_EQ(Token::LBRACE, scanner().current_token());
+
+ if (reusable_preparser_ == NULL) {
+ intptr_t stack_limit = isolate()->stack_guard()->real_climit();
+ bool do_allow_lazy = true;
+ reusable_preparser_ = new preparser::PreParser(&scanner_,
+ NULL,
+ stack_limit,
+ do_allow_lazy,
+ allow_natives_syntax_);
+ }
+ preparser::PreParser::PreParseResult result =
+ reusable_preparser_->PreParseLazyFunction(top_scope_->language_mode(),
+ logger);
+ return result;
+}
+
+
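The reusable preparser is created on first use with the isolate's stack limit and then kept for the Parser's lifetime (the header change below deletes it in ~Parser). A minimal sketch of the same lazily constructed member idiom, with Helper standing in for preparser::PreParser:

    struct Helper {
      explicit Helper(long stack_limit) : limit_(stack_limit) {}
      long limit_;
    };

    class OwnerSketch {
     public:
      OwnerSketch() : helper_(0) {}       // No helper until first use.
      ~OwnerSketch() { delete helper_; }  // Owner frees it at the end.
      Helper* helper(long stack_limit) {
        if (helper_ == 0) helper_ = new Helper(stack_limit);
        return helper_;                   // Reused on later calls.
      }
     private:
      Helper* helper_;
    };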
Expression* Parser::ParseV8Intrinsic(bool* ok) {
// CallRuntime ::
// '%' Identifier Arguments
@@ -4024,7 +4441,7 @@ Literal* Parser::GetLiteralNumber(double value) {
// Parses an identifier that is valid for the current scope, in particular it
// fails on strict mode future reserved keywords in a strict scope.
Handle<String> Parser::ParseIdentifier(bool* ok) {
- if (top_scope_->is_strict_mode()) {
+ if (!top_scope_->is_classic_mode()) {
Expect(Token::IDENTIFIER, ok);
} else if (!Check(Token::IDENTIFIER)) {
Expect(Token::FUTURE_STRICT_RESERVED_WORD, ok);
@@ -4067,7 +4484,7 @@ Handle<String> Parser::ParseIdentifierName(bool* ok) {
void Parser::CheckStrictModeLValue(Expression* expression,
const char* error,
bool* ok) {
- ASSERT(top_scope_->is_strict_mode());
+ ASSERT(!top_scope_->is_classic_mode());
VariableProxy* lhs = expression != NULL
? expression->AsVariableProxy()
: NULL;
@@ -5120,18 +5537,20 @@ int ScriptDataImpl::ReadNumber(byte** source) {
// Create a Scanner for the preparser to use as input, and preparse the source.
static ScriptDataImpl* DoPreParse(UC16CharacterStream* source,
- bool allow_lazy,
- ParserRecorder* recorder,
- bool harmony_block_scoping) {
+ int flags,
+ ParserRecorder* recorder) {
Isolate* isolate = Isolate::Current();
- JavaScriptScanner scanner(isolate->unicode_cache());
- scanner.SetHarmonyBlockScoping(harmony_block_scoping);
+ HistogramTimerScope timer(isolate->counters()->pre_parse());
+ Scanner scanner(isolate->unicode_cache());
+ scanner.SetHarmonyScoping(FLAG_harmony_scoping);
scanner.Initialize(source);
intptr_t stack_limit = isolate->stack_guard()->real_climit();
- if (!preparser::PreParser::PreParseProgram(&scanner,
- recorder,
- allow_lazy,
- stack_limit)) {
+ preparser::PreParser::PreParseResult result =
+ preparser::PreParser::PreParseProgram(&scanner,
+ recorder,
+ flags,
+ stack_limit);
+ if (result == preparser::PreParser::kPreParseStackOverflow) {
isolate->StackOverflow();
return NULL;
}
@@ -5145,27 +5564,38 @@ static ScriptDataImpl* DoPreParse(UC16CharacterStream* source,
// Preparse, but only collect data that is immediately useful,
// even if the preparser data is only used once.
-ScriptDataImpl* ParserApi::PartialPreParse(UC16CharacterStream* source,
+ScriptDataImpl* ParserApi::PartialPreParse(Handle<String> source,
v8::Extension* extension,
- bool harmony_block_scoping) {
+ int flags) {
bool allow_lazy = FLAG_lazy && (extension == NULL);
if (!allow_lazy) {
// Partial preparsing is only about lazily compiled functions.
// If we don't allow lazy compilation, the log data will be empty.
return NULL;
}
+ flags |= kAllowLazy;
PartialParserRecorder recorder;
- return DoPreParse(source, allow_lazy, &recorder, harmony_block_scoping);
+ int source_length = source->length();
+ if (source->IsExternalTwoByteString()) {
+ ExternalTwoByteStringUC16CharacterStream stream(
+ Handle<ExternalTwoByteString>::cast(source), 0, source_length);
+ return DoPreParse(&stream, flags, &recorder);
+ } else {
+ GenericStringUC16CharacterStream stream(source, 0, source_length);
+ return DoPreParse(&stream, flags, &recorder);
+ }
}
ScriptDataImpl* ParserApi::PreParse(UC16CharacterStream* source,
v8::Extension* extension,
- bool harmony_block_scoping) {
+ int flags) {
Handle<Script> no_script;
- bool allow_lazy = FLAG_lazy && (extension == NULL);
+ if (FLAG_lazy && (extension == NULL)) {
+ flags |= kAllowLazy;
+ }
CompleteParserRecorder recorder;
- return DoPreParse(source, allow_lazy, &recorder, harmony_block_scoping);
+ return DoPreParse(source, flags, &recorder);
}
@@ -5191,29 +5621,26 @@ bool RegExpParser::ParseRegExp(FlatStringReader* input,
}
-bool ParserApi::Parse(CompilationInfo* info) {
+bool ParserApi::Parse(CompilationInfo* info, int parsing_flags) {
ASSERT(info->function() == NULL);
FunctionLiteral* result = NULL;
Handle<Script> script = info->script();
- bool harmony_block_scoping = !info->is_native() &&
- FLAG_harmony_block_scoping;
+ ASSERT((parsing_flags & kLanguageModeMask) == CLASSIC_MODE);
+ if (!info->is_native() && FLAG_harmony_scoping) {
+ // Harmony scoping is requested.
+ parsing_flags |= EXTENDED_MODE;
+ }
+ if (FLAG_allow_natives_syntax || info->is_native()) {
+    // We require %identifier(..) syntax.
+ parsing_flags |= kAllowNativesSyntax;
+ }
if (info->is_lazy()) {
- bool allow_natives_syntax =
- FLAG_allow_natives_syntax ||
- info->is_native();
- Parser parser(script, allow_natives_syntax, NULL, NULL);
- parser.SetHarmonyBlockScoping(harmony_block_scoping);
+ ASSERT(!info->is_eval());
+ Parser parser(script, parsing_flags, NULL, NULL);
result = parser.ParseLazy(info);
} else {
- // Whether we allow %identifier(..) syntax.
- bool allow_natives_syntax =
- info->is_native() || FLAG_allow_natives_syntax;
ScriptDataImpl* pre_data = info->pre_parse_data();
- Parser parser(script,
- allow_natives_syntax,
- info->extension(),
- pre_data);
- parser.SetHarmonyBlockScoping(harmony_block_scoping);
+ Parser parser(script, parsing_flags, info->extension(), pre_data);
if (pre_data != NULL && pre_data->has_error()) {
Scanner::Location loc = pre_data->MessageLocation();
const char* message = pre_data->BuildMessage();
@@ -5226,10 +5653,7 @@ bool ParserApi::Parse(CompilationInfo* info) {
DeleteArray(args.start());
ASSERT(info->isolate()->has_pending_exception());
} else {
- Handle<String> source = Handle<String>(String::cast(script->source()));
- result = parser.ParseProgram(source,
- info->is_global(),
- info->StrictMode());
+ result = parser.ParseProgram(info);
}
}
info->SetFunction(result);
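ParserApi::Parse now receives one flag word that packs the language mode into its low bits next to boolean feature flags; the ASSERT above verifies the caller left the mode bits at CLASSIC_MODE. A sketch of how such a word is assembled, with assumed illustrative bit values (the real constants live in V8's headers):

    const int kExtendedModeBitSketch = 0x02;  // Assumed values, not V8's.
    const int kAllowNativesBitSketch = 0x04;
    const int kAllowLazyBitSketch    = 0x08;

    int ComposeParsingFlags(bool is_native, bool harmony_scoping,
                            bool natives_syntax) {
      int flags = 0;  // Language-mode bits start out as CLASSIC_MODE.
      if (!is_native && harmony_scoping) flags |= kExtendedModeBitSketch;
      if (natives_syntax || is_native)   flags |= kAllowNativesBitSketch;
      return flags;
    }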
diff --git a/deps/v8/src/parser.h b/deps/v8/src/parser.h
index 3312f2f56..75f8e1093 100644
--- a/deps/v8/src/parser.h
+++ b/deps/v8/src/parser.h
@@ -33,6 +33,7 @@
#include "preparse-data-format.h"
#include "preparse-data.h"
#include "scopes.h"
+#include "preparser.h"
namespace v8 {
namespace internal {
@@ -42,7 +43,6 @@ class FuncNameInferrer;
class ParserLog;
class PositionStack;
class Target;
-class LexicalScope;
template <typename T> class ZoneListWrapper;
@@ -67,26 +67,36 @@ class ParserMessage : public Malloced {
class FunctionEntry BASE_EMBEDDED {
public:
- explicit FunctionEntry(Vector<unsigned> backing) : backing_(backing) { }
- FunctionEntry() : backing_(Vector<unsigned>::empty()) { }
+ enum {
+ kStartPositionIndex,
+ kEndPositionIndex,
+ kLiteralCountIndex,
+ kPropertyCountIndex,
+ kLanguageModeIndex,
+ kSize
+ };
+
+ explicit FunctionEntry(Vector<unsigned> backing)
+ : backing_(backing) { }
- int start_pos() { return backing_[kStartPosOffset]; }
- int end_pos() { return backing_[kEndPosOffset]; }
- int literal_count() { return backing_[kLiteralCountOffset]; }
- int property_count() { return backing_[kPropertyCountOffset]; }
- bool strict_mode() { return backing_[kStrictModeOffset] != 0; }
+ FunctionEntry() : backing_() { }
- bool is_valid() { return backing_.length() > 0; }
+ int start_pos() { return backing_[kStartPositionIndex]; }
+ int end_pos() { return backing_[kEndPositionIndex]; }
+ int literal_count() { return backing_[kLiteralCountIndex]; }
+ int property_count() { return backing_[kPropertyCountIndex]; }
+ LanguageMode language_mode() {
+ ASSERT(backing_[kLanguageModeIndex] == CLASSIC_MODE ||
+ backing_[kLanguageModeIndex] == STRICT_MODE ||
+ backing_[kLanguageModeIndex] == EXTENDED_MODE);
+ return static_cast<LanguageMode>(backing_[kLanguageModeIndex]);
+ }
- static const int kSize = 5;
+ bool is_valid() { return !backing_.is_empty(); }
private:
Vector<unsigned> backing_;
- static const int kStartPosOffset = 0;
- static const int kEndPosOffset = 1;
- static const int kLiteralCountOffset = 2;
- static const int kPropertyCountOffset = 3;
- static const int kStrictModeOffset = 4;
+ bool owns_data_;
};
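A FunctionEntry is now a view over kSize consecutive unsigned values inside the flat preparse-data vector, with the enum supplying symbolic indices in place of the old per-field offset constants. Slicing an entry out of the vector might look like this (a sketch assuming V8's Vector::SubVector with [from, to) indices):

    // Return the i-th entry; each one spans FunctionEntry::kSize values.
    FunctionEntry EntryAt(Vector<unsigned> function_data, int i) {
      int from = i * FunctionEntry::kSize;
      return FunctionEntry(
          function_data.SubVector(from, from + FunctionEntry::kSize));
    }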
@@ -98,7 +108,7 @@ class ScriptDataImpl : public ScriptData {
// Create an empty ScriptDataImpl that is guaranteed to not satisfy
// a SanityCheck.
- ScriptDataImpl() : store_(Vector<unsigned>()), owns_store_(false) { }
+ ScriptDataImpl() : owns_store_(false) { }
virtual ~ScriptDataImpl();
virtual int Length();
@@ -159,18 +169,18 @@ class ParserApi {
// Parses the source code represented by the compilation info and sets its
// function literal. Returns false (and deallocates any allocated AST
// nodes) if parsing failed.
- static bool Parse(CompilationInfo* info);
+ static bool Parse(CompilationInfo* info, int flags);
// Generic preparser generating full preparse data.
static ScriptDataImpl* PreParse(UC16CharacterStream* source,
v8::Extension* extension,
- bool harmony_block_scoping);
+ int flags);
// Preparser that only does preprocessing that makes sense if only used
// immediately after.
- static ScriptDataImpl* PartialPreParse(UC16CharacterStream* source,
+ static ScriptDataImpl* PartialPreParse(Handle<String> source,
v8::Extension* extension,
- bool harmony_block_scoping);
+ int flags);
};
// ----------------------------------------------------------------------------
@@ -415,19 +425,23 @@ class RegExpParser {
// ----------------------------------------------------------------------------
// JAVASCRIPT PARSING
+// Forward declaration.
+class SingletonLogger;
+
class Parser {
public:
Parser(Handle<Script> script,
- bool allow_natives_syntax,
+ int parsing_flags, // Combination of ParsingFlags
v8::Extension* extension,
ScriptDataImpl* pre_data);
- virtual ~Parser() { }
+ virtual ~Parser() {
+ if (reusable_preparser_ != NULL) {
+ delete reusable_preparser_;
+ }
+ }
// Returns NULL if parsing failed.
- FunctionLiteral* ParseProgram(Handle<String> source,
- bool in_global_context,
- StrictModeFlag strict_mode);
-
+ FunctionLiteral* ParseProgram(CompilationInfo* info);
FunctionLiteral* ParseLazy(CompilationInfo* info);
void ReportMessageAt(Scanner::Location loc,
@@ -436,7 +450,6 @@ class Parser {
void ReportMessageAt(Scanner::Location loc,
const char* message,
Vector<Handle<String> > args);
- void SetHarmonyBlockScoping(bool block_scoping);
private:
// Limit on number of function parameters is chosen arbitrarily.
@@ -445,9 +458,7 @@ class Parser {
// should be checked.
static const int kMaxNumFunctionParameters = 32766;
static const int kMaxNumFunctionLocals = 32767;
- FunctionLiteral* ParseLazy(CompilationInfo* info,
- UC16CharacterStream* source,
- ZoneScope* zone_scope);
+
enum Mode {
PARSE_LAZILY,
PARSE_EAGERLY
@@ -459,13 +470,25 @@ class Parser {
kForStatement
};
+ // If a list of variable declarations includes any initializers.
+ enum VariableDeclarationProperties {
+ kHasInitializers,
+ kHasNoInitializers
+ };
+
+ class BlockState;
+ class FunctionState;
+
+ FunctionLiteral* ParseLazy(CompilationInfo* info,
+ UC16CharacterStream* source,
+ ZoneScope* zone_scope);
+
Isolate* isolate() { return isolate_; }
Zone* zone() { return isolate_->zone(); }
// Called by ParseProgram after setting up the scanner.
- FunctionLiteral* DoParseProgram(Handle<String> source,
- bool in_global_context,
- StrictModeFlag strict_mode,
+ FunctionLiteral* DoParseProgram(CompilationInfo* info,
+ Handle<String> source,
ZoneScope* zone_scope);
// Report syntax error
@@ -473,10 +496,14 @@ class Parser {
void ReportInvalidPreparseData(Handle<String> name, bool* ok);
void ReportMessage(const char* message, Vector<const char*> args);
- bool inside_with() const { return with_nesting_level_ > 0; }
- JavaScriptScanner& scanner() { return scanner_; }
+ bool inside_with() const { return top_scope_->inside_with(); }
+ Scanner& scanner() { return scanner_; }
Mode mode() const { return mode_; }
ScriptDataImpl* pre_data() const { return pre_data_; }
+ bool is_extended_mode() {
+ ASSERT(top_scope_ != NULL);
+ return top_scope_->is_extended_mode();
+ }
// Check if the given string is 'eval' or 'arguments'.
bool IsEvalOrArguments(Handle<String> string);
@@ -492,10 +519,10 @@ class Parser {
Statement* ParseFunctionDeclaration(bool* ok);
Statement* ParseNativeDeclaration(bool* ok);
Block* ParseBlock(ZoneStringList* labels, bool* ok);
- Block* ParseScopedBlock(ZoneStringList* labels, bool* ok);
Block* ParseVariableStatement(VariableDeclarationContext var_context,
bool* ok);
Block* ParseVariableDeclarations(VariableDeclarationContext var_context,
+ VariableDeclarationProperties* decl_props,
Handle<String>* out,
bool* ok);
Statement* ParseExpressionOrLabelledStatement(ZoneStringList* labels,
@@ -515,6 +542,9 @@ class Parser {
TryStatement* ParseTryStatement(bool* ok);
DebuggerStatement* ParseDebuggerStatement(bool* ok);
+  // Support for harmony block-scoped bindings.
+ Block* ParseScopedBlock(ZoneStringList* labels, bool* ok);
+
Expression* ParseExpression(bool accept_IN, bool* ok);
Expression* ParseAssignmentExpression(bool accept_IN, bool* ok);
Expression* ParseConditionalExpression(bool accept_IN, bool* ok);
@@ -533,11 +563,6 @@ class Parser {
ObjectLiteral::Property* ParseObjectLiteralGetSet(bool is_getter, bool* ok);
Expression* ParseRegExpLiteral(bool seen_equal, bool* ok);
- Expression* NewCompareNode(Token::Value op,
- Expression* x,
- Expression* y,
- int position);
-
// Populate the constant properties fixed array for a materialized object
// literal.
void BuildObjectLiteralConstantProperties(
@@ -656,7 +681,7 @@ class Parser {
void CheckConflictingVarDeclarations(Scope* scope, bool* ok);
// Parser support
- VariableProxy* Declare(Handle<String> name, Variable::Mode mode,
+ VariableProxy* Declare(Handle<String> name, VariableMode mode,
FunctionLiteral* fun,
bool resolve,
bool* ok);
@@ -670,11 +695,12 @@ class Parser {
// Factory methods.
Statement* EmptyStatement() {
- static v8::internal::EmptyStatement empty;
- return &empty;
+ static v8::internal::EmptyStatement* empty =
+ ::new v8::internal::EmptyStatement();
+ return empty;
}
- Scope* NewScope(Scope* parent, Scope::Type type, bool inside_with);
+ Scope* NewScope(Scope* parent, ScopeType type);
Handle<String> LookupSymbol(int symbol_id);
@@ -712,33 +738,34 @@ class Parser {
Handle<String> type,
Vector< Handle<Object> > arguments);
+ preparser::PreParser::PreParseResult LazyParseFunctionLiteral(
+ SingletonLogger* logger);
+
Isolate* isolate_;
ZoneList<Handle<String> > symbol_cache_;
Handle<Script> script_;
- JavaScriptScanner scanner_;
-
+ Scanner scanner_;
+ preparser::PreParser* reusable_preparser_;
Scope* top_scope_;
- int with_nesting_level_;
-
- LexicalScope* lexical_scope_;
- Mode mode_;
-
+ FunctionState* current_function_state_;
Target* target_stack_; // for break, continue statements
- bool allow_natives_syntax_;
v8::Extension* extension_;
- bool is_pre_parsing_;
ScriptDataImpl* pre_data_;
FuncNameInferrer* fni_;
+
+ Mode mode_;
+ bool allow_natives_syntax_;
+ bool allow_lazy_;
bool stack_overflow_;
// If true, the next (and immediately following) function literal is
// preceded by a parenthesis.
// Heuristically that means that the function will be called immediately,
// so never lazily compile it.
bool parenthesized_function_;
- bool harmony_block_scoping_;
- friend class LexicalScope;
+ friend class BlockState;
+ friend class FunctionState;
};
diff --git a/deps/v8/src/platform-freebsd.cc b/deps/v8/src/platform-freebsd.cc
index 685ec3c78..20bd83793 100644
--- a/deps/v8/src/platform-freebsd.cc
+++ b/deps/v8/src/platform-freebsd.cc
@@ -333,44 +333,126 @@ int OS::StackWalk(Vector<OS::StackFrame> frames) {
static const int kMmapFd = -1;
static const int kMmapFdOffset = 0;
+VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
VirtualMemory::VirtualMemory(size_t size) {
- address_ = mmap(NULL, size, PROT_NONE,
- MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
- kMmapFd, kMmapFdOffset);
+ address_ = ReserveRegion(size);
size_ = size;
}
+VirtualMemory::VirtualMemory(size_t size, size_t alignment)
+ : address_(NULL), size_(0) {
+ ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
+ size_t request_size = RoundUp(size + alignment,
+ static_cast<intptr_t>(OS::AllocateAlignment()));
+ void* reservation = mmap(OS::GetRandomMmapAddr(),
+ request_size,
+ PROT_NONE,
+ MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
+ kMmapFd,
+ kMmapFdOffset);
+ if (reservation == MAP_FAILED) return;
+
+ Address base = static_cast<Address>(reservation);
+ Address aligned_base = RoundUp(base, alignment);
+ ASSERT_LE(base, aligned_base);
+
+ // Unmap extra memory reserved before and after the desired block.
+ if (aligned_base != base) {
+ size_t prefix_size = static_cast<size_t>(aligned_base - base);
+ OS::Free(base, prefix_size);
+ request_size -= prefix_size;
+ }
+
+ size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
+ ASSERT_LE(aligned_size, request_size);
+
+ if (aligned_size != request_size) {
+ size_t suffix_size = request_size - aligned_size;
+ OS::Free(aligned_base + aligned_size, suffix_size);
+ request_size -= suffix_size;
+ }
+
+ ASSERT(aligned_size == request_size);
+
+ address_ = static_cast<void*>(aligned_base);
+ size_ = aligned_size;
+}
+
+
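The aligned constructor above uses the standard POSIX over-reserve-and-trim trick: reserve size + alignment bytes, round the base up, then munmap the misaligned prefix and the leftover suffix so exactly the rounded size remains at an aligned address. A minimal standalone sketch of the same technique (plain mmap/munmap, outside V8's OS wrappers; assumes size and alignment are already page multiples):

    #include <stdint.h>
    #include <stddef.h>
    #include <sys/mman.h>

    void* ReserveAligned(size_t size, size_t alignment) {
      size_t request = size + alignment;
      void* raw = mmap(NULL, request, PROT_NONE,
                       MAP_PRIVATE | MAP_ANON | MAP_NORESERVE, -1, 0);
      if (raw == MAP_FAILED) return NULL;
      uintptr_t base = reinterpret_cast<uintptr_t>(raw);
      uintptr_t aligned = (base + alignment - 1) & ~(alignment - 1);
      if (aligned != base) munmap(raw, aligned - base);   // trim prefix
      size_t suffix = request - (aligned - base) - size;
      if (suffix != 0) {                                  // trim suffix
        munmap(reinterpret_cast<void*>(aligned + size), suffix);
      }
      return reinterpret_cast<void*>(aligned);  // size bytes, aligned
    }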
VirtualMemory::~VirtualMemory() {
if (IsReserved()) {
- if (0 == munmap(address(), size())) address_ = MAP_FAILED;
+ bool result = ReleaseRegion(address(), size());
+ ASSERT(result);
+ USE(result);
}
}
bool VirtualMemory::IsReserved() {
- return address_ != MAP_FAILED;
+ return address_ != NULL;
}
-bool VirtualMemory::Commit(void* address, size_t size, bool executable) {
- int prot = PROT_READ | PROT_WRITE | (executable ? PROT_EXEC : 0);
- if (MAP_FAILED == mmap(address, size, prot,
+void VirtualMemory::Reset() {
+ address_ = NULL;
+ size_ = 0;
+}
+
+
+bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
+ return CommitRegion(address, size, is_executable);
+}
+
+
+bool VirtualMemory::Uncommit(void* address, size_t size) {
+ return UncommitRegion(address, size);
+}
+
+
+void* VirtualMemory::ReserveRegion(size_t size) {
+ void* result = mmap(OS::GetRandomMmapAddr(),
+ size,
+ PROT_NONE,
+ MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
+ kMmapFd,
+ kMmapFdOffset);
+
+ if (result == MAP_FAILED) return NULL;
+
+ return result;
+}
+
+
+bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
+ int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
+ if (MAP_FAILED == mmap(base,
+ size,
+ prot,
MAP_PRIVATE | MAP_ANON | MAP_FIXED,
- kMmapFd, kMmapFdOffset)) {
+ kMmapFd,
+ kMmapFdOffset)) {
return false;
}
- UpdateAllocatedSpaceLimits(address, size);
+ UpdateAllocatedSpaceLimits(base, size);
return true;
}
-bool VirtualMemory::Uncommit(void* address, size_t size) {
- return mmap(address, size, PROT_NONE,
+bool VirtualMemory::UncommitRegion(void* base, size_t size) {
+ return mmap(base,
+ size,
+ PROT_NONE,
MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED,
- kMmapFd, kMmapFdOffset) != MAP_FAILED;
+ kMmapFd,
+ kMmapFdOffset) != MAP_FAILED;
+}
+
+
+bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
+ return munmap(base, size) == 0;
}
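With the split into static Region helpers, reservation and commitment become independent steps: ReserveRegion takes cheap PROT_NONE address space, CommitRegion makes a sub-range readable/writable via the MAP_FIXED remap, UncommitRegion drops it back to PROT_NONE, and ReleaseRegion unmaps the whole range. A hypothetical caller (sizes invented for illustration; assumes V8's internal platform.h) would drive the protocol like this:

    // Illustrative lifecycle of a reservation; not part of the patch.
    void DemoRegionProtocol() {
      const size_t kPage = 4096;  // assumed page size for the sketch
      void* region = VirtualMemory::ReserveRegion(64 * kPage);
      if (region == NULL) return;
      // Memory only becomes usable once a sub-range is committed.
      if (VirtualMemory::CommitRegion(region, 4 * kPage, false)) {
        // ... use the first four pages ...
        VirtualMemory::UncommitRegion(region, 4 * kPage);
      }
      VirtualMemory::ReleaseRegion(region, 64 * kPage);
    }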
diff --git a/deps/v8/src/platform-linux.cc b/deps/v8/src/platform-linux.cc
index b152dae9a..e72d095b0 100644
--- a/deps/v8/src/platform-linux.cc
+++ b/deps/v8/src/platform-linux.cc
@@ -78,30 +78,6 @@ double ceiling(double x) {
static Mutex* limit_mutex = NULL;
-static void* GetRandomMmapAddr() {
- Isolate* isolate = Isolate::UncheckedCurrent();
- // Note that the current isolate isn't set up in a call path via
- // CpuFeatures::Probe. We don't care about randomization in this case because
- // the code page is immediately freed.
- if (isolate != NULL) {
-#ifdef V8_TARGET_ARCH_X64
- uint64_t rnd1 = V8::RandomPrivate(isolate);
- uint64_t rnd2 = V8::RandomPrivate(isolate);
- uint64_t raw_addr = (rnd1 << 32) ^ rnd2;
- raw_addr &= V8_UINT64_C(0x3ffffffff000);
-#else
- uint32_t raw_addr = V8::RandomPrivate(isolate);
- // The range 0x20000000 - 0x60000000 is relatively unpopulated across a
- // variety of ASLR modes (PAE kernel, NX compat mode, etc).
- raw_addr &= 0x3ffff000;
- raw_addr += 0x20000000;
-#endif
- return reinterpret_cast<void*>(raw_addr);
- }
- return NULL;
-}
-
-
void OS::Setup() {
// Seed the random number generator. We preserve microsecond resolution.
uint64_t seed = Ticks() ^ (getpid() << 16);
@@ -381,9 +357,9 @@ size_t OS::AllocateAlignment() {
void* OS::Allocate(const size_t requested,
size_t* allocated,
bool is_executable) {
- const size_t msize = RoundUp(requested, sysconf(_SC_PAGESIZE));
+ const size_t msize = RoundUp(requested, AllocateAlignment());
int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
- void* addr = GetRandomMmapAddr();
+ void* addr = OS::GetRandomMmapAddr();
void* mbase = mmap(addr, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
if (mbase == MAP_FAILED) {
LOG(i::Isolate::Current(),
@@ -453,7 +429,12 @@ OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
int size = ftell(file);
void* memory =
- mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
+ mmap(OS::GetRandomMmapAddr(),
+ size,
+ PROT_READ | PROT_WRITE,
+ MAP_SHARED,
+ fileno(file),
+ 0);
return new PosixMemoryMappedFile(file, memory, size);
}
@@ -468,13 +449,18 @@ OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
return NULL;
}
void* memory =
- mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
+ mmap(OS::GetRandomMmapAddr(),
+ size,
+ PROT_READ | PROT_WRITE,
+ MAP_SHARED,
+ fileno(file),
+ 0);
return new PosixMemoryMappedFile(file, memory, size);
}
PosixMemoryMappedFile::~PosixMemoryMappedFile() {
- if (memory_) munmap(memory_, size_);
+ if (memory_) OS::Free(memory_, size_);
fclose(file_);
}
@@ -553,10 +539,14 @@ void OS::SignalCodeMovingGC() {
// kernel log.
int size = sysconf(_SC_PAGESIZE);
FILE* f = fopen(kGCFakeMmap, "w+");
- void* addr = mmap(NULL, size, PROT_READ | PROT_EXEC, MAP_PRIVATE,
- fileno(f), 0);
+ void* addr = mmap(OS::GetRandomMmapAddr(),
+ size,
+ PROT_READ | PROT_EXEC,
+ MAP_PRIVATE,
+ fileno(f),
+ 0);
ASSERT(addr != MAP_FAILED);
- munmap(addr, size);
+ OS::Free(addr, size);
fclose(f);
}
@@ -598,44 +588,126 @@ int OS::StackWalk(Vector<OS::StackFrame> frames) {
static const int kMmapFd = -1;
static const int kMmapFdOffset = 0;
+VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
VirtualMemory::VirtualMemory(size_t size) {
- address_ = mmap(GetRandomMmapAddr(), size, PROT_NONE,
- MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
- kMmapFd, kMmapFdOffset);
+ address_ = ReserveRegion(size);
size_ = size;
}
+VirtualMemory::VirtualMemory(size_t size, size_t alignment)
+ : address_(NULL), size_(0) {
+ ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
+ size_t request_size = RoundUp(size + alignment,
+ static_cast<intptr_t>(OS::AllocateAlignment()));
+ void* reservation = mmap(OS::GetRandomMmapAddr(),
+ request_size,
+ PROT_NONE,
+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
+ kMmapFd,
+ kMmapFdOffset);
+ if (reservation == MAP_FAILED) return;
+
+ Address base = static_cast<Address>(reservation);
+ Address aligned_base = RoundUp(base, alignment);
+ ASSERT_LE(base, aligned_base);
+
+ // Unmap extra memory reserved before and after the desired block.
+ if (aligned_base != base) {
+ size_t prefix_size = static_cast<size_t>(aligned_base - base);
+ OS::Free(base, prefix_size);
+ request_size -= prefix_size;
+ }
+
+ size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
+ ASSERT_LE(aligned_size, request_size);
+
+ if (aligned_size != request_size) {
+ size_t suffix_size = request_size - aligned_size;
+ OS::Free(aligned_base + aligned_size, suffix_size);
+ request_size -= suffix_size;
+ }
+
+ ASSERT(aligned_size == request_size);
+
+ address_ = static_cast<void*>(aligned_base);
+ size_ = aligned_size;
+}
+
+
VirtualMemory::~VirtualMemory() {
if (IsReserved()) {
- if (0 == munmap(address(), size())) address_ = MAP_FAILED;
+ bool result = ReleaseRegion(address(), size());
+ ASSERT(result);
+ USE(result);
}
}
bool VirtualMemory::IsReserved() {
- return address_ != MAP_FAILED;
+ return address_ != NULL;
+}
+
+
+void VirtualMemory::Reset() {
+ address_ = NULL;
+ size_ = 0;
}
bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
+ return CommitRegion(address, size, is_executable);
+}
+
+
+bool VirtualMemory::Uncommit(void* address, size_t size) {
+ return UncommitRegion(address, size);
+}
+
+
+void* VirtualMemory::ReserveRegion(size_t size) {
+ void* result = mmap(OS::GetRandomMmapAddr(),
+ size,
+ PROT_NONE,
+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
+ kMmapFd,
+ kMmapFdOffset);
+
+ if (result == MAP_FAILED) return NULL;
+
+ return result;
+}
+
+
+bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
- if (MAP_FAILED == mmap(address, size, prot,
+ if (MAP_FAILED == mmap(base,
+ size,
+ prot,
MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
- kMmapFd, kMmapFdOffset)) {
+ kMmapFd,
+ kMmapFdOffset)) {
return false;
}
- UpdateAllocatedSpaceLimits(address, size);
+ UpdateAllocatedSpaceLimits(base, size);
return true;
}
-bool VirtualMemory::Uncommit(void* address, size_t size) {
- return mmap(address, size, PROT_NONE,
+bool VirtualMemory::UncommitRegion(void* base, size_t size) {
+ return mmap(base,
+ size,
+ PROT_NONE,
MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED,
- kMmapFd, kMmapFdOffset) != MAP_FAILED;
+ kMmapFd,
+ kMmapFdOffset) != MAP_FAILED;
+}
+
+
+bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
+ return munmap(base, size) == 0;
}
@@ -696,7 +768,8 @@ void Thread::Start() {
pthread_attr_setstacksize(&attr, static_cast<size_t>(stack_size_));
attr_ptr = &attr;
}
- pthread_create(&data_->thread_, attr_ptr, ThreadEntry, this);
+ int result = pthread_create(&data_->thread_, attr_ptr, ThreadEntry, this);
+ CHECK_EQ(0, result);
ASSERT(data_->thread_ != kNoThread);
}
@@ -892,7 +965,6 @@ static int GetThreadID() {
static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
-#ifndef V8_HOST_ARCH_MIPS
USE(info);
if (signal != SIGPROF) return;
Isolate* isolate = Isolate::UncheckedCurrent();
@@ -934,15 +1006,14 @@ static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
sample->pc = reinterpret_cast<Address>(mcontext.arm_pc);
sample->sp = reinterpret_cast<Address>(mcontext.arm_sp);
sample->fp = reinterpret_cast<Address>(mcontext.arm_fp);
-#endif
+#endif // (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
#elif V8_HOST_ARCH_MIPS
- sample.pc = reinterpret_cast<Address>(mcontext.pc);
- sample.sp = reinterpret_cast<Address>(mcontext.gregs[29]);
- sample.fp = reinterpret_cast<Address>(mcontext.gregs[30]);
-#endif
+ sample->pc = reinterpret_cast<Address>(mcontext.pc);
+ sample->sp = reinterpret_cast<Address>(mcontext.gregs[29]);
+ sample->fp = reinterpret_cast<Address>(mcontext.gregs[30]);
+#endif // V8_HOST_ARCH_*
sampler->SampleStack(sample);
sampler->Tick(sample);
-#endif
}
diff --git a/deps/v8/src/platform-macos.cc b/deps/v8/src/platform-macos.cc
index 6be941a08..6e5d29da2 100644
--- a/deps/v8/src/platform-macos.cc
+++ b/deps/v8/src/platform-macos.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -94,12 +94,8 @@ static Mutex* limit_mutex = NULL;
void OS::Setup() {
- // Seed the random number generator.
- // Convert the current time to a 64-bit integer first, before converting it
- // to an unsigned. Going directly will cause an overflow and the seed to be
- // set to all ones. The seed will be identical for different instances that
- // call this setup code within the same millisecond.
- uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
+ // Seed the random number generator. We preserve microsecond resolution.
+ uint64_t seed = Ticks() ^ (getpid() << 16);
srandom(static_cast<unsigned int>(seed));
limit_mutex = CreateMutex();
}
@@ -148,9 +144,12 @@ void* OS::Allocate(const size_t requested,
bool is_executable) {
const size_t msize = RoundUp(requested, getpagesize());
int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
- void* mbase = mmap(NULL, msize, prot,
+ void* mbase = mmap(OS::GetRandomMmapAddr(),
+ msize,
+ prot,
MAP_PRIVATE | MAP_ANON,
- kMmapFd, kMmapFdOffset);
+ kMmapFd,
+ kMmapFdOffset);
if (mbase == MAP_FAILED) {
LOG(Isolate::Current(), StringEvent("OS::Allocate", "mmap failed"));
return NULL;
@@ -207,7 +206,12 @@ OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
int size = ftell(file);
void* memory =
- mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
+ mmap(OS::GetRandomMmapAddr(),
+ size,
+ PROT_READ | PROT_WRITE,
+ MAP_SHARED,
+ fileno(file),
+ 0);
return new PosixMemoryMappedFile(file, memory, size);
}
@@ -222,13 +226,18 @@ OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
return NULL;
}
void* memory =
- mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
+ mmap(OS::GetRandomMmapAddr(),
+ size,
+ PROT_READ | PROT_WRITE,
+ MAP_SHARED,
+ fileno(file),
+ 0);
return new PosixMemoryMappedFile(file, memory, size);
}
PosixMemoryMappedFile::~PosixMemoryMappedFile() {
- if (memory_) munmap(memory_, size_);
+ if (memory_) OS::Free(memory_, size_);
fclose(file_);
}
@@ -334,33 +343,102 @@ int OS::StackWalk(Vector<StackFrame> frames) {
}
+VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
-VirtualMemory::VirtualMemory(size_t size) {
- address_ = mmap(NULL, size, PROT_NONE,
- MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
- kMmapFd, kMmapFdOffset);
- size_ = size;
+VirtualMemory::VirtualMemory(size_t size)
+ : address_(ReserveRegion(size)), size_(size) { }
+
+
+VirtualMemory::VirtualMemory(size_t size, size_t alignment)
+ : address_(NULL), size_(0) {
+ ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
+ size_t request_size = RoundUp(size + alignment,
+ static_cast<intptr_t>(OS::AllocateAlignment()));
+ void* reservation = mmap(OS::GetRandomMmapAddr(),
+ request_size,
+ PROT_NONE,
+ MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
+ kMmapFd,
+ kMmapFdOffset);
+ if (reservation == MAP_FAILED) return;
+
+ Address base = static_cast<Address>(reservation);
+ Address aligned_base = RoundUp(base, alignment);
+ ASSERT_LE(base, aligned_base);
+
+ // Unmap extra memory reserved before and after the desired block.
+ if (aligned_base != base) {
+ size_t prefix_size = static_cast<size_t>(aligned_base - base);
+ OS::Free(base, prefix_size);
+ request_size -= prefix_size;
+ }
+
+ size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
+ ASSERT_LE(aligned_size, request_size);
+
+ if (aligned_size != request_size) {
+ size_t suffix_size = request_size - aligned_size;
+ OS::Free(aligned_base + aligned_size, suffix_size);
+ request_size -= suffix_size;
+ }
+
+ ASSERT(aligned_size == request_size);
+
+ address_ = static_cast<void*>(aligned_base);
+ size_ = aligned_size;
}
VirtualMemory::~VirtualMemory() {
if (IsReserved()) {
- if (0 == munmap(address(), size())) address_ = MAP_FAILED;
+ bool result = ReleaseRegion(address(), size());
+ ASSERT(result);
+ USE(result);
}
}
+void VirtualMemory::Reset() {
+ address_ = NULL;
+ size_ = 0;
+}
+
+
+void* VirtualMemory::ReserveRegion(size_t size) {
+ void* result = mmap(OS::GetRandomMmapAddr(),
+ size,
+ PROT_NONE,
+ MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
+ kMmapFd,
+ kMmapFdOffset);
+
+ if (result == MAP_FAILED) return NULL;
+
+ return result;
+}
+
+
bool VirtualMemory::IsReserved() {
- return address_ != MAP_FAILED;
+ return address_ != NULL;
}
bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
+ return CommitRegion(address, size, is_executable);
+}
+
+
+bool VirtualMemory::CommitRegion(void* address,
+ size_t size,
+ bool is_executable) {
int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
- if (MAP_FAILED == mmap(address, size, prot,
+ if (MAP_FAILED == mmap(address,
+ size,
+ prot,
MAP_PRIVATE | MAP_ANON | MAP_FIXED,
- kMmapFd, kMmapFdOffset)) {
+ kMmapFd,
+ kMmapFdOffset)) {
return false;
}
@@ -370,9 +448,22 @@ bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
bool VirtualMemory::Uncommit(void* address, size_t size) {
- return mmap(address, size, PROT_NONE,
+ return UncommitRegion(address, size);
+}
+
+
+bool VirtualMemory::UncommitRegion(void* address, size_t size) {
+ return mmap(address,
+ size,
+ PROT_NONE,
MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED,
- kMmapFd, kMmapFdOffset) != MAP_FAILED;
+ kMmapFd,
+ kMmapFdOffset) != MAP_FAILED;
+}
+
+
+bool VirtualMemory::ReleaseRegion(void* address, size_t size) {
+ return munmap(address, size) == 0;
}
diff --git a/deps/v8/src/platform-openbsd.cc b/deps/v8/src/platform-openbsd.cc
index 973329b9b..b3f4924ee 100644
--- a/deps/v8/src/platform-openbsd.cc
+++ b/deps/v8/src/platform-openbsd.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2011 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -33,79 +33,99 @@
#include <signal.h>
#include <sys/time.h>
#include <sys/resource.h>
+#include <sys/syscall.h>
#include <sys/types.h>
#include <stdlib.h>
#include <sys/types.h> // mmap & munmap
#include <sys/mman.h> // mmap & munmap
#include <sys/stat.h> // open
-#include <sys/fcntl.h> // open
-#include <unistd.h> // getpagesize
+#include <fcntl.h> // open
+#include <unistd.h> // sysconf
#include <execinfo.h> // backtrace, backtrace_symbols
#include <strings.h> // index
#include <errno.h>
#include <stdarg.h>
-#include <limits.h>
#undef MAP_TYPE
#include "v8.h"
-#include "v8threads.h"
#include "platform.h"
+#include "v8threads.h"
#include "vm-state-inl.h"
namespace v8 {
namespace internal {
-// 0 is never a valid thread id on OpenBSD since tids and pids share a
-// name space and pid 0 is used to kill the group (see man 2 kill).
+// 0 is never a valid thread id on Linux and OpenBSD since tids and pids share a
+// name space and pid 0 is reserved (see man 2 kill).
static const pthread_t kNoThread = (pthread_t) 0;
double ceiling(double x) {
- // Correct as on OS X
- if (-1.0 < x && x < 0.0) {
- return -0.0;
- } else {
- return ceil(x);
- }
+ return ceil(x);
}
static Mutex* limit_mutex = NULL;
-void OS::Setup() {
- // Seed the random number generator.
- // Convert the current time to a 64-bit integer first, before converting it
- // to an unsigned. Going directly can cause an overflow and the seed to be
- // set to all ones. The seed will be identical for different instances that
- // call this setup code within the same millisecond.
- uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
- srandom(static_cast<unsigned int>(seed));
- limit_mutex = CreateMutex();
+static void* GetRandomMmapAddr() {
+ Isolate* isolate = Isolate::UncheckedCurrent();
+ // Note that the current isolate isn't set up in a call path via
+ // CpuFeatures::Probe. We don't care about randomization in this case because
+ // the code page is immediately freed.
+ if (isolate != NULL) {
+#ifdef V8_TARGET_ARCH_X64
+ uint64_t rnd1 = V8::RandomPrivate(isolate);
+ uint64_t rnd2 = V8::RandomPrivate(isolate);
+ uint64_t raw_addr = (rnd1 << 32) ^ rnd2;
+ // Currently available CPUs have 48 bits of virtual addressing. Truncate
+ // the hint address to 46 bits to give the kernel a fighting chance of
+ // fulfilling our placement request.
+ raw_addr &= V8_UINT64_C(0x3ffffffff000);
+#else
+ uint32_t raw_addr = V8::RandomPrivate(isolate);
+ // The range 0x20000000 - 0x60000000 is relatively unpopulated across a
+ // variety of ASLR modes (PAE kernel, NX compat mode, etc).
+ raw_addr &= 0x3ffff000;
+ raw_addr += 0x20000000;
+#endif
+ return reinterpret_cast<void*>(raw_addr);
+ }
+ return NULL;
}
-void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) {
- __asm__ __volatile__("" : : : "memory");
- *ptr = value;
+void OS::Setup() {
+ // Seed the random number generator. We preserve microsecond resolution.
+ uint64_t seed = Ticks() ^ (getpid() << 16);
+ srandom(static_cast<unsigned int>(seed));
+ limit_mutex = CreateMutex();
}
uint64_t OS::CpuFeaturesImpliedByPlatform() {
- return 0; // OpenBSD runs on anything.
+ return 0;
}
int OS::ActivationFrameAlignment() {
- // 16 byte alignment on OpenBSD
+ // With gcc 4.4 the tree vectorization optimizer can generate code
+ // that requires 16 byte alignment such as movdqa on x86.
return 16;
}
+void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) {
+ __asm__ __volatile__("" : : : "memory");
+ // An x86 store acts as a release barrier.
+ *ptr = value;
+}
+
+
const char* OS::LocalTimezone(double time) {
if (isnan(time)) return "";
time_t tv = static_cast<time_t>(floor(time/msPerSecond));
@@ -150,19 +170,20 @@ bool OS::IsOutsideAllocatedSpace(void* address) {
size_t OS::AllocateAlignment() {
- return getpagesize();
+ return sysconf(_SC_PAGESIZE);
}
void* OS::Allocate(const size_t requested,
size_t* allocated,
- bool executable) {
- const size_t msize = RoundUp(requested, getpagesize());
- int prot = PROT_READ | PROT_WRITE | (executable ? PROT_EXEC : 0);
- void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0);
-
+ bool is_executable) {
+ const size_t msize = RoundUp(requested, AllocateAlignment());
+ int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
+ void* addr = GetRandomMmapAddr();
+ void* mbase = mmap(addr, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0);
if (mbase == MAP_FAILED) {
- LOG(ISOLATE, StringEvent("OS::Allocate", "mmap failed"));
+ LOG(i::Isolate::Current(),
+ StringEvent("OS::Allocate", "mmap failed"));
return NULL;
}
*allocated = msize;
@@ -171,9 +192,9 @@ void* OS::Allocate(const size_t requested,
}
-void OS::Free(void* buf, const size_t length) {
+void OS::Free(void* address, const size_t size) {
// TODO(1240712): munmap has a return value which is ignored here.
- int result = munmap(buf, length);
+ int result = munmap(address, size);
USE(result);
ASSERT(result == 0);
}
@@ -192,13 +213,7 @@ void OS::Abort() {
void OS::DebugBreak() {
-#if (defined(__arm__) || defined(__thumb__))
-# if defined(CAN_USE_ARMV5_INSTRUCTIONS)
- asm("bkpt 0");
-# endif
-#else
asm("int $3");
-#endif
}
@@ -245,61 +260,95 @@ OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
PosixMemoryMappedFile::~PosixMemoryMappedFile() {
- if (memory_) munmap(memory_, size_);
+ if (memory_) OS::Free(memory_, size_);
fclose(file_);
}
-static unsigned StringToLong(char* buffer) {
- return static_cast<unsigned>(strtol(buffer, NULL, 16)); // NOLINT
-}
-
-
void OS::LogSharedLibraryAddresses() {
- static const int MAP_LENGTH = 1024;
- int fd = open("/proc/self/maps", O_RDONLY);
- if (fd < 0) return;
+ // This function assumes that the layout of the file is as follows:
+ // hex_start_addr-hex_end_addr rwxp <unused data> [binary_file_name]
+ // If we encounter an unexpected situation we abort scanning further entries.
+ FILE* fp = fopen("/proc/self/maps", "r");
+ if (fp == NULL) return;
+
+ // Allocate enough room to be able to store a full file name.
+ const int kLibNameLen = FILENAME_MAX + 1;
+ char* lib_name = reinterpret_cast<char*>(malloc(kLibNameLen));
+
+ i::Isolate* isolate = ISOLATE;
+ // This loop will terminate once the scanning hits an EOF.
while (true) {
- char addr_buffer[11];
- addr_buffer[0] = '0';
- addr_buffer[1] = 'x';
- addr_buffer[10] = 0;
- int result = read(fd, addr_buffer + 2, 8);
- if (result < 8) break;
- unsigned start = StringToLong(addr_buffer);
- result = read(fd, addr_buffer + 2, 1);
- if (result < 1) break;
- if (addr_buffer[2] != '-') break;
- result = read(fd, addr_buffer + 2, 8);
- if (result < 8) break;
- unsigned end = StringToLong(addr_buffer);
- char buffer[MAP_LENGTH];
- int bytes_read = -1;
- do {
- bytes_read++;
- if (bytes_read >= MAP_LENGTH - 1)
- break;
- result = read(fd, buffer + bytes_read, 1);
- if (result < 1) break;
- } while (buffer[bytes_read] != '\n');
- buffer[bytes_read] = 0;
- // Ignore mappings that are not executable.
- if (buffer[3] != 'x') continue;
- char* start_of_path = index(buffer, '/');
- // There may be no filename in this line. Skip to next.
- if (start_of_path == NULL) continue;
- buffer[bytes_read] = 0;
- LOG(i::Isolate::Current(), SharedLibraryEvent(start_of_path, start, end));
+ uintptr_t start, end;
+ char attr_r, attr_w, attr_x, attr_p;
+ // Parse the addresses and permission bits at the beginning of the line.
+ if (fscanf(fp, "%" V8PRIxPTR "-%" V8PRIxPTR, &start, &end) != 2) break;
+ if (fscanf(fp, " %c%c%c%c", &attr_r, &attr_w, &attr_x, &attr_p) != 4) break;
+
+ int c;
+ if (attr_r == 'r' && attr_w != 'w' && attr_x == 'x') {
+ // Found a read-only executable entry. Skip characters until we reach
+ // the beginning of the filename or the end of the line.
+ do {
+ c = getc(fp);
+ } while ((c != EOF) && (c != '\n') && (c != '/'));
+ if (c == EOF) break; // EOF: Was unexpected, just exit.
+
+ // Process the filename if found.
+ if (c == '/') {
+ ungetc(c, fp); // Push the '/' back into the stream to be read below.
+
+ // Read to the end of the line. Exit if the read fails.
+ if (fgets(lib_name, kLibNameLen, fp) == NULL) break;
+
+ // Drop the newline character read by fgets. We do not need to check
+ // for a zero-length string because we know that we at least read the
+ // '/' character.
+ lib_name[strlen(lib_name) - 1] = '\0';
+ } else {
+ // No library name found, just record the raw address range.
+ snprintf(lib_name, kLibNameLen,
+ "%08" V8PRIxPTR "-%08" V8PRIxPTR, start, end);
+ }
+ LOG(isolate, SharedLibraryEvent(lib_name, start, end));
+ } else {
+ // Entry not describing executable data. Skip to end of line to set up
+ // reading the next entry.
+ do {
+ c = getc(fp);
+ } while ((c != EOF) && (c != '\n'));
+ if (c == EOF) break;
+ }
}
- close(fd);
+ free(lib_name);
+ fclose(fp);
}
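For reference, a typical entry the loop above accepts looks like 00400000-00452000 r-xp 00000000 08:02 173521 /usr/bin/dbus-daemon. A stripped-down standalone version of the same scan, using sscanf on one hard-coded line for illustration:

    #include <stdio.h>

    int main() {
      const char* line = "00400000-00452000 r-xp 00000000 08:02 173521";
      unsigned long start, end;
      char r, w, x, p;
      // Same shape as the two fscanf calls: address range, then perm bits.
      if (sscanf(line, "%lx-%lx %c%c%c%c",
                 &start, &end, &r, &w, &x, &p) == 6 &&
          r == 'r' && w != 'w' && x == 'x') {  // read-only executable
        printf("executable mapping: %08lx-%08lx\n", start, end);
      }
      return 0;
    }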
+static const char kGCFakeMmap[] = "/tmp/__v8_gc__";
+
+
void OS::SignalCodeMovingGC() {
+ // Support for ll_prof.py.
+ //
+ // The Linux profiler built into the kernel logs all mmap's with
+ // PROT_EXEC so that analysis tools can properly attribute ticks. We
+ // do a mmap with a name known by ll_prof.py and immediately munmap
+ // it. This injects a GC marker into the stream of events generated
+ // by the kernel and allows us to synchronize V8 code log and the
+ // kernel log.
+ int size = sysconf(_SC_PAGESIZE);
+ FILE* f = fopen(kGCFakeMmap, "w+");
+ void* addr = mmap(NULL, size, PROT_READ | PROT_EXEC, MAP_PRIVATE,
+ fileno(f), 0);
+ ASSERT(addr != MAP_FAILED);
+ OS::Free(addr, size);
+ fclose(f);
}
int OS::StackWalk(Vector<OS::StackFrame> frames) {
+ // backtrace is a glibc extension.
int frames_size = frames.length();
ScopedVector<void*> addresses(frames_size);
@@ -331,62 +380,145 @@ int OS::StackWalk(Vector<OS::StackFrame> frames) {
static const int kMmapFd = -1;
static const int kMmapFdOffset = 0;
+VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
VirtualMemory::VirtualMemory(size_t size) {
- address_ = mmap(NULL, size, PROT_NONE,
- MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
- kMmapFd, kMmapFdOffset);
+ address_ = ReserveRegion(size);
size_ = size;
}
+VirtualMemory::VirtualMemory(size_t size, size_t alignment)
+ : address_(NULL), size_(0) {
+ ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
+ size_t request_size = RoundUp(size + alignment,
+ static_cast<intptr_t>(OS::AllocateAlignment()));
+ void* reservation = mmap(GetRandomMmapAddr(),
+ request_size,
+ PROT_NONE,
+ MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
+ kMmapFd,
+ kMmapFdOffset);
+ if (reservation == MAP_FAILED) return;
+
+ Address base = static_cast<Address>(reservation);
+ Address aligned_base = RoundUp(base, alignment);
+ ASSERT_LE(base, aligned_base);
+
+ // Unmap extra memory reserved before and after the desired block.
+ if (aligned_base != base) {
+ size_t prefix_size = static_cast<size_t>(aligned_base - base);
+ OS::Free(base, prefix_size);
+ request_size -= prefix_size;
+ }
+
+ size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
+ ASSERT_LE(aligned_size, request_size);
+
+ if (aligned_size != request_size) {
+ size_t suffix_size = request_size - aligned_size;
+ OS::Free(aligned_base + aligned_size, suffix_size);
+ request_size -= suffix_size;
+ }
+
+ ASSERT(aligned_size == request_size);
+
+ address_ = static_cast<void*>(aligned_base);
+ size_ = aligned_size;
+}
+
+
VirtualMemory::~VirtualMemory() {
if (IsReserved()) {
- if (0 == munmap(address(), size())) address_ = MAP_FAILED;
+ bool result = ReleaseRegion(address(), size());
+ ASSERT(result);
+ USE(result);
}
}
bool VirtualMemory::IsReserved() {
- return address_ != MAP_FAILED;
+ return address_ != NULL;
}
-bool VirtualMemory::Commit(void* address, size_t size, bool executable) {
- int prot = PROT_READ | PROT_WRITE | (executable ? PROT_EXEC : 0);
- if (MAP_FAILED == mmap(address, size, prot,
+void VirtualMemory::Reset() {
+ address_ = NULL;
+ size_ = 0;
+}
+
+
+bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
+ return CommitRegion(address, size, is_executable);
+}
+
+
+bool VirtualMemory::Uncommit(void* address, size_t size) {
+ return UncommitRegion(address, size);
+}
+
+
+void* VirtualMemory::ReserveRegion(size_t size) {
+ void* result = mmap(GetRandomMmapAddr(),
+ size,
+ PROT_NONE,
+ MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
+ kMmapFd,
+ kMmapFdOffset);
+
+ if (result == MAP_FAILED) return NULL;
+
+ return result;
+}
+
+
+bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
+ int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
+ if (MAP_FAILED == mmap(base,
+ size,
+ prot,
MAP_PRIVATE | MAP_ANON | MAP_FIXED,
- kMmapFd, kMmapFdOffset)) {
+ kMmapFd,
+ kMmapFdOffset)) {
return false;
}
- UpdateAllocatedSpaceLimits(address, size);
+ UpdateAllocatedSpaceLimits(base, size);
return true;
}
-bool VirtualMemory::Uncommit(void* address, size_t size) {
- return mmap(address, size, PROT_NONE,
+bool VirtualMemory::UncommitRegion(void* base, size_t size) {
+ return mmap(base,
+ size,
+ PROT_NONE,
MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED,
- kMmapFd, kMmapFdOffset) != MAP_FAILED;
+ kMmapFd,
+ kMmapFdOffset) != MAP_FAILED;
+}
+
+
+bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
+ return munmap(base, size) == 0;
}
class Thread::PlatformData : public Malloced {
public:
+ PlatformData() : thread_(kNoThread) {}
+
pthread_t thread_; // Thread handle for pthread.
};
-
Thread::Thread(const Options& options)
- : data_(new PlatformData),
+ : data_(new PlatformData()),
stack_size_(options.stack_size) {
set_name(options.name);
}
Thread::Thread(const char* name)
- : data_(new PlatformData),
+ : data_(new PlatformData()),
stack_size_(0) {
set_name(name);
}
@@ -402,6 +534,11 @@ static void* ThreadEntry(void* arg) {
// This is also initialized by the first argument to pthread_create() but we
// don't know which thread will run first (the original thread or the new
// one) so we initialize it here too.
+#ifdef PR_SET_NAME
+ prctl(PR_SET_NAME,
+ reinterpret_cast<unsigned long>(thread->name()), // NOLINT
+ 0, 0, 0);
+#endif
thread->data()->thread_ = pthread_self();
ASSERT(thread->data()->thread_ != kNoThread);
thread->Run();
@@ -477,6 +614,7 @@ class OpenBSDMutex : public Mutex {
ASSERT(result == 0);
result = pthread_mutex_init(&mutex_, &attrs);
ASSERT(result == 0);
+ USE(result);
}
virtual ~OpenBSDMutex() { pthread_mutex_destroy(&mutex_); }
@@ -533,6 +671,14 @@ void OpenBSDSemaphore::Wait() {
}
+#ifndef TIMEVAL_TO_TIMESPEC
+#define TIMEVAL_TO_TIMESPEC(tv, ts) do { \
+ (ts)->tv_sec = (tv)->tv_sec; \
+ (ts)->tv_nsec = (tv)->tv_usec * 1000; \
+} while (false)
+#endif
+
+
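The fallback macro matches the BSD definition: seconds copy over unchanged and microseconds are scaled by 1000 into nanoseconds, which is exactly what the absolute-deadline computation in Wait(timeout) below needs. A quick standalone check with illustrative values:

    #include <sys/time.h>
    #include <time.h>
    #include <assert.h>

    #ifndef TIMEVAL_TO_TIMESPEC
    #define TIMEVAL_TO_TIMESPEC(tv, ts) do { \
      (ts)->tv_sec = (tv)->tv_sec;           \
      (ts)->tv_nsec = (tv)->tv_usec * 1000;  \
    } while (false)
    #endif

    int main() {
      struct timeval tv = { 5, 250 };  // 5 s and 250 us
      struct timespec ts;
      TIMEVAL_TO_TIMESPEC(&tv, &ts);
      assert(ts.tv_sec == 5 && ts.tv_nsec == 250000);  // us -> ns
      return 0;
    }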
bool OpenBSDSemaphore::Wait(int timeout) {
const long kOneSecondMicros = 1000000; // NOLINT
@@ -566,29 +712,15 @@ bool OpenBSDSemaphore::Wait(int timeout) {
}
}
-
Semaphore* OS::CreateSemaphore(int count) {
return new OpenBSDSemaphore(count);
}
static pthread_t GetThreadID() {
- pthread_t thread_id = pthread_self();
- return thread_id;
+ return pthread_self();
}
-
-class Sampler::PlatformData : public Malloced {
- public:
- PlatformData() : vm_tid_(GetThreadID()) {}
-
- pthread_t vm_tid() const { return vm_tid_; }
-
- private:
- pthread_t vm_tid_;
-};
-
-
static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
USE(info);
if (signal != SIGPROF) return;
@@ -620,16 +752,23 @@ static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
sample->pc = reinterpret_cast<Address>(ucontext->sc_rip);
sample->sp = reinterpret_cast<Address>(ucontext->sc_rsp);
sample->fp = reinterpret_cast<Address>(ucontext->sc_rbp);
-#elif V8_HOST_ARCH_ARM
- sample->pc = reinterpret_cast<Address>(ucontext->sc_r15);
- sample->sp = reinterpret_cast<Address>(ucontext->sc_r13);
- sample->fp = reinterpret_cast<Address>(ucontext->sc_r11);
#endif
sampler->SampleStack(sample);
sampler->Tick(sample);
}
+class Sampler::PlatformData : public Malloced {
+ public:
+ PlatformData() : vm_tid_(GetThreadID()) {}
+
+ pthread_t vm_tid() const { return vm_tid_; }
+
+ private:
+ pthread_t vm_tid_;
+};
+
+
class SignalSender : public Thread {
public:
enum SleepInterval {
@@ -639,21 +778,31 @@ class SignalSender : public Thread {
explicit SignalSender(int interval)
: Thread("SignalSender"),
+ vm_tgid_(getpid()),
interval_(interval) {}
+ static void InstallSignalHandler() {
+ struct sigaction sa;
+ sa.sa_sigaction = ProfilerSignalHandler;
+ sigemptyset(&sa.sa_mask);
+ sa.sa_flags = SA_RESTART | SA_SIGINFO;
+ signal_handler_installed_ =
+ (sigaction(SIGPROF, &sa, &old_signal_handler_) == 0);
+ }
+
+ static void RestoreSignalHandler() {
+ if (signal_handler_installed_) {
+ sigaction(SIGPROF, &old_signal_handler_, 0);
+ signal_handler_installed_ = false;
+ }
+ }
+
static void AddActiveSampler(Sampler* sampler) {
ScopedLock lock(mutex_);
SamplerRegistry::AddActiveSampler(sampler);
if (instance_ == NULL) {
- // Install a signal handler.
- struct sigaction sa;
- sa.sa_sigaction = ProfilerSignalHandler;
- sigemptyset(&sa.sa_mask);
- sa.sa_flags = SA_RESTART | SA_SIGINFO;
- signal_handler_installed_ =
- (sigaction(SIGPROF, &sa, &old_signal_handler_) == 0);
-
- // Start a thread that sends SIGPROF signal to VM threads.
+ // Start a thread that will send SIGPROF signals to VM threads
+ // when CPU profiling is enabled.
instance_ = new SignalSender(sampler->interval());
instance_->Start();
} else {
@@ -668,12 +817,7 @@ class SignalSender : public Thread {
RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(instance_);
delete instance_;
instance_ = NULL;
-
- // Restore the old signal handler.
- if (signal_handler_installed_) {
- sigaction(SIGPROF, &old_signal_handler_, 0);
- signal_handler_installed_ = false;
- }
+ RestoreSignalHandler();
}
}
@@ -685,6 +829,11 @@ class SignalSender : public Thread {
bool cpu_profiling_enabled =
(state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS);
bool runtime_profiler_enabled = RuntimeProfiler::IsEnabled();
+ if (cpu_profiling_enabled && !signal_handler_installed_) {
+ InstallSignalHandler();
+ } else if (!cpu_profiling_enabled && signal_handler_installed_) {
+ RestoreSignalHandler();
+ }
// When CPU profiling is enabled both JavaScript and C++ code is
// profiled. We must not suspend.
if (!cpu_profiling_enabled) {
@@ -751,6 +900,7 @@ class SignalSender : public Thread {
USE(result);
}
+ const int vm_tgid_;
const int interval_;
RuntimeProfilerRateLimiter rate_limiter_;
@@ -763,6 +913,7 @@ class SignalSender : public Thread {
DISALLOW_COPY_AND_ASSIGN(SignalSender);
};
+
Mutex* SignalSender::mutex_ = OS::CreateMutex();
SignalSender* SignalSender::instance_ = NULL;
struct sigaction SignalSender::old_signal_handler_;
diff --git a/deps/v8/src/platform-posix.cc b/deps/v8/src/platform-posix.cc
index 52cf02963..cccf0acb3 100644
--- a/deps/v8/src/platform-posix.cc
+++ b/deps/v8/src/platform-posix.cc
@@ -46,9 +46,9 @@
#undef MAP_TYPE
-#if defined(ANDROID)
+#if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT)
#define LOG_TAG "v8"
-#include <utils/Log.h> // LOG_PRI_VA
+#include <android/log.h>
#endif
#include "v8.h"
@@ -84,6 +84,34 @@ void OS::Guard(void* address, const size_t size) {
#endif // __CYGWIN__
+void* OS::GetRandomMmapAddr() {
+ Isolate* isolate = Isolate::UncheckedCurrent();
+ // Note that the current isolate isn't set up in a call path via
+ // CpuFeatures::Probe. We don't care about randomization in this case because
+ // the code page is immediately freed.
+ if (isolate != NULL) {
+#ifdef V8_TARGET_ARCH_X64
+ uint64_t rnd1 = V8::RandomPrivate(isolate);
+ uint64_t rnd2 = V8::RandomPrivate(isolate);
+ uint64_t raw_addr = (rnd1 << 32) ^ rnd2;
+ // Currently available CPUs have 48 bits of virtual addressing. Truncate
+ // the hint address to 46 bits to give the kernel a fighting chance of
+ // fulfilling our placement request.
+ raw_addr &= V8_UINT64_C(0x3ffffffff000);
+#else
+ uint32_t raw_addr = V8::RandomPrivate(isolate);
+ // The range 0x20000000 - 0x60000000 is relatively unpopulated across a
+ // variety of ASLR modes (PAE kernel, NX compat mode, etc) and on macos
+ // 10.6 and 10.7.
+ raw_addr &= 0x3ffff000;
+ raw_addr += 0x20000000;
+#endif
+ return reinterpret_cast<void*>(raw_addr);
+ }
+ return NULL;
+}
+
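The masking above is what keeps the hint plausible: on x64 the address is truncated to 46 bits and page-aligned, while on 32-bit targets it is confined to a page-aligned slot in [0x20000000, 0x60000000). A standalone check of the 32-bit arithmetic (fixed stand-in value instead of V8::RandomPrivate):

    #include <stdint.h>
    #include <stdio.h>

    int main() {
      uint32_t raw_addr = 0xdeadbeef;  // stand-in for the random source
      raw_addr &= 0x3ffff000;          // page-aligned, below 1GB
      raw_addr += 0x20000000;          // shift window up to start at 512MB
      // Always lands page-aligned in [0x20000000, 0x5ffff000].
      printf("hint = %#x\n", raw_addr);  // prints hint = 0x3eadb000
      return 0;
    }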
+
// ----------------------------------------------------------------------------
// Math functions
@@ -182,7 +210,7 @@ void OS::Print(const char* format, ...) {
void OS::VPrint(const char* format, va_list args) {
#if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT)
- LOG_PRI_VA(ANDROID_LOG_INFO, LOG_TAG, format, args);
+ __android_log_vprint(ANDROID_LOG_INFO, LOG_TAG, format, args);
#else
vprintf(format, args);
#endif
@@ -199,7 +227,7 @@ void OS::FPrint(FILE* out, const char* format, ...) {
void OS::VFPrint(FILE* out, const char* format, va_list args) {
#if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT)
- LOG_PRI_VA(ANDROID_LOG_INFO, LOG_TAG, format, args);
+ __android_log_vprint(ANDROID_LOG_INFO, LOG_TAG, format, args);
#else
vfprintf(out, format, args);
#endif
@@ -216,7 +244,7 @@ void OS::PrintError(const char* format, ...) {
void OS::VPrintError(const char* format, va_list args) {
#if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT)
- LOG_PRI_VA(ANDROID_LOG_ERROR, LOG_TAG, format, args);
+ __android_log_vprint(ANDROID_LOG_ERROR, LOG_TAG, format, args);
#else
vfprintf(stderr, format, args);
#endif
diff --git a/deps/v8/src/platform-win32.cc b/deps/v8/src/platform-win32.cc
index 97788e2f6..8771c4367 100644
--- a/deps/v8/src/platform-win32.cc
+++ b/deps/v8/src/platform-win32.cc
@@ -1397,41 +1397,101 @@ void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) {
}
-bool VirtualMemory::IsReserved() {
- return address_ != NULL;
-}
+VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
+
+
+VirtualMemory::VirtualMemory(size_t size)
+ : address_(ReserveRegion(size)), size_(size) { }
-VirtualMemory::VirtualMemory(size_t size) {
- address_ = VirtualAlloc(NULL, size, MEM_RESERVE, PAGE_NOACCESS);
- size_ = size;
+VirtualMemory::VirtualMemory(size_t size, size_t alignment)
+ : address_(NULL), size_(0) {
+ ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
+ size_t request_size = RoundUp(size + alignment,
+ static_cast<intptr_t>(OS::AllocateAlignment()));
+ void* address = ReserveRegion(request_size);
+ if (address == NULL) return;
+ Address base = RoundUp(static_cast<Address>(address), alignment);
+ // Try reducing the size by freeing and then reallocating a specific area.
+ bool result = ReleaseRegion(address, request_size);
+ USE(result);
+ ASSERT(result);
+ address = VirtualAlloc(base, size, MEM_RESERVE, PAGE_NOACCESS);
+ if (address != NULL) {
+ request_size = size;
+ ASSERT(base == static_cast<Address>(address));
+ } else {
+ // Resizing failed; just go with a bigger area.
+ address = ReserveRegion(request_size);
+ if (address == NULL) return;
+ }
+ address_ = address;
+ size_ = request_size;
}
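Unlike the POSIX ports, Windows cannot unmap part of a reservation, so the constructor releases the whole over-sized block and immediately re-reserves at the computed aligned base; if another thread claims that range in the window between the two calls, VirtualAlloc returns NULL and the code keeps a fresh over-sized (possibly unaligned) reservation instead. The core retry, as a hypothetical standalone sketch:

    #include <windows.h>
    #include <stdint.h>

    void* ReserveAlignedWin32(size_t size, size_t alignment) {
      size_t request = size + alignment;
      void* raw = VirtualAlloc(NULL, request, MEM_RESERVE, PAGE_NOACCESS);
      if (raw == NULL) return NULL;
      uintptr_t aligned =
          (reinterpret_cast<uintptr_t>(raw) + alignment - 1) &
          ~(alignment - 1);
      VirtualFree(raw, 0, MEM_RELEASE);  // must release the whole block
      // Racy: another thread may grab [aligned, aligned + size) right here.
      void* exact = VirtualAlloc(reinterpret_cast<void*>(aligned), size,
                                 MEM_RESERVE, PAGE_NOACCESS);
      if (exact != NULL) return exact;   // got the aligned base
      // Lost the race: settle for a bigger, not necessarily aligned area.
      return VirtualAlloc(NULL, request, MEM_RESERVE, PAGE_NOACCESS);
    }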
VirtualMemory::~VirtualMemory() {
if (IsReserved()) {
- if (0 == VirtualFree(address(), 0, MEM_RELEASE)) address_ = NULL;
+ bool result = ReleaseRegion(address_, size_);
+ ASSERT(result);
+ USE(result);
}
}
+bool VirtualMemory::IsReserved() {
+ return address_ != NULL;
+}
+
+
+void VirtualMemory::Reset() {
+ address_ = NULL;
+ size_ = 0;
+}
+
+
bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
+ if (CommitRegion(address, size, is_executable)) {
+ UpdateAllocatedSpaceLimits(address, static_cast<int>(size));
+ return true;
+ }
+ return false;
+}
+
+
+bool VirtualMemory::Uncommit(void* address, size_t size) {
+ ASSERT(IsReserved());
+ return UncommitRegion(address, size);
+}
+
+
+void* VirtualMemory::ReserveRegion(size_t size) {
+ return VirtualAlloc(NULL, size, MEM_RESERVE, PAGE_NOACCESS);
+}
+
+
+bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
- if (NULL == VirtualAlloc(address, size, MEM_COMMIT, prot)) {
+ if (NULL == VirtualAlloc(base, size, MEM_COMMIT, prot)) {
return false;
}
- UpdateAllocatedSpaceLimits(address, static_cast<int>(size));
+ UpdateAllocatedSpaceLimits(base, static_cast<int>(size));
return true;
}
-bool VirtualMemory::Uncommit(void* address, size_t size) {
- ASSERT(IsReserved());
- return VirtualFree(address, size, MEM_DECOMMIT) != false;
+bool VirtualMemory::UncommitRegion(void* base, size_t size) {
+ return VirtualFree(base, size, MEM_DECOMMIT) != 0;
}
+bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
+ return VirtualFree(base, 0, MEM_RELEASE) != 0;
+}
+
+
+
// ----------------------------------------------------------------------------
// Win32 thread support.
@@ -1453,6 +1513,7 @@ class Thread::PlatformData : public Malloced {
public:
explicit PlatformData(HANDLE thread) : thread_(thread) {}
HANDLE thread_;
+ unsigned thread_id_;
};
@@ -1496,13 +1557,15 @@ void Thread::Start() {
ThreadEntry,
this,
0,
- NULL));
+ &data_->thread_id_));
}
// Wait for thread to terminate.
void Thread::Join() {
- WaitForSingleObject(data_->thread_, INFINITE);
+ if (data_->thread_id_ != GetCurrentThreadId()) {
+ WaitForSingleObject(data_->thread_, INFINITE);
+ }
}
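The new thread_id_ check guards Join() against self-join: a thread that reaches Join() on its own handle would call WaitForSingleObject on an object that can only be signaled after the wait returns, deadlocking forever. The shape of the guard, as an illustrative standalone sketch:

    #include <windows.h>

    // Illustrative: skip the wait when Join() runs on the joined thread.
    void JoinThread(HANDLE thread, unsigned thread_id) {
      if (thread_id != GetCurrentThreadId()) {
        WaitForSingleObject(thread, INFINITE);  // normal cross-thread join
      }
      // Waiting on our own handle would never return, so fall through.
    }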
diff --git a/deps/v8/src/platform.h b/deps/v8/src/platform.h
index 034fe3404..f84b6b17a 100644
--- a/deps/v8/src/platform.h
+++ b/deps/v8/src/platform.h
@@ -178,6 +178,9 @@ class OS {
// Assign memory as a guard page so that access will cause an exception.
static void Guard(void* address, const size_t size);
+ // Generate a random address to be used for hinting mmap().
+ static void* GetRandomMmapAddr();
+
// Get the Alignment guaranteed by Allocate().
static size_t AllocateAlignment();
@@ -301,23 +304,46 @@ class OS {
DISALLOW_IMPLICIT_CONSTRUCTORS(OS);
};
-
+// Represents and controls an area of reserved memory.
+// Control of the reserved memory can be assigned to another VirtualMemory
+// object by assignment or copy-constructing. This removes the reserved memory
+// from the original object.
class VirtualMemory {
public:
+ // Empty VirtualMemory object, controlling no reserved memory.
+ VirtualMemory();
+
// Reserves virtual memory with size.
explicit VirtualMemory(size_t size);
+
+ // Reserves virtual memory containing an area of the given size that
+ // is aligned to the given alignment. The aligned area may not start at
+ // the position returned by address().
+ VirtualMemory(size_t size, size_t alignment);
+
+ // Releases the reserved memory, if any, controlled by this VirtualMemory
+ // object.
~VirtualMemory();
// Returns whether the memory has been reserved.
bool IsReserved();
+ // Initializes or resets an embedded VirtualMemory object.
+ void Reset();
+
// Returns the start address of the reserved memory.
+ // If the memory was reserved with an alignment, this address is not
+ // necessarily aligned. The user might need to round it up to a multiple of
+ // the alignment to get the start of the aligned block.
void* address() {
ASSERT(IsReserved());
return address_;
}
- // Returns the size of the reserved memory.
+ // Returns the size of the reserved memory. The returned value is only
+ // meaningful when IsReserved() returns true.
+ // If the memory was reserved with an alignment, this size may be larger
+ // than the requested size.
size_t size() { return size_; }
// Commits real memory. Returns whether the operation succeeded.
@@ -326,11 +352,43 @@ class VirtualMemory {
// Uncommit real memory. Returns whether the operation succeeded.
bool Uncommit(void* address, size_t size);
+ void Release() {
+ ASSERT(IsReserved());
+ // Notice: Order is important here. The VirtualMemory object might live
+ // inside the allocated region.
+ void* address = address_;
+ size_t size = size_;
+ Reset();
+ bool result = ReleaseRegion(address, size);
+ USE(result);
+ ASSERT(result);
+ }
+
+ // Assign control of the reserved region to a different VirtualMemory object.
+ // The old object is no longer functional (IsReserved() returns false).
+ void TakeControl(VirtualMemory* from) {
+ ASSERT(!IsReserved());
+ address_ = from->address_;
+ size_ = from->size_;
+ from->Reset();
+ }
+
+ static void* ReserveRegion(size_t size);
+
+ static bool CommitRegion(void* base, size_t size, bool is_executable);
+
+ static bool UncommitRegion(void* base, size_t size);
+
+ // Must be called with a base pointer that has been returned by ReserveRegion
+ // and the same size it was reserved with.
+ static bool ReleaseRegion(void* base, size_t size);
+
private:
void* address_; // Start address of the virtual memory.
size_t size_; // Size of the virtual memory.
};
+
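TakeControl is the move-like transfer the class comment promises: a caller can reserve on the stack, test the reservation, and then hand it to a longer-lived owner, leaving the temporary empty so its destructor releases nothing. A hypothetical usage sketch (names invented):

    class CodeAreaOwner {           // illustrative, not a V8 class
     public:
      bool Setup(size_t size) {
        VirtualMemory reservation(size);        // reserve on the stack
        if (!reservation.IsReserved()) return false;
        area_.TakeControl(&reservation);        // adopt; temporary is Reset()
        return true;                            // temporary's destructor now
      }                                         // has nothing to release
     private:
      VirtualMemory area_;          // empty until Setup() succeeds
    };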
// ----------------------------------------------------------------------------
// Thread
//
diff --git a/deps/v8/src/preparse-data.h b/deps/v8/src/preparse-data.h
index c6503c4fc..c77a47a10 100644
--- a/deps/v8/src/preparse-data.h
+++ b/deps/v8/src/preparse-data.h
@@ -49,7 +49,7 @@ class ParserRecorder {
int end,
int literals,
int properties,
- int strict_mode) = 0;
+ LanguageMode language_mode) = 0;
// Logs a symbol creation of a literal or identifier.
virtual void LogAsciiSymbol(int start, Vector<const char> literal) { }
@@ -89,12 +89,12 @@ class FunctionLoggingParserRecorder : public ParserRecorder {
int end,
int literals,
int properties,
- int strict_mode) {
+ LanguageMode language_mode) {
function_store_.Add(start);
function_store_.Add(end);
function_store_.Add(literals);
function_store_.Add(properties);
- function_store_.Add(strict_mode);
+ function_store_.Add(language_mode);
}
// Logs an error message and marks the log as containing an error.
diff --git a/deps/v8/src/preparser-api.cc b/deps/v8/src/preparser-api.cc
index 899489e25..1bca9a333 100644
--- a/deps/v8/src/preparser-api.cc
+++ b/deps/v8/src/preparser-api.cc
@@ -182,13 +182,13 @@ PreParserData Preparse(UnicodeInputStream* input, size_t max_stack) {
internal::InputStreamUTF16Buffer buffer(input);
uintptr_t stack_limit = reinterpret_cast<uintptr_t>(&buffer) - max_stack;
internal::UnicodeCache unicode_cache;
- internal::JavaScriptScanner scanner(&unicode_cache);
+ internal::Scanner scanner(&unicode_cache);
scanner.Initialize(&buffer);
internal::CompleteParserRecorder recorder;
preparser::PreParser::PreParseResult result =
preparser::PreParser::PreParseProgram(&scanner,
&recorder,
- true,
+ internal::kAllowLazy,
stack_limit);
if (result == preparser::PreParser::kPreParseStackOverflow) {
return PreParserData::StackOverflow();
diff --git a/deps/v8/src/preparser.cc b/deps/v8/src/preparser.cc
index 47d21bac1..49cadb661 100644
--- a/deps/v8/src/preparser.cc
+++ b/deps/v8/src/preparser.cc
@@ -52,6 +52,34 @@ int isfinite(double value);
namespace preparser {
+PreParser::PreParseResult PreParser::PreParseLazyFunction(
+ i::LanguageMode mode, i::ParserRecorder* log) {
+ log_ = log;
+ // Lazy functions always have trivial outer scopes (no with/catch scopes).
+ Scope top_scope(&scope_, kTopLevelScope);
+ set_language_mode(mode);
+ Scope function_scope(&scope_, kFunctionScope);
+ ASSERT_EQ(i::Token::LBRACE, scanner_->current_token());
+ bool ok = true;
+ int start_position = scanner_->peek_location().beg_pos;
+ ParseLazyFunctionLiteralBody(&ok);
+ if (stack_overflow_) return kPreParseStackOverflow;
+ if (!ok) {
+ ReportUnexpectedToken(scanner_->current_token());
+ } else {
+ ASSERT_EQ(i::Token::RBRACE, scanner_->peek());
+ if (!is_classic_mode()) {
+ int end_pos = scanner_->location().end_pos;
+ CheckOctalLiteral(start_position, end_pos, &ok);
+ if (ok) {
+ CheckDelayedStrictModeViolation(start_position, end_pos, &ok);
+ }
+ }
+ }
+ return kPreParseSuccess;
+}
+
+
// Preparsing checks a JavaScript program and emits preparse data that helps
// later parsing run faster.
// See preparse-data.h for the data.
@@ -72,7 +100,7 @@ void PreParser::ReportUnexpectedToken(i::Token::Value token) {
if (token == i::Token::ILLEGAL && stack_overflow_) {
return;
}
- i::JavaScriptScanner::Location source_location = scanner_->location();
+ i::Scanner::Location source_location = scanner_->location();
// Four of the tokens are treated specially
switch (token) {
@@ -117,8 +145,21 @@ void PreParser::CheckOctalLiteral(int beg_pos, int end_pos, bool* ok) {
PreParser::Statement PreParser::ParseSourceElement(bool* ok) {
+ // (Ecma 262 5th Edition, clause 14):
+ // SourceElement:
+ // Statement
+ // FunctionDeclaration
+ //
+ // In harmony mode we additionally allow the following productions:
+ // SourceElement:
+ // LetDeclaration
+ // ConstDeclaration
+
switch (peek()) {
+ case i::Token::FUNCTION:
+ return ParseFunctionDeclaration(ok);
case i::Token::LET:
+ case i::Token::CONST:
return ParseVariableStatement(kSourceElement, ok);
default:
return ParseStatement(ok);
@@ -136,7 +177,8 @@ PreParser::SourceElements PreParser::ParseSourceElements(int end_token,
Statement statement = ParseSourceElement(CHECK_OK);
if (allow_directive_prologue) {
if (statement.IsUseStrictLiteral()) {
- set_strict_mode();
+ set_language_mode(harmony_scoping_ ?
+ i::EXTENDED_MODE : i::STRICT_MODE);
} else if (!statement.IsStringLiteral()) {
allow_directive_prologue = false;
}
@@ -185,6 +227,7 @@ PreParser::Statement PreParser::ParseStatement(bool* ok) {
return ParseBlock(ok);
case i::Token::CONST:
+ case i::Token::LET:
case i::Token::VAR:
return ParseVariableStatement(kStatement, ok);
@@ -225,8 +268,19 @@ PreParser::Statement PreParser::ParseStatement(bool* ok) {
case i::Token::TRY:
return ParseTryStatement(ok);
- case i::Token::FUNCTION:
- return ParseFunctionDeclaration(ok);
+ case i::Token::FUNCTION: {
+ i::Scanner::Location start_location = scanner_->peek_location();
+ Statement statement = ParseFunctionDeclaration(CHECK_OK);
+ i::Scanner::Location end_location = scanner_->location();
+ if (!is_classic_mode()) {
+ ReportMessageAt(start_location.beg_pos, end_location.end_pos,
+ "strict_function", NULL);
+ *ok = false;
+ return Statement::Default();
+ } else {
+ return statement;
+ }
+ }
case i::Token::DEBUGGER:
return ParseDebuggerStatement(ok);
@@ -271,14 +325,10 @@ PreParser::Statement PreParser::ParseBlock(bool* ok) {
//
Expect(i::Token::LBRACE, CHECK_OK);
while (peek() != i::Token::RBRACE) {
- i::Scanner::Location start_location = scanner_->peek_location();
- Statement statement = ParseSourceElement(CHECK_OK);
- i::Scanner::Location end_location = scanner_->location();
- if (strict_mode() && statement.IsFunctionDeclaration()) {
- ReportMessageAt(start_location.beg_pos, end_location.end_pos,
- "strict_function", NULL);
- *ok = false;
- return Statement::Default();
+ if (is_extended_mode()) {
+ ParseSourceElement(CHECK_OK);
+ } else {
+ ParseStatement(CHECK_OK);
}
}
Expect(i::Token::RBRACE, ok);
@@ -294,6 +344,7 @@ PreParser::Statement PreParser::ParseVariableStatement(
Statement result = ParseVariableDeclarations(var_context,
NULL,
+ NULL,
CHECK_OK);
ExpectSemicolon(CHECK_OK);
return result;
@@ -307,22 +358,73 @@ PreParser::Statement PreParser::ParseVariableStatement(
// of 'for-in' loops.
PreParser::Statement PreParser::ParseVariableDeclarations(
VariableDeclarationContext var_context,
+ VariableDeclarationProperties* decl_props,
int* num_decl,
bool* ok) {
// VariableDeclarations ::
// ('var' | 'const') (Identifier ('=' AssignmentExpression)?)+[',']
-
+ //
+ // The ES6 Draft Rev3 specifies the following grammar for const declarations
+ //
+ // ConstDeclaration ::
+ // const ConstBinding (',' ConstBinding)* ';'
+ // ConstBinding ::
+ // Identifier '=' AssignmentExpression
+ //
+ // TODO(ES6):
+ // ConstBinding ::
+ // BindingPattern '=' AssignmentExpression
+ bool require_initializer = false;
if (peek() == i::Token::VAR) {
Consume(i::Token::VAR);
} else if (peek() == i::Token::CONST) {
- if (strict_mode()) {
+ // TODO(ES6): The ES6 Draft Rev4 section 12.2.2 reads:
+ //
+ // ConstDeclaration : const ConstBinding (',' ConstBinding)* ';'
+ //
+ // * It is a Syntax Error if the code that matches this production is not
+ // contained in extended code.
+ //
+ // However disallowing const in classic mode will break compatibility with
+ // existing pages. Therefore we keep allowing const with the old
+ // non-harmony semantics in classic mode.
+ Consume(i::Token::CONST);
+ switch (language_mode()) {
+ case i::CLASSIC_MODE:
+ break;
+ case i::STRICT_MODE: {
+ i::Scanner::Location location = scanner_->peek_location();
+ ReportMessageAt(location, "strict_const", NULL);
+ *ok = false;
+ return Statement::Default();
+ }
+ case i::EXTENDED_MODE:
+ if (var_context != kSourceElement &&
+ var_context != kForStatement) {
+ i::Scanner::Location location = scanner_->peek_location();
+ ReportMessageAt(location.beg_pos, location.end_pos,
+ "unprotected_const", NULL);
+ *ok = false;
+ return Statement::Default();
+ }
+ require_initializer = true;
+ break;
+ }
+ } else if (peek() == i::Token::LET) {
+ // ES6 Draft Rev4 section 12.2.1:
+ //
+ // LetDeclaration : let LetBindingList ;
+ //
+ // * It is a Syntax Error if the code that matches this production is not
+ // contained in extended code.
+ if (!is_extended_mode()) {
i::Scanner::Location location = scanner_->peek_location();
- ReportMessageAt(location, "strict_const", NULL);
+ ReportMessageAt(location.beg_pos, location.end_pos,
+ "illegal_let", NULL);
*ok = false;
return Statement::Default();
}
- Consume(i::Token::CONST);
- } else if (peek() == i::Token::LET) {
+ Consume(i::Token::LET);
if (var_context != kSourceElement &&
var_context != kForStatement) {
i::Scanner::Location location = scanner_->peek_location();
@@ -331,7 +433,6 @@ PreParser::Statement PreParser::ParseVariableDeclarations(
*ok = false;
return Statement::Default();
}
- Consume(i::Token::LET);
} else {
*ok = false;
return Statement::Default();
@@ -346,7 +447,7 @@ PreParser::Statement PreParser::ParseVariableDeclarations(
// Parse variable name.
if (nvars > 0) Consume(i::Token::COMMA);
Identifier identifier = ParseIdentifier(CHECK_OK);
- if (strict_mode() && !identifier.IsValidStrictVariable()) {
+ if (!is_classic_mode() && !identifier.IsValidStrictVariable()) {
StrictModeIdentifierViolation(scanner_->location(),
"strict_var_name",
identifier,
@@ -354,9 +455,10 @@ PreParser::Statement PreParser::ParseVariableDeclarations(
return Statement::Default();
}
nvars++;
- if (peek() == i::Token::ASSIGN) {
+ if (peek() == i::Token::ASSIGN || require_initializer) {
Expect(i::Token::ASSIGN, CHECK_OK);
ParseAssignmentExpression(var_context != kForStatement, CHECK_OK);
+ if (decl_props != NULL) *decl_props = kHasInitializers;
}
} while (peek() == i::Token::COMMA);
@@ -372,18 +474,11 @@ PreParser::Statement PreParser::ParseExpressionOrLabelledStatement(bool* ok) {
Expression expr = ParseExpression(true, CHECK_OK);
if (expr.IsRawIdentifier()) {
- if (peek() == i::Token::COLON &&
- (!strict_mode() || !expr.AsIdentifier().IsFutureReserved())) {
+ ASSERT(!expr.AsIdentifier().IsFutureReserved());
+ ASSERT(is_classic_mode() || !expr.AsIdentifier().IsFutureStrictReserved());
+ if (peek() == i::Token::COLON) {
Consume(i::Token::COLON);
- i::Scanner::Location start_location = scanner_->peek_location();
- Statement statement = ParseStatement(CHECK_OK);
- if (strict_mode() && statement.IsFunctionDeclaration()) {
- i::Scanner::Location end_location = scanner_->location();
- ReportMessageAt(start_location.beg_pos, end_location.end_pos,
- "strict_function", NULL);
- *ok = false;
- }
- return Statement::Default();
+ return ParseStatement(ok);
}
// Preparsing is disabled for extensions (because the extension details
// aren't passed to lazily compiled functions), so we don't
@@ -476,7 +571,7 @@ PreParser::Statement PreParser::ParseWithStatement(bool* ok) {
// WithStatement ::
// 'with' '(' Expression ')' Statement
Expect(i::Token::WITH, CHECK_OK);
- if (strict_mode()) {
+ if (!is_classic_mode()) {
i::Scanner::Location location = scanner_->location();
ReportMessageAt(location, "strict_mode_with", NULL);
*ok = false;
@@ -513,15 +608,7 @@ PreParser::Statement PreParser::ParseSwitchStatement(bool* ok) {
Expect(i::Token::DEFAULT, CHECK_OK);
Expect(i::Token::COLON, CHECK_OK);
} else {
- i::Scanner::Location start_location = scanner_->peek_location();
- Statement statement = ParseStatement(CHECK_OK);
- if (strict_mode() && statement.IsFunctionDeclaration()) {
- i::Scanner::Location end_location = scanner_->location();
- ReportMessageAt(start_location.beg_pos, end_location.end_pos,
- "strict_function", NULL);
- *ok = false;
- return Statement::Default();
- }
+ ParseStatement(CHECK_OK);
}
token = peek();
}
@@ -566,9 +653,14 @@ PreParser::Statement PreParser::ParseForStatement(bool* ok) {
if (peek() != i::Token::SEMICOLON) {
if (peek() == i::Token::VAR || peek() == i::Token::CONST ||
peek() == i::Token::LET) {
+ bool is_let = peek() == i::Token::LET;
int decl_count;
- ParseVariableDeclarations(kForStatement, &decl_count, CHECK_OK);
- if (peek() == i::Token::IN && decl_count == 1) {
+ VariableDeclarationProperties decl_props = kHasNoInitializers;
+ ParseVariableDeclarations(
+ kForStatement, &decl_props, &decl_count, CHECK_OK);
+ bool accept_IN = decl_count == 1 &&
+ !(is_let && decl_props == kHasInitializers);
+ if (peek() == i::Token::IN && accept_IN) {
Expect(i::Token::IN, CHECK_OK);
ParseExpression(true, CHECK_OK);
Expect(i::Token::RPAREN, CHECK_OK);
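Because accept_IN is cleared when a let declaration carries an initializer, the preparser now distinguishes these two loops; a small sketch:

    var obj = { a: 1 };
    for (let k in obj) { }          // one declaration, no initializer: parses
    // for (let k = 0 in obj) { }   // is_let && kHasInitializers: rejected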
@@ -613,7 +705,7 @@ PreParser::Statement PreParser::ParseThrowStatement(bool* ok) {
Expect(i::Token::THROW, CHECK_OK);
if (scanner_->HasAnyLineTerminatorBeforeNext()) {
- i::JavaScriptScanner::Location pos = scanner_->location();
+ i::Scanner::Location pos = scanner_->location();
ReportMessageAt(pos, "newline_after_throw", NULL);
*ok = false;
return Statement::Default();
@@ -648,7 +740,7 @@ PreParser::Statement PreParser::ParseTryStatement(bool* ok) {
Consume(i::Token::CATCH);
Expect(i::Token::LPAREN, CHECK_OK);
Identifier id = ParseIdentifier(CHECK_OK);
- if (strict_mode() && !id.IsValidStrictVariable()) {
+ if (!is_classic_mode() && !id.IsValidStrictVariable()) {
StrictModeIdentifierViolation(scanner_->location(),
"strict_catch_variable",
id,
@@ -726,7 +818,8 @@ PreParser::Expression PreParser::ParseAssignmentExpression(bool accept_IN,
return expression;
}
- if (strict_mode() && expression.IsIdentifier() &&
+ if (!is_classic_mode() &&
+ expression.IsIdentifier() &&
expression.AsIdentifier().IsEvalOrArguments()) {
i::Scanner::Location after = scanner_->location();
ReportMessageAt(before.beg_pos, after.end_pos,
@@ -814,7 +907,8 @@ PreParser::Expression PreParser::ParseUnaryExpression(bool* ok) {
op = Next();
i::Scanner::Location before = scanner_->peek_location();
Expression expression = ParseUnaryExpression(CHECK_OK);
- if (strict_mode() && expression.IsIdentifier() &&
+ if (!is_classic_mode() &&
+ expression.IsIdentifier() &&
expression.AsIdentifier().IsEvalOrArguments()) {
i::Scanner::Location after = scanner_->location();
ReportMessageAt(before.beg_pos, after.end_pos,
@@ -836,7 +930,8 @@ PreParser::Expression PreParser::ParsePostfixExpression(bool* ok) {
Expression expression = ParseLeftHandSideExpression(CHECK_OK);
if (!scanner_->HasAnyLineTerminatorBeforeNext() &&
i::Token::IsCountOp(peek())) {
- if (strict_mode() && expression.IsIdentifier() &&
+ if (!is_classic_mode() &&
+ expression.IsIdentifier() &&
expression.AsIdentifier().IsEvalOrArguments()) {
i::Scanner::Location after = scanner_->location();
ReportMessageAt(before.beg_pos, after.end_pos,
@@ -1023,7 +1118,7 @@ PreParser::Expression PreParser::ParsePrimaryExpression(bool* ok) {
}
case i::Token::FUTURE_STRICT_RESERVED_WORD:
- if (strict_mode()) {
+ if (!is_classic_mode()) {
Next();
i::Scanner::Location location = scanner_->location();
ReportMessageAt(location, "strict_reserved_word", NULL);
@@ -1123,7 +1218,7 @@ void PreParser::CheckDuplicate(DuplicateFinder* finder,
if (HasConflict(old_type, type)) {
if (IsDataDataConflict(old_type, type)) {
// Both are data properties.
- if (!strict_mode()) return;
+ if (is_classic_mode()) return;
ReportMessageAt(scanner_->location(),
"strict_duplicate_property", NULL);
} else if (IsDataAccessorConflict(old_type, type)) {
@@ -1306,9 +1401,6 @@ PreParser::Expression PreParser::ParseFunctionLiteral(bool* ok) {
}
Expect(i::Token::RPAREN, CHECK_OK);
- Expect(i::Token::LBRACE, CHECK_OK);
- int function_block_pos = scanner_->location().beg_pos;
-
// Determine if the function will be lazily compiled.
// Currently only happens to top-level functions.
// Optimistically assume that all top-level functions are lazily compiled.
@@ -1317,26 +1409,15 @@ PreParser::Expression PreParser::ParseFunctionLiteral(bool* ok) {
!parenthesized_function_);
parenthesized_function_ = false;
+ Expect(i::Token::LBRACE, CHECK_OK);
if (is_lazily_compiled) {
- log_->PauseRecording();
- ParseSourceElements(i::Token::RBRACE, ok);
- log_->ResumeRecording();
- if (!*ok) Expression::Default();
-
- Expect(i::Token::RBRACE, CHECK_OK);
-
- // Position right after terminal '}'.
- int end_pos = scanner_->location().end_pos;
- log_->LogFunction(function_block_pos, end_pos,
- function_scope.materialized_literal_count(),
- function_scope.expected_properties(),
- strict_mode() ? 1 : 0);
+ ParseLazyFunctionLiteralBody(CHECK_OK);
} else {
- ParseSourceElements(i::Token::RBRACE, CHECK_OK);
- Expect(i::Token::RBRACE, CHECK_OK);
+ ParseSourceElements(i::Token::RBRACE, ok);
}
+ Expect(i::Token::RBRACE, CHECK_OK);
- if (strict_mode()) {
+ if (!is_classic_mode()) {
int end_position = scanner_->location().end_pos;
CheckOctalLiteral(start_position, end_position, CHECK_OK);
CheckDelayedStrictModeViolation(start_position, end_position, CHECK_OK);
@@ -1347,11 +1428,31 @@ PreParser::Expression PreParser::ParseFunctionLiteral(bool* ok) {
}
+void PreParser::ParseLazyFunctionLiteralBody(bool* ok) {
+ int body_start = scanner_->location().beg_pos;
+ log_->PauseRecording();
+ ParseSourceElements(i::Token::RBRACE, ok);
+ log_->ResumeRecording();
+ if (!*ok) return;
+
+ // Position right after terminal '}'.
+ ASSERT_EQ(i::Token::RBRACE, scanner_->peek());
+ int body_end = scanner_->peek_location().end_pos;
+ log_->LogFunction(body_start, body_end,
+ scope_->materialized_literal_count(),
+ scope_->expected_properties(),
+ language_mode());
+}
+
+
PreParser::Expression PreParser::ParseV8Intrinsic(bool* ok) {
// CallRuntime ::
// '%' Identifier Arguments
-
Expect(i::Token::MOD, CHECK_OK);
+ if (!allow_natives_syntax_) {
+ *ok = false;
+ return Expression::Default();
+ }
ParseIdentifier(CHECK_OK);
ParseArguments(ok);
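The new guard means '%'-prefixed runtime calls are only preparsed when natives syntax is enabled (V8's --allow-natives-syntax flag); sketched:

    // Requires running V8/d8 with --allow-natives-syntax:
    //   %DebugPrint({});
    // Without the flag the preparser now fails at '%' rather than
    // consuming the identifier and arguments.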
@@ -1434,9 +1535,16 @@ PreParser::Identifier PreParser::ParseIdentifier(bool* ok) {
ReportMessageAt(location.beg_pos, location.end_pos,
"reserved_word", NULL);
*ok = false;
+ return GetIdentifierSymbol();
}
- // FALLTHROUGH
case i::Token::FUTURE_STRICT_RESERVED_WORD:
+ if (!is_classic_mode()) {
+ i::Scanner::Location location = scanner_->location();
+ ReportMessageAt(location.beg_pos, location.end_pos,
+ "strict_reserved_word", NULL);
+ *ok = false;
+ }
+ // FALLTHROUGH
case i::Token::IDENTIFIER:
return GetIdentifierSymbol();
default:
@@ -1449,7 +1557,7 @@ PreParser::Identifier PreParser::ParseIdentifier(bool* ok) {
void PreParser::SetStrictModeViolation(i::Scanner::Location location,
const char* type,
bool* ok) {
- if (strict_mode()) {
+ if (!is_classic_mode()) {
ReportMessageAt(location, type, NULL);
*ok = false;
return;
@@ -1489,7 +1597,7 @@ void PreParser::StrictModeIdentifierViolation(i::Scanner::Location location,
} else if (identifier.IsFutureStrictReserved()) {
type = "strict_reserved_word";
}
- if (strict_mode()) {
+ if (!is_classic_mode()) {
ReportMessageAt(location, type, NULL);
*ok = false;
return;
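The strict_reserved_word paths above fire for ES5's future reserved words once the code leaves classic mode; for example:

    var interface = 1;        // legal in classic mode
    // "use strict";
    // var interface = 1;     // "strict_reserved_word" outside classic mode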
diff --git a/deps/v8/src/preparser.h b/deps/v8/src/preparser.h
index b97b7cff6..fc8a4a0ca 100644
--- a/deps/v8/src/preparser.h
+++ b/deps/v8/src/preparser.h
@@ -110,19 +110,51 @@ class PreParser {
kPreParseSuccess
};
+
+ PreParser(i::Scanner* scanner,
+ i::ParserRecorder* log,
+ uintptr_t stack_limit,
+ bool allow_lazy,
+ bool allow_natives_syntax)
+ : scanner_(scanner),
+ log_(log),
+ scope_(NULL),
+ stack_limit_(stack_limit),
+ strict_mode_violation_location_(i::Scanner::Location::invalid()),
+ strict_mode_violation_type_(NULL),
+ stack_overflow_(false),
+ allow_lazy_(allow_lazy),
+ allow_natives_syntax_(allow_natives_syntax),
+ parenthesized_function_(false),
+ harmony_scoping_(scanner->HarmonyScoping()) { }
+
~PreParser() {}
// Pre-parse the program from the character stream; returns true on
// success (even if parsing failed, the pre-parse data successfully
// captured the syntax error), and false if a stack-overflow happened
// during parsing.
- static PreParseResult PreParseProgram(i::JavaScriptScanner* scanner,
+ static PreParseResult PreParseProgram(i::Scanner* scanner,
i::ParserRecorder* log,
- bool allow_lazy,
+ int flags,
uintptr_t stack_limit) {
- return PreParser(scanner, log, stack_limit, allow_lazy).PreParse();
+ bool allow_lazy = (flags & i::kAllowLazy) != 0;
+ bool allow_natives_syntax = (flags & i::kAllowNativesSyntax) != 0;
+ return PreParser(scanner, log, stack_limit,
+ allow_lazy, allow_natives_syntax).PreParse();
}
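PreParseProgram now receives a flags bitfield instead of a lone allow_lazy bool; a JavaScript sketch of the same decoding pattern (the numeric bit positions are assumptions; only the kAllowLazy/kAllowNativesSyntax names come from this patch):

    var kAllowLazy = 1 << 0;             // assumed bit positions
    var kAllowNativesSyntax = 1 << 1;
    var flags = kAllowLazy | kAllowNativesSyntax;
    var allowLazy = (flags & kAllowLazy) !== 0;              // true
    var allowNatives = (flags & kAllowNativesSyntax) !== 0;  // true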
+ // Parses a single function literal, from the opening parentheses before
+ // parameters to the closing brace after the body.
+  // Returns a FunctionEntry describing the body of the function in enough
+ // detail that it can be lazily compiled.
+ // The scanner is expected to have matched the "function" keyword and
+ // parameters, and have consumed the initial '{'.
+  // At return, unless an error occurred, the scanner is positioned before
+  // the final '}'.
+ PreParseResult PreParseLazyFunction(i::LanguageMode mode,
+ i::ParserRecorder* log);
+
private:
// Used to detect duplicates in object literals. Each of the values
// kGetterProperty, kSetterProperty and kValueProperty represents
@@ -179,6 +211,12 @@ class PreParser {
kForStatement
};
+ // If a list of variable declarations includes any initializers.
+ enum VariableDeclarationProperties {
+ kHasInitializers,
+ kHasNoInitializers
+ };
+
class Expression;
class Identifier {
@@ -408,7 +446,8 @@ class PreParser {
materialized_literal_count_(0),
expected_properties_(0),
with_nesting_count_(0),
- strict_((prev_ != NULL) && prev_->is_strict()) {
+ language_mode_(
+ (prev_ != NULL) ? prev_->language_mode() : i::CLASSIC_MODE) {
*variable = this;
}
~Scope() { *variable_ = prev_; }
@@ -418,8 +457,15 @@ class PreParser {
int expected_properties() { return expected_properties_; }
int materialized_literal_count() { return materialized_literal_count_; }
bool IsInsideWith() { return with_nesting_count_ != 0; }
- bool is_strict() { return strict_; }
- void set_strict() { strict_ = true; }
+ bool is_classic_mode() {
+ return language_mode_ == i::CLASSIC_MODE;
+ }
+ i::LanguageMode language_mode() {
+ return language_mode_;
+ }
+ void set_language_mode(i::LanguageMode language_mode) {
+ language_mode_ = language_mode;
+ }
void EnterWith() { with_nesting_count_++; }
void LeaveWith() { with_nesting_count_--; }
@@ -430,25 +476,9 @@ class PreParser {
int materialized_literal_count_;
int expected_properties_;
int with_nesting_count_;
- bool strict_;
+ i::LanguageMode language_mode_;
};
- // Private constructor only used in PreParseProgram.
- PreParser(i::JavaScriptScanner* scanner,
- i::ParserRecorder* log,
- uintptr_t stack_limit,
- bool allow_lazy)
- : scanner_(scanner),
- log_(log),
- scope_(NULL),
- stack_limit_(stack_limit),
- strict_mode_violation_location_(i::Scanner::Location::invalid()),
- strict_mode_violation_type_(NULL),
- stack_overflow_(false),
- allow_lazy_(true),
- parenthesized_function_(false),
- harmony_block_scoping_(scanner->HarmonyBlockScoping()) { }
-
// Preparse the program. Only called in PreParseProgram after creating
// the instance.
PreParseResult PreParse() {
@@ -459,7 +489,7 @@ class PreParser {
if (stack_overflow_) return kPreParseStackOverflow;
if (!ok) {
ReportUnexpectedToken(scanner_->current_token());
- } else if (scope_->is_strict()) {
+ } else if (!scope_->is_classic_mode()) {
CheckOctalLiteral(start_position, scanner_->location().end_pos, &ok);
}
return kPreParseSuccess;
@@ -493,6 +523,7 @@ class PreParser {
Statement ParseVariableStatement(VariableDeclarationContext var_context,
bool* ok);
Statement ParseVariableDeclarations(VariableDeclarationContext var_context,
+ VariableDeclarationProperties* decl_props,
int* num_decl,
bool* ok);
Statement ParseExpressionOrLabelledStatement(bool* ok);
@@ -527,6 +558,7 @@ class PreParser {
Arguments ParseArguments(bool* ok);
Expression ParseFunctionLiteral(bool* ok);
+ void ParseLazyFunctionLiteralBody(bool* ok);
Identifier ParseIdentifier(bool* ok);
Identifier ParseIdentifierName(bool* ok);
@@ -562,11 +594,19 @@ class PreParser {
bool peek_any_identifier();
- void set_strict_mode() {
- scope_->set_strict();
+ void set_language_mode(i::LanguageMode language_mode) {
+ scope_->set_language_mode(language_mode);
+ }
+
+ bool is_classic_mode() {
+ return scope_->language_mode() == i::CLASSIC_MODE;
+ }
+
+ bool is_extended_mode() {
+ return scope_->language_mode() == i::EXTENDED_MODE;
}
- bool strict_mode() { return scope_->is_strict(); }
+ i::LanguageMode language_mode() { return scope_->language_mode(); }
void Consume(i::Token::Value token) { Next(); }
@@ -599,7 +639,7 @@ class PreParser {
Identifier identifier,
bool* ok);
- i::JavaScriptScanner* scanner_;
+ i::Scanner* scanner_;
i::ParserRecorder* log_;
Scope* scope_;
uintptr_t stack_limit_;
@@ -607,8 +647,9 @@ class PreParser {
const char* strict_mode_violation_type_;
bool stack_overflow_;
bool allow_lazy_;
+ bool allow_natives_syntax_;
bool parenthesized_function_;
- bool harmony_block_scoping_;
+ bool harmony_scoping_;
};
} } // v8::preparser
diff --git a/deps/v8/src/prettyprinter.cc b/deps/v8/src/prettyprinter.cc
index 663af284b..37c76ceef 100644
--- a/deps/v8/src/prettyprinter.cc
+++ b/deps/v8/src/prettyprinter.cc
@@ -372,13 +372,6 @@ void PrettyPrinter::VisitCompareOperation(CompareOperation* node) {
}
-void PrettyPrinter::VisitCompareToNull(CompareToNull* node) {
- Print("(");
- Visit(node->expression());
- Print("%s null)", Token::String(node->op()));
-}
-
-
void PrettyPrinter::VisitThisFunction(ThisFunction* node) {
Print("<this-function>");
}
@@ -1020,15 +1013,6 @@ void AstPrinter::VisitCompareOperation(CompareOperation* node) {
}
-void AstPrinter::VisitCompareToNull(CompareToNull* node) {
- const char* name = node->is_strict()
- ? "COMPARE-TO-NULL-STRICT"
- : "COMPARE-TO-NULL";
- IndentedScope indent(this, name, node);
- Visit(node->expression());
-}
-
-
void AstPrinter::VisitThisFunction(ThisFunction* node) {
IndentedScope indent(this, "THIS-FUNCTION");
}
@@ -1404,16 +1388,6 @@ void JsonAstBuilder::VisitCompareOperation(CompareOperation* expr) {
}
-void JsonAstBuilder::VisitCompareToNull(CompareToNull* expr) {
- TagScope tag(this, "CompareToNull");
- {
- AttributesScope attributes(this);
- AddAttribute("is_strict", expr->is_strict());
- }
- Visit(expr->expression());
-}
-
-
void JsonAstBuilder::VisitThisFunction(ThisFunction* expr) {
TagScope tag(this, "ThisFunction");
}
diff --git a/deps/v8/src/profile-generator.cc b/deps/v8/src/profile-generator.cc
index adf55ad2e..5626acaba 100644
--- a/deps/v8/src/profile-generator.cc
+++ b/deps/v8/src/profile-generator.cc
@@ -150,9 +150,11 @@ const char* StringsStorage::GetVFormatted(const char* format, va_list args) {
const char* StringsStorage::GetName(String* name) {
if (name->IsString()) {
- return AddOrDisposeString(
- name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL).Detach(),
- name->Hash());
+ int length = Min(kMaxNameSize, name->length());
+ SmartArrayPointer<char> data =
+ name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL, 0, length);
+ uint32_t hash = HashSequentialString(*data, length);
+ return AddOrDisposeString(data.Detach(), hash);
}
return "";
}
@@ -488,8 +490,6 @@ void CpuProfile::Print() {
CodeEntry* const CodeMap::kSharedFunctionCodeEntry = NULL;
const CodeMap::CodeTreeConfig::Key CodeMap::CodeTreeConfig::kNoKey = NULL;
-const CodeMap::CodeTreeConfig::Value CodeMap::CodeTreeConfig::kNoValue =
- CodeMap::CodeEntryInfo(NULL, 0);
void CodeMap::AddCode(Address addr, CodeEntry* entry, unsigned size) {
@@ -1403,10 +1403,12 @@ void HeapObjectsMap::MoveObject(Address from, Address to) {
if (entry != NULL) {
void* value = entry->value;
entries_map_.Remove(from, AddressHash(from));
- entry = entries_map_.Lookup(to, AddressHash(to), true);
- // We can have an entry at the new location, it is OK, as GC can overwrite
- // dead objects with alive objects being moved.
- entry->value = value;
+ if (to != NULL) {
+ entry = entries_map_.Lookup(to, AddressHash(to), true);
+      // An entry may already exist at the new location; that is OK, as
+      // the GC can overwrite dead objects with live objects being moved.
+ entry->value = value;
+ }
}
}
@@ -1528,6 +1530,8 @@ void HeapSnapshotsCollection::RemoveSnapshot(HeapSnapshot* snapshot) {
Handle<HeapObject> HeapSnapshotsCollection::FindHeapObjectById(uint64_t id) {
+ // First perform a full GC in order to avoid dead objects.
+ HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask);
AssertNoAllocation no_allocation;
HeapObject* object = NULL;
HeapIterator iterator(HeapIterator::kFilterUnreachable);
@@ -1835,12 +1839,13 @@ const char* V8HeapExplorer::GetSystemEntryName(HeapObject* object) {
}
-int V8HeapExplorer::EstimateObjectsCount() {
- HeapIterator iterator(HeapIterator::kFilterUnreachable);
+int V8HeapExplorer::EstimateObjectsCount(HeapIterator* iterator) {
int objects_count = 0;
- for (HeapObject* obj = iterator.next();
+ for (HeapObject* obj = iterator->next();
obj != NULL;
- obj = iterator.next(), ++objects_count) {}
+ obj = iterator->next()) {
+ objects_count++;
+ }
return objects_count;
}
@@ -1913,6 +1918,7 @@ void V8HeapExplorer::ExtractReferences(HeapObject* obj) {
SetPropertyReference(
obj, entry,
heap_->prototype_symbol(), proto_or_map,
+ NULL,
JSFunction::kPrototypeOrInitialMapOffset);
} else {
SetPropertyReference(
@@ -1927,9 +1933,11 @@ void V8HeapExplorer::ExtractReferences(HeapObject* obj) {
SetInternalReference(js_fun, entry,
"context", js_fun->unchecked_context(),
JSFunction::kContextOffset);
- TagObject(js_fun->literals(), "(function literals)");
+ TagObject(js_fun->literals_or_bindings(),
+ "(function literals_or_bindings)");
SetInternalReference(js_fun, entry,
- "literals", js_fun->literals(),
+ "literals_or_bindings",
+ js_fun->literals_or_bindings(),
JSFunction::kLiteralsOffset);
}
TagObject(js_obj->properties(), "(object properties)");
@@ -1946,6 +1954,10 @@ void V8HeapExplorer::ExtractReferences(HeapObject* obj) {
SetInternalReference(obj, entry, 1, cs->first());
SetInternalReference(obj, entry, 2, cs->second());
}
+ if (obj->IsSlicedString()) {
+ SlicedString* ss = SlicedString::cast(obj);
+ SetInternalReference(obj, entry, "parent", ss->parent());
+ }
extract_indexed_refs = false;
} else if (obj->IsGlobalContext()) {
Context* context = Context::cast(obj);
@@ -1968,6 +1980,14 @@ void V8HeapExplorer::ExtractReferences(HeapObject* obj) {
"descriptors", map->instance_descriptors(),
Map::kInstanceDescriptorsOrBitField3Offset);
}
+ if (map->prototype_transitions() != heap_->empty_fixed_array()) {
+ TagObject(map->prototype_transitions(), "(prototype transitions)");
+ SetInternalReference(obj,
+ entry,
+ "prototype_transitions",
+ map->prototype_transitions(),
+ Map::kPrototypeTransitionsOffset);
+ }
SetInternalReference(obj, entry,
"code_cache", map->code_cache(),
Map::kCodeCacheOffset);
@@ -2044,20 +2064,27 @@ void V8HeapExplorer::ExtractReferences(HeapObject* obj) {
void V8HeapExplorer::ExtractClosureReferences(JSObject* js_obj,
HeapEntry* entry) {
if (js_obj->IsJSFunction()) {
- HandleScope hs;
JSFunction* func = JSFunction::cast(js_obj);
Context* context = func->context();
- ZoneScope zscope(Isolate::Current(), DELETE_ON_EXIT);
- SerializedScopeInfo* serialized_scope_info =
- context->closure()->shared()->scope_info();
- ScopeInfo<ZoneListAllocationPolicy> zone_scope_info(serialized_scope_info);
- int locals_number = zone_scope_info.NumberOfLocals();
- for (int i = 0; i < locals_number; ++i) {
- String* local_name = *zone_scope_info.LocalName(i);
- int idx = serialized_scope_info->ContextSlotIndex(local_name, NULL);
- if (idx >= 0 && idx < context->length()) {
- SetClosureReference(js_obj, entry, local_name, context->get(idx));
- }
+ ScopeInfo* scope_info = context->closure()->shared()->scope_info();
+
+ // Add context allocated locals.
+ int context_locals = scope_info->ContextLocalCount();
+ for (int i = 0; i < context_locals; ++i) {
+ String* local_name = scope_info->ContextLocalName(i);
+ int idx = Context::MIN_CONTEXT_SLOTS + i;
+ SetClosureReference(js_obj, entry, local_name, context->get(idx));
+ }
+
+ // Add function variable.
+ if (scope_info->HasFunctionName()) {
+ String* name = scope_info->FunctionName();
+ int idx = Context::MIN_CONTEXT_SLOTS + context_locals;
+#ifdef DEBUG
+ VariableMode mode;
+ ASSERT(idx == scope_info->FunctionContextSlotIndex(name, &mode));
+#endif
+ SetClosureReference(js_obj, entry, name, context->get(idx));
}
}
}
@@ -2075,6 +2102,7 @@ void V8HeapExplorer::ExtractPropertyReferences(JSObject* js_obj,
SetPropertyReference(
js_obj, entry,
descs->GetKey(i), js_obj->InObjectPropertyAt(index),
+ NULL,
js_obj->GetInObjectPropertyOffset(index));
} else {
SetPropertyReference(
@@ -2088,7 +2116,29 @@ void V8HeapExplorer::ExtractPropertyReferences(JSObject* js_obj,
js_obj, entry,
descs->GetKey(i), descs->GetConstantFunction(i));
break;
- default: ;
+ case CALLBACKS: {
+ Object* callback_obj = descs->GetValue(i);
+ if (callback_obj->IsFixedArray()) {
+ FixedArray* accessors = FixedArray::cast(callback_obj);
+ if (Object* getter = accessors->get(JSObject::kGetterIndex)) {
+ SetPropertyReference(js_obj, entry, descs->GetKey(i),
+ getter, "get-%s");
+ }
+ if (Object* setter = accessors->get(JSObject::kSetterIndex)) {
+ SetPropertyReference(js_obj, entry, descs->GetKey(i),
+ setter, "set-%s");
+ }
+ }
+ break;
+ }
+ case NORMAL: // only in slow mode
+ case HANDLER: // only in lookup results, not in descriptors
+ case INTERCEPTOR: // only in lookup results, not in descriptors
+ case MAP_TRANSITION: // we do not care about transitions here...
+ case ELEMENTS_TRANSITION:
+ case CONSTANT_TRANSITION:
+ case NULL_DESCRIPTOR: // ... and not about "holes"
+ break;
}
}
} else {
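With the CALLBACKS case added, accessor pairs show up as separately named snapshot edges; a sketch of the objects involved:

    var o = {};
    Object.defineProperty(o, 'x', {
      get: function()  { return 1; },
      set: function(v) {}
    });
    // In a heap snapshot, o should now carry "get-x" and "set-x"
    // property edges (via the "get-%s"/"set-%s" format strings) to
    // the two accessor closures.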
@@ -2153,15 +2203,16 @@ void V8HeapExplorer::ExtractInternalReferences(JSObject* js_obj,
String* V8HeapExplorer::GetConstructorName(JSObject* object) {
- if (object->IsJSFunction()) return HEAP->closure_symbol();
+ Heap* heap = object->GetHeap();
+ if (object->IsJSFunction()) return heap->closure_symbol();
String* constructor_name = object->constructor_name();
- if (constructor_name == HEAP->Object_symbol()) {
+ if (constructor_name == heap->Object_symbol()) {
// Look up an immediate "constructor" property, if it is a function,
// return its name. This is for instances of binding objects, which
// have prototype constructor type "Object".
Object* constructor_prop = NULL;
- LookupResult result;
- object->LocalLookupRealNamedProperty(HEAP->constructor_symbol(), &result);
+ LookupResult result(heap->isolate());
+ object->LocalLookupRealNamedProperty(heap->constructor_symbol(), &result);
if (result.IsProperty()) {
constructor_prop = result.GetLazyValue();
}
@@ -2198,9 +2249,11 @@ class RootsReferencesExtractor : public ObjectVisitor {
bool V8HeapExplorer::IterateAndExtractReferences(
SnapshotFillerInterface* filler) {
- filler_ = filler;
HeapIterator iterator(HeapIterator::kFilterUnreachable);
+
+ filler_ = filler;
bool interrupted = false;
+
// Heap iteration with filtering must be finished in any case.
for (HeapObject* obj = iterator.next();
obj != NULL;
@@ -2310,15 +2363,23 @@ void V8HeapExplorer::SetPropertyReference(HeapObject* parent_obj,
HeapEntry* parent_entry,
String* reference_name,
Object* child_obj,
+ const char* name_format_string,
int field_offset) {
HeapEntry* child_entry = GetEntry(child_obj);
if (child_entry != NULL) {
HeapGraphEdge::Type type = reference_name->length() > 0 ?
HeapGraphEdge::kProperty : HeapGraphEdge::kInternal;
+ const char* name = name_format_string != NULL ?
+ collection_->names()->GetFormatted(
+ name_format_string,
+ *reference_name->ToCString(DISALLOW_NULLS,
+ ROBUST_STRING_TRAVERSAL)) :
+ collection_->names()->GetName(reference_name);
+
filler_->SetNamedReference(type,
parent_obj,
parent_entry,
- collection_->names()->GetName(reference_name),
+ name,
child_obj,
child_entry);
IndexedReferencesExtractor::MarkVisitedField(parent_obj, field_offset);
@@ -2409,6 +2470,7 @@ class GlobalObjectsEnumerator : public ObjectVisitor {
// Modifies heap. Must not be run during heap traversal.
void V8HeapExplorer::TagGlobalObjects() {
+ HandleScope scope;
Isolate* isolate = Isolate::Current();
GlobalObjectsEnumerator enumerator;
isolate->global_handles()->IterateAllRoots(&enumerator);
@@ -2419,6 +2481,7 @@ void V8HeapExplorer::TagGlobalObjects() {
const char** urls = NewArray<const char*>(enumerator.count());
for (int i = 0, l = enumerator.count(); i < l; ++i) {
urls[i] = NULL;
+ HandleScope scope;
Handle<JSGlobalObject> global_obj = enumerator.at(i);
Object* obj_document;
if (global_obj->GetProperty(*document_string)->ToObject(&obj_document) &&
@@ -2766,13 +2829,43 @@ class SnapshotFiller : public SnapshotFillerInterface {
bool HeapSnapshotGenerator::GenerateSnapshot() {
v8_heap_explorer_.TagGlobalObjects();
+ // TODO(1562) Profiler assumes that any object that is in the heap after
+ // full GC is reachable from the root when computing dominators.
+ // This is not true for weakly reachable objects.
+ // As a temporary solution we call GC twice.
+ Isolate::Current()->heap()->CollectAllGarbage(Heap::kMakeHeapIterableMask);
+ Isolate::Current()->heap()->CollectAllGarbage(Heap::kMakeHeapIterableMask);
+
+#ifdef DEBUG
+ Heap* debug_heap = Isolate::Current()->heap();
+ ASSERT(!debug_heap->old_data_space()->was_swept_conservatively());
+ ASSERT(!debug_heap->old_pointer_space()->was_swept_conservatively());
+ ASSERT(!debug_heap->code_space()->was_swept_conservatively());
+ ASSERT(!debug_heap->cell_space()->was_swept_conservatively());
+ ASSERT(!debug_heap->map_space()->was_swept_conservatively());
+#endif
+
+ // The following code uses heap iterators, so we want the heap to be
+ // stable. It should follow TagGlobalObjects as that can allocate.
AssertNoAllocation no_alloc;
+#ifdef DEBUG
+ debug_heap->Verify();
+#endif
+
SetProgressTotal(4); // 2 passes + dominators + sizes.
+#ifdef DEBUG
+ debug_heap->Verify();
+#endif
+
// Pass 1. Iterate heap contents to count entries and references.
if (!CountEntriesAndReferences()) return false;
+#ifdef DEBUG
+ debug_heap->Verify();
+#endif
+
// Allocate and fill entries in the snapshot, allocate references.
snapshot_->AllocateEntries(entries_.entries_count(),
entries_.total_children_count(),
@@ -2810,8 +2903,9 @@ bool HeapSnapshotGenerator::ProgressReport(bool force) {
void HeapSnapshotGenerator::SetProgressTotal(int iterations_count) {
if (control_ == NULL) return;
+ HeapIterator iterator(HeapIterator::kFilterUnreachable);
progress_total_ = (
- v8_heap_explorer_.EstimateObjectsCount() +
+ v8_heap_explorer_.EstimateObjectsCount(&iterator) +
dom_explorer_.EstimateObjectsCount()) * iterations_count;
progress_counter_ = 0;
}
@@ -2861,7 +2955,7 @@ void HeapSnapshotGenerator::FillReversePostorderIndexes(
nodes_to_visit.RemoveLast();
}
}
- entries->Truncate(current_entry);
+ ASSERT_EQ(current_entry, entries->length());
}
diff --git a/deps/v8/src/profile-generator.h b/deps/v8/src/profile-generator.h
index da1fdc33e..44be3db78 100644
--- a/deps/v8/src/profile-generator.h
+++ b/deps/v8/src/profile-generator.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -74,6 +74,8 @@ class StringsStorage {
inline const char* GetFunctionName(const char* name);
private:
+ static const int kMaxNameSize = 1024;
+
INLINE(static bool StringsMatch(void* key1, void* key2)) {
return strcmp(reinterpret_cast<char*>(key1),
reinterpret_cast<char*>(key2)) == 0;
@@ -257,7 +259,7 @@ class CodeMap {
typedef Address Key;
typedef CodeEntryInfo Value;
static const Key kNoKey;
- static const Value kNoValue;
+ static const Value NoValue() { return CodeEntryInfo(NULL, 0); }
static int Compare(const Key& a, const Key& b) {
return a < b ? -1 : (a > b ? 1 : 0);
}
@@ -550,7 +552,10 @@ class HeapEntry BASE_EMBEDDED {
Vector<HeapGraphEdge*> retainers() {
return Vector<HeapGraphEdge*>(retainers_arr(), retainers_count_); }
HeapEntry* dominator() { return dominator_; }
- void set_dominator(HeapEntry* entry) { dominator_ = entry; }
+ void set_dominator(HeapEntry* entry) {
+ ASSERT(entry != NULL);
+ dominator_ = entry;
+ }
void clear_paint() { painted_ = kUnpainted; }
bool painted_reachable() { return painted_ == kPainted; }
@@ -920,7 +925,7 @@ class V8HeapExplorer : public HeapEntriesAllocator {
virtual HeapEntry* AllocateEntry(
HeapThing ptr, int children_count, int retainers_count);
void AddRootEntries(SnapshotFillerInterface* filler);
- int EstimateObjectsCount();
+ int EstimateObjectsCount(HeapIterator* iterator);
bool IterateAndExtractReferences(SnapshotFillerInterface* filler);
void TagGlobalObjects();
@@ -968,6 +973,7 @@ class V8HeapExplorer : public HeapEntriesAllocator {
HeapEntry* parent,
String* reference_name,
Object* child,
+ const char* name_format_string = NULL,
int field_offset = -1);
void SetPropertyShortcutReference(HeapObject* parent_obj,
HeapEntry* parent,
diff --git a/deps/v8/src/property-details.h b/deps/v8/src/property-details.h
new file mode 100644
index 000000000..135c2ca01
--- /dev/null
+++ b/deps/v8/src/property-details.h
@@ -0,0 +1,182 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_PROPERTY_DETAILS_H_
+#define V8_PROPERTY_DETAILS_H_
+
+#include "../include/v8.h"
+#include "allocation.h"
+#include "utils.h"
+
+// Ecma-262 3rd 8.6.1
+enum PropertyAttributes {
+ NONE = v8::None,
+ READ_ONLY = v8::ReadOnly,
+ DONT_ENUM = v8::DontEnum,
+ DONT_DELETE = v8::DontDelete,
+ ABSENT = 16 // Used in runtime to indicate a property is absent.
+ // ABSENT can never be stored in or returned from a descriptor's attributes
+ // bitfield. It is only used as a return value meaning the attributes of
+ // a non-existent property.
+};
+
+
+namespace v8 {
+namespace internal {
+
+class Smi;
+
+// Type of properties.
+// Order of properties is significant.
+// Must fit in the BitField PropertyDetails::TypeField.
+// A copy of this is in mirror-debugger.js.
+enum PropertyType {
+ NORMAL = 0, // only in slow mode
+ FIELD = 1, // only in fast mode
+ CONSTANT_FUNCTION = 2, // only in fast mode
+ CALLBACKS = 3,
+ HANDLER = 4, // only in lookup results, not in descriptors
+ INTERCEPTOR = 5, // only in lookup results, not in descriptors
+ // All properties before MAP_TRANSITION are real.
+ MAP_TRANSITION = 6, // only in fast mode
+ ELEMENTS_TRANSITION = 7,
+ CONSTANT_TRANSITION = 8, // only in fast mode
+ NULL_DESCRIPTOR = 9, // only in fast mode
+ // There are no IC stubs for NULL_DESCRIPTORS. Therefore,
+ // NULL_DESCRIPTOR can be used as the type flag for IC stubs for
+ // nonexistent properties.
+ NONEXISTENT = NULL_DESCRIPTOR
+};
+
+
+inline bool IsTransitionType(PropertyType type) {
+ switch (type) {
+ case MAP_TRANSITION:
+ case CONSTANT_TRANSITION:
+ case ELEMENTS_TRANSITION:
+ return true;
+ case NORMAL:
+ case FIELD:
+ case CONSTANT_FUNCTION:
+ case CALLBACKS:
+ case HANDLER:
+ case INTERCEPTOR:
+ case NULL_DESCRIPTOR:
+ return false;
+ }
+ UNREACHABLE(); // keep the compiler happy
+ return false;
+}
+
+
+inline bool IsRealProperty(PropertyType type) {
+ switch (type) {
+ case NORMAL:
+ case FIELD:
+ case CONSTANT_FUNCTION:
+ case CALLBACKS:
+ case HANDLER:
+ case INTERCEPTOR:
+ return true;
+ case MAP_TRANSITION:
+ case ELEMENTS_TRANSITION:
+ case CONSTANT_TRANSITION:
+ case NULL_DESCRIPTOR:
+ return false;
+ }
+ UNREACHABLE(); // keep the compiler happy
+ return false;
+}
+
+
+// PropertyDetails captures type and attributes for a property.
+// They are used both in property dictionaries and instance descriptors.
+class PropertyDetails BASE_EMBEDDED {
+ public:
+ PropertyDetails(PropertyAttributes attributes,
+ PropertyType type,
+ int index = 0) {
+ ASSERT(TypeField::is_valid(type));
+ ASSERT(AttributesField::is_valid(attributes));
+ ASSERT(StorageField::is_valid(index));
+
+ value_ = TypeField::encode(type)
+ | AttributesField::encode(attributes)
+ | StorageField::encode(index);
+
+ ASSERT(type == this->type());
+ ASSERT(attributes == this->attributes());
+ ASSERT(index == this->index());
+ }
+
+ // Conversion for storing details as Object*.
+ explicit inline PropertyDetails(Smi* smi);
+ inline Smi* AsSmi();
+
+ PropertyType type() { return TypeField::decode(value_); }
+
+ bool IsTransition() {
+ PropertyType t = type();
+ ASSERT(t != INTERCEPTOR);
+ return IsTransitionType(t);
+ }
+
+ bool IsProperty() {
+ return IsRealProperty(type());
+ }
+
+ PropertyAttributes attributes() { return AttributesField::decode(value_); }
+
+ int index() { return StorageField::decode(value_); }
+
+ inline PropertyDetails AsDeleted();
+
+ static bool IsValidIndex(int index) {
+ return StorageField::is_valid(index);
+ }
+
+ bool IsReadOnly() { return (attributes() & READ_ONLY) != 0; }
+ bool IsDontDelete() { return (attributes() & DONT_DELETE) != 0; }
+ bool IsDontEnum() { return (attributes() & DONT_ENUM) != 0; }
+ bool IsDeleted() { return DeletedField::decode(value_) != 0;}
+
+ // Bit fields in value_ (type, shift, size). Must be public so the
+ // constants can be embedded in generated code.
+ class TypeField: public BitField<PropertyType, 0, 4> {};
+ class AttributesField: public BitField<PropertyAttributes, 4, 3> {};
+ class DeletedField: public BitField<uint32_t, 7, 1> {};
+ class StorageField: public BitField<uint32_t, 8, 32-8> {};
+
+ static const int kInitialIndex = 1;
+
+ private:
+ uint32_t value_;
+};
+
+} } // namespace v8::internal
+
+#endif // V8_PROPERTY_DETAILS_H_
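The BitField declarations pin down the packed layout of value_; the same encoding, sketched in JavaScript with constants taken from the enums above (the v8::DontEnum value is an assumption):

    var FIELD = 1;                      // PropertyType, from the enum above
    var DONT_ENUM = 2;                  // assumed: v8::DontEnum == 1 << 1
    var index = 3;
    var value = FIELD | (DONT_ENUM << 4) | (index << 8);
    var type = value & 0xF;             // TypeField: bits 0-3
    var attrs = (value >> 4) & 0x7;     // AttributesField: bits 4-6
    var deleted = (value >> 7) & 0x1;   // DeletedField: bit 7
    var storage = value >>> 8;          // StorageField: bits 8-31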
diff --git a/deps/v8/src/property.cc b/deps/v8/src/property.cc
index 7cc2df5a3..6e043e268 100644
--- a/deps/v8/src/property.cc
+++ b/deps/v8/src/property.cc
@@ -31,6 +31,15 @@ namespace v8 {
namespace internal {
+void LookupResult::Iterate(ObjectVisitor* visitor) {
+ LookupResult* current = this; // Could be NULL.
+ while (current != NULL) {
+ visitor->VisitPointer(BitCast<Object**>(&current->holder_));
+ current = current->next_;
+ }
+}
+
+
#ifdef OBJECT_PRINT
void LookupResult::Print(FILE* out) {
if (!IsFound()) {
diff --git a/deps/v8/src/property.h b/deps/v8/src/property.h
index e7d9fc534..3203dd112 100644
--- a/deps/v8/src/property.h
+++ b/deps/v8/src/property.h
@@ -115,11 +115,9 @@ class MapTransitionDescriptor: public Descriptor {
class ElementsTransitionDescriptor: public Descriptor {
public:
ElementsTransitionDescriptor(String* key,
- Map* map,
- ElementsKind elements_kind)
- : Descriptor(key, map, PropertyDetails(NONE,
- ELEMENTS_TRANSITION,
- elements_kind)) { }
+ Object* map_or_array)
+ : Descriptor(key, map_or_array, PropertyDetails(NONE,
+ ELEMENTS_TRANSITION)) { }
};
// Marks a field name in a map so that adding the field is guaranteed
@@ -166,10 +164,20 @@ class CallbacksDescriptor: public Descriptor {
class LookupResult BASE_EMBEDDED {
public:
- LookupResult()
- : lookup_type_(NOT_FOUND),
+ explicit LookupResult(Isolate* isolate)
+ : isolate_(isolate),
+ next_(isolate->top_lookup_result()),
+ lookup_type_(NOT_FOUND),
+ holder_(NULL),
cacheable_(true),
- details_(NONE, NORMAL) {}
+ details_(NONE, NORMAL) {
+ isolate->SetTopLookupResult(this);
+ }
+
+ ~LookupResult() {
+ ASSERT(isolate_->top_lookup_result() == this);
+ isolate_->SetTopLookupResult(next_);
+ }
void DescriptorResult(JSObject* holder, PropertyDetails details, int number) {
lookup_type_ = DESCRIPTOR_TYPE;
@@ -202,9 +210,9 @@ class LookupResult BASE_EMBEDDED {
number_ = entry;
}
- void HandlerResult() {
+ void HandlerResult(JSProxy* proxy) {
lookup_type_ = HANDLER_TYPE;
- holder_ = NULL;
+ holder_ = proxy;
details_ = PropertyDetails(NONE, HANDLER);
cacheable_ = false;
}
@@ -217,11 +225,17 @@ class LookupResult BASE_EMBEDDED {
void NotFound() {
lookup_type_ = NOT_FOUND;
+ holder_ = NULL;
}
JSObject* holder() {
ASSERT(IsFound());
- return holder_;
+ return JSObject::cast(holder_);
+ }
+
+ JSProxy* proxy() {
+ ASSERT(IsFound());
+ return JSProxy::cast(holder_);
}
PropertyType type() {
@@ -248,7 +262,7 @@ class LookupResult BASE_EMBEDDED {
  // Is the result a property, excluding transitions and the null
// descriptor?
bool IsProperty() {
- return IsFound() && (type() < FIRST_PHANTOM_PROPERTY_TYPE);
+ return IsFound() && GetPropertyDetails().IsProperty();
}
// Is the result a property or a transition?
@@ -278,10 +292,10 @@ class LookupResult BASE_EMBEDDED {
}
}
+
Map* GetTransitionMap() {
ASSERT(lookup_type_ == DESCRIPTOR_TYPE);
- ASSERT(type() == MAP_TRANSITION || type() == CONSTANT_TRANSITION ||
- type() == ELEMENTS_TRANSITION);
+ ASSERT(IsTransitionType(type()));
return Map::cast(GetValue());
}
@@ -343,7 +357,12 @@ class LookupResult BASE_EMBEDDED {
return holder()->GetNormalizedProperty(this);
}
+ void Iterate(ObjectVisitor* visitor);
+
private:
+ Isolate* isolate_;
+ LookupResult* next_;
+
  // Where did we find the result?
enum {
NOT_FOUND,
@@ -354,7 +373,7 @@ class LookupResult BASE_EMBEDDED {
CONSTANT_TYPE
} lookup_type_;
- JSObject* holder_;
+ JSReceiver* holder_;
int number_;
bool cacheable_;
PropertyDetails details_;
diff --git a/deps/v8/src/proxy.js b/deps/v8/src/proxy.js
index 4e44cd4ef..3cd467faf 100644
--- a/deps/v8/src/proxy.js
+++ b/deps/v8/src/proxy.js
@@ -32,7 +32,10 @@ var $Proxy = global.Proxy
$Proxy.create = function(handler, proto) {
if (!IS_SPEC_OBJECT(handler))
throw MakeTypeError("handler_non_object", ["create"])
- if (!IS_SPEC_OBJECT(proto)) proto = null // Mozilla does this...
+ if (IS_UNDEFINED(proto))
+ proto = null
+ else if (!(IS_SPEC_OBJECT(proto) || proto === null))
+ throw MakeTypeError("proto_non_object", ["create"])
return %CreateJSProxy(handler, proto)
}
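A sketch of what the tightened proto check accepts and rejects, using the Harmony-era Proxy API this file implements:

    var handler = { get: function(r, name) { return 42; } };
    var p1 = Proxy.create(handler);        // undefined proto becomes null
    var p2 = Proxy.create(handler, null);  // explicit null still allowed
    // Proxy.create(handler, 42);
    // Previously 42 was silently coerced to null; now this throws
    // TypeError ("proto_non_object").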
@@ -42,8 +45,14 @@ $Proxy.createFunction = function(handler, callTrap, constructTrap) {
if (!IS_SPEC_FUNCTION(callTrap))
throw MakeTypeError("trap_function_expected", ["createFunction", "call"])
if (IS_UNDEFINED(constructTrap)) {
- constructTrap = callTrap
- } else if (!IS_SPEC_FUNCTION(constructTrap)) {
+ constructTrap = DerivedConstructTrap(callTrap)
+ } else if (IS_SPEC_FUNCTION(constructTrap)) {
+ // Make sure the trap receives 'undefined' as this.
+ var construct = constructTrap
+ constructTrap = function() {
+ return %Apply(construct, void 0, arguments, 0, %_ArgumentsLength());
+ }
+ } else {
throw MakeTypeError("trap_function_expected",
["createFunction", "construct"])
}
@@ -57,6 +66,17 @@ $Proxy.createFunction = function(handler, callTrap, constructTrap) {
// Builtins
////////////////////////////////////////////////////////////////////////////////
+function DerivedConstructTrap(callTrap) {
+ return function() {
+ var proto = this.prototype
+ if (!IS_SPEC_OBJECT(proto)) proto = $Object.prototype
+ var obj = new $Object()
+ obj.__proto__ = proto
+ var result = %Apply(callTrap, obj, arguments, 0, %_ArgumentsLength());
+ return IS_SPEC_OBJECT(result) ? result : obj
+ }
+}
+
function DelegateCallAndConstruct(callTrap, constructTrap) {
return function() {
return %Apply(%_IsConstructCall() ? constructTrap : callTrap,
@@ -136,9 +156,32 @@ function DerivedKeysTrap() {
var enumerableNames = []
for (var i = 0, count = 0; i < names.length; ++i) {
var name = names[i]
- if (this.getOwnPropertyDescriptor(TO_STRING_INLINE(name)).enumerable) {
+ var desc = this.getOwnPropertyDescriptor(TO_STRING_INLINE(name))
+ if (!IS_UNDEFINED(desc) && desc.enumerable) {
enumerableNames[count++] = names[i]
}
}
return enumerableNames
}
+
+function DerivedEnumerateTrap() {
+ var names = this.getPropertyNames()
+ var enumerableNames = []
+ for (var i = 0, count = 0; i < names.length; ++i) {
+ var name = names[i]
+ var desc = this.getPropertyDescriptor(TO_STRING_INLINE(name))
+ if (!IS_UNDEFINED(desc) && desc.enumerable) {
+ enumerableNames[count++] = names[i]
+ }
+ }
+ return enumerableNames
+}
+
+function ProxyEnumerate(proxy) {
+ var handler = %GetHandler(proxy)
+ if (IS_UNDEFINED(handler.enumerate)) {
+ return %Apply(DerivedEnumerateTrap, handler, [], 0, 0)
+ } else {
+ return ToStringArray(handler.enumerate(), "enumerate")
+ }
+}
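A sketch of the new default construct path when only a call trap is given:

    var f = Proxy.createFunction({}, function() { this.x = 1; });
    f.prototype = { tag: 'P' };
    var obj = new f();
    // DerivedConstructTrap: obj inherits from f.prototype and the call
    // trap runs with obj as 'this', so obj.x === 1 and obj.tag === 'P'.
    // Had the call trap returned an object, that object would be the
    // result instead; an explicitly supplied construct trap now runs
    // with 'undefined' as 'this'.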
diff --git a/deps/v8/src/regexp-macro-assembler-tracer.cc b/deps/v8/src/regexp-macro-assembler-tracer.cc
index b32d71dba..f8432784f 100644
--- a/deps/v8/src/regexp-macro-assembler-tracer.cc
+++ b/deps/v8/src/regexp-macro-assembler-tracer.cc
@@ -37,8 +37,8 @@ RegExpMacroAssemblerTracer::RegExpMacroAssemblerTracer(
RegExpMacroAssembler* assembler) :
assembler_(assembler) {
unsigned int type = assembler->Implementation();
- ASSERT(type < 4);
- const char* impl_names[4] = {"IA32", "ARM", "X64", "Bytecode"};
+ ASSERT(type < 5);
+ const char* impl_names[] = {"IA32", "ARM", "MIPS", "X64", "Bytecode"};
PrintF("RegExpMacroAssembler%s();\n", impl_names[type]);
}
diff --git a/deps/v8/src/regexp-macro-assembler.cc b/deps/v8/src/regexp-macro-assembler.cc
index f91ea9348..99f3a37f4 100644
--- a/deps/v8/src/regexp-macro-assembler.cc
+++ b/deps/v8/src/regexp-macro-assembler.cc
@@ -81,7 +81,7 @@ const byte* NativeRegExpMacroAssembler::StringCharacterPosition(
if (subject->IsAsciiRepresentation()) {
const byte* address;
if (StringShape(subject).IsExternal()) {
- const char* data = ExternalAsciiString::cast(subject)->resource()->data();
+ const char* data = ExternalAsciiString::cast(subject)->GetChars();
address = reinterpret_cast<const byte*>(data);
} else {
ASSERT(subject->IsSeqAsciiString());
@@ -92,7 +92,7 @@ const byte* NativeRegExpMacroAssembler::StringCharacterPosition(
}
const uc16* data;
if (StringShape(subject).IsExternal()) {
- data = ExternalTwoByteString::cast(subject)->resource()->data();
+ data = ExternalTwoByteString::cast(subject)->GetChars();
} else {
ASSERT(subject->IsSeqTwoByteString());
data = SeqTwoByteString::cast(subject)->GetChars();
diff --git a/deps/v8/src/regexp.js b/deps/v8/src/regexp.js
index 38d449615..596c18543 100644
--- a/deps/v8/src/regexp.js
+++ b/deps/v8/src/regexp.js
@@ -95,12 +95,11 @@ function RegExpConstructor(pattern, flags) {
}
}
-
// Deprecated RegExp.prototype.compile method. We behave as if the constructor
// were called again. In SpiderMonkey, this method returns the regexp object.
// In JSC, it returns undefined. For compatibility with JSC, we match their
// behavior.
-function CompileRegExp(pattern, flags) {
+function RegExpCompile(pattern, flags) {
// Both JSC and SpiderMonkey treat a missing pattern argument as the
// empty subject string, and an actual undefined value passed as the
// pattern as the string 'undefined'. Note that JSC is inconsistent
@@ -108,6 +107,11 @@ function CompileRegExp(pattern, flags) {
// RegExp.prototype.compile and in the constructor, where they are
// the empty string. For compatibility with JSC, we match their
// behavior.
+ if (this == $RegExp.prototype) {
+ // We don't allow recompiling RegExp.prototype.
+ throw MakeTypeError('incompatible_method_receiver',
+ ['RegExp.prototype.compile', this]);
+ }
if (IS_UNDEFINED(pattern) && %_ArgumentsLength() != 0) {
DoConstructRegExp(this, 'undefined', flags);
} else {
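With the receiver check in place, only real RegExp instances can be recompiled; sketched:

    var re = /a/;
    re.compile('b');                    // fine: re now matches 'b'
    // RegExp.prototype.compile('b');   // TypeError
    //                                  // ('incompatible_method_receiver')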
@@ -170,13 +174,6 @@ function RegExpExec(string) {
['RegExp.prototype.exec', this]);
}
- if (%_ArgumentsLength() === 0) {
- var regExpInput = LAST_INPUT(lastMatchInfo);
- if (IS_UNDEFINED(regExpInput)) {
- throw MakeError('no_input_to_regexp', [this]);
- }
- string = regExpInput;
- }
string = TO_STRING_INLINE(string);
var lastIndex = this.lastIndex;
@@ -225,14 +222,6 @@ function RegExpTest(string) {
throw MakeTypeError('incompatible_method_receiver',
['RegExp.prototype.test', this]);
}
- if (%_ArgumentsLength() == 0) {
- var regExpInput = LAST_INPUT(lastMatchInfo);
- if (IS_UNDEFINED(regExpInput)) {
- throw MakeError('no_input_to_regexp', [this]);
- }
- string = regExpInput;
- }
-
string = TO_STRING_INLINE(string);
var lastIndex = this.lastIndex;
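Dropping the zero-argument fallback means exec() and test() simply string-coerce a missing argument instead of replaying the last input; a sketch:

    var re = /undefined/;
    re.test();     // true: the missing argument coerces to "undefined"
    // Previously, exec()/test() with no arguments reused the last
    // matched input and threw 'no_input_to_regexp' if there was none.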
@@ -408,7 +397,6 @@ var lastMatchInfoOverride = null;
function SetUpRegExp() {
%CheckIsBootstrapping();
%FunctionSetInstanceClassName($RegExp, 'RegExp');
- %FunctionSetPrototype($RegExp, new $Object());
%SetProperty($RegExp.prototype, 'constructor', $RegExp, DONT_ENUM);
%SetCode($RegExp, RegExpConstructor);
@@ -416,7 +404,7 @@ function SetUpRegExp() {
"exec", RegExpExec,
"test", RegExpTest,
"toString", RegExpToString,
- "compile", CompileRegExp
+ "compile", RegExpCompile
));
// The length of compile is 1 in SpiderMonkey.
@@ -431,14 +419,18 @@ function SetUpRegExp() {
}
function RegExpSetInput(string) {
LAST_INPUT(lastMatchInfo) = ToString(string);
- };
+ }
%DefineAccessor($RegExp, 'input', GETTER, RegExpGetInput, DONT_DELETE);
%DefineAccessor($RegExp, 'input', SETTER, RegExpSetInput, DONT_DELETE);
- %DefineAccessor($RegExp, '$_', GETTER, RegExpGetInput, DONT_ENUM | DONT_DELETE);
- %DefineAccessor($RegExp, '$_', SETTER, RegExpSetInput, DONT_ENUM | DONT_DELETE);
- %DefineAccessor($RegExp, '$input', GETTER, RegExpGetInput, DONT_ENUM | DONT_DELETE);
- %DefineAccessor($RegExp, '$input', SETTER, RegExpSetInput, DONT_ENUM | DONT_DELETE);
+ %DefineAccessor($RegExp, '$_', GETTER, RegExpGetInput,
+ DONT_ENUM | DONT_DELETE);
+ %DefineAccessor($RegExp, '$_', SETTER, RegExpSetInput,
+ DONT_ENUM | DONT_DELETE);
+ %DefineAccessor($RegExp, '$input', GETTER, RegExpGetInput,
+ DONT_ENUM | DONT_DELETE);
+ %DefineAccessor($RegExp, '$input', SETTER, RegExpSetInput,
+ DONT_ENUM | DONT_DELETE);
// The properties multiline and $* are aliases for each other. When this
// value is set in SpiderMonkey, the value it is set to is coerced to a
@@ -449,38 +441,51 @@ function SetUpRegExp() {
// Getter and setter for multiline.
var multiline = false;
- function RegExpGetMultiline() { return multiline; };
- function RegExpSetMultiline(flag) { multiline = flag ? true : false; };
+ function RegExpGetMultiline() { return multiline; }
+ function RegExpSetMultiline(flag) { multiline = flag ? true : false; }
- %DefineAccessor($RegExp, 'multiline', GETTER, RegExpGetMultiline, DONT_DELETE);
- %DefineAccessor($RegExp, 'multiline', SETTER, RegExpSetMultiline, DONT_DELETE);
- %DefineAccessor($RegExp, '$*', GETTER, RegExpGetMultiline, DONT_ENUM | DONT_DELETE);
- %DefineAccessor($RegExp, '$*', SETTER, RegExpSetMultiline, DONT_ENUM | DONT_DELETE);
+ %DefineAccessor($RegExp, 'multiline', GETTER, RegExpGetMultiline,
+ DONT_DELETE);
+ %DefineAccessor($RegExp, 'multiline', SETTER, RegExpSetMultiline,
+ DONT_DELETE);
+ %DefineAccessor($RegExp, '$*', GETTER, RegExpGetMultiline,
+ DONT_ENUM | DONT_DELETE);
+ %DefineAccessor($RegExp, '$*', SETTER, RegExpSetMultiline,
+ DONT_ENUM | DONT_DELETE);
function NoOpSetter(ignored) {}
// Static properties set by a successful match.
- %DefineAccessor($RegExp, 'lastMatch', GETTER, RegExpGetLastMatch, DONT_DELETE);
+ %DefineAccessor($RegExp, 'lastMatch', GETTER, RegExpGetLastMatch,
+ DONT_DELETE);
%DefineAccessor($RegExp, 'lastMatch', SETTER, NoOpSetter, DONT_DELETE);
- %DefineAccessor($RegExp, '$&', GETTER, RegExpGetLastMatch, DONT_ENUM | DONT_DELETE);
+ %DefineAccessor($RegExp, '$&', GETTER, RegExpGetLastMatch,
+ DONT_ENUM | DONT_DELETE);
%DefineAccessor($RegExp, '$&', SETTER, NoOpSetter, DONT_ENUM | DONT_DELETE);
- %DefineAccessor($RegExp, 'lastParen', GETTER, RegExpGetLastParen, DONT_DELETE);
+ %DefineAccessor($RegExp, 'lastParen', GETTER, RegExpGetLastParen,
+ DONT_DELETE);
%DefineAccessor($RegExp, 'lastParen', SETTER, NoOpSetter, DONT_DELETE);
- %DefineAccessor($RegExp, '$+', GETTER, RegExpGetLastParen, DONT_ENUM | DONT_DELETE);
+ %DefineAccessor($RegExp, '$+', GETTER, RegExpGetLastParen,
+ DONT_ENUM | DONT_DELETE);
%DefineAccessor($RegExp, '$+', SETTER, NoOpSetter, DONT_ENUM | DONT_DELETE);
- %DefineAccessor($RegExp, 'leftContext', GETTER, RegExpGetLeftContext, DONT_DELETE);
+ %DefineAccessor($RegExp, 'leftContext', GETTER, RegExpGetLeftContext,
+ DONT_DELETE);
%DefineAccessor($RegExp, 'leftContext', SETTER, NoOpSetter, DONT_DELETE);
- %DefineAccessor($RegExp, '$`', GETTER, RegExpGetLeftContext, DONT_ENUM | DONT_DELETE);
+ %DefineAccessor($RegExp, '$`', GETTER, RegExpGetLeftContext,
+ DONT_ENUM | DONT_DELETE);
%DefineAccessor($RegExp, '$`', SETTER, NoOpSetter, DONT_ENUM | DONT_DELETE);
- %DefineAccessor($RegExp, 'rightContext', GETTER, RegExpGetRightContext, DONT_DELETE);
+ %DefineAccessor($RegExp, 'rightContext', GETTER, RegExpGetRightContext,
+ DONT_DELETE);
%DefineAccessor($RegExp, 'rightContext', SETTER, NoOpSetter, DONT_DELETE);
- %DefineAccessor($RegExp, "$'", GETTER, RegExpGetRightContext, DONT_ENUM | DONT_DELETE);
+ %DefineAccessor($RegExp, "$'", GETTER, RegExpGetRightContext,
+ DONT_ENUM | DONT_DELETE);
%DefineAccessor($RegExp, "$'", SETTER, NoOpSetter, DONT_ENUM | DONT_DELETE);
for (var i = 1; i < 10; ++i) {
- %DefineAccessor($RegExp, '$' + i, GETTER, RegExpMakeCaptureGetter(i), DONT_DELETE);
+ %DefineAccessor($RegExp, '$' + i, GETTER, RegExpMakeCaptureGetter(i),
+ DONT_DELETE);
%DefineAccessor($RegExp, '$' + i, SETTER, NoOpSetter, DONT_DELETE);
}
}
diff --git a/deps/v8/src/rewriter.cc b/deps/v8/src/rewriter.cc
index 3d4c2dcc1..a70cd82a7 100644
--- a/deps/v8/src/rewriter.cc
+++ b/deps/v8/src/rewriter.cc
@@ -236,10 +236,22 @@ bool Rewriter::Rewrite(CompilationInfo* info) {
if (processor.HasStackOverflow()) return false;
if (processor.result_assigned()) {
+ ASSERT(function->end_position() != RelocInfo::kNoPosition);
Isolate* isolate = info->isolate();
Zone* zone = isolate->zone();
- VariableProxy* result_proxy = new(zone) VariableProxy(isolate, result);
- body->Add(new(zone) ReturnStatement(result_proxy));
+ // Set the position of the assignment statement one character past the
+ // source code, such that it definitely is not in the source code range
+ // of an immediate inner scope. For example in
+ // eval('with ({x:1}) x = 1');
+ // the end position of the function generated for executing the eval code
+ // coincides with the end of the with scope which is the position of '1'.
+ int position = function->end_position();
+ VariableProxy* result_proxy = new(zone) VariableProxy(
+ isolate, result->name(), false, position);
+ result_proxy->BindTo(result);
+ Statement* result_statement = new(zone) ReturnStatement(result_proxy);
+ result_statement->set_statement_pos(position);
+ body->Add(result_statement);
}
}
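The positioning fix is observable through eval's completion value, as in the example from the comment above:

    var r = eval('with ({x: 1}) x = 1;');   // r === 1
    // The synthetic return that carries this completion value is now
    // placed one character past the end of the source, so it can no
    // longer be attributed to the inner 'with' scope.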
diff --git a/deps/v8/src/runtime-profiler.cc b/deps/v8/src/runtime-profiler.cc
index 26d884610..eaa6e1560 100644
--- a/deps/v8/src/runtime-profiler.cc
+++ b/deps/v8/src/runtime-profiler.cc
@@ -35,6 +35,7 @@
#include "deoptimizer.h"
#include "execution.h"
#include "global-handles.h"
+#include "isolate-inl.h"
#include "mark-compact.h"
#include "platform.h"
#include "scopeinfo.h"
@@ -135,14 +136,13 @@ void RuntimeProfiler::AttemptOnStackReplacement(JSFunction* function) {
// Get the stack check stub code object to match against. We aren't
// prepared to generate it, but we don't expect to have to.
StackCheckStub check_stub;
- Object* check_code;
- MaybeObject* maybe_check_code = check_stub.TryGetCode();
- if (maybe_check_code->ToObject(&check_code)) {
+ Code* stack_check_code = NULL;
+ if (check_stub.FindCodeInCache(&stack_check_code)) {
Code* replacement_code =
isolate_->builtins()->builtin(Builtins::kOnStackReplacement);
Code* unoptimized_code = shared->code();
Deoptimizer::PatchStackCheckCode(unoptimized_code,
- Code::cast(check_code),
+ stack_check_code,
replacement_code);
}
}
@@ -338,7 +338,8 @@ void RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(Thread* thread) {
void RuntimeProfiler::RemoveDeadSamples() {
for (int i = 0; i < kSamplerWindowSize; i++) {
Object* function = sampler_window_[i];
- if (function != NULL && !HeapObject::cast(function)->IsMarked()) {
+ if (function != NULL &&
+ !Marking::MarkBitFrom(HeapObject::cast(function)).Get()) {
sampler_window_[i] = NULL;
}
}
diff --git a/deps/v8/src/runtime.cc b/deps/v8/src/runtime.cc
index 813f98f68..a2e569b31 100644
--- a/deps/v8/src/runtime.cc
+++ b/deps/v8/src/runtime.cc
@@ -42,6 +42,7 @@
#include "deoptimizer.h"
#include "execution.h"
#include "global-handles.h"
+#include "isolate-inl.h"
#include "jsregexp.h"
#include "json-parser.h"
#include "liveedit.h"
@@ -105,6 +106,27 @@ namespace internal {
type name = NumberTo##Type(obj);
+// Assert that the given argument has a valid value for a StrictModeFlag
+// and store it in a StrictModeFlag variable with the given name.
+#define CONVERT_STRICT_MODE_ARG(name, index) \
+ ASSERT(args[index]->IsSmi()); \
+ ASSERT(args.smi_at(index) == kStrictMode || \
+ args.smi_at(index) == kNonStrictMode); \
+ StrictModeFlag name = \
+ static_cast<StrictModeFlag>(args.smi_at(index));
+
+
+// Assert that the given argument has a valid value for a LanguageMode
+// and store it in a LanguageMode variable with the given name.
+#define CONVERT_LANGUAGE_MODE_ARG(name, index) \
+ ASSERT(args[index]->IsSmi()); \
+ ASSERT(args.smi_at(index) == CLASSIC_MODE || \
+ args.smi_at(index) == STRICT_MODE || \
+ args.smi_at(index) == EXTENDED_MODE); \
+ LanguageMode name = \
+ static_cast<LanguageMode>(args.smi_at(index));
+
+
MUST_USE_RESULT static MaybeObject* DeepCopyBoilerplate(Isolate* isolate,
JSObject* boilerplate) {
StackLimitCheck check(isolate);
@@ -177,6 +199,7 @@ MUST_USE_RESULT static MaybeObject* DeepCopyBoilerplate(Isolate* isolate,
// Pixel elements cannot be created using an object literal.
ASSERT(!copy->HasExternalArrayElements());
switch (copy->GetElementsKind()) {
+ case FAST_SMI_ONLY_ELEMENTS:
case FAST_ELEMENTS: {
FixedArray* elements = FixedArray::cast(copy->elements());
if (elements->map() == heap->fixed_cow_array_map()) {
@@ -189,6 +212,9 @@ MUST_USE_RESULT static MaybeObject* DeepCopyBoilerplate(Isolate* isolate,
} else {
for (int i = 0; i < elements->length(); i++) {
Object* value = elements->get(i);
+ ASSERT(value->IsSmi() ||
+ value->IsTheHole() ||
+ (copy->GetElementsKind() == FAST_ELEMENTS));
if (value->IsJSObject()) {
JSObject* js_object = JSObject::cast(value);
{ MaybeObject* maybe_result = DeepCopyBoilerplate(isolate,
@@ -240,18 +266,6 @@ MUST_USE_RESULT static MaybeObject* DeepCopyBoilerplate(Isolate* isolate,
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_CloneLiteralBoilerplate) {
- CONVERT_CHECKED(JSObject, boilerplate, args[0]);
- return DeepCopyBoilerplate(isolate, boilerplate);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_CloneShallowLiteralBoilerplate) {
- CONVERT_CHECKED(JSObject, boilerplate, args[0]);
- return isolate->heap()->CopyJSObject(boilerplate);
-}
-
-
static Handle<Map> ComputeObjectLiteralMap(
Handle<Context> context,
Handle<FixedArray> constant_properties,
@@ -417,6 +431,9 @@ static Handle<Object> CreateObjectLiteralBoilerplate(
}
+static const int kSmiOnlyLiteralMinimumLength = 1024;
+
+
static Handle<Object> CreateArrayLiteralBoilerplate(
Isolate* isolate,
Handle<FixedArray> literals,
@@ -424,37 +441,77 @@ static Handle<Object> CreateArrayLiteralBoilerplate(
// Create the JSArray.
Handle<JSFunction> constructor(
JSFunction::GlobalContextFromLiterals(*literals)->array_function());
- Handle<Object> object = isolate->factory()->NewJSObject(constructor);
-
- const bool is_cow =
- (elements->map() == isolate->heap()->fixed_cow_array_map());
- Handle<FixedArray> copied_elements =
- is_cow ? elements : isolate->factory()->CopyFixedArray(elements);
-
- Handle<FixedArray> content = Handle<FixedArray>::cast(copied_elements);
- if (is_cow) {
-#ifdef DEBUG
- // Copy-on-write arrays must be shallow (and simple).
- for (int i = 0; i < content->length(); i++) {
- ASSERT(!content->get(i)->IsFixedArray());
- }
-#endif
+ Handle<JSArray> object =
+ Handle<JSArray>::cast(isolate->factory()->NewJSObject(constructor));
+
+ ElementsKind constant_elements_kind =
+ static_cast<ElementsKind>(Smi::cast(elements->get(0))->value());
+ Handle<FixedArrayBase> constant_elements_values(
+ FixedArrayBase::cast(elements->get(1)));
+
+ ASSERT(FLAG_smi_only_arrays || constant_elements_kind == FAST_ELEMENTS ||
+ constant_elements_kind == FAST_SMI_ONLY_ELEMENTS);
+ bool allow_literal_kind_transition = FLAG_smi_only_arrays &&
+ constant_elements_kind > object->GetElementsKind();
+
+ if (!FLAG_smi_only_arrays &&
+ constant_elements_values->length() > kSmiOnlyLiteralMinimumLength &&
+ constant_elements_kind != object->GetElementsKind()) {
+ allow_literal_kind_transition = true;
+ }
+
+  // If the ElementsKind of the constant values of the array literal is less
+ // specific than the ElementsKind of the boilerplate array object, change the
+ // boilerplate array object's map to reflect that kind.
+ if (allow_literal_kind_transition) {
+ Handle<Map> transitioned_array_map =
+ isolate->factory()->GetElementsTransitionMap(object,
+ constant_elements_kind);
+ object->set_map(*transitioned_array_map);
+ }
+
+ Handle<FixedArrayBase> copied_elements_values;
+ if (constant_elements_kind == FAST_DOUBLE_ELEMENTS) {
+ ASSERT(FLAG_smi_only_arrays);
+ copied_elements_values = isolate->factory()->CopyFixedDoubleArray(
+ Handle<FixedDoubleArray>::cast(constant_elements_values));
} else {
- for (int i = 0; i < content->length(); i++) {
- if (content->get(i)->IsFixedArray()) {
- // The value contains the constant_properties of a
- // simple object or array literal.
- Handle<FixedArray> fa(FixedArray::cast(content->get(i)));
- Handle<Object> result =
- CreateLiteralBoilerplate(isolate, literals, fa);
- if (result.is_null()) return result;
- content->set(i, *result);
+ ASSERT(constant_elements_kind == FAST_SMI_ONLY_ELEMENTS ||
+ constant_elements_kind == FAST_ELEMENTS);
+ const bool is_cow =
+ (constant_elements_values->map() ==
+ isolate->heap()->fixed_cow_array_map());
+ if (is_cow) {
+ copied_elements_values = constant_elements_values;
+#ifdef DEBUG
+ Handle<FixedArray> fixed_array_values =
+ Handle<FixedArray>::cast(copied_elements_values);
+ for (int i = 0; i < fixed_array_values->length(); i++) {
+ ASSERT(!fixed_array_values->get(i)->IsFixedArray());
+ }
+#endif
+ } else {
+ Handle<FixedArray> fixed_array_values =
+ Handle<FixedArray>::cast(constant_elements_values);
+ Handle<FixedArray> fixed_array_values_copy =
+ isolate->factory()->CopyFixedArray(fixed_array_values);
+ copied_elements_values = fixed_array_values_copy;
+ for (int i = 0; i < fixed_array_values->length(); i++) {
+ Object* current = fixed_array_values->get(i);
+ if (current->IsFixedArray()) {
+ // The value contains the constant_properties of a
+ // simple object or array literal.
+ Handle<FixedArray> fa(FixedArray::cast(fixed_array_values->get(i)));
+ Handle<Object> result =
+ CreateLiteralBoilerplate(isolate, literals, fa);
+ if (result.is_null()) return result;
+ fixed_array_values_copy->set(i, *result);
+ }
}
}
}
-
- // Set the elements.
- Handle<JSArray>::cast(object)->SetContent(*content);
+ object->set_elements(*copied_elements_values);
+ object->set_length(Smi::FromInt(copied_elements_values->length()));
return object;
}
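
As used above, the constant elements array passed to the boilerplate builder is a two-slot pair: slot 0 holds the ElementsKind as a Smi and slot 1 holds the constant backing store. A hedged unpacking sketch, mirroring the casts above (helper names hypothetical):

// Unpack the {kind, values} pair produced for an array literal.
static ElementsKind ConstantElementsKindOf(FixedArray* pair) {
  return static_cast<ElementsKind>(Smi::cast(pair->get(0))->value());
}

static FixedArrayBase* ConstantElementsValuesOf(FixedArray* pair) {
  return FixedArrayBase::cast(pair->get(1));
}
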
@@ -487,28 +544,6 @@ static Handle<Object> CreateLiteralBoilerplate(
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateArrayLiteralBoilerplate) {
- // Takes a FixedArray of elements containing the literal elements of
- // the array literal and produces JSArray with those elements.
- // Additionally takes the literals array of the surrounding function
- // which contains the context from which to get the Array function
- // to use for creating the array literal.
- HandleScope scope(isolate);
- ASSERT(args.length() == 3);
- CONVERT_ARG_CHECKED(FixedArray, literals, 0);
- CONVERT_SMI_ARG_CHECKED(literals_index, 1);
- CONVERT_ARG_CHECKED(FixedArray, elements, 2);
-
- Handle<Object> object =
- CreateArrayLiteralBoilerplate(isolate, literals, elements);
- if (object.is_null()) return Failure::Exception();
-
- // Update the functions literal and return the boilerplate.
- literals->set(literals_index, *object);
- return *object;
-}
-
-
RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateObjectLiteral) {
HandleScope scope(isolate);
ASSERT(args.length() == 4);
@@ -669,6 +704,82 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Fix) {
}
+RUNTIME_FUNCTION(MaybeObject*, Runtime_SetInitialize) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 1);
+ CONVERT_ARG_CHECKED(JSSet, holder, 0);
+ Handle<ObjectHashSet> table = isolate->factory()->NewObjectHashSet(0);
+ holder->set_table(*table);
+ return *holder;
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_SetAdd) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 2);
+ CONVERT_ARG_CHECKED(JSSet, holder, 0);
+ Handle<Object> key(args[1]);
+ Handle<ObjectHashSet> table(ObjectHashSet::cast(holder->table()));
+ table = ObjectHashSetAdd(table, key);
+ holder->set_table(*table);
+ return isolate->heap()->undefined_symbol();
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_SetHas) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 2);
+ CONVERT_ARG_CHECKED(JSSet, holder, 0);
+ Handle<Object> key(args[1]);
+ Handle<ObjectHashSet> table(ObjectHashSet::cast(holder->table()));
+ return isolate->heap()->ToBoolean(table->Contains(*key));
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_SetDelete) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 2);
+ CONVERT_ARG_CHECKED(JSSet, holder, 0);
+ Handle<Object> key(args[1]);
+ Handle<ObjectHashSet> table(ObjectHashSet::cast(holder->table()));
+ table = ObjectHashSetRemove(table, key);
+ holder->set_table(*table);
+ return isolate->heap()->undefined_symbol();
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_MapInitialize) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 1);
+ CONVERT_ARG_CHECKED(JSMap, holder, 0);
+ Handle<ObjectHashTable> table = isolate->factory()->NewObjectHashTable(0);
+ holder->set_table(*table);
+ return *holder;
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_MapGet) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 2);
+ CONVERT_ARG_CHECKED(JSMap, holder, 0);
+ Handle<Object> key(args[1]);
+ return ObjectHashTable::cast(holder->table())->Lookup(*key);
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_MapSet) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 3);
+ CONVERT_ARG_CHECKED(JSMap, holder, 0);
+ Handle<Object> key(args[1]);
+ Handle<Object> value(args[2]);
+ Handle<ObjectHashTable> table(ObjectHashTable::cast(holder->table()));
+ Handle<ObjectHashTable> new_table = PutIntoObjectHashTable(table, key, value);
+ holder->set_table(*new_table);
+ return *value;
+}
+
+
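
The Set/Map runtime entries above all follow one pattern: the hash tables grow by reallocation, so the (possibly new) table returned by the update helper must be written back into the holder. A minimal sketch of that write-back, assuming the surrounding runtime.cc context (helper name hypothetical):

// Hypothetical helper illustrating the write-back pattern used above.
static void ExampleSetAdd(Handle<JSSet> holder, Handle<Object> key) {
  Handle<ObjectHashSet> table(ObjectHashSet::cast(holder->table()));
  table = ObjectHashSetAdd(table, key);  // may return a larger, rehashed table
  holder->set_table(*table);             // store back; the old table is garbage
}
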
RUNTIME_FUNCTION(MaybeObject*, Runtime_WeakMapInitialize) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
@@ -685,10 +796,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_WeakMapGet) {
NoHandleAllocation ha;
ASSERT(args.length() == 2);
CONVERT_ARG_CHECKED(JSWeakMap, weakmap, 0);
- // TODO(mstarzinger): Currently we cannot use JSProxy objects as keys
- // because they cannot be cast to JSObject to get an identity hash code.
- CONVERT_ARG_CHECKED(JSObject, key, 1);
- return weakmap->table()->Lookup(*key);
+ CONVERT_ARG_CHECKED(JSReceiver, key, 1);
+ return ObjectHashTable::cast(weakmap->table())->Lookup(*key);
}
@@ -696,10 +805,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_WeakMapSet) {
HandleScope scope(isolate);
ASSERT(args.length() == 3);
CONVERT_ARG_CHECKED(JSWeakMap, weakmap, 0);
- // TODO(mstarzinger): See Runtime_WeakMapGet above.
- CONVERT_ARG_CHECKED(JSObject, key, 1);
+ CONVERT_ARG_CHECKED(JSReceiver, key, 1);
Handle<Object> value(args[2]);
- Handle<ObjectHashTable> table(weakmap->table());
+ Handle<ObjectHashTable> table(ObjectHashTable::cast(weakmap->table()));
Handle<ObjectHashTable> new_table = PutIntoObjectHashTable(table, key, value);
weakmap->set_table(*new_table);
return *value;
@@ -752,49 +860,6 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_IsInPrototypeChain) {
}
-// Inserts an object as the hidden prototype of another object.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_SetHiddenPrototype) {
- NoHandleAllocation ha;
- ASSERT(args.length() == 2);
- CONVERT_CHECKED(JSObject, jsobject, args[0]);
- CONVERT_CHECKED(JSObject, proto, args[1]);
-
- // Sanity checks. The old prototype (that we are replacing) could
- // theoretically be null, but if it is not null then check that we
- // didn't already install a hidden prototype here.
- RUNTIME_ASSERT(!jsobject->GetPrototype()->IsHeapObject() ||
- !HeapObject::cast(jsobject->GetPrototype())->map()->is_hidden_prototype());
- RUNTIME_ASSERT(!proto->map()->is_hidden_prototype());
-
- // Allocate up front before we start altering state in case we get a GC.
- Object* map_or_failure;
- { MaybeObject* maybe_map_or_failure = proto->map()->CopyDropTransitions();
- if (!maybe_map_or_failure->ToObject(&map_or_failure)) {
- return maybe_map_or_failure;
- }
- }
- Map* new_proto_map = Map::cast(map_or_failure);
-
- { MaybeObject* maybe_map_or_failure = jsobject->map()->CopyDropTransitions();
- if (!maybe_map_or_failure->ToObject(&map_or_failure)) {
- return maybe_map_or_failure;
- }
- }
- Map* new_map = Map::cast(map_or_failure);
-
- // Set proto's prototype to be the old prototype of the object.
- new_proto_map->set_prototype(jsobject->GetPrototype());
- proto->set_map(new_proto_map);
- new_proto_map->set_is_hidden_prototype();
-
- // Set the object's prototype to proto.
- new_map->set_prototype(proto);
- jsobject->set_map(new_map);
-
- return isolate->heap()->undefined_value();
-}
-
-
RUNTIME_FUNCTION(MaybeObject*, Runtime_IsConstructCall) {
NoHandleAllocation ha;
ASSERT(args.length() == 0);
@@ -929,7 +994,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetOwnProperty) {
HandleScope scope(isolate);
Handle<FixedArray> elms = isolate->factory()->NewFixedArray(DESCRIPTOR_SIZE);
Handle<JSArray> desc = isolate->factory()->NewJSArrayWithElements(elms);
- LookupResult result;
+ LookupResult result(isolate);
CONVERT_ARG_CHECKED(JSObject, obj, 0);
CONVERT_ARG_CHECKED(String, name, 1);
@@ -960,7 +1025,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetOwnProperty) {
case JSObject::INTERCEPTED_ELEMENT:
case JSObject::FAST_ELEMENT: {
elms->set(IS_ACCESSOR_INDEX, heap->false_value());
- Handle<Object> value = GetElement(obj, index);
+ Handle<Object> value = Object::GetElement(obj, index);
RETURN_IF_EMPTY_HANDLE(isolate, value);
elms->set(VALUE_INDEX, *value);
elms->set(WRITABLE_INDEX, heap->true_value());
@@ -1004,7 +1069,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetOwnProperty) {
case NORMAL: {
// This is a data property.
elms->set(IS_ACCESSOR_INDEX, heap->false_value());
- Handle<Object> value = GetElement(obj, index);
+ Handle<Object> value = Object::GetElement(obj, index);
ASSERT(!value.is_null());
elms->set(VALUE_INDEX, *value);
elms->set(WRITABLE_INDEX, heap->ToBoolean(!details.IsReadOnly()));
@@ -1208,49 +1273,20 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareGlobals) {
if (value->IsUndefined() || is_const_property) {
// Lookup the property in the global object, and don't set the
// value of the variable if the property is already there.
- LookupResult lookup;
+ LookupResult lookup(isolate);
global->Lookup(*name, &lookup);
if (lookup.IsProperty()) {
- // Determine if the property is local by comparing the holder
- // against the global object. The information will be used to
- // avoid throwing re-declaration errors when declaring
- // variables or constants that exist in the prototype chain.
- bool is_local = (*global == lookup.holder());
- // Get the property attributes and determine if the property is
- // read-only.
+ // We found an existing property. Unless it was an interceptor
+ // that claims the property is absent, skip this declaration.
+ if (lookup.type() != INTERCEPTOR) {
+ continue;
+ }
PropertyAttributes attributes = global->GetPropertyAttribute(*name);
- bool is_read_only = (attributes & READ_ONLY) != 0;
- if (lookup.type() == INTERCEPTOR) {
- // If the interceptor says the property is there, we
- // just return undefined without overwriting the property.
- // Otherwise, we continue to setting the property.
- if (attributes != ABSENT) {
- // Check if the existing property conflicts with regards to const.
- if (is_local && (is_read_only || is_const_property)) {
- const char* type = (is_read_only) ? "const" : "var";
- return ThrowRedeclarationError(isolate, type, name);
- };
- // The property already exists without conflicting: Go to
- // the next declaration.
- continue;
- }
- // Fall-through and introduce the absent property by using
- // SetProperty.
- } else {
- // For const properties, we treat a callback with this name
- // even in the prototype as a conflicting declaration.
- if (is_const_property && (lookup.type() == CALLBACKS)) {
- return ThrowRedeclarationError(isolate, "const", name);
- }
- // Otherwise, we check for locally conflicting declarations.
- if (is_local && (is_read_only || is_const_property)) {
- const char* type = (is_read_only) ? "const" : "var";
- return ThrowRedeclarationError(isolate, type, name);
- }
- // The property already exists without conflicting: Go to
- // the next declaration.
+ if (attributes != ABSENT) {
continue;
}
+ // Fall-through and introduce the absent property by using
+ // SetProperty.
}
} else {
is_function_declaration = true;
@@ -1264,32 +1300,18 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareGlobals) {
value = function;
}
- LookupResult lookup;
+ LookupResult lookup(isolate);
global->LocalLookup(*name, &lookup);
- // There's a local property that we need to overwrite because
- // we're either declaring a function or there's an interceptor
- // that claims the property is absent.
- //
- // Check for conflicting re-declarations. We cannot have
- // conflicting types in case of intercepted properties because
- // they are absent.
- if (lookup.IsProperty() &&
- (lookup.type() != INTERCEPTOR) &&
- (lookup.IsReadOnly() || is_const_property)) {
- const char* type = (lookup.IsReadOnly()) ? "const" : "var";
- return ThrowRedeclarationError(isolate, type, name);
- }
-
// Compute the property attributes. According to ECMA-262, section
// 13, page 71, the property must be read-only and
// non-deletable. However, neither SpiderMonkey nor KJS creates the
// property as read-only, so we don't either.
int attr = NONE;
- if ((flags & kDeclareGlobalsEvalFlag) == 0) {
+ if (!DeclareGlobalsEvalFlag::decode(flags)) {
attr |= DONT_DELETE;
}
- bool is_native = (flags & kDeclareGlobalsNativeFlag) != 0;
+ bool is_native = DeclareGlobalsNativeFlag::decode(flags);
if (is_const_property || (is_native && is_function_declaration)) {
attr |= READ_ONLY;
}
@@ -1314,15 +1336,15 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareGlobals) {
value,
attributes));
} else {
- StrictModeFlag strict_mode =
- ((flags & kDeclareGlobalsStrictModeFlag) != 0) ? kStrictMode
- : kNonStrictMode;
+ LanguageMode language_mode = DeclareGlobalsLanguageMode::decode(flags);
+ StrictModeFlag strict_mode_flag = (language_mode == CLASSIC_MODE)
+ ? kNonStrictMode : kStrictMode;
RETURN_IF_EMPTY_HANDLE(isolate,
SetProperty(global,
name,
value,
static_cast<PropertyAttributes>(attr),
- strict_mode));
+ strict_mode_flag));
}
}
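
The slimmed-down declaration logic above keeps a single special case: an interceptor that reports the property ABSENT falls through so SetProperty can introduce it, while every other existing property short-circuits the declaration. A hedged sketch of that test (helper name hypothetical):

// True if an interceptor is installed but claims the property is absent.
static bool InterceptorClaimsAbsent(GlobalObject* global,
                                    String* name,
                                    LookupResult* lookup) {
  return lookup->type() == INTERCEPTOR &&
         global->GetPropertyAttribute(name) == ABSENT;
}
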
@@ -1335,15 +1357,17 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareContextSlot) {
HandleScope scope(isolate);
ASSERT(args.length() == 4);
- CONVERT_ARG_CHECKED(Context, context, 0);
+ // Declarations are always made in a function or global context. In the
+ // case of eval code, the context passed is the context of the caller,
+ // which may be some nested context and not the declaration context.
+ RUNTIME_ASSERT(args[0]->IsContext());
+ Handle<Context> context(Context::cast(args[0])->declaration_context());
+
Handle<String> name(String::cast(args[1]));
PropertyAttributes mode = static_cast<PropertyAttributes>(args.smi_at(2));
RUNTIME_ASSERT(mode == READ_ONLY || mode == NONE);
Handle<Object> initial_value(args[3], isolate);
- // Declarations are always done in a function or global context.
- context = Handle<Context>(context->declaration_context());
-
int index;
PropertyAttributes attributes;
ContextLookupFlags flags = DONT_FOLLOW_CHAINS;
@@ -1352,9 +1376,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareContextSlot) {
context->Lookup(name, flags, &index, &attributes, &binding_flags);
if (attributes != ABSENT) {
- // The name was declared before; check for conflicting
- // re-declarations: This is similar to the code in parser.cc in
- // the AstBuildingParser::Declare function.
+ // The name was declared before; check for conflicting re-declarations.
if (((attributes & READ_ONLY) != 0) || (mode == READ_ONLY)) {
// Functions are not read-only.
ASSERT(mode != READ_ONLY || initial_value->IsTheHole());
@@ -1365,53 +1387,41 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareContextSlot) {
// Initialize it if necessary.
if (*initial_value != NULL) {
if (index >= 0) {
- // The variable or constant context slot should always be in
- // the function context or the arguments object.
- if (holder->IsContext()) {
- ASSERT(holder.is_identical_to(context));
- if (((attributes & READ_ONLY) == 0) ||
- context->get(index)->IsTheHole()) {
- context->set(index, *initial_value);
- }
- } else {
- // The holder is an arguments object.
- Handle<JSObject> arguments(Handle<JSObject>::cast(holder));
- Handle<Object> result = SetElement(arguments, index, initial_value,
- kNonStrictMode);
- if (result.is_null()) return Failure::Exception();
+ ASSERT(holder.is_identical_to(context));
+ if (((attributes & READ_ONLY) == 0) ||
+ context->get(index)->IsTheHole()) {
+ context->set(index, *initial_value);
}
} else {
- // Slow case: The property is not in the FixedArray part of the context.
- Handle<JSObject> context_ext = Handle<JSObject>::cast(holder);
+ // Slow case: The property is in the context extension object of a
+ // function context or the global object of a global context.
+ Handle<JSObject> object = Handle<JSObject>::cast(holder);
RETURN_IF_EMPTY_HANDLE(
isolate,
- SetProperty(context_ext, name, initial_value,
- mode, kNonStrictMode));
+ SetProperty(object, name, initial_value, mode, kNonStrictMode));
}
}
} else {
// The property is not in the function context. It needs to be
- // "declared" in the function context's extension context, or in the
- // global context.
- Handle<JSObject> context_ext;
+ // "declared" in the function context's extension context or as a
+    // property of the global object.
+ Handle<JSObject> object;
if (context->has_extension()) {
- // The function context's extension context exists - use it.
- context_ext = Handle<JSObject>(JSObject::cast(context->extension()));
+ object = Handle<JSObject>(JSObject::cast(context->extension()));
} else {
- // The function context's extension context does not exists - allocate
- // it.
- context_ext = isolate->factory()->NewJSObject(
+ // Context extension objects are allocated lazily.
+ ASSERT(context->IsFunctionContext());
+ object = isolate->factory()->NewJSObject(
isolate->context_extension_function());
- // And store it in the extension slot.
- context->set_extension(*context_ext);
+ context->set_extension(*object);
}
- ASSERT(*context_ext != NULL);
+ ASSERT(*object != NULL);
// Declare the property by setting it to the initial value if provided,
// or undefined, and use the correct mode (e.g. READ_ONLY attribute for
// constant declarations).
- ASSERT(!context_ext->HasLocalProperty(*name));
+ ASSERT(!object->HasLocalProperty(*name));
Handle<Object> value(isolate->heap()->undefined_value(), isolate);
if (*initial_value != NULL) value = initial_value;
// Declaring a const context slot is a conflicting declaration if
@@ -1421,15 +1431,15 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareContextSlot) {
// SetProperty and no setters are invoked for those since they are
// not real JSObjects.
if (initial_value->IsTheHole() &&
- !context_ext->IsJSContextExtensionObject()) {
- LookupResult lookup;
- context_ext->Lookup(*name, &lookup);
+ !object->IsJSContextExtensionObject()) {
+ LookupResult lookup(isolate);
+ object->Lookup(*name, &lookup);
if (lookup.IsProperty() && (lookup.type() == CALLBACKS)) {
return ThrowRedeclarationError(isolate, "const", name);
}
}
RETURN_IF_EMPTY_HANDLE(isolate,
- SetProperty(context_ext, name, value, mode,
+ SetProperty(object, name, value, mode,
kNonStrictMode));
}
@@ -1440,7 +1450,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareContextSlot) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeVarGlobal) {
NoHandleAllocation nha;
// args[0] == name
- // args[1] == strict_mode
+ // args[1] == language_mode
// args[2] == value (optional)
// Determine if we need to assign to the variable if it already
@@ -1451,8 +1461,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeVarGlobal) {
CONVERT_ARG_CHECKED(String, name, 0);
GlobalObject* global = isolate->context()->global();
RUNTIME_ASSERT(args[1]->IsSmi());
- StrictModeFlag strict_mode = static_cast<StrictModeFlag>(args.smi_at(1));
- ASSERT(strict_mode == kStrictMode || strict_mode == kNonStrictMode);
+ CONVERT_LANGUAGE_MODE_ARG(language_mode, 1);
+ StrictModeFlag strict_mode_flag = (language_mode == CLASSIC_MODE)
+ ? kNonStrictMode : kStrictMode;
// According to ECMA-262, section 12.2, page 62, the property must
// not be deletable.
@@ -1465,67 +1476,35 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeVarGlobal) {
// to assign to the property.
// Note that objects can have hidden prototypes, so we need to traverse
// the whole chain of hidden prototypes to do a 'local' lookup.
- JSObject* real_holder = global;
- LookupResult lookup;
- while (true) {
- real_holder->LocalLookup(*name, &lookup);
- if (lookup.IsProperty()) {
- // Determine if this is a redeclaration of something read-only.
- if (lookup.IsReadOnly()) {
- // If we found readonly property on one of hidden prototypes,
- // just shadow it.
- if (real_holder != isolate->context()->global()) break;
- return ThrowRedeclarationError(isolate, "const", name);
- }
-
- // Determine if this is a redeclaration of an intercepted read-only
- // property and figure out if the property exists at all.
- bool found = true;
- PropertyType type = lookup.type();
- if (type == INTERCEPTOR) {
- HandleScope handle_scope(isolate);
- Handle<JSObject> holder(real_holder);
- PropertyAttributes intercepted = holder->GetPropertyAttribute(*name);
- real_holder = *holder;
- if (intercepted == ABSENT) {
- // The interceptor claims the property isn't there. We need to
- // make sure to introduce it.
- found = false;
- } else if ((intercepted & READ_ONLY) != 0) {
- // The property is present, but read-only. Since we're trying to
- // overwrite it with a variable declaration we must throw a
- // re-declaration error. However if we found readonly property
- // on one of hidden prototypes, just shadow it.
- if (real_holder != isolate->context()->global()) break;
- return ThrowRedeclarationError(isolate, "const", name);
+ Object* object = global;
+ LookupResult lookup(isolate);
+ while (object->IsJSObject() &&
+ JSObject::cast(object)->map()->is_hidden_prototype()) {
+ JSObject* raw_holder = JSObject::cast(object);
+ raw_holder->LocalLookup(*name, &lookup);
+ if (lookup.IsProperty() && lookup.type() == INTERCEPTOR) {
+ HandleScope handle_scope(isolate);
+ Handle<JSObject> holder(raw_holder);
+ PropertyAttributes intercepted = holder->GetPropertyAttribute(*name);
+ // Update the raw pointer in case it's changed due to GC.
+ raw_holder = *holder;
+ if (intercepted != ABSENT && (intercepted & READ_ONLY) == 0) {
+ // Found an interceptor that's not read only.
+ if (assign) {
+ return raw_holder->SetProperty(
+ &lookup, *name, args[2], attributes, strict_mode_flag);
+ } else {
+ return isolate->heap()->undefined_value();
}
}
-
- if (found && !assign) {
- // The global property is there and we're not assigning any value
- // to it. Just return.
- return isolate->heap()->undefined_value();
- }
-
- // Assign the value (or undefined) to the property.
- Object* value = (assign) ? args[2] : isolate->heap()->undefined_value();
- return real_holder->SetProperty(
- &lookup, *name, value, attributes, strict_mode);
}
-
- Object* proto = real_holder->GetPrototype();
- if (!proto->IsJSObject())
- break;
-
- if (!JSObject::cast(proto)->map()->is_hidden_prototype())
- break;
-
- real_holder = JSObject::cast(proto);
+ object = raw_holder->GetPrototype();
}
+ // Reload global in case the loop above performed a GC.
global = isolate->context()->global();
if (assign) {
- return global->SetProperty(*name, args[2], attributes, strict_mode);
+ return global->SetProperty(*name, args[2], attributes, strict_mode_flag);
}
return isolate->heap()->undefined_value();
}
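
The loop above replaces the old open-coded prototype walk: it only advances through hidden prototypes (API-inserted objects that masquerade as part of their child) and reloads raw pointers after any handle-allocating call in case of GC. A hedged sketch of the walk in isolation (helper name hypothetical):

// Hypothetical helper: skip past the chain of hidden prototypes,
// mirroring the loop bounds in Runtime_InitializeVarGlobal above.
static Object* SkipHiddenPrototypes(Object* start) {
  Object* current = start;
  while (current->IsJSObject() &&
         JSObject::cast(current)->map()->is_hidden_prototype()) {
    current = JSObject::cast(current)->GetPrototype();
  }
  return current;
}
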
@@ -1552,7 +1531,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeConstGlobal) {
// add it as a local property even in case of callbacks in the
// prototype chain (this rules out using SetProperty).
// We use SetLocalPropertyIgnoreAttributes instead
- LookupResult lookup;
+ LookupResult lookup(isolate);
global->LocalLookup(*name, &lookup);
if (!lookup.IsProperty()) {
return global->SetLocalPropertyIgnoreAttributes(*name,
@@ -1560,25 +1539,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeConstGlobal) {
attributes);
}
- // Determine if this is a redeclaration of something not
- // read-only. In case the result is hidden behind an interceptor we
- // need to ask it for the property attributes.
if (!lookup.IsReadOnly()) {
- if (lookup.type() != INTERCEPTOR) {
- return ThrowRedeclarationError(isolate, "var", name);
- }
-
- PropertyAttributes intercepted = global->GetPropertyAttribute(*name);
-
- // Throw re-declaration error if the intercepted property is present
- // but not read-only.
- if (intercepted != ABSENT && (intercepted & READ_ONLY) == 0) {
- return ThrowRedeclarationError(isolate, "var", name);
- }
-
// Restore global object from context (in case of GC) and continue
- // with setting the value because the property is either absent or
- // read-only. We also have to do redo the lookup.
+ // with setting the value.
HandleScope handle_scope(isolate);
Handle<GlobalObject> global(isolate->context()->global());
@@ -1595,19 +1558,20 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeConstGlobal) {
return *value;
}
- // Set the value, but only we're assigning the initial value to a
+ // Set the value, but only if we're assigning the initial value to a
// constant. For now, we determine this by checking if the
// current value is the hole.
- // Strict mode handling not needed (const disallowed in strict mode).
+ // Strict mode handling not needed (const is disallowed in strict mode).
PropertyType type = lookup.type();
if (type == FIELD) {
FixedArray* properties = global->properties();
int index = lookup.GetFieldIndex();
- if (properties->get(index)->IsTheHole()) {
+ if (properties->get(index)->IsTheHole() || !lookup.IsReadOnly()) {
properties->set(index, *value);
}
} else if (type == NORMAL) {
- if (global->GetNormalizedProperty(&lookup)->IsTheHole()) {
+ if (global->GetNormalizedProperty(&lookup)->IsTheHole() ||
+ !lookup.IsReadOnly()) {
global->SetNormalizedProperty(&lookup, *value);
}
} else {
@@ -1627,11 +1591,12 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeConstContextSlot) {
Handle<Object> value(args[0], isolate);
ASSERT(!value->IsTheHole());
- CONVERT_ARG_CHECKED(Context, context, 1);
- Handle<String> name(String::cast(args[2]));
// Initializations are always done in a function or global context.
- context = Handle<Context>(context->declaration_context());
+ RUNTIME_ASSERT(args[1]->IsContext());
+ Handle<Context> context(Context::cast(args[1])->declaration_context());
+
+ Handle<String> name(String::cast(args[2]));
int index;
PropertyAttributes attributes;
@@ -1640,39 +1605,19 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeConstContextSlot) {
Handle<Object> holder =
context->Lookup(name, flags, &index, &attributes, &binding_flags);
- // In most situations, the property introduced by the const
- // declaration should be present in the context extension object.
- // However, because declaration and initialization are separate, the
- // property might have been deleted (if it was introduced by eval)
- // before we reach the initialization point.
- //
- // Example:
- //
- // function f() { eval("delete x; const x;"); }
- //
- // In that case, the initialization behaves like a normal assignment
- // to property 'x'.
if (index >= 0) {
- if (holder->IsContext()) {
- // Property was found in a context. Perform the assignment if we
- // found some non-constant or an uninitialized constant.
- Handle<Context> context = Handle<Context>::cast(holder);
- if ((attributes & READ_ONLY) == 0 || context->get(index)->IsTheHole()) {
- context->set(index, *value);
- }
- } else {
- // The holder is an arguments object.
- ASSERT((attributes & READ_ONLY) == 0);
- Handle<JSObject> arguments(Handle<JSObject>::cast(holder));
- RETURN_IF_EMPTY_HANDLE(
- isolate,
- SetElement(arguments, index, value, kNonStrictMode));
+ ASSERT(holder->IsContext());
+ // Property was found in a context. Perform the assignment if we
+ // found some non-constant or an uninitialized constant.
+ Handle<Context> context = Handle<Context>::cast(holder);
+ if ((attributes & READ_ONLY) == 0 || context->get(index)->IsTheHole()) {
+ context->set(index, *value);
}
return *value;
}
- // The property could not be found, we introduce it in the global
- // context.
+  // The property could not be found, so we introduce it as a property of
+  // the global object.
if (attributes == ABSENT) {
Handle<JSObject> global = Handle<JSObject>(
isolate->context()->global());
@@ -1683,29 +1628,41 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeConstContextSlot) {
return *value;
}
- // The property was present in a context extension object.
- Handle<JSObject> context_ext = Handle<JSObject>::cast(holder);
+ // The property was present in some function's context extension object,
+ // as a property on the subject of a with, or as a property of the global
+ // object.
+ //
+ // In most situations, eval-introduced consts should still be present in
+ // the context extension object. However, because declaration and
+ // initialization are separate, the property might have been deleted
+ // before we reach the initialization point.
+ //
+ // Example:
+ //
+ // function f() { eval("delete x; const x;"); }
+ //
+ // In that case, the initialization behaves like a normal assignment.
+ Handle<JSObject> object = Handle<JSObject>::cast(holder);
- if (*context_ext == context->extension()) {
- // This is the property that was introduced by the const
- // declaration. Set it if it hasn't been set before. NOTE: We
- // cannot use GetProperty() to get the current value as it
- // 'unholes' the value.
- LookupResult lookup;
- context_ext->LocalLookupRealNamedProperty(*name, &lookup);
+ if (*object == context->extension()) {
+ // This is the property that was introduced by the const declaration.
+ // Set it if it hasn't been set before. NOTE: We cannot use
+ // GetProperty() to get the current value as it 'unholes' the value.
+ LookupResult lookup(isolate);
+ object->LocalLookupRealNamedProperty(*name, &lookup);
ASSERT(lookup.IsProperty()); // the property was declared
ASSERT(lookup.IsReadOnly()); // and it was declared as read-only
PropertyType type = lookup.type();
if (type == FIELD) {
- FixedArray* properties = context_ext->properties();
+ FixedArray* properties = object->properties();
int index = lookup.GetFieldIndex();
if (properties->get(index)->IsTheHole()) {
properties->set(index, *value);
}
} else if (type == NORMAL) {
- if (context_ext->GetNormalizedProperty(&lookup)->IsTheHole()) {
- context_ext->SetNormalizedProperty(&lookup, *value);
+ if (object->GetNormalizedProperty(&lookup)->IsTheHole()) {
+ object->SetNormalizedProperty(&lookup, *value);
}
} else {
// We should not reach here. Any real, named property should be
@@ -1713,13 +1670,13 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeConstContextSlot) {
UNREACHABLE();
}
} else {
- // The property was found in a different context extension object.
- // Set it if it is not a read-only property.
+ // The property was found on some other object. Set it if it is not a
+ // read-only property.
if ((attributes & READ_ONLY) == 0) {
// Strict mode not needed (const disallowed in strict mode).
RETURN_IF_EMPTY_HANDLE(
isolate,
- SetProperty(context_ext, name, value, attributes, kNonStrictMode));
+ SetProperty(object, name, value, attributes, kNonStrictMode));
}
}
@@ -1818,14 +1775,17 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpInitializeObject) {
JSFunction::cast(constructor)->initial_map() == map) {
// If we still have the original map, set in-object properties directly.
regexp->InObjectPropertyAtPut(JSRegExp::kSourceFieldIndex, source);
- // TODO(lrn): Consider skipping write barrier on booleans as well.
- // Both true and false should be in oldspace at all times.
- regexp->InObjectPropertyAtPut(JSRegExp::kGlobalFieldIndex, global);
- regexp->InObjectPropertyAtPut(JSRegExp::kIgnoreCaseFieldIndex, ignoreCase);
- regexp->InObjectPropertyAtPut(JSRegExp::kMultilineFieldIndex, multiline);
+ // Both true and false are immovable immortal objects so no need for write
+ // barrier.
+ regexp->InObjectPropertyAtPut(
+ JSRegExp::kGlobalFieldIndex, global, SKIP_WRITE_BARRIER);
+ regexp->InObjectPropertyAtPut(
+ JSRegExp::kIgnoreCaseFieldIndex, ignoreCase, SKIP_WRITE_BARRIER);
+ regexp->InObjectPropertyAtPut(
+ JSRegExp::kMultilineFieldIndex, multiline, SKIP_WRITE_BARRIER);
regexp->InObjectPropertyAtPut(JSRegExp::kLastIndexFieldIndex,
Smi::FromInt(0),
- SKIP_WRITE_BARRIER);
+ SKIP_WRITE_BARRIER); // It's a Smi.
return regexp;
}
@@ -1914,7 +1874,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetDefaultReceiver) {
ASSERT(args.length() == 1);
CONVERT_CHECKED(JSFunction, function, args[0]);
SharedFunctionInfo* shared = function->shared();
- if (shared->native() || shared->strict_mode()) {
+ if (shared->native() || !shared->is_classic_mode()) {
return isolate->heap()->undefined_value();
}
// Returns undefined for strict or native functions, or
@@ -1994,15 +1954,6 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionMarkNameShouldPrintAsAnonymous) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionSetBound) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 1);
-
- CONVERT_CHECKED(JSFunction, fun, args[0]);
- fun->shared()->set_bound(true);
- return isolate->heap()->undefined_value();
-}
-
RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionRemovePrototype) {
NoHandleAllocation ha;
ASSERT(args.length() == 1);
@@ -2081,24 +2032,6 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionSetLength) {
}
-// Creates a local, readonly, property called length with the correct
-// length (when read by the user). This effectively overwrites the
-// interceptor used to normally provide the length.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_BoundFunctionSetLength) {
- NoHandleAllocation ha;
- ASSERT(args.length() == 2);
- CONVERT_CHECKED(JSFunction, fun, args[0]);
- CONVERT_CHECKED(Smi, length, args[1]);
- MaybeObject* maybe_name =
- isolate->heap()->AllocateStringFromAscii(CStrVector("length"));
- String* name;
- if (!maybe_name->To(&name)) return maybe_name;
- PropertyAttributes attr =
- static_cast<PropertyAttributes>(DONT_DELETE | DONT_ENUM | READ_ONLY);
- return fun->AddProperty(name, length, attr, kNonStrictMode);
-}
-
-
RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionSetPrototype) {
NoHandleAllocation ha;
ASSERT(args.length() == 2);
@@ -2201,13 +2134,12 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetCode) {
Handle<JSFunction> fun = Handle<JSFunction>::cast(code);
Handle<SharedFunctionInfo> shared(fun->shared());
- if (!EnsureCompiled(shared, KEEP_EXCEPTION)) {
+ if (!SharedFunctionInfo::EnsureCompiled(shared, KEEP_EXCEPTION)) {
return Failure::Exception();
}
// Since we don't store the source for this we should never
// optimize this.
shared->code()->set_optimizable(false);
-
// Set the code, scope info, formal parameter count,
// and the length of the target function.
target->shared()->set_code(shared->code());
@@ -2239,9 +2171,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetCode) {
literals->set(JSFunction::kLiteralGlobalContextIndex,
context->global_context());
}
- // It's okay to skip the write barrier here because the literals
- // are guaranteed to be in old space.
- target->set_literals(*literals, SKIP_WRITE_BARRIER);
+ target->set_literals(*literals);
target->set_next_function_link(isolate->heap()->undefined_value());
if (isolate->logger()->is_logging() || CpuProfiler::is_profiling(isolate)) {
@@ -2325,7 +2255,8 @@ class FixedArrayBuilder {
public:
explicit FixedArrayBuilder(Isolate* isolate, int initial_capacity)
: array_(isolate->factory()->NewFixedArrayWithHoles(initial_capacity)),
- length_(0) {
+ length_(0),
+ has_non_smi_elements_(false) {
// Require a non-zero initial size. Ensures that doubling the size to
// extend the array will work.
ASSERT(initial_capacity > 0);
@@ -2333,7 +2264,8 @@ class FixedArrayBuilder {
explicit FixedArrayBuilder(Handle<FixedArray> backing_store)
: array_(backing_store),
- length_(0) {
+ length_(0),
+ has_non_smi_elements_(false) {
// Require a non-zero initial size. Ensures that doubling the size to
// extend the array will work.
ASSERT(backing_store->length() > 0);
@@ -2361,12 +2293,15 @@ class FixedArrayBuilder {
}
void Add(Object* value) {
+ ASSERT(!value->IsSmi());
ASSERT(length_ < capacity());
array_->set(length_, value);
length_++;
+ has_non_smi_elements_ = true;
}
void Add(Smi* value) {
+ ASSERT(value->IsSmi());
ASSERT(length_ < capacity());
array_->set(length_, value);
length_++;
@@ -2391,7 +2326,7 @@ class FixedArrayBuilder {
}
Handle<JSArray> ToJSArray(Handle<JSArray> target_array) {
- target_array->set_elements(*array_);
+ FACTORY->SetContent(target_array, array_);
target_array->set_length(Smi::FromInt(length_));
return target_array;
}
@@ -2399,6 +2334,7 @@ class FixedArrayBuilder {
private:
Handle<FixedArray> array_;
int length_;
+ bool has_non_smi_elements_;
};
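
A hedged usage sketch of the two Add overloads, assuming the surrounding runtime.cc context: the Smi* overload keeps the backing store eligible for FAST_SMI_ONLY_ELEMENTS, while the Object* overload flips has_non_smi_elements_ (function name hypothetical):

static void ExampleBuilderUse(Isolate* isolate) {
  FixedArrayBuilder builder(isolate, 4);
  builder.Add(Smi::FromInt(1));                // Smi* overload; stays Smi-only
  builder.Add(isolate->heap()->true_value());  // Object* overload; flags non-Smi
}
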
@@ -2893,7 +2829,7 @@ void FindStringIndicesDispatch(Isolate* isolate,
}
} else {
Vector<const uc16> subject_vector = subject_content.ToUC16Vector();
- if (pattern->IsAsciiRepresentation()) {
+ if (pattern_content.IsAscii()) {
FindStringIndices(isolate,
subject_vector,
pattern_content.ToAsciiVector(),
@@ -3019,7 +2955,7 @@ MUST_USE_RESULT static MaybeObject* StringReplaceRegExpWithString(
// Shortcut for simple non-regexp global replacements
if (is_global &&
- regexp->TypeTag() == JSRegExp::ATOM &&
+ regexp_handle->TypeTag() == JSRegExp::ATOM &&
compiled_replacement.simple_hint()) {
if (subject_handle->HasOnlyAsciiChars() &&
replacement_handle->HasOnlyAsciiChars()) {
@@ -3242,6 +3178,9 @@ MUST_USE_RESULT static MaybeObject* StringReplaceRegExpWithEmptyString(
Address end_of_string = answer->address() + string_size;
isolate->heap()->CreateFillerObjectAt(end_of_string, delta);
+ if (Marking::IsBlack(Marking::MarkBitFrom(*answer))) {
+ MemoryChunk::IncrementLiveBytes(answer->address(), -delta);
+ }
return *answer;
}
@@ -4001,13 +3940,13 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToRadixString) {
// Slow case.
CONVERT_DOUBLE_ARG_CHECKED(value, 0);
if (isnan(value)) {
- return isolate->heap()->AllocateStringFromAscii(CStrVector("NaN"));
+ return *isolate->factory()->nan_symbol();
}
if (isinf(value)) {
if (value < 0) {
- return isolate->heap()->AllocateStringFromAscii(CStrVector("-Infinity"));
+ return *isolate->factory()->minus_infinity_symbol();
}
- return isolate->heap()->AllocateStringFromAscii(CStrVector("Infinity"));
+ return *isolate->factory()->infinity_symbol();
}
char* str = DoubleToRadixCString(value, radix);
MaybeObject* result =
@@ -4023,13 +3962,13 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToFixed) {
CONVERT_DOUBLE_ARG_CHECKED(value, 0);
if (isnan(value)) {
- return isolate->heap()->AllocateStringFromAscii(CStrVector("NaN"));
+ return *isolate->factory()->nan_symbol();
}
if (isinf(value)) {
if (value < 0) {
- return isolate->heap()->AllocateStringFromAscii(CStrVector("-Infinity"));
+ return *isolate->factory()->minus_infinity_symbol();
}
- return isolate->heap()->AllocateStringFromAscii(CStrVector("Infinity"));
+ return *isolate->factory()->infinity_symbol();
}
CONVERT_DOUBLE_ARG_CHECKED(f_number, 1);
int f = FastD2I(f_number);
@@ -4048,13 +3987,13 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToExponential) {
CONVERT_DOUBLE_ARG_CHECKED(value, 0);
if (isnan(value)) {
- return isolate->heap()->AllocateStringFromAscii(CStrVector("NaN"));
+ return *isolate->factory()->nan_symbol();
}
if (isinf(value)) {
if (value < 0) {
- return isolate->heap()->AllocateStringFromAscii(CStrVector("-Infinity"));
+ return *isolate->factory()->minus_infinity_symbol();
}
- return isolate->heap()->AllocateStringFromAscii(CStrVector("Infinity"));
+ return *isolate->factory()->infinity_symbol();
}
CONVERT_DOUBLE_ARG_CHECKED(f_number, 1);
int f = FastD2I(f_number);
@@ -4073,13 +4012,13 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToPrecision) {
CONVERT_DOUBLE_ARG_CHECKED(value, 0);
if (isnan(value)) {
- return isolate->heap()->AllocateStringFromAscii(CStrVector("NaN"));
+ return *isolate->factory()->nan_symbol();
}
if (isinf(value)) {
if (value < 0) {
- return isolate->heap()->AllocateStringFromAscii(CStrVector("-Infinity"));
+ return *isolate->factory()->minus_infinity_symbol();
}
- return isolate->heap()->AllocateStringFromAscii(CStrVector("Infinity"));
+ return *isolate->factory()->infinity_symbol();
}
CONVERT_DOUBLE_ARG_CHECKED(f_number, 1);
int f = FastD2I(f_number);
@@ -4126,11 +4065,6 @@ MaybeObject* Runtime::GetElementOrCharAt(Isolate* isolate,
return prototype->GetElement(index);
}
- return GetElement(object, index);
-}
-
-
-MaybeObject* Runtime::GetElement(Handle<Object> object, uint32_t index) {
return object->GetElement(index);
}
@@ -4187,6 +4121,23 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetProperty) {
}
+MaybeObject* TransitionElements(Handle<Object> object,
+ ElementsKind to_kind,
+ Isolate* isolate) {
+ HandleScope scope(isolate);
+ if (!object->IsJSObject()) return isolate->ThrowIllegalOperation();
+ ElementsKind from_kind =
+ Handle<JSObject>::cast(object)->map()->elements_kind();
+ if (Map::IsValidElementsTransition(from_kind, to_kind)) {
+ Handle<Object> result =
+ TransitionElementsKind(Handle<JSObject>::cast(object), to_kind);
+ if (result.is_null()) return isolate->ThrowIllegalOperation();
+ return *result;
+ }
+ return isolate->ThrowIllegalOperation();
+}
+
+
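
TransitionElements only succeeds when Map::IsValidElementsTransition accepts the from/to pair; anything else surfaces as a thrown illegal operation. A hedged caller sketch (helper name hypothetical; the two Runtime_TransitionElements* wrappers later in this file are the real entry points):

// Attempt the widening to FAST_ELEMENTS and let an invalid transition
// surface as the thrown illegal operation.
static MaybeObject* ExampleWidenToObjectElements(Handle<Object> receiver,
                                                 Isolate* isolate) {
  return TransitionElements(receiver, FAST_ELEMENTS, isolate);
}
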
// KeyedStringGetProperty is called from KeyedLoadIC::GenerateGeneric.
RUNTIME_FUNCTION(MaybeObject*, Runtime_KeyedGetProperty) {
NoHandleAllocation ha;
@@ -4203,40 +4154,63 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_KeyedGetProperty) {
//
// Additionally, we need to make sure that we do not cache results
// for objects that require access checks.
- if (args[0]->IsJSObject() &&
- !args[0]->IsJSGlobalProxy() &&
- !args[0]->IsAccessCheckNeeded() &&
- args[1]->IsString()) {
- JSObject* receiver = JSObject::cast(args[0]);
- String* key = String::cast(args[1]);
- if (receiver->HasFastProperties()) {
- // Attempt to use lookup cache.
- Map* receiver_map = receiver->map();
- KeyedLookupCache* keyed_lookup_cache = isolate->keyed_lookup_cache();
- int offset = keyed_lookup_cache->Lookup(receiver_map, key);
- if (offset != -1) {
- Object* value = receiver->FastPropertyAt(offset);
- return value->IsTheHole() ? isolate->heap()->undefined_value() : value;
- }
- // Lookup cache miss. Perform lookup and update the cache if appropriate.
- LookupResult result;
- receiver->LocalLookup(key, &result);
- if (result.IsProperty() && result.type() == FIELD) {
- int offset = result.GetFieldIndex();
- keyed_lookup_cache->Update(receiver_map, key, offset);
- return receiver->FastPropertyAt(offset);
+ if (args[0]->IsJSObject()) {
+ if (!args[0]->IsJSGlobalProxy() &&
+ !args[0]->IsAccessCheckNeeded() &&
+ args[1]->IsString()) {
+ JSObject* receiver = JSObject::cast(args[0]);
+ String* key = String::cast(args[1]);
+ if (receiver->HasFastProperties()) {
+ // Attempt to use lookup cache.
+ Map* receiver_map = receiver->map();
+ KeyedLookupCache* keyed_lookup_cache = isolate->keyed_lookup_cache();
+ int offset = keyed_lookup_cache->Lookup(receiver_map, key);
+ if (offset != -1) {
+ Object* value = receiver->FastPropertyAt(offset);
+ return value->IsTheHole()
+ ? isolate->heap()->undefined_value()
+ : value;
+ }
+ // Lookup cache miss. Perform lookup and update the cache if
+ // appropriate.
+ LookupResult result(isolate);
+ receiver->LocalLookup(key, &result);
+ if (result.IsProperty() && result.type() == FIELD) {
+ int offset = result.GetFieldIndex();
+ keyed_lookup_cache->Update(receiver_map, key, offset);
+ return receiver->FastPropertyAt(offset);
+ }
+ } else {
+ // Attempt dictionary lookup.
+ StringDictionary* dictionary = receiver->property_dictionary();
+ int entry = dictionary->FindEntry(key);
+ if ((entry != StringDictionary::kNotFound) &&
+ (dictionary->DetailsAt(entry).type() == NORMAL)) {
+ Object* value = dictionary->ValueAt(entry);
+ if (!receiver->IsGlobalObject()) return value;
+ value = JSGlobalPropertyCell::cast(value)->value();
+ if (!value->IsTheHole()) return value;
+ // If value is the hole do the general lookup.
+ }
}
- } else {
- // Attempt dictionary lookup.
- StringDictionary* dictionary = receiver->property_dictionary();
- int entry = dictionary->FindEntry(key);
- if ((entry != StringDictionary::kNotFound) &&
- (dictionary->DetailsAt(entry).type() == NORMAL)) {
- Object* value = dictionary->ValueAt(entry);
- if (!receiver->IsGlobalObject()) return value;
- value = JSGlobalPropertyCell::cast(value)->value();
- if (!value->IsTheHole()) return value;
- // If value is the hole do the general lookup.
+ } else if (FLAG_smi_only_arrays && args.at<Object>(1)->IsSmi()) {
+ // JSObject without a string key. If the key is a Smi, check for a
+ // definite out-of-bounds access to elements, which is a strong indicator
+ // that subsequent accesses will also call the runtime. Proactively
+ // transition elements to FAST_ELEMENTS to avoid excessive boxing of
+    // doubles for those future calls, in case the elements would otherwise
+    // become FAST_DOUBLE_ELEMENTS.
+ Handle<JSObject> js_object(args.at<JSObject>(0));
+ ElementsKind elements_kind = js_object->GetElementsKind();
+ if (elements_kind == FAST_SMI_ONLY_ELEMENTS ||
+ elements_kind == FAST_DOUBLE_ELEMENTS) {
+ FixedArrayBase* elements = js_object->elements();
+ if (args.at<Smi>(1)->value() >= elements->length()) {
+ MaybeObject* maybe_object = TransitionElements(js_object,
+ FAST_ELEMENTS,
+ isolate);
+ if (maybe_object->IsFailure()) return maybe_object;
+ }
}
}
} else if (args[0]->IsString() && args[1]->IsSmi()) {
@@ -4269,12 +4243,12 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DefineOrRedefineAccessorProperty) {
CONVERT_CHECKED(String, name, args[1]);
CONVERT_CHECKED(Smi, flag_setter, args[2]);
Object* fun = args[3];
- RUNTIME_ASSERT(fun->IsJSFunction() || fun->IsUndefined());
+ RUNTIME_ASSERT(fun->IsSpecFunction() || fun->IsUndefined());
CONVERT_CHECKED(Smi, flag_attr, args[4]);
int unchecked = flag_attr->value();
RUNTIME_ASSERT((unchecked & ~(READ_ONLY | DONT_ENUM | DONT_DELETE)) == 0);
RUNTIME_ASSERT(!obj->IsNull());
- LookupResult result;
+ LookupResult result(isolate);
obj->LocalLookupRealNamedProperty(name, &result);
PropertyAttributes attr = static_cast<PropertyAttributes>(unchecked);
@@ -4316,11 +4290,11 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DefineOrRedefineDataProperty) {
uint32_t index;
bool is_element = name->AsArrayIndex(&index);
- // Special case for elements if any of the flags are true.
+ // Special case for elements if any of the flags might be involved.
// If elements are in fast case we always implicitly assume that:
// DONT_DELETE: false, DONT_ENUM: false, READ_ONLY: false.
- if (((unchecked & (DONT_DELETE | DONT_ENUM | READ_ONLY)) != 0) &&
- is_element) {
+ if (is_element && (attr != NONE ||
+ js_object->HasLocalElement(index) == JSObject::DICTIONARY_ELEMENT)) {
// Normalize the elements to enable attributes on the property.
if (js_object->IsJSGlobalProxy()) {
// We do not need to do access checks here since these has already
@@ -4358,7 +4332,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DefineOrRedefineDataProperty) {
return *obj_value;
}
- LookupResult result;
+ LookupResult result(isolate);
js_object->LocalLookupRealNamedProperty(*name, &result);
// To be compatible with safari we do not change the value on API objects
@@ -4437,6 +4411,14 @@ MaybeObject* Runtime::SetObjectProperty(Isolate* isolate,
return isolate->Throw(*error);
}
+ if (object->IsJSProxy()) {
+ bool has_pending_exception = false;
+ Handle<Object> name = Execution::ToString(key, &has_pending_exception);
+ if (has_pending_exception) return Failure::Exception();
+ return JSProxy::cast(*object)->SetProperty(
+ String::cast(*name), *value, attr, strict_mode);
+ }
+
// If the object isn't a JavaScript object, we ignore the store.
if (!object->IsJSObject()) return *value;
@@ -4556,7 +4538,7 @@ MaybeObject* Runtime::ForceDeleteObjectProperty(Isolate* isolate,
// Check if the given key is an array index.
uint32_t index;
- if (receiver->IsJSObject() && key->ToArrayIndex(&index)) {
+ if (key->ToArrayIndex(&index)) {
// In Firefox/SpiderMonkey, Safari and Opera you can access the
// characters of a string using [] notation. In the case of a
// String object we just need to redirect the deletion to the
@@ -4567,8 +4549,7 @@ MaybeObject* Runtime::ForceDeleteObjectProperty(Isolate* isolate,
return isolate->heap()->true_value();
}
- return JSObject::cast(*receiver)->DeleteElement(
- index, JSReceiver::FORCE_DELETION);
+ return receiver->DeleteElement(index, JSReceiver::FORCE_DELETION);
}
Handle<String> key_string;
@@ -4603,10 +4584,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetProperty) {
StrictModeFlag strict_mode = kNonStrictMode;
if (args.length() == 5) {
- CONVERT_SMI_ARG_CHECKED(strict_unchecked, 4);
- RUNTIME_ASSERT(strict_unchecked == kStrictMode ||
- strict_unchecked == kNonStrictMode);
- strict_mode = static_cast<StrictModeFlag>(strict_unchecked);
+ CONVERT_STRICT_MODE_ARG(strict_mode_flag, 4);
+ strict_mode = strict_mode_flag;
}
return Runtime::SetObjectProperty(isolate,
@@ -4618,6 +4597,22 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetProperty) {
}
+RUNTIME_FUNCTION(MaybeObject*, Runtime_TransitionElementsSmiToDouble) {
+ NoHandleAllocation ha;
+ RUNTIME_ASSERT(args.length() == 1);
+ Handle<Object> object = args.at<Object>(0);
+ return TransitionElements(object, FAST_DOUBLE_ELEMENTS, isolate);
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_TransitionElementsDoubleToObject) {
+ NoHandleAllocation ha;
+ RUNTIME_ASSERT(args.length() == 1);
+ Handle<Object> object = args.at<Object>(0);
+ return TransitionElements(object, FAST_ELEMENTS, isolate);
+}
+
+
// Set the native flag on the function.
// This is used to decide if we should transform null and undefined
// into the global object when doing call and apply.
@@ -4635,6 +4630,44 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetNativeFlag) {
}
+RUNTIME_FUNCTION(MaybeObject*, Runtime_StoreArrayLiteralElement) {
+ RUNTIME_ASSERT(args.length() == 5);
+ CONVERT_ARG_CHECKED(JSObject, object, 0);
+ CONVERT_SMI_ARG_CHECKED(store_index, 1);
+ Handle<Object> value = args.at<Object>(2);
+ CONVERT_ARG_CHECKED(FixedArray, literals, 3);
+ CONVERT_SMI_ARG_CHECKED(literal_index, 4);
+ HandleScope scope;
+
+ Object* raw_boilerplate_object = literals->get(literal_index);
+ Handle<JSArray> boilerplate_object(JSArray::cast(raw_boilerplate_object));
+#ifdef DEBUG
+ ElementsKind elements_kind = object->GetElementsKind();
+#endif
+ ASSERT(elements_kind <= FAST_DOUBLE_ELEMENTS);
+ // Smis should never trigger transitions.
+ ASSERT(!value->IsSmi());
+
+ if (value->IsNumber()) {
+ ASSERT(elements_kind == FAST_SMI_ONLY_ELEMENTS);
+ TransitionElementsKind(object, FAST_DOUBLE_ELEMENTS);
+ ASSERT(object->GetElementsKind() == FAST_DOUBLE_ELEMENTS);
+ FixedDoubleArray* double_array =
+ FixedDoubleArray::cast(object->elements());
+ HeapNumber* number = HeapNumber::cast(*value);
+ double_array->set(store_index, number->Number());
+ } else {
+ ASSERT(elements_kind == FAST_SMI_ONLY_ELEMENTS ||
+ elements_kind == FAST_DOUBLE_ELEMENTS);
+ TransitionElementsKind(object, FAST_ELEMENTS);
+ FixedArray* object_array =
+ FixedArray::cast(object->elements());
+ object_array->set(store_index, *value);
+ }
+ return *object;
+}
+
+
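
The two branches above encode the literal transition ladder: storing a heap number into a smi-only literal moves it to FAST_DOUBLE_ELEMENTS, and storing anything else moves it to FAST_ELEMENTS. A hedged classifier sketch mirroring that branch selection (helper hypothetical; Smis never reach here per the assert above):

// Hypothetical classifier mirroring the branch selection above.
static ElementsKind TargetKindForStoredValue(Object* value) {
  if (value->IsNumber()) return FAST_DOUBLE_ELEMENTS;  // heap number store
  return FAST_ELEMENTS;                                // any other non-Smi
}
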
// Set a local property, even if it is READ_ONLY. If the property does not
// exist, it will be added with attributes NONE.
RUNTIME_FUNCTION(MaybeObject*, Runtime_IgnoreAttributesAndSetProperty) {
@@ -4664,8 +4697,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DeleteProperty) {
CONVERT_CHECKED(JSReceiver, object, args[0]);
CONVERT_CHECKED(String, key, args[1]);
- CONVERT_SMI_ARG_CHECKED(strict, 2);
- return object->DeleteProperty(key, (strict == kStrictMode)
+ CONVERT_STRICT_MODE_ARG(strict_mode, 2);
+ return object->DeleteProperty(key, (strict_mode == kStrictMode)
? JSReceiver::STRICT_DELETION
: JSReceiver::NORMAL_DELETION);
}
@@ -4730,29 +4763,24 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_HasLocalProperty) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_HasProperty) {
NoHandleAllocation na;
ASSERT(args.length() == 2);
+ CONVERT_CHECKED(JSReceiver, receiver, args[0]);
+ CONVERT_CHECKED(String, key, args[1]);
- // Only JS receivers can have properties.
- if (args[0]->IsJSReceiver()) {
- JSReceiver* receiver = JSReceiver::cast(args[0]);
- CONVERT_CHECKED(String, key, args[1]);
- if (receiver->HasProperty(key)) return isolate->heap()->true_value();
- }
- return isolate->heap()->false_value();
+ bool result = receiver->HasProperty(key);
+ if (isolate->has_pending_exception()) return Failure::Exception();
+ return isolate->heap()->ToBoolean(result);
}
RUNTIME_FUNCTION(MaybeObject*, Runtime_HasElement) {
NoHandleAllocation na;
ASSERT(args.length() == 2);
+ CONVERT_CHECKED(JSReceiver, receiver, args[0]);
+ CONVERT_CHECKED(Smi, index, args[1]);
- // Only JS objects can have elements.
- if (args[0]->IsJSObject()) {
- JSObject* object = JSObject::cast(args[0]);
- CONVERT_CHECKED(Smi, index_obj, args[1]);
- uint32_t index = index_obj->value();
- if (object->HasElement(index)) return isolate->heap()->true_value();
- }
- return isolate->heap()->false_value();
+ bool result = receiver->HasElement(index->value());
+ if (isolate->has_pending_exception()) return Failure::Exception();
+ return isolate->heap()->ToBoolean(result);
}
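
Both rewritten predicates above share one new obligation: with proxies, HasProperty and HasElement can run arbitrary trap code, so a pending exception must be checked before the boolean is materialized. The pattern, isolated as a hedged sketch (helper name hypothetical):

// Hypothetical helper showing the exception-aware boolean return.
static MaybeObject* ToBooleanChecked(Isolate* isolate, bool result) {
  if (isolate->has_pending_exception()) return Failure::Exception();
  return isolate->heap()->ToBoolean(result);
}
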
@@ -4765,7 +4793,37 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_IsPropertyEnumerable) {
uint32_t index;
if (key->AsArrayIndex(&index)) {
- return isolate->heap()->ToBoolean(object->HasElement(index));
+ JSObject::LocalElementType type = object->HasLocalElement(index);
+ switch (type) {
+ case JSObject::UNDEFINED_ELEMENT:
+ case JSObject::STRING_CHARACTER_ELEMENT:
+ return isolate->heap()->false_value();
+ case JSObject::INTERCEPTED_ELEMENT:
+ case JSObject::FAST_ELEMENT:
+ return isolate->heap()->true_value();
+ case JSObject::DICTIONARY_ELEMENT: {
+ if (object->IsJSGlobalProxy()) {
+ Object* proto = object->GetPrototype();
+ if (proto->IsNull()) {
+ return isolate->heap()->false_value();
+ }
+ ASSERT(proto->IsJSGlobalObject());
+ object = JSObject::cast(proto);
+ }
+ FixedArray* elements = FixedArray::cast(object->elements());
+ NumberDictionary* dictionary = NULL;
+ if (elements->map() ==
+ isolate->heap()->non_strict_arguments_elements_map()) {
+ dictionary = NumberDictionary::cast(elements->get(1));
+ } else {
+ dictionary = NumberDictionary::cast(elements);
+ }
+ int entry = dictionary->FindEntry(index);
+ ASSERT(entry != NumberDictionary::kNotFound);
+ PropertyDetails details = dictionary->DetailsAt(entry);
+ return isolate->heap()->ToBoolean(!details.IsDontEnum());
+ }
+ }
}
PropertyAttributes att = object->GetLocalPropertyAttribute(key);
@@ -4776,8 +4834,11 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_IsPropertyEnumerable) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_GetPropertyNames) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
- CONVERT_ARG_CHECKED(JSObject, object, 0);
- return *GetKeysFor(object);
+ CONVERT_ARG_CHECKED(JSReceiver, object, 0);
+ bool threw = false;
+ Handle<JSArray> result = GetKeysFor(object, &threw);
+ if (threw) return Failure::Exception();
+ return *result;
}
@@ -4789,14 +4850,16 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetPropertyNames) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_GetPropertyNamesFast) {
ASSERT(args.length() == 1);
- CONVERT_CHECKED(JSObject, raw_object, args[0]);
+ CONVERT_CHECKED(JSReceiver, raw_object, args[0]);
if (raw_object->IsSimpleEnum()) return raw_object->map();
HandleScope scope(isolate);
- Handle<JSObject> object(raw_object);
- Handle<FixedArray> content = GetKeysInFixedArrayFor(object,
- INCLUDE_PROTOS);
+ Handle<JSReceiver> object(raw_object);
+ bool threw = false;
+ Handle<FixedArray> content =
+ GetKeysInFixedArrayFor(object, INCLUDE_PROTOS, &threw);
+ if (threw) return Failure::Exception();
// Test again, since cache may have been built by preceding call.
if (object->IsSimpleEnum()) return object->map();
@@ -4993,8 +5056,11 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_LocalKeys) {
object = Handle<JSObject>::cast(proto);
}
- Handle<FixedArray> contents = GetKeysInFixedArrayFor(object,
- LOCAL_ONLY);
+ bool threw = false;
+ Handle<FixedArray> contents =
+ GetKeysInFixedArrayFor(object, LOCAL_ONLY, &threw);
+ if (threw) return Failure::Exception();
+
// Some fast paths through GetKeysInFixedArrayFor reuse a cached
// property array and since the result is mutable we have to create
// a fresh clone on each invocation.
@@ -5058,7 +5124,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetArgumentsProperty) {
if (key->Equals(isolate->heap()->callee_symbol())) {
Object* function = frame->function();
if (function->IsJSFunction() &&
- JSFunction::cast(function)->shared()->strict_mode()) {
+ !JSFunction::cast(function)->shared()->is_classic_mode()) {
return isolate->Throw(*isolate->factory()->NewTypeError(
"strict_arguments_callee", HandleVector<Object>(NULL, 0)));
}
@@ -5579,7 +5645,7 @@ static MaybeObject* SlowQuoteJsonString(Isolate* isolate,
StringType* new_string = StringType::cast(new_object);
Char* write_cursor = reinterpret_cast<Char*>(
- new_string->address() + SeqAsciiString::kHeaderSize);
+ new_string->address() + SeqString::kHeaderSize);
if (comma) *(write_cursor++) = ',';
*(write_cursor++) = '"';
@@ -5667,16 +5733,15 @@ static MaybeObject* QuoteJsonString(Isolate* isolate,
StringType* new_string = StringType::cast(new_object);
ASSERT(isolate->heap()->new_space()->Contains(new_string));
- STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
Char* write_cursor = reinterpret_cast<Char*>(
- new_string->address() + SeqAsciiString::kHeaderSize);
+ new_string->address() + SeqString::kHeaderSize);
if (comma) *(write_cursor++) = ',';
write_cursor = WriteQuoteJsonString<Char, Char>(isolate,
write_cursor,
characters);
int final_length = static_cast<int>(
write_cursor - reinterpret_cast<Char*>(
- new_string->address() + SeqAsciiString::kHeaderSize));
+ new_string->address() + SeqString::kHeaderSize));
isolate->heap()->new_space()->
template ShrinkStringAtAllocationBoundary<StringType>(
new_string, final_length);
@@ -5754,9 +5819,8 @@ static MaybeObject* QuoteJsonStringArray(Isolate* isolate,
StringType* new_string = StringType::cast(new_object);
ASSERT(isolate->heap()->new_space()->Contains(new_string));
- STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
Char* write_cursor = reinterpret_cast<Char*>(
- new_string->address() + SeqAsciiString::kHeaderSize);
+ new_string->address() + SeqString::kHeaderSize);
*(write_cursor++) = '[';
for (int i = 0; i < length; i++) {
if (i != 0) *(write_cursor++) = ',';
@@ -5777,7 +5841,7 @@ static MaybeObject* QuoteJsonStringArray(Isolate* isolate,
int final_length = static_cast<int>(
write_cursor - reinterpret_cast<Char*>(
- new_string->address() + SeqAsciiString::kHeaderSize));
+ new_string->address() + SeqString::kHeaderSize));
isolate->heap()->new_space()->
template ShrinkStringAtAllocationBoundary<StringType>(
new_string, final_length);
@@ -6146,7 +6210,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringToUpperCase) {
static inline bool IsTrimWhiteSpace(unibrow::uchar c) {
- return unibrow::WhiteSpace::Is(c) || c == 0x200b;
+ return unibrow::WhiteSpace::Is(c) || c == 0x200b || c == 0xfeff;
}
@@ -6229,6 +6293,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringSplit) {
int part_count = indices.length();
Handle<JSArray> result = isolate->factory()->NewJSArray(part_count);
+ MaybeObject* maybe_result = result->EnsureCanContainNonSmiElements();
+ if (maybe_result->IsFailure()) return maybe_result;
result->set_length(Smi::FromInt(part_count));
ASSERT(result->HasFastElements());
@@ -6275,11 +6341,11 @@ static int CopyCachedAsciiCharsToArray(Heap* heap,
FixedArray* ascii_cache = heap->single_character_string_cache();
Object* undefined = heap->undefined_value();
int i;
+ WriteBarrierMode mode = elements->GetWriteBarrierMode(no_gc);
for (i = 0; i < length; ++i) {
Object* value = ascii_cache->get(chars[i]);
if (value == undefined) break;
- ASSERT(!heap->InNewSpace(value));
- elements->set(i, value, SKIP_WRITE_BARRIER);
+ elements->set(i, value, mode);
}
if (i < length) {
ASSERT(Smi::FromInt(0) == 0);
@@ -6603,6 +6669,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringBuilderConcat) {
// This assumption is used by the slice encoding in one or two smis.
ASSERT(Smi::kMaxValue >= String::kMaxLength);
+ MaybeObject* maybe_result = array->EnsureCanContainNonSmiElements();
+ if (maybe_result->IsFailure()) return maybe_result;
+
int special_length = special->length();
if (!array->HasFastElements()) {
return isolate->Throw(isolate->heap()->illegal_argument_symbol());
@@ -6830,7 +6899,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SparseJoinWithSeparator) {
NoHandleAllocation ha;
ASSERT(args.length() == 3);
CONVERT_CHECKED(JSArray, elements_array, args[0]);
- RUNTIME_ASSERT(elements_array->HasFastElements());
+ RUNTIME_ASSERT(elements_array->HasFastElements() ||
+ elements_array->HasFastSmiOnlyElements());
CONVERT_NUMBER_CHECKED(uint32_t, array_length, Uint32, args[1]);
CONVERT_CHECKED(String, separator, args[2]);
  // elements_array is a fast-mode JSArray of alternating positions
@@ -7434,7 +7504,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_tan) {
}
-static int MakeDay(int year, int month, int day) {
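+// Computes the day number of the first day of the given month; callers now
+// add (day - 1) themselves (see the ASSERTs further down).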
+static int MakeDay(int year, int month) {
static const int day_from_month[] = {0, 31, 59, 90, 120, 151,
181, 212, 243, 273, 304, 334};
static const int day_from_month_leap[] = {0, 31, 60, 91, 121, 152,
@@ -7471,23 +7541,22 @@ static int MakeDay(int year, int month, int day) {
year1 / 400 -
base_day;
- if (year % 4 || (year % 100 == 0 && year % 400 != 0)) {
- return day_from_year + day_from_month[month] + day - 1;
+ if ((year % 4 != 0) || (year % 100 == 0 && year % 400 != 0)) {
+ return day_from_year + day_from_month[month];
}
- return day_from_year + day_from_month_leap[month] + day - 1;
+ return day_from_year + day_from_month_leap[month];
}
RUNTIME_FUNCTION(MaybeObject*, Runtime_DateMakeDay) {
NoHandleAllocation ha;
- ASSERT(args.length() == 3);
+ ASSERT(args.length() == 2);
CONVERT_SMI_ARG_CHECKED(year, 0);
CONVERT_SMI_ARG_CHECKED(month, 1);
- CONVERT_SMI_ARG_CHECKED(date, 2);
- return Smi::FromInt(MakeDay(year, month, date));
+ return Smi::FromInt(MakeDay(year, month));
}
@@ -7716,7 +7785,7 @@ static inline void DateYMDFromTimeAfter1970(int date,
month = kMonthInYear[date];
day = kDayInYear[date];
- ASSERT(MakeDay(year, month, day) == save_date);
+ ASSERT(MakeDay(year, month) + day - 1 == save_date);
}
@@ -7730,7 +7799,7 @@ static inline void DateYMDFromTimeSlow(int date,
year = 400 * (date / kDaysIn400Years) - kYearsOffset;
date %= kDaysIn400Years;
- ASSERT(MakeDay(year, 0, 1) + date == save_date);
+ ASSERT(MakeDay(year, 0) + date == save_date);
date--;
int yd1 = date / kDaysIn100Years;
@@ -7753,8 +7822,8 @@ static inline void DateYMDFromTimeSlow(int date,
ASSERT(is_leap || (date >= 0));
ASSERT((date < 365) || (is_leap && (date < 366)));
ASSERT(is_leap == ((year % 4 == 0) && (year % 100 || (year % 400 == 0))));
- ASSERT(is_leap || ((MakeDay(year, 0, 1) + date) == save_date));
- ASSERT(!is_leap || ((MakeDay(year, 0, 1) + date + 1) == save_date));
+ ASSERT(is_leap || ((MakeDay(year, 0) + date) == save_date));
+ ASSERT(!is_leap || ((MakeDay(year, 0) + date + 1) == save_date));
if (is_leap) {
day = kDayInYear[2*365 + 1 + date];
@@ -7764,7 +7833,7 @@ static inline void DateYMDFromTimeSlow(int date,
month = kMonthInYear[date];
}
- ASSERT(MakeDay(year, month, day) == save_date);
+ ASSERT(MakeDay(year, month) + day - 1 == save_date);
}
@@ -7788,11 +7857,13 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DateYMDFromTime) {
int year, month, day;
DateYMDFromTime(static_cast<int>(floor(t / 86400000)), year, month, day);
- RUNTIME_ASSERT(res_array->elements()->map() ==
- isolate->heap()->fixed_array_map());
- FixedArray* elms = FixedArray::cast(res_array->elements());
- RUNTIME_ASSERT(elms->length() == 3);
+ FixedArrayBase* elms_base = FixedArrayBase::cast(res_array->elements());
+ RUNTIME_ASSERT(elms_base->length() == 3);
+ RUNTIME_ASSERT(res_array->HasFastTypeElements());
+ MaybeObject* maybe = res_array->EnsureWritableFastElements();
+ if (maybe->IsFailure()) return maybe;
+ FixedArray* elms = FixedArray::cast(res_array->elements());
elms->set(0, Smi::FromInt(year));
elms->set(1, Smi::FromInt(month));
elms->set(2, Smi::FromInt(day));
@@ -7846,14 +7917,14 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NewArgumentsFast) {
--index;
}
- ScopeInfo<> scope_info(callee->shared()->scope_info());
+ Handle<ScopeInfo> scope_info(callee->shared()->scope_info());
while (index >= 0) {
// Detect duplicate names to the right in the parameter list.
- Handle<String> name = scope_info.parameter_name(index);
- int context_slot_count = scope_info.number_of_context_slots();
+ Handle<String> name(scope_info->ParameterName(index));
+ int context_local_count = scope_info->ContextLocalCount();
bool duplicate = false;
for (int j = index + 1; j < parameter_count; ++j) {
- if (scope_info.parameter_name(j).is_identical_to(name)) {
+ if (scope_info->ParameterName(j) == *name) {
duplicate = true;
break;
}
@@ -7868,17 +7939,16 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NewArgumentsFast) {
// The context index goes in the parameter map with a hole in the
// arguments array.
int context_index = -1;
- for (int j = Context::MIN_CONTEXT_SLOTS;
- j < context_slot_count;
- ++j) {
- if (scope_info.context_slot_name(j).is_identical_to(name)) {
+ for (int j = 0; j < context_local_count; ++j) {
+ if (scope_info->ContextLocalName(j) == *name) {
context_index = j;
break;
}
}
ASSERT(context_index >= 0);
arguments->set_the_hole(index);
- parameter_map->set(index + 2, Smi::FromInt(context_index));
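+      // ContextLocalName indexes context locals from 0, so the stored slot
+      // index must be offset by the context header slots.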
+ parameter_map->set(index + 2, Smi::FromInt(
+ Context::MIN_CONTEXT_SLOTS + context_index));
}
--index;
@@ -7952,8 +8022,12 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NewClosure) {
}
-static SmartArrayPointer<Object**> GetNonBoundArguments(int bound_argc,
- int* total_argc) {
+// Find the arguments of the JavaScript function invocation that called
+// into C++ code. Collect these in a newly allocated array of handles (possibly
+// prefixed by a number of empty handles).
+static SmartArrayPointer<Handle<Object> > GetCallerArguments(
+ int prefix_argc,
+ int* total_argc) {
// Find frame containing arguments passed to the caller.
JavaScriptFrameIterator it;
JavaScriptFrame* frame = it.frame();
@@ -7968,11 +8042,12 @@ static SmartArrayPointer<Object**> GetNonBoundArguments(int bound_argc,
inlined_frame_index,
&args_slots);
- *total_argc = bound_argc + args_count;
- SmartArrayPointer<Object**> param_data(NewArray<Object**>(*total_argc));
+ *total_argc = prefix_argc + args_count;
+ SmartArrayPointer<Handle<Object> > param_data(
+ NewArray<Handle<Object> >(*total_argc));
for (int i = 0; i < args_count; i++) {
Handle<Object> val = args_slots[i].GetValue();
- param_data[bound_argc + i] = val.location();
+ param_data[prefix_argc + i] = val;
}
return param_data;
} else {
@@ -7980,48 +8055,131 @@ static SmartArrayPointer<Object**> GetNonBoundArguments(int bound_argc,
frame = it.frame();
int args_count = frame->ComputeParametersCount();
- *total_argc = bound_argc + args_count;
- SmartArrayPointer<Object**> param_data(NewArray<Object**>(*total_argc));
+ *total_argc = prefix_argc + args_count;
+ SmartArrayPointer<Handle<Object> > param_data(
+ NewArray<Handle<Object> >(*total_argc));
for (int i = 0; i < args_count; i++) {
Handle<Object> val = Handle<Object>(frame->GetParameter(i));
- param_data[bound_argc + i] = val.location();
+ param_data[prefix_argc + i] = val;
}
return param_data;
}
}
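+// Runtime support for Function.prototype.bind: records the bound callable,
+// receiver and bound arguments in a COW fixed array on the bound function.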
+RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionBindArguments) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 4);
+ CONVERT_ARG_CHECKED(JSFunction, bound_function, 0);
+ RUNTIME_ASSERT(args[3]->IsNumber());
+ Handle<Object> bindee = args.at<Object>(1);
+
+ // TODO(lrn): Create bound function in C++ code from premade shared info.
+ bound_function->shared()->set_bound(true);
+ // Get all arguments of calling function (Function.prototype.bind).
+ int argc = 0;
+ SmartArrayPointer<Handle<Object> > arguments = GetCallerArguments(0, &argc);
+ // Don't count the this-arg.
+ if (argc > 0) {
+ ASSERT(*arguments[0] == args[2]);
+ argc--;
+ } else {
+ ASSERT(args[2]->IsUndefined());
+ }
+ // Initialize array of bindings (function, this, and any existing arguments
+ // if the function was already bound).
+ Handle<FixedArray> new_bindings;
+ int i;
+ if (bindee->IsJSFunction() && JSFunction::cast(*bindee)->shared()->bound()) {
+ Handle<FixedArray> old_bindings(
+ JSFunction::cast(*bindee)->function_bindings());
+ new_bindings =
+ isolate->factory()->NewFixedArray(old_bindings->length() + argc);
+ bindee = Handle<Object>(old_bindings->get(JSFunction::kBoundFunctionIndex));
+ i = 0;
+ for (int n = old_bindings->length(); i < n; i++) {
+ new_bindings->set(i, old_bindings->get(i));
+ }
+ } else {
+ int array_size = JSFunction::kBoundArgumentsStartIndex + argc;
+ new_bindings = isolate->factory()->NewFixedArray(array_size);
+ new_bindings->set(JSFunction::kBoundFunctionIndex, *bindee);
+ new_bindings->set(JSFunction::kBoundThisIndex, args[2]);
+ i = 2;
+ }
+  // Copy arguments, skipping the first, which is "this_arg".
+ for (int j = 0; j < argc; j++, i++) {
+ new_bindings->set(i, *arguments[j + 1]);
+ }
+ new_bindings->set_map(isolate->heap()->fixed_cow_array_map());
+ bound_function->set_function_bindings(*new_bindings);
+
+ // Update length.
+ Handle<String> length_symbol = isolate->factory()->length_symbol();
+ Handle<Object> new_length(args.at<Object>(3));
+ PropertyAttributes attr =
+ static_cast<PropertyAttributes>(DONT_DELETE | DONT_ENUM | READ_ONLY);
+ ForceSetProperty(bound_function, length_symbol, new_length, attr);
+ return *bound_function;
+}
+
+
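+// Returns the bindings array [callable, this, bound args...] of a bound
+// function as a JSArray, or undefined if the callable is not bound.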
+RUNTIME_FUNCTION(MaybeObject*, Runtime_BoundFunctionGetBindings) {
+ HandleScope handles(isolate);
+ ASSERT(args.length() == 1);
+ CONVERT_ARG_CHECKED(JSReceiver, callable, 0);
+ if (callable->IsJSFunction()) {
+ Handle<JSFunction> function = Handle<JSFunction>::cast(callable);
+ if (function->shared()->bound()) {
+ Handle<FixedArray> bindings(function->function_bindings());
+ ASSERT(bindings->map() == isolate->heap()->fixed_cow_array_map());
+ return *isolate->factory()->NewJSArrayWithElements(bindings);
+ }
+ }
+ return isolate->heap()->undefined_value();
+}
+
+
RUNTIME_FUNCTION(MaybeObject*, Runtime_NewObjectFromBound) {
HandleScope scope(isolate);
- ASSERT(args.length() == 2);
+ ASSERT(args.length() == 1);
// First argument is a function to use as a constructor.
CONVERT_ARG_CHECKED(JSFunction, function, 0);
-
- // Second argument is either null or an array of bound arguments.
- Handle<FixedArray> bound_args;
- int bound_argc = 0;
- if (!args[1]->IsNull()) {
- CONVERT_ARG_CHECKED(JSArray, params, 1);
- RUNTIME_ASSERT(params->HasFastElements());
- bound_args = Handle<FixedArray>(FixedArray::cast(params->elements()));
- bound_argc = Smi::cast(params->length())->value();
- }
+ RUNTIME_ASSERT(function->shared()->bound());
+
+ // The argument is a bound function. Extract its bound arguments
+ // and callable.
+ Handle<FixedArray> bound_args =
+ Handle<FixedArray>(FixedArray::cast(function->function_bindings()));
+ int bound_argc = bound_args->length() - JSFunction::kBoundArgumentsStartIndex;
+ Handle<Object> bound_function(
+ JSReceiver::cast(bound_args->get(JSFunction::kBoundFunctionIndex)));
+ ASSERT(!bound_function->IsJSFunction() ||
+ !Handle<JSFunction>::cast(bound_function)->shared()->bound());
int total_argc = 0;
- SmartArrayPointer<Object**> param_data =
- GetNonBoundArguments(bound_argc, &total_argc);
+ SmartArrayPointer<Handle<Object> > param_data =
+ GetCallerArguments(bound_argc, &total_argc);
for (int i = 0; i < bound_argc; i++) {
- Handle<Object> val = Handle<Object>(bound_args->get(i));
- param_data[i] = val.location();
+ param_data[i] = Handle<Object>(bound_args->get(
+ JSFunction::kBoundArgumentsStartIndex + i));
}
+ if (!bound_function->IsJSFunction()) {
+ bool exception_thrown;
+ bound_function = Execution::TryGetConstructorDelegate(bound_function,
+ &exception_thrown);
+ if (exception_thrown) return Failure::Exception();
+ }
+ ASSERT(bound_function->IsJSFunction());
+
bool exception = false;
Handle<Object> result =
- Execution::New(function, total_argc, *param_data, &exception);
+ Execution::New(Handle<JSFunction>::cast(bound_function),
+ total_argc, *param_data, &exception);
if (exception) {
- return Failure::Exception();
+ return Failure::Exception();
}
-
ASSERT(!result.is_null());
return *result;
}
@@ -8034,12 +8192,9 @@ static void TrySettingInlineConstructStub(Isolate* isolate,
prototype = Handle<Object>(function->instance_prototype(), isolate);
}
if (function->shared()->CanGenerateInlineConstructor(*prototype)) {
- ConstructStubCompiler compiler;
- MaybeObject* code = compiler.CompileConstructStub(*function);
- if (!code->IsFailure()) {
- function->shared()->set_construct_stub(
- Code::cast(code->ToObjectUnchecked()));
- }
+ ConstructStubCompiler compiler(isolate);
+ Handle<Code> code = compiler.CompileConstructStub(function);
+ function->shared()->set_construct_stub(*code);
}
}
@@ -8098,9 +8253,11 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NewObject) {
// available. We cannot use EnsureCompiled because that forces a
// compilation through the shared function info which makes it
// impossible for us to optimize.
- Handle<SharedFunctionInfo> shared(function->shared(), isolate);
- if (!function->is_compiled()) CompileLazy(function, CLEAR_EXCEPTION);
+ if (!function->is_compiled()) {
+ JSFunction::CompileLazy(function, CLEAR_EXCEPTION);
+ }
+ Handle<SharedFunctionInfo> shared(function->shared(), isolate);
if (!function->has_initial_map() &&
shared->IsInobjectSlackTrackingInProgress()) {
// The tracking is already in progress for another function. We can only
@@ -8151,7 +8308,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_LazyCompile) {
// Compile the target function.
ASSERT(!function->is_compiled());
- if (!CompileLazy(function, KEEP_EXCEPTION)) {
+ if (!JSFunction::CompileLazy(function, KEEP_EXCEPTION)) {
return Failure::Exception();
}
@@ -8188,7 +8345,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_LazyRecompile) {
function->ReplaceCode(function->shared()->code());
return function->code();
}
- if (CompileOptimized(function, AstNode::kNoNumber, CLEAR_EXCEPTION)) {
+ if (JSFunction::CompileOptimized(function,
+ AstNode::kNoNumber,
+ CLEAR_EXCEPTION)) {
return function->code();
}
if (FLAG_trace_opt) {
@@ -8201,6 +8360,31 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_LazyRecompile) {
}
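+// ThreadVisitor that checks archived threads for optimized frames belonging
+// to the given function.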
+class ActivationsFinder : public ThreadVisitor {
+ public:
+ explicit ActivationsFinder(JSFunction* function)
+ : function_(function), has_activations_(false) {}
+
+ void VisitThread(Isolate* isolate, ThreadLocalTop* top) {
+ if (has_activations_) return;
+
+ for (JavaScriptFrameIterator it(isolate, top); !it.done(); it.Advance()) {
+ JavaScriptFrame* frame = it.frame();
+ if (frame->is_optimized() && frame->function() == function_) {
+ has_activations_ = true;
+ return;
+ }
+ }
+ }
+
+ bool has_activations() { return has_activations_; }
+
+ private:
+ JSFunction* function_;
+ bool has_activations_;
+};
+
+
RUNTIME_FUNCTION(MaybeObject*, Runtime_NotifyDeoptimized) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
@@ -8247,17 +8431,24 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NotifyDeoptimized) {
return isolate->heap()->undefined_value();
}
- // Count the number of optimized activations of the function.
- int activations = 0;
+ // Find other optimized activations of the function.
+ bool has_other_activations = false;
while (!it.done()) {
JavaScriptFrame* frame = it.frame();
if (frame->is_optimized() && frame->function() == *function) {
- activations++;
+ has_other_activations = true;
+ break;
}
it.Advance();
}
- if (activations == 0) {
+ if (!has_other_activations) {
+ ActivationsFinder activations_finder(*function);
+ isolate->thread_manager()->IterateArchivedThreads(&activations_finder);
+ has_other_activations = activations_finder.has_activations();
+ }
+
+ if (!has_other_activations) {
if (FLAG_trace_deopt) {
PrintF("[removing optimized code for: ");
function->PrintName();
@@ -8312,6 +8503,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_OptimizeFunctionOnNextCall) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_GetOptimizationStatus) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
+ // The least significant bit (after untagging) indicates whether the
+ // function is currently optimized, regardless of reason.
if (!V8::UseCrankshaft()) {
return Smi::FromInt(4); // 4 == "never".
}
@@ -8395,7 +8588,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CompileForOnStackReplacement) {
// Try to compile the optimized code. A true return value from
// CompileOptimized means that compilation succeeded, not necessarily
// that optimization succeeded.
- if (CompileOptimized(function, ast_id, CLEAR_EXCEPTION) &&
+ if (JSFunction::CompileOptimized(function, ast_id, CLEAR_EXCEPTION) &&
function->IsOptimized()) {
DeoptimizationInputData* data = DeoptimizationInputData::cast(
function->code()->deoptimization_data());
@@ -8452,6 +8645,42 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CheckIsBootstrapping) {
}
+RUNTIME_FUNCTION(MaybeObject*, Runtime_Call) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() >= 2);
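+  // The callable is passed as the last argument; args[0] is the receiver
+  // and the arguments in between are the call arguments.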
+ CONVERT_CHECKED(JSReceiver, fun, args[args.length() - 1]);
+ Object* receiver = args[0];
+ int argc = args.length() - 2;
+
+ // If there are too many arguments, allocate argv via malloc.
+ const int argv_small_size = 10;
+ Handle<Object> argv_small_buffer[argv_small_size];
+ SmartArrayPointer<Handle<Object> > argv_large_buffer;
+ Handle<Object>* argv = argv_small_buffer;
+ if (argc > argv_small_size) {
+ argv = new Handle<Object>[argc];
+ if (argv == NULL) return isolate->StackOverflow();
+ argv_large_buffer = SmartArrayPointer<Handle<Object> >(argv);
+ }
+
+ for (int i = 0; i < argc; ++i) {
+ MaybeObject* maybe = args[1 + i];
+ Object* object;
+ if (!maybe->To<Object>(&object)) return maybe;
+ argv[i] = Handle<Object>(object);
+ }
+
+ bool threw;
+ Handle<JSReceiver> hfun(fun);
+ Handle<Object> hreceiver(receiver);
+ Handle<Object> result =
+ Execution::Call(hfun, hreceiver, argc, argv, &threw, true);
+
+ if (threw) return Failure::Exception();
+ return *result;
+}
+
+
RUNTIME_FUNCTION(MaybeObject*, Runtime_Apply) {
HandleScope scope(isolate);
ASSERT(args.length() == 5);
@@ -8484,11 +8713,11 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Apply) {
argv[i] = Handle<Object>(object);
}
- bool threw = false;
+ bool threw;
Handle<JSReceiver> hfun(fun);
Handle<Object> hreceiver(receiver);
- Handle<Object> result = Execution::Call(
- hfun, hreceiver, argc, reinterpret_cast<Object***>(argv), &threw, true);
+ Handle<Object> result =
+ Execution::Call(hfun, hreceiver, argc, argv, &threw, true);
if (threw) return Failure::Exception();
return *result;
@@ -8516,7 +8745,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NewFunctionContext) {
ASSERT(args.length() == 1);
CONVERT_CHECKED(JSFunction, function, args[0]);
- int length = function->shared()->scope_info()->NumberOfContextSlots();
+ int length = function->shared()->scope_info()->ContextLength();
Object* result;
{ MaybeObject* maybe_result =
isolate->heap()->AllocateFunctionContext(length, function);
@@ -8602,7 +8831,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_PushCatchContext) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_PushBlockContext) {
NoHandleAllocation ha;
ASSERT(args.length() == 2);
- SerializedScopeInfo* scope_info = SerializedScopeInfo::cast(args[0]);
+ ScopeInfo* scope_info = ScopeInfo::cast(args[0]);
JSFunction* function;
if (args[1]->IsSmi()) {
// A smi sentinel indicates a context nested inside global code rather
@@ -8651,18 +8880,10 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DeleteContextSlot) {
}
// The slot was found in a JSObject, either a context extension object,
- // the global object, or an arguments object. Try to delete it
- // (respecting DONT_DELETE). For consistency with V8's usual behavior,
- // which allows deleting all parameters in functions that mention
- // 'arguments', we do this even for the case of slots found on an
- // arguments object. The slot was found on an arguments object if the
- // index is non-negative.
+ // the global object, or the subject of a with. Try to delete it
+ // (respecting DONT_DELETE).
Handle<JSObject> object = Handle<JSObject>::cast(holder);
- if (index >= 0) {
- return object->DeleteElement(index, JSReceiver::NORMAL_DELETION);
- } else {
- return object->DeleteProperty(*name, JSReceiver::NORMAL_DELETION);
- }
+ return object->DeleteProperty(*name, JSReceiver::NORMAL_DELETION);
}
@@ -8747,52 +8968,53 @@ static ObjectPair LoadContextSlotHelper(Arguments args,
&attributes,
&binding_flags);
- // If the index is non-negative, the slot has been found in a local
- // variable or a parameter. Read it from the context object or the
- // arguments object.
+ // If the index is non-negative, the slot has been found in a context.
if (index >= 0) {
- // If the "property" we were looking for is a local variable or an
- // argument in a context, the receiver is the global object; see
- // ECMA-262, 3rd., 10.1.6 and 10.2.3.
+ ASSERT(holder->IsContext());
+ // If the "property" we were looking for is a local variable, the
+ // receiver is the global object; see ECMA-262, 3rd., 10.1.6 and 10.2.3.
//
- // Use the hole as the receiver to signal that the receiver is
- // implicit and that the global receiver should be used.
+ // Use the hole as the receiver to signal that the receiver is implicit
+ // and that the global receiver should be used (as distinguished from an
+ // explicit receiver that happens to be a global object).
Handle<Object> receiver = isolate->factory()->the_hole_value();
- MaybeObject* value = (holder->IsContext())
- ? Context::cast(*holder)->get(index)
- : JSObject::cast(*holder)->GetElement(index);
+ Object* value = Context::cast(*holder)->get(index);
// Check for uninitialized bindings.
- if (holder->IsContext() &&
- binding_flags == MUTABLE_CHECK_INITIALIZED &&
- value->IsTheHole()) {
- Handle<Object> reference_error =
- isolate->factory()->NewReferenceError("not_defined",
- HandleVector(&name, 1));
- return MakePair(isolate->Throw(*reference_error), NULL);
- } else {
- return MakePair(Unhole(isolate->heap(), value, attributes), *receiver);
+ switch (binding_flags) {
+ case MUTABLE_CHECK_INITIALIZED:
+ case IMMUTABLE_CHECK_INITIALIZED_HARMONY:
+ if (value->IsTheHole()) {
+ Handle<Object> reference_error =
+ isolate->factory()->NewReferenceError("not_defined",
+ HandleVector(&name, 1));
+ return MakePair(isolate->Throw(*reference_error), NULL);
+ }
+ // FALLTHROUGH
+ case MUTABLE_IS_INITIALIZED:
+ case IMMUTABLE_IS_INITIALIZED:
+ case IMMUTABLE_IS_INITIALIZED_HARMONY:
+ ASSERT(!value->IsTheHole());
+ return MakePair(value, *receiver);
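+      // For legacy const, an uninitialized binding reads as undefined:
+      // Unhole turns the hole into undefined.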
+ case IMMUTABLE_CHECK_INITIALIZED:
+ return MakePair(Unhole(isolate->heap(), value, attributes), *receiver);
+ case MISSING_BINDING:
+ UNREACHABLE();
+ return MakePair(NULL, NULL);
}
}
- // If the holder is found, we read the property from it.
- if (!holder.is_null() && holder->IsJSObject()) {
- ASSERT(Handle<JSObject>::cast(holder)->HasProperty(*name));
- JSObject* object = JSObject::cast(*holder);
- Object* receiver;
- if (object->IsGlobalObject()) {
- receiver = GlobalObject::cast(object)->global_receiver();
- } else if (context->is_exception_holder(*holder)) {
- // Use the hole as the receiver to signal that the receiver is
- // implicit and that the global receiver should be used.
- receiver = isolate->heap()->the_hole_value();
- } else {
- receiver = ComputeReceiverForNonGlobal(isolate, object);
- }
-
+  // Otherwise, if the slot was found, the holder is a context extension
+  // object, the subject of a with, or a global object. We read the named
+ // property from it.
+ if (!holder.is_null()) {
+ Handle<JSObject> object = Handle<JSObject>::cast(holder);
+ ASSERT(object->HasProperty(*name));
// GetProperty below can cause GC.
- Handle<Object> receiver_handle(receiver);
+ Handle<Object> receiver_handle(object->IsGlobalObject()
+ ? GlobalObject::cast(*object)->global_receiver()
+ : ComputeReceiverForNonGlobal(isolate, *object));
- // No need to unhole the value here. This is taken care of by the
+ // No need to unhole the value here. This is taken care of by the
// GetProperty function.
MaybeObject* value = object->GetProperty(*name);
return MakePair(value, *receiver_handle);
@@ -8829,10 +9051,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StoreContextSlot) {
Handle<Object> value(args[0], isolate);
CONVERT_ARG_CHECKED(Context, context, 1);
CONVERT_ARG_CHECKED(String, name, 2);
- CONVERT_SMI_ARG_CHECKED(strict_unchecked, 3);
- RUNTIME_ASSERT(strict_unchecked == kStrictMode ||
- strict_unchecked == kNonStrictMode);
- StrictModeFlag strict_mode = static_cast<StrictModeFlag>(strict_unchecked);
+ CONVERT_LANGUAGE_MODE_ARG(language_mode, 3);
+ StrictModeFlag strict_mode = (language_mode == CLASSIC_MODE)
+ ? kNonStrictMode : kStrictMode;
int index;
PropertyAttributes attributes;
@@ -8845,45 +9066,37 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StoreContextSlot) {
&binding_flags);
if (index >= 0) {
- if (holder->IsContext()) {
- Handle<Context> context = Handle<Context>::cast(holder);
- if (binding_flags == MUTABLE_CHECK_INITIALIZED &&
- context->get(index)->IsTheHole()) {
- Handle<Object> error =
- isolate->factory()->NewReferenceError("not_defined",
- HandleVector(&name, 1));
- return isolate->Throw(*error);
- }
- // Ignore if read_only variable.
- if ((attributes & READ_ONLY) == 0) {
- // Context is a fixed array and set cannot fail.
- context->set(index, *value);
- } else if (strict_mode == kStrictMode) {
- // Setting read only property in strict mode.
- Handle<Object> error =
- isolate->factory()->NewTypeError("strict_cannot_assign",
- HandleVector(&name, 1));
- return isolate->Throw(*error);
- }
- } else {
- ASSERT((attributes & READ_ONLY) == 0);
- Handle<Object> result =
- SetElement(Handle<JSObject>::cast(holder), index, value, strict_mode);
- if (result.is_null()) {
- ASSERT(isolate->has_pending_exception());
- return Failure::Exception();
- }
+ // The property was found in a context slot.
+ Handle<Context> context = Handle<Context>::cast(holder);
+ if (binding_flags == MUTABLE_CHECK_INITIALIZED &&
+ context->get(index)->IsTheHole()) {
+ Handle<Object> error =
+ isolate->factory()->NewReferenceError("not_defined",
+ HandleVector(&name, 1));
+ return isolate->Throw(*error);
+ }
+    // Ignore the store if the variable is read-only.
+ if ((attributes & READ_ONLY) == 0) {
+ // Context is a fixed array and set cannot fail.
+ context->set(index, *value);
+ } else if (strict_mode == kStrictMode) {
+ // Setting read only property in strict mode.
+ Handle<Object> error =
+ isolate->factory()->NewTypeError("strict_cannot_assign",
+ HandleVector(&name, 1));
+ return isolate->Throw(*error);
}
return *value;
}
- // Slow case: The property is not in a FixedArray context.
- // It is either in an JSObject extension context or it was not found.
- Handle<JSObject> context_ext;
+ // Slow case: The property is not in a context slot. It is either in a
+ // context extension object, a property of the subject of a with, or a
+ // property of the global object.
+ Handle<JSObject> object;
if (!holder.is_null()) {
- // The property exists in the extension context.
- context_ext = Handle<JSObject>::cast(holder);
+ // The property exists on the holder.
+ object = Handle<JSObject>::cast(holder);
} else {
// The property was not found.
ASSERT(attributes == ABSENT);
@@ -8891,22 +9104,21 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StoreContextSlot) {
if (strict_mode == kStrictMode) {
// Throw in strict mode (assignment to undefined variable).
Handle<Object> error =
- isolate->factory()->NewReferenceError(
- "not_defined", HandleVector(&name, 1));
+ isolate->factory()->NewReferenceError(
+ "not_defined", HandleVector(&name, 1));
return isolate->Throw(*error);
}
- // In non-strict mode, the property is stored in the global context.
+ // In non-strict mode, the property is added to the global object.
attributes = NONE;
- context_ext = Handle<JSObject>(isolate->context()->global());
+ object = Handle<JSObject>(isolate->context()->global());
}
- // Set the property, but ignore if read_only variable on the context
- // extension object itself.
+  // Set the property if it's not read-only or doesn't yet exist.
if ((attributes & READ_ONLY) == 0 ||
- (context_ext->GetLocalPropertyAttribute(*name) == ABSENT)) {
+ (object->GetLocalPropertyAttribute(*name) == ABSENT)) {
RETURN_IF_EMPTY_HANDLE(
isolate,
- SetProperty(context_ext, name, value, NONE, strict_mode));
+ SetProperty(object, name, value, NONE, strict_mode));
} else if (strict_mode == kStrictMode && (attributes & READ_ONLY) != 0) {
// Setting read only property in strict mode.
Handle<Object> error =
@@ -8965,42 +9177,6 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StackGuard) {
}
-// NOTE: These PrintXXX functions are defined for all builds (not just
-// DEBUG builds) because we may want to be able to trace function
-// calls in all modes.
-static void PrintString(String* str) {
- // not uncommon to have empty strings
- if (str->length() > 0) {
- SmartArrayPointer<char> s =
- str->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
- PrintF("%s", *s);
- }
-}
-
-
-static void PrintObject(Object* obj) {
- if (obj->IsSmi()) {
- PrintF("%d", Smi::cast(obj)->value());
- } else if (obj->IsString() || obj->IsSymbol()) {
- PrintString(String::cast(obj));
- } else if (obj->IsNumber()) {
- PrintF("%g", obj->Number());
- } else if (obj->IsFailure()) {
- PrintF("<failure>");
- } else if (obj->IsUndefined()) {
- PrintF("<undefined>");
- } else if (obj->IsNull()) {
- PrintF("<null>");
- } else if (obj->IsTrue()) {
- PrintF("<true>");
- } else if (obj->IsFalse()) {
- PrintF("<false>");
- } else {
- PrintF("%p", reinterpret_cast<void*>(obj));
- }
-}
-
-
static int StackSize() {
int n = 0;
for (JavaScriptFrameIterator it; !it.done(); it.Advance()) n++;
@@ -9019,38 +9195,33 @@ static void PrintTransition(Object* result) {
}
if (result == NULL) {
- // constructor calls
- JavaScriptFrameIterator it;
- JavaScriptFrame* frame = it.frame();
- if (frame->IsConstructor()) PrintF("new ");
- // function name
- Object* fun = frame->function();
- if (fun->IsJSFunction()) {
- PrintObject(JSFunction::cast(fun)->shared()->name());
- } else {
- PrintObject(fun);
- }
- // function arguments
- // (we are intentionally only printing the actually
- // supplied parameters, not all parameters required)
- PrintF("(this=");
- PrintObject(frame->receiver());
- const int length = frame->ComputeParametersCount();
- for (int i = 0; i < length; i++) {
- PrintF(", ");
- PrintObject(frame->GetParameter(i));
- }
- PrintF(") {\n");
-
+ JavaScriptFrame::PrintTop(stdout, true, false);
+ PrintF(" {\n");
} else {
// function result
PrintF("} -> ");
- PrintObject(result);
+ result->ShortPrint();
PrintF("\n");
}
}
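+// Prints a trace line describing a transition of the given object's
+// elements from one ElementsKind to another.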
+RUNTIME_FUNCTION(MaybeObject*, Runtime_TraceElementsKindTransition) {
+ ASSERT(args.length() == 5);
+ CONVERT_ARG_CHECKED(JSObject, obj, 0);
+ CONVERT_SMI_ARG_CHECKED(from_kind, 1);
+ CONVERT_ARG_CHECKED(FixedArrayBase, from_elements, 2);
+ CONVERT_SMI_ARG_CHECKED(to_kind, 3);
+ CONVERT_ARG_CHECKED(FixedArrayBase, to_elements, 4);
+ NoHandleAllocation ha;
+ PrintF("*");
+ obj->PrintElementsTransition(stdout,
+ static_cast<ElementsKind>(from_kind), *from_elements,
+ static_cast<ElementsKind>(to_kind), *to_elements);
+ return isolate->heap()->undefined_value();
+}
+
+
RUNTIME_FUNCTION(MaybeObject*, Runtime_TraceEnter) {
ASSERT(args.length() == 0);
NoHandleAllocation ha;
@@ -9126,6 +9297,10 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DateParseString) {
FlattenString(str);
CONVERT_ARG_CHECKED(JSArray, output, 1);
+
+ MaybeObject* maybe_result_array =
+ output->EnsureCanContainNonSmiElements();
+ if (maybe_result_array->IsFailure()) return maybe_result_array;
RUNTIME_ASSERT(output->HasFastElements());
AssertNoAllocation no_allocation;
@@ -9245,10 +9420,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CompileString) {
}
// Compile source string in the global context.
- Handle<SharedFunctionInfo> shared = Compiler::CompileEval(source,
- context,
- true,
- kNonStrictMode);
+ Handle<SharedFunctionInfo> shared = Compiler::CompileEval(
+ source, context, true, CLASSIC_MODE, RelocInfo::kNoPosition);
if (shared.is_null()) return Failure::Exception();
Handle<JSFunction> fun =
isolate->factory()->NewFunctionFromSharedFunctionInfo(shared,
@@ -9261,7 +9434,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CompileString) {
static ObjectPair CompileGlobalEval(Isolate* isolate,
Handle<String> source,
Handle<Object> receiver,
- StrictModeFlag strict_mode) {
+ LanguageMode language_mode,
+ int scope_position) {
Handle<Context> context = Handle<Context>(isolate->context());
Handle<Context> global_context = Handle<Context>(context->global_context());
@@ -9279,7 +9453,8 @@ static ObjectPair CompileGlobalEval(Isolate* isolate,
source,
Handle<Context>(isolate->context()),
context->IsGlobalContext(),
- strict_mode);
+ language_mode,
+ scope_position);
if (shared.is_null()) return MakePair(Failure::Exception(), NULL);
Handle<JSFunction> compiled =
isolate->factory()->NewFunctionFromSharedFunctionInfo(
@@ -9289,91 +9464,28 @@ static ObjectPair CompileGlobalEval(Isolate* isolate,
RUNTIME_FUNCTION(ObjectPair, Runtime_ResolvePossiblyDirectEval) {
- ASSERT(args.length() == 4);
-
- HandleScope scope(isolate);
- Handle<Object> callee = args.at<Object>(0);
- Handle<Object> receiver; // Will be overwritten.
-
- // Compute the calling context.
- Handle<Context> context = Handle<Context>(isolate->context(), isolate);
-#ifdef DEBUG
- // Make sure Isolate::context() agrees with the old code that traversed
- // the stack frames to compute the context.
- StackFrameLocator locator;
- JavaScriptFrame* frame = locator.FindJavaScriptFrame(0);
- ASSERT(Context::cast(frame->context()) == *context);
-#endif
-
- // Find where the 'eval' symbol is bound. It is unaliased only if
- // it is bound in the global context.
- int index = -1;
- PropertyAttributes attributes = ABSENT;
- BindingFlags binding_flags;
- while (true) {
- receiver = context->Lookup(isolate->factory()->eval_symbol(),
- FOLLOW_PROTOTYPE_CHAIN,
- &index,
- &attributes,
- &binding_flags);
- // Stop search when eval is found or when the global context is
- // reached.
- if (attributes != ABSENT || context->IsGlobalContext()) break;
- context = Handle<Context>(context->previous(), isolate);
- }
-
- // If eval could not be resolved, it has been deleted and we need to
- // throw a reference error.
- if (attributes == ABSENT) {
- Handle<Object> name = isolate->factory()->eval_symbol();
- Handle<Object> reference_error =
- isolate->factory()->NewReferenceError("not_defined",
- HandleVector(&name, 1));
- return MakePair(isolate->Throw(*reference_error), NULL);
- }
-
- if (!context->IsGlobalContext()) {
- // 'eval' is not bound in the global context. Just call the function
- // with the given arguments. This is not necessarily the global eval.
- if (receiver->IsContext() || receiver->IsJSContextExtensionObject()) {
- receiver = isolate->factory()->the_hole_value();
- }
- return MakePair(*callee, *receiver);
- }
-
- // 'eval' is bound in the global context, but it may have been overwritten.
- // Compare it to the builtin 'GlobalEval' function to make sure.
- if (*callee != isolate->global_context()->global_eval_fun() ||
- !args[1]->IsString()) {
- return MakePair(*callee, isolate->heap()->the_hole_value());
- }
-
- ASSERT(args[3]->IsSmi());
- return CompileGlobalEval(isolate,
- args.at<String>(1),
- args.at<Object>(2),
- static_cast<StrictModeFlag>(args.smi_at(3)));
-}
-
-
-RUNTIME_FUNCTION(ObjectPair, Runtime_ResolvePossiblyDirectEvalNoLookup) {
- ASSERT(args.length() == 4);
+ ASSERT(args.length() == 5);
HandleScope scope(isolate);
Handle<Object> callee = args.at<Object>(0);
- // 'eval' is bound in the global context, but it may have been overwritten.
- // Compare it to the builtin 'GlobalEval' function to make sure.
+ // If "eval" didn't refer to the original GlobalEval, it's not a
+ // direct call to eval.
+  // (Even when it is a direct call, if the first argument isn't a string,
+  // execution defaults to an indirect call to eval, which simply returns
+  // the first argument unchanged.)
if (*callee != isolate->global_context()->global_eval_fun() ||
!args[1]->IsString()) {
return MakePair(*callee, isolate->heap()->the_hole_value());
}
- ASSERT(args[3]->IsSmi());
+ CONVERT_LANGUAGE_MODE_ARG(language_mode, 3);
+ ASSERT(args[4]->IsSmi());
return CompileGlobalEval(isolate,
args.at<String>(1),
args.at<Object>(2),
- static_cast<StrictModeFlag>(args.smi_at(3)));
+ language_mode,
+ args.smi_at(4));
}
@@ -9386,9 +9498,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetNewFunctionAttributes) {
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSFunction, func, 0);
- Handle<Map> map = func->shared()->strict_mode()
- ? isolate->strict_mode_function_instance_map()
- : isolate->function_instance_map();
+ Handle<Map> map = func->shared()->is_classic_mode()
+ ? isolate->function_instance_map()
+ : isolate->strict_mode_function_instance_map();
ASSERT(func->map()->instance_type() == map->instance_type());
ASSERT(func->map()->instance_size() == map->instance_size());
@@ -9426,7 +9538,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_PushIfAbsent) {
ASSERT(args.length() == 2);
CONVERT_CHECKED(JSArray, array, args[0]);
CONVERT_CHECKED(JSObject, element, args[1]);
- RUNTIME_ASSERT(array->HasFastElements());
+ RUNTIME_ASSERT(array->HasFastElements() || array->HasFastSmiOnlyElements());
int length = Smi::cast(array->length())->value();
FixedArray* elements = FixedArray::cast(array->elements());
for (int i = 0; i < length; i++) {
@@ -9509,9 +9621,11 @@ class ArrayConcatVisitor {
isolate_->factory()->NewNumber(static_cast<double>(index_offset_));
Handle<Map> map;
if (fast_elements_) {
- map = isolate_->factory()->GetFastElementsMap(Handle<Map>(array->map()));
+ map = isolate_->factory()->GetElementsTransitionMap(array,
+ FAST_ELEMENTS);
} else {
- map = isolate_->factory()->GetSlowElementsMap(Handle<Map>(array->map()));
+ map = isolate_->factory()->GetElementsTransitionMap(array,
+ DICTIONARY_ELEMENTS);
}
array->set_map(*map);
array->set_length(*length);
@@ -9566,6 +9680,7 @@ static uint32_t EstimateElementCount(Handle<JSArray> array) {
uint32_t length = static_cast<uint32_t>(array->length()->Number());
int element_count = 0;
switch (array->GetElementsKind()) {
+ case FAST_SMI_ONLY_ELEMENTS:
case FAST_ELEMENTS: {
// Fast elements can't have lengths that are not representable by
// a 32-bit signed integer.
@@ -9577,6 +9692,10 @@ static uint32_t EstimateElementCount(Handle<JSArray> array) {
}
break;
}
+ case FAST_DOUBLE_ELEMENTS:
+ // TODO(1810): Decide if it's worthwhile to implement this.
+ UNREACHABLE();
+ break;
case DICTIONARY_ELEMENTS: {
Handle<NumberDictionary> dictionary(
NumberDictionary::cast(array->elements()));
@@ -9589,7 +9708,16 @@ static uint32_t EstimateElementCount(Handle<JSArray> array) {
}
break;
}
- default:
+ case NON_STRICT_ARGUMENTS_ELEMENTS:
+ case EXTERNAL_BYTE_ELEMENTS:
+ case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+ case EXTERNAL_SHORT_ELEMENTS:
+ case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+ case EXTERNAL_INT_ELEMENTS:
+ case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+ case EXTERNAL_FLOAT_ELEMENTS:
+ case EXTERNAL_DOUBLE_ELEMENTS:
+ case EXTERNAL_PIXEL_ELEMENTS:
// External arrays are always dense.
return length;
}
@@ -9655,6 +9783,7 @@ static void CollectElementIndices(Handle<JSObject> object,
List<uint32_t>* indices) {
ElementsKind kind = object->GetElementsKind();
switch (kind) {
+ case FAST_SMI_ONLY_ELEMENTS:
case FAST_ELEMENTS: {
Handle<FixedArray> elements(FixedArray::cast(object->elements()));
uint32_t length = static_cast<uint32_t>(elements->length());
@@ -9666,6 +9795,11 @@ static void CollectElementIndices(Handle<JSObject> object,
}
break;
}
+ case FAST_DOUBLE_ELEMENTS: {
+ // TODO(1810): Decide if it's worthwhile to implement this.
+ UNREACHABLE();
+ break;
+ }
case DICTIONARY_ELEMENTS: {
Handle<NumberDictionary> dict(NumberDictionary::cast(object->elements()));
uint32_t capacity = dict->Capacity();
@@ -9774,6 +9908,7 @@ static bool IterateElements(Isolate* isolate,
ArrayConcatVisitor* visitor) {
uint32_t length = static_cast<uint32_t>(receiver->length()->Number());
switch (receiver->GetElementsKind()) {
+ case FAST_SMI_ONLY_ELEMENTS:
case FAST_ELEMENTS: {
// Run through the elements FixedArray and use HasElement and GetElement
// to check the prototype for missing elements.
@@ -9788,13 +9923,18 @@ static bool IterateElements(Isolate* isolate,
} else if (receiver->HasElement(j)) {
// Call GetElement on receiver, not its prototype, or getters won't
// have the correct receiver.
- element_value = GetElement(receiver, j);
- if (element_value.is_null()) return false;
+ element_value = Object::GetElement(receiver, j);
+ RETURN_IF_EMPTY_HANDLE_VALUE(isolate, element_value, false);
visitor->visit(j, element_value);
}
}
break;
}
+ case FAST_DOUBLE_ELEMENTS: {
+ // TODO(1810): Decide if it's worthwhile to implement this.
+ UNREACHABLE();
+ break;
+ }
case DICTIONARY_ELEMENTS: {
Handle<NumberDictionary> dict(receiver->element_dictionary());
List<uint32_t> indices(dict->Capacity() / 2);
@@ -9807,8 +9947,8 @@ static bool IterateElements(Isolate* isolate,
while (j < n) {
HandleScope loop_scope;
uint32_t index = indices[j];
- Handle<Object> element = GetElement(receiver, index);
- if (element.is_null()) return false;
+ Handle<Object> element = Object::GetElement(receiver, index);
+ RETURN_IF_EMPTY_HANDLE_VALUE(isolate, element, false);
visitor->visit(index, element);
// Skip to next different index (i.e., omit duplicates).
do {
@@ -9905,6 +10045,13 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ArrayConcat) {
uint32_t element_estimate;
if (obj->IsJSArray()) {
Handle<JSArray> array(Handle<JSArray>::cast(obj));
+ // TODO(1810): Find out if it's worthwhile to properly support
+ // arbitrary ElementsKinds. For now, pessimistically transition to
+ // FAST_ELEMENTS.
+ if (array->HasFastDoubleElements()) {
+ array = Handle<JSArray>::cast(
+ TransitionElementsKind(array, FAST_ELEMENTS));
+ }
length_estimate =
static_cast<uint32_t>(array->length()->Number());
element_estimate =
@@ -10002,15 +10149,17 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_MoveArrayContents) {
CONVERT_CHECKED(JSArray, to, args[1]);
FixedArrayBase* new_elements = from->elements();
MaybeObject* maybe_new_map;
+ ElementsKind elements_kind;
if (new_elements->map() == isolate->heap()->fixed_array_map() ||
new_elements->map() == isolate->heap()->fixed_cow_array_map()) {
- maybe_new_map = to->map()->GetFastElementsMap();
+ elements_kind = FAST_ELEMENTS;
} else if (new_elements->map() ==
isolate->heap()->fixed_double_array_map()) {
- maybe_new_map = to->map()->GetFastDoubleElementsMap();
+ elements_kind = FAST_DOUBLE_ELEMENTS;
} else {
- maybe_new_map = to->map()->GetSlowElementsMap();
+ elements_kind = DICTIONARY_ELEMENTS;
}
+ maybe_new_map = to->GetElementsTransitionMap(elements_kind);
Object* new_map;
if (!maybe_new_map->ToObject(&new_map)) return maybe_new_map;
to->set_map(Map::cast(new_map));
@@ -10056,9 +10205,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SwapElements) {
}
Handle<JSObject> jsobject = Handle<JSObject>::cast(object);
- Handle<Object> tmp1 = GetElement(jsobject, index1);
+ Handle<Object> tmp1 = Object::GetElement(jsobject, index1);
RETURN_IF_EMPTY_HANDLE(isolate, tmp1);
- Handle<Object> tmp2 = GetElement(jsobject, index2);
+ Handle<Object> tmp2 = Object::GetElement(jsobject, index2);
RETURN_IF_EMPTY_HANDLE(isolate, tmp2);
RETURN_IF_EMPTY_HANDLE(isolate,
@@ -10083,7 +10232,11 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetArrayKeys) {
if (array->elements()->IsDictionary()) {
// Create an array and get all the keys into it, then remove all the
// keys that are not integers in the range 0 to length-1.
- Handle<FixedArray> keys = GetKeysInFixedArrayFor(array, INCLUDE_PROTOS);
+ bool threw = false;
+ Handle<FixedArray> keys =
+ GetKeysInFixedArrayFor(array, INCLUDE_PROTOS, &threw);
+ if (threw) return Failure::Exception();
+
int keys_length = keys->length();
for (int i = 0; i < keys_length; i++) {
Object* key = keys->get(i);
@@ -10095,7 +10248,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetArrayKeys) {
}
return *isolate->factory()->NewJSArrayWithElements(keys);
} else {
- ASSERT(array->HasFastElements() || array->HasFastDoubleElements());
+ ASSERT(array->HasFastElements() ||
+ array->HasFastSmiOnlyElements() ||
+ array->HasFastDoubleElements());
Handle<FixedArray> single_interval = isolate->factory()->NewFixedArray(2);
// -1 means start of array.
single_interval->set(0, Smi::FromInt(-1));
@@ -10214,8 +10369,8 @@ static MaybeObject* DebugLookupResultValue(Heap* heap,
case CALLBACKS: {
Object* structure = result->GetCallbackObject();
if (structure->IsForeign() || structure->IsAccessorInfo()) {
- MaybeObject* maybe_value = receiver->GetPropertyWithCallback(
- receiver, structure, name, result->holder());
+ MaybeObject* maybe_value = result->holder()->GetPropertyWithCallback(
+ receiver, structure, name);
if (!maybe_value->ToObject(&value)) {
if (maybe_value->IsRetryAfterGC()) return maybe_value;
ASSERT(maybe_value->IsException());
@@ -10237,10 +10392,11 @@ static MaybeObject* DebugLookupResultValue(Heap* heap,
case CONSTANT_TRANSITION:
case NULL_DESCRIPTOR:
return heap->undefined_value();
- default:
+ case HANDLER:
UNREACHABLE();
+ return heap->undefined_value();
}
- UNREACHABLE();
+ UNREACHABLE(); // keep the compiler happy
return heap->undefined_value();
}
@@ -10306,7 +10462,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugGetPropertyDetails) {
// Try local lookup on each of the objects.
Handle<JSObject> jsproto = obj;
for (int i = 0; i < length; i++) {
- LookupResult result;
+ LookupResult result(isolate);
jsproto->LocalLookup(*name, &result);
if (result.IsProperty()) {
// LookupResult is not GC safe as it holds raw object pointers.
@@ -10363,7 +10519,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugGetProperty) {
CONVERT_ARG_CHECKED(JSObject, obj, 0);
CONVERT_ARG_CHECKED(String, name, 1);
- LookupResult result;
+ LookupResult result(isolate);
obj->Lookup(*name, &result);
if (result.IsProperty()) {
return DebugLookupResultValue(isolate->heap(), *obj, *name, &result, NULL);
@@ -10547,6 +10703,18 @@ static const int kFrameDetailsAtReturnIndex = 7;
static const int kFrameDetailsFlagsIndex = 8;
static const int kFrameDetailsFirstDynamicIndex = 9;
+
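+// Returns the most recent SaveContext that is below the given frame, i.e.
+// the saved context that was active when the frame was entered.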
+static SaveContext* FindSavedContextForFrame(Isolate* isolate,
+ JavaScriptFrame* frame) {
+ SaveContext* save = isolate->save_context();
+ while (save != NULL && !save->IsBelowFrame(frame)) {
+ save = save->prev();
+ }
+ ASSERT(save != NULL);
+ return save;
+}
+
+
// Return an array with frame details
// args[0]: number: break id
// args[1]: number: frame index
@@ -10602,11 +10770,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFrameDetails) {
// Traverse the saved contexts chain to find the active context for the
// selected frame.
- SaveContext* save = isolate->save_context();
- while (save != NULL && !save->below(it.frame())) {
- save = save->prev();
- }
- ASSERT(save != NULL);
+ SaveContext* save = FindSavedContextForFrame(isolate, it.frame());
// Get the frame id.
Handle<Object> frame_id(WrapFrameId(it.frame()->id()), isolate);
@@ -10623,9 +10787,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFrameDetails) {
// Get scope info and read from it for local variable information.
Handle<JSFunction> function(JSFunction::cast(it.frame()->function()));
Handle<SharedFunctionInfo> shared(function->shared());
- Handle<SerializedScopeInfo> scope_info(shared->scope_info());
- ASSERT(*scope_info != SerializedScopeInfo::Empty());
- ScopeInfo<> info(*scope_info);
+ Handle<ScopeInfo> scope_info(shared->scope_info());
+ ASSERT(*scope_info != ScopeInfo::Empty());
// Get the locals names and values into a temporary array.
//
@@ -10633,24 +10796,26 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFrameDetails) {
  // TODO(1240907): Hide compiler-introduced stack variables
  // (e.g. .result)? For users of the debugger, they will probably be
// confusing.
Handle<FixedArray> locals =
- isolate->factory()->NewFixedArray(info.NumberOfLocals() * 2);
+ isolate->factory()->NewFixedArray(scope_info->LocalCount() * 2);
// Fill in the values of the locals.
int i = 0;
- for (; i < info.number_of_stack_slots(); ++i) {
+ for (; i < scope_info->StackLocalCount(); ++i) {
// Use the value from the stack.
- locals->set(i * 2, *info.LocalName(i));
+ locals->set(i * 2, scope_info->LocalName(i));
locals->set(i * 2 + 1, frame_inspector.GetExpression(i));
}
- if (i < info.NumberOfLocals()) {
+ if (i < scope_info->LocalCount()) {
// Get the context containing declarations.
Handle<Context> context(
Context::cast(it.frame()->context())->declaration_context());
- for (; i < info.NumberOfLocals(); ++i) {
- Handle<String> name = info.LocalName(i);
+ for (; i < scope_info->LocalCount(); ++i) {
+ Handle<String> name(scope_info->LocalName(i));
+ VariableMode mode;
+ InitializationFlag init_flag;
locals->set(i * 2, *name);
- locals->set(i * 2 + 1,
- context->get(scope_info->ContextSlotIndex(*name, NULL)));
+ locals->set(i * 2 + 1, context->get(
+ scope_info->ContextSlotIndex(*name, &mode, &init_flag)));
}
}
@@ -10704,7 +10869,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFrameDetails) {
// Find the number of arguments to fill. At least fill the number of
// parameters for the function and fill more if more parameters are provided.
- int argument_count = info.number_of_parameters();
+ int argument_count = scope_info->ParameterCount();
if (argument_count < frame_inspector.GetParametersCount()) {
argument_count = frame_inspector.GetParametersCount();
}
@@ -10716,7 +10881,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFrameDetails) {
// Calculate the size of the result.
int details_size = kFrameDetailsFirstDynamicIndex +
- 2 * (argument_count + info.NumberOfLocals()) +
+ 2 * (argument_count + scope_info->LocalCount()) +
(at_return ? 1 : 0);
Handle<FixedArray> details = isolate->factory()->NewFixedArray(details_size);
@@ -10731,7 +10896,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFrameDetails) {
// Add the locals count
details->set(kFrameDetailsLocalCountIndex,
- Smi::FromInt(info.NumberOfLocals()));
+ Smi::FromInt(scope_info->LocalCount()));
// Add the source position.
if (position != RelocInfo::kNoPosition) {
@@ -10766,8 +10931,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFrameDetails) {
// Add arguments name and value.
for (int i = 0; i < argument_count; i++) {
// Name of the argument.
- if (i < info.number_of_parameters()) {
- details->set(details_index++, *info.parameter_name(i));
+ if (i < scope_info->ParameterCount()) {
+ details->set(details_index++, scope_info->ParameterName(i));
} else {
details->set(details_index++, heap->undefined_value());
}
@@ -10782,7 +10947,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFrameDetails) {
}
// Add locals name and value from the temporary copy from the function frame.
- for (int i = 0; i < info.NumberOfLocals() * 2; i++) {
+ for (int i = 0; i < scope_info->LocalCount() * 2; i++) {
details->set(details_index++, locals->get(i));
}
@@ -10795,7 +10960,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFrameDetails) {
// THIS MUST BE DONE LAST SINCE WE MIGHT ADVANCE
// THE FRAME ITERATOR TO WRAP THE RECEIVER.
Handle<Object> receiver(it.frame()->receiver(), isolate);
- if (!receiver->IsJSObject() && !shared->strict_mode() && !shared->native()) {
+ if (!receiver->IsJSObject() &&
+ shared->is_classic_mode() &&
+ !shared->native()) {
// If the receiver is not a JSObject and the function is not a
    // builtin or strict-mode, we have hit an optimization where a
    // value object is not converted into a wrapped JS object. To
@@ -10818,21 +10985,20 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFrameDetails) {
// Copy all the context locals into an object used to materialize a scope.
static bool CopyContextLocalsToScopeObject(
Isolate* isolate,
- Handle<SerializedScopeInfo> serialized_scope_info,
- ScopeInfo<>& scope_info,
+ Handle<ScopeInfo> scope_info,
Handle<Context> context,
Handle<JSObject> scope_object) {
// Fill all context locals to the context extension.
- for (int i = Context::MIN_CONTEXT_SLOTS;
- i < scope_info.number_of_context_slots();
- i++) {
- int context_index = serialized_scope_info->ContextSlotIndex(
- *scope_info.context_slot_name(i), NULL);
+ for (int i = 0; i < scope_info->ContextLocalCount(); i++) {
+ VariableMode mode;
+ InitializationFlag init_flag;
+ int context_index = scope_info->ContextSlotIndex(
+ scope_info->ContextLocalName(i), &mode, &init_flag);
RETURN_IF_EMPTY_HANDLE_VALUE(
isolate,
SetProperty(scope_object,
- scope_info.context_slot_name(i),
+ Handle<String>(scope_info->ContextLocalName(i)),
Handle<Object>(context->get(context_index), isolate),
NONE,
kNonStrictMode),
@@ -10851,8 +11017,7 @@ static Handle<JSObject> MaterializeLocalScope(
int inlined_frame_index) {
Handle<JSFunction> function(JSFunction::cast(frame->function()));
Handle<SharedFunctionInfo> shared(function->shared());
- Handle<SerializedScopeInfo> serialized_scope_info(shared->scope_info());
- ScopeInfo<> scope_info(*serialized_scope_info);
+ Handle<ScopeInfo> scope_info(shared->scope_info());
FrameInspector frame_inspector(frame, inlined_frame_index, isolate);
// Allocate and initialize a JSObject with all the arguments, stack locals
@@ -10861,11 +11026,11 @@ static Handle<JSObject> MaterializeLocalScope(
isolate->factory()->NewJSObject(isolate->object_function());
// First fill all parameters.
- for (int i = 0; i < scope_info.number_of_parameters(); ++i) {
+ for (int i = 0; i < scope_info->ParameterCount(); ++i) {
RETURN_IF_EMPTY_HANDLE_VALUE(
isolate,
SetProperty(local_scope,
- scope_info.parameter_name(i),
+ Handle<String>(scope_info->ParameterName(i)),
Handle<Object>(frame_inspector.GetParameter(i)),
NONE,
kNonStrictMode),
@@ -10873,24 +11038,23 @@ static Handle<JSObject> MaterializeLocalScope(
}
// Second fill all stack locals.
- for (int i = 0; i < scope_info.number_of_stack_slots(); ++i) {
+ for (int i = 0; i < scope_info->StackLocalCount(); ++i) {
RETURN_IF_EMPTY_HANDLE_VALUE(
isolate,
SetProperty(local_scope,
- scope_info.stack_slot_name(i),
+ Handle<String>(scope_info->StackLocalName(i)),
Handle<Object>(frame_inspector.GetExpression(i)),
NONE,
kNonStrictMode),
Handle<JSObject>());
}
- if (scope_info.number_of_context_slots() > Context::MIN_CONTEXT_SLOTS) {
+ if (scope_info->HasContext()) {
// Third fill all context locals.
Handle<Context> frame_context(Context::cast(frame->context()));
Handle<Context> function_context(frame_context->declaration_context());
- if (!CopyContextLocalsToScopeObject(isolate,
- serialized_scope_info, scope_info,
- function_context, local_scope)) {
+ if (!CopyContextLocalsToScopeObject(
+ isolate, scope_info, function_context, local_scope)) {
return Handle<JSObject>();
}
@@ -10900,7 +11064,11 @@ static Handle<JSObject> MaterializeLocalScope(
if (function_context->has_extension() &&
!function_context->IsGlobalContext()) {
Handle<JSObject> ext(JSObject::cast(function_context->extension()));
- Handle<FixedArray> keys = GetKeysInFixedArrayFor(ext, INCLUDE_PROTOS);
+ bool threw = false;
+ Handle<FixedArray> keys =
+ GetKeysInFixedArrayFor(ext, INCLUDE_PROTOS, &threw);
+ if (threw) return Handle<JSObject>();
+
for (int i = 0; i < keys->length(); i++) {
// Names of variables introduced by eval are strings.
ASSERT(keys->get(i)->IsString());
@@ -10929,8 +11097,7 @@ static Handle<JSObject> MaterializeClosure(Isolate* isolate,
ASSERT(context->IsFunctionContext());
Handle<SharedFunctionInfo> shared(context->closure()->shared());
- Handle<SerializedScopeInfo> serialized_scope_info(shared->scope_info());
- ScopeInfo<> scope_info(*serialized_scope_info);
+ Handle<ScopeInfo> scope_info(shared->scope_info());
// Allocate and initialize a JSObject with all the content of this function
// closure.
@@ -10938,9 +11105,8 @@ static Handle<JSObject> MaterializeClosure(Isolate* isolate,
isolate->factory()->NewJSObject(isolate->object_function());
// Fill all context locals to the context extension.
- if (!CopyContextLocalsToScopeObject(isolate,
- serialized_scope_info, scope_info,
- context, closure_scope)) {
+ if (!CopyContextLocalsToScopeObject(
+ isolate, scope_info, context, closure_scope)) {
return Handle<JSObject>();
}
@@ -10948,7 +11114,11 @@ static Handle<JSObject> MaterializeClosure(Isolate* isolate,
// be variables introduced by eval.
if (context->has_extension()) {
Handle<JSObject> ext(JSObject::cast(context->extension()));
- Handle<FixedArray> keys = GetKeysInFixedArrayFor(ext, INCLUDE_PROTOS);
+ bool threw = false;
+ Handle<FixedArray> keys =
+ GetKeysInFixedArrayFor(ext, INCLUDE_PROTOS, &threw);
+ if (threw) return Handle<JSObject>();
+
for (int i = 0; i < keys->length(); i++) {
// Names of variables introduced by eval are strings.
ASSERT(keys->get(i)->IsString());
@@ -10991,9 +11161,7 @@ static Handle<JSObject> MaterializeBlockScope(
Isolate* isolate,
Handle<Context> context) {
ASSERT(context->IsBlockContext());
- Handle<SerializedScopeInfo> serialized_scope_info(
- SerializedScopeInfo::cast(context->extension()));
- ScopeInfo<> scope_info(*serialized_scope_info);
+ Handle<ScopeInfo> scope_info(ScopeInfo::cast(context->extension()));
// Allocate and initialize a JSObject with all the arguments, stack locals
// heap locals and extension properties of the debugged function.
@@ -11001,21 +11169,19 @@ static Handle<JSObject> MaterializeBlockScope(
isolate->factory()->NewJSObject(isolate->object_function());
// Fill all context locals.
- if (scope_info.number_of_context_slots() > Context::MIN_CONTEXT_SLOTS) {
- if (!CopyContextLocalsToScopeObject(isolate,
- serialized_scope_info, scope_info,
- context, block_scope)) {
- return Handle<JSObject>();
- }
+ if (!CopyContextLocalsToScopeObject(
+ isolate, scope_info, context, block_scope)) {
+ return Handle<JSObject>();
}
return block_scope;
}
-// Iterate over the actual scopes visible from a stack frame. All scopes are
+// Iterate over the actual scopes visible from a stack frame. The iteration
+// proceeds from the innermost visible nested scope outwards. All scopes are
// backed by an actual context except the local scope, which is inserted
-// "artifically" in the context chain.
+// "artificially" in the context chain.
class ScopeIterator {
public:
enum ScopeType {
@@ -11035,27 +11201,83 @@ class ScopeIterator {
inlined_frame_index_(inlined_frame_index),
function_(JSFunction::cast(frame->function())),
context_(Context::cast(frame->context())),
- local_done_(false),
- at_local_(false) {
-
- // Check whether the first scope is actually a local scope.
- // If there is a stack slot for .result then this local scope has been
- // created for evaluating top level code and it is not a real local scope.
- // Checking for the existence of .result seems fragile, but the scope info
- // saved with the code object does not otherwise have that information.
- int index = function_->shared()->scope_info()->
- StackSlotIndex(isolate_->heap()->result_symbol());
- if (index >= 0) {
- local_done_ = true;
- } else if (context_->IsGlobalContext() ||
- context_->IsFunctionContext()) {
- at_local_ = true;
- } else if (context_->closure() != *function_) {
- // The context_ is a block or with or catch block from the outer function.
- ASSERT(context_->IsWithContext() ||
- context_->IsCatchContext() ||
- context_->IsBlockContext());
- at_local_ = true;
+ nested_scope_chain_(4) {
+
+ // Catch the case when the debugger stops in an internal function.
+ Handle<SharedFunctionInfo> shared_info(function_->shared());
+ Handle<ScopeInfo> scope_info(shared_info->scope_info());
+ if (shared_info->script() == isolate->heap()->undefined_value()) {
+ while (context_->closure() == *function_) {
+ context_ = Handle<Context>(context_->previous(), isolate_);
+ }
+ return;
+ }
+
+ // Get the debug info (create it if it does not exist).
+ if (!isolate->debug()->EnsureDebugInfo(shared_info)) {
+ // Return if ensuring debug info failed.
+ return;
+ }
+ Handle<DebugInfo> debug_info = Debug::GetDebugInfo(shared_info);
+
+ // Find the break point where execution has stopped.
+ BreakLocationIterator break_location_iterator(debug_info,
+ ALL_BREAK_LOCATIONS);
+ break_location_iterator.FindBreakLocationFromAddress(frame->pc());
+ if (break_location_iterator.IsExit()) {
+ // We are within the return sequence. At the moment it is not possible to
+ // get a source position which is consistent with the current scope chain.
+ // Thus all nested with, catch and block contexts are skipped and we only
+ // provide the function scope.
+ if (scope_info->HasContext()) {
+ context_ = Handle<Context>(context_->declaration_context(), isolate_);
+ } else {
+ while (context_->closure() == *function_) {
+ context_ = Handle<Context>(context_->previous(), isolate_);
+ }
+ }
+ if (scope_info->Type() != EVAL_SCOPE) nested_scope_chain_.Add(scope_info);
+ } else {
+ // Reparse the code and analyze the scopes.
+ ZoneScope zone_scope(isolate, DELETE_ON_EXIT);
+ Handle<Script> script(Script::cast(shared_info->script()));
+ Scope* scope = NULL;
+
+ // Check whether we are in global, eval or function code.
+ Handle<ScopeInfo> scope_info(shared_info->scope_info());
+ if (scope_info->Type() != FUNCTION_SCOPE) {
+ // Global or eval code.
+ CompilationInfo info(script);
+ if (scope_info->Type() == GLOBAL_SCOPE) {
+ info.MarkAsGlobal();
+ } else {
+ ASSERT(scope_info->Type() == EVAL_SCOPE);
+ info.MarkAsEval();
+ info.SetCallingContext(Handle<Context>(function_->context()));
+ }
+ if (ParserApi::Parse(&info, kNoParsingFlags) && Scope::Analyze(&info)) {
+ scope = info.function()->scope();
+ }
+ } else {
+ // Function code
+ CompilationInfo info(shared_info);
+ if (ParserApi::Parse(&info, kNoParsingFlags) && Scope::Analyze(&info)) {
+ scope = info.function()->scope();
+ }
+ }
+
+ // Retrieve the scope chain for the current position.
+ if (scope != NULL) {
+ int source_position = shared_info->code()->SourcePosition(frame_->pc());
+ scope->GetNestedScopeChain(&nested_scope_chain_, source_position);
+ } else {
+ // A failed reparse indicates that the preparser has diverged from the
+ // parser, or that the preparse data given to the initial parse was
+ // faulty. In debug mode we fail; in release mode we only provide the
+ // information we get from the context chain, with nothing about
+ // completely stack-allocated scopes or stack-allocated locals.
+ UNREACHABLE();
+ }
}
}
@@ -11064,40 +11286,49 @@ class ScopeIterator {
// Move to the next scope.
void Next() {
- // If at a local scope mark the local scope as passed.
- if (at_local_) {
- at_local_ = false;
- local_done_ = true;
-
- // If the current context is not associated with the local scope the
- // current context is the next real scope, so don't move to the next
- // context in this case.
- if (context_->closure() != *function_) {
- return;
- }
- }
-
- // The global scope is always the last in the chain.
- if (context_->IsGlobalContext()) {
+ ScopeType scope_type = Type();
+ if (scope_type == ScopeTypeGlobal) {
+ // The global scope is always the last in the chain.
+ ASSERT(context_->IsGlobalContext());
context_ = Handle<Context>();
return;
}
-
- // Move to the next context.
- context_ = Handle<Context>(context_->previous(), isolate_);
-
- // If passing the local scope indicate that the current scope is now the
- // local scope.
- if (!local_done_ &&
- (context_->IsGlobalContext() || context_->IsFunctionContext())) {
- at_local_ = true;
+ if (nested_scope_chain_.is_empty()) {
+ context_ = Handle<Context>(context_->previous(), isolate_);
+ } else {
+ if (nested_scope_chain_.last()->HasContext()) {
+ ASSERT(context_->previous() != NULL);
+ context_ = Handle<Context>(context_->previous(), isolate_);
+ }
+ nested_scope_chain_.RemoveLast();
}
}
// Return the type of the current scope.
ScopeType Type() {
- if (at_local_) {
- return ScopeTypeLocal;
+ if (!nested_scope_chain_.is_empty()) {
+ Handle<ScopeInfo> scope_info = nested_scope_chain_.last();
+ switch (scope_info->Type()) {
+ case FUNCTION_SCOPE:
+ ASSERT(context_->IsFunctionContext() ||
+ !scope_info->HasContext());
+ return ScopeTypeLocal;
+ case GLOBAL_SCOPE:
+ ASSERT(context_->IsGlobalContext());
+ return ScopeTypeGlobal;
+ case WITH_SCOPE:
+ ASSERT(context_->IsWithContext());
+ return ScopeTypeWith;
+ case CATCH_SCOPE:
+ ASSERT(context_->IsCatchContext());
+ return ScopeTypeCatch;
+ case BLOCK_SCOPE:
+ ASSERT(!scope_info->HasContext() ||
+ context_->IsBlockContext());
+ return ScopeTypeBlock;
+ case EVAL_SCOPE:
+ UNREACHABLE();
+ }
}
if (context_->IsGlobalContext()) {
ASSERT(context_->global()->IsGlobalObject());
@@ -11123,6 +11354,7 @@ class ScopeIterator {
return Handle<JSObject>(CurrentContext()->global());
case ScopeIterator::ScopeTypeLocal:
// Materialize the content of the local scope into a JSObject.
+ ASSERT(nested_scope_chain_.length() == 1);
return MaterializeLocalScope(isolate_, frame_, inlined_frame_index_);
case ScopeIterator::ScopeTypeWith:
// Return the with object.
@@ -11139,13 +11371,28 @@ class ScopeIterator {
return Handle<JSObject>();
}
+ Handle<ScopeInfo> CurrentScopeInfo() {
+ if (!nested_scope_chain_.is_empty()) {
+ return nested_scope_chain_.last();
+ } else if (context_->IsBlockContext()) {
+ return Handle<ScopeInfo>(ScopeInfo::cast(context_->extension()));
+ } else if (context_->IsFunctionContext()) {
+ return Handle<ScopeInfo>(context_->closure()->shared()->scope_info());
+ }
+ return Handle<ScopeInfo>::null();
+ }
+
// Return the context for this scope. For the local context there might not
// be an actual context.
Handle<Context> CurrentContext() {
- if (at_local_ && context_->closure() != *function_) {
+ if (Type() == ScopeTypeGlobal ||
+ nested_scope_chain_.is_empty()) {
+ return context_;
+ } else if (nested_scope_chain_.last()->HasContext()) {
+ return context_;
+ } else {
return Handle<Context>();
}
- return context_;
}
#ifdef DEBUG
@@ -11159,8 +11406,7 @@ class ScopeIterator {
case ScopeIterator::ScopeTypeLocal: {
PrintF("Local:\n");
- ScopeInfo<> scope_info(function_->shared()->scope_info());
- scope_info.Print();
+ function_->shared()->scope_info()->Print();
if (!CurrentContext().is_null()) {
CurrentContext()->Print();
if (CurrentContext()->has_extension()) {
@@ -11208,8 +11454,7 @@ class ScopeIterator {
int inlined_frame_index_;
Handle<JSFunction> function_;
Handle<Context> context_;
- bool local_done_;
- bool at_local_;
+ List<Handle<ScopeInfo> > nested_scope_chain_;
DISALLOW_IMPLICIT_CONSTRUCTORS(ScopeIterator);
};
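
In outline, the rewritten iterator visits one scope per step, innermost first, with the local scope materialized from the frame when it has no real context. A rough usage sketch (Done() is the obvious end test, as used by CopyNestedScopeContextChain further down in this diff):

    ScopeIterator it(isolate, frame, inlined_frame_index);
    for (; !it.Done(); it.Next()) {
      ScopeIterator::ScopeType type = it.Type();
      Handle<ScopeInfo> scope_info = it.CurrentScopeInfo();  // may be null
      Handle<Context> context = it.CurrentContext();  // null for context-less scopes
      // ... inspect one scope per iteration ...
    }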
@@ -11465,48 +11710,53 @@ Object* Runtime::FindSharedFunctionInfoInScript(Isolate* isolate,
int target_start_position = RelocInfo::kNoPosition;
Handle<SharedFunctionInfo> target;
while (!done) {
- HeapIterator iterator;
- for (HeapObject* obj = iterator.next();
- obj != NULL; obj = iterator.next()) {
- if (obj->IsSharedFunctionInfo()) {
- Handle<SharedFunctionInfo> shared(SharedFunctionInfo::cast(obj));
- if (shared->script() == *script) {
- // If the SharedFunctionInfo found has the requested script data and
- // contains the source position it is a candidate.
- int start_position = shared->function_token_position();
- if (start_position == RelocInfo::kNoPosition) {
- start_position = shared->start_position();
- }
- if (start_position <= position &&
- position <= shared->end_position()) {
- // If there is no candidate or this function is within the current
- // candidate this is the new candidate.
- if (target.is_null()) {
- target_start_position = start_position;
- target = shared;
- } else {
- if (target_start_position == start_position &&
- shared->end_position() == target->end_position()) {
- // If a top-level function contain only one function
- // declartion the source for the top-level and the function is
- // the same. In that case prefer the non top-level function.
- if (!shared->is_toplevel()) {
+ { // Extra scope for iterator and no-allocation.
+ isolate->heap()->EnsureHeapIsIterable();
+ AssertNoAllocation no_alloc_during_heap_iteration;
+ HeapIterator iterator;
+ for (HeapObject* obj = iterator.next();
+ obj != NULL; obj = iterator.next()) {
+ if (obj->IsSharedFunctionInfo()) {
+ Handle<SharedFunctionInfo> shared(SharedFunctionInfo::cast(obj));
+ if (shared->script() == *script) {
+ // If the SharedFunctionInfo found has the requested script data and
+ // contains the source position it is a candidate.
+ int start_position = shared->function_token_position();
+ if (start_position == RelocInfo::kNoPosition) {
+ start_position = shared->start_position();
+ }
+ if (start_position <= position &&
+ position <= shared->end_position()) {
+ // If there is no candidate or this function is within the current
+ // candidate this is the new candidate.
+ if (target.is_null()) {
+ target_start_position = start_position;
+ target = shared;
+ } else {
+ if (target_start_position == start_position &&
+ shared->end_position() == target->end_position()) {
+ // If a top-level function contains only one function
+ // declaration, the source for the top-level and the
+ // function is the same. In that case prefer the non
+ // top-level function.
+ if (!shared->is_toplevel()) {
+ target_start_position = start_position;
+ target = shared;
+ }
+ } else if (target_start_position <= start_position &&
+ shared->end_position() <= target->end_position()) {
+ // This containment check includes equality as a function
+ // inside a top-level function can share either start or end
+ // position with the top-level function.
target_start_position = start_position;
target = shared;
}
- } else if (target_start_position <= start_position &&
- shared->end_position() <= target->end_position()) {
- // This containment check includes equality as a function inside
- // a top-level function can share either start or end position
- // with the top-level function.
- target_start_position = start_position;
- target = shared;
}
}
}
}
- }
- }
+ } // End for loop.
+ } // End no-allocation scope.
if (target.is_null()) {
return isolate->heap()->undefined_value();
@@ -11519,9 +11769,9 @@ Object* Runtime::FindSharedFunctionInfoInScript(Isolate* isolate,
if (!done) {
// If the candidate is not compiled compile it to reveal any inner
// functions which might contain the requested source position.
- CompileLazyShared(target, KEEP_EXCEPTION);
+ SharedFunctionInfo::CompileLazy(target, KEEP_EXCEPTION);
}
- }
+ } // End while loop.
return *target;
}
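
Note the shape of the heap-walking pattern this patch adopts throughout runtime.cc: make the heap iterable first, then forbid allocation for the lifetime of the iterator. A distilled sketch, using only calls that appear in this diff:

    isolate->heap()->EnsureHeapIsIterable();
    AssertNoAllocation no_allocation;
    HeapIterator iterator;
    for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
      // Inspect obj; allocating here would invalidate the iteration.
    }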
@@ -11667,46 +11917,65 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ClearStepping) {
// Creates a copy of the with context chain. The copy of the context chain is
// linked to the function context supplied.
-static Handle<Context> CopyWithContextChain(Isolate* isolate,
- Handle<JSFunction> function,
- Handle<Context> current,
- Handle<Context> base) {
- // At the end of the chain. Return the base context to link to.
- if (current->IsFunctionContext() || current->IsGlobalContext()) {
- return base;
+static Handle<Context> CopyNestedScopeContextChain(Isolate* isolate,
+ Handle<JSFunction> function,
+ Handle<Context> base,
+ JavaScriptFrame* frame,
+ int inlined_frame_index) {
+ HandleScope scope(isolate);
+ List<Handle<ScopeInfo> > scope_chain;
+ List<Handle<Context> > context_chain;
+
+ ScopeIterator it(isolate, frame, inlined_frame_index);
+ for (; it.Type() != ScopeIterator::ScopeTypeGlobal &&
+ it.Type() != ScopeIterator::ScopeTypeLocal ; it.Next()) {
+ ASSERT(!it.Done());
+ scope_chain.Add(it.CurrentScopeInfo());
+ context_chain.Add(it.CurrentContext());
}
- // Recursively copy the with and catch contexts.
- HandleScope scope(isolate);
- Handle<Context> previous(current->previous());
- Handle<Context> new_previous =
- CopyWithContextChain(isolate, function, previous, base);
- Handle<Context> new_current;
- if (current->IsCatchContext()) {
- Handle<String> name(String::cast(current->extension()));
- Handle<Object> thrown_object(current->get(Context::THROWN_OBJECT_INDEX));
- new_current =
- isolate->factory()->NewCatchContext(function,
- new_previous,
- name,
- thrown_object);
- } else if (current->IsBlockContext()) {
- Handle<SerializedScopeInfo> scope_info(
- SerializedScopeInfo::cast(current->extension()));
- new_current =
- isolate->factory()->NewBlockContext(function, new_previous, scope_info);
- // Copy context slots.
- int num_context_slots = scope_info->NumberOfContextSlots();
- for (int i = Context::MIN_CONTEXT_SLOTS; i < num_context_slots; ++i) {
- new_current->set(i, current->get(i));
+ // At the end of the chain. Return the base context to link to.
+ Handle<Context> context = base;
+
+ // Iteratively copy and/or materialize the nested contexts.
+ while (!scope_chain.is_empty()) {
+ Handle<ScopeInfo> scope_info = scope_chain.RemoveLast();
+ Handle<Context> current = context_chain.RemoveLast();
+ ASSERT(!(scope_info->HasContext() & current.is_null()));
+
+ if (scope_info->Type() == CATCH_SCOPE) {
+ Handle<String> name(String::cast(current->extension()));
+ Handle<Object> thrown_object(current->get(Context::THROWN_OBJECT_INDEX));
+ context =
+ isolate->factory()->NewCatchContext(function,
+ context,
+ name,
+ thrown_object);
+ } else if (scope_info->Type() == BLOCK_SCOPE) {
+ // Materialize the contents of the block scope into a JSObject.
+ Handle<JSObject> block_scope_object =
+ MaterializeBlockScope(isolate, current);
+ if (block_scope_object.is_null()) {
+ return Handle<Context>::null();
+ }
+ // Allocate a new function context for the debug evaluation and set the
+ // extension object.
+ Handle<Context> new_context =
+ isolate->factory()->NewFunctionContext(Context::MIN_CONTEXT_SLOTS,
+ function);
+ new_context->set_extension(*block_scope_object);
+ new_context->set_previous(*context);
+ context = new_context;
+ } else {
+ ASSERT(scope_info->Type() == WITH_SCOPE);
+ ASSERT(current->IsWithContext());
+ Handle<JSObject> extension(JSObject::cast(current->extension()));
+ context =
+ isolate->factory()->NewWithContext(function, context, extension);
}
- } else {
- ASSERT(current->IsWithContext());
- Handle<JSObject> extension(JSObject::cast(current->extension()));
- new_current =
- isolate->factory()->NewWithContext(function, new_previous, extension);
}
- return scope.CloseAndEscape(new_current);
+
+ return scope.CloseAndEscape(context);
}
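
For intuition: with a break inside a catch block that is itself nested in a with block, the chain handed to the debug-eval code comes out roughly as

    copy of catch context -> copy of with context -> base (debug-eval context)

where "->" is the previous() link; the frame's original contexts are left untouched and only the copies are chained onto the supplied base.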
@@ -11716,23 +11985,24 @@ static Handle<Object> GetArgumentsObject(Isolate* isolate,
JavaScriptFrame* frame,
int inlined_frame_index,
Handle<JSFunction> function,
- Handle<SerializedScopeInfo> scope_info,
- const ScopeInfo<>* sinfo,
+ Handle<ScopeInfo> scope_info,
Handle<Context> function_context) {
// Try to find the value of 'arguments' to pass as parameter. If it is not
// found (that is the debugged function does not reference 'arguments' and
// does not support eval) then create an 'arguments' object.
int index;
- if (sinfo->number_of_stack_slots() > 0) {
+ if (scope_info->StackLocalCount() > 0) {
index = scope_info->StackSlotIndex(isolate->heap()->arguments_symbol());
if (index != -1) {
return Handle<Object>(frame->GetExpression(index), isolate);
}
}
- if (sinfo->number_of_context_slots() > Context::MIN_CONTEXT_SLOTS) {
- index = scope_info->ContextSlotIndex(isolate->heap()->arguments_symbol(),
- NULL);
+ if (scope_info->HasHeapAllocatedLocals()) {
+ VariableMode mode;
+ InitializationFlag init_flag;
+ index = scope_info->ContextSlotIndex(
+ isolate->heap()->arguments_symbol(), &mode, &init_flag);
if (index != -1) {
return Handle<Object>(function_context->get(index), isolate);
}
@@ -11797,16 +12067,12 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugEvaluate) {
JavaScriptFrameIterator it(isolate, id);
JavaScriptFrame* frame = it.frame();
Handle<JSFunction> function(JSFunction::cast(frame->function()));
- Handle<SerializedScopeInfo> scope_info(function->shared()->scope_info());
- ScopeInfo<> sinfo(*scope_info);
+ Handle<ScopeInfo> scope_info(function->shared()->scope_info());
// Traverse the saved contexts chain to find the active context for the
// selected frame.
- SaveContext* save = isolate->save_context();
- while (save != NULL && !save->below(frame)) {
- save = save->prev();
- }
- ASSERT(save != NULL);
+ SaveContext* save = FindSavedContextForFrame(isolate, frame);
+
SaveContext savex(isolate);
isolate->set_context(*(save->context()));
@@ -11821,9 +12087,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugEvaluate) {
isolate->factory()->undefined_value());
go_between->set_context(function->context());
#ifdef DEBUG
- ScopeInfo<> go_between_sinfo(go_between->shared()->scope_info());
- ASSERT(go_between_sinfo.number_of_parameters() == 0);
- ASSERT(go_between_sinfo.number_of_context_slots() == 0);
+ Handle<ScopeInfo> go_between_scope_info(go_between->shared()->scope_info());
+ ASSERT(go_between_scope_info->ParameterCount() == 0);
+ ASSERT(go_between_scope_info->ContextLocalCount() == 0);
#endif
// Materialize the content of the local scope into a JSObject.
@@ -11841,10 +12107,14 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugEvaluate) {
Handle<Context> frame_context(Context::cast(frame->context()));
Handle<Context> function_context;
// Get the function's context if it has one.
- if (scope_info->HasHeapAllocatedLocals()) {
+ if (scope_info->HasContext()) {
function_context = Handle<Context>(frame_context->declaration_context());
}
- context = CopyWithContextChain(isolate, go_between, frame_context, context);
+ context = CopyNestedScopeContextChain(isolate,
+ go_between,
+ context,
+ frame,
+ inlined_frame_index);
if (additional_context->IsJSObject()) {
Handle<JSObject> extension = Handle<JSObject>::cast(additional_context);
@@ -11868,7 +12138,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugEvaluate) {
Compiler::CompileEval(function_source,
context,
context->IsGlobalContext(),
- kNonStrictMode);
+ CLASSIC_MODE,
+ RelocInfo::kNoPosition);
if (shared.is_null()) return Failure::Exception();
Handle<JSFunction> compiled_function =
isolate->factory()->NewFunctionFromSharedFunctionInfo(shared, context);
@@ -11882,17 +12153,20 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugEvaluate) {
if (has_pending_exception) return Failure::Exception();
Handle<Object> arguments = GetArgumentsObject(isolate,
- frame, inlined_frame_index,
- function, scope_info,
- &sinfo, function_context);
+ frame,
+ inlined_frame_index,
+ function,
+ scope_info,
+ function_context);
// Invoke the evaluation function and return the result.
- const int argc = 2;
- Object** argv[argc] = { arguments.location(),
- Handle<Object>::cast(source).location() };
+ Handle<Object> argv[] = { arguments, source };
Handle<Object> result =
- Execution::Call(Handle<JSFunction>::cast(evaluation_function), receiver,
- argc, argv, &has_pending_exception);
+ Execution::Call(Handle<JSFunction>::cast(evaluation_function),
+ receiver,
+ ARRAY_SIZE(argv),
+ argv,
+ &has_pending_exception);
if (has_pending_exception) return Failure::Exception();
// Skip the global proxy as it has no properties and always delegates to the
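
The argv rewrite here follows the tree-wide change of Execution::Call to take an array of handles plus ARRAY_SIZE instead of raw Object** locations. A minimal sketch of the new convention (fun, arg0 and arg1 are hypothetical handles):

    bool pending = false;
    Handle<Object> argv[] = { arg0, arg1 };
    Handle<Object> result =
        Execution::Call(fun, receiver, ARRAY_SIZE(argv), argv, &pending);
    if (pending) return Failure::Exception();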
@@ -11942,15 +12216,12 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugEvaluateGlobal) {
bool is_global = true;
if (additional_context->IsJSObject()) {
- // Create a function context first, than put 'with' context on top of it.
- Handle<JSFunction> go_between = isolate->factory()->NewFunction(
- isolate->factory()->empty_string(),
- isolate->factory()->undefined_value());
- go_between->set_context(*context);
- context =
- isolate->factory()->NewFunctionContext(
- Context::MIN_CONTEXT_SLOTS, go_between);
- context->set_extension(JSObject::cast(*additional_context));
+ // Create a new 'with' context holding the additional context information,
+ // placed between the context of the debugged function and the eval code to
+ // be executed.
+ context = isolate->factory()->NewWithContext(
+ Handle<JSFunction>(context->closure()),
+ context,
+ Handle<JSObject>::cast(additional_context));
is_global = false;
}
@@ -11958,7 +12229,11 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugEvaluateGlobal) {
// Currently, the eval code will be executed in non-strict mode,
// even in the strict code context.
Handle<SharedFunctionInfo> shared =
- Compiler::CompileEval(source, context, is_global, kNonStrictMode);
+ Compiler::CompileEval(source,
+ context,
+ is_global,
+ CLASSIC_MODE,
+ RelocInfo::kNoPosition);
if (shared.is_null()) return Failure::Exception();
Handle<JSFunction> compiled_function =
Handle<JSFunction>(
@@ -11971,6 +12246,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugEvaluateGlobal) {
Handle<Object> result =
Execution::Call(compiled_function, receiver, 0, NULL,
&has_pending_exception);
+ // Clear the one-shot breakpoints so that the debugger does not step further.
+ isolate->debug()->ClearStepping();
if (has_pending_exception) return Failure::Exception();
return *result;
}
@@ -11998,13 +12275,14 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugGetLoadedScripts) {
// Return result as a JS array.
Handle<JSObject> result =
isolate->factory()->NewJSObject(isolate->array_function());
- Handle<JSArray>::cast(result)->SetContent(*instances);
+ isolate->factory()->SetContent(Handle<JSArray>::cast(result), instances);
return *result;
}
// Helper function used by Runtime_DebugReferencedBy below.
-static int DebugReferencedBy(JSObject* target,
+static int DebugReferencedBy(HeapIterator* iterator,
+ JSObject* target,
Object* instance_filter, int max_references,
FixedArray* instances, int instances_size,
JSFunction* arguments_function) {
@@ -12014,9 +12292,8 @@ static int DebugReferencedBy(JSObject* target,
// Iterate the heap.
int count = 0;
JSObject* last = NULL;
- HeapIterator iterator;
HeapObject* heap_obj = NULL;
- while (((heap_obj = iterator.next()) != NULL) &&
+ while (((heap_obj = iterator->next()) != NULL) &&
(max_references == 0 || count < max_references)) {
// Only look at all JSObjects.
if (heap_obj->IsJSObject()) {
@@ -12081,7 +12358,11 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugReferencedBy) {
ASSERT(args.length() == 3);
// First perform a full GC in order to avoid references from dead objects.
- isolate->heap()->CollectAllGarbage(false);
+ isolate->heap()->CollectAllGarbage(Heap::kMakeHeapIterableMask);
+ // The heap iterator reserves the right to do a GC to make the heap iterable.
+ // Due to the GC above we know it won't need to do that, but it seems cleaner
+ // to construct the heap iterator before we start having raw
+ // Object* locals that are not protected by handles.
// Check parameters.
CONVERT_CHECKED(JSObject, target, args[0]);
@@ -12091,6 +12372,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugReferencedBy) {
CONVERT_NUMBER_CHECKED(int32_t, max_references, Int32, args[2]);
RUNTIME_ASSERT(max_references >= 0);
+
// Get the constructor function for context extension and arguments array.
JSObject* arguments_boilerplate =
isolate->context()->global_context()->arguments_boilerplate();
@@ -12099,7 +12381,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugReferencedBy) {
// Get the number of referencing objects.
int count;
- count = DebugReferencedBy(target, instance_filter, max_references,
+ HeapIterator heap_iterator;
+ count = DebugReferencedBy(&heap_iterator,
+ target, instance_filter, max_references,
NULL, 0, arguments_function);
// Allocate an array to hold the result.
@@ -12110,30 +12394,34 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugReferencedBy) {
FixedArray* instances = FixedArray::cast(object);
// Fill the referencing objects.
- count = DebugReferencedBy(target, instance_filter, max_references,
+ // AllocateFixedArray above does not make the heap non-iterable.
+ ASSERT(HEAP->IsHeapIterable());
+ HeapIterator heap_iterator2;
+ count = DebugReferencedBy(&heap_iterator2,
+ target, instance_filter, max_references,
instances, count, arguments_function);
// Return result as JS array.
Object* result;
- { MaybeObject* maybe_result = isolate->heap()->AllocateJSObject(
+ MaybeObject* maybe_result = isolate->heap()->AllocateJSObject(
isolate->context()->global_context()->array_function());
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- JSArray::cast(result)->SetContent(instances);
- return result;
+ if (!maybe_result->ToObject(&result)) return maybe_result;
+ return JSArray::cast(result)->SetContent(instances);
}
// Helper function used by Runtime_DebugConstructedBy below.
-static int DebugConstructedBy(JSFunction* constructor, int max_references,
- FixedArray* instances, int instances_size) {
+static int DebugConstructedBy(HeapIterator* iterator,
+ JSFunction* constructor,
+ int max_references,
+ FixedArray* instances,
+ int instances_size) {
AssertNoAllocation no_alloc;
// Iterate the heap.
int count = 0;
- HeapIterator iterator;
HeapObject* heap_obj = NULL;
- while (((heap_obj = iterator.next()) != NULL) &&
+ while (((heap_obj = iterator->next()) != NULL) &&
(max_references == 0 || count < max_references)) {
// Only look at all JSObjects.
if (heap_obj->IsJSObject()) {
@@ -12161,7 +12449,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugConstructedBy) {
ASSERT(args.length() == 2);
// First perform a full GC in order to avoid dead objects.
- isolate->heap()->CollectAllGarbage(false);
+ isolate->heap()->CollectAllGarbage(Heap::kMakeHeapIterableMask);
// Check parameters.
CONVERT_CHECKED(JSFunction, constructor, args[0]);
@@ -12170,7 +12458,12 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugConstructedBy) {
// Get the number of referencing objects.
int count;
- count = DebugConstructedBy(constructor, max_references, NULL, 0);
+ HeapIterator heap_iterator;
+ count = DebugConstructedBy(&heap_iterator,
+ constructor,
+ max_references,
+ NULL,
+ 0);
// Allocate an array to hold the result.
Object* object;
@@ -12179,8 +12472,14 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugConstructedBy) {
}
FixedArray* instances = FixedArray::cast(object);
+ ASSERT(HEAP->IsHeapIterable());
// Fill the referencing objects.
- count = DebugConstructedBy(constructor, max_references, instances, count);
+ HeapIterator heap_iterator2;
+ count = DebugConstructedBy(&heap_iterator2,
+ constructor,
+ max_references,
+ instances,
+ count);
// Return result as JS array.
Object* result;
@@ -12188,8 +12487,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugConstructedBy) {
isolate->context()->global_context()->array_function());
if (!maybe_result->ToObject(&result)) return maybe_result;
}
- JSArray::cast(result)->SetContent(instances);
- return result;
+ return JSArray::cast(result)->SetContent(instances);
}
@@ -12219,7 +12517,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugDisassembleFunction) {
// Get the function and make sure it is compiled.
CONVERT_ARG_CHECKED(JSFunction, func, 0);
Handle<SharedFunctionInfo> shared(func->shared());
- if (!EnsureCompiled(shared, KEEP_EXCEPTION)) {
+ if (!SharedFunctionInfo::EnsureCompiled(shared, KEEP_EXCEPTION)) {
return Failure::Exception();
}
func->code()->PrintLn();
@@ -12235,7 +12533,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugDisassembleConstructor) {
// Get the function and make sure it is compiled.
CONVERT_ARG_CHECKED(JSFunction, func, 0);
Handle<SharedFunctionInfo> shared(func->shared());
- if (!EnsureCompiled(shared, KEEP_EXCEPTION)) {
+ if (!SharedFunctionInfo::EnsureCompiled(shared, KEEP_EXCEPTION)) {
return Failure::Exception();
}
shared->construct_stub()->PrintLn();
@@ -12253,14 +12551,15 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionGetInferredName) {
}
-static int FindSharedFunctionInfosForScript(Script* script,
+static int FindSharedFunctionInfosForScript(HeapIterator* iterator,
+ Script* script,
FixedArray* buffer) {
AssertNoAllocation no_allocations;
-
int counter = 0;
int buffer_size = buffer->length();
- HeapIterator iterator;
- for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
+ for (HeapObject* obj = iterator->next();
+ obj != NULL;
+ obj = iterator->next()) {
ASSERT(obj != NULL);
if (!obj->IsSharedFunctionInfo()) {
continue;
@@ -12286,16 +12585,30 @@ RUNTIME_FUNCTION(MaybeObject*,
HandleScope scope(isolate);
CONVERT_CHECKED(JSValue, script_value, args[0]);
+
Handle<Script> script = Handle<Script>(Script::cast(script_value->value()));
const int kBufferSize = 32;
Handle<FixedArray> array;
array = isolate->factory()->NewFixedArray(kBufferSize);
- int number = FindSharedFunctionInfosForScript(*script, *array);
+ int number;
+ {
+ isolate->heap()->EnsureHeapIsIterable();
+ AssertNoAllocation no_allocations;
+ HeapIterator heap_iterator;
+ Script* scr = *script;
+ FixedArray* arr = *array;
+ number = FindSharedFunctionInfosForScript(&heap_iterator, scr, arr);
+ }
if (number > kBufferSize) {
array = isolate->factory()->NewFixedArray(number);
- FindSharedFunctionInfosForScript(*script, *array);
+ isolate->heap()->EnsureHeapIsIterable();
+ AssertNoAllocation no_allocations;
+ HeapIterator heap_iterator;
+ Script* scr = *script;
+ FixedArray* arr = *array;
+ FindSharedFunctionInfosForScript(&heap_iterator, scr, arr);
}
Handle<JSArray> result = isolate->factory()->NewJSArrayWithElements(array);
@@ -12776,6 +13089,8 @@ static Handle<Object> Runtime_GetScriptFromScriptName(
// Scan the heap for Script objects to find the script with the requested
// script data.
Handle<Script> script;
+ script_name->GetHeap()->EnsureHeapIsIterable();
+ AssertNoAllocation no_allocation_during_heap_iteration;
HeapIterator iterator;
HeapObject* obj = NULL;
while (script.is_null() && ((obj = iterator.next()) != NULL)) {
@@ -12824,34 +13139,32 @@ static bool ShowFrameInStackTrace(StackFrame* raw_frame,
Object* caller,
bool* seen_caller) {
// Only display JS frames.
- if (!raw_frame->is_java_script())
+ if (!raw_frame->is_java_script()) {
return false;
+ }
JavaScriptFrame* frame = JavaScriptFrame::cast(raw_frame);
Object* raw_fun = frame->function();
// Not sure when this can happen but skip it just in case.
- if (!raw_fun->IsJSFunction())
+ if (!raw_fun->IsJSFunction()) {
return false;
+ }
if ((raw_fun == caller) && !(*seen_caller)) {
*seen_caller = true;
return false;
}
// Skip all frames until we've seen the caller.
if (!(*seen_caller)) return false;
- // Also, skip the most obvious builtin calls. We recognize builtins
- // as (1) functions called with the builtins object as the receiver and
- // as (2) functions from native scripts called with undefined as the
- // receiver (direct calls to helper functions in the builtins
- // code). Some builtin calls (such as Number.ADD which is invoked
- // using 'call') are very difficult to recognize so we're leaving
- // them in for now.
- if (frame->receiver()->IsJSBuiltinsObject()) {
- return false;
- }
- JSFunction* fun = JSFunction::cast(raw_fun);
- Object* raw_script = fun->shared()->script();
- if (frame->receiver()->IsUndefined() && raw_script->IsScript()) {
- int script_type = Script::cast(raw_script)->type()->value();
- return script_type != Script::TYPE_NATIVE;
+ // Also, skip non-visible built-in functions and any call with the builtins
+ // object as receiver, so as not to reveal either the builtins object or
+ // an internal function.
+ // The --builtins-in-stack-traces command line flag allows including
+ // internal call sites in the stack trace for debugging purposes.
+ if (!FLAG_builtins_in_stack_traces) {
+ JSFunction* fun = JSFunction::cast(raw_fun);
+ if (frame->receiver()->IsJSBuiltinsObject() ||
+ (fun->IsBuiltin() && !fun->shared()->native())) {
+ return false;
+ }
}
return true;
}
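
In practice this means internal frames stay hidden from Error().stack by default, and a V8 developer can presumably opt back in by passing --builtins-in-stack-traces (the flag named above) on the command line, e.g. to d8.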
@@ -12987,18 +13300,20 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFromCache) {
// TODO(antonm): consider passing a receiver when constructing a cache.
Handle<Object> receiver(isolate->global_context()->global());
// This handle is nor shared, nor used later, so it's safe.
- Object** argv[] = { key_handle.location() };
- bool pending_exception = false;
+ Handle<Object> argv[] = { key_handle };
+ bool pending_exception;
value = Execution::Call(factory,
receiver,
- 1,
+ ARRAY_SIZE(argv),
argv,
&pending_exception);
if (pending_exception) return Failure::Exception();
}
#ifdef DEBUG
- cache_handle->JSFunctionResultCacheVerify();
+ if (FLAG_verify_heap) {
+ cache_handle->JSFunctionResultCacheVerify();
+ }
#endif
// Function invocation may have cleared the cache. Reread all the data.
@@ -13027,7 +13342,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFromCache) {
cache_handle->set_finger_index(index);
#ifdef DEBUG
- cache_handle->JSFunctionResultCacheVerify();
+ if (FLAG_verify_heap) {
+ cache_handle->JSFunctionResultCacheVerify();
+ }
#endif
return *value;
@@ -13144,6 +13461,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_IS_VAR) {
return isolate->heap()->ToBoolean(obj->Has##Name()); \
}
+ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(FastSmiOnlyElements)
ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(FastElements)
ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(FastDoubleElements)
ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(DictionaryElements)
@@ -13160,6 +13478,14 @@ ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(ExternalDoubleElements)
#undef ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_HaveSameMap) {
+ ASSERT(args.length() == 2);
+ CONVERT_CHECKED(JSObject, obj1, args[0]);
+ CONVERT_CHECKED(JSObject, obj2, args[1]);
+ return isolate->heap()->ToBoolean(obj1->map() == obj2->map());
+}
+
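+ (Like the element-kind predicates above, this helper is presumably meant for
+ natives-syntax tests: %HaveSameMap(a, b) answers whether the two objects
+ currently share a map (hidden class), which is handy for asserting that an
+ elements-kind transition did or did not happen.)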
// ----------------------------------------------------------------------------
// Implementation of Runtime
@@ -13227,6 +13553,9 @@ void Runtime::PerformGC(Object* result) {
Isolate* isolate = Isolate::Current();
Failure* failure = Failure::cast(result);
if (failure->IsRetryAfterGC()) {
+ if (isolate->heap()->new_space()->AddFreshPage()) {
+ return;
+ }
// Try to do a garbage collection; ignore it if it fails. The C
// entry stub will throw an out-of-memory exception in that case.
isolate->heap()->CollectGarbage(failure->allocation_space());
@@ -13234,7 +13563,7 @@ void Runtime::PerformGC(Object* result) {
// Handle last resort GC and make sure to allow future allocations
// to grow the heap without causing GCs (if possible).
isolate->counters()->gc_last_resort_from_js()->Increment();
- isolate->heap()->CollectAllGarbage(false);
+ isolate->heap()->CollectAllGarbage(Heap::kNoGCFlags);
}
}
diff --git a/deps/v8/src/runtime.h b/deps/v8/src/runtime.h
index 1538b7d84..b13662df0 100644
--- a/deps/v8/src/runtime.h
+++ b/deps/v8/src/runtime.h
@@ -69,7 +69,6 @@ namespace internal {
\
F(GetPrototype, 1, 1) \
F(IsInPrototypeChain, 2, 1) \
- F(SetHiddenPrototype, 2, 1) \
\
F(IsConstructCall, 0, 1) \
\
@@ -80,6 +79,7 @@ namespace internal {
\
/* Utilities */ \
F(CheckIsBootstrapping, 0, 1) \
+ F(Call, -1 /* >= 2 */, 1) \
F(Apply, 5, 1) \
F(GetFunctionDelegate, 1, 1) \
F(GetConstructorDelegate, 1, 1) \
@@ -98,6 +98,7 @@ namespace internal {
F(SetNewFunctionAttributes, 1, 1) \
F(AllocateInNewSpace, 1, 1) \
F(SetNativeFlag, 1, 1) \
+ F(StoreArrayLiteralElement, 5, 1) \
\
/* Array join support */ \
F(PushIfAbsent, 2, 1) \
@@ -142,7 +143,7 @@ namespace internal {
F(StringAdd, 2, 1) \
F(StringBuilderConcat, 3, 1) \
F(StringBuilderJoin, 3, 1) \
- F(SparseJoinWithSeparator, 3, 1) \
+ F(SparseJoinWithSeparator, 3, 1) \
\
/* Bit operations */ \
F(NumberOr, 2, 1) \
@@ -211,14 +212,14 @@ namespace internal {
/* Reflection */ \
F(FunctionSetInstanceClassName, 2, 1) \
F(FunctionSetLength, 2, 1) \
- F(BoundFunctionSetLength, 2, 1) \
F(FunctionSetPrototype, 2, 1) \
F(FunctionSetReadOnlyPrototype, 1, 1) \
F(FunctionGetName, 1, 1) \
F(FunctionSetName, 2, 1) \
F(FunctionNameShouldPrintAsAnonymous, 1, 1) \
F(FunctionMarkNameShouldPrintAsAnonymous, 1, 1) \
- F(FunctionSetBound, 1, 1) \
+ F(FunctionBindArguments, 4, 1) \
+ F(BoundFunctionGetBindings, 1, 1) \
F(FunctionRemovePrototype, 1, 1) \
F(FunctionGetSourceCode, 1, 1) \
F(FunctionGetScript, 1, 1) \
@@ -246,7 +247,7 @@ namespace internal {
F(DateLocalTimezone, 1, 1) \
F(DateLocalTimeOffset, 0, 1) \
F(DateDaylightSavingsOffset, 1, 1) \
- F(DateMakeDay, 3, 1) \
+ F(DateMakeDay, 2, 1) \
F(DateYMDFromTime, 2, 1) \
\
/* Numbers */ \
@@ -257,8 +258,7 @@ namespace internal {
\
/* Eval */ \
F(GlobalReceiver, 1, 1) \
- F(ResolvePossiblyDirectEval, 4, 2) \
- F(ResolvePossiblyDirectEvalNoLookup, 4, 2) \
+ F(ResolvePossiblyDirectEval, 5, 2) \
\
F(SetProperty, -1 /* 4 or 5 */, 1) \
F(DefineOrRedefineDataProperty, 4, 1) \
@@ -278,9 +278,6 @@ namespace internal {
\
/* Literals */ \
F(MaterializeRegExpLiteral, 4, 1)\
- F(CreateArrayLiteralBoilerplate, 3, 1) \
- F(CloneLiteralBoilerplate, 1, 1) \
- F(CloneShallowLiteralBoilerplate, 1, 1) \
F(CreateObjectLiteral, 4, 1) \
F(CreateObjectLiteralShallow, 4, 1) \
F(CreateArrayLiteral, 3, 1) \
@@ -296,6 +293,17 @@ namespace internal {
F(GetConstructTrap, 1, 1) \
F(Fix, 1, 1) \
\
+ /* Harmony sets */ \
+ F(SetInitialize, 1, 1) \
+ F(SetAdd, 2, 1) \
+ F(SetHas, 2, 1) \
+ F(SetDelete, 2, 1) \
+ \
+ /* Harmony maps */ \
+ F(MapInitialize, 1, 1) \
+ F(MapGet, 2, 1) \
+ F(MapSet, 3, 1) \
+ \
/* Harmony weakmaps */ \
F(WeakMapInitialize, 1, 1) \
F(WeakMapGet, 2, 1) \
@@ -304,7 +312,7 @@ namespace internal {
/* Statements */ \
F(NewClosure, 3, 1) \
F(NewObject, 1, 1) \
- F(NewObjectFromBound, 2, 1) \
+ F(NewObjectFromBound, 1, 1) \
F(FinalizeInstanceSize, 1, 1) \
F(Throw, 1, 1) \
F(ReThrow, 1, 1) \
@@ -333,6 +341,7 @@ namespace internal {
/* Debugging */ \
F(DebugPrint, 1, 1) \
F(DebugTrace, 0, 1) \
+ F(TraceElementsKindTransition, 5, 1) \
F(TraceEnter, 0, 1) \
F(TraceExit, 1, 1) \
F(Abort, 2, 1) \
@@ -354,6 +363,7 @@ namespace internal {
F(IS_VAR, 1, 1) \
\
/* expose boolean functions from objects-inl.h */ \
+ F(HasFastSmiOnlyElements, 1, 1) \
F(HasFastElements, 1, 1) \
F(HasFastDoubleElements, 1, 1) \
F(HasDictionaryElements, 1, 1) \
@@ -367,6 +377,9 @@ namespace internal {
F(HasExternalUnsignedIntElements, 1, 1) \
F(HasExternalFloatElements, 1, 1) \
F(HasExternalDoubleElements, 1, 1) \
+ F(TransitionElementsSmiToDouble, 1, 1) \
+ F(TransitionElementsDoubleToObject, 1, 1) \
+ F(HaveSameMap, 2, 1) \
/* profiler */ \
F(ProfilerResume, 0, 1) \
F(ProfilerPause, 0, 1)
@@ -492,6 +505,7 @@ namespace internal {
F(MathPow, 2, 1) \
F(MathSin, 1, 1) \
F(MathCos, 1, 1) \
+ F(MathTan, 1, 1) \
F(MathSqrt, 1, 1) \
F(MathLog, 1, 1) \
F(IsRegExpEquivalent, 2, 1) \
@@ -624,16 +638,14 @@ class Runtime : public AllStatic {
static bool IsUpperCaseChar(RuntimeState* runtime_state, uint16_t ch);
- // TODO(1240886): The following three methods are *not* handle safe,
- // but accept handle arguments. This seems fragile.
+ // TODO(1240886): Some of the following methods are *not* handle safe, but
+ // accept handle arguments. This seems fragile.
// Support getting the characters in a string using [] notation as
// in Firefox/SpiderMonkey, Safari and Opera.
MUST_USE_RESULT static MaybeObject* GetElementOrCharAt(Isolate* isolate,
Handle<Object> object,
uint32_t index);
- MUST_USE_RESULT static MaybeObject* GetElement(Handle<Object> object,
- uint32_t index);
MUST_USE_RESULT static MaybeObject* SetObjectProperty(
Isolate* isolate,
@@ -673,11 +685,9 @@ class Runtime : public AllStatic {
//---------------------------------------------------------------------------
// Constants used by interface to runtime functions.
-enum kDeclareGlobalsFlags {
- kDeclareGlobalsEvalFlag = 1 << 0,
- kDeclareGlobalsStrictModeFlag = 1 << 1,
- kDeclareGlobalsNativeFlag = 1 << 2
-};
+class DeclareGlobalsEvalFlag: public BitField<bool, 0, 1> {};
+class DeclareGlobalsNativeFlag: public BitField<bool, 1, 1> {};
+class DeclareGlobalsLanguageMode: public BitField<LanguageMode, 2, 2> {};
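
These replace the old kDeclareGlobals* flag enum with V8's BitField helper so the three fields pack into one small integer. A hedged sketch of how such fields combine and decode (encode/decode are the standard BitField accessors):

    uint32_t flags = DeclareGlobalsEvalFlag::encode(true) |
                     DeclareGlobalsNativeFlag::encode(false) |
                     DeclareGlobalsLanguageMode::encode(CLASSIC_MODE);
    bool is_eval = DeclareGlobalsEvalFlag::decode(flags);
    LanguageMode mode = DeclareGlobalsLanguageMode::decode(flags);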
} } // namespace v8::internal
diff --git a/deps/v8/src/runtime.js b/deps/v8/src/runtime.js
index 14ff1b69c..d0cdb3ef6 100644
--- a/deps/v8/src/runtime.js
+++ b/deps/v8/src/runtime.js
@@ -355,7 +355,7 @@ function IN(x) {
if (!IS_SPEC_OBJECT(x)) {
throw %MakeTypeError('invalid_in_operator_use', [this, x]);
}
- return %_IsNonNegativeSmi(this) && !%IsJSProxy(x) ?
+ return %_IsNonNegativeSmi(this) ?
%HasElement(x, this) : %HasProperty(x, %ToString(this));
}
@@ -375,6 +375,12 @@ function INSTANCE_OF(F) {
return 1;
}
+ // Check if function is bound, if so, get [[BoundFunction]] from it
+ // and use that instead of F.
+ var bindings = %BoundFunctionGetBindings(F);
+ if (bindings) {
+ F = bindings[kBoundFunctionIndex]; // Always a non-bound function.
+ }
// Get the prototype of F; if it is not an object, throw an error.
var O = F.prototype;
if (!IS_SPEC_OBJECT(O)) {
@@ -386,13 +392,6 @@ function INSTANCE_OF(F) {
}
-// Get an array of property keys for the given object. Used in
-// for-in statements.
-function GET_KEYS() {
- return %GetPropertyNames(this);
-}
-
-
// Filter a given key against an object by checking if the object
// has a property with the given key; return the key as a string if
// it has. Otherwise returns 0 (smi). Used in for-in statements.
@@ -429,20 +428,10 @@ function CALL_FUNCTION_PROXY() {
}
-function CALL_FUNCTION_PROXY_AS_CONSTRUCTOR(proxy) {
- var arity = %_ArgumentsLength() - 1;
+function CALL_FUNCTION_PROXY_AS_CONSTRUCTOR() {
+ var proxy = this;
var trap = %GetConstructTrap(proxy);
- var receiver = void 0;
- if (!IS_UNDEFINED(trap)) {
- trap = %GetCallTrap(proxy);
- var proto = proxy.prototype;
- if (!IS_SPEC_OBJECT(proto) && proto !== null) {
- throw MakeTypeError("proto_object_or_null", [proto]);
- }
- receiver = new global.Object();
- receiver.__proto__ = proto;
- }
- return %Apply(trap, this, arguments, 1, arity);
+ return %Apply(trap, this, arguments, 0, %_ArgumentsLength());
}
@@ -469,11 +458,12 @@ function APPLY_PREPARE(args) {
}
if (!IS_SPEC_FUNCTION(this)) {
- throw %MakeTypeError('apply_non_function', [ %ToString(this), typeof this ]);
+ throw %MakeTypeError('apply_non_function',
+ [ %ToString(this), typeof this ]);
}
// Make sure the arguments list has the right type.
- if (args != null && !IS_ARRAY(args) && !IS_ARGUMENTS(args)) {
+ if (args != null && !IS_SPEC_OBJECT(args)) {
throw %MakeTypeError('apply_wrong_args', []);
}
diff --git a/deps/v8/src/safepoint-table.cc b/deps/v8/src/safepoint-table.cc
index bcd0a1d63..89ad8afab 100644
--- a/deps/v8/src/safepoint-table.cc
+++ b/deps/v8/src/safepoint-table.cc
@@ -122,17 +122,20 @@ void Safepoint::DefinePointerRegister(Register reg) {
Safepoint SafepointTableBuilder::DefineSafepoint(
- Assembler* assembler, Safepoint::Kind kind, int arguments,
- int deoptimization_index) {
- ASSERT(deoptimization_index != -1);
+ Assembler* assembler,
+ Safepoint::Kind kind,
+ int arguments,
+ Safepoint::DeoptMode deopt_mode) {
ASSERT(arguments >= 0);
- DeoptimizationInfo pc_and_deoptimization_index;
- pc_and_deoptimization_index.pc = assembler->pc_offset();
- pc_and_deoptimization_index.deoptimization_index = deoptimization_index;
- pc_and_deoptimization_index.pc_after_gap = assembler->pc_offset();
- pc_and_deoptimization_index.arguments = arguments;
- pc_and_deoptimization_index.has_doubles = (kind & Safepoint::kWithDoubles);
- deoptimization_info_.Add(pc_and_deoptimization_index);
+ DeoptimizationInfo info;
+ info.pc = assembler->pc_offset();
+ info.arguments = arguments;
+ info.has_doubles = (kind & Safepoint::kWithDoubles);
+ deoptimization_info_.Add(info);
+ deopt_index_list_.Add(Safepoint::kNoDeoptimizationIndex);
+ if (deopt_mode == Safepoint::kNoLazyDeopt) {
+ last_lazy_safepoint_ = deopt_index_list_.length();
+ }
indexes_.Add(new ZoneList<int>(8));
registers_.Add((kind & Safepoint::kWithRegisters)
? new ZoneList<int>(4)
@@ -141,6 +144,12 @@ Safepoint SafepointTableBuilder::DefineSafepoint(
}
+void SafepointTableBuilder::RecordLazyDeoptimizationIndex(int index) {
+ while (last_lazy_safepoint_ < deopt_index_list_.length()) {
+ deopt_index_list_[last_lazy_safepoint_++] = index;
+ }
+}
+
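
A rough sketch of the protocol between a code generator and the builder under the new scheme (Safepoint::kSimple and the variable names are assumptions, not part of this diff):

    // At a call site that may deoptimize lazily:
    safepoints_.DefineSafepoint(masm, Safepoint::kSimple, 0, Safepoint::kLazyDeopt);
    // ... later, once the lazy-deopt entry for that call is known:
    safepoints_.RecordLazyDeoptimizationIndex(deopt_index);
    // Safepoints defined with kNoLazyDeopt are never back-filled this way.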
unsigned SafepointTableBuilder::GetCodeOffset() const {
ASSERT(emitted_);
return offset_;
@@ -173,11 +182,11 @@ void SafepointTableBuilder::Emit(Assembler* assembler, int bits_per_entry) {
assembler->dd(length);
assembler->dd(bytes_per_entry);
- // Emit sorted table of pc offsets together with deoptimization indexes and
- // pc after gap information.
+ // Emit sorted table of pc offsets together with deoptimization indexes.
for (int i = 0; i < length; i++) {
assembler->dd(deoptimization_info_[i].pc);
- assembler->dd(EncodeExceptPC(deoptimization_info_[i]));
+ assembler->dd(EncodeExceptPC(deoptimization_info_[i],
+ deopt_index_list_[i]));
}
// Emit table of bitmaps.
@@ -222,35 +231,14 @@ void SafepointTableBuilder::Emit(Assembler* assembler, int bits_per_entry) {
}
-uint32_t SafepointTableBuilder::EncodeExceptPC(const DeoptimizationInfo& info) {
- unsigned index = info.deoptimization_index;
- unsigned gap_size = info.pc_after_gap - info.pc;
+uint32_t SafepointTableBuilder::EncodeExceptPC(const DeoptimizationInfo& info,
+ unsigned index) {
uint32_t encoding = SafepointEntry::DeoptimizationIndexField::encode(index);
- encoding |= SafepointEntry::GapCodeSizeField::encode(gap_size);
encoding |= SafepointEntry::ArgumentsField::encode(info.arguments);
encoding |= SafepointEntry::SaveDoublesField::encode(info.has_doubles);
return encoding;
}
-int SafepointTableBuilder::CountShortDeoptimizationIntervals(unsigned limit) {
- int result = 0;
- if (!deoptimization_info_.is_empty()) {
- unsigned previous_gap_end = deoptimization_info_[0].pc_after_gap;
- for (int i = 1, n = deoptimization_info_.length(); i < n; i++) {
- DeoptimizationInfo info = deoptimization_info_[i];
- if (static_cast<int>(info.deoptimization_index) !=
- Safepoint::kNoDeoptimizationIndex) {
- if (previous_gap_end + limit > info.pc) {
- result++;
- }
- previous_gap_end = info.pc_after_gap;
- }
- }
- }
- return result;
-}
-
-
} } // namespace v8::internal
diff --git a/deps/v8/src/safepoint-table.h b/deps/v8/src/safepoint-table.h
index de537f982..57fceecd9 100644
--- a/deps/v8/src/safepoint-table.h
+++ b/deps/v8/src/safepoint-table.h
@@ -62,10 +62,20 @@ class SafepointEntry BASE_EMBEDDED {
return DeoptimizationIndexField::decode(info_);
}
- int gap_code_size() const {
- ASSERT(is_valid());
- return GapCodeSizeField::decode(info_);
- }
+ static const int kArgumentsFieldBits = 3;
+ static const int kSaveDoublesFieldBits = 1;
+ static const int kDeoptIndexBits =
+ 32 - kArgumentsFieldBits - kSaveDoublesFieldBits;
+ class DeoptimizationIndexField:
+ public BitField<int, 0, kDeoptIndexBits> {}; // NOLINT
+ class ArgumentsField:
+ public BitField<unsigned,
+ kDeoptIndexBits,
+ kArgumentsFieldBits> {}; // NOLINT
+ class SaveDoublesField:
+ public BitField<bool,
+ kDeoptIndexBits + kArgumentsFieldBits,
+ kSaveDoublesFieldBits> { }; // NOLINT
int argument_count() const {
ASSERT(is_valid());
@@ -85,27 +95,6 @@ class SafepointEntry BASE_EMBEDDED {
bool HasRegisters() const;
bool HasRegisterAt(int reg_index) const;
- // Reserve 13 bits for the gap code size. On ARM a constant pool can be
- // emitted when generating the gap code. The size of the const pool is less
- // than what can be represented in 12 bits, so 13 bits gives room for having
- // instructions before potentially emitting a constant pool.
- static const int kGapCodeSizeBits = 13;
- static const int kArgumentsFieldBits = 3;
- static const int kSaveDoublesFieldBits = 1;
- static const int kDeoptIndexBits =
- 32 - kGapCodeSizeBits - kArgumentsFieldBits - kSaveDoublesFieldBits;
- class GapCodeSizeField: public BitField<unsigned, 0, kGapCodeSizeBits> {};
- class DeoptimizationIndexField: public BitField<int,
- kGapCodeSizeBits,
- kDeoptIndexBits> {}; // NOLINT
- class ArgumentsField: public BitField<unsigned,
- kGapCodeSizeBits + kDeoptIndexBits,
- kArgumentsFieldBits> {}; // NOLINT
- class SaveDoublesField: public BitField<bool,
- kGapCodeSizeBits + kDeoptIndexBits +
- kArgumentsFieldBits,
- kSaveDoublesFieldBits> { }; // NOLINT
-
private:
unsigned info_;
uint8_t* bits_;
@@ -186,6 +175,11 @@ class Safepoint BASE_EMBEDDED {
kWithRegistersAndDoubles = kWithRegisters | kWithDoubles
} Kind;
+ enum DeoptMode {
+ kNoLazyDeopt,
+ kLazyDeopt
+ };
+
static const int kNoDeoptimizationIndex =
(1 << (SafepointEntry::kDeoptIndexBits)) - 1;
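
The arithmetic works out as: kDeoptIndexBits = 32 - 3 - 1 = 28, so kNoDeoptimizationIndex = (1 << 28) - 1 = 268435455, and each 32-bit entry packs the deoptimization index in bits 0-27, the argument count in bits 28-30, and the save-doubles flag in bit 31.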
@@ -206,9 +200,11 @@ class SafepointTableBuilder BASE_EMBEDDED {
public:
SafepointTableBuilder()
: deoptimization_info_(32),
+ deopt_index_list_(32),
indexes_(32),
registers_(32),
- emitted_(false) { }
+ emitted_(false),
+ last_lazy_safepoint_(0) { }
// Get the offset of the emitted safepoint table in the code.
unsigned GetCodeOffset() const;
@@ -217,50 +213,34 @@ class SafepointTableBuilder BASE_EMBEDDED {
Safepoint DefineSafepoint(Assembler* assembler,
Safepoint::Kind kind,
int arguments,
- int deoptimization_index);
-
- // Update the last safepoint with the size of the code generated until the
- // end of the gap following it.
- void SetPcAfterGap(int pc) {
- ASSERT(!deoptimization_info_.is_empty());
- int index = deoptimization_info_.length() - 1;
- deoptimization_info_[index].pc_after_gap = pc;
- }
+ Safepoint::DeoptMode mode);
- // Get the end pc offset of the last safepoint, including the code generated
- // until the end of the gap following it.
- unsigned GetPcAfterGap() {
- int index = deoptimization_info_.length();
- if (index == 0) return 0;
- return deoptimization_info_[index - 1].pc_after_gap;
- }
+ // Record deoptimization index for lazy deoptimization for the last
+ // outstanding safepoints.
+ void RecordLazyDeoptimizationIndex(int index);
// Emit the safepoint table after the body. The number of bits per
// entry must be enough to hold all the pointer indexes.
void Emit(Assembler* assembler, int bits_per_entry);
- // Count the number of deoptimization points where the next
- // following deoptimization point comes less than limit bytes
- // after the end of this point's gap.
- int CountShortDeoptimizationIntervals(unsigned limit);
private:
struct DeoptimizationInfo {
unsigned pc;
- unsigned deoptimization_index;
- unsigned pc_after_gap;
unsigned arguments;
bool has_doubles;
};
- uint32_t EncodeExceptPC(const DeoptimizationInfo& info);
+ uint32_t EncodeExceptPC(const DeoptimizationInfo& info, unsigned index);
ZoneList<DeoptimizationInfo> deoptimization_info_;
+ ZoneList<unsigned> deopt_index_list_;
ZoneList<ZoneList<int>*> indexes_;
ZoneList<ZoneList<int>*> registers_;
unsigned offset_;
bool emitted_;
+ int last_lazy_safepoint_;
DISALLOW_COPY_AND_ASSIGN(SafepointTableBuilder);
};
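
The hunk above drops the 13-bit gap-code-size field, so the 32-bit info word now splits into a 28-bit deoptimization index, a 3-bit argument count, and a 1-bit save-doubles flag. A minimal stand-alone sketch of that packing, assuming only the field widths shown in the hunk (the BitField template below is a stand-in, not V8's actual header):

    // Repacked safepoint info word: 28-bit deopt index, 3-bit argument
    // count, 1-bit save-doubles flag. Widths taken from the hunk above.
    #include <cassert>
    #include <cstdint>

    template <class T, int shift, int size>
    struct BitField {
      static const uint32_t kMask = ((1u << size) - 1u) << shift;
      static uint32_t encode(T value) {
        return static_cast<uint32_t>(value) << shift;
      }
      static T decode(uint32_t value) {
        return static_cast<T>((value & kMask) >> shift);
      }
    };

    static const int kArgumentsFieldBits = 3;
    static const int kSaveDoublesFieldBits = 1;
    static const int kDeoptIndexBits =
        32 - kArgumentsFieldBits - kSaveDoublesFieldBits;  // 28 bits

    typedef BitField<int, 0, kDeoptIndexBits> DeoptimizationIndexField;
    typedef BitField<unsigned, kDeoptIndexBits,
                     kArgumentsFieldBits> ArgumentsField;
    typedef BitField<bool, kDeoptIndexBits + kArgumentsFieldBits,
                     kSaveDoublesFieldBits> SaveDoublesField;

    int main() {
      uint32_t info = DeoptimizationIndexField::encode(12345) |
                      ArgumentsField::encode(2) |
                      SaveDoublesField::encode(true);
      assert(DeoptimizationIndexField::decode(info) == 12345);
      assert(ArgumentsField::decode(info) == 2u);
      assert(SaveDoublesField::decode(info) == true);
      // All ones in the index field is the kNoDeoptimizationIndex sentinel,
      // matching (1 << kDeoptIndexBits) - 1 above.
      assert(((1u << kDeoptIndexBits) - 1) == 0x0FFFFFFFu);
      return 0;
    }
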
diff --git a/deps/v8/src/scanner.cc b/deps/v8/src/scanner.cc
index 69ea8ae6e..01fe81c64 100644
--- a/deps/v8/src/scanner.cc
+++ b/deps/v8/src/scanner.cc
@@ -36,30 +36,26 @@ namespace v8 {
namespace internal {
// ----------------------------------------------------------------------------
-// Scanner::LiteralScope
-
-Scanner::LiteralScope::LiteralScope(Scanner* self)
- : scanner_(self), complete_(false) {
- self->StartLiteral();
-}
-
+// Scanner
-Scanner::LiteralScope::~LiteralScope() {
- if (!complete_) scanner_->DropLiteral();
-}
+Scanner::Scanner(UnicodeCache* unicode_cache)
+ : unicode_cache_(unicode_cache),
+ octal_pos_(Location::invalid()),
+ harmony_scoping_(false) { }
-void Scanner::LiteralScope::Complete() {
- scanner_->TerminateLiteral();
- complete_ = true;
+void Scanner::Initialize(UC16CharacterStream* source) {
+ source_ = source;
+ // Need to capture identifiers in order to recognize "get" and "set"
+ // in object literals.
+ Init();
+ // Skip initial whitespace allowing HTML comment ends just like
+ // after a newline and scan first token.
+ has_line_terminator_before_next_ = true;
+ SkipWhiteSpace();
+ Scan();
}
-// ----------------------------------------------------------------------------
-// Scanner
-
-Scanner::Scanner(UnicodeCache* unicode_cache)
- : unicode_cache_(unicode_cache) { }
-
uc32 Scanner::ScanHexNumber(int expected_length) {
ASSERT(expected_length <= 4); // prevent overflow
@@ -88,29 +84,6 @@ uc32 Scanner::ScanHexNumber(int expected_length) {
}
-
-// ----------------------------------------------------------------------------
-// JavaScriptScanner
-
-JavaScriptScanner::JavaScriptScanner(UnicodeCache* scanner_contants)
- : Scanner(scanner_contants),
- octal_pos_(Location::invalid()),
- harmony_block_scoping_(false) { }
-
-
-void JavaScriptScanner::Initialize(UC16CharacterStream* source) {
- source_ = source;
- // Need to capture identifiers in order to recognize "get" and "set"
- // in object literals.
- Init();
- // Skip initial whitespace allowing HTML comment ends just like
- // after a newline and scan first token.
- has_line_terminator_before_next_ = true;
- SkipWhiteSpace();
- Scan();
-}
-
-
// Ensure that tokens can be stored in a byte.
STATIC_ASSERT(Token::NUM_TOKENS <= 0x100);
@@ -247,7 +220,7 @@ static const byte one_char_tokens[] = {
};
-Token::Value JavaScriptScanner::Next() {
+Token::Value Scanner::Next() {
current_ = next_;
has_line_terminator_before_next_ = false;
has_multiline_comment_before_next_ = false;
@@ -279,7 +252,7 @@ static inline bool IsByteOrderMark(uc32 c) {
}
-bool JavaScriptScanner::SkipWhiteSpace() {
+bool Scanner::SkipWhiteSpace() {
int start_position = source_pos();
while (true) {
@@ -319,7 +292,7 @@ bool JavaScriptScanner::SkipWhiteSpace() {
}
-Token::Value JavaScriptScanner::SkipSingleLineComment() {
+Token::Value Scanner::SkipSingleLineComment() {
Advance();
// The line terminator at the end of the line is not considered
@@ -335,7 +308,7 @@ Token::Value JavaScriptScanner::SkipSingleLineComment() {
}
-Token::Value JavaScriptScanner::SkipMultiLineComment() {
+Token::Value Scanner::SkipMultiLineComment() {
ASSERT(c0_ == '*');
Advance();
@@ -361,7 +334,7 @@ Token::Value JavaScriptScanner::SkipMultiLineComment() {
}
-Token::Value JavaScriptScanner::ScanHtmlComment() {
+Token::Value Scanner::ScanHtmlComment() {
// Check for <!-- comments.
ASSERT(c0_ == '!');
Advance();
@@ -376,7 +349,7 @@ Token::Value JavaScriptScanner::ScanHtmlComment() {
}
-void JavaScriptScanner::Scan() {
+void Scanner::Scan() {
next_.literal_chars = NULL;
Token::Value token;
do {
@@ -616,7 +589,7 @@ void JavaScriptScanner::Scan() {
}
-void JavaScriptScanner::SeekForward(int pos) {
+void Scanner::SeekForward(int pos) {
// After this call, we will have the token at the given position as
// the "next" token. The "current" token will be invalid.
if (pos == next_.location.beg_pos) return;
@@ -637,7 +610,7 @@ void JavaScriptScanner::SeekForward(int pos) {
}
-void JavaScriptScanner::ScanEscape() {
+void Scanner::ScanEscape() {
uc32 c = c0_;
Advance();
@@ -689,7 +662,7 @@ void JavaScriptScanner::ScanEscape() {
// Octal escapes of the forms '\0xx' and '\xxx' are not a part of
// ECMA-262. Other JS VMs support them.
-uc32 JavaScriptScanner::ScanOctalEscape(uc32 c, int length) {
+uc32 Scanner::ScanOctalEscape(uc32 c, int length) {
uc32 x = c - '0';
int i = 0;
for (; i < length; i++) {
@@ -712,7 +685,7 @@ uc32 JavaScriptScanner::ScanOctalEscape(uc32 c, int length) {
}
-Token::Value JavaScriptScanner::ScanString() {
+Token::Value Scanner::ScanString() {
uc32 quote = c0_;
Advance(); // consume quote
@@ -736,13 +709,13 @@ Token::Value JavaScriptScanner::ScanString() {
}
-void JavaScriptScanner::ScanDecimalDigits() {
+void Scanner::ScanDecimalDigits() {
while (IsDecimalDigit(c0_))
AddLiteralCharAdvance();
}
-Token::Value JavaScriptScanner::ScanNumber(bool seen_period) {
+Token::Value Scanner::ScanNumber(bool seen_period) {
ASSERT(IsDecimalDigit(c0_)); // the first digit of the number or the fraction
enum { DECIMAL, HEX, OCTAL } kind = DECIMAL;
@@ -827,7 +800,7 @@ Token::Value JavaScriptScanner::ScanNumber(bool seen_period) {
}
-uc32 JavaScriptScanner::ScanIdentifierUnicodeEscape() {
+uc32 Scanner::ScanIdentifierUnicodeEscape() {
Advance();
if (c0_ != 'u') return -1;
Advance();
@@ -872,7 +845,7 @@ uc32 JavaScriptScanner::ScanIdentifierUnicodeEscape() {
KEYWORD("instanceof", Token::INSTANCEOF) \
KEYWORD("interface", Token::FUTURE_STRICT_RESERVED_WORD) \
KEYWORD_GROUP('l') \
- KEYWORD("let", harmony_block_scoping \
+ KEYWORD("let", harmony_scoping \
? Token::LET : Token::FUTURE_STRICT_RESERVED_WORD) \
KEYWORD_GROUP('n') \
KEYWORD("new", Token::NEW) \
@@ -906,7 +879,7 @@ uc32 JavaScriptScanner::ScanIdentifierUnicodeEscape() {
static Token::Value KeywordOrIdentifierToken(const char* input,
int input_length,
- bool harmony_block_scoping) {
+ bool harmony_scoping) {
ASSERT(input_length >= 1);
const int kMinLength = 2;
const int kMaxLength = 10;
@@ -944,7 +917,7 @@ static Token::Value KeywordOrIdentifierToken(const char* input,
}
-Token::Value JavaScriptScanner::ScanIdentifierOrKeyword() {
+Token::Value Scanner::ScanIdentifierOrKeyword() {
ASSERT(unicode_cache_->IsIdentifierStart(c0_));
LiteralScope literal(this);
// Scan identifier start character.
@@ -982,14 +955,14 @@ Token::Value JavaScriptScanner::ScanIdentifierOrKeyword() {
Vector<const char> chars = next_.literal_chars->ascii_literal();
return KeywordOrIdentifierToken(chars.start(),
chars.length(),
- harmony_block_scoping_);
+ harmony_scoping_);
}
return Token::IDENTIFIER;
}
-Token::Value JavaScriptScanner::ScanIdentifierSuffix(LiteralScope* literal) {
+Token::Value Scanner::ScanIdentifierSuffix(LiteralScope* literal) {
// Scan the rest of the identifier characters.
while (unicode_cache_->IsIdentifierPart(c0_)) {
if (c0_ == '\\') {
@@ -1012,7 +985,7 @@ Token::Value JavaScriptScanner::ScanIdentifierSuffix(LiteralScope* literal) {
}
-bool JavaScriptScanner::ScanRegExpPattern(bool seen_equal) {
+bool Scanner::ScanRegExpPattern(bool seen_equal) {
// Scan: ('/' | '/=') RegularExpressionBody '/' RegularExpressionFlags
bool in_character_class = false;
@@ -1059,7 +1032,7 @@ bool JavaScriptScanner::ScanRegExpPattern(bool seen_equal) {
}
-bool JavaScriptScanner::ScanLiteralUnicodeEscape() {
+bool Scanner::ScanLiteralUnicodeEscape() {
ASSERT(c0_ == '\\');
uc32 chars_read[6] = {'\\', 'u', 0, 0, 0, 0};
Advance();
@@ -1089,7 +1062,7 @@ bool JavaScriptScanner::ScanLiteralUnicodeEscape() {
}
-bool JavaScriptScanner::ScanRegExpFlags() {
+bool Scanner::ScanRegExpFlags() {
// Scan regular expression flags.
LiteralScope literal(this);
while (unicode_cache_->IsIdentifierPart(c0_)) {
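
The substantive change in this file, besides folding JavaScriptScanner into Scanner, is the flag rename gating the "let" keyword. A toy sketch of that decision, with an illustrative two-token table rather than V8's actual keyword machinery:

    // "let" is a keyword only when harmony scoping is enabled; otherwise
    // it is a future-strict-reserved word, as in the hunk above.
    #include <cassert>
    #include <cstring>

    enum Token { IDENTIFIER, LET, FUTURE_STRICT_RESERVED_WORD };

    Token KeywordOrIdentifierToken(const char* input, bool harmony_scoping) {
      if (std::strcmp(input, "let") == 0) {
        return harmony_scoping ? LET : FUTURE_STRICT_RESERVED_WORD;
      }
      return IDENTIFIER;  // everything else scans as an identifier here
    }

    int main() {
      assert(KeywordOrIdentifierToken("let", true) == LET);
      assert(KeywordOrIdentifierToken("let", false) ==
             FUTURE_STRICT_RESERVED_WORD);
      assert(KeywordOrIdentifierToken("foo", true) == IDENTIFIER);
      return 0;
    }
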
diff --git a/deps/v8/src/scanner.h b/deps/v8/src/scanner.h
index 16c3a427c..c512ec3fe 100644
--- a/deps/v8/src/scanner.h
+++ b/deps/v8/src/scanner.h
@@ -41,6 +41,25 @@
namespace v8 {
namespace internal {
+
+// General collection of (multi-)bit-flags that can be passed to scanners and
+// parsers to signify their (initial) mode of operation.
+enum ParsingFlags {
+ kNoParsingFlags = 0,
+ // Embed LanguageMode values in parsing flags, i.e., equivalent to:
+ // CLASSIC_MODE = 0,
+ // STRICT_MODE,
+ // EXTENDED_MODE,
+ kLanguageModeMask = 0x03,
+ kAllowLazy = 4,
+ kAllowNativesSyntax = 8
+};
+
+STATIC_ASSERT((kLanguageModeMask & CLASSIC_MODE) == CLASSIC_MODE);
+STATIC_ASSERT((kLanguageModeMask & STRICT_MODE) == STRICT_MODE);
+STATIC_ASSERT((kLanguageModeMask & EXTENDED_MODE) == EXTENDED_MODE);
+
+
// Returns the value (0 .. 15) of a hexadecimal character c.
// If c is not a legal hexadecimal character, returns a value < 0.
inline int HexValue(uc32 c) {
@@ -158,7 +177,7 @@ class LiteralBuffer {
}
}
- inline void AddChar(uc16 character) {
+ INLINE(void AddChar(uc16 character)) {
if (position_ >= backing_store_.length()) ExpandBuffer();
if (is_ascii_) {
if (character < kMaxAsciiCharCodeU) {
@@ -249,35 +268,32 @@ class LiteralBuffer {
// ----------------------------------------------------------------------------
-// Scanner base-class.
+// JavaScript Scanner.
-// Generic functionality used by both JSON and JavaScript scanners.
class Scanner {
public:
- // -1 is outside of the range of any real source code.
- static const int kNoOctalLocation = -1;
-
- typedef unibrow::Utf8InputBuffer<1024> Utf8Decoder;
-
+ // Scoped helper for literal recording. Automatically drops the literal
+ // if aborting the scanning before it's complete.
class LiteralScope {
public:
- explicit LiteralScope(Scanner* self);
- ~LiteralScope();
- void Complete();
+ explicit LiteralScope(Scanner* self)
+ : scanner_(self), complete_(false) {
+ scanner_->StartLiteral();
+ }
+ ~LiteralScope() {
+ if (!complete_) scanner_->DropLiteral();
+ }
+ void Complete() {
+ scanner_->TerminateLiteral();
+ complete_ = true;
+ }
private:
Scanner* scanner_;
bool complete_;
};
- explicit Scanner(UnicodeCache* scanner_contants);
-
- // Returns the current token again.
- Token::Value current_token() { return current_.token; }
-
- // One token look-ahead (past the token returned by Next()).
- Token::Value peek() const { return next_.token; }
-
+ // Representation of an interval of source positions.
struct Location {
Location(int b, int e) : beg_pos(b), end_pos(e) { }
Location() : beg_pos(0), end_pos(0) { }
@@ -292,21 +308,28 @@ class Scanner {
int end_pos;
};
+ // -1 is outside of the range of any real source code.
+ static const int kNoOctalLocation = -1;
+
+ typedef unibrow::Utf8InputBuffer<1024> Utf8Decoder;
+
+ explicit Scanner(UnicodeCache* scanner_constants);
+
+ void Initialize(UC16CharacterStream* source);
+
+ // Returns the next token and advances input.
+ Token::Value Next();
+ // Returns the current token again.
+ Token::Value current_token() { return current_.token; }
// Returns the location information for the current token
- // (the token returned by Next()).
+ // (the token last returned by Next()).
Location location() const { return current_.location; }
- Location peek_location() const { return next_.location; }
-
// Returns the literal string, if any, for the current token (the
- // token returned by Next()). The string is 0-terminated and in
- // UTF-8 format; they may contain 0-characters. Literal strings are
- // collected for identifiers, strings, and numbers.
+ // token last returned by Next()). The string is 0-terminated.
+ // Literal strings are collected for identifiers, strings, and
+ // numbers.
// These functions only give the correct result if the literal
// was scanned between calls to StartLiteral() and TerminateLiteral().
- bool is_literal_ascii() {
- ASSERT_NOT_NULL(current_.literal_chars);
- return current_.literal_chars->is_ascii();
- }
Vector<const char> literal_ascii_string() {
ASSERT_NOT_NULL(current_.literal_chars);
return current_.literal_chars->ascii_literal();
@@ -315,6 +338,10 @@ class Scanner {
ASSERT_NOT_NULL(current_.literal_chars);
return current_.literal_chars->uc16_literal();
}
+ bool is_literal_ascii() {
+ ASSERT_NOT_NULL(current_.literal_chars);
+ return current_.literal_chars->is_ascii();
+ }
int literal_length() const {
ASSERT_NOT_NULL(current_.literal_chars);
return current_.literal_chars->length();
@@ -330,12 +357,15 @@ class Scanner {
return current_.literal_chars->length() != source_length;
}
+ // Similar functions for the upcoming token.
+
+ // One token look-ahead (past the token returned by Next()).
+ Token::Value peek() const { return next_.token; }
+
+ Location peek_location() const { return next_.location; }
+
// Returns the literal string for the next token (the token that
// would be returned if Next() were called).
- bool is_next_literal_ascii() {
- ASSERT_NOT_NULL(next_.literal_chars);
- return next_.literal_chars->is_ascii();
- }
Vector<const char> next_literal_ascii_string() {
ASSERT_NOT_NULL(next_.literal_chars);
return next_.literal_chars->ascii_literal();
@@ -344,6 +374,10 @@ class Scanner {
ASSERT_NOT_NULL(next_.literal_chars);
return next_.literal_chars->uc16_literal();
}
+ bool is_next_literal_ascii() {
+ ASSERT_NOT_NULL(next_.literal_chars);
+ return next_.literal_chars->is_ascii();
+ }
int next_literal_length() const {
ASSERT_NOT_NULL(next_.literal_chars);
return next_.literal_chars->length();
@@ -353,7 +387,46 @@ class Scanner {
static const int kCharacterLookaheadBufferSize = 1;
- protected:
+ // Scans octal escape sequence. Also accepts "\0" decimal escape sequence.
+ uc32 ScanOctalEscape(uc32 c, int length);
+
+ // Returns the location of the last seen octal literal.
+ Location octal_position() const { return octal_pos_; }
+ void clear_octal_position() { octal_pos_ = Location::invalid(); }
+
+ // Seek forward to the given position. This operation does not
+ // work in general, for instance when there are pushed back
+ // characters, but works for seeking forward until simple delimiter
+ // tokens, which is what it is used for.
+ void SeekForward(int pos);
+
+ bool HarmonyScoping() const {
+ return harmony_scoping_;
+ }
+ void SetHarmonyScoping(bool block_scoping) {
+ harmony_scoping_ = block_scoping;
+ }
+
+
+ // Returns true if there was a line terminator before the peek'ed token,
+ // possibly inside a multi-line comment.
+ bool HasAnyLineTerminatorBeforeNext() const {
+ return has_line_terminator_before_next_ ||
+ has_multiline_comment_before_next_;
+ }
+
+ // Scans the input as a regular expression pattern, previous
+ // character(s) must be /(=). Returns true if a pattern is scanned.
+ bool ScanRegExpPattern(bool seen_equal);
+ // Returns true if regexp flags are scanned (always since flags can
+ // be empty).
+ bool ScanRegExpFlags();
+
+ // Tells whether the buffer contains an identifier (no escapes).
+ // Used for checking if a property name is an identifier.
+ static bool IsIdentifier(unibrow::CharacterStream* buffer);
+
+ private:
// The current and look-ahead token.
struct TokenDesc {
Token::Value token;
@@ -378,7 +451,7 @@ class Scanner {
next_.literal_chars = free_buffer;
}
- inline void AddLiteralChar(uc32 c) {
+ INLINE(void AddLiteralChar(uc32 c)) {
ASSERT_NOT_NULL(next_.literal_chars);
next_.literal_chars->AddChar(c);
}
@@ -423,107 +496,14 @@ class Scanner {
uc32 ScanHexNumber(int expected_length);
- // Return the current source position.
- int source_pos() {
- return source_->pos() - kCharacterLookaheadBufferSize;
- }
-
- UnicodeCache* unicode_cache_;
-
- // Buffers collecting literal strings, numbers, etc.
- LiteralBuffer literal_buffer1_;
- LiteralBuffer literal_buffer2_;
-
- TokenDesc current_; // desc for current token (as returned by Next())
- TokenDesc next_; // desc for next token (one token look-ahead)
-
- // Input stream. Must be initialized to an UC16CharacterStream.
- UC16CharacterStream* source_;
-
- // One Unicode character look-ahead; c0_ < 0 at the end of the input.
- uc32 c0_;
-};
-
-// ----------------------------------------------------------------------------
-// JavaScriptScanner - base logic for JavaScript scanning.
-
-class JavaScriptScanner : public Scanner {
- public:
- // A LiteralScope that disables recording of some types of JavaScript
- // literals. If the scanner is configured to not record the specific
- // type of literal, the scope will not call StartLiteral.
- class LiteralScope {
- public:
- explicit LiteralScope(JavaScriptScanner* self)
- : scanner_(self), complete_(false) {
- scanner_->StartLiteral();
- }
- ~LiteralScope() {
- if (!complete_) scanner_->DropLiteral();
- }
- void Complete() {
- scanner_->TerminateLiteral();
- complete_ = true;
- }
-
- private:
- JavaScriptScanner* scanner_;
- bool complete_;
- };
-
- explicit JavaScriptScanner(UnicodeCache* scanner_contants);
-
- void Initialize(UC16CharacterStream* source);
-
- // Returns the next token.
- Token::Value Next();
-
- // Returns true if there was a line terminator before the peek'ed token,
- // possibly inside a multi-line comment.
- bool HasAnyLineTerminatorBeforeNext() const {
- return has_line_terminator_before_next_ ||
- has_multiline_comment_before_next_;
- }
-
- // Scans the input as a regular expression pattern, previous
- // character(s) must be /(=). Returns true if a pattern is scanned.
- bool ScanRegExpPattern(bool seen_equal);
- // Returns true if regexp flags are scanned (always since flags can
- // be empty).
- bool ScanRegExpFlags();
-
- // Tells whether the buffer contains an identifier (no escapes).
- // Used for checking if a property name is an identifier.
- static bool IsIdentifier(unibrow::CharacterStream* buffer);
-
- // Scans octal escape sequence. Also accepts "\0" decimal escape sequence.
- uc32 ScanOctalEscape(uc32 c, int length);
-
- // Returns the location of the last seen octal literal
- Location octal_position() const { return octal_pos_; }
- void clear_octal_position() { octal_pos_ = Location::invalid(); }
-
- // Seek forward to the given position. This operation does not
- // work in general, for instance when there are pushed back
- // characters, but works for seeking forward until simple delimiter
- // tokens, which is what it is used for.
- void SeekForward(int pos);
-
- bool HarmonyBlockScoping() const {
- return harmony_block_scoping_;
- }
- void SetHarmonyBlockScoping(bool block_scoping) {
- harmony_block_scoping_ = block_scoping;
- }
-
+ // Scans a single JavaScript token.
+ void Scan();
- protected:
bool SkipWhiteSpace();
Token::Value SkipSingleLineComment();
Token::Value SkipMultiLineComment();
-
- // Scans a single JavaScript token.
- void Scan();
+ // Scans a possible HTML comment -- begins with '<!'.
+ Token::Value ScanHtmlComment();
void ScanDecimalDigits();
Token::Value ScanNumber(bool seen_period);
@@ -533,9 +513,6 @@ class JavaScriptScanner : public Scanner {
void ScanEscape();
Token::Value ScanString();
- // Scans a possible HTML comment -- begins with '<!'.
- Token::Value ScanHtmlComment();
-
// Decodes a unicode escape-sequence which is part of an identifier.
// If the escape sequence cannot be decoded the result is kBadChar.
uc32 ScanIdentifierUnicodeEscape();
@@ -544,9 +521,30 @@ class JavaScriptScanner : public Scanner {
// flags.
bool ScanLiteralUnicodeEscape();
+ // Return the current source position.
+ int source_pos() {
+ return source_->pos() - kCharacterLookaheadBufferSize;
+ }
+
+ UnicodeCache* unicode_cache_;
+
+ // Buffers collecting literal strings, numbers, etc.
+ LiteralBuffer literal_buffer1_;
+ LiteralBuffer literal_buffer2_;
+
+ TokenDesc current_; // desc for current token (as returned by Next())
+ TokenDesc next_; // desc for next token (one token look-ahead)
+
+ // Input stream. Must be initialized to an UC16CharacterStream.
+ UC16CharacterStream* source_;
+
+
// Start position of the octal literal last scanned.
Location octal_pos_;
+ // One Unicode character look-ahead; c0_ < 0 at the end of the input.
+ uc32 c0_;
+
// Whether there is a line terminator whitespace character after
// the current token, and before the next. Does not count newlines
// inside multiline comments.
@@ -556,7 +554,7 @@ class JavaScriptScanner : public Scanner {
bool has_multiline_comment_before_next_;
// Whether we scan 'let' as a keyword for harmony block scoped
// let bindings.
- bool harmony_block_scoping_;
+ bool harmony_scoping_;
};
} } // namespace v8::internal
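
The new ParsingFlags enum deliberately overlays LanguageMode on its two low bits, which is exactly what the three STATIC_ASSERTs verify; callers can mask the mode out and test the boolean flags independently. A minimal sketch under those assumptions (mode values 0/1/2 as the comment in the hunk states):

    #include <cassert>

    enum LanguageMode { CLASSIC_MODE = 0, STRICT_MODE = 1, EXTENDED_MODE = 2 };

    enum ParsingFlags {
      kNoParsingFlags = 0,
      kLanguageModeMask = 0x03,  // low two bits hold the LanguageMode
      kAllowLazy = 4,
      kAllowNativesSyntax = 8
    };

    int main() {
      int flags = STRICT_MODE | kAllowLazy;
      // Mask out the mode, then test the independent boolean bits.
      LanguageMode mode = static_cast<LanguageMode>(flags & kLanguageModeMask);
      assert(mode == STRICT_MODE);
      assert((flags & kAllowLazy) != 0);
      assert((flags & kAllowNativesSyntax) == 0);
      return 0;
    }
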
diff --git a/deps/v8/src/scopeinfo.cc b/deps/v8/src/scopeinfo.cc
index ad31ca47c..0f3623470 100644
--- a/deps/v8/src/scopeinfo.cc
+++ b/deps/v8/src/scopeinfo.cc
@@ -38,456 +38,297 @@ namespace v8 {
namespace internal {
-static int CompareLocal(Variable* const* v, Variable* const* w) {
- int x = (*v)->index();
- int y = (*w)->index();
- // Consider sorting them according to type as well?
- return x - y;
-}
-
-
-template<class Allocator>
-ScopeInfo<Allocator>::ScopeInfo(Scope* scope)
- : function_name_(FACTORY->empty_symbol()),
- calls_eval_(scope->calls_eval()),
- is_strict_mode_(scope->is_strict_mode()),
- parameters_(scope->num_parameters()),
- stack_slots_(scope->num_stack_slots()),
- context_slots_(scope->num_heap_slots()),
- context_modes_(scope->num_heap_slots()) {
- // Add parameters.
- for (int i = 0; i < scope->num_parameters(); i++) {
- ASSERT(parameters_.length() == i);
- parameters_.Add(scope->parameter(i)->name());
- }
-
- // Add stack locals and collect heap locals.
- // We are assuming that the locals' slots are allocated in
- // increasing order, so we can simply add them to the
- // ScopeInfo lists. However, due to usage analysis, this is
- // not true for context-allocated locals: Some of them
- // may be parameters which are allocated before the
- // non-parameter locals. When the non-parameter locals are
- // sorted according to usage, the allocated slot indices may
- // not be in increasing order with the variable list anymore.
- // Thus, we first collect the context-allocated locals, and then
- // sort them by context slot index before adding them to the
- // ScopeInfo list.
- List<Variable*, Allocator> locals(32); // 32 is a wild guess
- ASSERT(locals.is_empty());
- scope->CollectUsedVariables(&locals);
- locals.Sort(&CompareLocal);
-
- List<Variable*, Allocator> heap_locals(locals.length());
- for (int i = 0; i < locals.length(); i++) {
- Variable* var = locals[i];
- if (var->is_used()) {
- switch (var->location()) {
- case Variable::UNALLOCATED:
- case Variable::PARAMETER:
- break;
-
- case Variable::LOCAL:
- ASSERT(stack_slots_.length() == var->index());
- stack_slots_.Add(var->name());
- break;
-
- case Variable::CONTEXT:
- heap_locals.Add(var);
- break;
-
- case Variable::LOOKUP:
- // We don't expect lookup variables in the locals list.
- UNREACHABLE();
- break;
- }
- }
- }
-
- // Add heap locals.
- if (scope->num_heap_slots() > 0) {
- // Add user-defined slots.
- for (int i = 0; i < heap_locals.length(); i++) {
- ASSERT(heap_locals[i]->index() - Context::MIN_CONTEXT_SLOTS ==
- context_slots_.length());
- ASSERT(heap_locals[i]->index() - Context::MIN_CONTEXT_SLOTS ==
- context_modes_.length());
- context_slots_.Add(heap_locals[i]->name());
- context_modes_.Add(heap_locals[i]->mode());
+Handle<ScopeInfo> ScopeInfo::Create(Scope* scope) {
+ // Collect stack and context locals.
+ ZoneList<Variable*> stack_locals(scope->StackLocalCount());
+ ZoneList<Variable*> context_locals(scope->ContextLocalCount());
+ scope->CollectStackAndContextLocals(&stack_locals, &context_locals);
+ const int stack_local_count = stack_locals.length();
+ const int context_local_count = context_locals.length();
+ // Make sure we allocate the correct amount.
+ ASSERT(scope->StackLocalCount() == stack_local_count);
+ ASSERT(scope->ContextLocalCount() == context_local_count);
+
+ // Determine use and location of the function variable if it is present.
+ FunctionVariableInfo function_name_info;
+ VariableMode function_variable_mode;
+ if (scope->is_function_scope() && scope->function() != NULL) {
+ Variable* var = scope->function()->var();
+ if (!var->is_used()) {
+ function_name_info = UNUSED;
+ } else if (var->IsContextSlot()) {
+ function_name_info = CONTEXT;
+ } else {
+ ASSERT(var->IsStackLocal());
+ function_name_info = STACK;
}
-
+ function_variable_mode = var->mode();
} else {
- ASSERT(heap_locals.length() == 0);
+ function_name_info = NONE;
+ function_variable_mode = VAR;
}
- // Add the function context slot, if present.
- // For now, this must happen at the very end because of the
- // ordering of the scope info slots and the respective slot indices.
- if (scope->is_function_scope()) {
- VariableProxy* proxy = scope->function();
- if (proxy != NULL &&
- proxy->var()->is_used() &&
- proxy->var()->IsContextSlot()) {
- function_name_ = proxy->name();
- // Note that we must not find the function name in the context slot
- // list - instead it must be handled separately in the
- // Contexts::Lookup() function. Thus record an empty symbol here so we
- // get the correct number of context slots.
- ASSERT(proxy->var()->index() - Context::MIN_CONTEXT_SLOTS ==
- context_slots_.length());
- ASSERT(proxy->var()->index() - Context::MIN_CONTEXT_SLOTS ==
- context_modes_.length());
- context_slots_.Add(FACTORY->empty_symbol());
- context_modes_.Add(Variable::INTERNAL);
- }
+ const bool has_function_name = function_name_info != NONE;
+ const int parameter_count = scope->num_parameters();
+ const int length = kVariablePartIndex
+ + parameter_count + stack_local_count + 2 * context_local_count
+ + (has_function_name ? 2 : 0);
+
+ Handle<ScopeInfo> scope_info = FACTORY->NewScopeInfo(length);
+
+ // Encode the flags.
+ int flags = TypeField::encode(scope->type()) |
+ CallsEvalField::encode(scope->calls_eval()) |
+ LanguageModeField::encode(scope->language_mode()) |
+ FunctionVariableField::encode(function_name_info) |
+ FunctionVariableMode::encode(function_variable_mode);
+ scope_info->SetFlags(flags);
+ scope_info->SetParameterCount(parameter_count);
+ scope_info->SetStackLocalCount(stack_local_count);
+ scope_info->SetContextLocalCount(context_local_count);
+
+ int index = kVariablePartIndex;
+ // Add parameters.
+ ASSERT(index == scope_info->ParameterEntriesIndex());
+ for (int i = 0; i < parameter_count; ++i) {
+ scope_info->set(index++, *scope->parameter(i)->name());
}
-}
-
-// Encoding format in a FixedArray object:
-//
-// - function name
-//
-// - calls eval boolean flag
-//
-// - number of variables in the context object (smi) (= function context
-// slot index + 1)
-// - list of pairs (name, Var mode) of context-allocated variables (starting
-// with context slot 0)
-//
-// - number of parameters (smi)
-// - list of parameter names (starting with parameter 0 first)
-//
-// - number of variables on the stack (smi)
-// - list of names of stack-allocated variables (starting with stack slot 0)
-
-// The ScopeInfo representation could be simplified and the ScopeInfo
-// re-implemented (with almost the same interface). Here is a
-// suggestion for the new format:
-//
-// - have a single list with all variable names (parameters, stack locals,
-// context locals), followed by a list of non-Object* values containing
-// the variables information (what kind, index, attributes)
-// - searching the linear list of names is fast and yields an index into the
-// list if the variable name is found
-// - that list index is then used to find the variable information in the
-// subsequent list
-// - the list entries don't have to be in any particular order, so all the
-// current sorting business can go away
-// - the ScopeInfo lookup routines can be reduced to perhaps a single lookup
-// which returns all information at once
-// - when gathering the information from a Scope, we only need to iterate
-// through the local variables (parameters and context info is already
-// present)
+ // Add stack locals' names. We are assuming that the stack locals'
+ // slots are allocated in increasing order, so we can simply add
+ // them to the ScopeInfo object.
+ ASSERT(index == scope_info->StackLocalEntriesIndex());
+ for (int i = 0; i < stack_local_count; ++i) {
+ ASSERT(stack_locals[i]->index() == i);
+ scope_info->set(index++, *stack_locals[i]->name());
+ }
+ // Due to usage analysis, context-allocated locals are not necessarily in
+ // increasing order: Some of them may be parameters which are allocated before
+ // the non-parameter locals. When the non-parameter locals are sorted
+ // according to usage, the allocated slot indices may not be in increasing
+ // order with the variable list anymore. Thus, we first need to sort them by
+ // context slot index before adding them to the ScopeInfo object.
+ context_locals.Sort(&Variable::CompareIndex);
+
+ // Add context locals' names.
+ ASSERT(index == scope_info->ContextLocalNameEntriesIndex());
+ for (int i = 0; i < context_local_count; ++i) {
+ scope_info->set(index++, *context_locals[i]->name());
+ }
-static inline Object** ReadInt(Object** p, int* x) {
- *x = (reinterpret_cast<Smi*>(*p++))->value();
- return p;
-}
+ // Add context locals' info.
+ ASSERT(index == scope_info->ContextLocalInfoEntriesIndex());
+ for (int i = 0; i < context_local_count; ++i) {
+ Variable* var = context_locals[i];
+ uint32_t value = ContextLocalMode::encode(var->mode()) |
+ ContextLocalInitFlag::encode(var->initialization_flag());
+ scope_info->set(index++, Smi::FromInt(value));
+ }
+ // If present, add the function variable name and its index.
+ ASSERT(index == scope_info->FunctionNameEntryIndex());
+ if (has_function_name) {
+ int var_index = scope->function()->var()->index();
+ scope_info->set(index++, *scope->function()->name());
+ scope_info->set(index++, Smi::FromInt(var_index));
+ ASSERT(function_name_info != STACK ||
+ (var_index == scope_info->StackLocalCount() &&
+ var_index == scope_info->StackSlotCount() - 1));
+ ASSERT(function_name_info != CONTEXT ||
+ var_index == scope_info->ContextLength() - 1);
+ }
-static inline Object** ReadBool(Object** p, bool* x) {
- *x = (reinterpret_cast<Smi*>(*p++))->value() != 0;
- return p;
+ ASSERT(index == scope_info->length());
+ ASSERT(scope->num_parameters() == scope_info->ParameterCount());
+ ASSERT(scope->num_stack_slots() == scope_info->StackSlotCount());
+ ASSERT(scope->num_heap_slots() == scope_info->ContextLength());
+ return scope_info;
}
-static inline Object** ReadSymbol(Object** p, Handle<String>* s) {
- *s = Handle<String>(reinterpret_cast<String*>(*p++));
- return p;
+ScopeInfo* ScopeInfo::Empty() {
+ return reinterpret_cast<ScopeInfo*>(HEAP->empty_fixed_array());
}
-template <class Allocator>
-static Object** ReadList(Object** p, List<Handle<String>, Allocator >* list) {
- ASSERT(list->is_empty());
- int n;
- p = ReadInt(p, &n);
- while (n-- > 0) {
- Handle<String> s;
- p = ReadSymbol(p, &s);
- list->Add(s);
- }
- return p;
-}
-
-
-template <class Allocator>
-static Object** ReadList(Object** p,
- List<Handle<String>, Allocator>* list,
- List<Variable::Mode, Allocator>* modes) {
- ASSERT(list->is_empty());
- int n;
- p = ReadInt(p, &n);
- while (n-- > 0) {
- Handle<String> s;
- int m;
- p = ReadSymbol(p, &s);
- p = ReadInt(p, &m);
- list->Add(s);
- modes->Add(static_cast<Variable::Mode>(m));
- }
- return p;
-}
-
-
-template<class Allocator>
-ScopeInfo<Allocator>::ScopeInfo(SerializedScopeInfo* data)
- : function_name_(FACTORY->empty_symbol()),
- parameters_(4),
- stack_slots_(8),
- context_slots_(8),
- context_modes_(8) {
- if (data->length() > 0) {
- Object** p0 = data->data_start();
- Object** p = p0;
- p = ReadSymbol(p, &function_name_);
- p = ReadBool(p, &calls_eval_);
- p = ReadBool(p, &is_strict_mode_);
- p = ReadList<Allocator>(p, &context_slots_, &context_modes_);
- p = ReadList<Allocator>(p, &parameters_);
- p = ReadList<Allocator>(p, &stack_slots_);
- ASSERT((p - p0) == FixedArray::cast(data)->length());
- }
+ScopeType ScopeInfo::Type() {
+ ASSERT(length() > 0);
+ return TypeField::decode(Flags());
}
-static inline Object** WriteInt(Object** p, int x) {
- *p++ = Smi::FromInt(x);
- return p;
+bool ScopeInfo::CallsEval() {
+ return length() > 0 && CallsEvalField::decode(Flags());
}
-static inline Object** WriteBool(Object** p, bool b) {
- *p++ = Smi::FromInt(b ? 1 : 0);
- return p;
+LanguageMode ScopeInfo::language_mode() {
+ return length() > 0 ? LanguageModeField::decode(Flags()) : CLASSIC_MODE;
}
-static inline Object** WriteSymbol(Object** p, Handle<String> s) {
- *p++ = *s;
- return p;
+int ScopeInfo::LocalCount() {
+ return StackLocalCount() + ContextLocalCount();
}
-template <class Allocator>
-static Object** WriteList(Object** p, List<Handle<String>, Allocator >* list) {
- const int n = list->length();
- p = WriteInt(p, n);
- for (int i = 0; i < n; i++) {
- p = WriteSymbol(p, list->at(i));
+int ScopeInfo::StackSlotCount() {
+ if (length() > 0) {
+ bool function_name_stack_slot =
+ FunctionVariableField::decode(Flags()) == STACK;
+ return StackLocalCount() + (function_name_stack_slot ? 1 : 0);
}
- return p;
+ return 0;
}
-template <class Allocator>
-static Object** WriteList(Object** p,
- List<Handle<String>, Allocator>* list,
- List<Variable::Mode, Allocator>* modes) {
- const int n = list->length();
- p = WriteInt(p, n);
- for (int i = 0; i < n; i++) {
- p = WriteSymbol(p, list->at(i));
- p = WriteInt(p, modes->at(i));
+int ScopeInfo::ContextLength() {
+ if (length() > 0) {
+ int context_locals = ContextLocalCount();
+ bool function_name_context_slot =
+ FunctionVariableField::decode(Flags()) == CONTEXT;
+ bool has_context = context_locals > 0 ||
+ function_name_context_slot ||
+ Type() == WITH_SCOPE ||
+ (Type() == FUNCTION_SCOPE && CallsEval());
+ if (has_context) {
+ return Context::MIN_CONTEXT_SLOTS + context_locals +
+ (function_name_context_slot ? 1 : 0);
+ }
}
- return p;
-}
-
-
-template<class Allocator>
-Handle<SerializedScopeInfo> ScopeInfo<Allocator>::Serialize() {
- // function name, calls eval, is_strict_mode, length for 3 tables:
- const int extra_slots = 1 + 1 + 1 + 3;
- int length = extra_slots +
- context_slots_.length() * 2 +
- parameters_.length() +
- stack_slots_.length();
-
- Handle<SerializedScopeInfo> data(
- SerializedScopeInfo::cast(*FACTORY->NewSerializedScopeInfo(length)));
- AssertNoAllocation nogc;
-
- Object** p0 = data->data_start();
- Object** p = p0;
- p = WriteSymbol(p, function_name_);
- p = WriteBool(p, calls_eval_);
- p = WriteBool(p, is_strict_mode_);
- p = WriteList(p, &context_slots_, &context_modes_);
- p = WriteList(p, &parameters_);
- p = WriteList(p, &stack_slots_);
- ASSERT((p - p0) == length);
-
- return data;
+ return 0;
}
-template<class Allocator>
-Handle<String> ScopeInfo<Allocator>::LocalName(int i) const {
- // A local variable can be allocated either on the stack or in the context.
- // For variables allocated in the context they are always preceded by
- // Context::MIN_CONTEXT_SLOTS of fixed allocated slots in the context.
- if (i < number_of_stack_slots()) {
- return stack_slot_name(i);
+bool ScopeInfo::HasFunctionName() {
+ if (length() > 0) {
+ return NONE != FunctionVariableField::decode(Flags());
} else {
- return context_slot_name(i - number_of_stack_slots() +
- Context::MIN_CONTEXT_SLOTS);
+ return false;
}
}
-template<class Allocator>
-int ScopeInfo<Allocator>::NumberOfLocals() const {
- int number_of_locals = number_of_stack_slots();
- if (number_of_context_slots() > 0) {
- ASSERT(number_of_context_slots() >= Context::MIN_CONTEXT_SLOTS);
- number_of_locals += number_of_context_slots() - Context::MIN_CONTEXT_SLOTS;
+bool ScopeInfo::HasHeapAllocatedLocals() {
+ if (length() > 0) {
+ return ContextLocalCount() > 0;
+ } else {
+ return false;
}
- return number_of_locals;
}
-Handle<SerializedScopeInfo> SerializedScopeInfo::Create(Scope* scope) {
- ScopeInfo<ZoneListAllocationPolicy> sinfo(scope);
- return sinfo.Serialize();
-}
-
-
-SerializedScopeInfo* SerializedScopeInfo::Empty() {
- return reinterpret_cast<SerializedScopeInfo*>(HEAP->empty_fixed_array());
-}
-
-
-Object** SerializedScopeInfo::ContextEntriesAddr() {
- ASSERT(length() > 0);
- // +3 for function name, calls eval, strict mode.
- return data_start() + 3;
+bool ScopeInfo::HasContext() {
+ if (length() > 0) {
+ return ContextLength() > 0;
+ } else {
+ return false;
+ }
}
-Object** SerializedScopeInfo::ParameterEntriesAddr() {
- ASSERT(length() > 0);
- Object** p = ContextEntriesAddr();
- int number_of_context_slots;
- p = ReadInt(p, &number_of_context_slots);
- return p + number_of_context_slots*2; // *2 for pairs
+String* ScopeInfo::FunctionName() {
+ ASSERT(HasFunctionName());
+ return String::cast(get(FunctionNameEntryIndex()));
}
-Object** SerializedScopeInfo::StackSlotEntriesAddr() {
- ASSERT(length() > 0);
- Object** p = ParameterEntriesAddr();
- int number_of_parameter_slots;
- p = ReadInt(p, &number_of_parameter_slots);
- return p + number_of_parameter_slots;
+String* ScopeInfo::ParameterName(int var) {
+ ASSERT(0 <= var && var < ParameterCount());
+ int info_index = ParameterEntriesIndex() + var;
+ return String::cast(get(info_index));
}
-bool SerializedScopeInfo::CallsEval() {
- if (length() > 0) {
- Object** p = data_start() + 1; // +1 for function name.
- bool calls_eval;
- p = ReadBool(p, &calls_eval);
- return calls_eval;
- }
- return false;
+String* ScopeInfo::LocalName(int var) {
+ ASSERT(0 <= var && var < LocalCount());
+ ASSERT(StackLocalEntriesIndex() + StackLocalCount() ==
+ ContextLocalNameEntriesIndex());
+ int info_index = StackLocalEntriesIndex() + var;
+ return String::cast(get(info_index));
}
-bool SerializedScopeInfo::IsStrictMode() {
- if (length() > 0) {
- Object** p = data_start() + 2; // +2 for function name, calls eval.
- bool strict_mode;
- p = ReadBool(p, &strict_mode);
- return strict_mode;
- }
- return false;
+String* ScopeInfo::StackLocalName(int var) {
+ ASSERT(0 <= var && var < StackLocalCount());
+ int info_index = StackLocalEntriesIndex() + var;
+ return String::cast(get(info_index));
}
-int SerializedScopeInfo::NumberOfStackSlots() {
- if (length() > 0) {
- Object** p = StackSlotEntriesAddr();
- int number_of_stack_slots;
- ReadInt(p, &number_of_stack_slots);
- return number_of_stack_slots;
- }
- return 0;
+String* ScopeInfo::ContextLocalName(int var) {
+ ASSERT(0 <= var && var < ContextLocalCount());
+ int info_index = ContextLocalNameEntriesIndex() + var;
+ return String::cast(get(info_index));
}
-int SerializedScopeInfo::NumberOfContextSlots() {
- if (length() > 0) {
- Object** p = ContextEntriesAddr();
- int number_of_context_slots;
- ReadInt(p, &number_of_context_slots);
- return number_of_context_slots + Context::MIN_CONTEXT_SLOTS;
- }
- return 0;
+VariableMode ScopeInfo::ContextLocalMode(int var) {
+ ASSERT(0 <= var && var < ContextLocalCount());
+ int info_index = ContextLocalInfoEntriesIndex() + var;
+ int value = Smi::cast(get(info_index))->value();
+ return ContextLocalMode::decode(value);
}
-bool SerializedScopeInfo::HasHeapAllocatedLocals() {
- if (length() > 0) {
- Object** p = ContextEntriesAddr();
- int number_of_context_slots;
- ReadInt(p, &number_of_context_slots);
- return number_of_context_slots > 0;
- }
- return false;
+InitializationFlag ScopeInfo::ContextLocalInitFlag(int var) {
+ ASSERT(0 <= var && var < ContextLocalCount());
+ int info_index = ContextLocalInfoEntriesIndex() + var;
+ int value = Smi::cast(get(info_index))->value();
+ return ContextLocalInitFlag::decode(value);
}
-int SerializedScopeInfo::StackSlotIndex(String* name) {
+int ScopeInfo::StackSlotIndex(String* name) {
ASSERT(name->IsSymbol());
if (length() > 0) {
- // Slots start after length entry.
- Object** p0 = StackSlotEntriesAddr();
- int number_of_stack_slots;
- p0 = ReadInt(p0, &number_of_stack_slots);
- Object** p = p0;
- Object** end = p0 + number_of_stack_slots;
- while (p != end) {
- if (*p == name) return static_cast<int>(p - p0);
- p++;
+ int start = StackLocalEntriesIndex();
+ int end = StackLocalEntriesIndex() + StackLocalCount();
+ for (int i = start; i < end; ++i) {
+ if (name == get(i)) {
+ return i - start;
+ }
}
}
return -1;
}
-int SerializedScopeInfo::ContextSlotIndex(String* name, Variable::Mode* mode) {
+
+int ScopeInfo::ContextSlotIndex(String* name,
+ VariableMode* mode,
+ InitializationFlag* init_flag) {
ASSERT(name->IsSymbol());
- Isolate* isolate = GetIsolate();
- int result = isolate->context_slot_cache()->Lookup(this, name, mode);
- if (result != ContextSlotCache::kNotFound) return result;
+ ASSERT(mode != NULL);
+ ASSERT(init_flag != NULL);
if (length() > 0) {
- // Slots start after length entry.
- Object** p0 = ContextEntriesAddr();
- int number_of_context_slots;
- p0 = ReadInt(p0, &number_of_context_slots);
- Object** p = p0;
- Object** end = p0 + number_of_context_slots * 2;
- while (p != end) {
- if (*p == name) {
- ASSERT(((p - p0) & 1) == 0);
- int v;
- ReadInt(p + 1, &v);
- Variable::Mode mode_value = static_cast<Variable::Mode>(v);
- if (mode != NULL) *mode = mode_value;
- result = static_cast<int>((p - p0) >> 1) + Context::MIN_CONTEXT_SLOTS;
- isolate->context_slot_cache()->Update(this, name, mode_value, result);
+ ContextSlotCache* context_slot_cache = GetIsolate()->context_slot_cache();
+ int result = context_slot_cache->Lookup(this, name, mode, init_flag);
+ if (result != ContextSlotCache::kNotFound) {
+ ASSERT(result < ContextLength());
+ return result;
+ }
+
+ int start = ContextLocalNameEntriesIndex();
+ int end = ContextLocalNameEntriesIndex() + ContextLocalCount();
+ for (int i = start; i < end; ++i) {
+ if (name == get(i)) {
+ int var = i - start;
+ *mode = ContextLocalMode(var);
+ *init_flag = ContextLocalInitFlag(var);
+ result = Context::MIN_CONTEXT_SLOTS + var;
+ context_slot_cache->Update(this, name, *mode, *init_flag, result);
+ ASSERT(result < ContextLength());
return result;
}
- p += 2;
}
+ context_slot_cache->Update(this, name, INTERNAL, kNeedsInitialization, -1);
}
- isolate->context_slot_cache()->Update(this, name, Variable::INTERNAL, -1);
return -1;
}
-int SerializedScopeInfo::ParameterIndex(String* name) {
+int ScopeInfo::ParameterIndex(String* name) {
ASSERT(name->IsSymbol());
if (length() > 0) {
// We must read parameters from the end since for
@@ -495,41 +336,58 @@ int SerializedScopeInfo::ParameterIndex(String* name) {
// last declaration of that parameter is used
// inside a function (and thus we need to look
// at the last index). Was bug# 1110337.
- //
- // Eventually, we should only register such parameters
- // once, with corresponding index. This requires a new
- // implementation of the ScopeInfo code. See also other
- // comments in this file regarding this.
- Object** p = ParameterEntriesAddr();
- int number_of_parameter_slots;
- Object** p0 = ReadInt(p, &number_of_parameter_slots);
- p = p0 + number_of_parameter_slots;
- while (p > p0) {
- p--;
- if (*p == name) return static_cast<int>(p - p0);
+ int start = ParameterEntriesIndex();
+ int end = ParameterEntriesIndex() + ParameterCount();
+ for (int i = end - 1; i >= start; --i) {
+ if (name == get(i)) {
+ return i - start;
+ }
}
}
return -1;
}
-int SerializedScopeInfo::FunctionContextSlotIndex(String* name) {
+int ScopeInfo::FunctionContextSlotIndex(String* name, VariableMode* mode) {
ASSERT(name->IsSymbol());
+ ASSERT(mode != NULL);
if (length() > 0) {
- Object** p = data_start();
- if (*p == name) {
- p = ContextEntriesAddr();
- int number_of_context_slots;
- ReadInt(p, &number_of_context_slots);
- ASSERT(number_of_context_slots != 0);
- // The function context slot is the last entry.
- return number_of_context_slots + Context::MIN_CONTEXT_SLOTS - 1;
+ if (FunctionVariableField::decode(Flags()) == CONTEXT &&
+ FunctionName() == name) {
+ *mode = FunctionVariableMode::decode(Flags());
+ return Smi::cast(get(FunctionNameEntryIndex() + 1))->value();
}
}
return -1;
}
+int ScopeInfo::ParameterEntriesIndex() {
+ ASSERT(length() > 0);
+ return kVariablePartIndex;
+}
+
+
+int ScopeInfo::StackLocalEntriesIndex() {
+ return ParameterEntriesIndex() + ParameterCount();
+}
+
+
+int ScopeInfo::ContextLocalNameEntriesIndex() {
+ return StackLocalEntriesIndex() + StackLocalCount();
+}
+
+
+int ScopeInfo::ContextLocalInfoEntriesIndex() {
+ return ContextLocalNameEntriesIndex() + ContextLocalCount();
+}
+
+
+int ScopeInfo::FunctionNameEntryIndex() {
+ return ContextLocalInfoEntriesIndex() + ContextLocalCount();
+}
+
+
int ContextSlotCache::Hash(Object* data, String* name) {
// Uses only lower 32 bits if pointers are larger.
uintptr_t addr_hash =
@@ -540,12 +398,14 @@ int ContextSlotCache::Hash(Object* data, String* name) {
int ContextSlotCache::Lookup(Object* data,
String* name,
- Variable::Mode* mode) {
+ VariableMode* mode,
+ InitializationFlag* init_flag) {
int index = Hash(data, name);
Key& key = keys_[index];
if ((key.data == data) && key.name->Equals(name)) {
Value result(values_[index]);
if (mode != NULL) *mode = result.mode();
+ if (init_flag != NULL) *init_flag = result.initialization_flag();
return result.index() + kNotFound;
}
return kNotFound;
@@ -554,7 +414,8 @@ int ContextSlotCache::Lookup(Object* data,
void ContextSlotCache::Update(Object* data,
String* name,
- Variable::Mode mode,
+ VariableMode mode,
+ InitializationFlag init_flag,
int slot_index) {
String* symbol;
ASSERT(slot_index > kNotFound);
@@ -564,9 +425,9 @@ void ContextSlotCache::Update(Object* data,
key.data = data;
key.name = symbol;
// Please note value only takes a uint as index.
- values_[index] = Value(mode, slot_index - kNotFound).raw();
+ values_[index] = Value(mode, init_flag, slot_index - kNotFound).raw();
#ifdef DEBUG
- ValidateEntry(data, name, mode, slot_index);
+ ValidateEntry(data, name, mode, init_flag, slot_index);
#endif
}
}
@@ -581,7 +442,8 @@ void ContextSlotCache::Clear() {
void ContextSlotCache::ValidateEntry(Object* data,
String* name,
- Variable::Mode mode,
+ VariableMode mode,
+ InitializationFlag init_flag,
int slot_index) {
String* symbol;
if (HEAP->LookupSymbolIfExists(name, &symbol)) {
@@ -591,51 +453,56 @@ void ContextSlotCache::ValidateEntry(Object* data,
ASSERT(key.name->Equals(name));
Value result(values_[index]);
ASSERT(result.mode() == mode);
+ ASSERT(result.initialization_flag() == init_flag);
ASSERT(result.index() + kNotFound == slot_index);
}
}
-template <class Allocator>
static void PrintList(const char* list_name,
int nof_internal_slots,
- List<Handle<String>, Allocator>& list) {
- if (list.length() > 0) {
+ int start,
+ int end,
+ ScopeInfo* scope_info) {
+ if (start < end) {
PrintF("\n // %s\n", list_name);
if (nof_internal_slots > 0) {
PrintF(" %2d - %2d [internal slots]\n", 0 , nof_internal_slots - 1);
}
- for (int i = 0; i < list.length(); i++) {
- PrintF(" %2d ", i + nof_internal_slots);
- list[i]->ShortPrint();
+ for (int i = nof_internal_slots; start < end; ++i, ++start) {
+ PrintF(" %2d ", i);
+ String::cast(scope_info->get(start))->ShortPrint();
PrintF("\n");
}
}
}
-template<class Allocator>
-void ScopeInfo<Allocator>::Print() {
+void ScopeInfo::Print() {
PrintF("ScopeInfo ");
- if (function_name_->length() > 0)
- function_name_->ShortPrint();
- else
+ if (HasFunctionName()) {
+ FunctionName()->ShortPrint();
+ } else {
PrintF("/* no function name */");
+ }
PrintF("{");
- PrintList<Allocator>("parameters", 0, parameters_);
- PrintList<Allocator>("stack slots", 0, stack_slots_);
- PrintList<Allocator>("context slots", Context::MIN_CONTEXT_SLOTS,
- context_slots_);
+ PrintList("parameters", 0,
+ ParameterEntriesIndex(),
+ ParameterEntriesIndex() + ParameterCount(),
+ this);
+ PrintList("stack slots", 0,
+ StackLocalEntriesIndex(),
+ StackLocalEntriesIndex() + StackLocalCount(),
+ this);
+ PrintList("context slots",
+ Context::MIN_CONTEXT_SLOTS,
+ ContextLocalNameEntriesIndex(),
+ ContextLocalNameEntriesIndex() + ContextLocalCount(),
+ this);
PrintF("}\n");
}
#endif // DEBUG
-
-// Make sure the classes get instantiated by the template system.
-template class ScopeInfo<FreeStoreAllocationPolicy>;
-template class ScopeInfo<PreallocatedStorage>;
-template class ScopeInfo<ZoneListAllocationPolicy>;
-
} } // namespace v8::internal
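
ScopeInfo::Create above lays the variable data out linearly after a fixed header: parameter names, stack-local names, context-local names, context-local info words, and an optional (name, index) pair for the function variable, so each *EntriesIndex() helper is the previous region's start plus that region's length. A back-of-envelope sketch of the index arithmetic (kVariablePartIndex = 4 is an assumption here, i.e. flags plus the three counts; the real constant lives in the ScopeInfo class):

    #include <cassert>

    struct ScopeInfoLayout {
      int parameter_count;
      int stack_local_count;
      int context_local_count;
      bool has_function_name;

      static const int kVariablePartIndex = 4;  // flags + 3 counts (assumed)

      int ParameterEntriesIndex() const { return kVariablePartIndex; }
      int StackLocalEntriesIndex() const {
        return ParameterEntriesIndex() + parameter_count;
      }
      int ContextLocalNameEntriesIndex() const {
        return StackLocalEntriesIndex() + stack_local_count;
      }
      int ContextLocalInfoEntriesIndex() const {
        return ContextLocalNameEntriesIndex() + context_local_count;
      }
      int FunctionNameEntryIndex() const {
        return ContextLocalInfoEntriesIndex() + context_local_count;
      }
      int length() const {
        return FunctionNameEntryIndex() + (has_function_name ? 2 : 0);
      }
    };

    int main() {
      ScopeInfoLayout info = {2 /*params*/, 3 /*stack*/, 1 /*context*/, true};
      assert(info.ParameterEntriesIndex() == 4);
      assert(info.StackLocalEntriesIndex() == 6);
      assert(info.ContextLocalNameEntriesIndex() == 9);
      assert(info.ContextLocalInfoEntriesIndex() == 10);
      assert(info.FunctionNameEntryIndex() == 11);
      assert(info.length() == 13);  // 4 + 2 + 3 + 2*1 + 2
      return 0;
    }

The computed length matches the allocation in Create(): kVariablePartIndex + parameter_count + stack_local_count + 2 * context_local_count + (has_function_name ? 2 : 0).
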
diff --git a/deps/v8/src/scopeinfo.h b/deps/v8/src/scopeinfo.h
index 40c5c8a68..93734f5a1 100644
--- a/deps/v8/src/scopeinfo.h
+++ b/deps/v8/src/scopeinfo.h
@@ -35,135 +35,6 @@
namespace v8 {
namespace internal {
-// Scope information represents information about a functions's
-// scopes (currently only one, because we don't do any inlining)
-// and the allocation of the scope's variables. Scope information
-// is stored in a compressed form in FixedArray objects and is used
-// at runtime (stack dumps, deoptimization, etc.).
-//
-// Historical note: In other VMs built by this team, ScopeInfo was
-// usually called DebugInfo since the information was used (among
-// other things) for on-demand debugging (Self, Smalltalk). However,
-// DebugInfo seems misleading, since this information is primarily used
-// in debugging-unrelated contexts.
-
-// Forward defined as
-// template <class Allocator = FreeStoreAllocationPolicy> class ScopeInfo;
-template<class Allocator>
-class ScopeInfo BASE_EMBEDDED {
- public:
- // Create a ScopeInfo instance from a scope.
- explicit ScopeInfo(Scope* scope);
-
- // Create a ScopeInfo instance from SerializedScopeInfo.
- explicit ScopeInfo(SerializedScopeInfo* data);
-
- // Creates a SerializedScopeInfo holding the serialized scope info.
- Handle<SerializedScopeInfo> Serialize();
-
- // --------------------------------------------------------------------------
- // Lookup
-
- Handle<String> function_name() const { return function_name_; }
-
- Handle<String> parameter_name(int i) const { return parameters_[i]; }
- int number_of_parameters() const { return parameters_.length(); }
-
- Handle<String> stack_slot_name(int i) const { return stack_slots_[i]; }
- int number_of_stack_slots() const { return stack_slots_.length(); }
-
- Handle<String> context_slot_name(int i) const {
- return context_slots_[i - Context::MIN_CONTEXT_SLOTS];
- }
- int number_of_context_slots() const {
- int l = context_slots_.length();
- return l == 0 ? 0 : l + Context::MIN_CONTEXT_SLOTS;
- }
-
- Handle<String> LocalName(int i) const;
- int NumberOfLocals() const;
-
- // --------------------------------------------------------------------------
- // Debugging support
-
-#ifdef DEBUG
- void Print();
-#endif
-
- private:
- Handle<String> function_name_;
- bool calls_eval_;
- bool is_strict_mode_;
- List<Handle<String>, Allocator > parameters_;
- List<Handle<String>, Allocator > stack_slots_;
- List<Handle<String>, Allocator > context_slots_;
- List<Variable::Mode, Allocator > context_modes_;
-};
-
-
-// This object provides quick access to scope info details for runtime
-// routines w/o the need to explicitly create a ScopeInfo object.
-class SerializedScopeInfo : public FixedArray {
- public :
-
- static SerializedScopeInfo* cast(Object* object) {
- ASSERT(object->IsSerializedScopeInfo());
- return reinterpret_cast<SerializedScopeInfo*>(object);
- }
-
- // Does this scope call eval?
- bool CallsEval();
-
- // Is this scope a strict mode scope?
- bool IsStrictMode();
-
- // Return the number of stack slots for code.
- int NumberOfStackSlots();
-
- // Return the number of context slots for code.
- int NumberOfContextSlots();
-
- // Return if this has context slots besides MIN_CONTEXT_SLOTS;
- bool HasHeapAllocatedLocals();
-
- // Lookup support for serialized scope info. Returns the
- // the stack slot index for a given slot name if the slot is
- // present; otherwise returns a value < 0. The name must be a symbol
- // (canonicalized).
- int StackSlotIndex(String* name);
-
- // Lookup support for serialized scope info. Returns the
- // context slot index for a given slot name if the slot is present; otherwise
- // returns a value < 0. The name must be a symbol (canonicalized).
- // If the slot is present and mode != NULL, sets *mode to the corresponding
- // mode for that variable.
- int ContextSlotIndex(String* name, Variable::Mode* mode);
-
- // Lookup support for serialized scope info. Returns the
- // parameter index for a given parameter name if the parameter is present;
- // otherwise returns a value < 0. The name must be a symbol (canonicalized).
- int ParameterIndex(String* name);
-
- // Lookup support for serialized scope info. Returns the
- // function context slot index if the function name is present (named
- // function expressions, only), otherwise returns a value < 0. The name
- // must be a symbol (canonicalized).
- int FunctionContextSlotIndex(String* name);
-
- static Handle<SerializedScopeInfo> Create(Scope* scope);
-
- // Serializes empty scope info.
- static SerializedScopeInfo* Empty();
-
- private:
- inline Object** ContextEntriesAddr();
-
- inline Object** ParameterEntriesAddr();
-
- inline Object** StackSlotEntriesAddr();
-};
-
-
// Cache for mapping (data, property name) into context slot index.
// The cache contains both positive and negative results.
// Slot index equals -1 means the property is absent.
@@ -174,12 +45,14 @@ class ContextSlotCache {
// If absent, kNotFound is returned.
int Lookup(Object* data,
String* name,
- Variable::Mode* mode);
+ VariableMode* mode,
+ InitializationFlag* init_flag);
// Update an element in the cache.
void Update(Object* data,
String* name,
- Variable::Mode mode,
+ VariableMode mode,
+ InitializationFlag init_flag,
int slot_index);
// Clear the cache.
@@ -201,7 +74,8 @@ class ContextSlotCache {
#ifdef DEBUG
void ValidateEntry(Object* data,
String* name,
- Variable::Mode mode,
+ VariableMode mode,
+ InitializationFlag init_flag,
int slot_index);
#endif
@@ -212,11 +86,17 @@ class ContextSlotCache {
};
struct Value {
- Value(Variable::Mode mode, int index) {
+ Value(VariableMode mode,
+ InitializationFlag init_flag,
+ int index) {
ASSERT(ModeField::is_valid(mode));
+ ASSERT(InitField::is_valid(init_flag));
ASSERT(IndexField::is_valid(index));
- value_ = ModeField::encode(mode) | IndexField::encode(index);
+ value_ = ModeField::encode(mode) |
+ IndexField::encode(index) |
+ InitField::encode(init_flag);
ASSERT(mode == this->mode());
+ ASSERT(init_flag == this->initialization_flag());
ASSERT(index == this->index());
}
@@ -224,14 +104,20 @@ class ContextSlotCache {
uint32_t raw() { return value_; }
- Variable::Mode mode() { return ModeField::decode(value_); }
+ VariableMode mode() { return ModeField::decode(value_); }
+
+ InitializationFlag initialization_flag() {
+ return InitField::decode(value_);
+ }
int index() { return IndexField::decode(value_); }
// Bit fields in value_ (type, shift, size). Must be public so the
// constants can be embedded in generated code.
- class ModeField: public BitField<Variable::Mode, 0, 3> {};
- class IndexField: public BitField<int, 3, 32-3> {};
+ class ModeField: public BitField<VariableMode, 0, 3> {};
+ class InitField: public BitField<InitializationFlag, 3, 1> {};
+ class IndexField: public BitField<int, 4, 32-4> {};
+
private:
uint32_t value_;
};
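
The widened ContextSlotCache::Value above costs the index one bit: ModeField keeps bits 0-2, the new InitField takes bit 3, and IndexField shrinks from 29 to 28 bits. A stand-alone sketch of the same packing (enum values here are illustrative, not V8's):

    #include <cassert>
    #include <cstdint>

    enum VariableMode { VAR = 0, CONST = 1, LET = 2, INTERNAL = 3 };
    enum InitializationFlag { kNeedsInitialization = 0, kCreatedInitialized = 1 };

    class Value {
     public:
      Value(VariableMode mode, InitializationFlag init_flag, int index)
          : value_(static_cast<uint32_t>(mode) |            // bits 0-2
                   (static_cast<uint32_t>(init_flag) << 3) |  // bit 3
                   (static_cast<uint32_t>(index) << 4)) {}    // bits 4-31
      VariableMode mode() const {
        return static_cast<VariableMode>(value_ & 0x7);
      }
      InitializationFlag initialization_flag() const {
        return static_cast<InitializationFlag>((value_ >> 3) & 0x1);
      }
      int index() const { return static_cast<int>(value_ >> 4); }
     private:
      uint32_t value_;
    };

    int main() {
      Value v(LET, kNeedsInitialization, 42);
      assert(v.mode() == LET);
      assert(v.initialization_flag() == kNeedsInitialization);
      assert(v.index() == 42);
      return 0;
    }
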
diff --git a/deps/v8/src/scopes.cc b/deps/v8/src/scopes.cc
index d5a7a9f9c..e05ca1725 100644
--- a/deps/v8/src/scopes.cc
+++ b/deps/v8/src/scopes.cc
@@ -55,7 +55,7 @@ class ZoneAllocator: public Allocator {
};
-static ZoneAllocator LocalsMapAllocator;
+static ZoneAllocator* LocalsMapAllocator = ::new ZoneAllocator();
// ----------------------------------------------------------------------------
@@ -76,23 +76,27 @@ static bool Match(void* key1, void* key2) {
}
-// Dummy constructor
-VariableMap::VariableMap(bool gotta_love_static_overloading) : HashMap() {}
-
-VariableMap::VariableMap() : HashMap(Match, &LocalsMapAllocator, 8) {}
+VariableMap::VariableMap() : HashMap(Match, LocalsMapAllocator, 8) {}
VariableMap::~VariableMap() {}
-Variable* VariableMap::Declare(Scope* scope,
- Handle<String> name,
- Variable::Mode mode,
- bool is_valid_lhs,
- Variable::Kind kind) {
+Variable* VariableMap::Declare(
+ Scope* scope,
+ Handle<String> name,
+ VariableMode mode,
+ bool is_valid_lhs,
+ Variable::Kind kind,
+ InitializationFlag initialization_flag) {
HashMap::Entry* p = HashMap::Lookup(name.location(), name->Hash(), true);
if (p->value == NULL) {
// The variable has not been declared yet -> insert it.
ASSERT(p->key == name.location());
- p->value = new Variable(scope, name, mode, is_valid_lhs, kind);
+ p->value = new Variable(scope,
+ name,
+ mode,
+ is_valid_lhs,
+ kind,
+ initialization_flag);
}
return reinterpret_cast<Variable*>(p->value);
}
@@ -112,22 +116,7 @@ Variable* VariableMap::Lookup(Handle<String> name) {
// ----------------------------------------------------------------------------
// Implementation of Scope
-
-// Dummy constructor
-Scope::Scope(Type type)
- : isolate_(Isolate::Current()),
- inner_scopes_(0),
- variables_(false),
- temps_(0),
- params_(0),
- unresolved_(0),
- decls_(0),
- already_resolved_(false) {
- SetDefaults(type, NULL, Handle<SerializedScopeInfo>::null());
-}
-
-
-Scope::Scope(Scope* outer_scope, Type type)
+Scope::Scope(Scope* outer_scope, ScopeType type)
: isolate_(Isolate::Current()),
inner_scopes_(4),
variables_(),
@@ -136,18 +125,18 @@ Scope::Scope(Scope* outer_scope, Type type)
unresolved_(16),
decls_(4),
already_resolved_(false) {
- SetDefaults(type, outer_scope, Handle<SerializedScopeInfo>::null());
+ SetDefaults(type, outer_scope, Handle<ScopeInfo>::null());
// At some point we might want to provide outer scopes to
// eval scopes (by walking the stack and reading the scope info).
// In that case, the ASSERT below needs to be adjusted.
- ASSERT((type == GLOBAL_SCOPE || type == EVAL_SCOPE) == (outer_scope == NULL));
+ ASSERT_EQ(type == GLOBAL_SCOPE, outer_scope == NULL);
ASSERT(!HasIllegalRedeclaration());
}
Scope::Scope(Scope* inner_scope,
- Type type,
- Handle<SerializedScopeInfo> scope_info)
+ ScopeType type,
+ Handle<ScopeInfo> scope_info)
: isolate_(Isolate::Current()),
inner_scopes_(4),
variables_(),
@@ -156,10 +145,14 @@ Scope::Scope(Scope* inner_scope,
unresolved_(16),
decls_(4),
already_resolved_(true) {
- ASSERT(!scope_info.is_null());
SetDefaults(type, NULL, scope_info);
- if (scope_info->HasHeapAllocatedLocals()) {
- num_heap_slots_ = scope_info_->NumberOfContextSlots();
+ if (!scope_info.is_null()) {
+ num_heap_slots_ = scope_info_->ContextLength();
+ if (*scope_info != ScopeInfo::Empty()) {
+ language_mode_ = scope_info->language_mode();
+ }
+ } else if (is_with_scope()) {
+ num_heap_slots_ = Context::MIN_CONTEXT_SLOTS;
}
AddInnerScope(inner_scope);
}
@@ -174,21 +167,23 @@ Scope::Scope(Scope* inner_scope, Handle<String> catch_variable_name)
unresolved_(0),
decls_(0),
already_resolved_(true) {
- SetDefaults(CATCH_SCOPE, NULL, Handle<SerializedScopeInfo>::null());
+ SetDefaults(CATCH_SCOPE, NULL, Handle<ScopeInfo>::null());
AddInnerScope(inner_scope);
++num_var_or_const_;
+ num_heap_slots_ = Context::MIN_CONTEXT_SLOTS;
Variable* variable = variables_.Declare(this,
catch_variable_name,
- Variable::VAR,
+ VAR,
true, // Valid left-hand side.
- Variable::NORMAL);
+ Variable::NORMAL,
+ kCreatedInitialized);
AllocateHeapSlot(variable);
}
-void Scope::SetDefaults(Type type,
+void Scope::SetDefaults(ScopeType type,
Scope* outer_scope,
- Handle<SerializedScopeInfo> scope_info) {
+ Handle<ScopeInfo> scope_info) {
outer_scope_ = outer_scope;
type_ = type;
scope_name_ = isolate_->factory()->empty_symbol();
@@ -201,53 +196,57 @@ void Scope::SetDefaults(Type type,
scope_contains_with_ = false;
scope_calls_eval_ = false;
// Inherit the strict mode from the parent scope.
- strict_mode_ = (outer_scope != NULL) && outer_scope->strict_mode_;
- outer_scope_calls_eval_ = false;
+ language_mode_ = (outer_scope != NULL)
+ ? outer_scope->language_mode_ : CLASSIC_MODE;
outer_scope_calls_non_strict_eval_ = false;
inner_scope_calls_eval_ = false;
- outer_scope_is_eval_scope_ = false;
force_eager_compilation_ = false;
num_var_or_const_ = 0;
num_stack_slots_ = 0;
num_heap_slots_ = 0;
scope_info_ = scope_info;
+ start_position_ = RelocInfo::kNoPosition;
+ end_position_ = RelocInfo::kNoPosition;
+ if (!scope_info.is_null()) {
+ scope_calls_eval_ = scope_info->CallsEval();
+ language_mode_ = scope_info->language_mode();
+ }
}
-Scope* Scope::DeserializeScopeChain(CompilationInfo* info,
- Scope* global_scope) {
+Scope* Scope::DeserializeScopeChain(Context* context, Scope* global_scope) {
// Reconstruct the outer scope chain from a closure's context chain.
- ASSERT(!info->closure().is_null());
- Context* context = info->closure()->context();
Scope* current_scope = NULL;
Scope* innermost_scope = NULL;
bool contains_with = false;
while (!context->IsGlobalContext()) {
if (context->IsWithContext()) {
+ Scope* with_scope = new Scope(current_scope,
+ WITH_SCOPE,
+ Handle<ScopeInfo>::null());
+ current_scope = with_scope;
// All the inner scopes are inside a with.
contains_with = true;
for (Scope* s = innermost_scope; s != NULL; s = s->outer_scope()) {
s->scope_inside_with_ = true;
}
+ } else if (context->IsFunctionContext()) {
+ ScopeInfo* scope_info = context->closure()->shared()->scope_info();
+ current_scope = new Scope(current_scope,
+ FUNCTION_SCOPE,
+ Handle<ScopeInfo>(scope_info));
+ } else if (context->IsBlockContext()) {
+ ScopeInfo* scope_info = ScopeInfo::cast(context->extension());
+ current_scope = new Scope(current_scope,
+ BLOCK_SCOPE,
+ Handle<ScopeInfo>(scope_info));
} else {
- if (context->IsFunctionContext()) {
- SerializedScopeInfo* scope_info =
- context->closure()->shared()->scope_info();
- current_scope = new Scope(current_scope, FUNCTION_SCOPE,
- Handle<SerializedScopeInfo>(scope_info));
- } else if (context->IsBlockContext()) {
- SerializedScopeInfo* scope_info =
- SerializedScopeInfo::cast(context->extension());
- current_scope = new Scope(current_scope, BLOCK_SCOPE,
- Handle<SerializedScopeInfo>(scope_info));
- } else {
- ASSERT(context->IsCatchContext());
- String* name = String::cast(context->extension());
- current_scope = new Scope(current_scope, Handle<String>(name));
- }
- if (contains_with) current_scope->RecordWithStatement();
- if (innermost_scope == NULL) innermost_scope = current_scope;
+ ASSERT(context->IsCatchContext());
+ String* name = String::cast(context->extension());
+ current_scope = new Scope(current_scope, Handle<String>(name));
}
+ if (contains_with) current_scope->RecordWithStatement();
+ if (innermost_scope == NULL) innermost_scope = current_scope;
// Forget about a with when we move to a context for a different function.
if (context->previous()->closure() != context->closure()) {
@@ -257,39 +256,48 @@ Scope* Scope::DeserializeScopeChain(CompilationInfo* info,
}
global_scope->AddInnerScope(current_scope);
+ global_scope->PropagateScopeInfo(false);
return (innermost_scope == NULL) ? global_scope : innermost_scope;
}
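The loop above rebuilds an outer scope chain by walking a closure's context chain outwards, flagging every already-built inner scope once a 'with' context is crossed. As a rough, self-contained illustration of the shape of that walk, here is a toy version; ToyContext, ToyScope and Reconstruct are invented for the sketch, and the "forget the with when moving to a different closure" reset is omitted.

#include <cassert>
#include <cstddef>
#include <vector>

enum ContextKind { GLOBAL_CTX, FUNCTION_CTX, BLOCK_CTX, WITH_CTX, CATCH_CTX };

struct ToyContext {
  ContextKind kind;
  ToyContext* previous;  // link towards the global context
};

struct ToyScope {
  ContextKind kind;
  ToyScope* outer;       // filled in as progressively outer scopes appear
  bool inside_with;
  bool contains_with;
};

// Walks the context chain outwards and materializes one scope per context.
ToyScope* Reconstruct(ToyContext* context, std::vector<ToyScope*>* all) {
  ToyScope* current = NULL;
  ToyScope* innermost = NULL;
  bool contains_with = false;
  while (context->kind != GLOBAL_CTX) {
    ToyScope* scope = new ToyScope();
    scope->kind = context->kind;
    scope->outer = NULL;
    scope->inside_with = false;
    scope->contains_with = false;
    if (current != NULL) current->outer = scope;  // new scope encloses current
    current = scope;
    all->push_back(scope);
    if (context->kind == WITH_CTX) {
      contains_with = true;  // everything built so far sits inside this 'with'
      for (ToyScope* s = innermost; s != NULL; s = s->outer) {
        s->inside_with = true;
      }
    }
    if (contains_with) current->contains_with = true;
    if (innermost == NULL) innermost = current;
    context = context->previous;
  }
  return innermost;
}

int main() {
  ToyContext global = { GLOBAL_CTX, NULL };
  ToyContext function = { FUNCTION_CTX, &global };
  ToyContext with = { WITH_CTX, &function };
  ToyContext block = { BLOCK_CTX, &with };
  std::vector<ToyScope*> all;
  ToyScope* innermost = Reconstruct(&block, &all);
  assert(innermost != NULL && innermost->kind == BLOCK_CTX);
  assert(innermost->inside_with);  // the block is nested in the 'with'
  for (std::size_t i = 0; i < all.size(); i++) delete all[i];
  return 0;
}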
bool Scope::Analyze(CompilationInfo* info) {
ASSERT(info->function() != NULL);
- Scope* top = info->function()->scope();
+ Scope* scope = info->function()->scope();
+ Scope* top = scope;
+
+ // Traverse the scope tree up to the first unresolved scope or the global
+ // scope and start scope resolution and variable allocation from that scope.
+ while (!top->is_global_scope() &&
+ !top->outer_scope()->already_resolved()) {
+ top = top->outer_scope();
+ }
- while (top->outer_scope() != NULL) top = top->outer_scope();
- top->AllocateVariables(info->calling_context());
+ // Allocate the variables.
+ top->AllocateVariables(info->global_scope());
#ifdef DEBUG
if (info->isolate()->bootstrapper()->IsActive()
? FLAG_print_builtin_scopes
: FLAG_print_scopes) {
- info->function()->scope()->Print();
+ scope->Print();
}
#endif
- info->SetScope(info->function()->scope());
+ info->SetScope(scope);
return true; // Cannot fail.
}
-void Scope::Initialize(bool inside_with) {
+void Scope::Initialize() {
ASSERT(!already_resolved());
// Add this scope as a new inner scope of the outer scope.
if (outer_scope_ != NULL) {
outer_scope_->inner_scopes_.Add(this);
- scope_inside_with_ = outer_scope_->scope_inside_with_ || inside_with;
+ scope_inside_with_ = outer_scope_->scope_inside_with_ || is_with_scope();
} else {
- scope_inside_with_ = inside_with;
+ scope_inside_with_ = is_with_scope();
}
// Declare convenience variables.
@@ -300,21 +308,19 @@ void Scope::Initialize(bool inside_with) {
// instead load them directly from the stack. Currently, the only
// such parameter is 'this' which is passed on the stack when
// invoking scripts.
- if (is_catch_scope() || is_block_scope()) {
- ASSERT(outer_scope() != NULL);
- receiver_ = outer_scope()->receiver();
- } else {
- ASSERT(is_function_scope() ||
- is_global_scope() ||
- is_eval_scope());
+ if (is_declaration_scope()) {
Variable* var =
variables_.Declare(this,
isolate_->factory()->this_symbol(),
- Variable::VAR,
+ VAR,
false,
- Variable::THIS);
+ Variable::THIS,
+ kCreatedInitialized);
var->AllocateTo(Variable::PARAMETER, -1);
receiver_ = var;
+ } else {
+ ASSERT(outer_scope() != NULL);
+ receiver_ = outer_scope()->receiver();
}
if (is_function_scope()) {
@@ -323,9 +329,10 @@ void Scope::Initialize(bool inside_with) {
// allocated during variable allocation.
variables_.Declare(this,
isolate_->factory()->arguments_symbol(),
- Variable::VAR,
+ VAR,
true,
- Variable::ARGUMENTS);
+ Variable::ARGUMENTS,
+ kCreatedInitialized);
}
}
@@ -365,34 +372,50 @@ Variable* Scope::LocalLookup(Handle<String> name) {
return result;
}
// If we have a serialized scope info, we might find the variable there.
- //
- // We should never lookup 'arguments' in this scope as it is implicitly
- // present in every scope.
- ASSERT(*name != *isolate_->factory()->arguments_symbol());
// There should be no local slot with the given name.
ASSERT(scope_info_->StackSlotIndex(*name) < 0);
// Check context slot lookup.
- Variable::Mode mode;
- int index = scope_info_->ContextSlotIndex(*name, &mode);
+ VariableMode mode;
+ InitializationFlag init_flag;
+ int index = scope_info_->ContextSlotIndex(*name, &mode, &init_flag);
if (index < 0) {
// Check parameters.
- mode = Variable::VAR;
+ mode = VAR;
+ init_flag = kCreatedInitialized;
index = scope_info_->ParameterIndex(*name);
- if (index < 0) {
- // Check the function name.
- index = scope_info_->FunctionContextSlotIndex(*name);
- if (index < 0) return NULL;
- }
+ if (index < 0) return NULL;
}
Variable* var =
- variables_.Declare(this, name, mode, true, Variable::NORMAL);
+ variables_.Declare(this,
+ name,
+ mode,
+ true,
+ Variable::NORMAL,
+ init_flag);
var->AllocateTo(Variable::CONTEXT, index);
return var;
}
+Variable* Scope::LookupFunctionVar(Handle<String> name) {
+ if (function_ != NULL && function_->name().is_identical_to(name)) {
+ return function_->var();
+ } else if (!scope_info_.is_null()) {
+ // If we are backed by a scope info, try to lookup the variable there.
+ VariableMode mode;
+ int index = scope_info_->FunctionContextSlotIndex(*name, &mode);
+ if (index < 0) return NULL;
+ Variable* var = DeclareFunctionVar(name, mode);
+ var->AllocateTo(Variable::CONTEXT, index);
+ return var;
+ } else {
+ return NULL;
+ }
+}
+
+
Variable* Scope::Lookup(Handle<String> name) {
for (Scope* scope = this;
scope != NULL;
@@ -404,54 +427,59 @@ Variable* Scope::Lookup(Handle<String> name) {
}
-Variable* Scope::DeclareFunctionVar(Handle<String> name) {
+Variable* Scope::DeclareFunctionVar(Handle<String> name, VariableMode mode) {
ASSERT(is_function_scope() && function_ == NULL);
- Variable* function_var =
- new Variable(this, name, Variable::CONST, true, Variable::NORMAL);
+ Variable* function_var = new Variable(
+ this, name, mode, true, Variable::NORMAL, kCreatedInitialized);
function_ = new(isolate_->zone()) VariableProxy(isolate_, function_var);
return function_var;
}
-void Scope::DeclareParameter(Handle<String> name, Variable::Mode mode) {
+void Scope::DeclareParameter(Handle<String> name, VariableMode mode) {
ASSERT(!already_resolved());
ASSERT(is_function_scope());
- Variable* var =
- variables_.Declare(this, name, mode, true, Variable::NORMAL);
+ Variable* var = variables_.Declare(
+ this, name, mode, true, Variable::NORMAL, kCreatedInitialized);
params_.Add(var);
}
-Variable* Scope::DeclareLocal(Handle<String> name, Variable::Mode mode) {
+Variable* Scope::DeclareLocal(Handle<String> name,
+ VariableMode mode,
+ InitializationFlag init_flag) {
ASSERT(!already_resolved());
// This function handles VAR and CONST modes. DYNAMIC variables are
// introduced during variable allocation, INTERNAL variables are allocated
// explicitly, and TEMPORARY variables are allocated via NewTemporary().
- ASSERT(mode == Variable::VAR ||
- mode == Variable::CONST ||
- mode == Variable::LET);
+ ASSERT(mode == VAR ||
+ mode == CONST ||
+ mode == CONST_HARMONY ||
+ mode == LET);
++num_var_or_const_;
- return variables_.Declare(this, name, mode, true, Variable::NORMAL);
+ return
+ variables_.Declare(this, name, mode, true, Variable::NORMAL, init_flag);
}
Variable* Scope::DeclareGlobal(Handle<String> name) {
ASSERT(is_global_scope());
- return variables_.Declare(this, name, Variable::DYNAMIC_GLOBAL,
+ return variables_.Declare(this,
+ name,
+ DYNAMIC_GLOBAL,
true,
- Variable::NORMAL);
+ Variable::NORMAL,
+ kCreatedInitialized);
}
-VariableProxy* Scope::NewUnresolved(Handle<String> name,
- bool inside_with,
- int position) {
+VariableProxy* Scope::NewUnresolved(Handle<String> name, int position) {
// Note that we must not share the unresolved variables with
// the same name because they may be removed selectively via
// RemoveUnresolved().
ASSERT(!already_resolved());
VariableProxy* proxy = new(isolate_->zone()) VariableProxy(
- isolate_, name, false, inside_with, position);
+ isolate_, name, false, position);
unresolved_.Add(proxy);
return proxy;
}
@@ -473,9 +501,10 @@ Variable* Scope::NewTemporary(Handle<String> name) {
ASSERT(!already_resolved());
Variable* var = new Variable(this,
name,
- Variable::TEMPORARY,
+ TEMPORARY,
true,
- Variable::NORMAL);
+ Variable::NORMAL,
+ kCreatedInitialized);
temps_.Add(var);
return var;
}
@@ -505,81 +534,68 @@ Declaration* Scope::CheckConflictingVarDeclarations() {
int length = decls_.length();
for (int i = 0; i < length; i++) {
Declaration* decl = decls_[i];
- if (decl->mode() != Variable::VAR) continue;
+ if (decl->mode() != VAR) continue;
Handle<String> name = decl->proxy()->name();
- bool cond = true;
- for (Scope* scope = decl->scope(); cond ; scope = scope->outer_scope_) {
+
+ // Iterate through all scopes until and including the declaration scope.
+ Scope* previous = NULL;
+ Scope* current = decl->scope();
+ do {
// There is a conflict if there exists a non-VAR binding.
- Variable* other_var = scope->variables_.Lookup(name);
- if (other_var != NULL && other_var->mode() != Variable::VAR) {
+ Variable* other_var = current->variables_.Lookup(name);
+ if (other_var != NULL && other_var->mode() != VAR) {
return decl;
}
-
- // Include declaration scope in the iteration but stop after.
- if (!scope->is_block_scope() && !scope->is_catch_scope()) cond = false;
- }
+ previous = current;
+ current = current->outer_scope_;
+ } while (!previous->is_declaration_scope());
}
return NULL;
}
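The do/while above stops only after it has visited the declaration scope, so a hoisted 'var' is checked against every lexical (non-VAR) binding it crosses, as in { let x; { var x; } } under harmony block scoping. A minimal standalone model of that scan, with invented ToyScope and ConflictsWithVar names, might look like this:

#include <cassert>
#include <cstddef>
#include <map>
#include <string>

enum Mode { VAR_MODE, LET_MODE, CONST_MODE };

struct ToyScope {
  ToyScope* outer;
  bool is_declaration_scope;            // function, global or eval scope
  std::map<std::string, Mode> bindings; // name -> declaration mode
};

// Returns true if declaring 'var name' in 'scope' conflicts with a lexical
// binding in any scope up to and including the declaration scope.
bool ConflictsWithVar(ToyScope* scope, const std::string& name) {
  ToyScope* previous = NULL;
  ToyScope* current = scope;
  do {
    std::map<std::string, Mode>::iterator it = current->bindings.find(name);
    if (it != current->bindings.end() && it->second != VAR_MODE) return true;
    previous = current;
    current = current->outer;
  } while (!previous->is_declaration_scope);
  return false;
}

int main() {
  ToyScope function_scope = { NULL, true };
  ToyScope block = { &function_scope, false };
  block.bindings["x"] = LET_MODE;
  ToyScope inner = { &block, false };
  assert(ConflictsWithVar(&inner, "x"));   // 'var x' crosses a 'let x'
  assert(!ConflictsWithVar(&inner, "y"));  // no lexical binding in the way
  return 0;
}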
-template<class Allocator>
-void Scope::CollectUsedVariables(List<Variable*, Allocator>* locals) {
- // Collect variables in this scope.
- // Note that the function_ variable - if present - is not
- // collected here but handled separately in ScopeInfo
- // which is the current user of this function).
+void Scope::CollectStackAndContextLocals(ZoneList<Variable*>* stack_locals,
+ ZoneList<Variable*>* context_locals) {
+ ASSERT(stack_locals != NULL);
+ ASSERT(context_locals != NULL);
+
+ // Collect temporaries which are always allocated on the stack.
for (int i = 0; i < temps_.length(); i++) {
Variable* var = temps_[i];
if (var->is_used()) {
- locals->Add(var);
+ ASSERT(var->IsStackLocal());
+ stack_locals->Add(var);
}
}
+
+ // Collect declared local variables.
for (VariableMap::Entry* p = variables_.Start();
p != NULL;
p = variables_.Next(p)) {
Variable* var = reinterpret_cast<Variable*>(p->value);
if (var->is_used()) {
- locals->Add(var);
+ if (var->IsStackLocal()) {
+ stack_locals->Add(var);
+ } else if (var->IsContextSlot()) {
+ context_locals->Add(var);
+ }
}
}
}
-// Make sure the method gets instantiated by the template system.
-template void Scope::CollectUsedVariables(
- List<Variable*, FreeStoreAllocationPolicy>* locals);
-template void Scope::CollectUsedVariables(
- List<Variable*, PreallocatedStorage>* locals);
-template void Scope::CollectUsedVariables(
- List<Variable*, ZoneListAllocationPolicy>* locals);
-
-
-void Scope::AllocateVariables(Handle<Context> context) {
- ASSERT(outer_scope_ == NULL); // eval or global scopes only
-
+void Scope::AllocateVariables(Scope* global_scope) {
// 1) Propagate scope information.
- // If we are in an eval scope, we may have other outer scopes about
- // which we don't know anything at this point. Thus we must be conservative
- // and assume they may invoke eval themselves. Eventually we could capture
- // this information in the ScopeInfo and then use it here (by traversing
- // the call chain stack, at compile time).
-
- bool eval_scope = is_eval_scope();
- bool outer_scope_calls_eval = false;
bool outer_scope_calls_non_strict_eval = false;
- if (!is_global_scope()) {
- context->ComputeEvalScopeInfo(&outer_scope_calls_eval,
- &outer_scope_calls_non_strict_eval);
+ if (outer_scope_ != NULL) {
+ outer_scope_calls_non_strict_eval =
+ outer_scope_->outer_scope_calls_non_strict_eval() |
+ outer_scope_->calls_non_strict_eval();
}
- PropagateScopeInfo(outer_scope_calls_eval,
- outer_scope_calls_non_strict_eval,
- eval_scope);
+ PropagateScopeInfo(outer_scope_calls_non_strict_eval);
// 2) Resolve variables.
- Scope* global_scope = NULL;
- if (is_global_scope()) global_scope = this;
- ResolveVariablesRecursively(global_scope, context);
+ ResolveVariablesRecursively(global_scope);
// 3) Allocate variables.
AllocateVariablesRecursively();
@@ -627,30 +643,48 @@ int Scope::ContextChainLength(Scope* scope) {
Scope* Scope::DeclarationScope() {
Scope* scope = this;
- while (scope->is_catch_scope() ||
- scope->is_block_scope()) {
+ while (!scope->is_declaration_scope()) {
scope = scope->outer_scope();
}
return scope;
}
-Handle<SerializedScopeInfo> Scope::GetSerializedScopeInfo() {
+Handle<ScopeInfo> Scope::GetScopeInfo() {
if (scope_info_.is_null()) {
- scope_info_ = SerializedScopeInfo::Create(this);
+ scope_info_ = ScopeInfo::Create(this);
}
return scope_info_;
}
+void Scope::GetNestedScopeChain(
+ List<Handle<ScopeInfo> >* chain,
+ int position) {
+ if (!is_eval_scope()) chain->Add(Handle<ScopeInfo>(GetScopeInfo()));
+
+ for (int i = 0; i < inner_scopes_.length(); i++) {
+ Scope* scope = inner_scopes_[i];
+ int beg_pos = scope->start_position();
+ int end_pos = scope->end_position();
+ ASSERT(beg_pos >= 0 && end_pos >= 0);
+ if (beg_pos <= position && position < end_pos) {
+ scope->GetNestedScopeChain(chain, position);
+ return;
+ }
+ }
+}
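GetNestedScopeChain descends through inner scopes by source-position containment, filling the chain outermost-first. A toy version of the same descent (ignoring the eval-scope exclusion above, with invented names) could read:

#include <cassert>
#include <cstddef>
#include <vector>

struct ToyScope {
  int start, end;                    // source positions, end exclusive
  std::vector<ToyScope*> inner;
};

// Fills 'chain' outermost-first with every scope whose range contains
// 'position', descending into at most one inner scope per level.
void GetNestedScopeChain(ToyScope* scope, int position,
                         std::vector<ToyScope*>* chain) {
  chain->push_back(scope);
  for (std::size_t i = 0; i < scope->inner.size(); i++) {
    ToyScope* s = scope->inner[i];
    if (s->start <= position && position < s->end) {
      GetNestedScopeChain(s, position, chain);
      return;                        // sibling ranges are disjoint
    }
  }
}

int main() {
  ToyScope block = { 10, 20, std::vector<ToyScope*>() };
  ToyScope function = { 0, 30, std::vector<ToyScope*>() };
  function.inner.push_back(&block);
  std::vector<ToyScope*> chain;
  GetNestedScopeChain(&function, 15, &chain);
  assert(chain.size() == 2 && chain[1] == &block);  // outermost first
  return 0;
}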
+
+
#ifdef DEBUG
-static const char* Header(Scope::Type type) {
+static const char* Header(ScopeType type) {
switch (type) {
- case Scope::EVAL_SCOPE: return "eval";
- case Scope::FUNCTION_SCOPE: return "function";
- case Scope::GLOBAL_SCOPE: return "global";
- case Scope::CATCH_SCOPE: return "catch";
- case Scope::BLOCK_SCOPE: return "block";
+ case EVAL_SCOPE: return "eval";
+ case FUNCTION_SCOPE: return "function";
+ case GLOBAL_SCOPE: return "global";
+ case CATCH_SCOPE: return "catch";
+ case BLOCK_SCOPE: return "block";
+ case WITH_SCOPE: return "with";
}
UNREACHABLE();
return NULL;
@@ -695,9 +729,9 @@ static void PrintVar(int indent, Variable* var) {
PrintName(var->name());
PrintF("; // ");
PrintLocation(var);
- if (var->is_accessed_from_inner_scope()) {
+ if (var->has_forced_context_allocation()) {
if (!var->IsUnallocated()) PrintF(", ");
- PrintF("inner scope access");
+ PrintF("forced context allocation");
}
PrintF("\n");
}
@@ -746,18 +780,23 @@ void Scope::Print(int n) {
if (HasTrivialOuterContext()) {
Indent(n1, "// scope has trivial outer context\n");
}
- if (is_strict_mode()) Indent(n1, "// strict mode scope\n");
+ switch (language_mode()) {
+ case CLASSIC_MODE:
+ break;
+ case STRICT_MODE:
+ Indent(n1, "// strict mode scope\n");
+ break;
+ case EXTENDED_MODE:
+ Indent(n1, "// extended mode scope\n");
+ break;
+ }
if (scope_inside_with_) Indent(n1, "// scope inside 'with'\n");
if (scope_contains_with_) Indent(n1, "// scope contains 'with'\n");
if (scope_calls_eval_) Indent(n1, "// scope calls 'eval'\n");
- if (outer_scope_calls_eval_) Indent(n1, "// outer scope calls 'eval'\n");
if (outer_scope_calls_non_strict_eval_) {
Indent(n1, "// outer scope calls 'eval' in non-strict context\n");
}
if (inner_scope_calls_eval_) Indent(n1, "// inner scope calls 'eval'\n");
- if (outer_scope_is_eval_scope_) {
- Indent(n1, "// outer scope is 'eval' scope\n");
- }
if (num_stack_slots_ > 0) { Indent(n1, "// ");
PrintF("%d stack slots\n", num_stack_slots_); }
if (num_heap_slots_ > 0) { Indent(n1, "// ");
@@ -779,9 +818,9 @@ void Scope::Print(int n) {
Indent(n1, "// dynamic vars\n");
if (dynamics_ != NULL) {
- PrintMap(n1, dynamics_->GetMap(Variable::DYNAMIC));
- PrintMap(n1, dynamics_->GetMap(Variable::DYNAMIC_LOCAL));
- PrintMap(n1, dynamics_->GetMap(Variable::DYNAMIC_GLOBAL));
+ PrintMap(n1, dynamics_->GetMap(DYNAMIC));
+ PrintMap(n1, dynamics_->GetMap(DYNAMIC_LOCAL));
+ PrintMap(n1, dynamics_->GetMap(DYNAMIC_GLOBAL));
}
// Print inner scopes (disable by providing negative n).
@@ -797,13 +836,20 @@ void Scope::Print(int n) {
#endif // DEBUG
-Variable* Scope::NonLocal(Handle<String> name, Variable::Mode mode) {
+Variable* Scope::NonLocal(Handle<String> name, VariableMode mode) {
if (dynamics_ == NULL) dynamics_ = new DynamicScopePart();
VariableMap* map = dynamics_->GetMap(mode);
Variable* var = map->Lookup(name);
if (var == NULL) {
// Declare a new non-local.
- var = map->Declare(NULL, name, mode, true, Variable::NORMAL);
+ InitializationFlag init_flag = (mode == VAR)
+ ? kCreatedInitialized : kNeedsInitialization;
+ var = map->Declare(NULL,
+ name,
+ mode,
+ true,
+ Variable::NORMAL,
+ init_flag);
// Allocate it by giving it a dynamic lookup.
var->AllocateTo(Variable::LOOKUP, -1);
}
@@ -811,80 +857,61 @@ Variable* Scope::NonLocal(Handle<String> name, Variable::Mode mode) {
}
-// Lookup a variable starting with this scope. The result is either
-// the statically resolved variable belonging to an outer scope, or
-// NULL. It may be NULL because a) we couldn't find a variable, or b)
-// because the variable is just a guess (and may be shadowed by
-// another variable that is introduced dynamically via an 'eval' call
-// or a 'with' statement).
Variable* Scope::LookupRecursive(Handle<String> name,
- bool from_inner_scope,
- Variable** invalidated_local) {
- // If we find a variable, but the current scope calls 'eval', the found
- // variable may not be the correct one (the 'eval' may introduce a
- // property with the same name). In that case, remember that the variable
- // found is just a guess.
- bool guess = scope_calls_eval_;
-
+ BindingKind* binding_kind) {
+ ASSERT(binding_kind != NULL);
// Try to find the variable in this scope.
Variable* var = LocalLookup(name);
+ // We found a variable and we are done. (Even if there is an 'eval' in
+ // this scope which introduces the same variable again, the resulting
+ // variable remains the same.)
if (var != NULL) {
- // We found a variable. If this is not an inner lookup, we are done.
- // (Even if there is an 'eval' in this scope which introduces the
- // same variable again, the resulting variable remains the same.
- // Note that enclosing 'with' statements are handled at the call site.)
- if (!from_inner_scope)
- return var;
-
- } else {
- // We did not find a variable locally. Check against the function variable,
- // if any. We can do this for all scopes, since the function variable is
- // only present - if at all - for function scopes.
- //
- // This lookup corresponds to a lookup in the "intermediate" scope sitting
- // between this scope and the outer scope. (ECMA-262, 3rd., requires that
- // the name of named function literal is kept in an intermediate scope
- // in between this scope and the next outer scope.)
- if (function_ != NULL && function_->name().is_identical_to(name)) {
- var = function_->var();
-
- } else if (outer_scope_ != NULL) {
- var = outer_scope_->LookupRecursive(name, true, invalidated_local);
- // We may have found a variable in an outer scope. However, if
- // the current scope is inside a 'with', the actual variable may
- // be a property introduced via the 'with' statement. Then, the
- // variable we may have found is just a guess.
- if (scope_inside_with_)
- guess = true;
- }
-
- // If we did not find a variable, we are done.
- if (var == NULL)
- return NULL;
+ *binding_kind = BOUND;
+ return var;
}
- ASSERT(var != NULL);
-
- // If this is a lookup from an inner scope, mark the variable.
- if (from_inner_scope) {
- var->MarkAsAccessedFromInnerScope();
+ // We did not find a variable locally. Check against the function variable,
+ // if any. We can do this for all scopes, since the function variable is
+ // only present - if at all - for function scopes.
+ *binding_kind = UNBOUND;
+ var = LookupFunctionVar(name);
+ if (var != NULL) {
+ *binding_kind = BOUND;
+ } else if (outer_scope_ != NULL) {
+ var = outer_scope_->LookupRecursive(name, binding_kind);
+ if (*binding_kind == BOUND && (is_function_scope() || is_with_scope())) {
+ var->ForceContextAllocation();
+ }
+ } else {
+ ASSERT(is_global_scope());
}
- // If the variable we have found is just a guess, invalidate the
- // result. If the found variable is local, record that fact so we
- // can generate fast code to get it if it is not shadowed by eval.
- if (guess) {
- if (!var->is_global()) *invalidated_local = var;
- var = NULL;
+ if (is_with_scope()) {
+ // The current scope is a with scope, so the variable binding cannot be
+ // statically resolved. However, note that it was necessary to do a lookup
+ // in the outer scope anyway, because if a binding exists in an outer scope,
+ // the associated variable has to be marked as potentially being accessed
+ // from inside of an inner with scope (the property may not be in the 'with'
+ // object).
+ *binding_kind = DYNAMIC_LOOKUP;
+ return NULL;
+ } else if (calls_non_strict_eval()) {
+ // A variable binding may have been found in an outer scope, but the current
+ // scope makes a non-strict 'eval' call, so the found variable may not be
+ // the correct one (the 'eval' may introduce a binding with the same name).
+ // In that case, change the lookup result to reflect this situation.
+ if (*binding_kind == BOUND) {
+ *binding_kind = BOUND_EVAL_SHADOWED;
+ } else if (*binding_kind == UNBOUND) {
+ *binding_kind = UNBOUND_EVAL_SHADOWED;
+ }
}
-
return var;
}
void Scope::ResolveVariable(Scope* global_scope,
- Handle<Context> context,
VariableProxy* proxy) {
ASSERT(global_scope == NULL || global_scope->is_global_scope());
@@ -893,116 +920,73 @@ void Scope::ResolveVariable(Scope* global_scope,
if (proxy->var() != NULL) return;
// Otherwise, try to resolve the variable.
- Variable* invalidated_local = NULL;
- Variable* var = LookupRecursive(proxy->name(), false, &invalidated_local);
-
- if (proxy->inside_with()) {
- // If we are inside a local 'with' statement, all bets are off
- // and we cannot resolve the proxy to a local variable even if
- // we found an outer matching variable.
- // Note that we must do a lookup anyway, because if we find one,
- // we must mark that variable as potentially accessed from this
- // inner scope (the property may not be in the 'with' object).
- var = NonLocal(proxy->name(), Variable::DYNAMIC);
-
- } else {
- // We are not inside a local 'with' statement.
-
- if (var == NULL) {
- // We did not find the variable. We have a global variable
- // if we are in the global scope (we know already that we
- // are outside a 'with' statement) or if there is no way
- // that the variable might be introduced dynamically (through
- // a local or outer eval() call, or an outer 'with' statement),
- // or we don't know about the outer scope (because we are
- // in an eval scope).
- if (is_global_scope() ||
- !(scope_inside_with_ || outer_scope_is_eval_scope_ ||
- scope_calls_eval_ || outer_scope_calls_eval_)) {
- // We must have a global variable.
- ASSERT(global_scope != NULL);
- var = global_scope->DeclareGlobal(proxy->name());
-
- } else if (scope_inside_with_) {
- // If we are inside a with statement we give up and look up
- // the variable at runtime.
- var = NonLocal(proxy->name(), Variable::DYNAMIC);
-
- } else if (invalidated_local != NULL) {
- // No with statements are involved and we found a local
- // variable that might be shadowed by eval introduced
- // variables.
- var = NonLocal(proxy->name(), Variable::DYNAMIC_LOCAL);
- var->set_local_if_not_shadowed(invalidated_local);
-
- } else if (outer_scope_is_eval_scope_) {
- // No with statements and we did not find a local and the code
- // is executed with a call to eval. The context contains
- // scope information that we can use to determine if the
- // variable is global if it is not shadowed by eval-introduced
- // variables.
- if (context->GlobalIfNotShadowedByEval(proxy->name())) {
- var = NonLocal(proxy->name(), Variable::DYNAMIC_GLOBAL);
-
- } else {
- var = NonLocal(proxy->name(), Variable::DYNAMIC);
- }
+ BindingKind binding_kind;
+ Variable* var = LookupRecursive(proxy->name(), &binding_kind);
+ switch (binding_kind) {
+ case BOUND:
+ // We found a variable binding.
+ break;
+ case BOUND_EVAL_SHADOWED:
+ // We found a variable binding that might be shadowed
+ // by 'eval'-introduced variable bindings.
+ if (var->is_global()) {
+ var = NonLocal(proxy->name(), DYNAMIC_GLOBAL);
} else {
- // No with statements and we did not find a local and the code
- // is not executed with a call to eval. We know that this
- // variable is global unless it is shadowed by eval-introduced
- // variables.
- var = NonLocal(proxy->name(), Variable::DYNAMIC_GLOBAL);
+ Variable* invalidated = var;
+ var = NonLocal(proxy->name(), DYNAMIC_LOCAL);
+ var->set_local_if_not_shadowed(invalidated);
}
- }
+ break;
+
+ case UNBOUND:
+ // No binding has been found. Declare a variable in global scope.
+ ASSERT(global_scope != NULL);
+ var = global_scope->DeclareGlobal(proxy->name());
+ break;
+
+ case UNBOUND_EVAL_SHADOWED:
+ // No binding has been found. But some scope makes a
+ // non-strict 'eval' call.
+ var = NonLocal(proxy->name(), DYNAMIC_GLOBAL);
+ break;
+
+ case DYNAMIC_LOOKUP:
+ // The variable could not be resolved statically.
+ var = NonLocal(proxy->name(), DYNAMIC);
+ break;
}
+ ASSERT(var != NULL);
proxy->BindTo(var);
}
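The switch above consumes the BindingKind classification produced by LookupRecursive (the enum itself is documented in the scopes.h hunk further down). As a rough, self-contained model of how a lookup result degrades to a guess or a runtime lookup once a 'with' scope or a non-strict 'eval' call is crossed, consider this sketch; ToyScope and its fields are invented, and the forced context allocation across function and with boundaries is omitted.

#include <cassert>
#include <cstddef>
#include <map>
#include <string>

enum BindingKind { BOUND, BOUND_EVAL_SHADOWED, UNBOUND,
                   UNBOUND_EVAL_SHADOWED, DYNAMIC_LOOKUP };

struct ToyScope {
  ToyScope* outer;
  bool is_with;                       // a 'with' scope
  bool non_strict_eval;               // scope makes a non-strict eval call
  std::map<std::string, int> locals;  // name -> slot

  const int* LookupRecursive(const std::string& name, BindingKind* kind) {
    std::map<std::string, int>::const_iterator it = locals.find(name);
    if (it != locals.end()) {
      *kind = BOUND;                  // a local hit is definitive, even if
      return &it->second;             // this very scope also calls eval
    }
    const int* var = NULL;
    *kind = UNBOUND;
    if (outer != NULL) var = outer->LookupRecursive(name, kind);
    if (is_with) {                    // binding may live on the with object
      *kind = DYNAMIC_LOOKUP;
      return NULL;
    }
    if (non_strict_eval) {            // eval may introduce a shadowing binding
      if (*kind == BOUND) *kind = BOUND_EVAL_SHADOWED;
      else if (*kind == UNBOUND) *kind = UNBOUND_EVAL_SHADOWED;
    }
    return var;
  }
};

int main() {
  ToyScope global = { NULL, false, false };
  global.locals["x"] = 0;
  ToyScope fun = { &global, false, true };  // function containing eval(...)
  BindingKind kind;
  fun.LookupRecursive("x", &kind);
  assert(kind == BOUND_EVAL_SHADOWED);      // found, but only a guess
  fun.LookupRecursive("y", &kind);
  assert(kind == UNBOUND_EVAL_SHADOWED);    // presumed global, unless shadowed
  return 0;
}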
-void Scope::ResolveVariablesRecursively(Scope* global_scope,
- Handle<Context> context) {
+void Scope::ResolveVariablesRecursively(Scope* global_scope) {
ASSERT(global_scope == NULL || global_scope->is_global_scope());
// Resolve unresolved variables for this scope.
for (int i = 0; i < unresolved_.length(); i++) {
- ResolveVariable(global_scope, context, unresolved_[i]);
+ ResolveVariable(global_scope, unresolved_[i]);
}
// Resolve unresolved variables for inner scopes.
for (int i = 0; i < inner_scopes_.length(); i++) {
- inner_scopes_[i]->ResolveVariablesRecursively(global_scope, context);
+ inner_scopes_[i]->ResolveVariablesRecursively(global_scope);
}
}
-bool Scope::PropagateScopeInfo(bool outer_scope_calls_eval,
- bool outer_scope_calls_non_strict_eval,
- bool outer_scope_is_eval_scope) {
- if (outer_scope_calls_eval) {
- outer_scope_calls_eval_ = true;
- }
-
+bool Scope::PropagateScopeInfo(bool outer_scope_calls_non_strict_eval) {
if (outer_scope_calls_non_strict_eval) {
outer_scope_calls_non_strict_eval_ = true;
}
- if (outer_scope_is_eval_scope) {
- outer_scope_is_eval_scope_ = true;
- }
-
- bool calls_eval = scope_calls_eval_ || outer_scope_calls_eval_;
- bool is_eval = is_eval_scope() || outer_scope_is_eval_scope_;
bool calls_non_strict_eval =
- (scope_calls_eval_ && !is_strict_mode()) ||
- outer_scope_calls_non_strict_eval_;
+ this->calls_non_strict_eval() || outer_scope_calls_non_strict_eval_;
for (int i = 0; i < inner_scopes_.length(); i++) {
Scope* inner_scope = inner_scopes_[i];
- if (inner_scope->PropagateScopeInfo(calls_eval,
- calls_non_strict_eval,
- is_eval)) {
+ if (inner_scope->PropagateScopeInfo(calls_non_strict_eval)) {
inner_scope_calls_eval_ = true;
}
if (inner_scope->force_eager_compilation_) {
@@ -1019,7 +1003,7 @@ bool Scope::MustAllocate(Variable* var) {
// via an eval() call. This is only possible if the variable has a
// visible name.
if ((var->is_this() || var->name()->length() > 0) &&
- (var->is_accessed_from_inner_scope() ||
+ (var->has_forced_context_allocation() ||
scope_calls_eval_ ||
inner_scope_calls_eval_ ||
scope_contains_with_ ||
@@ -1040,9 +1024,9 @@ bool Scope::MustAllocateInContext(Variable* var) {
//
// Exceptions: temporary variables are never allocated in a context;
// catch-bound variables are always allocated in a context.
- if (var->mode() == Variable::TEMPORARY) return false;
+ if (var->mode() == TEMPORARY) return false;
if (is_catch_scope() || is_block_scope()) return true;
- return var->is_accessed_from_inner_scope() ||
+ return var->has_forced_context_allocation() ||
scope_calls_eval_ ||
inner_scope_calls_eval_ ||
scope_contains_with_ ||
@@ -1095,7 +1079,7 @@ void Scope::AllocateParameterLocals() {
// In strict mode 'arguments' does not alias formal parameters.
// Therefore in strict mode we allocate parameters as if 'arguments'
// were not used.
- uses_nonstrict_arguments = !is_strict_mode();
+ uses_nonstrict_arguments = is_classic_mode();
}
// The same parameter may occur multiple times in the parameters_ list.
@@ -1106,9 +1090,8 @@ void Scope::AllocateParameterLocals() {
Variable* var = params_[i];
ASSERT(var->scope() == this);
if (uses_nonstrict_arguments) {
- // Give the parameter a use from an inner scope, to force allocation
- // to the context.
- var->MarkAsAccessedFromInnerScope();
+ // Force context allocation of the parameter.
+ var->ForceContextAllocation();
}
if (MustAllocate(var)) {
@@ -1183,21 +1166,15 @@ void Scope::AllocateVariablesRecursively() {
if (is_function_scope()) AllocateParameterLocals();
AllocateNonParameterLocals();
- // Allocate context if necessary.
- bool must_have_local_context = false;
- if (scope_calls_eval_ || scope_contains_with_) {
- // The context for the eval() call or 'with' statement in this scope.
- // Unless we are in the global or an eval scope, we need a local
- // context even if we didn't statically allocate any locals in it,
- // and the compiler will access the context variable. If we are
- // not in an inner scope, the scope is provided from the outside.
- must_have_local_context = is_function_scope();
- }
+ // Force allocation of a context for this scope if necessary. For a 'with'
+ // scope and for a function scope that makes an 'eval' call we need a context,
+ // even if no local variables were statically allocated in the scope.
+ bool must_have_context = is_with_scope() ||
+ (is_function_scope() && calls_eval());
// If we didn't allocate any locals in the local context, then we only
- // need the minimal number of slots if we must have a local context.
- if (num_heap_slots_ == Context::MIN_CONTEXT_SLOTS &&
- !must_have_local_context) {
+ // need the minimal number of slots if we must have a context.
+ if (num_heap_slots_ == Context::MIN_CONTEXT_SLOTS && !must_have_context) {
num_heap_slots_ = 0;
}
@@ -1205,4 +1182,17 @@ void Scope::AllocateVariablesRecursively() {
ASSERT(num_heap_slots_ == 0 || num_heap_slots_ >= Context::MIN_CONTEXT_SLOTS);
}
+
+int Scope::StackLocalCount() const {
+ return num_stack_slots() -
+ (function_ != NULL && function_->var()->IsStackLocal() ? 1 : 0);
+}
+
+
+int Scope::ContextLocalCount() const {
+ if (num_heap_slots() == 0) return 0;
+ return num_heap_slots() - Context::MIN_CONTEXT_SLOTS -
+ (function_ != NULL && function_->var()->IsContextSlot() ? 1 : 0);
+}
+
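A quick worked check of the accounting above, under the assumption that Context::MIN_CONTEXT_SLOTS is 4 in this snapshot (the fixed closure/previous/extension/global header); the helper below is invented for the illustration:

#include <cassert>

static const int kMinContextSlots = 4;  // assumed Context::MIN_CONTEXT_SLOTS

// Mirrors Scope::ContextLocalCount(): the fixed context header and a
// context-allocated function name slot are not counted as locals.
int ContextLocalCount(int num_heap_slots, bool function_var_in_context) {
  if (num_heap_slots == 0) return 0;
  return num_heap_slots - kMinContextSlots - (function_var_in_context ? 1 : 0);
}

int main() {
  assert(ContextLocalCount(7, true) == 2);   // 7 - 4 header - 1 function name
  assert(ContextLocalCount(0, false) == 0);  // no context at all
  return 0;
}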
} } // namespace v8::internal
diff --git a/deps/v8/src/scopes.h b/deps/v8/src/scopes.h
index 2917a63bb..523a251fa 100644
--- a/deps/v8/src/scopes.h
+++ b/deps/v8/src/scopes.h
@@ -42,17 +42,14 @@ class VariableMap: public HashMap {
public:
VariableMap();
- // Dummy constructor. This constructor doesn't set up the map
- // properly so don't use it unless you have a good reason.
- explicit VariableMap(bool gotta_love_static_overloading);
-
virtual ~VariableMap();
Variable* Declare(Scope* scope,
Handle<String> name,
- Variable::Mode mode,
+ VariableMode mode,
bool is_valid_lhs,
- Variable::Kind kind);
+ Variable::Kind kind,
+ InitializationFlag initialization_flag);
Variable* Lookup(Handle<String> name);
};
@@ -64,8 +61,8 @@ class VariableMap: public HashMap {
// and setup time for scopes that don't need them.
class DynamicScopePart : public ZoneObject {
public:
- VariableMap* GetMap(Variable::Mode mode) {
- int index = mode - Variable::DYNAMIC;
+ VariableMap* GetMap(VariableMode mode) {
+ int index = mode - DYNAMIC;
ASSERT(index >= 0 && index < 3);
return &maps_[index];
}
@@ -89,28 +86,19 @@ class Scope: public ZoneObject {
// ---------------------------------------------------------------------------
// Construction
- enum Type {
- EVAL_SCOPE, // The top-level scope for an eval source.
- FUNCTION_SCOPE, // The top-level scope for a function.
- GLOBAL_SCOPE, // The top-level scope for a program or a top-level eval.
- CATCH_SCOPE, // The scope introduced by catch.
- BLOCK_SCOPE // The scope introduced by a new block.
- };
-
- Scope(Scope* outer_scope, Type type);
+ Scope(Scope* outer_scope, ScopeType type);
// Compute top scope and allocate variables. For lazy compilation the top
// scope only contains the single lazily compiled function, so this
// doesn't re-allocate variables repeatedly.
static bool Analyze(CompilationInfo* info);
- static Scope* DeserializeScopeChain(CompilationInfo* info,
- Scope* innermost_scope);
+ static Scope* DeserializeScopeChain(Context* context, Scope* global_scope);
// The scope name is only used for printing/debugging.
void SetScopeName(Handle<String> scope_name) { scope_name_ = scope_name; }
- void Initialize(bool inside_with);
+ void Initialize();
// Checks if the block scope is redundant, i.e. it does not contain any
// block scoped declarations. In that case it is removed from the scope
@@ -123,6 +111,12 @@ class Scope: public ZoneObject {
// Lookup a variable in this scope. Returns the variable or NULL if not found.
Variable* LocalLookup(Handle<String> name);
+ // This lookup corresponds to a lookup in the "intermediate" scope sitting
+ // between this scope and the outer scope. (ECMA-262, 3rd ed., requires that
+ // the name of a named function literal is kept in an intermediate scope
+ // in between this scope and the next outer scope.)
+ Variable* LookupFunctionVar(Handle<String> name);
+
// Lookup a variable in this scope or outer scopes.
// Returns the variable or NULL if not found.
Variable* Lookup(Handle<String> name);
@@ -130,16 +124,18 @@ class Scope: public ZoneObject {
// Declare the function variable for a function literal. This variable
// is in an intermediate scope between this function scope and the
// outer scope. Only possible for function scopes; at most one variable.
- Variable* DeclareFunctionVar(Handle<String> name);
+ Variable* DeclareFunctionVar(Handle<String> name, VariableMode mode);
// Declare a parameter in this scope. When there are duplicated
// parameters the rightmost one 'wins'. However, the implementation
// expects all parameters to be declared and from left to right.
- void DeclareParameter(Handle<String> name, Variable::Mode mode);
+ void DeclareParameter(Handle<String> name, VariableMode mode);
// Declare a local variable in this scope. If the variable has been
// declared before, the previously declared variable is returned.
- Variable* DeclareLocal(Handle<String> name, Variable::Mode mode);
+ Variable* DeclareLocal(Handle<String> name,
+ VariableMode mode,
+ InitializationFlag init_flag);
// Declare an implicit global variable in this scope which must be a
// global scope. The variable was introduced (possibly from an inner
@@ -149,7 +145,6 @@ class Scope: public ZoneObject {
// Create a new unresolved variable.
VariableProxy* NewUnresolved(Handle<String> name,
- bool inside_with,
int position = RelocInfo::kNoPosition);
// Remove an unresolved variable. During parsing, an unresolved variable
@@ -199,11 +194,42 @@ class Scope: public ZoneObject {
void RecordWithStatement() { scope_contains_with_ = true; }
// Inform the scope that the corresponding code contains an eval call.
- void RecordEvalCall() { scope_calls_eval_ = true; }
+ void RecordEvalCall() { if (!is_global_scope()) scope_calls_eval_ = true; }
+
+ // Set the language mode of this scope.
+ void SetLanguageMode(LanguageMode language_mode) {
+ language_mode_ = language_mode;
+ }
- // Enable strict mode for the scope (unless disabled by a global flag).
- void EnableStrictMode() {
- strict_mode_ = FLAG_strict_mode;
+ // Position in the source where this scope begins and ends.
+ //
+ // * For the scope of a with statement
+ // with (obj) stmt
+ // start position: start position of first token of 'stmt'
+ // end position: end position of last token of 'stmt'
+ // * For the scope of a block
+ // { stmts }
+ // start position: start position of '{'
+ // end position: end position of '}'
+ // * For the scope of a function literal or declaration
+ // function fun(a,b) { stmts }
+ // start position: start position of '('
+ // end position: end position of '}'
+ // * For the scope of a catch block
+ // try { stmts } catch (e) { stmts }
+ // start position: start position of '('
+ // end position: end position of ')'
+ // * For the scope of a for-statement
+ // for (let x ...) stmt
+ // start position: start position of '('
+ // end position: end position of last token of 'stmt'
+ int start_position() const { return start_position_; }
+ void set_start_position(int statement_pos) {
+ start_position_ = statement_pos;
+ }
+ int end_position() const { return end_position_; }
+ void set_end_position(int statement_pos) {
+ end_position_ = statement_pos;
}
// ---------------------------------------------------------------------------
@@ -215,14 +241,25 @@ class Scope: public ZoneObject {
bool is_global_scope() const { return type_ == GLOBAL_SCOPE; }
bool is_catch_scope() const { return type_ == CATCH_SCOPE; }
bool is_block_scope() const { return type_ == BLOCK_SCOPE; }
- bool is_strict_mode() const { return strict_mode_; }
- bool is_strict_mode_eval_scope() const {
- return is_eval_scope() && is_strict_mode();
+ bool is_with_scope() const { return type_ == WITH_SCOPE; }
+ bool is_declaration_scope() const {
+ return is_eval_scope() || is_function_scope() || is_global_scope();
+ }
+ bool is_classic_mode() const {
+ return language_mode() == CLASSIC_MODE;
+ }
+ bool is_extended_mode() const {
+ return language_mode() == EXTENDED_MODE;
+ }
+ bool is_strict_or_extended_eval_scope() const {
+ return is_eval_scope() && !is_classic_mode();
}
// Information about which scopes call eval.
bool calls_eval() const { return scope_calls_eval_; }
- bool outer_scope_calls_eval() const { return outer_scope_calls_eval_; }
+ bool calls_non_strict_eval() {
+ return scope_calls_eval_ && is_classic_mode();
+ }
bool outer_scope_calls_non_strict_eval() const {
return outer_scope_calls_non_strict_eval_;
}
@@ -238,6 +275,12 @@ class Scope: public ZoneObject {
// ---------------------------------------------------------------------------
// Accessors.
+ // The type of this scope.
+ ScopeType type() const { return type_; }
+
+ // The language mode of this scope.
+ LanguageMode language_mode() const { return language_mode_; }
+
// The variable corresponding the 'this' value.
Variable* receiver() { return receiver_; }
@@ -264,13 +307,17 @@ class Scope: public ZoneObject {
// Declarations list.
ZoneList<Declaration*>* declarations() { return &decls_; }
+ // Inner scope list.
+ ZoneList<Scope*>* inner_scopes() { return &inner_scopes_; }
// ---------------------------------------------------------------------------
// Variable allocation.
- // Collect all used locals in this scope.
- template<class Allocator>
- void CollectUsedVariables(List<Variable*, Allocator>* locals);
+ // Collect stack and context allocated local variables in this scope. Note
+ // that the function variable - if present - is not collected and should be
+ // handled separately.
+ void CollectStackAndContextLocals(ZoneList<Variable*>* stack_locals,
+ ZoneList<Variable*>* context_locals);
// Resolve and fill in the allocation information for all variables
// in this scope. Must be called *after* all scopes have been
@@ -280,7 +327,7 @@ class Scope: public ZoneObject {
// The global_scope parameter is the scope of the program's global code;
// references that cannot be resolved statically are declared there as
// implicit globals.
- void AllocateVariables(Handle<Context> context);
+ void AllocateVariables(Scope* global_scope);
// Current number of var or const locals.
int num_var_or_const() { return num_var_or_const_; }
@@ -289,6 +336,9 @@ class Scope: public ZoneObject {
int num_stack_slots() const { return num_stack_slots_; }
int num_heap_slots() const { return num_heap_slots_; }
+ int StackLocalCount() const;
+ int ContextLocalCount() const;
+
// Make sure this scope and all outer scopes are eagerly compiled.
void ForceEagerCompilation() { force_eager_compilation_ = true; }
@@ -305,7 +355,14 @@ class Scope: public ZoneObject {
// where var declarations will be hoisted to in the implementation.
Scope* DeclarationScope();
- Handle<SerializedScopeInfo> GetSerializedScopeInfo();
+ Handle<ScopeInfo> GetScopeInfo();
+
+ // Get the chain of nested scopes within this scope for the source statement
+ // position. The scopes will be added to the list from the outermost scope to
+ // the innermost scope. Only nested block, catch or with scopes are tracked
+ // and will be returned, but no inner function scopes.
+ void GetNestedScopeChain(List<Handle<ScopeInfo> >* chain,
+ int statement_position);
// ---------------------------------------------------------------------------
// Strict mode support.
@@ -330,8 +387,6 @@ class Scope: public ZoneObject {
protected:
friend class ParserFactory;
- explicit Scope(Type type);
-
Isolate* const isolate_;
// Scope tree.
@@ -339,7 +394,7 @@ class Scope: public ZoneObject {
ZoneList<Scope*> inner_scopes_; // the immediately enclosed inner scopes
// The scope type.
- Type type_;
+ ScopeType type_;
// Debugging support.
Handle<String> scope_name_;
@@ -379,14 +434,15 @@ class Scope: public ZoneObject {
// This scope or a nested catch scope or with scope contain an 'eval' call. At
// the 'eval' call site this scope is the declaration scope.
bool scope_calls_eval_;
- // This scope is a strict mode scope.
- bool strict_mode_;
+ // The language mode of this scope.
+ LanguageMode language_mode_;
+ // Source positions.
+ int start_position_;
+ int end_position_;
// Computed via PropagateScopeInfo.
- bool outer_scope_calls_eval_;
bool outer_scope_calls_non_strict_eval_;
bool inner_scope_calls_eval_;
- bool outer_scope_is_eval_scope_;
bool force_eager_compilation_;
// True if it doesn't need scope resolution (e.g., if the scope was
@@ -396,32 +452,75 @@ class Scope: public ZoneObject {
// Computed as variables are declared.
int num_var_or_const_;
- // Computed via AllocateVariables; function scopes only.
+ // Computed via AllocateVariables; function, block and catch scopes only.
int num_stack_slots_;
int num_heap_slots_;
- // Serialized scopes support.
- Handle<SerializedScopeInfo> scope_info_;
+ // Serialized scope info support.
+ Handle<ScopeInfo> scope_info_;
bool already_resolved() { return already_resolved_; }
// Create a non-local variable with a given name.
// These variables are looked up dynamically at runtime.
- Variable* NonLocal(Handle<String> name, Variable::Mode mode);
+ Variable* NonLocal(Handle<String> name, VariableMode mode);
// Variable resolution.
+ // Possible results of a recursive variable lookup telling if and how a
+ // variable is bound. These are returned in the output parameter *binding_kind
+ // of the LookupRecursive function.
+ enum BindingKind {
+ // The variable reference could be statically resolved to a variable binding
+ // which is returned. There is no 'with' statement between the reference and
+ // the binding and no scope between the reference scope (inclusive) and
+ // binding scope (exclusive) makes a non-strict 'eval' call.
+ BOUND,
+
+ // The variable reference could be statically resolved to a variable binding
+ // which is returned. There is no 'with' statement between the reference and
+ // the binding, but some scope between the reference scope (inclusive) and
+ // binding scope (exclusive) makes a non-strict 'eval' call, that might
+ // possibly introduce variable bindings shadowing the found one. Thus the
+ // found variable binding is just a guess.
+ BOUND_EVAL_SHADOWED,
+
+ // The variable reference could not be statically resolved to any binding
+ // and thus should be considered referencing a global variable. NULL is
+ // returned. The variable reference is not inside any 'with' statement and
+ // no scope between the reference scope (inclusive) and global scope
+ // (exclusive) makes a non-strict 'eval' call.
+ UNBOUND,
+
+ // The variable reference could not be statically resolved to any binding.
+ // NULL is returned. The variable reference is not inside any 'with'
+ // statement, but some scope between the reference scope (inclusive) and
+ // global scope (exclusive) makes a non-strict 'eval' call, that might
+ // possibly introduce a variable binding. Thus the reference should be
+ // considered referencing a global variable unless it is shadowed by an
+ // 'eval' introduced binding.
+ UNBOUND_EVAL_SHADOWED,
+
+ // The variable could not be statically resolved and needs to be looked up
+ // dynamically. NULL is returned. There are two possible reasons:
+ // * A 'with' statement has been encountered and there is no variable
+ // binding for the name between the variable reference and the 'with'.
+ // The variable potentially references a property of the 'with' object.
+ // * The code is being executed as part of a call to 'eval' and the calling
+ // context chain contains either a variable binding for the name or it
+ // contains a 'with' context.
+ DYNAMIC_LOOKUP
+ };
+
+ // Lookup a variable reference given by name recursively starting with this
+ // scope. The result of the lookup is reported through the output
+ // parameter *binding_kind (see the BindingKind enum above).
Variable* LookupRecursive(Handle<String> name,
- bool from_inner_function,
- Variable** invalidated_local);
+ BindingKind* binding_kind);
void ResolveVariable(Scope* global_scope,
- Handle<Context> context,
VariableProxy* proxy);
- void ResolveVariablesRecursively(Scope* global_scope,
- Handle<Context> context);
+ void ResolveVariablesRecursively(Scope* global_scope);
// Scope analysis.
- bool PropagateScopeInfo(bool outer_scope_calls_eval,
- bool outer_scope_calls_non_strict_eval,
- bool outer_scope_is_eval_scope);
+ bool PropagateScopeInfo(bool outer_scope_calls_non_strict_eval);
bool HasTrivialContext() const;
// Predicates.
@@ -438,8 +537,8 @@ class Scope: public ZoneObject {
void AllocateVariablesRecursively();
private:
- // Construct a function or block scope based on the scope info.
- Scope(Scope* inner_scope, Type type, Handle<SerializedScopeInfo> scope_info);
+ // Construct a scope based on the scope info.
+ Scope(Scope* inner_scope, ScopeType type, Handle<ScopeInfo> scope_info);
// Construct a catch scope with a binding for the name.
Scope(Scope* inner_scope, Handle<String> catch_variable_name);
@@ -451,9 +550,9 @@ class Scope: public ZoneObject {
}
}
- void SetDefaults(Type type,
+ void SetDefaults(ScopeType type,
Scope* outer_scope,
- Handle<SerializedScopeInfo> scope_info);
+ Handle<ScopeInfo> scope_info);
};
} } // namespace v8::internal
diff --git a/deps/v8/src/serialize.cc b/deps/v8/src/serialize.cc
index ecb480a8f..d0a1a639f 100644
--- a/deps/v8/src/serialize.cc
+++ b/deps/v8/src/serialize.cc
@@ -300,16 +300,28 @@ void ExternalReferenceTable::PopulateTable(Isolate* isolate) {
RUNTIME_ENTRY,
4,
"HandleScope::DeleteExtensions");
+ Add(ExternalReference::
+ incremental_marking_record_write_function(isolate).address(),
+ RUNTIME_ENTRY,
+ 5,
+ "IncrementalMarking::RecordWrite");
+ Add(ExternalReference::store_buffer_overflow_function(isolate).address(),
+ RUNTIME_ENTRY,
+ 6,
+ "StoreBuffer::StoreBufferOverflow");
+ Add(ExternalReference::
+ incremental_evacuation_record_write_function(isolate).address(),
+ RUNTIME_ENTRY,
+ 7,
+ "IncrementalMarking::RecordWrite");
+
+
// Miscellaneous
- Add(ExternalReference::the_hole_value_location(isolate).address(),
- UNCLASSIFIED,
- 2,
- "Factory::the_hole_value().location()");
- Add(ExternalReference::roots_address(isolate).address(),
+ Add(ExternalReference::roots_array_start(isolate).address(),
UNCLASSIFIED,
3,
- "Heap::roots_address()");
+ "Heap::roots_array_start()");
Add(ExternalReference::address_of_stack_limit(isolate).address(),
UNCLASSIFIED,
4,
@@ -351,129 +363,137 @@ void ExternalReferenceTable::PopulateTable(Isolate* isolate) {
"Heap::always_allocate_scope_depth()");
Add(ExternalReference::new_space_allocation_limit_address(isolate).address(),
UNCLASSIFIED,
- 13,
+ 14,
"Heap::NewSpaceAllocationLimitAddress()");
Add(ExternalReference::new_space_allocation_top_address(isolate).address(),
UNCLASSIFIED,
- 14,
+ 15,
"Heap::NewSpaceAllocationTopAddress()");
#ifdef ENABLE_DEBUGGER_SUPPORT
Add(ExternalReference::debug_break(isolate).address(),
UNCLASSIFIED,
- 15,
+ 16,
"Debug::Break()");
Add(ExternalReference::debug_step_in_fp_address(isolate).address(),
UNCLASSIFIED,
- 16,
+ 17,
"Debug::step_in_fp_addr()");
#endif
Add(ExternalReference::double_fp_operation(Token::ADD, isolate).address(),
UNCLASSIFIED,
- 17,
+ 18,
"add_two_doubles");
Add(ExternalReference::double_fp_operation(Token::SUB, isolate).address(),
UNCLASSIFIED,
- 18,
+ 19,
"sub_two_doubles");
Add(ExternalReference::double_fp_operation(Token::MUL, isolate).address(),
UNCLASSIFIED,
- 19,
+ 20,
"mul_two_doubles");
Add(ExternalReference::double_fp_operation(Token::DIV, isolate).address(),
UNCLASSIFIED,
- 20,
+ 21,
"div_two_doubles");
Add(ExternalReference::double_fp_operation(Token::MOD, isolate).address(),
UNCLASSIFIED,
- 21,
+ 22,
"mod_two_doubles");
Add(ExternalReference::compare_doubles(isolate).address(),
UNCLASSIFIED,
- 22,
+ 23,
"compare_doubles");
#ifndef V8_INTERPRETED_REGEXP
Add(ExternalReference::re_case_insensitive_compare_uc16(isolate).address(),
UNCLASSIFIED,
- 23,
+ 24,
"NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16()");
Add(ExternalReference::re_check_stack_guard_state(isolate).address(),
UNCLASSIFIED,
- 24,
+ 25,
"RegExpMacroAssembler*::CheckStackGuardState()");
Add(ExternalReference::re_grow_stack(isolate).address(),
UNCLASSIFIED,
- 25,
+ 26,
"NativeRegExpMacroAssembler::GrowStack()");
Add(ExternalReference::re_word_character_map().address(),
UNCLASSIFIED,
- 26,
+ 27,
"NativeRegExpMacroAssembler::word_character_map");
#endif // V8_INTERPRETED_REGEXP
// Keyed lookup cache.
Add(ExternalReference::keyed_lookup_cache_keys(isolate).address(),
UNCLASSIFIED,
- 27,
+ 28,
"KeyedLookupCache::keys()");
Add(ExternalReference::keyed_lookup_cache_field_offsets(isolate).address(),
UNCLASSIFIED,
- 28,
+ 29,
"KeyedLookupCache::field_offsets()");
Add(ExternalReference::transcendental_cache_array_address(isolate).address(),
UNCLASSIFIED,
- 29,
+ 30,
"TranscendentalCache::caches()");
Add(ExternalReference::handle_scope_next_address().address(),
UNCLASSIFIED,
- 30,
+ 31,
"HandleScope::next");
Add(ExternalReference::handle_scope_limit_address().address(),
UNCLASSIFIED,
- 31,
+ 32,
"HandleScope::limit");
Add(ExternalReference::handle_scope_level_address().address(),
UNCLASSIFIED,
- 32,
+ 33,
"HandleScope::level");
Add(ExternalReference::new_deoptimizer_function(isolate).address(),
UNCLASSIFIED,
- 33,
+ 34,
"Deoptimizer::New()");
Add(ExternalReference::compute_output_frames_function(isolate).address(),
UNCLASSIFIED,
- 34,
+ 35,
"Deoptimizer::ComputeOutputFrames()");
Add(ExternalReference::address_of_min_int().address(),
UNCLASSIFIED,
- 35,
+ 36,
"LDoubleConstant::min_int");
Add(ExternalReference::address_of_one_half().address(),
UNCLASSIFIED,
- 36,
+ 37,
"LDoubleConstant::one_half");
Add(ExternalReference::isolate_address().address(),
UNCLASSIFIED,
- 37,
+ 38,
"isolate");
Add(ExternalReference::address_of_minus_zero().address(),
UNCLASSIFIED,
- 38,
+ 39,
"LDoubleConstant::minus_zero");
Add(ExternalReference::address_of_negative_infinity().address(),
UNCLASSIFIED,
- 39,
+ 40,
"LDoubleConstant::negative_infinity");
Add(ExternalReference::power_double_double_function(isolate).address(),
UNCLASSIFIED,
- 40,
+ 41,
"power_double_double_function");
Add(ExternalReference::power_double_int_function(isolate).address(),
UNCLASSIFIED,
- 41,
+ 42,
"power_double_int_function");
- Add(ExternalReference::arguments_marker_location(isolate).address(),
+ Add(ExternalReference::store_buffer_top(isolate).address(),
UNCLASSIFIED,
- 42,
- "Factory::arguments_marker().location()");
+ 43,
+ "store_buffer_top");
+ Add(ExternalReference::address_of_canonical_non_hole_nan().address(),
+ UNCLASSIFIED,
+ 44,
+ "canonical_nan");
+ Add(ExternalReference::address_of_the_hole_nan().address(),
+ UNCLASSIFIED,
+ 45,
+ "the_hole_nan");
}
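// Illustrative sketch (not from the patch): the Add() calls above give every
// external address a stable small id, so a snapshot can store the id instead
// of a raw pointer that would differ from run to run. A minimal stand-alone
// model of that encode/decode round trip, with hypothetical names:
//
//   #include <map>
//   #include <vector>
//
//   struct ExternalReferenceTableModel {
//     std::map<const void*, int> address_to_id;   // serializer direction
//     std::vector<const void*> id_to_address;     // deserializer direction
//
//     void Add(const void* address) {
//       address_to_id[address] = static_cast<int>(id_to_address.size());
//       id_to_address.push_back(address);
//     }
//     int Encode(const void* address) { return address_to_id.at(address); }
//     const void* Decode(int id) { return id_to_address.at(id); }
//   };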
@@ -569,6 +589,7 @@ Address Deserializer::Allocate(int space_index, Space* space, int size) {
maybe_new_allocation =
reinterpret_cast<PagedSpace*>(space)->AllocateRaw(size);
}
+ ASSERT(!maybe_new_allocation->IsFailure());
Object* new_allocation = maybe_new_allocation->ToObjectUnchecked();
HeapObject* new_object = HeapObject::cast(new_allocation);
address = new_object->address();
@@ -577,14 +598,13 @@ Address Deserializer::Allocate(int space_index, Space* space, int size) {
ASSERT(SpaceIsLarge(space_index));
LargeObjectSpace* lo_space = reinterpret_cast<LargeObjectSpace*>(space);
Object* new_allocation;
- if (space_index == kLargeData) {
- new_allocation = lo_space->AllocateRaw(size)->ToObjectUnchecked();
- } else if (space_index == kLargeFixedArray) {
+ if (space_index == kLargeData || space_index == kLargeFixedArray) {
new_allocation =
- lo_space->AllocateRawFixedArray(size)->ToObjectUnchecked();
+ lo_space->AllocateRaw(size, NOT_EXECUTABLE)->ToObjectUnchecked();
} else {
ASSERT_EQ(kLargeCode, space_index);
- new_allocation = lo_space->AllocateRawCode(size)->ToObjectUnchecked();
+ new_allocation =
+ lo_space->AllocateRaw(size, EXECUTABLE)->ToObjectUnchecked();
}
HeapObject* new_object = HeapObject::cast(new_allocation);
// Record all large objects in the same space.
@@ -629,6 +649,7 @@ HeapObject* Deserializer::GetAddressFromStart(int space) {
void Deserializer::Deserialize() {
isolate_ = Isolate::Current();
+ ASSERT(isolate_ != NULL);
// Don't GC while deserializing - just expand the heap.
AlwaysAllocateScope always_allocate;
// Don't use the free lists while deserializing.
@@ -648,6 +669,14 @@ void Deserializer::Deserialize() {
isolate_->heap()->set_global_contexts_list(
isolate_->heap()->undefined_value());
+
+ // Update data pointers to the external strings containing natives sources.
+ for (int i = 0; i < Natives::GetBuiltinsCount(); i++) {
+ Object* source = isolate_->heap()->natives_source_cache()->get(i);
+ if (!source->IsUndefined()) {
+ ExternalAsciiString::cast(source)->update_data_cache();
+ }
+ }
}
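// Note (not from the patch): the loop above is needed because an
// ExternalAsciiString caches the raw char* of its resource data, and for the
// natives sources that pointer is only valid in the process that produced
// the snapshot. A freshly deserialized heap therefore refreshes each cached
// pointer via update_data_cache().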
@@ -685,9 +714,8 @@ void Deserializer::VisitPointers(Object** start, Object** end) {
// This routine writes the new object into the pointer provided and then
// returns true if the new object was in young space and false otherwise.
// The reason for this strange interface is that otherwise the object is
-// written very late, which means the ByteArray map is not set up by the
-// time we need to use it to mark the space at the end of a page free (by
-// making it into a byte array).
+// written very late, which means the FreeSpace map is not set up by the
+// time we need to use it to mark the space at the end of a page free.
void Deserializer::ReadObject(int space_number,
Space* space,
Object** write_back) {
@@ -737,8 +765,13 @@ static const int kUnknownOffsetFromStart = -1;
void Deserializer::ReadChunk(Object** current,
Object** limit,
int source_space,
- Address address) {
+ Address current_object_address) {
Isolate* const isolate = isolate_;
+ bool write_barrier_needed = (current_object_address != NULL &&
+ source_space != NEW_SPACE &&
+ source_space != CELL_SPACE &&
+ source_space != CODE_SPACE &&
+ source_space != OLD_DATA_SPACE);
while (current < limit) {
int data = source_->Get();
switch (data) {
@@ -758,8 +791,7 @@ void Deserializer::ReadChunk(Object** current,
if (where == kNewObject && how == kPlain && within == kStartOfObject) {\
ASSIGN_DEST_SPACE(space_number) \
ReadObject(space_number, dest_space, current); \
- emit_write_barrier = \
- (space_number == NEW_SPACE && source_space != NEW_SPACE); \
+ emit_write_barrier = (space_number == NEW_SPACE); \
} else { \
Object* new_object = NULL; /* May not be a real Object pointer. */ \
if (where == kNewObject) { \
@@ -767,25 +799,25 @@ void Deserializer::ReadChunk(Object** current,
ReadObject(space_number, dest_space, &new_object); \
} else if (where == kRootArray) { \
int root_id = source_->GetInt(); \
- new_object = isolate->heap()->roots_address()[root_id]; \
+ new_object = isolate->heap()->roots_array_start()[root_id]; \
+ emit_write_barrier = isolate->heap()->InNewSpace(new_object); \
} else if (where == kPartialSnapshotCache) { \
int cache_index = source_->GetInt(); \
new_object = isolate->serialize_partial_snapshot_cache() \
[cache_index]; \
+ emit_write_barrier = isolate->heap()->InNewSpace(new_object); \
} else if (where == kExternalReference) { \
int reference_id = source_->GetInt(); \
Address address = external_reference_decoder_-> \
Decode(reference_id); \
new_object = reinterpret_cast<Object*>(address); \
} else if (where == kBackref) { \
- emit_write_barrier = \
- (space_number == NEW_SPACE && source_space != NEW_SPACE); \
+ emit_write_barrier = (space_number == NEW_SPACE); \
new_object = GetAddressFromEnd(data & kSpaceMask); \
} else { \
ASSERT(where == kFromStart); \
if (offset_from_start == kUnknownOffsetFromStart) { \
- emit_write_barrier = \
- (space_number == NEW_SPACE && source_space != NEW_SPACE); \
+ emit_write_barrier = (space_number == NEW_SPACE); \
new_object = GetAddressFromStart(data & kSpaceMask); \
} else { \
Address object_address = pages_[space_number][0] + \
@@ -812,12 +844,14 @@ void Deserializer::ReadChunk(Object** current,
*current = new_object; \
} \
} \
- if (emit_write_barrier) { \
- isolate->heap()->RecordWrite(address, static_cast<int>( \
- reinterpret_cast<Address>(current) - address)); \
+ if (emit_write_barrier && write_barrier_needed) { \
+ Address current_address = reinterpret_cast<Address>(current); \
+ isolate->heap()->RecordWrite( \
+ current_object_address, \
+ static_cast<int>(current_address - current_object_address)); \
} \
if (!current_was_incremented) { \
- current++; /* Increment current if it wasn't done above. */ \
+ current++; \
} \
break; \
} \
@@ -864,11 +898,17 @@ void Deserializer::ReadChunk(Object** current,
CASE_STATEMENT(where, how, within, kLargeCode) \
CASE_BODY(where, how, within, kLargeCode, kUnknownOffsetFromStart)
-#define EMIT_COMMON_REFERENCE_PATTERNS(pseudo_space_number, \
- space_number, \
- offset_from_start) \
- CASE_STATEMENT(kFromStart, kPlain, kStartOfObject, pseudo_space_number) \
- CASE_BODY(kFromStart, kPlain, kStartOfObject, space_number, offset_from_start)
+#define FOUR_CASES(byte_code) \
+ case byte_code: \
+ case byte_code + 1: \
+ case byte_code + 2: \
+ case byte_code + 3:
+
+#define SIXTEEN_CASES(byte_code) \
+ FOUR_CASES(byte_code) \
+ FOUR_CASES(byte_code + 4) \
+ FOUR_CASES(byte_code + 8) \
+ FOUR_CASES(byte_code + 12)
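// Illustration (not from the patch): FOUR_CASES(byte_code) expands to four
// adjacent case labels, so SIXTEEN_CASES(kRootArrayLowConstants) covers the
// sixteen byte codes 0xb0..0xbf with one shared body:
//
//   switch (data) {
//     SIXTEEN_CASES(0xb0) {     // case 0xb0: case 0xb1: ... case 0xbf:
//       // shared body
//       break;
//     }
//   }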
// We generate 15 cases and bodies that process special tags that combine
// the raw data tag and the length into one byte.
@@ -892,6 +932,38 @@ void Deserializer::ReadChunk(Object** current,
break;
}
+ SIXTEEN_CASES(kRootArrayLowConstants)
+ SIXTEEN_CASES(kRootArrayHighConstants) {
+ int root_id = RootArrayConstantFromByteCode(data);
+ Object* object = isolate->heap()->roots_array_start()[root_id];
+ ASSERT(!isolate->heap()->InNewSpace(object));
+ *current++ = object;
+ break;
+ }
+
+ case kRepeat: {
+ int repeats = source_->GetInt();
+ Object* object = current[-1];
+ ASSERT(!isolate->heap()->InNewSpace(object));
+ for (int i = 0; i < repeats; i++) current[i] = object;
+ current += repeats;
+ break;
+ }
+
+ STATIC_ASSERT(kRootArrayNumberOfConstantEncodings ==
+ Heap::kOldSpaceRoots);
+ STATIC_ASSERT(kMaxRepeats == 12);
+ FOUR_CASES(kConstantRepeat)
+ FOUR_CASES(kConstantRepeat + 4)
+ FOUR_CASES(kConstantRepeat + 8) {
+ int repeats = RepeatsForCode(data);
+ Object* object = current[-1];
+ ASSERT(!isolate->heap()->InNewSpace(object));
+ for (int i = 0; i < repeats; i++) current[i] = object;
+ current += repeats;
+ break;
+ }
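// Worked example (not from the patch): byte code 0x76 decodes as
// RepeatsForCode(0x76) == 0x76 - 0x73 == 3, i.e. "write the word before
// current three more times":
//
//   Object* object = current[-1];            // some old-space root
//   for (int i = 0; i < 3; i++) current[i] = object;
//   current += 3;
//
// Runs longer than kMaxRepeats (12) fall back to the explicit kRepeat tag
// with the count encoded separately.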
+
// Deserialize a new object and write a pointer to it to the current
// object.
ONE_PER_SPACE(kNewObject, kPlain, kStartOfObject)
@@ -917,9 +989,6 @@ void Deserializer::ReadChunk(Object** current,
// start and write a pointer to its first instruction to the current code
// object.
ALL_SPACES(kFromStart, kFromCode, kFirstInstruction)
- // Find an already deserialized object at one of the predetermined popular
- // offsets from the start and write a pointer to it in the current object.
- COMMON_REFERENCE_PATTERNS(EMIT_COMMON_REFERENCE_PATTERNS)
// Find an object in the roots array and write a pointer to it to the
// current object.
CASE_STATEMENT(kRootArray, kPlain, kStartOfObject, 0)
@@ -961,7 +1030,6 @@ void Deserializer::ReadChunk(Object** current,
#undef CASE_BODY
#undef ONE_PER_SPACE
#undef ALL_SPACES
-#undef EMIT_COMMON_REFERENCE_PATTERNS
#undef ASSIGN_DEST_SPACE
case kNewPage: {
@@ -973,6 +1041,11 @@ void Deserializer::ReadChunk(Object** current,
break;
}
+ case kSkip: {
+ current++;
+ break;
+ }
+
case kNativesStringResource: {
int index = source_->Get();
Vector<const char> source_vector = Natives::GetRawScriptSource(index);
@@ -1043,7 +1116,8 @@ Serializer::Serializer(SnapshotByteSink* sink)
: sink_(sink),
current_root_index_(0),
external_reference_encoder_(new ExternalReferenceEncoder),
- large_object_total_(0) {
+ large_object_total_(0),
+ root_index_wave_front_(0) {
// The serializer is meant to be used only to generate initial heap images
// from a context in which there is only one isolate.
ASSERT(Isolate::Current()->IsDefaultIsolate());
@@ -1066,11 +1140,8 @@ void StartupSerializer::SerializeStrongReferences() {
CHECK(isolate->handle_scope_implementer()->blocks()->is_empty());
CHECK_EQ(0, isolate->global_handles()->NumberOfWeakHandles());
// We don't support serializing installed extensions.
- for (RegisteredExtension* ext = v8::RegisteredExtension::first_extension();
- ext != NULL;
- ext = ext->next()) {
- CHECK_NE(v8::INSTALLED, ext->state());
- }
+ CHECK(!isolate->has_installed_extensions());
+
HEAP->IterateStrongRoots(this, VISIT_ONLY_STRONG);
}
@@ -1097,8 +1168,17 @@ void PartialSerializer::Serialize(Object** object) {
void Serializer::VisitPointers(Object** start, Object** end) {
+ Isolate* isolate = Isolate::Current();
+
for (Object** current = start; current < end; current++) {
- if ((*current)->IsSmi()) {
+ if (start == isolate->heap()->roots_array_start()) {
+ root_index_wave_front_ =
+ Max(root_index_wave_front_, static_cast<intptr_t>(current - start));
+ }
+ if (reinterpret_cast<Address>(current) ==
+ isolate->heap()->store_buffer()->TopAddress()) {
+ sink_->Put(kSkip, "Skip");
+ } else if ((*current)->IsSmi()) {
sink_->Put(kRawData, "RawData");
sink_->PutInt(kPointerSize, "length");
for (int i = 0; i < kPointerSize; i++) {
@@ -1162,10 +1242,12 @@ int PartialSerializer::PartialSnapshotCacheIndex(HeapObject* heap_object) {
}
-int PartialSerializer::RootIndex(HeapObject* heap_object) {
- for (int i = 0; i < Heap::kRootListLength; i++) {
- Object* root = HEAP->roots_address()[i];
- if (root == heap_object) return i;
+int Serializer::RootIndex(HeapObject* heap_object) {
+ Heap* heap = HEAP;
+ if (heap->InNewSpace(heap_object)) return kInvalidRootIndex;
+ for (int i = 0; i < root_index_wave_front_; i++) {
+ Object* root = heap->roots_array_start()[i];
+ if (!root->IsSmi() && root == heap_object) return i;
}
return kInvalidRootIndex;
}
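// Note (not from the patch): root_index_wave_front_ only grows as the root
// list itself is serialized, so RootIndex() matches nothing but roots the
// deserializer will already have filled in by the time the reference is read
// back. New-space objects are rejected outright; the compact root encodings
// assume old-space values (see the ASSERTs in the deserializer's
// kRootArrayLowConstants and repeat handlers).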
@@ -1201,18 +1283,8 @@ void Serializer::SerializeReferenceToPreviousObject(
// all objects) then we should shift out the bits that are always 0.
if (!SpaceIsLarge(space)) address >>= kObjectAlignmentBits;
if (from_start) {
-#define COMMON_REFS_CASE(pseudo_space, actual_space, offset) \
- if (space == actual_space && address == offset && \
- how_to_code == kPlain && where_to_point == kStartOfObject) { \
- sink_->Put(kFromStart + how_to_code + where_to_point + \
- pseudo_space, "RefSer"); \
- } else /* NOLINT */
- COMMON_REFERENCE_PATTERNS(COMMON_REFS_CASE)
-#undef COMMON_REFS_CASE
- { /* NOLINT */
- sink_->Put(kFromStart + how_to_code + where_to_point + space, "RefSer");
- sink_->PutInt(address, "address");
- }
+ sink_->Put(kFromStart + how_to_code + where_to_point + space, "RefSer");
+ sink_->PutInt(address, "address");
} else {
sink_->Put(kBackref + how_to_code + where_to_point + space, "BackRefSer");
sink_->PutInt(address, "address");
@@ -1227,6 +1299,12 @@ void StartupSerializer::SerializeObject(
CHECK(o->IsHeapObject());
HeapObject* heap_object = HeapObject::cast(o);
+ int root_index;
+ if ((root_index = RootIndex(heap_object)) != kInvalidRootIndex) {
+ PutRoot(root_index, heap_object, how_to_code, where_to_point);
+ return;
+ }
+
if (address_mapper_.IsMapped(heap_object)) {
int space = SpaceOfAlreadySerializedObject(heap_object);
int address = address_mapper_.MappedTo(heap_object);
@@ -1257,6 +1335,28 @@ void StartupSerializer::SerializeWeakReferences() {
}
+void Serializer::PutRoot(int root_index,
+ HeapObject* object,
+ SerializerDeserializer::HowToCode how_to_code,
+ SerializerDeserializer::WhereToPoint where_to_point) {
+ if (how_to_code == kPlain &&
+ where_to_point == kStartOfObject &&
+ root_index < kRootArrayNumberOfConstantEncodings &&
+ !HEAP->InNewSpace(object)) {
+ if (root_index < kRootArrayNumberOfLowConstantEncodings) {
+ sink_->Put(kRootArrayLowConstants + root_index, "RootLoConstant");
+ } else {
+ sink_->Put(kRootArrayHighConstants + root_index -
+ kRootArrayNumberOfLowConstantEncodings,
+ "RootHiConstant");
+ }
+ } else {
+ sink_->Put(kRootArray + how_to_code + where_to_point, "RootSerialization");
+ sink_->PutInt(root_index, "root_index");
+ }
+}
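// Worked example (not from the patch): with kRootArrayLowConstants == 0xb0,
// kRootArrayHighConstants == 0xf0 and 16 low encodings, PutRoot emits
//
//   root_index  3  ->  single byte 0xb0 + 3         == 0xb3
//   root_index 19  ->  single byte 0xf0 + (19 - 16) == 0xf3
//   root_index 40  ->  kRootArray tag, then the index via sink_->PutInt()
//
// (40 >= kRootArrayNumberOfConstantEncodings, so it takes the generic path.)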
+
+
void PartialSerializer::SerializeObject(
Object* o,
HowToCode how_to_code,
@@ -1266,8 +1366,7 @@ void PartialSerializer::SerializeObject(
int root_index;
if ((root_index = RootIndex(heap_object)) != kInvalidRootIndex) {
- sink_->Put(kRootArray + how_to_code + where_to_point, "RootSerialization");
- sink_->PutInt(root_index, "root_index");
+ PutRoot(root_index, heap_object, how_to_code, where_to_point);
return;
}
@@ -1345,14 +1444,48 @@ void Serializer::ObjectSerializer::VisitPointers(Object** start,
if (current < end) OutputRawData(reinterpret_cast<Address>(current));
while (current < end && !(*current)->IsSmi()) {
- serializer_->SerializeObject(*current, kPlain, kStartOfObject);
- bytes_processed_so_far_ += kPointerSize;
- current++;
+ HeapObject* current_contents = HeapObject::cast(*current);
+ int root_index = serializer_->RootIndex(current_contents);
+ // Repeats are not subject to the write barrier, so only some objects can
+ // be used in a repeat encoding: the early entries in the root array,
+ // which are never in new space.
+ if (current != start &&
+ root_index != kInvalidRootIndex &&
+ root_index < kRootArrayNumberOfConstantEncodings &&
+ current_contents == current[-1]) {
+ ASSERT(!HEAP->InNewSpace(current_contents));
+ int repeat_count = 1;
+ while (current < end - 1 && current[repeat_count] == current_contents) {
+ repeat_count++;
+ }
+ current += repeat_count;
+ bytes_processed_so_far_ += repeat_count * kPointerSize;
+ if (repeat_count > kMaxRepeats) {
+ sink_->Put(kRepeat, "SerializeRepeats");
+ sink_->PutInt(repeat_count, "SerializeRepeats");
+ } else {
+ sink_->Put(CodeForRepeats(repeat_count), "SerializeRepeats");
+ }
+ } else {
+ serializer_->SerializeObject(current_contents, kPlain, kStartOfObject);
+ bytes_processed_so_far_ += kPointerSize;
+ current++;
+ }
}
}
}
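// Illustration (not from the patch): for a FixedArray whose slots hold
// { the_hole, the_hole, the_hole, obj }, the loop above serializes the first
// hole normally, then sees current[-1] == current[0] and counts the run,
// emitting the single byte CodeForRepeats(2) in place of two more full root
// references before moving on to obj.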
+void Serializer::ObjectSerializer::VisitEmbeddedPointer(RelocInfo* rinfo) {
+ Object** current = rinfo->target_object_address();
+
+ OutputRawData(rinfo->target_address_address());
+ HowToCode representation = rinfo->IsCodedSpecially() ? kFromCode : kPlain;
+ serializer_->SerializeObject(*current, representation, kStartOfObject);
+ bytes_processed_so_far_ += rinfo->target_address_size();
+}
+
+
void Serializer::ObjectSerializer::VisitExternalReferences(Address* start,
Address* end) {
Address references_start = reinterpret_cast<Address>(start);
@@ -1367,6 +1500,20 @@ void Serializer::ObjectSerializer::VisitExternalReferences(Address* start,
}
+void Serializer::ObjectSerializer::VisitExternalReference(RelocInfo* rinfo) {
+ Address references_start = rinfo->target_address_address();
+ OutputRawData(references_start);
+
+ Address* current = rinfo->target_reference_address();
+ int representation = rinfo->IsCodedSpecially() ?
+ kFromCode + kStartOfObject : kPlain + kStartOfObject;
+ sink_->Put(kExternalReference + representation, "ExternalRef");
+ int reference_id = serializer_->EncodeExternalReference(*current);
+ sink_->PutInt(reference_id, "reference id");
+ bytes_processed_so_far_ += rinfo->target_address_size();
+}
+
+
void Serializer::ObjectSerializer::VisitRuntimeEntry(RelocInfo* rinfo) {
Address target_start = rinfo->target_address_address();
OutputRawData(target_start);
@@ -1420,7 +1567,7 @@ void Serializer::ObjectSerializer::VisitExternalAsciiString(
if (!source->IsUndefined()) {
ExternalAsciiString* string = ExternalAsciiString::cast(source);
typedef v8::String::ExternalAsciiStringResource Resource;
- Resource* resource = string->resource();
+ const Resource* resource = string->resource();
if (resource == *resource_pointer) {
sink_->Put(kNativesStringResource, "NativesStringResource");
sink_->PutSection(i, "NativesStringResourceEnd");
diff --git a/deps/v8/src/serialize.h b/deps/v8/src/serialize.h
index 66d6fb511..ff10905b9 100644
--- a/deps/v8/src/serialize.h
+++ b/deps/v8/src/serialize.h
@@ -187,24 +187,6 @@ class SnapshotByteSource {
};
-// It is very common to have a reference to objects at certain offsets in the
-// heap. These offsets have been determined experimentally. We code
-// references to such objects in a single byte that encodes the way the pointer
-// is written (only plain pointers allowed), the space number and the offset.
-// This only works for objects in the first page of a space. Don't use this for
-// things in newspace since it bypasses the write barrier.
-
-static const int k64 = (sizeof(uintptr_t) - 4) / 4;
-
-#define COMMON_REFERENCE_PATTERNS(f) \
- f(kNumberOfSpaces, 2, (11 - k64)) \
- f((kNumberOfSpaces + 1), 2, 0) \
- f((kNumberOfSpaces + 2), 2, (142 - 16 * k64)) \
- f((kNumberOfSpaces + 3), 2, (74 - 15 * k64)) \
- f((kNumberOfSpaces + 4), 2, 5) \
- f((kNumberOfSpaces + 5), 1, 135) \
- f((kNumberOfSpaces + 6), 2, (228 - 39 * k64))
-
#define COMMON_RAW_LENGTHS(f) \
f(1, 1) \
f(2, 2) \
@@ -238,10 +220,11 @@ class SerializerDeserializer: public ObjectVisitor {
kRootArray = 0x9, // Object is found in root array.
kPartialSnapshotCache = 0xa, // Object is in the cache.
kExternalReference = 0xb, // Pointer to an external reference.
- // 0xc-0xf Free.
+ kSkip = 0xc, // Skip a pointer-sized cell.
+ // 0xd-0xf Free.
kBackref = 0x10, // Object is described relative to end.
// 0x11-0x18 One per space.
- // 0x19-0x1f Common backref offsets.
+ // 0x19-0x1f Free.
kFromStart = 0x20, // Object is described relative to start.
// 0x21-0x28 One per space.
// 0x29-0x2f Free.
@@ -278,9 +261,29 @@ class SerializerDeserializer: public ObjectVisitor {
// is referred to from external strings in the snapshot.
static const int kNativesStringResource = 0x71;
static const int kNewPage = 0x72;
- // 0x73-0x7f Free.
- // 0xb0-0xbf Free.
- // 0xf0-0xff Free.
+ static const int kRepeat = 0x73;
+ static const int kConstantRepeat = 0x74;
+ // 0x74-0x7f Repeat last word (subtract 0x73 to get the count).
+ static const int kMaxRepeats = 0x7f - 0x73;
+ static int CodeForRepeats(int repeats) {
+ ASSERT(repeats >= 1 && repeats <= kMaxRepeats);
+ return 0x73 + repeats;
+ }
+ static int RepeatsForCode(int byte_code) {
+ ASSERT(byte_code >= kConstantRepeat && byte_code <= 0x7f);
+ return byte_code - 0x73;
+ }
+ static const int kRootArrayLowConstants = 0xb0;
+ // 0xb0-0xbf Things from the first 16 elements of the root array.
+ static const int kRootArrayHighConstants = 0xf0;
+ // 0xf0-0xff Things from the next 16 elements of the root array.
+ static const int kRootArrayNumberOfConstantEncodings = 0x20;
+ static const int kRootArrayNumberOfLowConstantEncodings = 0x10;
+ static int RootArrayConstantFromByteCode(int byte_code) {
+ int constant = (byte_code & 0xf) | ((byte_code & 0x40) >> 2);
+ ASSERT(constant >= 0 && constant < kRootArrayNumberOfConstantEncodings);
+ return constant;
+ }
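// Worked check (not from the patch): the bit trick bridges the gap between
// the 0xb0 and 0xf0 blocks. Bit 0x40 is clear for 0xb0..0xbf and set for
// 0xf0..0xff, and (0x40 >> 2) == 16, so
//
//   RootArrayConstantFromByteCode(0xb3) == 0x3 | 0  == 3
//   RootArrayConstantFromByteCode(0xf3) == 0x3 | 16 == 19
//
// which is exactly the inverse of the encoding in Serializer::PutRoot.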
static const int kLargeData = LAST_SPACE;
@@ -353,7 +356,13 @@ class Deserializer: public SerializerDeserializer {
UNREACHABLE();
}
- void ReadChunk(Object** start, Object** end, int space, Address address);
+ // Fills in some heap data in an area from start to end (non-inclusive). The
+ // space id is used for the write barrier. The object_address is the address
+ // of the object we are writing into, or NULL if we are not writing into an
+ // object, i.e. if we are writing a series of tagged values that are not on the
+ // heap.
+ void ReadChunk(
+ Object** start, Object** end, int space, Address object_address);
HeapObject* GetAddressFromStart(int space);
inline HeapObject* GetAddressFromEnd(int space);
Address Allocate(int space_number, Space* space, int size);
@@ -474,14 +483,22 @@ class Serializer : public SerializerDeserializer {
static void TooLateToEnableNow() { too_late_to_enable_now_ = true; }
static bool enabled() { return serialization_enabled_; }
SerializationAddressMapper* address_mapper() { return &address_mapper_; }
+ void PutRoot(
+ int index, HeapObject* object, HowToCode how, WhereToPoint where);
#ifdef DEBUG
virtual void Synchronize(const char* tag);
#endif
protected:
static const int kInvalidRootIndex = -1;
- virtual int RootIndex(HeapObject* heap_object) = 0;
+
+ int RootIndex(HeapObject* heap_object);
virtual bool ShouldBeInThePartialSnapshotCache(HeapObject* o) = 0;
+ intptr_t root_index_wave_front() { return root_index_wave_front_; }
+ void set_root_index_wave_front(intptr_t value) {
+ ASSERT(value >= root_index_wave_front_);
+ root_index_wave_front_ = value;
+ }
class ObjectSerializer : public ObjectVisitor {
public:
@@ -497,7 +514,9 @@ class Serializer : public SerializerDeserializer {
bytes_processed_so_far_(0) { }
void Serialize();
void VisitPointers(Object** start, Object** end);
+ void VisitEmbeddedPointer(RelocInfo* target);
void VisitExternalReferences(Address* start, Address* end);
+ void VisitExternalReference(RelocInfo* rinfo);
void VisitCodeTarget(RelocInfo* target);
void VisitCodeEntry(Address entry_address);
void VisitGlobalPropertyCell(RelocInfo* rinfo);
@@ -557,6 +576,7 @@ class Serializer : public SerializerDeserializer {
static bool too_late_to_enable_now_;
int large_object_total_;
SerializationAddressMapper address_mapper_;
+ intptr_t root_index_wave_front_;
friend class ObjectSerializer;
friend class Deserializer;
@@ -571,6 +591,7 @@ class PartialSerializer : public Serializer {
SnapshotByteSink* sink)
: Serializer(sink),
startup_serializer_(startup_snapshot_serializer) {
+ set_root_index_wave_front(Heap::kStrongRootListLength);
}
// Serialize the objects reachable from a single object pointer.
@@ -580,7 +601,6 @@ class PartialSerializer : public Serializer {
WhereToPoint where_to_point);
protected:
- virtual int RootIndex(HeapObject* o);
virtual int PartialSnapshotCacheIndex(HeapObject* o);
virtual bool ShouldBeInThePartialSnapshotCache(HeapObject* o) {
// Scripts should be referred only through shared function infos. We can't
@@ -590,7 +610,7 @@ class PartialSerializer : public Serializer {
ASSERT(!o->IsScript());
return o->IsString() || o->IsSharedFunctionInfo() ||
o->IsHeapNumber() || o->IsCode() ||
- o->IsSerializedScopeInfo() ||
+ o->IsScopeInfo() ||
o->map() == HEAP->fixed_cow_array_map();
}
@@ -605,7 +625,7 @@ class StartupSerializer : public Serializer {
explicit StartupSerializer(SnapshotByteSink* sink) : Serializer(sink) {
// Clear the cache of objects used by the partial snapshot. After the
// strong roots have been serialized we can create a partial snapshot
- // which will repopulate the cache with objects neede by that partial
+ // which will repopulate the cache with objects needed by that partial
// snapshot.
Isolate::Current()->set_serialize_partial_snapshot_cache_length(0);
}
@@ -624,7 +644,6 @@ class StartupSerializer : public Serializer {
}
private:
- virtual int RootIndex(HeapObject* o) { return kInvalidRootIndex; }
virtual bool ShouldBeInThePartialSnapshotCache(HeapObject* o) {
return false;
}
diff --git a/deps/v8/src/spaces-inl.h b/deps/v8/src/spaces-inl.h
index 35d722409..1cfdc138c 100644
--- a/deps/v8/src/spaces-inl.h
+++ b/deps/v8/src/spaces-inl.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -37,355 +37,213 @@ namespace internal {
// -----------------------------------------------------------------------------
-// PageIterator
+// Bitmap
-bool PageIterator::has_next() {
- return prev_page_ != stop_page_;
-}
-
-
-Page* PageIterator::next() {
- ASSERT(has_next());
- prev_page_ = (prev_page_ == NULL)
- ? space_->first_page_
- : prev_page_->next_page();
- return prev_page_;
+void Bitmap::Clear(MemoryChunk* chunk) {
+ Bitmap* bitmap = chunk->markbits();
+ for (int i = 0; i < bitmap->CellsCount(); i++) bitmap->cells()[i] = 0;
+ chunk->ResetLiveBytes();
}
// -----------------------------------------------------------------------------
-// Page
-
-Page* Page::next_page() {
- return heap_->isolate()->memory_allocator()->GetNextPage(this);
-}
-
-
-Address Page::AllocationTop() {
- PagedSpace* owner = heap_->isolate()->memory_allocator()->PageOwner(this);
- return owner->PageAllocationTop(this);
-}
-
-
-Address Page::AllocationWatermark() {
- PagedSpace* owner = heap_->isolate()->memory_allocator()->PageOwner(this);
- if (this == owner->AllocationTopPage()) {
- return owner->top();
- }
- return address() + AllocationWatermarkOffset();
-}
-
+// PageIterator
-uint32_t Page::AllocationWatermarkOffset() {
- return static_cast<uint32_t>((flags_ & kAllocationWatermarkOffsetMask) >>
- kAllocationWatermarkOffsetShift);
-}
+PageIterator::PageIterator(PagedSpace* space)
+ : space_(space),
+ prev_page_(&space->anchor_),
+ next_page_(prev_page_->next_page()) { }
-void Page::SetAllocationWatermark(Address allocation_watermark) {
- if ((heap_->gc_state() == Heap::SCAVENGE) && IsWatermarkValid()) {
- // When iterating intergenerational references during scavenge
- // we might decide to promote an encountered young object.
- // We will allocate a space for such an object and put it
- // into the promotion queue to process it later.
- // If space for object was allocated somewhere beyond allocation
- // watermark this might cause garbage pointers to appear under allocation
- // watermark. To avoid visiting them during dirty regions iteration
- // which might be still in progress we store a valid allocation watermark
- // value and mark this page as having an invalid watermark.
- SetCachedAllocationWatermark(AllocationWatermark());
- InvalidateWatermark(true);
- }
- flags_ = (flags_ & kFlagsMask) |
- Offset(allocation_watermark) << kAllocationWatermarkOffsetShift;
- ASSERT(AllocationWatermarkOffset()
- == static_cast<uint32_t>(Offset(allocation_watermark)));
+bool PageIterator::has_next() {
+ return next_page_ != &space_->anchor_;
}
-void Page::SetCachedAllocationWatermark(Address allocation_watermark) {
- mc_first_forwarded = allocation_watermark;
+Page* PageIterator::next() {
+ ASSERT(has_next());
+ prev_page_ = next_page_;
+ next_page_ = next_page_->next_page();
+ return prev_page_;
}
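// Usage sketch (not from the patch): the anchor-based iterator walks the
// circular page list until it wraps back around to the anchor:
//
//   PageIterator it(space);
//   while (it.has_next()) {
//     Page* p = it.next();
//     // ... visit p ...
//   }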
-Address Page::CachedAllocationWatermark() {
- return mc_first_forwarded;
-}
+// -----------------------------------------------------------------------------
+// NewSpacePageIterator
-uint32_t Page::GetRegionMarks() {
- return dirty_regions_;
-}
+NewSpacePageIterator::NewSpacePageIterator(NewSpace* space)
+ : prev_page_(NewSpacePage::FromAddress(space->ToSpaceStart())->prev_page()),
+ next_page_(NewSpacePage::FromAddress(space->ToSpaceStart())),
+ last_page_(NewSpacePage::FromLimit(space->ToSpaceEnd())) { }
+NewSpacePageIterator::NewSpacePageIterator(SemiSpace* space)
+ : prev_page_(space->anchor()),
+ next_page_(prev_page_->next_page()),
+ last_page_(prev_page_->prev_page()) { }
-void Page::SetRegionMarks(uint32_t marks) {
- dirty_regions_ = marks;
+NewSpacePageIterator::NewSpacePageIterator(Address start, Address limit)
+ : prev_page_(NewSpacePage::FromAddress(start)->prev_page()),
+ next_page_(NewSpacePage::FromAddress(start)),
+ last_page_(NewSpacePage::FromLimit(limit)) {
+ SemiSpace::AssertValidRange(start, limit);
}
-int Page::GetRegionNumberForAddress(Address addr) {
- // Each page is divided into 256 byte regions. Each region has a corresponding
- // dirty mark bit in the page header. Region can contain intergenerational
- // references iff its dirty mark is set.
- // A normal 8K page contains exactly 32 regions so all region marks fit
- // into 32-bit integer field. To calculate a region number we just divide
- // offset inside page by region size.
- // A large page can contain more then 32 regions. But we want to avoid
- // additional write barrier code for distinguishing between large and normal
- // pages so we just ignore the fact that addr points into a large page and
- // calculate region number as if addr pointed into a normal 8K page. This way
- // we get a region number modulo 32 so for large pages several regions might
- // be mapped to a single dirty mark.
- ASSERT_PAGE_ALIGNED(this->address());
- STATIC_ASSERT((kPageAlignmentMask >> kRegionSizeLog2) < kBitsPerInt);
-
- // We are using masking with kPageAlignmentMask instead of Page::Offset()
- // to get an offset to the beginning of 8K page containing addr not to the
- // beginning of actual page which can be bigger then 8K.
- intptr_t offset_inside_normal_page = OffsetFrom(addr) & kPageAlignmentMask;
- return static_cast<int>(offset_inside_normal_page >> kRegionSizeLog2);
+bool NewSpacePageIterator::has_next() {
+ return prev_page_ != last_page_;
}
-uint32_t Page::GetRegionMaskForAddress(Address addr) {
- return 1 << GetRegionNumberForAddress(addr);
+NewSpacePage* NewSpacePageIterator::next() {
+ ASSERT(has_next());
+ prev_page_ = next_page_;
+ next_page_ = next_page_->next_page();
+ return prev_page_;
}
-uint32_t Page::GetRegionMaskForSpan(Address start, int length_in_bytes) {
- uint32_t result = 0;
- static const intptr_t kRegionMask = (1 << kRegionSizeLog2) - 1;
- if (length_in_bytes + (OffsetFrom(start) & kRegionMask) >= kPageSize) {
- result = kAllRegionsDirtyMarks;
- } else if (length_in_bytes > 0) {
- int start_region = GetRegionNumberForAddress(start);
- int end_region =
- GetRegionNumberForAddress(start + length_in_bytes - kPointerSize);
- uint32_t start_mask = (~0) << start_region;
- uint32_t end_mask = ~((~1) << end_region);
- result = start_mask & end_mask;
- // if end_region < start_region, the mask is ored.
- if (result == 0) result = start_mask | end_mask;
- }
-#ifdef DEBUG
- if (FLAG_enable_slow_asserts) {
- uint32_t expected = 0;
- for (Address a = start; a < start + length_in_bytes; a += kPointerSize) {
- expected |= GetRegionMaskForAddress(a);
+// -----------------------------------------------------------------------------
+// HeapObjectIterator
+HeapObject* HeapObjectIterator::FromCurrentPage() {
+ while (cur_addr_ != cur_end_) {
+ if (cur_addr_ == space_->top() && cur_addr_ != space_->limit()) {
+ cur_addr_ = space_->limit();
+ continue;
+ }
+ HeapObject* obj = HeapObject::FromAddress(cur_addr_);
+ int obj_size = (size_func_ == NULL) ? obj->Size() : size_func_(obj);
+ cur_addr_ += obj_size;
+ ASSERT(cur_addr_ <= cur_end_);
+ if (!obj->IsFiller()) {
+ ASSERT_OBJECT_SIZE(obj_size);
+ return obj;
}
- ASSERT(expected == result);
}
-#endif
- return result;
+ return NULL;
}
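// Note (not from the patch): FromCurrentPage skips the two kinds of
// non-object memory a precisely swept page can contain: the unallocated gap
// between space_->top() and space_->limit(), and filler objects. Typical use
// (assuming the iterator's usual Next() interface):
//
//   HeapObjectIterator it(heap->old_pointer_space());
//   for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
//     // obj is a live object on a precisely swept page
//   }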
-void Page::MarkRegionDirty(Address address) {
- SetRegionMarks(GetRegionMarks() | GetRegionMaskForAddress(address));
-}
+// -----------------------------------------------------------------------------
+// MemoryAllocator
+#ifdef ENABLE_HEAP_PROTECTION
-bool Page::IsRegionDirty(Address address) {
- return GetRegionMarks() & GetRegionMaskForAddress(address);
+void MemoryAllocator::Protect(Address start, size_t size) {
+ OS::Protect(start, size);
}
-void Page::ClearRegionMarks(Address start, Address end, bool reaches_limit) {
- int rstart = GetRegionNumberForAddress(start);
- int rend = GetRegionNumberForAddress(end);
-
- if (reaches_limit) {
- end += 1;
- }
-
- if ((rend - rstart) == 0) {
- return;
- }
-
- uint32_t bitmask = 0;
-
- if ((OffsetFrom(start) & kRegionAlignmentMask) == 0
- || (start == ObjectAreaStart())) {
- // First region is fully covered
- bitmask = 1 << rstart;
- }
+void MemoryAllocator::Unprotect(Address start,
+ size_t size,
+ Executability executable) {
+ OS::Unprotect(start, size, executable);
+}
- while (++rstart < rend) {
- bitmask |= 1 << rstart;
- }
- if (bitmask) {
- SetRegionMarks(GetRegionMarks() & ~bitmask);
- }
+void MemoryAllocator::ProtectChunkFromPage(Page* page) {
+ int id = GetChunkId(page);
+ OS::Protect(chunks_[id].address(), chunks_[id].size());
}
-void Page::FlipMeaningOfInvalidatedWatermarkFlag(Heap* heap) {
- heap->page_watermark_invalidated_mark_ ^= 1 << WATERMARK_INVALIDATED;
+void MemoryAllocator::UnprotectChunkFromPage(Page* page) {
+ int id = GetChunkId(page);
+ OS::Unprotect(chunks_[id].address(), chunks_[id].size(),
+ chunks_[id].owner()->executable() == EXECUTABLE);
}
+#endif
-bool Page::IsWatermarkValid() {
- return (flags_ & (1 << WATERMARK_INVALIDATED)) !=
- heap_->page_watermark_invalidated_mark_;
-}
+// --------------------------------------------------------------------------
+// PagedSpace
+Page* Page::Initialize(Heap* heap,
+ MemoryChunk* chunk,
+ Executability executable,
+ PagedSpace* owner) {
+ Page* page = reinterpret_cast<Page*>(chunk);
+ ASSERT(chunk->size() == static_cast<size_t>(kPageSize));
+ ASSERT(chunk->owner() == owner);
+ owner->IncreaseCapacity(Page::kObjectAreaSize);
+ owner->Free(page->ObjectAreaStart(),
+ static_cast<int>(page->ObjectAreaEnd() -
+ page->ObjectAreaStart()));
-void Page::InvalidateWatermark(bool value) {
- if (value) {
- flags_ = (flags_ & ~(1 << WATERMARK_INVALIDATED)) |
- heap_->page_watermark_invalidated_mark_;
- } else {
- flags_ =
- (flags_ & ~(1 << WATERMARK_INVALIDATED)) |
- (heap_->page_watermark_invalidated_mark_ ^
- (1 << WATERMARK_INVALIDATED));
- }
+ heap->incremental_marking()->SetOldSpacePageFlags(chunk);
- ASSERT(IsWatermarkValid() == !value);
+ return page;
}
-bool Page::GetPageFlag(PageFlag flag) {
- return (flags_ & static_cast<intptr_t>(1 << flag)) != 0;
+bool PagedSpace::Contains(Address addr) {
+ Page* p = Page::FromAddress(addr);
+ if (!p->is_valid()) return false;
+ return p->owner() == this;
}
-void Page::SetPageFlag(PageFlag flag, bool value) {
- if (value) {
- flags_ |= static_cast<intptr_t>(1 << flag);
+void MemoryChunk::set_scan_on_scavenge(bool scan) {
+ if (scan) {
+ if (!scan_on_scavenge()) heap_->increment_scan_on_scavenge_pages();
+ SetFlag(SCAN_ON_SCAVENGE);
} else {
- flags_ &= ~static_cast<intptr_t>(1 << flag);
+ if (scan_on_scavenge()) heap_->decrement_scan_on_scavenge_pages();
+ ClearFlag(SCAN_ON_SCAVENGE);
}
-}
-
-
-void Page::ClearPageFlags() {
- flags_ = 0;
-}
-
-
-void Page::ClearGCFields() {
- InvalidateWatermark(true);
- SetAllocationWatermark(ObjectAreaStart());
- if (heap_->gc_state() == Heap::SCAVENGE) {
- SetCachedAllocationWatermark(ObjectAreaStart());
+ heap_->incremental_marking()->SetOldSpacePageFlags(this);
+}
+
+
+MemoryChunk* MemoryChunk::FromAnyPointerAddress(Address addr) {
+ MemoryChunk* maybe = reinterpret_cast<MemoryChunk*>(
+ OffsetFrom(addr) & ~Page::kPageAlignmentMask);
+ if (maybe->owner() != NULL) return maybe;
+ LargeObjectIterator iterator(HEAP->lo_space());
+ for (HeapObject* o = iterator.Next(); o != NULL; o = iterator.Next()) {
+ // Fixed arrays are the only pointer-containing objects in large object
+ // space.
+ if (o->IsFixedArray()) {
+ MemoryChunk* chunk = MemoryChunk::FromAddress(o->address());
+ if (chunk->Contains(addr)) {
+ return chunk;
+ }
+ }
}
- SetRegionMarks(kAllRegionsCleanMarks);
+ UNREACHABLE();
+ return NULL;
}
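// Worked example (not from the patch): for anything on a normal page the
// owning chunk is found by masking alone. Assuming 1MB-aligned pages
// (kPageAlignmentMask == 0xFFFFF), an interior pointer 0x2a34567 maps to the
// chunk header at 0x2a34567 & ~0xFFFFF == 0x2a00000. Only large-object
// space needs the linear search, since its chunks span more than one
// alignment unit.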
-bool Page::WasInUseBeforeMC() {
- return GetPageFlag(WAS_IN_USE_BEFORE_MC);
-}
+PointerChunkIterator::PointerChunkIterator(Heap* heap)
+ : state_(kOldPointerState),
+ old_pointer_iterator_(heap->old_pointer_space()),
+ map_iterator_(heap->map_space()),
+ lo_iterator_(heap->lo_space()) { }
-void Page::SetWasInUseBeforeMC(bool was_in_use) {
- SetPageFlag(WAS_IN_USE_BEFORE_MC, was_in_use);
-}
-
-
-bool Page::IsLargeObjectPage() {
- return !GetPageFlag(IS_NORMAL_PAGE);
-}
-
-
-void Page::SetIsLargeObjectPage(bool is_large_object_page) {
- SetPageFlag(IS_NORMAL_PAGE, !is_large_object_page);
-}
-
-Executability Page::PageExecutability() {
- return GetPageFlag(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
-}
-
-
-void Page::SetPageExecutability(Executability executable) {
- SetPageFlag(IS_EXECUTABLE, executable == EXECUTABLE);
-}
-
-
-// -----------------------------------------------------------------------------
-// MemoryAllocator
-
-void MemoryAllocator::ChunkInfo::init(Address a, size_t s, PagedSpace* o) {
- address_ = a;
- size_ = s;
- owner_ = o;
- executable_ = (o == NULL) ? NOT_EXECUTABLE : o->executable();
- owner_identity_ = (o == NULL) ? FIRST_SPACE : o->identity();
-}
-
-
-bool MemoryAllocator::IsValidChunk(int chunk_id) {
- if (!IsValidChunkId(chunk_id)) return false;
-
- ChunkInfo& c = chunks_[chunk_id];
- return (c.address() != NULL) && (c.size() != 0) && (c.owner() != NULL);
-}
-
-
-bool MemoryAllocator::IsValidChunkId(int chunk_id) {
- return (0 <= chunk_id) && (chunk_id < max_nof_chunks_);
-}
-
-
-bool MemoryAllocator::IsPageInSpace(Page* p, PagedSpace* space) {
- ASSERT(p->is_valid());
-
- int chunk_id = GetChunkId(p);
- if (!IsValidChunkId(chunk_id)) return false;
-
- ChunkInfo& c = chunks_[chunk_id];
- return (c.address() <= p->address()) &&
- (p->address() < c.address() + c.size()) &&
- (space == c.owner());
-}
-
-
-Page* MemoryAllocator::GetNextPage(Page* p) {
- ASSERT(p->is_valid());
- intptr_t raw_addr = p->opaque_header & ~Page::kPageAlignmentMask;
- return Page::FromAddress(AddressFrom<Address>(raw_addr));
-}
-
-
-int MemoryAllocator::GetChunkId(Page* p) {
- ASSERT(p->is_valid());
- return static_cast<int>(p->opaque_header & Page::kPageAlignmentMask);
-}
-
-
-void MemoryAllocator::SetNextPage(Page* prev, Page* next) {
- ASSERT(prev->is_valid());
- int chunk_id = GetChunkId(prev);
- ASSERT_PAGE_ALIGNED(next->address());
- prev->opaque_header = OffsetFrom(next->address()) | chunk_id;
+Page* Page::next_page() {
+ ASSERT(next_chunk()->owner() == owner());
+ return static_cast<Page*>(next_chunk());
}
-PagedSpace* MemoryAllocator::PageOwner(Page* page) {
- int chunk_id = GetChunkId(page);
- ASSERT(IsValidChunk(chunk_id));
- return chunks_[chunk_id].owner();
+Page* Page::prev_page() {
+ ASSERT(prev_chunk()->owner() == owner());
+ return static_cast<Page*>(prev_chunk());
}
-bool MemoryAllocator::InInitialChunk(Address address) {
- if (initial_chunk_ == NULL) return false;
-
- Address start = static_cast<Address>(initial_chunk_->address());
- return (start <= address) && (address < start + initial_chunk_->size());
+void Page::set_next_page(Page* page) {
+ ASSERT(page->owner() == owner());
+ set_next_chunk(page);
}
-// --------------------------------------------------------------------------
-// PagedSpace
-
-bool PagedSpace::Contains(Address addr) {
- Page* p = Page::FromAddress(addr);
- if (!p->is_valid()) return false;
- return heap()->isolate()->memory_allocator()->IsPageInSpace(p, this);
+void Page::set_prev_page(Page* page) {
+ ASSERT(page->owner() == owner());
+ set_prev_chunk(page);
}
@@ -393,71 +251,72 @@ bool PagedSpace::Contains(Address addr) {
// not contain slow case logic (eg, move to the next page or try free list
// allocation) so it can be used by all the allocation functions and for all
// the paged spaces.
-HeapObject* PagedSpace::AllocateLinearly(AllocationInfo* alloc_info,
- int size_in_bytes) {
- Address current_top = alloc_info->top;
+HeapObject* PagedSpace::AllocateLinearly(int size_in_bytes) {
+ Address current_top = allocation_info_.top;
Address new_top = current_top + size_in_bytes;
- if (new_top > alloc_info->limit) return NULL;
+ if (new_top > allocation_info_.limit) return NULL;
- alloc_info->top = new_top;
- ASSERT(alloc_info->VerifyPagedAllocation());
- accounting_stats_.AllocateBytes(size_in_bytes);
+ allocation_info_.top = new_top;
return HeapObject::FromAddress(current_top);
}
// Raw allocation.
MaybeObject* PagedSpace::AllocateRaw(int size_in_bytes) {
- ASSERT(HasBeenSetup());
- ASSERT_OBJECT_SIZE(size_in_bytes);
- HeapObject* object = AllocateLinearly(&allocation_info_, size_in_bytes);
- if (object != NULL) return object;
+ HeapObject* object = AllocateLinearly(size_in_bytes);
+ if (object != NULL) {
+ if (identity() == CODE_SPACE) {
+ SkipList::Update(object->address(), size_in_bytes);
+ }
+ return object;
+ }
+
+ object = free_list_.Allocate(size_in_bytes);
+ if (object != NULL) {
+ if (identity() == CODE_SPACE) {
+ SkipList::Update(object->address(), size_in_bytes);
+ }
+ return object;
+ }
object = SlowAllocateRaw(size_in_bytes);
- if (object != NULL) return object;
+ if (object != NULL) {
+ if (identity() == CODE_SPACE) {
+ SkipList::Update(object->address(), size_in_bytes);
+ }
+ return object;
+ }
return Failure::RetryAfterGC(identity());
}
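// Summary (not from the patch): AllocateRaw now tries three tiers, updating
// the code-space skip list after whichever tier succeeds:
//
//   1. AllocateLinearly()     - bump allocation_info_.top
//   2. free_list_.Allocate()  - reuse memory freed by the sweeper
//   3. SlowAllocateRaw()      - grow the space or fall back further
//
// and only if all three fail does it return Failure::RetryAfterGC().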
-// Reallocating (and promoting) objects during a compacting collection.
-MaybeObject* PagedSpace::MCAllocateRaw(int size_in_bytes) {
- ASSERT(HasBeenSetup());
- ASSERT_OBJECT_SIZE(size_in_bytes);
- HeapObject* object = AllocateLinearly(&mc_forwarding_info_, size_in_bytes);
- if (object != NULL) return object;
-
- object = SlowMCAllocateRaw(size_in_bytes);
- if (object != NULL) return object;
+// -----------------------------------------------------------------------------
+// NewSpace
- return Failure::RetryAfterGC(identity());
-}
+MaybeObject* NewSpace::AllocateRaw(int size_in_bytes) {
+ Address old_top = allocation_info_.top;
+ if (allocation_info_.limit - old_top < size_in_bytes) {
+ return SlowAllocateRaw(size_in_bytes);
+ }
-// -----------------------------------------------------------------------------
-// NewSpace
+ Object* obj = HeapObject::FromAddress(allocation_info_.top);
+ allocation_info_.top += size_in_bytes;
+ ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
-MaybeObject* NewSpace::AllocateRawInternal(int size_in_bytes,
- AllocationInfo* alloc_info) {
- Address new_top = alloc_info->top + size_in_bytes;
- if (new_top > alloc_info->limit) return Failure::RetryAfterGC();
-
- Object* obj = HeapObject::FromAddress(alloc_info->top);
- alloc_info->top = new_top;
-#ifdef DEBUG
- SemiSpace* space =
- (alloc_info == &allocation_info_) ? &to_space_ : &from_space_;
- ASSERT(space->low() <= alloc_info->top
- && alloc_info->top <= space->high()
- && alloc_info->limit == space->high());
-#endif
return obj;
}
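// Stand-alone model (not from the patch): stripped of V8 types, the
// new-space fast path above is plain bump-pointer allocation:
//
//   #include <cstddef>
//
//   struct BumpAllocator {
//     char* top;
//     char* limit;
//
//     // Returns NULL when the caller must take the slow path.
//     char* Allocate(size_t size_in_bytes) {
//       if (static_cast<size_t>(limit - top) < size_in_bytes) return NULL;
//       char* result = top;
//       top += size_in_bytes;
//       return result;
//     }
//   };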
+LargePage* LargePage::Initialize(Heap* heap, MemoryChunk* chunk) {
+ heap->incremental_marking()->SetOldSpacePageFlags(chunk);
+ return static_cast<LargePage*>(chunk);
+}
+
+
intptr_t LargeObjectSpace::Available() {
- return LargeObjectChunk::ObjectSizeFor(
- heap()->isolate()->memory_allocator()->Available());
+ return ObjectSizeFor(heap()->isolate()->memory_allocator()->Available());
}
@@ -467,16 +326,23 @@ void NewSpace::ShrinkStringAtAllocationBoundary(String* string, int length) {
ASSERT(string->IsSeqString());
ASSERT(string->address() + StringType::SizeFor(string->length()) ==
allocation_info_.top);
+ Address old_top = allocation_info_.top;
allocation_info_.top =
string->address() + StringType::SizeFor(length);
string->set_length(length);
+ if (Marking::IsBlack(Marking::MarkBitFrom(string))) {
+ int delta = static_cast<int>(old_top - allocation_info_.top);
+ MemoryChunk::IncrementLiveBytes(string->address(), -delta);
+ }
}
bool FreeListNode::IsFreeListNode(HeapObject* object) {
- return object->map() == HEAP->raw_unchecked_byte_array_map()
- || object->map() == HEAP->raw_unchecked_one_pointer_filler_map()
- || object->map() == HEAP->raw_unchecked_two_pointer_filler_map();
+ Map* map = object->map();
+ Heap* heap = object->GetHeap();
+ return map == heap->raw_unchecked_free_space_map()
+ || map == heap->raw_unchecked_one_pointer_filler_map()
+ || map == heap->raw_unchecked_two_pointer_filler_map();
}
} } // namespace v8::internal
diff --git a/deps/v8/src/spaces.cc b/deps/v8/src/spaces.cc
index 97c6d2ac1..1be81dde3 100644
--- a/deps/v8/src/spaces.cc
+++ b/deps/v8/src/spaces.cc
@@ -35,112 +35,87 @@
namespace v8 {
namespace internal {
-// For contiguous spaces, top should be in the space (or at the end) and limit
-// should be the end of the space.
-#define ASSERT_SEMISPACE_ALLOCATION_INFO(info, space) \
- ASSERT((space).low() <= (info).top \
- && (info).top <= (space).high() \
- && (info).limit == (space).high())
// ----------------------------------------------------------------------------
// HeapObjectIterator
HeapObjectIterator::HeapObjectIterator(PagedSpace* space) {
- Initialize(space->bottom(), space->top(), NULL);
+ // You can't actually iterate over the anchor page. It is not a real page,
+ // just an anchor for the doubly linked page list. Initialize as if we have
+ // reached the end of the anchor page, so that the first iteration moves on
+ // to the first page.
+ Initialize(space,
+ NULL,
+ NULL,
+ kAllPagesInSpace,
+ NULL);
}
HeapObjectIterator::HeapObjectIterator(PagedSpace* space,
HeapObjectCallback size_func) {
- Initialize(space->bottom(), space->top(), size_func);
-}
-
-
-HeapObjectIterator::HeapObjectIterator(PagedSpace* space, Address start) {
- Initialize(start, space->top(), NULL);
-}
-
-
-HeapObjectIterator::HeapObjectIterator(PagedSpace* space, Address start,
- HeapObjectCallback size_func) {
- Initialize(start, space->top(), size_func);
+ // You can't actually iterate over the anchor page. It is not a real page,
+ // just an anchor for the doubly linked page list. Initialize the current
+ // address and end as NULL, so that the first iteration moves on
+ // to the first page.
+ Initialize(space,
+ NULL,
+ NULL,
+ kAllPagesInSpace,
+ size_func);
}
HeapObjectIterator::HeapObjectIterator(Page* page,
HeapObjectCallback size_func) {
- Initialize(page->ObjectAreaStart(), page->AllocationTop(), size_func);
-}
-
-
-void HeapObjectIterator::Initialize(Address cur, Address end,
+ Space* owner = page->owner();
+ ASSERT(owner == HEAP->old_pointer_space() ||
+ owner == HEAP->old_data_space() ||
+ owner == HEAP->map_space() ||
+ owner == HEAP->cell_space() ||
+ owner == HEAP->code_space());
+ Initialize(reinterpret_cast<PagedSpace*>(owner),
+ page->ObjectAreaStart(),
+ page->ObjectAreaEnd(),
+ kOnePageOnly,
+ size_func);
+ ASSERT(page->WasSweptPrecisely());
+}
+
+
+void HeapObjectIterator::Initialize(PagedSpace* space,
+ Address cur, Address end,
+ HeapObjectIterator::PageMode mode,
HeapObjectCallback size_f) {
+ // Check that we actually can iterate this space.
+ ASSERT(!space->was_swept_conservatively());
+
+ space_ = space;
cur_addr_ = cur;
- end_addr_ = end;
- end_page_ = Page::FromAllocationTop(end);
+ cur_end_ = end;
+ page_mode_ = mode;
size_func_ = size_f;
- Page* p = Page::FromAllocationTop(cur_addr_);
- cur_limit_ = (p == end_page_) ? end_addr_ : p->AllocationTop();
-
-#ifdef DEBUG
- Verify();
-#endif
}
-HeapObject* HeapObjectIterator::FromNextPage() {
- if (cur_addr_ == end_addr_) return NULL;
-
- Page* cur_page = Page::FromAllocationTop(cur_addr_);
+// We have hit the end of the page and should advance to the next block of
+// objects. Returns false if there are no further pages to advance to.
+bool HeapObjectIterator::AdvanceToNextPage() {
+ ASSERT(cur_addr_ == cur_end_);
+ if (page_mode_ == kOnePageOnly) return false;
+ Page* cur_page;
+ if (cur_addr_ == NULL) {
+ cur_page = space_->anchor();
+ } else {
+ cur_page = Page::FromAddress(cur_addr_ - 1);
+ ASSERT(cur_addr_ == cur_page->ObjectAreaEnd());
+ }
cur_page = cur_page->next_page();
- ASSERT(cur_page->is_valid());
-
+ if (cur_page == space_->anchor()) return false;
cur_addr_ = cur_page->ObjectAreaStart();
- cur_limit_ = (cur_page == end_page_) ? end_addr_ : cur_page->AllocationTop();
-
- if (cur_addr_ == end_addr_) return NULL;
- ASSERT(cur_addr_ < cur_limit_);
-#ifdef DEBUG
- Verify();
-#endif
- return FromCurrentPage();
-}
-
-
-#ifdef DEBUG
-void HeapObjectIterator::Verify() {
- Page* p = Page::FromAllocationTop(cur_addr_);
- ASSERT(p == Page::FromAllocationTop(cur_limit_));
- ASSERT(p->Offset(cur_addr_) <= p->Offset(cur_limit_));
-}
-#endif
-
-
-// -----------------------------------------------------------------------------
-// PageIterator
-
-PageIterator::PageIterator(PagedSpace* space, Mode mode) : space_(space) {
- prev_page_ = NULL;
- switch (mode) {
- case PAGES_IN_USE:
- stop_page_ = space->AllocationTopPage();
- break;
- case PAGES_USED_BY_MC:
- stop_page_ = space->MCRelocationTopPage();
- break;
- case ALL_PAGES:
-#ifdef DEBUG
- // Verify that the cached last page in the space is actually the
- // last page.
- for (Page* p = space->first_page_; p->is_valid(); p = p->next_page()) {
- if (!p->next_page()->is_valid()) {
- ASSERT(space->last_page_ == p);
- }
- }
-#endif
- stop_page_ = space->last_page_;
- break;
- }
+ cur_end_ = cur_page->ObjectAreaEnd();
+ ASSERT(cur_page->WasSweptPrecisely());
+ return true;
}
@@ -171,7 +146,12 @@ bool CodeRange::Setup(const size_t requested) {
// We are sure that we have mapped a block of requested addresses.
ASSERT(code_range_->size() == requested);
LOG(isolate_, NewEvent("CodeRange", code_range_->address(), requested));
- allocation_list_.Add(FreeBlock(code_range_->address(), code_range_->size()));
+ Address base = reinterpret_cast<Address>(code_range_->address());
+ Address aligned_base =
+ RoundUp(reinterpret_cast<Address>(code_range_->address()),
+ MemoryChunk::kAlignment);
+ size_t size = code_range_->size() - (aligned_base - base);
+ allocation_list_.Add(FreeBlock(aligned_base, size));
current_allocation_block_index_ = 0;
return true;
}
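// Worked example (not from the patch): if the OS hands back the code range
// at 0x2a34000 and MemoryChunk::kAlignment is 0x100000, then aligned_base is
// RoundUp(0x2a34000, 0x100000) == 0x2b00000 and the usable size shrinks by
// the 0xcc000-byte prefix, which stays reserved but is never handed out.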
@@ -228,7 +208,8 @@ void CodeRange::GetNextAllocationBlock(size_t requested) {
-void* CodeRange::AllocateRawMemory(const size_t requested, size_t* allocated) {
+Address CodeRange::AllocateRawMemory(const size_t requested,
+ size_t* allocated) {
ASSERT(current_allocation_block_index_ < allocation_list_.length());
if (requested > allocation_list_[current_allocation_block_index_].size) {
// Find an allocation block large enough. This function call may
@@ -236,13 +217,16 @@ void* CodeRange::AllocateRawMemory(const size_t requested, size_t* allocated) {
GetNextAllocationBlock(requested);
}
// Commit the requested memory at the start of the current allocation block.
- *allocated = RoundUp(requested, Page::kPageSize);
+ size_t aligned_requested = RoundUp(requested, MemoryChunk::kAlignment);
FreeBlock current = allocation_list_[current_allocation_block_index_];
- if (*allocated >= current.size - Page::kPageSize) {
+ if (aligned_requested >= (current.size - Page::kPageSize)) {
// Don't leave a small free block, useless for a large object or chunk.
*allocated = current.size;
+ } else {
+ *allocated = aligned_requested;
}
ASSERT(*allocated <= current.size);
+ ASSERT(IsAddressAligned(current.start, MemoryChunk::kAlignment));
if (!code_range_->Commit(current.start, *allocated, true)) {
*allocated = 0;
return NULL;
@@ -256,7 +240,8 @@ void* CodeRange::AllocateRawMemory(const size_t requested, size_t* allocated) {
}
-void CodeRange::FreeRawMemory(void* address, size_t length) {
+void CodeRange::FreeRawMemory(Address address, size_t length) {
+ ASSERT(IsAddressAligned(address, MemoryChunk::kAlignment));
free_list_.Add(FreeBlock(address, length));
code_range_->Uncommit(address, length);
}
@@ -274,35 +259,12 @@ void CodeRange::TearDown() {
// MemoryAllocator
//
-// 270 is an estimate based on the static default heap size of a pair of 256K
-// semispaces and a 64M old generation.
-const int kEstimatedNumberOfChunks = 270;
-
-
MemoryAllocator::MemoryAllocator(Isolate* isolate)
: isolate_(isolate),
capacity_(0),
capacity_executable_(0),
size_(0),
- size_executable_(0),
- initial_chunk_(NULL),
- chunks_(kEstimatedNumberOfChunks),
- free_chunk_ids_(kEstimatedNumberOfChunks),
- max_nof_chunks_(0),
- top_(0) {
-}
-
-
-void MemoryAllocator::Push(int free_chunk_id) {
- ASSERT(max_nof_chunks_ > 0);
- ASSERT(top_ < max_nof_chunks_);
- free_chunk_ids_[top_++] = free_chunk_id;
-}
-
-
-int MemoryAllocator::Pop() {
- ASSERT(top_ > 0);
- return free_chunk_ids_[--top_];
+ size_executable_(0) {
}
@@ -311,269 +273,303 @@ bool MemoryAllocator::Setup(intptr_t capacity, intptr_t capacity_executable) {
capacity_executable_ = RoundUp(capacity_executable, Page::kPageSize);
ASSERT_GE(capacity_, capacity_executable_);
- // Over-estimate the size of chunks_ array. It assumes the expansion of old
- // space is always in the unit of a chunk (kChunkSize) except the last
- // expansion.
- //
- // Due to alignment, allocated space might be one page less than required
- // number (kPagesPerChunk) of pages for old spaces.
- //
- // Reserve two chunk ids for semispaces, one for map space, one for old
- // space, and one for code space.
- max_nof_chunks_ =
- static_cast<int>((capacity_ / (kChunkSize - Page::kPageSize))) + 5;
- if (max_nof_chunks_ > kMaxNofChunks) return false;
-
size_ = 0;
size_executable_ = 0;
- ChunkInfo info; // uninitialized element.
- for (int i = max_nof_chunks_ - 1; i >= 0; i--) {
- chunks_.Add(info);
- free_chunk_ids_.Add(i);
- }
- top_ = max_nof_chunks_;
+
return true;
}
void MemoryAllocator::TearDown() {
- for (int i = 0; i < max_nof_chunks_; i++) {
- if (chunks_[i].address() != NULL) DeleteChunk(i);
- }
- chunks_.Clear();
- free_chunk_ids_.Clear();
-
- if (initial_chunk_ != NULL) {
- LOG(isolate_, DeleteEvent("InitialChunk", initial_chunk_->address()));
- delete initial_chunk_;
- initial_chunk_ = NULL;
- }
-
- ASSERT(top_ == max_nof_chunks_); // all chunks are free
- top_ = 0;
+ // Check that spaces were torn down before MemoryAllocator.
+ ASSERT(size_ == 0);
+ // TODO(gc) this will be true again when we fix FreeMemory.
+ // ASSERT(size_executable_ == 0);
capacity_ = 0;
capacity_executable_ = 0;
- size_ = 0;
- max_nof_chunks_ = 0;
}
-void* MemoryAllocator::AllocateRawMemory(const size_t requested,
- size_t* allocated,
- Executability executable) {
- if (size_ + static_cast<size_t>(requested) > static_cast<size_t>(capacity_)) {
- return NULL;
- }
+void MemoryAllocator::FreeMemory(VirtualMemory* reservation,
+ Executability executable) {
+ // TODO(gc) make code_range part of memory allocator?
+ ASSERT(reservation->IsReserved());
+ size_t size = reservation->size();
+ ASSERT(size_ >= size);
+ size_ -= size;
+
+ isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
- void* mem;
if (executable == EXECUTABLE) {
- // Check executable memory limit.
- if (size_executable_ + requested >
- static_cast<size_t>(capacity_executable_)) {
- LOG(isolate_,
- StringEvent("MemoryAllocator::AllocateRawMemory",
- "V8 Executable Allocation capacity exceeded"));
- return NULL;
- }
- // Allocate executable memory either from code range or from the
- // OS.
- if (isolate_->code_range()->exists()) {
- mem = isolate_->code_range()->AllocateRawMemory(requested, allocated);
- } else {
- mem = OS::Allocate(requested, allocated, true);
- }
- // Update executable memory size.
- size_executable_ += static_cast<int>(*allocated);
- } else {
- mem = OS::Allocate(requested, allocated, false);
+ ASSERT(size_executable_ >= size);
+ size_executable_ -= size;
}
- int alloced = static_cast<int>(*allocated);
- size_ += alloced;
-
-#ifdef DEBUG
- ZapBlock(reinterpret_cast<Address>(mem), alloced);
-#endif
- isolate_->counters()->memory_allocated()->Increment(alloced);
- return mem;
+ // Code which is part of the code-range does not have its own VirtualMemory.
+ ASSERT(!isolate_->code_range()->contains(
+ static_cast<Address>(reservation->address())));
+ ASSERT(executable == NOT_EXECUTABLE || !isolate_->code_range()->exists());
+ reservation->Release();
}
-void MemoryAllocator::FreeRawMemory(void* mem,
- size_t length,
- Executability executable) {
-#ifdef DEBUG
- // Do not try to zap the guard page.
- size_t guard_size = (executable == EXECUTABLE) ? Page::kPageSize : 0;
- ZapBlock(reinterpret_cast<Address>(mem) + guard_size, length - guard_size);
-#endif
- if (isolate_->code_range()->contains(static_cast<Address>(mem))) {
- isolate_->code_range()->FreeRawMemory(mem, length);
+void MemoryAllocator::FreeMemory(Address base,
+ size_t size,
+ Executability executable) {
+ // TODO(gc) make code_range part of memory allocator?
+ ASSERT(size_ >= size);
+ size_ -= size;
+
+ isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
+
+ if (executable == EXECUTABLE) {
+ ASSERT(size_executable_ >= size);
+ size_executable_ -= size;
+ }
+ if (isolate_->code_range()->contains(static_cast<Address>(base))) {
+ ASSERT(executable == EXECUTABLE);
+ isolate_->code_range()->FreeRawMemory(base, size);
} else {
- OS::Free(mem, length);
+ ASSERT(executable == NOT_EXECUTABLE || !isolate_->code_range()->exists());
+ bool result = VirtualMemory::ReleaseRegion(base, size);
+ USE(result);
+ ASSERT(result);
}
- isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(length));
- size_ -= static_cast<int>(length);
- if (executable == EXECUTABLE) size_executable_ -= static_cast<int>(length);
+}
- ASSERT(size_ >= 0);
- ASSERT(size_executable_ >= 0);
+
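+// Reserve (but do not commit) an aligned block of address space and hand
+// ownership of the reservation to *controller. The returned base is
+// rounded up to the requested alignment, so it may lie past the start of
+// the reservation.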
+Address MemoryAllocator::ReserveAlignedMemory(size_t size,
+ size_t alignment,
+ VirtualMemory* controller) {
+ VirtualMemory reservation(size, alignment);
+
+ if (!reservation.IsReserved()) return NULL;
+ size_ += reservation.size();
+ Address base = RoundUp(static_cast<Address>(reservation.address()),
+ alignment);
+ controller->TakeControl(&reservation);
+ return base;
}
-void MemoryAllocator::PerformAllocationCallback(ObjectSpace space,
- AllocationAction action,
- size_t size) {
- for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
- MemoryAllocationCallbackRegistration registration =
- memory_allocation_callbacks_[i];
- if ((registration.space & space) == space &&
- (registration.action & action) == action)
- registration.callback(space, action, static_cast<int>(size));
+Address MemoryAllocator::AllocateAlignedMemory(size_t size,
+ size_t alignment,
+ Executability executable,
+ VirtualMemory* controller) {
+ VirtualMemory reservation;
+ Address base = ReserveAlignedMemory(size, alignment, &reservation);
+ if (base == NULL) return NULL;
+ if (!reservation.Commit(base,
+ size,
+ executable == EXECUTABLE)) {
+ return NULL;
}
+ controller->TakeControl(&reservation);
+ return base;
}
-bool MemoryAllocator::MemoryAllocationCallbackRegistered(
- MemoryAllocationCallback callback) {
- for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
- if (memory_allocation_callbacks_[i].callback == callback) return true;
- }
- return false;
+void Page::InitializeAsAnchor(PagedSpace* owner) {
+ set_owner(owner);
+ set_prev_page(this);
+ set_next_page(this);
}
-void MemoryAllocator::AddMemoryAllocationCallback(
- MemoryAllocationCallback callback,
- ObjectSpace space,
- AllocationAction action) {
- ASSERT(callback != NULL);
- MemoryAllocationCallbackRegistration registration(callback, space, action);
- ASSERT(!MemoryAllocator::MemoryAllocationCallbackRegistered(callback));
- return memory_allocation_callbacks_.Add(registration);
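+// A NewSpacePage is an ordinary MemoryChunk owned by a semispace;
+// semi_space->id() decides whether it starts out flagged IN_TO_SPACE or
+// IN_FROM_SPACE.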
+NewSpacePage* NewSpacePage::Initialize(Heap* heap,
+ Address start,
+ SemiSpace* semi_space) {
+ MemoryChunk* chunk = MemoryChunk::Initialize(heap,
+ start,
+ Page::kPageSize,
+ NOT_EXECUTABLE,
+ semi_space);
+ chunk->set_next_chunk(NULL);
+ chunk->set_prev_chunk(NULL);
+ chunk->initialize_scan_on_scavenge(true);
+ bool in_to_space = (semi_space->id() != kFromSpace);
+ chunk->SetFlag(in_to_space ? MemoryChunk::IN_TO_SPACE
+ : MemoryChunk::IN_FROM_SPACE);
+ ASSERT(!chunk->IsFlagSet(in_to_space ? MemoryChunk::IN_FROM_SPACE
+ : MemoryChunk::IN_TO_SPACE));
+ NewSpacePage* page = static_cast<NewSpacePage*>(chunk);
+ heap->incremental_marking()->SetNewSpacePageFlags(page);
+ return page;
}
-void MemoryAllocator::RemoveMemoryAllocationCallback(
- MemoryAllocationCallback callback) {
- ASSERT(callback != NULL);
- for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
- if (memory_allocation_callbacks_[i].callback == callback) {
- memory_allocation_callbacks_.Remove(i);
- return;
- }
- }
- UNREACHABLE();
+void NewSpacePage::InitializeAsAnchor(SemiSpace* semi_space) {
+ set_owner(semi_space);
+ set_next_chunk(this);
+ set_prev_chunk(this);
+ // Clearing all the flags marks this invalid page as not being in new-space.
+ // All real new-space pages will be in new-space.
+ SetFlags(0, ~0);
}
-void* MemoryAllocator::ReserveInitialChunk(const size_t requested) {
- ASSERT(initial_chunk_ == NULL);
- initial_chunk_ = new VirtualMemory(requested);
- CHECK(initial_chunk_ != NULL);
- if (!initial_chunk_->IsReserved()) {
- delete initial_chunk_;
- initial_chunk_ = NULL;
- return NULL;
- }
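+// MemoryChunk::Initialize writes the chunk header that lives at the start
+// of every aligned chunk: heap, size, flags and owner, plus cleared
+// slots-buffer/skip-list pointers, live-byte count and mark bitmap.
+// Page::Initialize and NewSpacePage::Initialize both build on it.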
+MemoryChunk* MemoryChunk::Initialize(Heap* heap,
+ Address base,
+ size_t size,
+ Executability executable,
+ Space* owner) {
+ MemoryChunk* chunk = FromAddress(base);
- // We are sure that we have mapped a block of requested addresses.
- ASSERT(initial_chunk_->size() == requested);
- LOG(isolate_,
- NewEvent("InitialChunk", initial_chunk_->address(), requested));
- size_ += static_cast<int>(requested);
- return initial_chunk_->address();
-}
+ ASSERT(base == chunk->address());
+ chunk->heap_ = heap;
+ chunk->size_ = size;
+ chunk->flags_ = 0;
+ chunk->set_owner(owner);
+ chunk->InitializeReservedMemory();
+ chunk->slots_buffer_ = NULL;
+ chunk->skip_list_ = NULL;
+ chunk->ResetLiveBytes();
+ Bitmap::Clear(chunk);
+ chunk->initialize_scan_on_scavenge(false);
+ chunk->SetFlag(WAS_SWEPT_PRECISELY);
+
+ ASSERT(OFFSET_OF(MemoryChunk, flags_) == kFlagsOffset);
+ ASSERT(OFFSET_OF(MemoryChunk, live_byte_count_) == kLiveBytesOffset);
+
+ if (executable == EXECUTABLE) chunk->SetFlag(IS_EXECUTABLE);
-static int PagesInChunk(Address start, size_t size) {
- // The first page starts on the first page-aligned address from start onward
- // and the last page ends on the last page-aligned address before
- // start+size. Page::kPageSize is a power of two so we can divide by
- // shifting.
- return static_cast<int>((RoundDown(start + size, Page::kPageSize)
- - RoundUp(start, Page::kPageSize)) >> kPageSizeBits);
+ if (owner == heap->old_data_space()) chunk->SetFlag(CONTAINS_ONLY_DATA);
+
+ return chunk;
}
-Page* MemoryAllocator::AllocatePages(int requested_pages,
- int* allocated_pages,
- PagedSpace* owner) {
- if (requested_pages <= 0) return Page::FromAddress(NULL);
- size_t chunk_size = requested_pages * Page::kPageSize;
+void MemoryChunk::InsertAfter(MemoryChunk* other) {
+ next_chunk_ = other->next_chunk_;
+ prev_chunk_ = other;
+ other->next_chunk_->prev_chunk_ = this;
+ other->next_chunk_ = this;
+}
- void* chunk = AllocateRawMemory(chunk_size, &chunk_size, owner->executable());
- if (chunk == NULL) return Page::FromAddress(NULL);
- LOG(isolate_, NewEvent("PagedChunk", chunk, chunk_size));
- *allocated_pages = PagesInChunk(static_cast<Address>(chunk), chunk_size);
+void MemoryChunk::Unlink() {
+ if (!InNewSpace() && IsFlagSet(SCAN_ON_SCAVENGE)) {
+ heap_->decrement_scan_on_scavenge_pages();
+ ClearFlag(SCAN_ON_SCAVENGE);
+ }
+ next_chunk_->prev_chunk_ = prev_chunk_;
+ prev_chunk_->next_chunk_ = next_chunk_;
+ prev_chunk_ = NULL;
+ next_chunk_ = NULL;
+}
+
- // We may 'lose' a page due to alignment.
- ASSERT(*allocated_pages >= kPagesPerChunk - 1);
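+// AllocateChunk is the single allocation path for pages and large object
+// chunks alike: the chunk size is the header (MemoryChunk::kObjectStartOffset)
+// plus the requested body size. Executable chunks come from the code range
+// when one exists; everything else is an aligned OS reservation.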
+MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size,
+ Executability executable,
+ Space* owner) {
+ size_t chunk_size = MemoryChunk::kObjectStartOffset + body_size;
+ Heap* heap = isolate_->heap();
+ Address base = NULL;
+ VirtualMemory reservation;
+ if (executable == EXECUTABLE) {
+ // Check executable memory limit.
+ if (size_executable_ + chunk_size > capacity_executable_) {
+ LOG(isolate_,
+ StringEvent("MemoryAllocator::AllocateRawMemory",
+ "V8 Executable Allocation capacity exceeded"));
+ return NULL;
+ }
- size_t guard_size = (owner->executable() == EXECUTABLE) ? Page::kPageSize : 0;
+ // Allocate executable memory either from code range or from the
+ // OS.
+ if (isolate_->code_range()->exists()) {
+ base = isolate_->code_range()->AllocateRawMemory(chunk_size, &chunk_size);
+ ASSERT(IsAligned(reinterpret_cast<intptr_t>(base),
+ MemoryChunk::kAlignment));
+ if (base == NULL) return NULL;
+ size_ += chunk_size;
+ // Update executable memory size.
+ size_executable_ += chunk_size;
+ } else {
+ base = AllocateAlignedMemory(chunk_size,
+ MemoryChunk::kAlignment,
+ executable,
+ &reservation);
+ if (base == NULL) return NULL;
+ // Update executable memory size.
+ size_executable_ += reservation.size();
+ }
+ } else {
+ base = AllocateAlignedMemory(chunk_size,
+ MemoryChunk::kAlignment,
+ executable,
+ &reservation);
- // Check that we got at least one page that we can use.
- if (*allocated_pages <= ((guard_size != 0) ? 1 : 0)) {
- FreeRawMemory(chunk,
- chunk_size,
- owner->executable());
- LOG(isolate_, DeleteEvent("PagedChunk", chunk));
- return Page::FromAddress(NULL);
+ if (base == NULL) return NULL;
}
- if (guard_size != 0) {
- OS::Guard(chunk, guard_size);
- chunk_size -= guard_size;
- chunk = static_cast<Address>(chunk) + guard_size;
- --*allocated_pages;
+#ifdef DEBUG
+ ZapBlock(base, chunk_size);
+#endif
+ isolate_->counters()->memory_allocated()->
+ Increment(static_cast<int>(chunk_size));
+
+ LOG(isolate_, NewEvent("MemoryChunk", base, chunk_size));
+ if (owner != NULL) {
+ ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity());
+ PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size);
}
- int chunk_id = Pop();
- chunks_[chunk_id].init(static_cast<Address>(chunk), chunk_size, owner);
+ MemoryChunk* result = MemoryChunk::Initialize(heap,
+ base,
+ chunk_size,
+ executable,
+ owner);
+ result->set_reserved_memory(&reservation);
+ return result;
+}
+
+
+Page* MemoryAllocator::AllocatePage(PagedSpace* owner,
+ Executability executable) {
+ MemoryChunk* chunk = AllocateChunk(Page::kObjectAreaSize, executable, owner);
- ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity());
- PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size);
- Page* new_pages = InitializePagesInChunk(chunk_id, *allocated_pages, owner);
+ if (chunk == NULL) return NULL;
- return new_pages;
+ return Page::Initialize(isolate_->heap(), chunk, executable, owner);
}
-Page* MemoryAllocator::CommitPages(Address start, size_t size,
- PagedSpace* owner, int* num_pages) {
- ASSERT(start != NULL);
- *num_pages = PagesInChunk(start, size);
- ASSERT(*num_pages > 0);
- ASSERT(initial_chunk_ != NULL);
- ASSERT(InInitialChunk(start));
- ASSERT(InInitialChunk(start + size - 1));
- if (!initial_chunk_->Commit(start, size, owner->executable() == EXECUTABLE)) {
- return Page::FromAddress(NULL);
+LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size,
+ Executability executable,
+ Space* owner) {
+ MemoryChunk* chunk = AllocateChunk(object_size, executable, owner);
+ if (chunk == NULL) return NULL;
+ return LargePage::Initialize(isolate_->heap(), chunk);
+}
+
+
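+// Free tears a chunk down in roughly the reverse order of AllocateChunk:
+// notify the allocation callbacks, drop the per-chunk side structures,
+// then return the memory through whichever FreeMemory overload matches
+// the chunk's ownership.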
+void MemoryAllocator::Free(MemoryChunk* chunk) {
+ LOG(isolate_, DeleteEvent("MemoryChunk", chunk));
+ if (chunk->owner() != NULL) {
+ ObjectSpace space =
+ static_cast<ObjectSpace>(1 << chunk->owner()->identity());
+ PerformAllocationCallback(space, kAllocationActionFree, chunk->size());
}
-#ifdef DEBUG
- ZapBlock(start, size);
-#endif
- isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size));
- // So long as we correctly overestimated the number of chunks we should not
- // run out of chunk ids.
- CHECK(!OutOfChunkIds());
- int chunk_id = Pop();
- chunks_[chunk_id].init(start, size, owner);
- return InitializePagesInChunk(chunk_id, *num_pages, owner);
+ delete chunk->slots_buffer();
+ delete chunk->skip_list();
+
+ VirtualMemory* reservation = chunk->reserved_memory();
+ if (reservation->IsReserved()) {
+ FreeMemory(reservation, chunk->executable());
+ } else {
+ FreeMemory(chunk->address(),
+ chunk->size(),
+ chunk->executable());
+ }
}
bool MemoryAllocator::CommitBlock(Address start,
size_t size,
Executability executable) {
- ASSERT(start != NULL);
- ASSERT(size > 0);
- ASSERT(initial_chunk_ != NULL);
- ASSERT(InInitialChunk(start));
- ASSERT(InInitialChunk(start + size - 1));
-
- if (!initial_chunk_->Commit(start, size, executable)) return false;
+ if (!VirtualMemory::CommitRegion(start, size, executable)) return false;
#ifdef DEBUG
ZapBlock(start, size);
#endif
@@ -583,13 +579,7 @@ bool MemoryAllocator::CommitBlock(Address start,
bool MemoryAllocator::UncommitBlock(Address start, size_t size) {
- ASSERT(start != NULL);
- ASSERT(size > 0);
- ASSERT(initial_chunk_ != NULL);
- ASSERT(InInitialChunk(start));
- ASSERT(InInitialChunk(start + size - 1));
-
- if (!initial_chunk_->Uncommit(start, size)) return false;
+ if (!VirtualMemory::UncommitRegion(start, size)) return false;
isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
return true;
}
@@ -602,130 +592,49 @@ void MemoryAllocator::ZapBlock(Address start, size_t size) {
}
-Page* MemoryAllocator::InitializePagesInChunk(int chunk_id, int pages_in_chunk,
- PagedSpace* owner) {
- ASSERT(IsValidChunk(chunk_id));
- ASSERT(pages_in_chunk > 0);
-
- Address chunk_start = chunks_[chunk_id].address();
-
- Address low = RoundUp(chunk_start, Page::kPageSize);
-
-#ifdef DEBUG
- size_t chunk_size = chunks_[chunk_id].size();
- Address high = RoundDown(chunk_start + chunk_size, Page::kPageSize);
- ASSERT(pages_in_chunk <=
- ((OffsetFrom(high) - OffsetFrom(low)) / Page::kPageSize));
-#endif
-
- Address page_addr = low;
- for (int i = 0; i < pages_in_chunk; i++) {
- Page* p = Page::FromAddress(page_addr);
- p->heap_ = owner->heap();
- p->opaque_header = OffsetFrom(page_addr + Page::kPageSize) | chunk_id;
- p->InvalidateWatermark(true);
- p->SetIsLargeObjectPage(false);
- p->SetAllocationWatermark(p->ObjectAreaStart());
- p->SetCachedAllocationWatermark(p->ObjectAreaStart());
- page_addr += Page::kPageSize;
+void MemoryAllocator::PerformAllocationCallback(ObjectSpace space,
+ AllocationAction action,
+ size_t size) {
+ for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
+ MemoryAllocationCallbackRegistration registration =
+ memory_allocation_callbacks_[i];
+ if ((registration.space & space) == space &&
+ (registration.action & action) == action)
+ registration.callback(space, action, static_cast<int>(size));
}
-
- // Set the next page of the last page to 0.
- Page* last_page = Page::FromAddress(page_addr - Page::kPageSize);
- last_page->opaque_header = OffsetFrom(0) | chunk_id;
-
- return Page::FromAddress(low);
}
-Page* MemoryAllocator::FreePages(Page* p) {
- if (!p->is_valid()) return p;
-
- // Find the first page in the same chunk as 'p'
- Page* first_page = FindFirstPageInSameChunk(p);
- Page* page_to_return = Page::FromAddress(NULL);
-
- if (p != first_page) {
- // Find the last page in the same chunk as 'prev'.
- Page* last_page = FindLastPageInSameChunk(p);
- first_page = GetNextPage(last_page); // first page in next chunk
-
- // set the next_page of last_page to NULL
- SetNextPage(last_page, Page::FromAddress(NULL));
- page_to_return = p; // return 'p' when exiting
- }
-
- while (first_page->is_valid()) {
- int chunk_id = GetChunkId(first_page);
- ASSERT(IsValidChunk(chunk_id));
-
- // Find the first page of the next chunk before deleting this chunk.
- first_page = GetNextPage(FindLastPageInSameChunk(first_page));
-
- // Free the current chunk.
- DeleteChunk(chunk_id);
+bool MemoryAllocator::MemoryAllocationCallbackRegistered(
+ MemoryAllocationCallback callback) {
+ for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
+ if (memory_allocation_callbacks_[i].callback == callback) return true;
}
-
- return page_to_return;
+ return false;
}
-void MemoryAllocator::FreeAllPages(PagedSpace* space) {
- for (int i = 0, length = chunks_.length(); i < length; i++) {
- if (chunks_[i].owner() == space) {
- DeleteChunk(i);
- }
- }
+void MemoryAllocator::AddMemoryAllocationCallback(
+ MemoryAllocationCallback callback,
+ ObjectSpace space,
+ AllocationAction action) {
+ ASSERT(callback != NULL);
+ MemoryAllocationCallbackRegistration registration(callback, space, action);
+ ASSERT(!MemoryAllocator::MemoryAllocationCallbackRegistered(callback));
+ return memory_allocation_callbacks_.Add(registration);
}
-void MemoryAllocator::DeleteChunk(int chunk_id) {
- ASSERT(IsValidChunk(chunk_id));
-
- ChunkInfo& c = chunks_[chunk_id];
-
- // We cannot free a chunk contained in the initial chunk because it was not
- // allocated with AllocateRawMemory. Instead we uncommit the virtual
- // memory.
- if (InInitialChunk(c.address())) {
- // TODO(1240712): VirtualMemory::Uncommit has a return value which
- // is ignored here.
- initial_chunk_->Uncommit(c.address(), c.size());
- Counters* counters = isolate_->counters();
- counters->memory_allocated()->Decrement(static_cast<int>(c.size()));
- } else {
- LOG(isolate_, DeleteEvent("PagedChunk", c.address()));
- ObjectSpace space = static_cast<ObjectSpace>(1 << c.owner_identity());
- size_t size = c.size();
- size_t guard_size = (c.executable() == EXECUTABLE) ? Page::kPageSize : 0;
- FreeRawMemory(c.address() - guard_size, size + guard_size, c.executable());
- PerformAllocationCallback(space, kAllocationActionFree, size);
+void MemoryAllocator::RemoveMemoryAllocationCallback(
+ MemoryAllocationCallback callback) {
+ ASSERT(callback != NULL);
+ for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
+ if (memory_allocation_callbacks_[i].callback == callback) {
+ memory_allocation_callbacks_.Remove(i);
+ return;
+ }
}
- c.init(NULL, 0, NULL);
- Push(chunk_id);
-}
-
-
-Page* MemoryAllocator::FindFirstPageInSameChunk(Page* p) {
- int chunk_id = GetChunkId(p);
- ASSERT(IsValidChunk(chunk_id));
-
- Address low = RoundUp(chunks_[chunk_id].address(), Page::kPageSize);
- return Page::FromAddress(low);
-}
-
-
-Page* MemoryAllocator::FindLastPageInSameChunk(Page* p) {
- int chunk_id = GetChunkId(p);
- ASSERT(IsValidChunk(chunk_id));
-
- Address chunk_start = chunks_[chunk_id].address();
- size_t chunk_size = chunks_[chunk_id].size();
-
- Address high = RoundDown(chunk_start + chunk_size, Page::kPageSize);
- ASSERT(chunk_start <= p->address() && p->address() < high);
-
- return Page::FromAddress(high - Page::kPageSize);
+ UNREACHABLE();
}
@@ -739,75 +648,6 @@ void MemoryAllocator::ReportStatistics() {
}
#endif
-
-void MemoryAllocator::RelinkPageListInChunkOrder(PagedSpace* space,
- Page** first_page,
- Page** last_page,
- Page** last_page_in_use) {
- Page* first = NULL;
- Page* last = NULL;
-
- for (int i = 0, length = chunks_.length(); i < length; i++) {
- ChunkInfo& chunk = chunks_[i];
-
- if (chunk.owner() == space) {
- if (first == NULL) {
- Address low = RoundUp(chunk.address(), Page::kPageSize);
- first = Page::FromAddress(low);
- }
- last = RelinkPagesInChunk(i,
- chunk.address(),
- chunk.size(),
- last,
- last_page_in_use);
- }
- }
-
- if (first_page != NULL) {
- *first_page = first;
- }
-
- if (last_page != NULL) {
- *last_page = last;
- }
-}
-
-
-Page* MemoryAllocator::RelinkPagesInChunk(int chunk_id,
- Address chunk_start,
- size_t chunk_size,
- Page* prev,
- Page** last_page_in_use) {
- Address page_addr = RoundUp(chunk_start, Page::kPageSize);
- int pages_in_chunk = PagesInChunk(chunk_start, chunk_size);
-
- if (prev->is_valid()) {
- SetNextPage(prev, Page::FromAddress(page_addr));
- }
-
- for (int i = 0; i < pages_in_chunk; i++) {
- Page* p = Page::FromAddress(page_addr);
- p->opaque_header = OffsetFrom(page_addr + Page::kPageSize) | chunk_id;
- page_addr += Page::kPageSize;
-
- p->InvalidateWatermark(true);
- if (p->WasInUseBeforeMC()) {
- *last_page_in_use = p;
- }
- }
-
- // Set the next page of the last page to 0.
- Page* last_page = Page::FromAddress(page_addr - Page::kPageSize);
- last_page->opaque_header = OffsetFrom(0) | chunk_id;
-
- if (last_page->WasInUseBeforeMC()) {
- *last_page_in_use = last_page;
- }
-
- return last_page;
-}
-
-
// -----------------------------------------------------------------------------
// PagedSpace implementation
@@ -815,7 +655,10 @@ PagedSpace::PagedSpace(Heap* heap,
intptr_t max_capacity,
AllocationSpace id,
Executability executable)
- : Space(heap, id, executable) {
+ : Space(heap, id, executable),
+ free_list_(this),
+ was_swept_conservatively_(false),
+ first_unswept_page_(Page::FromAddress(NULL)) {
max_capacity_ = (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize)
* Page::kObjectAreaSize;
accounting_stats_.Clear();
@@ -823,288 +666,150 @@ PagedSpace::PagedSpace(Heap* heap,
allocation_info_.top = NULL;
allocation_info_.limit = NULL;
- mc_forwarding_info_.top = NULL;
- mc_forwarding_info_.limit = NULL;
+ anchor_.InitializeAsAnchor(this);
}
-bool PagedSpace::Setup(Address start, size_t size) {
- if (HasBeenSetup()) return false;
-
- int num_pages = 0;
- // Try to use the virtual memory range passed to us. If it is too small to
- // contain at least one page, ignore it and allocate instead.
- int pages_in_chunk = PagesInChunk(start, size);
- if (pages_in_chunk > 0) {
- first_page_ = Isolate::Current()->memory_allocator()->CommitPages(
- RoundUp(start, Page::kPageSize),
- Page::kPageSize * pages_in_chunk,
- this, &num_pages);
- } else {
- int requested_pages =
- Min(MemoryAllocator::kPagesPerChunk,
- static_cast<int>(max_capacity_ / Page::kObjectAreaSize));
- first_page_ =
- Isolate::Current()->memory_allocator()->AllocatePages(
- requested_pages, &num_pages, this);
- if (!first_page_->is_valid()) return false;
- }
-
- // We are sure that the first page is valid and that we have at least one
- // page.
- ASSERT(first_page_->is_valid());
- ASSERT(num_pages > 0);
- accounting_stats_.ExpandSpace(num_pages * Page::kObjectAreaSize);
- ASSERT(Capacity() <= max_capacity_);
-
- // Sequentially clear region marks in the newly allocated
- // pages and cache the current last page in the space.
- for (Page* p = first_page_; p->is_valid(); p = p->next_page()) {
- p->SetRegionMarks(Page::kAllRegionsCleanMarks);
- last_page_ = p;
- }
-
- // Use first_page_ for allocation.
- SetAllocationInfo(&allocation_info_, first_page_);
-
- page_list_is_chunk_ordered_ = true;
-
+bool PagedSpace::Setup() {
return true;
}
bool PagedSpace::HasBeenSetup() {
- return (Capacity() > 0);
+ return true;
}
void PagedSpace::TearDown() {
- Isolate::Current()->memory_allocator()->FreeAllPages(this);
- first_page_ = NULL;
- accounting_stats_.Clear();
-}
-
-
-void PagedSpace::MarkAllPagesClean() {
- PageIterator it(this, PageIterator::ALL_PAGES);
- while (it.has_next()) {
- it.next()->SetRegionMarks(Page::kAllRegionsCleanMarks);
+ PageIterator iterator(this);
+ while (iterator.has_next()) {
+ heap()->isolate()->memory_allocator()->Free(iterator.next());
}
+ anchor_.set_next_page(&anchor_);
+ anchor_.set_prev_page(&anchor_);
+ accounting_stats_.Clear();
}
MaybeObject* PagedSpace::FindObject(Address addr) {
- // Note: this function can only be called before or after mark-compact GC
- // because it accesses map pointers.
+ // Note: this function can only be called on precisely swept spaces.
ASSERT(!heap()->mark_compact_collector()->in_use());
if (!Contains(addr)) return Failure::Exception();
Page* p = Page::FromAddress(addr);
- ASSERT(IsUsed(p));
- Address cur = p->ObjectAreaStart();
- Address end = p->AllocationTop();
- while (cur < end) {
- HeapObject* obj = HeapObject::FromAddress(cur);
+ HeapObjectIterator it(p, NULL);
+ for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
+ Address cur = obj->address();
Address next = cur + obj->Size();
if ((cur <= addr) && (addr < next)) return obj;
- cur = next;
}
UNREACHABLE();
return Failure::Exception();
}
+bool PagedSpace::CanExpand() {
+ ASSERT(max_capacity_ % Page::kObjectAreaSize == 0);
+ ASSERT(Capacity() % Page::kObjectAreaSize == 0);
-bool PagedSpace::IsUsed(Page* page) {
- PageIterator it(this, PageIterator::PAGES_IN_USE);
- while (it.has_next()) {
- if (page == it.next()) return true;
- }
- return false;
-}
-
-
-void PagedSpace::SetAllocationInfo(AllocationInfo* alloc_info, Page* p) {
- alloc_info->top = p->ObjectAreaStart();
- alloc_info->limit = p->ObjectAreaEnd();
- ASSERT(alloc_info->VerifyPagedAllocation());
-}
-
-
-void PagedSpace::MCResetRelocationInfo() {
- // Set page indexes.
- int i = 0;
- PageIterator it(this, PageIterator::ALL_PAGES);
- while (it.has_next()) {
- Page* p = it.next();
- p->mc_page_index = i++;
- }
-
- // Set mc_forwarding_info_ to the first page in the space.
- SetAllocationInfo(&mc_forwarding_info_, first_page_);
- // All the bytes in the space are 'available'. We will rediscover
- // allocated and wasted bytes during GC.
- accounting_stats_.Reset();
-}
-
-
-int PagedSpace::MCSpaceOffsetForAddress(Address addr) {
-#ifdef DEBUG
- // The Contains function considers the address at the beginning of a
- // page in the page, MCSpaceOffsetForAddress considers it is in the
- // previous page.
- if (Page::IsAlignedToPageSize(addr)) {
- ASSERT(Contains(addr - kPointerSize));
- } else {
- ASSERT(Contains(addr));
- }
-#endif
-
- // If addr is at the end of a page, it belongs to previous page
- Page* p = Page::IsAlignedToPageSize(addr)
- ? Page::FromAllocationTop(addr)
- : Page::FromAddress(addr);
- int index = p->mc_page_index;
- return (index * Page::kPageSize) + p->Offset(addr);
-}
+ if (Capacity() == max_capacity_) return false;
+ ASSERT(Capacity() < max_capacity_);
-// Slow case for reallocating and promoting objects during a compacting
-// collection. This function is not space-specific.
-HeapObject* PagedSpace::SlowMCAllocateRaw(int size_in_bytes) {
- Page* current_page = TopPageOf(mc_forwarding_info_);
- if (!current_page->next_page()->is_valid()) {
- if (!Expand(current_page)) {
- return NULL;
- }
- }
+ // Are we going to exceed capacity for this space?
+ if ((Capacity() + Page::kPageSize) > max_capacity_) return false;
- // There are surely more pages in the space now.
- ASSERT(current_page->next_page()->is_valid());
- // We do not add the top of page block for current page to the space's
- // free list---the block may contain live objects so we cannot write
- // bookkeeping information to it. Instead, we will recover top of page
- // blocks when we move objects to their new locations.
- //
- // We do however write the allocation pointer to the page. The encoding
- // of forwarding addresses is as an offset in terms of live bytes, so we
- // need quick access to the allocation top of each page to decode
- // forwarding addresses.
- current_page->SetAllocationWatermark(mc_forwarding_info_.top);
- current_page->next_page()->InvalidateWatermark(true);
- SetAllocationInfo(&mc_forwarding_info_, current_page->next_page());
- return AllocateLinearly(&mc_forwarding_info_, size_in_bytes);
+ return true;
}
+bool PagedSpace::Expand() {
+ if (!CanExpand()) return false;
-bool PagedSpace::Expand(Page* last_page) {
- ASSERT(max_capacity_ % Page::kObjectAreaSize == 0);
- ASSERT(Capacity() % Page::kObjectAreaSize == 0);
-
- if (Capacity() == max_capacity_) return false;
+ Page* p = heap()->isolate()->memory_allocator()->
+ AllocatePage(this, executable());
+ if (p == NULL) return false;
- ASSERT(Capacity() < max_capacity_);
- // Last page must be valid and its next page is invalid.
- ASSERT(last_page->is_valid() && !last_page->next_page()->is_valid());
-
- int available_pages =
- static_cast<int>((max_capacity_ - Capacity()) / Page::kObjectAreaSize);
- // We don't want to have to handle small chunks near the end so if there are
- // not kPagesPerChunk pages available without exceeding the max capacity then
- // act as if memory has run out.
- if (available_pages < MemoryAllocator::kPagesPerChunk) return false;
-
- int desired_pages = Min(available_pages, MemoryAllocator::kPagesPerChunk);
- Page* p = heap()->isolate()->memory_allocator()->AllocatePages(
- desired_pages, &desired_pages, this);
- if (!p->is_valid()) return false;
-
- accounting_stats_.ExpandSpace(desired_pages * Page::kObjectAreaSize);
ASSERT(Capacity() <= max_capacity_);
- heap()->isolate()->memory_allocator()->SetNextPage(last_page, p);
-
- // Sequentially clear region marks of new pages and and cache the
- // new last page in the space.
- while (p->is_valid()) {
- p->SetRegionMarks(Page::kAllRegionsCleanMarks);
- last_page_ = p;
- p = p->next_page();
- }
+ p->InsertAfter(anchor_.prev_page());
return true;
}
-#ifdef DEBUG
int PagedSpace::CountTotalPages() {
+ PageIterator it(this);
int count = 0;
- for (Page* p = first_page_; p->is_valid(); p = p->next_page()) {
+ while (it.has_next()) {
+ it.next();
count++;
}
return count;
}
-#endif
-void PagedSpace::Shrink() {
- if (!page_list_is_chunk_ordered_) {
- // We can't shrink space if pages is not chunk-ordered
- // (see comment for class MemoryAllocator for definition).
- return;
- }
+void PagedSpace::ReleasePage(Page* page) {
+ ASSERT(page->LiveBytes() == 0);
- // Release half of free pages.
- Page* top_page = AllocationTopPage();
- ASSERT(top_page->is_valid());
+ // Adjust the list of unswept pages if the page is its head or tail.
+ if (first_unswept_page_ == page) {
+ first_unswept_page_ = page->next_page();
+ if (first_unswept_page_ == anchor()) {
+ first_unswept_page_ = Page::FromAddress(NULL);
+ }
+ }
- // Count the number of pages we would like to free.
- int pages_to_free = 0;
- for (Page* p = top_page->next_page(); p->is_valid(); p = p->next_page()) {
- pages_to_free++;
+ if (page->WasSwept()) {
+ intptr_t size = free_list_.EvictFreeListItems(page);
+ accounting_stats_.AllocateBytes(size);
+ ASSERT_EQ(Page::kObjectAreaSize, static_cast<int>(size));
}
- // Free pages after top_page.
- Page* p = heap()->isolate()->memory_allocator()->
- FreePages(top_page->next_page());
- heap()->isolate()->memory_allocator()->SetNextPage(top_page, p);
+ if (Page::FromAllocationTop(allocation_info_.top) == page) {
+ allocation_info_.top = allocation_info_.limit = NULL;
+ }
- // Find out how many pages we failed to free and update last_page_.
- // Please note pages can only be freed in whole chunks.
- last_page_ = top_page;
- for (Page* p = top_page->next_page(); p->is_valid(); p = p->next_page()) {
- pages_to_free--;
- last_page_ = p;
+ page->Unlink();
+ if (page->IsFlagSet(MemoryChunk::CONTAINS_ONLY_DATA)) {
+ heap()->isolate()->memory_allocator()->Free(page);
+ } else {
+ heap()->QueueMemoryChunkForFree(page);
}
- accounting_stats_.ShrinkSpace(pages_to_free * Page::kObjectAreaSize);
- ASSERT(Capacity() == CountTotalPages() * Page::kObjectAreaSize);
+ ASSERT(Capacity() > 0);
+ ASSERT(Capacity() % Page::kObjectAreaSize == 0);
+ accounting_stats_.ShrinkSpace(Page::kObjectAreaSize);
}
-bool PagedSpace::EnsureCapacity(int capacity) {
- if (Capacity() >= capacity) return true;
-
- // Start from the allocation top and loop to the last page in the space.
- Page* last_page = AllocationTopPage();
- Page* next_page = last_page->next_page();
- while (next_page->is_valid()) {
- last_page = heap()->isolate()->memory_allocator()->
- FindLastPageInSameChunk(next_page);
- next_page = last_page->next_page();
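+// Release every page that provably holds no live objects: unswept pages
+// with zero live bytes, and swept pages that are fully covered by
+// free-list entries.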
+void PagedSpace::ReleaseAllUnusedPages() {
+ PageIterator it(this);
+ while (it.has_next()) {
+ Page* page = it.next();
+ if (!page->WasSwept()) {
+ if (page->LiveBytes() == 0) ReleasePage(page);
+ } else {
+ HeapObject* obj = HeapObject::FromAddress(page->body());
+ if (obj->IsFreeSpace() &&
+ FreeSpace::cast(obj)->size() == Page::kObjectAreaSize) {
+ // Sometimes we allocate memory from the free list but don't
+ // immediately initialize it (e.g. see PagedSpace::ReserveSpace,
+ // called from Heap::ReserveSpace, which can cause a GC before the
+ // reserved space is actually initialized). Thus we can't simply
+ // assume that obj represents a valid node still owned by a free
+ // list. Instead we verify that the page is fully covered by
+ // free-list items.
+ FreeList::SizeStats sizes;
+ free_list_.CountFreeListItems(page, &sizes);
+ if (sizes.Total() == Page::kObjectAreaSize) {
+ ReleasePage(page);
+ }
+ }
+ }
}
-
- // Expand the space until it has the required capacity or expansion fails.
- do {
- if (!Expand(last_page)) return false;
- ASSERT(last_page->next_page()->is_valid());
- last_page =
- heap()->isolate()->memory_allocator()->FindLastPageInSameChunk(
- last_page->next_page());
- } while (Capacity() < capacity);
-
- return true;
+ heap()->FreeQueuedChunks();
}
@@ -1114,61 +819,52 @@ void PagedSpace::Print() { }
#ifdef DEBUG
-// We do not assume that the PageIterator works, because it depends on the
-// invariants we are checking during verification.
void PagedSpace::Verify(ObjectVisitor* visitor) {
- // The allocation pointer should be valid, and it should be in a page in the
- // space.
- ASSERT(allocation_info_.VerifyPagedAllocation());
- Page* top_page = Page::FromAllocationTop(allocation_info_.top);
- ASSERT(heap()->isolate()->memory_allocator()->IsPageInSpace(top_page, this));
-
- // Loop over all the pages.
- bool above_allocation_top = false;
- Page* current_page = first_page_;
- while (current_page->is_valid()) {
- if (above_allocation_top) {
- // We don't care what's above the allocation top.
- } else {
- Address top = current_page->AllocationTop();
- if (current_page == top_page) {
- ASSERT(top == allocation_info_.top);
- // The next page will be above the allocation top.
- above_allocation_top = true;
- }
-
- // It should be packed with objects from the bottom to the top.
- Address current = current_page->ObjectAreaStart();
- while (current < top) {
- HeapObject* object = HeapObject::FromAddress(current);
-
- // The first word should be a map, and we expect all map pointers to
- // be in map space.
- Map* map = object->map();
- ASSERT(map->IsMap());
- ASSERT(heap()->map_space()->Contains(map));
-
- // Perform space-specific object verification.
- VerifyObject(object);
-
- // The object itself should look OK.
- object->Verify();
-
- // All the interior pointers should be contained in the heap and
- // have page regions covering intergenerational references should be
- // marked dirty.
- int size = object->Size();
- object->IterateBody(map->instance_type(), size, visitor);
-
- current += size;
+ // We can only iterate over the pages if they were swept precisely.
+ if (was_swept_conservatively_) return;
+
+ bool allocation_pointer_found_in_space =
+ (allocation_info_.top == allocation_info_.limit);
+ PageIterator page_iterator(this);
+ while (page_iterator.has_next()) {
+ Page* page = page_iterator.next();
+ ASSERT(page->owner() == this);
+ if (page == Page::FromAllocationTop(allocation_info_.top)) {
+ allocation_pointer_found_in_space = true;
+ }
+ ASSERT(page->WasSweptPrecisely());
+ HeapObjectIterator it(page, NULL);
+ Address end_of_previous_object = page->ObjectAreaStart();
+ Address top = page->ObjectAreaEnd();
+ int black_size = 0;
+ for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
+ ASSERT(end_of_previous_object <= object->address());
+
+ // The first word should be a map, and we expect all map pointers to
+ // be in map space.
+ Map* map = object->map();
+ ASSERT(map->IsMap());
+ ASSERT(heap()->map_space()->Contains(map));
+
+ // Perform space-specific object verification.
+ VerifyObject(object);
+
+ // The object itself should look OK.
+ object->Verify();
+
+ // All the interior pointers should be contained in the heap.
+ int size = object->Size();
+ object->IterateBody(map->instance_type(), size, visitor);
+ if (Marking::IsBlack(Marking::MarkBitFrom(object))) {
+ black_size += size;
}
- // The allocation pointer should not be in the middle of an object.
- ASSERT(current == top);
+ ASSERT(object->address() + size <= top);
+ end_of_previous_object = object->address() + size;
}
-
- current_page = current_page->next_page();
+ ASSERT_LE(black_size, page->LiveBytes());
}
+ ASSERT(allocation_pointer_found_in_space);
}
#endif
@@ -1177,13 +873,23 @@ void PagedSpace::Verify(ObjectVisitor* visitor) {
// NewSpace implementation
-bool NewSpace::Setup(Address start, int size) {
+bool NewSpace::Setup(int reserved_semispace_capacity,
+ int maximum_semispace_capacity) {
// Set up the new space from a freshly reserved memory block, which is
// divided into two semi-spaces.
// To support fast containment testing in the new space, the size of
// this chunk must be a power of two and it must be aligned to its size.
int initial_semispace_capacity = heap()->InitialSemiSpaceSize();
- int maximum_semispace_capacity = heap()->MaxSemiSpaceSize();
+
+ size_t size = 2 * reserved_semispace_capacity;
+ Address base =
+ heap()->isolate()->memory_allocator()->ReserveAlignedMemory(
+ size, size, &reservation_);
+ if (base == NULL) return false;
+
+ chunk_base_ = base;
+ chunk_size_ = static_cast<uintptr_t>(size);
+ LOG(heap()->isolate(), NewEvent("InitialChunk", chunk_base_, chunk_size_));
ASSERT(initial_semispace_capacity <= maximum_semispace_capacity);
ASSERT(IsPowerOf2(maximum_semispace_capacity));
@@ -1197,31 +903,29 @@ bool NewSpace::Setup(Address start, int size) {
INSTANCE_TYPE_LIST(SET_NAME)
#undef SET_NAME
- ASSERT(size == 2 * heap()->ReservedSemiSpaceSize());
- ASSERT(IsAddressAligned(start, size, 0));
+ ASSERT(reserved_semispace_capacity == heap()->ReservedSemiSpaceSize());
+ ASSERT(static_cast<intptr_t>(chunk_size_) >=
+ 2 * heap()->ReservedSemiSpaceSize());
+ ASSERT(IsAddressAligned(chunk_base_, 2 * reserved_semispace_capacity, 0));
- if (!to_space_.Setup(start,
+ if (!to_space_.Setup(chunk_base_,
initial_semispace_capacity,
maximum_semispace_capacity)) {
return false;
}
- if (!from_space_.Setup(start + maximum_semispace_capacity,
+ if (!from_space_.Setup(chunk_base_ + reserved_semispace_capacity,
initial_semispace_capacity,
maximum_semispace_capacity)) {
return false;
}
- start_ = start;
- address_mask_ = ~(size - 1);
+ start_ = chunk_base_;
+ address_mask_ = ~(2 * reserved_semispace_capacity - 1);
object_mask_ = address_mask_ | kHeapObjectTagMask;
- object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag;
+ object_expected_ = reinterpret_cast<uintptr_t>(start_) | kHeapObjectTag;
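+ // Example, assuming an 8 MB reserved semispace capacity: the 16 MB block
+ // is aligned to its own size, so with address_mask_ == ~(16 MB - 1) an
+ // address a is in new space exactly when (a & address_mask_) == start_.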
- allocation_info_.top = to_space_.low();
- allocation_info_.limit = to_space_.high();
- mc_forwarding_info_.top = NULL;
- mc_forwarding_info_.limit = NULL;
+ ResetAllocationInfo();
- ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
return true;
}
@@ -1239,28 +943,34 @@ void NewSpace::TearDown() {
start_ = NULL;
allocation_info_.top = NULL;
allocation_info_.limit = NULL;
- mc_forwarding_info_.top = NULL;
- mc_forwarding_info_.limit = NULL;
to_space_.TearDown();
from_space_.TearDown();
+
+ LOG(heap()->isolate(), DeleteEvent("InitialChunk", chunk_base_));
+
+ ASSERT(reservation_.IsReserved());
+ heap()->isolate()->memory_allocator()->FreeMemory(&reservation_,
+ NOT_EXECUTABLE);
+ chunk_base_ = NULL;
+ chunk_size_ = 0;
}
void NewSpace::Flip() {
- SemiSpace tmp = from_space_;
- from_space_ = to_space_;
- to_space_ = tmp;
+ SemiSpace::Swap(&from_space_, &to_space_);
}
void NewSpace::Grow() {
+ // Double the semispace size but only up to maximum capacity.
ASSERT(Capacity() < MaximumCapacity());
- if (to_space_.Grow()) {
- // Only grow from space if we managed to grow to space.
- if (!from_space_.Grow()) {
- // If we managed to grow to space but couldn't grow from space,
- // attempt to shrink to space.
+ int new_capacity = Min(MaximumCapacity(), 2 * static_cast<int>(Capacity()));
+ if (to_space_.GrowTo(new_capacity)) {
+ // Only grow from-space if we managed to grow to-space.
+ if (!from_space_.GrowTo(new_capacity)) {
+ // If we managed to grow to-space but couldn't grow from-space,
+ // attempt to shrink to-space.
if (!to_space_.ShrinkTo(from_space_.Capacity())) {
// We are in an inconsistent state because we could not
// commit/uncommit memory from new space.
@@ -1268,21 +978,20 @@ void NewSpace::Grow() {
}
}
}
- allocation_info_.limit = to_space_.high();
ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}
void NewSpace::Shrink() {
int new_capacity = Max(InitialCapacity(), 2 * SizeAsInt());
- int rounded_new_capacity =
- RoundUp(new_capacity, static_cast<int>(OS::AllocateAlignment()));
+ int rounded_new_capacity = RoundUp(new_capacity, Page::kPageSize);
if (rounded_new_capacity < Capacity() &&
to_space_.ShrinkTo(rounded_new_capacity)) {
- // Only shrink from space if we managed to shrink to space.
+ // Only shrink from-space if we managed to shrink to-space.
+ from_space_.Reset();
if (!from_space_.ShrinkTo(rounded_new_capacity)) {
- // If we managed to shrink to space but couldn't shrink from
- // space, attempt to grow to space again.
+ // If we managed to shrink to-space but couldn't shrink from-space,
+ // attempt to grow to-space again.
if (!to_space_.GrowTo(from_space_.Capacity())) {
// We are in an inconsistent state because we could not
// commit/uncommit memory from new space.
@@ -1290,36 +999,98 @@ void NewSpace::Shrink() {
}
}
}
- allocation_info_.limit = to_space_.high();
+ allocation_info_.limit = to_space_.page_high();
ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}
-void NewSpace::ResetAllocationInfo() {
- allocation_info_.top = to_space_.low();
- allocation_info_.limit = to_space_.high();
+void NewSpace::UpdateAllocationInfo() {
+ allocation_info_.top = to_space_.page_low();
+ allocation_info_.limit = to_space_.page_high();
+
+ // Lower limit during incremental marking.
+ if (heap()->incremental_marking()->IsMarking() &&
+ inline_allocation_limit_step() != 0) {
+ Address new_limit =
+ allocation_info_.top + inline_allocation_limit_step();
+ allocation_info_.limit = Min(new_limit, allocation_info_.limit);
+ }
ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}
-void NewSpace::MCResetRelocationInfo() {
- mc_forwarding_info_.top = from_space_.low();
- mc_forwarding_info_.limit = from_space_.high();
- ASSERT_SEMISPACE_ALLOCATION_INFO(mc_forwarding_info_, from_space_);
+void NewSpace::ResetAllocationInfo() {
+ to_space_.Reset();
+ UpdateAllocationInfo();
+ pages_used_ = 0;
+ // Clear all mark-bits in the to-space.
+ NewSpacePageIterator it(&to_space_);
+ while (it.has_next()) {
+ Bitmap::Clear(it.next());
+ }
}
-void NewSpace::MCCommitRelocationInfo() {
- // Assumes that the spaces have been flipped so that mc_forwarding_info_ is
- // valid allocation info for the to space.
- allocation_info_.top = mc_forwarding_info_.top;
- allocation_info_.limit = to_space_.high();
- ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
+bool NewSpace::AddFreshPage() {
+ Address top = allocation_info_.top;
+ if (NewSpacePage::IsAtStart(top)) {
+ // The current page is already empty. Don't try to make another.
+
+ // We should only get here if someone asks to allocate more
+ // than what can be stored in a single page.
+ // TODO(gc): Change the limit on new-space allocation to prevent this
+ // from happening (all such allocations should go directly to LOSpace).
+ return false;
+ }
+ if (!to_space_.AdvancePage()) {
+ // Failed to get a new page in to-space.
+ return false;
+ }
+
+ // Clear remainder of current page.
+ Address limit = NewSpacePage::FromLimit(top)->body_limit();
+ if (heap()->gc_state() == Heap::SCAVENGE) {
+ heap()->promotion_queue()->SetNewLimit(limit);
+ heap()->promotion_queue()->ActivateGuardIfOnTheSamePage();
+ }
+
+ int remaining_in_page = static_cast<int>(limit - top);
+ heap()->CreateFillerObjectAt(top, remaining_in_page);
+ pages_used_++;
+ UpdateAllocationInfo();
+
+ return true;
+}
+
+
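+// Slow path for AllocateRaw: either incremental marking lowered the limit
+// artificially to get a chance to do a step, or the current page really
+// is full. In the first case we raise the limit and retry; in the second
+// we advance to a fresh page. Only if both fail do we report
+// Failure::RetryAfterGC().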
+MaybeObject* NewSpace::SlowAllocateRaw(int size_in_bytes) {
+ Address old_top = allocation_info_.top;
+ Address new_top = old_top + size_in_bytes;
+ Address high = to_space_.page_high();
+ if (allocation_info_.limit < high) {
+ // Incremental marking has lowered the limit to get a
+ // chance to do a step.
+ allocation_info_.limit = Min(
+ allocation_info_.limit + inline_allocation_limit_step_,
+ high);
+ int bytes_allocated = static_cast<int>(new_top - top_on_previous_step_);
+ heap()->incremental_marking()->Step(bytes_allocated);
+ top_on_previous_step_ = new_top;
+ return AllocateRaw(size_in_bytes);
+ } else if (AddFreshPage()) {
+ // Switched to new page. Try allocating again.
+ int bytes_allocated = static_cast<int>(old_top - top_on_previous_step_);
+ heap()->incremental_marking()->Step(bytes_allocated);
+ top_on_previous_step_ = to_space_.page_low();
+ return AllocateRaw(size_in_bytes);
+ } else {
+ return Failure::RetryAfterGC();
+ }
}
#ifdef DEBUG
-// We do not use the SemispaceIterator because verification doesn't assume
+// We do not use the SemiSpaceIterator because verification doesn't assume
// that it works (it depends on the invariants we are checking).
void NewSpace::Verify() {
// The allocation pointer should be in the space or at the very end.
@@ -1327,58 +1098,52 @@ void NewSpace::Verify() {
// There should be objects packed in from the low address up to the
// allocation pointer.
- Address current = to_space_.low();
- while (current < top()) {
- HeapObject* object = HeapObject::FromAddress(current);
-
- // The first word should be a map, and we expect all map pointers to
- // be in map space.
- Map* map = object->map();
- ASSERT(map->IsMap());
- ASSERT(heap()->map_space()->Contains(map));
+ Address current = to_space_.first_page()->body();
+ CHECK_EQ(current, to_space_.space_start());
- // The object should not be code or a map.
- ASSERT(!object->IsMap());
- ASSERT(!object->IsCode());
+ while (current != top()) {
+ if (!NewSpacePage::IsAtEnd(current)) {
+ // The allocation pointer should not be in the middle of an object.
+ CHECK(!NewSpacePage::FromLimit(current)->ContainsLimit(top()) ||
+ current < top());
- // The object itself should look OK.
- object->Verify();
+ HeapObject* object = HeapObject::FromAddress(current);
- // All the interior pointers should be contained in the heap.
- VerifyPointersVisitor visitor;
- int size = object->Size();
- object->IterateBody(map->instance_type(), size, &visitor);
+ // The first word should be a map, and we expect all map pointers to
+ // be in map space.
+ Map* map = object->map();
+ CHECK(map->IsMap());
+ CHECK(heap()->map_space()->Contains(map));
- current += size;
- }
+ // The object should not be code or a map.
+ CHECK(!object->IsMap());
+ CHECK(!object->IsCode());
- // The allocation pointer should not be in the middle of an object.
- ASSERT(current == top());
-}
-#endif
+ // The object itself should look OK.
+ object->Verify();
+ // All the interior pointers should be contained in the heap.
+ VerifyPointersVisitor visitor;
+ int size = object->Size();
+ object->IterateBody(map->instance_type(), size, &visitor);
-bool SemiSpace::Commit() {
- ASSERT(!is_committed());
- if (!heap()->isolate()->memory_allocator()->CommitBlock(
- start_, capacity_, executable())) {
- return false;
+ current += size;
+ } else {
+ // At end of page, switch to next page.
+ NewSpacePage* page = NewSpacePage::FromLimit(current)->next_page();
+ // Next page should be valid.
+ CHECK(!page->is_anchor());
+ current = page->body();
+ }
}
- committed_ = true;
- return true;
-}
-
-bool SemiSpace::Uncommit() {
- ASSERT(is_committed());
- if (!heap()->isolate()->memory_allocator()->UncommitBlock(
- start_, capacity_)) {
- return false;
- }
- committed_ = false;
- return true;
+ // Check semi-spaces.
+ ASSERT_EQ(from_space_.id(), kFromSpace);
+ ASSERT_EQ(to_space_.id(), kToSpace);
+ from_space_.Verify();
+ to_space_.Verify();
}
-
+#endif
// -----------------------------------------------------------------------------
// SemiSpace implementation
@@ -1392,11 +1157,11 @@ bool SemiSpace::Setup(Address start,
// otherwise. In the mark-compact collector, the memory region of the from
// space is used as the marking stack. It requires contiguous memory
// addresses.
- initial_capacity_ = initial_capacity;
+ ASSERT(maximum_capacity >= Page::kPageSize);
+ initial_capacity_ = RoundDown(initial_capacity, Page::kPageSize);
capacity_ = initial_capacity;
- maximum_capacity_ = maximum_capacity;
+ maximum_capacity_ = RoundDown(maximum_capacity, Page::kPageSize);
committed_ = false;
-
start_ = start;
address_mask_ = ~(maximum_capacity - 1);
object_mask_ = address_mask_ | kHeapObjectTagMask;
@@ -1413,81 +1178,258 @@ void SemiSpace::TearDown() {
}
-bool SemiSpace::Grow() {
- // Double the semispace size but only up to maximum capacity.
- int maximum_extra = maximum_capacity_ - capacity_;
- int extra = Min(RoundUp(capacity_, static_cast<int>(OS::AllocateAlignment())),
- maximum_extra);
- if (!heap()->isolate()->memory_allocator()->CommitBlock(
- high(), extra, executable())) {
+bool SemiSpace::Commit() {
+ ASSERT(!is_committed());
+ int pages = capacity_ / Page::kPageSize;
+ Address end = start_ + maximum_capacity_;
+ Address start = end - pages * Page::kPageSize;
+ if (!heap()->isolate()->memory_allocator()->CommitBlock(start,
+ capacity_,
+ executable())) {
return false;
}
- capacity_ += extra;
+
+ NewSpacePage* page = anchor();
+ for (int i = 1; i <= pages; i++) {
+ NewSpacePage* new_page =
+ NewSpacePage::Initialize(heap(), end - i * Page::kPageSize, this);
+ new_page->InsertAfter(page);
+ page = new_page;
+ }
+
+ committed_ = true;
+ Reset();
+ return true;
+}
+
+
+bool SemiSpace::Uncommit() {
+ ASSERT(is_committed());
+ Address start = start_ + maximum_capacity_ - capacity_;
+ if (!heap()->isolate()->memory_allocator()->UncommitBlock(start, capacity_)) {
+ return false;
+ }
+ anchor()->set_next_page(anchor());
+ anchor()->set_prev_page(anchor());
+
+ committed_ = false;
return true;
}
bool SemiSpace::GrowTo(int new_capacity) {
+ ASSERT((new_capacity & Page::kPageAlignmentMask) == 0);
ASSERT(new_capacity <= maximum_capacity_);
ASSERT(new_capacity > capacity_);
+ int pages_before = capacity_ / Page::kPageSize;
+ int pages_after = new_capacity / Page::kPageSize;
+
+ Address end = start_ + maximum_capacity_;
+ Address start = end - new_capacity;
size_t delta = new_capacity - capacity_;
+
ASSERT(IsAligned(delta, OS::AllocateAlignment()));
if (!heap()->isolate()->memory_allocator()->CommitBlock(
- high(), delta, executable())) {
+ start, delta, executable())) {
return false;
}
capacity_ = new_capacity;
+ NewSpacePage* last_page = anchor()->prev_page();
+ ASSERT(last_page != anchor());
+ for (int i = pages_before + 1; i <= pages_after; i++) {
+ Address page_address = end - i * Page::kPageSize;
+ NewSpacePage* new_page = NewSpacePage::Initialize(heap(),
+ page_address,
+ this);
+ new_page->InsertAfter(last_page);
+ Bitmap::Clear(new_page);
+ // Duplicate the flags that were set on the old page.
+ new_page->SetFlags(last_page->GetFlags(),
+ NewSpacePage::kCopyOnFlipFlagsMask);
+ last_page = new_page;
+ }
return true;
}
bool SemiSpace::ShrinkTo(int new_capacity) {
+ ASSERT((new_capacity & Page::kPageAlignmentMask) == 0);
ASSERT(new_capacity >= initial_capacity_);
ASSERT(new_capacity < capacity_);
+ // Semispaces grow backwards from the end of their allocated capacity,
+ // so we find the before and after start addresses relative to the
+ // end of the space.
+ Address space_end = start_ + maximum_capacity_;
+ Address old_start = space_end - capacity_;
size_t delta = capacity_ - new_capacity;
ASSERT(IsAligned(delta, OS::AllocateAlignment()));
- if (!heap()->isolate()->memory_allocator()->UncommitBlock(
- high() - delta, delta)) {
+ if (!heap()->isolate()->memory_allocator()->UncommitBlock(old_start, delta)) {
return false;
}
capacity_ = new_capacity;
+
+ int pages_after = capacity_ / Page::kPageSize;
+ NewSpacePage* new_last_page =
+ NewSpacePage::FromAddress(space_end - pages_after * Page::kPageSize);
+ new_last_page->set_next_page(anchor());
+ anchor()->set_prev_page(new_last_page);
+ ASSERT((current_page_ <= first_page()) && (current_page_ >= new_last_page));
+
return true;
}
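+// FlipPages is the fix-up pass run on each semispace after Swap: the
+// anchor is embedded in the SemiSpace object and has therefore moved, so
+// its neighbours' back-pointers are rewired and every page is retagged
+// with the space's new to-space/from-space role.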
+void SemiSpace::FlipPages(intptr_t flags, intptr_t mask) {
+ anchor_.set_owner(this);
+ // Fix up back-pointers to the anchor. The anchor's address changes when
+ // we swap the semispace objects.
+ anchor_.prev_page()->set_next_page(&anchor_);
+ anchor_.next_page()->set_prev_page(&anchor_);
+
+ bool becomes_to_space = (id_ == kFromSpace);
+ id_ = becomes_to_space ? kToSpace : kFromSpace;
+ NewSpacePage* page = anchor_.next_page();
+ while (page != &anchor_) {
+ page->set_owner(this);
+ page->SetFlags(flags, mask);
+ if (becomes_to_space) {
+ page->ClearFlag(MemoryChunk::IN_FROM_SPACE);
+ page->SetFlag(MemoryChunk::IN_TO_SPACE);
+ page->ClearFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
+ page->ResetLiveBytes();
+ } else {
+ page->SetFlag(MemoryChunk::IN_FROM_SPACE);
+ page->ClearFlag(MemoryChunk::IN_TO_SPACE);
+ }
+ ASSERT(page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE));
+ ASSERT(page->IsFlagSet(MemoryChunk::IN_TO_SPACE) ||
+ page->IsFlagSet(MemoryChunk::IN_FROM_SPACE));
+ page = page->next_page();
+ }
+}
+
+
+void SemiSpace::Reset() {
+ ASSERT(anchor_.next_page() != &anchor_);
+ current_page_ = anchor_.next_page();
+}
+
+
+void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) {
+ // We won't be swapping semispaces without data in them.
+ ASSERT(from->anchor_.next_page() != &from->anchor_);
+ ASSERT(to->anchor_.next_page() != &to->anchor_);
+
+ // Swap bits.
+ SemiSpace tmp = *from;
+ *from = *to;
+ *to = tmp;
+
+ // Fixup back-pointers to the page list anchor now that its address
+ // has changed.
+ // Swap to/from-space bits on pages.
+ // Copy GC flags from old active space (from-space) to new (to-space).
+ intptr_t flags = from->current_page()->GetFlags();
+ to->FlipPages(flags, NewSpacePage::kCopyOnFlipFlagsMask);
+
+ from->FlipPages(0, 0);
+}
+
+
+void SemiSpace::set_age_mark(Address mark) {
+ ASSERT(NewSpacePage::FromLimit(mark)->semi_space() == this);
+ age_mark_ = mark;
+ // Mark all pages up to the one containing mark.
+ NewSpacePageIterator it(space_start(), mark);
+ while (it.has_next()) {
+ it.next()->SetFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
+ }
+}
+
+
#ifdef DEBUG
void SemiSpace::Print() { }
-void SemiSpace::Verify() { }
+void SemiSpace::Verify() {
+ bool is_from_space = (id_ == kFromSpace);
+ NewSpacePage* page = anchor_.next_page();
+ CHECK(anchor_.semi_space() == this);
+ while (page != &anchor_) {
+ CHECK(page->semi_space() == this);
+ CHECK(page->InNewSpace());
+ CHECK(page->IsFlagSet(is_from_space ? MemoryChunk::IN_FROM_SPACE
+ : MemoryChunk::IN_TO_SPACE));
+ CHECK(!page->IsFlagSet(is_from_space ? MemoryChunk::IN_TO_SPACE
+ : MemoryChunk::IN_FROM_SPACE));
+ CHECK(page->IsFlagSet(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING));
+ if (!is_from_space) {
+ // The pointers-from-here-are-interesting flag isn't updated dynamically
+ // on from-space pages, so it might be out of sync with the marking state.
+ if (page->heap()->incremental_marking()->IsMarking()) {
+ CHECK(page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
+ } else {
+ CHECK(!page->IsFlagSet(
+ MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
+ }
+ // TODO(gc): Check that the live_bytes_count_ field matches the
+ // black marking on the page (if we make it match in new-space).
+ }
+ CHECK(page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE));
+ CHECK(page->prev_page()->next_page() == page);
+ page = page->next_page();
+ }
+}
+
+
+void SemiSpace::AssertValidRange(Address start, Address end) {
+ // The addresses must belong to the same semi-space.
+ NewSpacePage* page = NewSpacePage::FromLimit(start);
+ NewSpacePage* end_page = NewSpacePage::FromLimit(end);
+ SemiSpace* space = page->semi_space();
+ CHECK_EQ(space, end_page->semi_space());
+ // The start address must not come after the end address: either both
+ // are on the same page, or the end address is on a later page in the
+ // linked list of semi-space pages.
+ if (page == end_page) {
+ CHECK(start <= end);
+ } else {
+ while (page != end_page) {
+ page = page->next_page();
+ CHECK_NE(page, space->anchor());
+ }
+ }
+}
#endif
// -----------------------------------------------------------------------------
// SemiSpaceIterator implementation.
SemiSpaceIterator::SemiSpaceIterator(NewSpace* space) {
- Initialize(space, space->bottom(), space->top(), NULL);
+ Initialize(space->bottom(), space->top(), NULL);
}
SemiSpaceIterator::SemiSpaceIterator(NewSpace* space,
HeapObjectCallback size_func) {
- Initialize(space, space->bottom(), space->top(), size_func);
+ Initialize(space->bottom(), space->top(), size_func);
}
SemiSpaceIterator::SemiSpaceIterator(NewSpace* space, Address start) {
- Initialize(space, start, space->top(), NULL);
+ Initialize(start, space->top(), NULL);
}
-void SemiSpaceIterator::Initialize(NewSpace* space, Address start,
+SemiSpaceIterator::SemiSpaceIterator(Address from, Address to) {
+ Initialize(from, to, NULL);
+}
+
+
+void SemiSpaceIterator::Initialize(Address start,
Address end,
HeapObjectCallback size_func) {
- ASSERT(space->ToSpaceContains(start));
- ASSERT(space->ToSpaceLow() <= end
- && end <= space->ToSpaceHigh());
- space_ = &space->to_space_;
+ SemiSpace::AssertValidRange(start, end);
current_ = start;
limit_ = end;
size_func_ = size_func;
@@ -1623,7 +1565,7 @@ void NewSpace::ClearHistograms() {
void NewSpace::CollectStatistics() {
ClearHistograms();
SemiSpaceIterator it(this);
- for (HeapObject* obj = it.next(); obj != NULL; obj = it.next())
+ for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next())
RecordAllocation(obj);
}
@@ -1699,7 +1641,6 @@ void NewSpace::RecordPromotion(HeapObject* obj) {
promoted_histogram_[type].increment_bytes(obj->Size());
}
-
// -----------------------------------------------------------------------------
// Free lists for old object spaces implementation
@@ -1708,541 +1649,507 @@ void FreeListNode::set_size(Heap* heap, int size_in_bytes) {
ASSERT(IsAligned(size_in_bytes, kPointerSize));
// We write a map and possibly size information to the block. If the block
- // is big enough to be a ByteArray with at least one extra word (the next
- // pointer), we set its map to be the byte array map and its size to an
+ // is big enough to be a FreeSpace with at least one extra word (the next
+ // pointer), we set its map to be the free space map and its size to an
// appropriate array length for the desired size from HeapObject::Size().
  // If the block is too small (e.g., one or two words) to hold both a size
  // field and a next pointer, we give it a filler map that gives it the
  // correct size.
- if (size_in_bytes > ByteArray::kHeaderSize) {
- set_map(heap->raw_unchecked_byte_array_map());
- // Can't use ByteArray::cast because it fails during deserialization.
- ByteArray* this_as_byte_array = reinterpret_cast<ByteArray*>(this);
- this_as_byte_array->set_length(ByteArray::LengthFor(size_in_bytes));
+ if (size_in_bytes > FreeSpace::kHeaderSize) {
+ set_map_unsafe(heap->raw_unchecked_free_space_map());
+ // Can't use FreeSpace::cast because it fails during deserialization.
+ FreeSpace* this_as_free_space = reinterpret_cast<FreeSpace*>(this);
+ this_as_free_space->set_size(size_in_bytes);
} else if (size_in_bytes == kPointerSize) {
- set_map(heap->raw_unchecked_one_pointer_filler_map());
+ set_map_unsafe(heap->raw_unchecked_one_pointer_filler_map());
} else if (size_in_bytes == 2 * kPointerSize) {
- set_map(heap->raw_unchecked_two_pointer_filler_map());
+ set_map_unsafe(heap->raw_unchecked_two_pointer_filler_map());
} else {
UNREACHABLE();
}
// We would like to ASSERT(Size() == size_in_bytes) but this would fail during
- // deserialization because the byte array map is not done yet.
+ // deserialization because the free space map is not done yet.
}
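// [Editorial sketch, not part of this diff.] The encoding that set_size()
// writes can be decoded from the map word alone. SizeOfFreeBlock() is a
// hypothetical helper (not in the V8 sources) illustrating the three cases:
static int SizeOfFreeBlock(Heap* heap, HeapObject* block) {
  Map* map = block->map();
  if (map == heap->raw_unchecked_free_space_map()) {
    // FreeSpace blocks carry an explicit size field after the map word.
    return reinterpret_cast<FreeSpace*>(block)->Size();
  } else if (map == heap->raw_unchecked_one_pointer_filler_map()) {
    return kPointerSize;                     // one-word filler
  } else {
    return 2 * kPointerSize;                 // two-word filler
  }
}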
-Address FreeListNode::next(Heap* heap) {
+FreeListNode* FreeListNode::next() {
ASSERT(IsFreeListNode(this));
- if (map() == heap->raw_unchecked_byte_array_map()) {
- ASSERT(Size() >= kNextOffset + kPointerSize);
- return Memory::Address_at(address() + kNextOffset);
+ if (map() == HEAP->raw_unchecked_free_space_map()) {
+ ASSERT(map() == NULL || Size() >= kNextOffset + kPointerSize);
+ return reinterpret_cast<FreeListNode*>(
+ Memory::Address_at(address() + kNextOffset));
} else {
- return Memory::Address_at(address() + kPointerSize);
+ return reinterpret_cast<FreeListNode*>(
+ Memory::Address_at(address() + kPointerSize));
}
}
-void FreeListNode::set_next(Heap* heap, Address next) {
+FreeListNode** FreeListNode::next_address() {
ASSERT(IsFreeListNode(this));
- if (map() == heap->raw_unchecked_byte_array_map()) {
+ if (map() == HEAP->raw_unchecked_free_space_map()) {
ASSERT(Size() >= kNextOffset + kPointerSize);
- Memory::Address_at(address() + kNextOffset) = next;
+ return reinterpret_cast<FreeListNode**>(address() + kNextOffset);
} else {
- Memory::Address_at(address() + kPointerSize) = next;
+ return reinterpret_cast<FreeListNode**>(address() + kPointerSize);
}
}
-OldSpaceFreeList::OldSpaceFreeList(Heap* heap, AllocationSpace owner)
- : heap_(heap),
- owner_(owner) {
- Reset();
+void FreeListNode::set_next(FreeListNode* next) {
+ ASSERT(IsFreeListNode(this));
+ // While we are booting the VM the free space map will actually be null. So
+ // we have to make sure that we don't try to use it for anything at that
+ // stage.
+ if (map() == HEAP->raw_unchecked_free_space_map()) {
+ ASSERT(map() == NULL || Size() >= kNextOffset + kPointerSize);
+ Memory::Address_at(address() + kNextOffset) =
+ reinterpret_cast<Address>(next);
+ } else {
+ Memory::Address_at(address() + kPointerSize) =
+ reinterpret_cast<Address>(next);
+ }
}
-void OldSpaceFreeList::Reset() {
- available_ = 0;
- for (int i = 0; i < kFreeListsLength; i++) {
- free_[i].head_node_ = NULL;
- }
- needs_rebuild_ = false;
- finger_ = kHead;
- free_[kHead].next_size_ = kEnd;
+FreeList::FreeList(PagedSpace* owner)
+ : owner_(owner), heap_(owner->heap()) {
+ Reset();
}
-void OldSpaceFreeList::RebuildSizeList() {
- ASSERT(needs_rebuild_);
- int cur = kHead;
- for (int i = cur + 1; i < kFreeListsLength; i++) {
- if (free_[i].head_node_ != NULL) {
- free_[cur].next_size_ = i;
- cur = i;
- }
- }
- free_[cur].next_size_ = kEnd;
- needs_rebuild_ = false;
+void FreeList::Reset() {
+ available_ = 0;
+ small_list_ = NULL;
+ medium_list_ = NULL;
+ large_list_ = NULL;
+ huge_list_ = NULL;
}
-int OldSpaceFreeList::Free(Address start, int size_in_bytes) {
-#ifdef DEBUG
- Isolate::Current()->memory_allocator()->ZapBlock(start, size_in_bytes);
-#endif
+int FreeList::Free(Address start, int size_in_bytes) {
+ if (size_in_bytes == 0) return 0;
FreeListNode* node = FreeListNode::FromAddress(start);
node->set_size(heap_, size_in_bytes);
- // We don't use the freelists in compacting mode. This makes it more like a
- // GC that only has mark-sweep-compact and doesn't have a mark-sweep
- // collector.
- if (FLAG_always_compact) {
- return size_in_bytes;
- }
-
- // Early return to drop too-small blocks on the floor (one or two word
- // blocks cannot hold a map pointer, a size field, and a pointer to the
- // next block in the free list).
- if (size_in_bytes < kMinBlockSize) {
- return size_in_bytes;
+ // Early return to drop too-small blocks on the floor.
+ if (size_in_bytes < kSmallListMin) return size_in_bytes;
+
+ // Insert other blocks at the head of a free list of the appropriate
+ // magnitude.
+ if (size_in_bytes <= kSmallListMax) {
+ node->set_next(small_list_);
+ small_list_ = node;
+ } else if (size_in_bytes <= kMediumListMax) {
+ node->set_next(medium_list_);
+ medium_list_ = node;
+ } else if (size_in_bytes <= kLargeListMax) {
+ node->set_next(large_list_);
+ large_list_ = node;
+ } else {
+ node->set_next(huge_list_);
+ huge_list_ = node;
}
-
- // Insert other blocks at the head of an exact free list.
- int index = size_in_bytes >> kPointerSizeLog2;
- node->set_next(heap_, free_[index].head_node_);
- free_[index].head_node_ = node->address();
available_ += size_in_bytes;
- needs_rebuild_ = true;
+ ASSERT(IsVeryLong() || available_ == SumFreeLists());
return 0;
}
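// [Editorial sketch, not part of this diff.] The size-class dispatch in
// Free() above, factored as a hypothetical helper; the k*ListMax thresholds
// are the spaces.h constants the code already uses.
FreeListNode** FreeList::ListFor(int size_in_bytes) {
  if (size_in_bytes <= kSmallListMax) return &small_list_;
  if (size_in_bytes <= kMediumListMax) return &medium_list_;
  if (size_in_bytes <= kLargeListMax) return &large_list_;
  return &huge_list_;
}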
-MaybeObject* OldSpaceFreeList::Allocate(int size_in_bytes, int* wasted_bytes) {
- ASSERT(0 < size_in_bytes);
- ASSERT(size_in_bytes <= kMaxBlockSize);
- ASSERT(IsAligned(size_in_bytes, kPointerSize));
+FreeListNode* FreeList::PickNodeFromList(FreeListNode** list, int* node_size) {
+ FreeListNode* node = *list;
- if (needs_rebuild_) RebuildSizeList();
- int index = size_in_bytes >> kPointerSizeLog2;
- // Check for a perfect fit.
- if (free_[index].head_node_ != NULL) {
- FreeListNode* node = FreeListNode::FromAddress(free_[index].head_node_);
- // If this was the last block of its size, remove the size.
- if ((free_[index].head_node_ = node->next(heap_)) == NULL)
- RemoveSize(index);
- available_ -= size_in_bytes;
- *wasted_bytes = 0;
- ASSERT(!FLAG_always_compact); // We only use the freelists with mark-sweep.
- return node;
- }
- // Search the size list for the best fit.
- int prev = finger_ < index ? finger_ : kHead;
- int cur = FindSize(index, &prev);
- ASSERT(index < cur);
- if (cur == kEnd) {
- // No large enough size in list.
- *wasted_bytes = 0;
- return Failure::RetryAfterGC(owner_);
- }
- ASSERT(!FLAG_always_compact); // We only use the freelists with mark-sweep.
- int rem = cur - index;
- int rem_bytes = rem << kPointerSizeLog2;
- FreeListNode* cur_node = FreeListNode::FromAddress(free_[cur].head_node_);
- ASSERT(cur_node->Size() == (cur << kPointerSizeLog2));
- FreeListNode* rem_node = FreeListNode::FromAddress(free_[cur].head_node_ +
- size_in_bytes);
- // Distinguish the cases prev < rem < cur and rem <= prev < cur
- // to avoid many redundant tests and calls to Insert/RemoveSize.
- if (prev < rem) {
- // Simple case: insert rem between prev and cur.
- finger_ = prev;
- free_[prev].next_size_ = rem;
- // If this was the last block of size cur, remove the size.
- if ((free_[cur].head_node_ = cur_node->next(heap_)) == NULL) {
- free_[rem].next_size_ = free_[cur].next_size_;
- } else {
- free_[rem].next_size_ = cur;
- }
- // Add the remainder block.
- rem_node->set_size(heap_, rem_bytes);
- rem_node->set_next(heap_, free_[rem].head_node_);
- free_[rem].head_node_ = rem_node->address();
+ if (node == NULL) return NULL;
+
+ while (node != NULL &&
+ Page::FromAddress(node->address())->IsEvacuationCandidate()) {
+ available_ -= node->Size();
+ node = node->next();
+ }
+
+ if (node != NULL) {
+ *node_size = node->Size();
+ *list = node->next();
} else {
- // If this was the last block of size cur, remove the size.
- if ((free_[cur].head_node_ = cur_node->next(heap_)) == NULL) {
- finger_ = prev;
- free_[prev].next_size_ = free_[cur].next_size_;
- }
- if (rem_bytes < kMinBlockSize) {
- // Too-small remainder is wasted.
- rem_node->set_size(heap_, rem_bytes);
- available_ -= size_in_bytes + rem_bytes;
- *wasted_bytes = rem_bytes;
- return cur_node;
- }
- // Add the remainder block and, if needed, insert its size.
- rem_node->set_size(heap_, rem_bytes);
- rem_node->set_next(heap_, free_[rem].head_node_);
- free_[rem].head_node_ = rem_node->address();
- if (rem_node->next(heap_) == NULL) InsertSize(rem);
+ *list = NULL;
}
- available_ -= size_in_bytes;
- *wasted_bytes = 0;
- return cur_node;
+
+ return node;
}
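// [Editorial note, not part of this diff.] A subtlety of PickNodeFromList():
// nodes whose page is an evacuation candidate are unlinked and abandoned
// (their bytes are subtracted from available_), since memory on a page about
// to be evacuated must not be handed out again. In sketch form:
//
//   int node_size = 0;
//   FreeListNode* n = PickNodeFromList(&small_list_, &node_size);
//   // n == NULL means the list was empty or held only nodes on evacuation
//   // candidates; in both cases the list head has been advanced past them.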
-void OldSpaceFreeList::MarkNodes() {
- for (int i = 0; i < kFreeListsLength; i++) {
- Address cur_addr = free_[i].head_node_;
- while (cur_addr != NULL) {
- FreeListNode* cur_node = FreeListNode::FromAddress(cur_addr);
- cur_addr = cur_node->next(heap_);
- cur_node->SetMark();
- }
+FreeListNode* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
+ FreeListNode* node = NULL;
+
+ if (size_in_bytes <= kSmallAllocationMax) {
+ node = PickNodeFromList(&small_list_, node_size);
+ if (node != NULL) return node;
}
-}
+ if (size_in_bytes <= kMediumAllocationMax) {
+ node = PickNodeFromList(&medium_list_, node_size);
+ if (node != NULL) return node;
+ }
-#ifdef DEBUG
-bool OldSpaceFreeList::Contains(FreeListNode* node) {
- for (int i = 0; i < kFreeListsLength; i++) {
- Address cur_addr = free_[i].head_node_;
- while (cur_addr != NULL) {
- FreeListNode* cur_node = FreeListNode::FromAddress(cur_addr);
- if (cur_node == node) return true;
- cur_addr = cur_node->next(heap_);
+ if (size_in_bytes <= kLargeAllocationMax) {
+ node = PickNodeFromList(&large_list_, node_size);
+ if (node != NULL) return node;
+ }
+
+ for (FreeListNode** cur = &huge_list_;
+ *cur != NULL;
+ cur = (*cur)->next_address()) {
+ FreeListNode* cur_node = *cur;
+ while (cur_node != NULL &&
+ Page::FromAddress(cur_node->address())->IsEvacuationCandidate()) {
+ available_ -= reinterpret_cast<FreeSpace*>(cur_node)->Size();
+ cur_node = cur_node->next();
+ }
+
+ *cur = cur_node;
+ if (cur_node == NULL) break;
+
+ ASSERT((*cur)->map() == HEAP->raw_unchecked_free_space_map());
+ FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(*cur);
+ int size = cur_as_free_space->Size();
+ if (size >= size_in_bytes) {
+ // Large enough node found. Unlink it from the list.
+ node = *cur;
+ *node_size = size;
+ *cur = node->next();
+ break;
}
}
- return false;
+
+ return node;
}
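// [Editorial note, not part of this diff.] Why FindNodeFor() can pop list
// heads without a size check: the k*AllocationMax bounds (see spaces.h) are
// chosen so that every node in a consulted list is at least as large as any
// request routed to it. Only huge_list_ lacks such a lower bound relative to
// the request, which is why it alone gets the explicit first-fit size scan.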
-#endif
-FixedSizeFreeList::FixedSizeFreeList(Heap* heap,
- AllocationSpace owner,
- int object_size)
- : heap_(heap), owner_(owner), object_size_(object_size) {
- Reset();
-}
+// Allocation on the old space free list. If it succeeds then a new linear
+// allocation area has been set up with the top and limit of the space. If
+// the allocation fails then NULL is returned, and the caller can perform a GC
+// or allocate a new page before retrying.
+HeapObject* FreeList::Allocate(int size_in_bytes) {
+ ASSERT(0 < size_in_bytes);
+ ASSERT(size_in_bytes <= kMaxBlockSize);
+ ASSERT(IsAligned(size_in_bytes, kPointerSize));
+ // Don't free list allocate if there is linear space available.
+ ASSERT(owner_->limit() - owner_->top() < size_in_bytes);
+ int new_node_size = 0;
+ FreeListNode* new_node = FindNodeFor(size_in_bytes, &new_node_size);
+ if (new_node == NULL) return NULL;
-void FixedSizeFreeList::Reset() {
- available_ = 0;
- head_ = tail_ = NULL;
-}
+ available_ -= new_node_size;
+ ASSERT(IsVeryLong() || available_ == SumFreeLists());
+
+ int bytes_left = new_node_size - size_in_bytes;
+ ASSERT(bytes_left >= 0);
+ int old_linear_size = static_cast<int>(owner_->limit() - owner_->top());
+ // Mark the old linear allocation area with a free space map so it can be
+ // skipped when scanning the heap. This also puts it back in the free list
+ // if it is big enough.
+ owner_->Free(owner_->top(), old_linear_size);
-void FixedSizeFreeList::Free(Address start) {
#ifdef DEBUG
- Isolate::Current()->memory_allocator()->ZapBlock(start, object_size_);
+ for (int i = 0; i < size_in_bytes / kPointerSize; i++) {
+ reinterpret_cast<Object**>(new_node->address())[i] = Smi::FromInt(0);
+ }
#endif
- // We only use the freelists with mark-sweep.
- ASSERT(!HEAP->mark_compact_collector()->IsCompacting());
- FreeListNode* node = FreeListNode::FromAddress(start);
- node->set_size(heap_, object_size_);
- node->set_next(heap_, NULL);
- if (head_ == NULL) {
- tail_ = head_ = node->address();
+
+ owner_->heap()->incremental_marking()->OldSpaceStep(
+ size_in_bytes - old_linear_size);
+
+ // The old-space-step might have finished sweeping and restarted marking.
+ // Verify that it did not turn the page of the new node into an evacuation
+ // candidate.
+ ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(new_node));
+
+ const int kThreshold = IncrementalMarking::kAllocatedThreshold;
+
+ // Memory in the linear allocation area is counted as allocated. We may free
+ // a little of this again immediately - see below.
+ owner_->Allocate(new_node_size);
+
+ if (bytes_left > kThreshold &&
+ owner_->heap()->incremental_marking()->IsMarkingIncomplete() &&
+ FLAG_incremental_marking_steps) {
+ int linear_size = owner_->RoundSizeDownToObjectAlignment(kThreshold);
+ // We don't want to give too large linear areas to the allocator while
+ // incremental marking is going on, because we won't check again whether
+ // we want to do another increment until the linear area is used up.
+ owner_->Free(new_node->address() + size_in_bytes + linear_size,
+ new_node_size - size_in_bytes - linear_size);
+ owner_->SetTop(new_node->address() + size_in_bytes,
+ new_node->address() + size_in_bytes + linear_size);
+ } else if (bytes_left > 0) {
+ // Normally we give the rest of the node to the allocator as its new
+ // linear allocation area.
+ owner_->SetTop(new_node->address() + size_in_bytes,
+ new_node->address() + new_node_size);
} else {
- FreeListNode::FromAddress(tail_)->set_next(heap_, node->address());
- tail_ = node->address();
+    // TODO(gc) Try not freeing the linear allocation region when bytes_left
+    // is zero.
+ owner_->SetTop(NULL, NULL);
}
- available_ += object_size_;
+
+ return new_node;
}
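// [Editorial sketch, not part of this diff.] Post-condition of a successful
// Allocate(): the object occupies the start of the carved node and the
// owner's linear area covers (a prefix of) the remainder.
//
//   HeapObject* obj = free_list_.Allocate(size_in_bytes);
//   if (obj != NULL) {
//     ASSERT(owner_->top() == obj->address() + size_in_bytes ||
//            owner_->top() == NULL);  // NULL when the node fit exactly
//   }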
-MaybeObject* FixedSizeFreeList::Allocate() {
- if (head_ == NULL) {
- return Failure::RetryAfterGC(owner_);
+static intptr_t CountFreeListItemsInList(FreeListNode* n, Page* p) {
+ intptr_t sum = 0;
+ while (n != NULL) {
+ if (Page::FromAddress(n->address()) == p) {
+ FreeSpace* free_space = reinterpret_cast<FreeSpace*>(n);
+ sum += free_space->Size();
+ }
+ n = n->next();
}
+ return sum;
+}
- ASSERT(!FLAG_always_compact); // We only use the freelists with mark-sweep.
- FreeListNode* node = FreeListNode::FromAddress(head_);
- head_ = node->next(heap_);
- available_ -= object_size_;
- return node;
+
+void FreeList::CountFreeListItems(Page* p, SizeStats* sizes) {
+ sizes->huge_size_ = CountFreeListItemsInList(huge_list_, p);
+ if (sizes->huge_size_ < Page::kObjectAreaSize) {
+ sizes->small_size_ = CountFreeListItemsInList(small_list_, p);
+ sizes->medium_size_ = CountFreeListItemsInList(medium_list_, p);
+ sizes->large_size_ = CountFreeListItemsInList(large_list_, p);
+ } else {
+ sizes->small_size_ = 0;
+ sizes->medium_size_ = 0;
+ sizes->large_size_ = 0;
+ }
}
-void FixedSizeFreeList::MarkNodes() {
- Address cur_addr = head_;
- while (cur_addr != NULL && cur_addr != tail_) {
- FreeListNode* cur_node = FreeListNode::FromAddress(cur_addr);
- cur_addr = cur_node->next(heap_);
- cur_node->SetMark();
+static intptr_t EvictFreeListItemsInList(FreeListNode** n, Page* p) {
+ intptr_t sum = 0;
+ while (*n != NULL) {
+ if (Page::FromAddress((*n)->address()) == p) {
+ FreeSpace* free_space = reinterpret_cast<FreeSpace*>(*n);
+ sum += free_space->Size();
+ *n = (*n)->next();
+ } else {
+ n = (*n)->next_address();
+ }
}
+ return sum;
}
-// -----------------------------------------------------------------------------
-// OldSpace implementation
+intptr_t FreeList::EvictFreeListItems(Page* p) {
+ intptr_t sum = EvictFreeListItemsInList(&huge_list_, p);
-void OldSpace::PrepareForMarkCompact(bool will_compact) {
- // Call prepare of the super class.
- PagedSpace::PrepareForMarkCompact(will_compact);
-
- if (will_compact) {
- // Reset relocation info. During a compacting collection, everything in
- // the space is considered 'available' and we will rediscover live data
- // and waste during the collection.
- MCResetRelocationInfo();
- ASSERT(Available() == Capacity());
- } else {
- // During a non-compacting collection, everything below the linear
- // allocation pointer is considered allocated (everything above is
- // available) and we will rediscover available and wasted bytes during
- // the collection.
- accounting_stats_.AllocateBytes(free_list_.available());
- accounting_stats_.FillWastedBytes(Waste());
+ if (sum < Page::kObjectAreaSize) {
+ sum += EvictFreeListItemsInList(&small_list_, p) +
+ EvictFreeListItemsInList(&medium_list_, p) +
+ EvictFreeListItemsInList(&large_list_, p);
}
- // Clear the free list before a full GC---it will be rebuilt afterward.
- free_list_.Reset();
+ available_ -= static_cast<int>(sum);
+
+ return sum;
+}
+
+
+#ifdef DEBUG
+intptr_t FreeList::SumFreeList(FreeListNode* cur) {
+ intptr_t sum = 0;
+ while (cur != NULL) {
+ ASSERT(cur->map() == HEAP->raw_unchecked_free_space_map());
+ FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(cur);
+ sum += cur_as_free_space->Size();
+ cur = cur->next();
+ }
+ return sum;
}
-void OldSpace::MCCommitRelocationInfo() {
- // Update fast allocation info.
- allocation_info_.top = mc_forwarding_info_.top;
- allocation_info_.limit = mc_forwarding_info_.limit;
- ASSERT(allocation_info_.VerifyPagedAllocation());
+static const int kVeryLongFreeList = 500;
- // The space is compacted and we haven't yet built free lists or
- // wasted any space.
- ASSERT(Waste() == 0);
- ASSERT(AvailableFree() == 0);
- // Build the free list for the space.
- int computed_size = 0;
- PageIterator it(this, PageIterator::PAGES_USED_BY_MC);
- while (it.has_next()) {
- Page* p = it.next();
- // Space below the relocation pointer is allocated.
- computed_size +=
- static_cast<int>(p->AllocationWatermark() - p->ObjectAreaStart());
- if (it.has_next()) {
- // Free the space at the top of the page.
- int extra_size =
- static_cast<int>(p->ObjectAreaEnd() - p->AllocationWatermark());
- if (extra_size > 0) {
- int wasted_bytes = free_list_.Free(p->AllocationWatermark(),
- extra_size);
- // The bytes we have just "freed" to add to the free list were
- // already accounted as available.
- accounting_stats_.WasteBytes(wasted_bytes);
- }
- }
+int FreeList::FreeListLength(FreeListNode* cur) {
+ int length = 0;
+ while (cur != NULL) {
+ length++;
+ cur = cur->next();
+ if (length == kVeryLongFreeList) return length;
}
+ return length;
+}
+
- // Make sure the computed size - based on the used portion of the pages in
- // use - matches the size obtained while computing forwarding addresses.
- ASSERT(computed_size == Size());
+bool FreeList::IsVeryLong() {
+ if (FreeListLength(small_list_) == kVeryLongFreeList) return true;
+ if (FreeListLength(medium_list_) == kVeryLongFreeList) return true;
+ if (FreeListLength(large_list_) == kVeryLongFreeList) return true;
+ if (FreeListLength(huge_list_) == kVeryLongFreeList) return true;
+ return false;
}
+// This can take a very long time because it is linear in the number of entries
+// on the free list, so it should not be called if FreeListLength returns
+// kVeryLongFreeList.
+intptr_t FreeList::SumFreeLists() {
+ intptr_t sum = SumFreeList(small_list_);
+ sum += SumFreeList(medium_list_);
+ sum += SumFreeList(large_list_);
+ sum += SumFreeList(huge_list_);
+ return sum;
+}
+#endif
+
+
+// -----------------------------------------------------------------------------
+// OldSpace implementation
+
bool NewSpace::ReserveSpace(int bytes) {
// We can't reliably unpack a partial snapshot that needs more new space
- // space than the minimum NewSpace size.
+  // than the minimum NewSpace size. The limit can be set lower than
+ // the end of new space either because there is more space on the next page
+ // or because we have lowered the limit in order to get periodic incremental
+ // marking. The most reliable way to ensure that there is linear space is
+ // to do the allocation, then rewind the limit.
ASSERT(bytes <= InitialCapacity());
- Address limit = allocation_info_.limit;
+ MaybeObject* maybe = AllocateRaw(bytes);
+ Object* object = NULL;
+ if (!maybe->ToObject(&object)) return false;
+ HeapObject* allocation = HeapObject::cast(object);
Address top = allocation_info_.top;
- return limit - top >= bytes;
+ if ((top - bytes) == allocation->address()) {
+ allocation_info_.top = allocation->address();
+ return true;
+ }
+ // There may be a borderline case here where the allocation succeeded, but
+ // the limit and top have moved on to a new page. In that case we try again.
+ return ReserveSpace(bytes);
+}
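// [Editorial note, not part of this diff.] The allocate-then-rewind trick
// above, pictured:
//
//   after AllocateRaw:  ... | object (bytes) | top ......... limit |
//   after the rewind:   ... | top ...... (>= bytes) ........ limit |
//
// Moving top back onto the object's address "un-allocates" it while proving
// that at least `bytes` of contiguous linear space exist on a single page.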
+
+
+void PagedSpace::PrepareForMarkCompact() {
+ // We don't have a linear allocation area while sweeping. It will be restored
+ // on the first allocation after the sweep.
+ // Mark the old linear allocation area with a free space map so it can be
+ // skipped when scanning the heap.
+ int old_linear_size = static_cast<int>(limit() - top());
+ Free(top(), old_linear_size);
+ SetTop(NULL, NULL);
+
+ // Stop lazy sweeping and clear marking bits for unswept pages.
+ if (first_unswept_page_ != NULL) {
+ Page* p = first_unswept_page_;
+ do {
+ // Do not use ShouldBeSweptLazily predicate here.
+ // New evacuation candidates were selected but they still have
+ // to be swept before collection starts.
+ if (!p->WasSwept()) {
+ Bitmap::Clear(p);
+ if (FLAG_gc_verbose) {
+ PrintF("Sweeping 0x%" V8PRIxPTR " lazily abandoned.\n",
+ reinterpret_cast<intptr_t>(p));
+ }
+ }
+ p = p->next_page();
+ } while (p != anchor());
+ }
+ first_unswept_page_ = Page::FromAddress(NULL);
+
+ // Clear the free list before a full GC---it will be rebuilt afterward.
+ free_list_.Reset();
}
-void PagedSpace::FreePages(Page* prev, Page* last) {
- if (last == AllocationTopPage()) {
- // Pages are already at the end of used pages.
- return;
- }
+bool PagedSpace::ReserveSpace(int size_in_bytes) {
+ ASSERT(size_in_bytes <= Page::kMaxHeapObjectSize);
+ ASSERT(size_in_bytes == RoundSizeDownToObjectAlignment(size_in_bytes));
+ Address current_top = allocation_info_.top;
+ Address new_top = current_top + size_in_bytes;
+ if (new_top <= allocation_info_.limit) return true;
- Page* first = NULL;
+ HeapObject* new_area = free_list_.Allocate(size_in_bytes);
+ if (new_area == NULL) new_area = SlowAllocateRaw(size_in_bytes);
+ if (new_area == NULL) return false;
- // Remove pages from the list.
- if (prev == NULL) {
- first = first_page_;
- first_page_ = last->next_page();
- } else {
- first = prev->next_page();
- heap()->isolate()->memory_allocator()->SetNextPage(
- prev, last->next_page());
- }
+ int old_linear_size = static_cast<int>(limit() - top());
+  // Mark the old linear allocation area with a free space map so it can be
+ // skipped when scanning the heap. This also puts it back in the free list
+ // if it is big enough.
+ Free(top(), old_linear_size);
- // Attach it after the last page.
- heap()->isolate()->memory_allocator()->SetNextPage(last_page_, first);
- last_page_ = last;
- heap()->isolate()->memory_allocator()->SetNextPage(last, NULL);
+ SetTop(new_area->address(), new_area->address() + size_in_bytes);
+ Allocate(size_in_bytes);
+ return true;
+}
- // Clean them up.
- do {
- first->InvalidateWatermark(true);
- first->SetAllocationWatermark(first->ObjectAreaStart());
- first->SetCachedAllocationWatermark(first->ObjectAreaStart());
- first->SetRegionMarks(Page::kAllRegionsCleanMarks);
- first = first->next_page();
- } while (first != NULL);
-
- // Order of pages in this space might no longer be consistent with
- // order of pages in chunks.
- page_list_is_chunk_ordered_ = false;
-}
-
-
-void PagedSpace::RelinkPageListInChunkOrder(bool deallocate_blocks) {
- const bool add_to_freelist = true;
-
- // Mark used and unused pages to properly fill unused pages
- // after reordering.
- PageIterator all_pages_iterator(this, PageIterator::ALL_PAGES);
- Page* last_in_use = AllocationTopPage();
- bool in_use = true;
-
- while (all_pages_iterator.has_next()) {
- Page* p = all_pages_iterator.next();
- p->SetWasInUseBeforeMC(in_use);
- if (p == last_in_use) {
- // We passed a page containing allocation top. All consequent
- // pages are not used.
- in_use = false;
- }
- }
- if (page_list_is_chunk_ordered_) return;
+// You have to call this last, since the implementation from PagedSpace
+// doesn't know that memory was 'promised' to large object space.
+bool LargeObjectSpace::ReserveSpace(int bytes) {
+ return heap()->OldGenerationSpaceAvailable() >= bytes;
+}
- Page* new_last_in_use = Page::FromAddress(NULL);
- heap()->isolate()->memory_allocator()->RelinkPageListInChunkOrder(
- this, &first_page_, &last_page_, &new_last_in_use);
- ASSERT(new_last_in_use->is_valid());
- if (new_last_in_use != last_in_use) {
- // Current allocation top points to a page which is now in the middle
- // of page list. We should move allocation top forward to the new last
- // used page so various object iterators will continue to work properly.
- int size_in_bytes = static_cast<int>(PageAllocationLimit(last_in_use) -
- last_in_use->AllocationTop());
+bool PagedSpace::AdvanceSweeper(intptr_t bytes_to_sweep) {
+ if (IsSweepingComplete()) return true;
- last_in_use->SetAllocationWatermark(last_in_use->AllocationTop());
- if (size_in_bytes > 0) {
- Address start = last_in_use->AllocationTop();
- if (deallocate_blocks) {
- accounting_stats_.AllocateBytes(size_in_bytes);
- DeallocateBlock(start, size_in_bytes, add_to_freelist);
- } else {
- heap()->CreateFillerObjectAt(start, size_in_bytes);
+ intptr_t freed_bytes = 0;
+ Page* p = first_unswept_page_;
+ do {
+ Page* next_page = p->next_page();
+ if (ShouldBeSweptLazily(p)) {
+ if (FLAG_gc_verbose) {
+ PrintF("Sweeping 0x%" V8PRIxPTR " lazily advanced.\n",
+ reinterpret_cast<intptr_t>(p));
}
+ freed_bytes += MarkCompactCollector::SweepConservatively(this, p);
}
+ p = next_page;
+ } while (p != anchor() && freed_bytes < bytes_to_sweep);
- // New last in use page was in the middle of the list before
- // sorting so it full.
- SetTop(new_last_in_use->AllocationTop());
-
- ASSERT(AllocationTopPage() == new_last_in_use);
- ASSERT(AllocationTopPage()->WasInUseBeforeMC());
+ if (p == anchor()) {
+ first_unswept_page_ = Page::FromAddress(NULL);
+ } else {
+ first_unswept_page_ = p;
}
- PageIterator pages_in_use_iterator(this, PageIterator::PAGES_IN_USE);
- while (pages_in_use_iterator.has_next()) {
- Page* p = pages_in_use_iterator.next();
- if (!p->WasInUseBeforeMC()) {
- // Empty page is in the middle of a sequence of used pages.
- // Allocate it as a whole and deallocate immediately.
- int size_in_bytes = static_cast<int>(PageAllocationLimit(p) -
- p->ObjectAreaStart());
+ heap()->LowerOldGenLimits(freed_bytes);
- p->SetAllocationWatermark(p->ObjectAreaStart());
- Address start = p->ObjectAreaStart();
- if (deallocate_blocks) {
- accounting_stats_.AllocateBytes(size_in_bytes);
- DeallocateBlock(start, size_in_bytes, add_to_freelist);
- } else {
- heap()->CreateFillerObjectAt(start, size_in_bytes);
- }
- }
- }
+ heap()->FreeQueuedChunks();
- page_list_is_chunk_ordered_ = true;
+ return IsSweepingComplete();
}
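// [Editorial note, not part of this diff.] Callers drive lazy sweeping with
// a byte budget proportional to what they are about to allocate, paying off
// the sweeping debt incrementally (compare SlowAllocateRaw below):
//
//   AdvanceSweeper(size_in_bytes);  // try to free at least this many bytes
//   AdvanceSweeper(kMaxInt);        // last resort: sweep everything left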
-void PagedSpace::PrepareForMarkCompact(bool will_compact) {
- if (will_compact) {
- RelinkPageListInChunkOrder(false);
- }
-}
+void PagedSpace::EvictEvacuationCandidatesFromFreeLists() {
+ if (allocation_info_.top >= allocation_info_.limit) return;
+ if (Page::FromAllocationTop(allocation_info_.top)->IsEvacuationCandidate()) {
+ // Create filler object to keep page iterable if it was iterable.
+ int remaining =
+ static_cast<int>(allocation_info_.limit - allocation_info_.top);
+ heap()->CreateFillerObjectAt(allocation_info_.top, remaining);
-bool PagedSpace::ReserveSpace(int bytes) {
- Address limit = allocation_info_.limit;
- Address top = allocation_info_.top;
- if (limit - top >= bytes) return true;
-
- // There wasn't enough space in the current page. Lets put the rest
- // of the page on the free list and start a fresh page.
- PutRestOfCurrentPageOnFreeList(TopPageOf(allocation_info_));
-
- Page* reserved_page = TopPageOf(allocation_info_);
- int bytes_left_to_reserve = bytes;
- while (bytes_left_to_reserve > 0) {
- if (!reserved_page->next_page()->is_valid()) {
- if (heap()->OldGenerationAllocationLimitReached()) return false;
- Expand(reserved_page);
- }
- bytes_left_to_reserve -= Page::kPageSize;
- reserved_page = reserved_page->next_page();
- if (!reserved_page->is_valid()) return false;
- }
- ASSERT(TopPageOf(allocation_info_)->next_page()->is_valid());
- TopPageOf(allocation_info_)->next_page()->InvalidateWatermark(true);
- SetAllocationInfo(&allocation_info_,
- TopPageOf(allocation_info_)->next_page());
- return true;
+ allocation_info_.top = NULL;
+ allocation_info_.limit = NULL;
+ }
}
-// You have to call this last, since the implementation from PagedSpace
-// doesn't know that memory was 'promised' to large object space.
-bool LargeObjectSpace::ReserveSpace(int bytes) {
- return heap()->OldGenerationSpaceAvailable() >= bytes;
-}
+HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
+ // Allocation in this space has failed.
+  // If there are unswept pages, advance the lazy sweeper and retry the free
+  // list before allocating a new page.
+ if (first_unswept_page_->is_valid()) {
+ AdvanceSweeper(size_in_bytes);
-// Slow case for normal allocation. Try in order: (1) allocate in the next
-// page in the space, (2) allocate off the space's free list, (3) expand the
-// space, (4) fail.
-HeapObject* OldSpace::SlowAllocateRaw(int size_in_bytes) {
- // Linear allocation in this space has failed. If there is another page
- // in the space, move to that page and allocate there. This allocation
- // should succeed (size_in_bytes should not be greater than a page's
- // object area size).
- Page* current_page = TopPageOf(allocation_info_);
- if (current_page->next_page()->is_valid()) {
- return AllocateInNextPage(current_page, size_in_bytes);
- }
-
- // There is no next page in this space. Try free list allocation unless that
- // is currently forbidden.
- if (!heap()->linear_allocation()) {
- int wasted_bytes;
- Object* result;
- MaybeObject* maybe = free_list_.Allocate(size_in_bytes, &wasted_bytes);
- accounting_stats_.WasteBytes(wasted_bytes);
- if (maybe->ToObject(&result)) {
- accounting_stats_.AllocateBytes(size_in_bytes);
-
- HeapObject* obj = HeapObject::cast(result);
- Page* p = Page::FromAddress(obj->address());
-
- if (obj->address() >= p->AllocationWatermark()) {
- // There should be no hole between the allocation watermark
- // and allocated object address.
- // Memory above the allocation watermark was not swept and
- // might contain garbage pointers to new space.
- ASSERT(obj->address() == p->AllocationWatermark());
- p->SetAllocationWatermark(obj->address() + size_in_bytes);
- }
-
- return obj;
- }
+ // Retry the free list allocation.
+ HeapObject* object = free_list_.Allocate(size_in_bytes);
+ if (object != NULL) return object;
}
// Free list allocation failed and there is no next page. Fail if we have
@@ -2254,60 +2161,22 @@ HeapObject* OldSpace::SlowAllocateRaw(int size_in_bytes) {
}
// Try to expand the space and allocate in the new next page.
- ASSERT(!current_page->next_page()->is_valid());
- if (Expand(current_page)) {
- return AllocateInNextPage(current_page, size_in_bytes);
- }
-
- // Finally, fail.
- return NULL;
-}
-
-
-void OldSpace::PutRestOfCurrentPageOnFreeList(Page* current_page) {
- current_page->SetAllocationWatermark(allocation_info_.top);
- int free_size =
- static_cast<int>(current_page->ObjectAreaEnd() - allocation_info_.top);
- if (free_size > 0) {
- int wasted_bytes = free_list_.Free(allocation_info_.top, free_size);
- accounting_stats_.WasteBytes(wasted_bytes);
+ if (Expand()) {
+ return free_list_.Allocate(size_in_bytes);
}
-}
+ // Last ditch, sweep all the remaining pages to try to find space. This may
+ // cause a pause.
+ if (!IsSweepingComplete()) {
+ AdvanceSweeper(kMaxInt);
-void FixedSpace::PutRestOfCurrentPageOnFreeList(Page* current_page) {
- current_page->SetAllocationWatermark(allocation_info_.top);
- int free_size =
- static_cast<int>(current_page->ObjectAreaEnd() - allocation_info_.top);
- // In the fixed space free list all the free list items have the right size.
- // We use up the rest of the page while preserving this invariant.
- while (free_size >= object_size_in_bytes_) {
- free_list_.Free(allocation_info_.top);
- allocation_info_.top += object_size_in_bytes_;
- free_size -= object_size_in_bytes_;
- accounting_stats_.WasteBytes(object_size_in_bytes_);
+ // Retry the free list allocation.
+ HeapObject* object = free_list_.Allocate(size_in_bytes);
+ if (object != NULL) return object;
}
-}
-
-
-// Add the block at the top of the page to the space's free list, set the
-// allocation info to the next page (assumed to be one), and allocate
-// linearly there.
-HeapObject* OldSpace::AllocateInNextPage(Page* current_page,
- int size_in_bytes) {
- ASSERT(current_page->next_page()->is_valid());
- Page* next_page = current_page->next_page();
- next_page->ClearGCFields();
- PutRestOfCurrentPageOnFreeList(current_page);
- SetAllocationInfo(&allocation_info_, next_page);
- return AllocateLinearly(&allocation_info_, size_in_bytes);
-}
-
-void OldSpace::DeallocateBlock(Address start,
- int size_in_bytes,
- bool add_to_freelist) {
- Free(start, size_in_bytes, add_to_freelist);
+ // Finally, fail.
+ return NULL;
}
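// [Editorial summary, not part of this diff.] The fallback order implemented
// by SlowAllocateRaw() above:
//   1. advance the lazy sweeper by ~size_in_bytes and retry the free list;
//   2. fail if the old-generation allocation limit has been reached, so that
//      a garbage collection runs;
//   3. expand the space with a fresh page and allocate from the free list;
//   4. finish sweeping all remaining pages and retry one last time;
//   5. return NULL so the caller can fall back to a garbage collection.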
@@ -2413,7 +2282,7 @@ static void CollectCommentStatistics(Isolate* isolate, RelocIterator* it) {
void PagedSpace::CollectCodeStatistics() {
Isolate* isolate = heap()->isolate();
HeapObjectIterator obj_it(this);
- for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) {
+ for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
if (obj->IsCode()) {
Code* code = Code::cast(obj);
isolate->code_kind_statistics()[code->kind()] += code->Size();
@@ -2438,16 +2307,17 @@ void PagedSpace::CollectCodeStatistics() {
}
-void OldSpace::ReportStatistics() {
+void PagedSpace::ReportStatistics() {
int pct = static_cast<int>(Available() * 100 / Capacity());
PrintF(" capacity: %" V8_PTR_PREFIX "d"
", waste: %" V8_PTR_PREFIX "d"
", available: %" V8_PTR_PREFIX "d, %%%d\n",
Capacity(), Waste(), Available(), pct);
+ if (was_swept_conservatively_) return;
ClearHistograms();
HeapObjectIterator obj_it(this);
- for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next())
+ for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next())
CollectHistogramInfo(obj);
ReportHistogram(true);
}
@@ -2456,192 +2326,28 @@ void OldSpace::ReportStatistics() {
// -----------------------------------------------------------------------------
// FixedSpace implementation
-void FixedSpace::PrepareForMarkCompact(bool will_compact) {
+void FixedSpace::PrepareForMarkCompact() {
// Call prepare of the super class.
- PagedSpace::PrepareForMarkCompact(will_compact);
-
- if (will_compact) {
- // Reset relocation info.
- MCResetRelocationInfo();
+ PagedSpace::PrepareForMarkCompact();
- // During a compacting collection, everything in the space is considered
- // 'available' (set by the call to MCResetRelocationInfo) and we will
- // rediscover live and wasted bytes during the collection.
- ASSERT(Available() == Capacity());
- } else {
- // During a non-compacting collection, everything below the linear
- // allocation pointer except wasted top-of-page blocks is considered
- // allocated and we will rediscover available bytes during the
- // collection.
- accounting_stats_.AllocateBytes(free_list_.available());
- }
+ // During a non-compacting collection, everything below the linear
+ // allocation pointer except wasted top-of-page blocks is considered
+ // allocated and we will rediscover available bytes during the
+ // collection.
+ accounting_stats_.AllocateBytes(free_list_.available());
// Clear the free list before a full GC---it will be rebuilt afterward.
free_list_.Reset();
}
-void FixedSpace::MCCommitRelocationInfo() {
- // Update fast allocation info.
- allocation_info_.top = mc_forwarding_info_.top;
- allocation_info_.limit = mc_forwarding_info_.limit;
- ASSERT(allocation_info_.VerifyPagedAllocation());
-
- // The space is compacted and we haven't yet wasted any space.
- ASSERT(Waste() == 0);
-
- // Update allocation_top of each page in use and compute waste.
- int computed_size = 0;
- PageIterator it(this, PageIterator::PAGES_USED_BY_MC);
- while (it.has_next()) {
- Page* page = it.next();
- Address page_top = page->AllocationTop();
- computed_size += static_cast<int>(page_top - page->ObjectAreaStart());
- if (it.has_next()) {
- accounting_stats_.WasteBytes(
- static_cast<int>(page->ObjectAreaEnd() - page_top));
- page->SetAllocationWatermark(page_top);
- }
- }
-
- // Make sure the computed size - based on the used portion of the
- // pages in use - matches the size we adjust during allocation.
- ASSERT(computed_size == Size());
-}
-
-
-// Slow case for normal allocation. Try in order: (1) allocate in the next
-// page in the space, (2) allocate off the space's free list, (3) expand the
-// space, (4) fail.
-HeapObject* FixedSpace::SlowAllocateRaw(int size_in_bytes) {
- ASSERT_EQ(object_size_in_bytes_, size_in_bytes);
- // Linear allocation in this space has failed. If there is another page
- // in the space, move to that page and allocate there. This allocation
- // should succeed.
- Page* current_page = TopPageOf(allocation_info_);
- if (current_page->next_page()->is_valid()) {
- return AllocateInNextPage(current_page, size_in_bytes);
- }
-
- // There is no next page in this space. Try free list allocation unless
- // that is currently forbidden. The fixed space free list implicitly assumes
- // that all free blocks are of the fixed size.
- if (!heap()->linear_allocation()) {
- Object* result;
- MaybeObject* maybe = free_list_.Allocate();
- if (maybe->ToObject(&result)) {
- accounting_stats_.AllocateBytes(size_in_bytes);
- HeapObject* obj = HeapObject::cast(result);
- Page* p = Page::FromAddress(obj->address());
-
- if (obj->address() >= p->AllocationWatermark()) {
- // There should be no hole between the allocation watermark
- // and allocated object address.
- // Memory above the allocation watermark was not swept and
- // might contain garbage pointers to new space.
- ASSERT(obj->address() == p->AllocationWatermark());
- p->SetAllocationWatermark(obj->address() + size_in_bytes);
- }
-
- return obj;
- }
- }
-
- // Free list allocation failed and there is no next page. Fail if we have
- // hit the old generation size limit that should cause a garbage
- // collection.
- if (!heap()->always_allocate() &&
- heap()->OldGenerationAllocationLimitReached()) {
- return NULL;
- }
-
- // Try to expand the space and allocate in the new next page.
- ASSERT(!current_page->next_page()->is_valid());
- if (Expand(current_page)) {
- return AllocateInNextPage(current_page, size_in_bytes);
- }
-
- // Finally, fail.
- return NULL;
-}
-
-
-// Move to the next page (there is assumed to be one) and allocate there.
-// The top of page block is always wasted, because it is too small to hold a
-// map.
-HeapObject* FixedSpace::AllocateInNextPage(Page* current_page,
- int size_in_bytes) {
- ASSERT(current_page->next_page()->is_valid());
- ASSERT(allocation_info_.top == PageAllocationLimit(current_page));
- ASSERT_EQ(object_size_in_bytes_, size_in_bytes);
- Page* next_page = current_page->next_page();
- next_page->ClearGCFields();
- current_page->SetAllocationWatermark(allocation_info_.top);
- accounting_stats_.WasteBytes(page_extra_);
- SetAllocationInfo(&allocation_info_, next_page);
- return AllocateLinearly(&allocation_info_, size_in_bytes);
-}
-
-
-void FixedSpace::DeallocateBlock(Address start,
- int size_in_bytes,
- bool add_to_freelist) {
- // Free-list elements in fixed space are assumed to have a fixed size.
- // We break the free block into chunks and add them to the free list
- // individually.
- int size = object_size_in_bytes();
- ASSERT(size_in_bytes % size == 0);
- Address end = start + size_in_bytes;
- for (Address a = start; a < end; a += size) {
- Free(a, add_to_freelist);
- }
-}
-
-
-#ifdef DEBUG
-void FixedSpace::ReportStatistics() {
- int pct = static_cast<int>(Available() * 100 / Capacity());
- PrintF(" capacity: %" V8_PTR_PREFIX "d"
- ", waste: %" V8_PTR_PREFIX "d"
- ", available: %" V8_PTR_PREFIX "d, %%%d\n",
- Capacity(), Waste(), Available(), pct);
-
- ClearHistograms();
- HeapObjectIterator obj_it(this);
- for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next())
- CollectHistogramInfo(obj);
- ReportHistogram(false);
-}
-#endif
-
-
// -----------------------------------------------------------------------------
// MapSpace implementation
-void MapSpace::PrepareForMarkCompact(bool will_compact) {
- // Call prepare of the super class.
- FixedSpace::PrepareForMarkCompact(will_compact);
-
- if (will_compact) {
- // Initialize map index entry.
- int page_count = 0;
- PageIterator it(this, PageIterator::ALL_PAGES);
- while (it.has_next()) {
- ASSERT_MAP_PAGE_INDEX(page_count);
-
- Page* p = it.next();
- ASSERT(p->mc_page_index == page_count);
-
- page_addresses_[page_count++] = p->address();
- }
- }
-}
-
-
#ifdef DEBUG
void MapSpace::VerifyObject(HeapObject* object) {
// The object should be a map or a free-list node.
- ASSERT(object->IsMap() || object->IsByteArray());
+ ASSERT(object->IsMap() || object->IsFreeSpace());
}
#endif
@@ -2662,107 +2368,43 @@ void CellSpace::VerifyObject(HeapObject* object) {
// LargeObjectIterator
LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space) {
- current_ = space->first_chunk_;
+ current_ = space->first_page_;
size_func_ = NULL;
}
LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space,
HeapObjectCallback size_func) {
- current_ = space->first_chunk_;
+ current_ = space->first_page_;
size_func_ = size_func;
}
-HeapObject* LargeObjectIterator::next() {
+HeapObject* LargeObjectIterator::Next() {
if (current_ == NULL) return NULL;
HeapObject* object = current_->GetObject();
- current_ = current_->next();
+ current_ = current_->next_page();
return object;
}
// -----------------------------------------------------------------------------
-// LargeObjectChunk
-
-LargeObjectChunk* LargeObjectChunk::New(int size_in_bytes,
- Executability executable) {
- size_t requested = ChunkSizeFor(size_in_bytes);
- size_t size;
- size_t guard_size = (executable == EXECUTABLE) ? Page::kPageSize : 0;
- Isolate* isolate = Isolate::Current();
- void* mem = isolate->memory_allocator()->AllocateRawMemory(
- requested + guard_size, &size, executable);
- if (mem == NULL) return NULL;
-
- // The start of the chunk may be overlayed with a page so we have to
- // make sure that the page flags fit in the size field.
- ASSERT((size & Page::kPageFlagMask) == 0);
-
- LOG(isolate, NewEvent("LargeObjectChunk", mem, size));
- if (size < requested + guard_size) {
- isolate->memory_allocator()->FreeRawMemory(
- mem, size, executable);
- LOG(isolate, DeleteEvent("LargeObjectChunk", mem));
- return NULL;
- }
-
- if (guard_size != 0) {
- OS::Guard(mem, guard_size);
- size -= guard_size;
- mem = static_cast<Address>(mem) + guard_size;
- }
-
- ObjectSpace space = (executable == EXECUTABLE)
- ? kObjectSpaceCodeSpace
- : kObjectSpaceLoSpace;
- isolate->memory_allocator()->PerformAllocationCallback(
- space, kAllocationActionAllocate, size);
-
- LargeObjectChunk* chunk = reinterpret_cast<LargeObjectChunk*>(mem);
- chunk->size_ = size;
- chunk->GetPage()->heap_ = isolate->heap();
- return chunk;
-}
-
-
-void LargeObjectChunk::Free(Executability executable) {
- size_t guard_size = (executable == EXECUTABLE) ? Page::kPageSize : 0;
- ObjectSpace space =
- (executable == EXECUTABLE) ? kObjectSpaceCodeSpace : kObjectSpaceLoSpace;
- // Do not access instance fields after FreeRawMemory!
- Address my_address = address();
- size_t my_size = size();
- Isolate* isolate = GetPage()->heap_->isolate();
- MemoryAllocator* a = isolate->memory_allocator();
- a->FreeRawMemory(my_address - guard_size, my_size + guard_size, executable);
- a->PerformAllocationCallback(space, kAllocationActionFree, my_size);
- LOG(isolate, DeleteEvent("LargeObjectChunk", my_address));
-}
-
-
-int LargeObjectChunk::ChunkSizeFor(int size_in_bytes) {
- int os_alignment = static_cast<int>(OS::AllocateAlignment());
- if (os_alignment < Page::kPageSize) {
- size_in_bytes += (Page::kPageSize - os_alignment);
- }
- return size_in_bytes + Page::kObjectStartOffset;
-}
-
-// -----------------------------------------------------------------------------
// LargeObjectSpace
-LargeObjectSpace::LargeObjectSpace(Heap* heap, AllocationSpace id)
+LargeObjectSpace::LargeObjectSpace(Heap* heap,
+ intptr_t max_capacity,
+ AllocationSpace id)
: Space(heap, id, NOT_EXECUTABLE), // Managed on a per-allocation basis
- first_chunk_(NULL),
+ max_capacity_(max_capacity),
+ first_page_(NULL),
size_(0),
page_count_(0),
objects_size_(0) {}
bool LargeObjectSpace::Setup() {
- first_chunk_ = NULL;
+ first_page_ = NULL;
size_ = 0;
page_count_ = 0;
objects_size_ = 0;
@@ -2771,20 +2413,22 @@ bool LargeObjectSpace::Setup() {
void LargeObjectSpace::TearDown() {
- while (first_chunk_ != NULL) {
- LargeObjectChunk* chunk = first_chunk_;
- first_chunk_ = first_chunk_->next();
- chunk->Free(chunk->GetPage()->PageExecutability());
+ while (first_page_ != NULL) {
+ LargePage* page = first_page_;
+ first_page_ = first_page_->next_page();
+ LOG(heap()->isolate(), DeleteEvent("LargeObjectChunk", page->address()));
+
+ ObjectSpace space = static_cast<ObjectSpace>(1 << identity());
+ heap()->isolate()->memory_allocator()->PerformAllocationCallback(
+ space, kAllocationActionFree, page->size());
+ heap()->isolate()->memory_allocator()->Free(page);
}
Setup();
}
-MaybeObject* LargeObjectSpace::AllocateRawInternal(int requested_size,
- int object_size,
- Executability executable) {
- ASSERT(0 < object_size && object_size <= requested_size);
-
+MaybeObject* LargeObjectSpace::AllocateRaw(int object_size,
+ Executability executable) {
// Check if we want to force a GC before growing the old space further.
// If so, fail the allocation.
if (!heap()->always_allocate() &&
@@ -2792,75 +2436,55 @@ MaybeObject* LargeObjectSpace::AllocateRawInternal(int requested_size,
return Failure::RetryAfterGC(identity());
}
- LargeObjectChunk* chunk = LargeObjectChunk::New(requested_size, executable);
- if (chunk == NULL) {
+ if (Size() + object_size > max_capacity_) {
return Failure::RetryAfterGC(identity());
}
- size_ += static_cast<int>(chunk->size());
- objects_size_ += requested_size;
- page_count_++;
- chunk->set_next(first_chunk_);
- first_chunk_ = chunk;
-
- // Initialize page header.
- Page* page = chunk->GetPage();
- Address object_address = page->ObjectAreaStart();
-
- // Clear the low order bit of the second word in the page to flag it as a
- // large object page. If the chunk_size happened to be written there, its
- // low order bit should already be clear.
- page->SetIsLargeObjectPage(true);
- page->SetPageExecutability(executable);
- page->SetRegionMarks(Page::kAllRegionsCleanMarks);
- return HeapObject::FromAddress(object_address);
-}
-
+ LargePage* page = heap()->isolate()->memory_allocator()->
+ AllocateLargePage(object_size, executable, this);
+ if (page == NULL) return Failure::RetryAfterGC(identity());
+ ASSERT(page->body_size() >= object_size);
-MaybeObject* LargeObjectSpace::AllocateRawCode(int size_in_bytes) {
- ASSERT(0 < size_in_bytes);
- return AllocateRawInternal(size_in_bytes,
- size_in_bytes,
- EXECUTABLE);
-}
+ size_ += static_cast<int>(page->size());
+ objects_size_ += object_size;
+ page_count_++;
+ page->set_next_page(first_page_);
+ first_page_ = page;
+ HeapObject* object = page->GetObject();
-MaybeObject* LargeObjectSpace::AllocateRawFixedArray(int size_in_bytes) {
- ASSERT(0 < size_in_bytes);
- return AllocateRawInternal(size_in_bytes,
- size_in_bytes,
- NOT_EXECUTABLE);
-}
-
+#ifdef DEBUG
+  // Make the object consistent so the heap can be verified in OldSpaceStep.
+ reinterpret_cast<Object**>(object->address())[0] =
+ heap()->fixed_array_map();
+ reinterpret_cast<Object**>(object->address())[1] = Smi::FromInt(0);
+#endif
-MaybeObject* LargeObjectSpace::AllocateRaw(int size_in_bytes) {
- ASSERT(0 < size_in_bytes);
- return AllocateRawInternal(size_in_bytes,
- size_in_bytes,
- NOT_EXECUTABLE);
+ heap()->incremental_marking()->OldSpaceStep(object_size);
+ return object;
}
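// [Editorial sketch, not part of this diff.] Unwrapping the MaybeObject
// returned above follows the idiom used elsewhere in this file:
//
//   Object* result;
//   MaybeObject* maybe = lo_space->AllocateRaw(object_size, NOT_EXECUTABLE);
//   if (!maybe->ToObject(&result)) {
//     // A retry-after-GC failure: collect garbage, then allocate again.
//   }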
// GC support
MaybeObject* LargeObjectSpace::FindObject(Address a) {
- for (LargeObjectChunk* chunk = first_chunk_;
- chunk != NULL;
- chunk = chunk->next()) {
- Address chunk_address = chunk->address();
- if (chunk_address <= a && a < chunk_address + chunk->size()) {
- return chunk->GetObject();
+ for (LargePage* page = first_page_;
+ page != NULL;
+ page = page->next_page()) {
+ Address page_address = page->address();
+ if (page_address <= a && a < page_address + page->size()) {
+ return page->GetObject();
}
}
return Failure::Exception();
}
-LargeObjectChunk* LargeObjectSpace::FindChunkContainingPc(Address pc) {
+LargePage* LargeObjectSpace::FindPageContainingPc(Address pc) {
// TODO(853): Change this implementation to only find executable
// chunks and use some kind of hash-based approach to speed it up.
- for (LargeObjectChunk* chunk = first_chunk_;
+ for (LargePage* chunk = first_page_;
chunk != NULL;
- chunk = chunk->next()) {
+ chunk = chunk->next_page()) {
Address chunk_address = chunk->address();
if (chunk_address <= pc && pc < chunk_address + chunk->size()) {
return chunk;
@@ -2870,112 +2494,57 @@ LargeObjectChunk* LargeObjectSpace::FindChunkContainingPc(Address pc) {
}
-void LargeObjectSpace::IterateDirtyRegions(ObjectSlotCallback copy_object) {
- LargeObjectIterator it(this);
- for (HeapObject* object = it.next(); object != NULL; object = it.next()) {
- // We only have code, sequential strings, or fixed arrays in large
- // object space, and only fixed arrays can possibly contain pointers to
- // the young generation.
- if (object->IsFixedArray()) {
- Page* page = Page::FromAddress(object->address());
- uint32_t marks = page->GetRegionMarks();
- uint32_t newmarks = Page::kAllRegionsCleanMarks;
-
- if (marks != Page::kAllRegionsCleanMarks) {
- // For a large page a single dirty mark corresponds to several
- // regions (modulo 32). So we treat a large page as a sequence of
- // normal pages of size Page::kPageSize having same dirty marks
- // and subsequently iterate dirty regions on each of these pages.
- Address start = object->address();
- Address end = page->ObjectAreaEnd();
- Address object_end = start + object->Size();
-
- // Iterate regions of the first normal page covering object.
- uint32_t first_region_number = page->GetRegionNumberForAddress(start);
- newmarks |=
- heap()->IterateDirtyRegions(marks >> first_region_number,
- start,
- end,
- &Heap::IteratePointersInDirtyRegion,
- copy_object) << first_region_number;
-
- start = end;
- end = start + Page::kPageSize;
- while (end <= object_end) {
- // Iterate next 32 regions.
- newmarks |=
- heap()->IterateDirtyRegions(marks,
- start,
- end,
- &Heap::IteratePointersInDirtyRegion,
- copy_object);
- start = end;
- end = start + Page::kPageSize;
- }
-
- if (start != object_end) {
- // Iterate the last piece of an object which is less than
- // Page::kPageSize.
- newmarks |=
- heap()->IterateDirtyRegions(marks,
- start,
- object_end,
- &Heap::IteratePointersInDirtyRegion,
- copy_object);
- }
-
- page->SetRegionMarks(newmarks);
- }
- }
- }
-}
-
-
void LargeObjectSpace::FreeUnmarkedObjects() {
- LargeObjectChunk* previous = NULL;
- LargeObjectChunk* current = first_chunk_;
+ LargePage* previous = NULL;
+ LargePage* current = first_page_;
while (current != NULL) {
HeapObject* object = current->GetObject();
- if (object->IsMarked()) {
- object->ClearMark();
- heap()->mark_compact_collector()->tracer()->decrement_marked_count();
+    // Can this large page contain pointers to non-trivial objects? No other
+    // pointer object is this big.
+ bool is_pointer_object = object->IsFixedArray();
+ MarkBit mark_bit = Marking::MarkBitFrom(object);
+ if (mark_bit.Get()) {
+ mark_bit.Clear();
+ MemoryChunk::IncrementLiveBytes(object->address(), -object->Size());
previous = current;
- current = current->next();
+ current = current->next_page();
} else {
+ LargePage* page = current;
// Cut the chunk out from the chunk list.
- LargeObjectChunk* current_chunk = current;
- current = current->next();
+ current = current->next_page();
if (previous == NULL) {
- first_chunk_ = current;
+ first_page_ = current;
} else {
- previous->set_next(current);
+ previous->set_next_page(current);
}
// Free the chunk.
heap()->mark_compact_collector()->ReportDeleteIfNeeded(
object, heap()->isolate());
- LiveObjectList::ProcessNonLive(object);
-
- size_ -= static_cast<int>(current_chunk->size());
+ size_ -= static_cast<int>(page->size());
objects_size_ -= object->Size();
page_count_--;
- current_chunk->Free(current_chunk->GetPage()->PageExecutability());
+
+ if (is_pointer_object) {
+ heap()->QueueMemoryChunkForFree(page);
+ } else {
+ heap()->isolate()->memory_allocator()->Free(page);
+ }
}
}
+ heap()->FreeQueuedChunks();
}
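// [Editorial note, hedged, not part of this diff.] The split above between
// QueueMemoryChunkForFree() and an immediate Free() appears to exist because
// pages of pointer objects (FixedArrays) may still be referenced by recorded
// slots; queuing defers the release until FreeQueuedChunks() runs after
// those records have been dealt with, while pages of non-pointer objects
// (code, strings) can be released immediately.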
bool LargeObjectSpace::Contains(HeapObject* object) {
Address address = object->address();
- if (heap()->new_space()->Contains(address)) {
- return false;
- }
- Page* page = Page::FromAddress(address);
+ MemoryChunk* chunk = MemoryChunk::FromAddress(address);
+
+ bool owned = (chunk->owner() == this);
- SLOW_ASSERT(!page->IsLargeObjectPage()
- || !FindObject(address)->IsFailure());
+ SLOW_ASSERT(!owned || !FindObject(address)->IsFailure());
- return page->IsLargeObjectPage();
+ return owned;
}
@@ -2983,9 +2552,9 @@ bool LargeObjectSpace::Contains(HeapObject* object) {
// We do not assume that the large object iterator works, because it depends
// on the invariants we are checking during verification.
void LargeObjectSpace::Verify() {
- for (LargeObjectChunk* chunk = first_chunk_;
+ for (LargePage* chunk = first_page_;
chunk != NULL;
- chunk = chunk->next()) {
+ chunk = chunk->next_page()) {
// Each chunk contains an object that starts at the large object page's
// object area start.
HeapObject* object = chunk->GetObject();
@@ -3015,9 +2584,6 @@ void LargeObjectSpace::Verify() {
object->Size(),
&code_visitor);
} else if (object->IsFixedArray()) {
- // We loop over fixed arrays ourselves, rather then using the visitor,
- // because the visitor doesn't support the start/offset iteration
- // needed for IsRegionDirty.
FixedArray* array = FixedArray::cast(object);
for (int j = 0; j < array->length(); j++) {
Object* element = array->get(j);
@@ -3025,13 +2591,6 @@ void LargeObjectSpace::Verify() {
HeapObject* element_object = HeapObject::cast(element);
ASSERT(heap()->Contains(element_object));
ASSERT(element_object->map()->IsMap());
- if (heap()->InNewSpace(element_object)) {
- Address array_addr = object->address();
- Address element_addr = array_addr + FixedArray::kHeaderSize +
- j * kPointerSize;
-
- ASSERT(Page::FromAddress(array_addr)->IsRegionDirty(element_addr));
- }
}
}
}
@@ -3041,7 +2600,7 @@ void LargeObjectSpace::Verify() {
void LargeObjectSpace::Print() {
LargeObjectIterator it(this);
- for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
+ for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
obj->Print();
}
}
@@ -3052,7 +2611,7 @@ void LargeObjectSpace::ReportStatistics() {
int num_objects = 0;
ClearHistograms();
LargeObjectIterator it(this);
- for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
+ for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
num_objects++;
CollectHistogramInfo(obj);
}
@@ -3066,13 +2625,38 @@ void LargeObjectSpace::ReportStatistics() {
void LargeObjectSpace::CollectCodeStatistics() {
Isolate* isolate = heap()->isolate();
LargeObjectIterator obj_it(this);
- for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) {
+ for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
if (obj->IsCode()) {
Code* code = Code::cast(obj);
isolate->code_kind_statistics()[code->kind()] += code->Size();
}
}
}
+
+
+void Page::Print() {
+  // Make a best effort to print the objects in the page.
+ PrintF("Page@%p in %s\n",
+ this->address(),
+ AllocationSpaceName(this->owner()->identity()));
+ printf(" --------------------------------------\n");
+ HeapObjectIterator objects(this, heap()->GcSafeSizeOfOldObjectFunction());
+ unsigned mark_size = 0;
+ for (HeapObject* object = objects.Next();
+ object != NULL;
+ object = objects.Next()) {
+ bool is_marked = Marking::MarkBitFrom(object).Get();
+ PrintF(" %c ", (is_marked ? '!' : ' ')); // Indent a little.
+ if (is_marked) {
+ mark_size += heap()->GcSafeSizeOfOldObjectFunction()(object);
+ }
+ object->ShortPrint();
+ PrintF("\n");
+ }
+ printf(" --------------------------------------\n");
+ printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes());
+}
+
#endif // DEBUG
} } // namespace v8::internal
diff --git a/deps/v8/src/spaces.h b/deps/v8/src/spaces.h
index f1564967e..41bfec90f 100644
--- a/deps/v8/src/spaces.h
+++ b/deps/v8/src/spaces.h
@@ -49,45 +49,47 @@ class Isolate;
//
// The semispaces of the young generation are contiguous. The old and map
// spaces consist of a list of pages. A page has a page header and an object
-// area. A page size is deliberately chosen as 8K bytes.
-// The first word of a page is an opaque page header that has the
-// address of the next page and its ownership information. The second word may
-// have the allocation top address of this page. Heap objects are aligned to the
-// pointer size.
+// area.
//
// There is a separate large object space for objects larger than
// Page::kMaxHeapObjectSize, so that they do not have to move during
// collection. The large object space is paged. Pages in large object space
-// may be larger than 8K.
+// may be larger than the page size.
//
-// A card marking write barrier is used to keep track of intergenerational
-// references. Old space pages are divided into regions of Page::kRegionSize
-// size. Each region has a corresponding dirty bit in the page header which is
-// set if the region might contain pointers to new space. For details about
-// dirty bits encoding see comments in the Page::GetRegionNumberForAddress()
-// method body.
+// A store-buffer based write barrier is used to keep track of intergenerational
+// references. See store-buffer.h.
//
-// During scavenges and mark-sweep collections we iterate intergenerational
-// pointers without decoding heap object maps so if the page belongs to old
-// pointer space or large object space it is essential to guarantee that
-// the page does not contain any garbage pointers to new space: every pointer
-// aligned word which satisfies the Heap::InNewSpace() predicate must be a
-// pointer to a live heap object in new space. Thus objects in old pointer
-// and large object spaces should have a special layout (e.g. no bare integer
-// fields). This requirement does not apply to map space which is iterated in
-// a special fashion. However we still require pointer fields of dead maps to
-// be cleaned.
+// During scavenges and mark-sweep collections we sometimes (after a store
+// buffer overflow) iterate intergenerational pointers without decoding heap
+// object maps so if the page belongs to old pointer space or large object
+// space it is essential to guarantee that the page does not contain any
+// garbage pointers to new space: every pointer aligned word which satisfies
+// the Heap::InNewSpace() predicate must be a pointer to a live heap object in
+// new space. Thus objects in old pointer and large object spaces should have a
+// special layout (e.g. no bare integer fields). This requirement does not
+// apply to map space which is iterated in a special fashion. However we still
+// require pointer fields of dead maps to be cleaned.
//
-// To enable lazy cleaning of old space pages we use a notion of allocation
-// watermark. Every pointer under watermark is considered to be well formed.
-// Page allocation watermark is not necessarily equal to page allocation top but
-// all alive objects on page should reside under allocation watermark.
-// During scavenge allocation watermark might be bumped and invalid pointers
-// might appear below it. To avoid following them we store a valid watermark
-// into special field in the page header and set a page WATERMARK_INVALIDATED
-// flag. For details see comments in the Page::SetAllocationWatermark() method
-// body.
+// To enable lazy cleaning of old space pages we can mark chunks of the page
+// as being garbage. Garbage sections are marked with a special map. These
+// sections are skipped when scanning the page, even if we are otherwise
+// scanning without regard for object boundaries. Garbage sections are chained
+// together to form a free list after a GC. Garbage sections created outside
+// of GCs by object truncation etc. may not be in the free list chain. Very
+// small free spaces are ignored; they need only be cleaned of bogus pointers
+// into new space.
//
+// Each page may have up to one special garbage section. The start of this
+// section is denoted by the top field in the space. The end of the section
+// is denoted by the limit field in the space. This special garbage section
+// is not marked with a free space map in the data. The point of this section
+// is to enable linear allocation without having to constantly update the byte
+// array every time the top field is updated and a new object is created. The
+// special garbage section is not in the chain of garbage sections.
+//
+// Since the top and limit fields are in the space, not the page, only one page
+// has a special garbage section, and if the top and limit are equal then there
+// is no special garbage section.
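
To make the top/limit scheme above concrete, here is a minimal sketch of linear (bump-pointer) allocation over the special garbage section. It assumes nothing beyond the description in the comment; LinearArea and BumpAllocate are illustrative names, not V8 API:

    #include <cstddef>
    #include <cstdint>

    // Sketch only: the special garbage section is [top, limit) and is not
    // marked with a free-space map, so bumping top is all an allocation does.
    struct LinearArea {
      uintptr_t top;    // Start of the special garbage section.
      uintptr_t limit;  // End of the special garbage section.
    };

    uintptr_t BumpAllocate(LinearArea* area, size_t size_in_bytes) {
      // If top == limit there is no special garbage section to carve from.
      if (area->limit - area->top < size_in_bytes) return 0;
      uintptr_t result = area->top;
      area->top += size_in_bytes;  // No byte-array update per allocation.
      return result;
    }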
// Some assertion macros used in the debugging mode.
@@ -114,30 +116,525 @@ class Isolate;
class PagedSpace;
class MemoryAllocator;
class AllocationInfo;
+class Space;
+class FreeList;
+class MemoryChunk;
+
+class MarkBit {
+ public:
+ typedef uint32_t CellType;
+
+ inline MarkBit(CellType* cell, CellType mask, bool data_only)
+ : cell_(cell), mask_(mask), data_only_(data_only) { }
+
+ inline CellType* cell() { return cell_; }
+ inline CellType mask() { return mask_; }
+
+#ifdef DEBUG
+ bool operator==(const MarkBit& other) {
+ return cell_ == other.cell_ && mask_ == other.mask_;
+ }
+#endif
+
+ inline void Set() { *cell_ |= mask_; }
+ inline bool Get() { return (*cell_ & mask_) != 0; }
+ inline void Clear() { *cell_ &= ~mask_; }
+
+ inline bool data_only() { return data_only_; }
+
+ inline MarkBit Next() {
+ CellType new_mask = mask_ << 1;
+ if (new_mask == 0) {
+ return MarkBit(cell_ + 1, 1, data_only_);
+ } else {
+ return MarkBit(cell_, new_mask, data_only_);
+ }
+ }
+
+ private:
+ CellType* cell_;
+ CellType mask_;
+ // This boolean indicates that the object is in a data-only space with no
+ // pointers. This enables some optimizations when marking.
+ // It is expected that this field is inlined and turned into control flow
+ // at the place where the MarkBit object is created.
+ bool data_only_;
+};
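
A small illustration of the cell/mask representation above: advancing past bit 31 of a cell overflows the shifted mask to zero, which is exactly the case Next() handles by stepping to bit 0 of the following cell. The snippet below mimics that logic outside V8:

    #include <cstdint>
    #include <cstdio>

    int main() {
      uint32_t cells[2] = {0, 0};
      uint32_t* cell = &cells[0];
      uint32_t mask = 1u << 31;        // Last bit of the first cell.
      uint32_t next_mask = mask << 1;  // Overflows to 0...
      if (next_mask == 0) {            // ...so move to the next cell, bit 0.
        cell = cell + 1;
        next_mask = 1;
      }
      *cell |= next_mask;              // Set(): cells[1] bit 0 is now set.
      std::printf("%u %u\n", cells[0], cells[1]);  // Prints: 0 1
      return 0;
    }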
+
+
+// Bitmap is a sequence of cells each containing fixed number of bits.
+class Bitmap {
+ public:
+ static const uint32_t kBitsPerCell = 32;
+ static const uint32_t kBitsPerCellLog2 = 5;
+ static const uint32_t kBitIndexMask = kBitsPerCell - 1;
+ static const uint32_t kBytesPerCell = kBitsPerCell / kBitsPerByte;
+ static const uint32_t kBytesPerCellLog2 = kBitsPerCellLog2 - kBitsPerByteLog2;
+
+ static const size_t kLength =
+ (1 << kPageSizeBits) >> (kPointerSizeLog2);
+
+ static const size_t kSize =
+ (1 << kPageSizeBits) >> (kPointerSizeLog2 + kBitsPerByteLog2);
+
+
+ static int CellsForLength(int length) {
+ return (length + kBitsPerCell - 1) >> kBitsPerCellLog2;
+ }
+
+ int CellsCount() {
+ return CellsForLength(kLength);
+ }
+
+ static int SizeFor(int cells_count) {
+ return sizeof(MarkBit::CellType) * cells_count;
+ }
+
+ INLINE(static uint32_t IndexToCell(uint32_t index)) {
+ return index >> kBitsPerCellLog2;
+ }
+
+ INLINE(static uint32_t CellToIndex(uint32_t index)) {
+ return index << kBitsPerCellLog2;
+ }
+
+ INLINE(static uint32_t CellAlignIndex(uint32_t index)) {
+ return (index + kBitIndexMask) & ~kBitIndexMask;
+ }
+
+ INLINE(MarkBit::CellType* cells()) {
+ return reinterpret_cast<MarkBit::CellType*>(this);
+ }
+
+ INLINE(Address address()) {
+ return reinterpret_cast<Address>(this);
+ }
+
+ INLINE(static Bitmap* FromAddress(Address addr)) {
+ return reinterpret_cast<Bitmap*>(addr);
+ }
+
+ inline MarkBit MarkBitFromIndex(uint32_t index, bool data_only = false) {
+ MarkBit::CellType mask = 1 << (index & kBitIndexMask);
+ MarkBit::CellType* cell = this->cells() + (index >> kBitsPerCellLog2);
+ return MarkBit(cell, mask, data_only);
+ }
+
+ static inline void Clear(MemoryChunk* chunk);
+
+ static void PrintWord(uint32_t word, uint32_t himask = 0) {
+ for (uint32_t mask = 1; mask != 0; mask <<= 1) {
+ if ((mask & himask) != 0) PrintF("[");
+ PrintF((mask & word) ? "1" : "0");
+ if ((mask & himask) != 0) PrintF("]");
+ }
+ }
+
+ class CellPrinter {
+ public:
+ CellPrinter() : seq_start(0), seq_type(0), seq_length(0) { }
+
+ void Print(uint32_t pos, uint32_t cell) {
+ if (cell == seq_type) {
+ seq_length++;
+ return;
+ }
+
+ Flush();
+
+ if (IsSeq(cell)) {
+ seq_start = pos;
+ seq_length = 0;
+ seq_type = cell;
+ return;
+ }
+
+ PrintF("%d: ", pos);
+ PrintWord(cell);
+ PrintF("\n");
+ }
+
+ void Flush() {
+ if (seq_length > 0) {
+ PrintF("%d: %dx%d\n",
+ seq_start,
+ seq_type == 0 ? 0 : 1,
+ seq_length * kBitsPerCell);
+ seq_length = 0;
+ }
+ }
+
+ static bool IsSeq(uint32_t cell) { return cell == 0 || cell == 0xFFFFFFFF; }
+
+ private:
+ uint32_t seq_start;
+ uint32_t seq_type;
+ uint32_t seq_length;
+ };
+
+ void Print() {
+ CellPrinter printer;
+ for (int i = 0; i < CellsCount(); i++) {
+ printer.Print(i, cells()[i]);
+ }
+ printer.Flush();
+ PrintF("\n");
+ }
+
+ bool IsClean() {
+ for (int i = 0; i < CellsCount(); i++) {
+ if (cells()[i] != 0) return false;
+ }
+ return true;
+ }
+};
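
As a worked example of MarkBitFromIndex under the constants above (kBitsPerCell = 32): index 70 selects cell 70 >> 5 = 2 and mask 1 << (70 & 31) = 1 << 6. The helper below restates that split and is illustrative, not part of the class:

    #include <cstdint>

    const uint32_t kBitsPerCellLog2 = 5;   // As in Bitmap above.
    const uint32_t kBitIndexMask = 31;     // kBitsPerCell - 1.

    // Splits a bit index into the (cell index, in-cell mask) pair that
    // MarkBitFromIndex uses to build a MarkBit.
    void SplitIndex(uint32_t index, uint32_t* cell_index, uint32_t* mask) {
      *cell_index = index >> kBitsPerCellLog2;  // 70 -> cell 2
      *mask = 1u << (index & kBitIndexMask);    // 70 -> 1 << 6
    }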
+
+
+class SkipList;
+class SlotsBuffer;
+
+// MemoryChunk represents a memory region owned by a specific space.
+// It is divided into the header and the body. Chunk start is always
+// 1MB aligned. Start of the body is aligned so it can accommodate
+// any heap object.
+class MemoryChunk {
+ public:
+ // Only works if the pointer is in the first kPageSize of the MemoryChunk.
+ static MemoryChunk* FromAddress(Address a) {
+ return reinterpret_cast<MemoryChunk*>(OffsetFrom(a) & ~kAlignmentMask);
+ }
+
+ // Only works for addresses in pointer spaces, not data or code spaces.
+ static inline MemoryChunk* FromAnyPointerAddress(Address addr);
+
+ Address address() { return reinterpret_cast<Address>(this); }
+
+ bool is_valid() { return address() != NULL; }
+
+ MemoryChunk* next_chunk() const { return next_chunk_; }
+ MemoryChunk* prev_chunk() const { return prev_chunk_; }
+
+ void set_next_chunk(MemoryChunk* next) { next_chunk_ = next; }
+ void set_prev_chunk(MemoryChunk* prev) { prev_chunk_ = prev; }
+
+ Space* owner() const {
+ if ((reinterpret_cast<intptr_t>(owner_) & kFailureTagMask) ==
+ kFailureTag) {
+ return reinterpret_cast<Space*>(owner_ - kFailureTag);
+ } else {
+ return NULL;
+ }
+ }
+
+ void set_owner(Space* space) {
+ ASSERT((reinterpret_cast<intptr_t>(space) & kFailureTagMask) == 0);
+ owner_ = reinterpret_cast<Address>(space) + kFailureTag;
+ ASSERT((reinterpret_cast<intptr_t>(owner_) & kFailureTagMask) ==
+ kFailureTag);
+ }
+
+ VirtualMemory* reserved_memory() {
+ return &reservation_;
+ }
+
+ void InitializeReservedMemory() {
+ reservation_.Reset();
+ }
+
+ void set_reserved_memory(VirtualMemory* reservation) {
+ ASSERT_NOT_NULL(reservation);
+ reservation_.TakeControl(reservation);
+ }
+
+ bool scan_on_scavenge() { return IsFlagSet(SCAN_ON_SCAVENGE); }
+ void initialize_scan_on_scavenge(bool scan) {
+ if (scan) {
+ SetFlag(SCAN_ON_SCAVENGE);
+ } else {
+ ClearFlag(SCAN_ON_SCAVENGE);
+ }
+ }
+ inline void set_scan_on_scavenge(bool scan);
+
+ int store_buffer_counter() { return store_buffer_counter_; }
+ void set_store_buffer_counter(int counter) {
+ store_buffer_counter_ = counter;
+ }
+
+ Address body() { return address() + kObjectStartOffset; }
+
+ Address body_limit() { return address() + size(); }
+
+ int body_size() { return static_cast<int>(size() - kObjectStartOffset); }
+
+ bool Contains(Address addr) {
+ return addr >= body() && addr < address() + size();
+ }
+
+ // Checks whether addr can be a limit of addresses in this page.
+ // It's a limit if it's in the page, or if it's just after the
+ // last byte of the page.
+ bool ContainsLimit(Address addr) {
+ return addr >= body() && addr <= address() + size();
+ }
+
+ enum MemoryChunkFlags {
+ IS_EXECUTABLE,
+ ABOUT_TO_BE_FREED,
+ POINTERS_TO_HERE_ARE_INTERESTING,
+ POINTERS_FROM_HERE_ARE_INTERESTING,
+ SCAN_ON_SCAVENGE,
+ IN_FROM_SPACE, // Mutually exclusive with IN_TO_SPACE.
+    IN_TO_SPACE,    // All pages in new space have one of these two set.
+ NEW_SPACE_BELOW_AGE_MARK,
+ CONTAINS_ONLY_DATA,
+ EVACUATION_CANDIDATE,
+ RESCAN_ON_EVACUATION,
+
+    // Pages swept precisely can be iterated, hitting only the live objects,
+    // whereas those swept conservatively cannot be iterated over. Both flags
+    // indicate that marking bits have been cleared by the sweeper; otherwise
+    // marking bits are still intact.
+ WAS_SWEPT_PRECISELY,
+ WAS_SWEPT_CONSERVATIVELY,
+
+ // Last flag, keep at bottom.
+ NUM_MEMORY_CHUNK_FLAGS
+ };
+
+
+ static const int kPointersToHereAreInterestingMask =
+ 1 << POINTERS_TO_HERE_ARE_INTERESTING;
+
+ static const int kPointersFromHereAreInterestingMask =
+ 1 << POINTERS_FROM_HERE_ARE_INTERESTING;
+
+ static const int kEvacuationCandidateMask =
+ 1 << EVACUATION_CANDIDATE;
+
+ static const int kSkipEvacuationSlotsRecordingMask =
+ (1 << EVACUATION_CANDIDATE) |
+ (1 << RESCAN_ON_EVACUATION) |
+ (1 << IN_FROM_SPACE) |
+ (1 << IN_TO_SPACE);
+
+
+ void SetFlag(int flag) {
+ flags_ |= static_cast<uintptr_t>(1) << flag;
+ }
+
+ void ClearFlag(int flag) {
+ flags_ &= ~(static_cast<uintptr_t>(1) << flag);
+ }
+
+ void SetFlagTo(int flag, bool value) {
+ if (value) {
+ SetFlag(flag);
+ } else {
+ ClearFlag(flag);
+ }
+ }
+
+ bool IsFlagSet(int flag) {
+ return (flags_ & (static_cast<uintptr_t>(1) << flag)) != 0;
+ }
+
+ // Set or clear multiple flags at a time. The flags in the mask
+ // are set to the value in "flags", the rest retain the current value
+ // in flags_.
+ void SetFlags(intptr_t flags, intptr_t mask) {
+ flags_ = (flags_ & ~mask) | (flags & mask);
+ }
+
+ // Return all current flags.
+ intptr_t GetFlags() { return flags_; }
+
+ // Manage live byte count (count of bytes known to be live,
+ // because they are marked black).
+ void ResetLiveBytes() {
+ if (FLAG_gc_verbose) {
+ PrintF("ResetLiveBytes:%p:%x->0\n",
+ static_cast<void*>(this), live_byte_count_);
+ }
+ live_byte_count_ = 0;
+ }
+ void IncrementLiveBytes(int by) {
+ if (FLAG_gc_verbose) {
+ printf("UpdateLiveBytes:%p:%x%c=%x->%x\n",
+ static_cast<void*>(this), live_byte_count_,
+ ((by < 0) ? '-' : '+'), ((by < 0) ? -by : by),
+ live_byte_count_ + by);
+ }
+ live_byte_count_ += by;
+ ASSERT_LE(static_cast<unsigned>(live_byte_count_), size_);
+ }
+ int LiveBytes() {
+ ASSERT(static_cast<unsigned>(live_byte_count_) <= size_);
+ return live_byte_count_;
+ }
+ static void IncrementLiveBytes(Address address, int by) {
+ MemoryChunk::FromAddress(address)->IncrementLiveBytes(by);
+ }
+
+ static const intptr_t kAlignment =
+ (static_cast<uintptr_t>(1) << kPageSizeBits);
+
+ static const intptr_t kAlignmentMask = kAlignment - 1;
+
+ static const intptr_t kSizeOffset = kPointerSize + kPointerSize;
+
+ static const intptr_t kLiveBytesOffset =
+ kSizeOffset + kPointerSize + kPointerSize + kPointerSize +
+ kPointerSize + kPointerSize + kPointerSize + kIntSize;
+
+ static const size_t kSlotsBufferOffset = kLiveBytesOffset + kIntSize;
+
+ static const size_t kHeaderSize =
+ kSlotsBufferOffset + kPointerSize + kPointerSize;
+
+ static const int kBodyOffset =
+ CODE_POINTER_ALIGN(MAP_POINTER_ALIGN(kHeaderSize + Bitmap::kSize));
+
+ // The start offset of the object area in a page. Aligned to both maps and
+ // code alignment to be suitable for both. Also aligned to 32 words because
+ // the marking bitmap is arranged in 32 bit chunks.
+ static const int kObjectStartAlignment = 32 * kPointerSize;
+ static const int kObjectStartOffset = kBodyOffset - 1 +
+ (kObjectStartAlignment - (kBodyOffset - 1) % kObjectStartAlignment);
+
+ size_t size() const { return size_; }
+
+ void set_size(size_t size) {
+ size_ = size;
+ }
+
+ Executability executable() {
+ return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
+ }
+
+ bool ContainsOnlyData() {
+ return IsFlagSet(CONTAINS_ONLY_DATA);
+ }
+
+ bool InNewSpace() {
+ return (flags_ & ((1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE))) != 0;
+ }
+
+ bool InToSpace() {
+ return IsFlagSet(IN_TO_SPACE);
+ }
+
+ bool InFromSpace() {
+ return IsFlagSet(IN_FROM_SPACE);
+ }
+
+ // ---------------------------------------------------------------------
+ // Markbits support
+
+ inline Bitmap* markbits() {
+ return Bitmap::FromAddress(address() + kHeaderSize);
+ }
+
+ void PrintMarkbits() { markbits()->Print(); }
+
+ inline uint32_t AddressToMarkbitIndex(Address addr) {
+ return static_cast<uint32_t>(addr - this->address()) >> kPointerSizeLog2;
+ }
+
+ inline static uint32_t FastAddressToMarkbitIndex(Address addr) {
+ const intptr_t offset =
+ reinterpret_cast<intptr_t>(addr) & kAlignmentMask;
+
+ return static_cast<uint32_t>(offset) >> kPointerSizeLog2;
+ }
+
+ inline Address MarkbitIndexToAddress(uint32_t index) {
+ return this->address() + (index << kPointerSizeLog2);
+ }
+
+ void InsertAfter(MemoryChunk* other);
+ void Unlink();
+
+ inline Heap* heap() { return heap_; }
+
+ static const int kFlagsOffset = kPointerSize * 3;
+
+ bool IsEvacuationCandidate() { return IsFlagSet(EVACUATION_CANDIDATE); }
+
+ bool ShouldSkipEvacuationSlotRecording() {
+ return (flags_ & kSkipEvacuationSlotsRecordingMask) != 0;
+ }
+
+ inline SkipList* skip_list() {
+ return skip_list_;
+ }
+
+ inline void set_skip_list(SkipList* skip_list) {
+ skip_list_ = skip_list;
+ }
+
+ inline SlotsBuffer* slots_buffer() {
+ return slots_buffer_;
+ }
+
+ inline SlotsBuffer** slots_buffer_address() {
+ return &slots_buffer_;
+ }
+
+ void MarkEvacuationCandidate() {
+ ASSERT(slots_buffer_ == NULL);
+ SetFlag(EVACUATION_CANDIDATE);
+ }
+
+ void ClearEvacuationCandidate() {
+ ASSERT(slots_buffer_ == NULL);
+ ClearFlag(EVACUATION_CANDIDATE);
+ }
+
+
+ protected:
+ MemoryChunk* next_chunk_;
+ MemoryChunk* prev_chunk_;
+ size_t size_;
+ intptr_t flags_;
+ // If the chunk needs to remember its memory reservation, it is stored here.
+ VirtualMemory reservation_;
+ // The identity of the owning space. This is tagged as a failure pointer, but
+ // no failure can be in an object, so this can be distinguished from any entry
+ // in a fixed array.
+ Address owner_;
+ Heap* heap_;
+ // Used by the store buffer to keep track of which pages to mark scan-on-
+ // scavenge.
+ int store_buffer_counter_;
+ // Count of bytes marked black on page.
+ int live_byte_count_;
+ SlotsBuffer* slots_buffer_;
+ SkipList* skip_list_;
+
+ static MemoryChunk* Initialize(Heap* heap,
+ Address base,
+ size_t size,
+ Executability executable,
+ Space* owner);
+
+ friend class MemoryAllocator;
+};
+
+STATIC_CHECK(sizeof(MemoryChunk) <= MemoryChunk::kHeaderSize);
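
Because chunks are kAlignment (1MB, i.e. 1 << kPageSizeBits) aligned, FromAddress reduces to a single mask. A self-contained sketch of that arithmetic, with kPageSizeBits assumed to be 20 as the 1MB figure above implies:

    #include <cstdint>

    const uintptr_t kPageSizeBits = 20;  // Assumed from the 1MB chunk size.
    const uintptr_t kAlignmentMask = (uintptr_t(1) << kPageSizeBits) - 1;

    // Clearing the low bits yields the chunk start; this is why the lookup
    // only works within the first kPageSize of a larger (large-object) chunk.
    uintptr_t ChunkBaseFor(uintptr_t addr) {
      return addr & ~kAlignmentMask;
    }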
// -----------------------------------------------------------------------------
-// A page normally has 8K bytes. Large object pages may be larger. A page
-// address is always aligned to the 8K page size.
-//
-// Each page starts with a header of Page::kPageHeaderSize size which contains
-// bookkeeping data.
-//
-// The mark-compact collector transforms a map pointer into a page index and a
-// page offset. The exact encoding is described in the comments for
-// class MapWord in objects.h.
+// A page is a memory chunk of size 1MB. Large object pages may be larger.
//
// The only way to get a page pointer is by calling factory methods:
// Page* p = Page::FromAddress(addr); or
// Page* p = Page::FromAllocationTop(top);
-class Page {
+class Page : public MemoryChunk {
public:
// Returns the page containing a given address. The address ranges
// from [page_addr .. page_addr + kPageSize[
- //
- // Note that this function only works for addresses in normal paged
- // spaces and addresses in the first 8K of large object pages (i.e.,
- // the start of large objects but not necessarily derived pointers
- // within them).
+ // This only works if the object is in fact in a page. See also MemoryChunk::
+  // FromAddress() and FromAnyPointerAddress().
INLINE(static Page* FromAddress(Address a)) {
return reinterpret_cast<Page*>(OffsetFrom(a) & ~kPageAlignmentMask);
}
@@ -148,34 +645,14 @@ class Page {
// [page_addr + kObjectStartOffset .. page_addr + kPageSize].
INLINE(static Page* FromAllocationTop(Address top)) {
Page* p = FromAddress(top - kPointerSize);
- ASSERT_PAGE_OFFSET(p->Offset(top));
return p;
}
- // Returns the start address of this page.
- Address address() { return reinterpret_cast<Address>(this); }
-
- // Checks whether this is a valid page address.
- bool is_valid() { return address() != NULL; }
-
- // Returns the next page of this page.
+ // Returns the next page in the chain of pages owned by a space.
inline Page* next_page();
-
- // Return the end of allocation in this page. Undefined for unused pages.
- inline Address AllocationTop();
-
- // Return the allocation watermark for the page.
- // For old space pages it is guaranteed that the area under the watermark
- // does not contain any garbage pointers to new space.
- inline Address AllocationWatermark();
-
- // Return the allocation watermark offset from the beginning of the page.
- inline uint32_t AllocationWatermarkOffset();
-
- inline void SetAllocationWatermark(Address allocation_watermark);
-
- inline void SetCachedAllocationWatermark(Address allocation_watermark);
- inline Address CachedAllocationWatermark();
+ inline Page* prev_page();
+ inline void set_next_page(Page* page);
+ inline void set_prev_page(Page* page);
// Returns the start address of the object area in this page.
Address ObjectAreaStart() { return address() + kObjectStartOffset; }
@@ -188,26 +665,9 @@ class Page {
return 0 == (OffsetFrom(a) & kPageAlignmentMask);
}
- // True if this page was in use before current compaction started.
- // Result is valid only for pages owned by paged spaces and
- // only after PagedSpace::PrepareForMarkCompact was called.
- inline bool WasInUseBeforeMC();
-
- inline void SetWasInUseBeforeMC(bool was_in_use);
-
- // True if this page is a large object page.
- inline bool IsLargeObjectPage();
-
- inline void SetIsLargeObjectPage(bool is_large_object_page);
-
- inline Executability PageExecutability();
-
- inline void SetPageExecutability(Executability executable);
-
// Returns the offset of a given address to this page.
INLINE(int Offset(Address a)) {
int offset = static_cast<int>(a - address());
- ASSERT_PAGE_OFFSET(offset);
return offset;
}
@@ -218,24 +678,6 @@ class Page {
}
// ---------------------------------------------------------------------
- // Card marking support
-
- static const uint32_t kAllRegionsCleanMarks = 0x0;
- static const uint32_t kAllRegionsDirtyMarks = 0xFFFFFFFF;
-
- inline uint32_t GetRegionMarks();
- inline void SetRegionMarks(uint32_t dirty);
-
- inline uint32_t GetRegionMaskForAddress(Address addr);
- inline uint32_t GetRegionMaskForSpan(Address start, int length_in_bytes);
- inline int GetRegionNumberForAddress(Address addr);
-
- inline void MarkRegionDirty(Address addr);
- inline bool IsRegionDirty(Address addr);
-
- inline void ClearRegionMarks(Address start,
- Address end,
- bool reaches_limit);
// Page size in bytes. This must be a multiple of the OS page size.
static const int kPageSize = 1 << kPageSizeBits;
@@ -243,118 +685,69 @@ class Page {
// Page size mask.
static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;
- static const int kPageHeaderSize = kPointerSize + kPointerSize + kIntSize +
- kIntSize + kPointerSize + kPointerSize;
-
- // The start offset of the object area in a page. Aligned to both maps and
- // code alignment to be suitable for both.
- static const int kObjectStartOffset =
- CODE_POINTER_ALIGN(MAP_POINTER_ALIGN(kPageHeaderSize));
-
// Object area size in bytes.
static const int kObjectAreaSize = kPageSize - kObjectStartOffset;
// Maximum object size that fits in a page.
static const int kMaxHeapObjectSize = kObjectAreaSize;
- static const int kDirtyFlagOffset = 2 * kPointerSize;
- static const int kRegionSizeLog2 = 8;
- static const int kRegionSize = 1 << kRegionSizeLog2;
- static const intptr_t kRegionAlignmentMask = (kRegionSize - 1);
+ static const int kFirstUsedCell =
+ (kObjectStartOffset/kPointerSize) >> Bitmap::kBitsPerCellLog2;
- STATIC_CHECK(kRegionSize == kPageSize / kBitsPerInt);
+ static const int kLastUsedCell =
+ ((kPageSize - kPointerSize)/kPointerSize) >>
+ Bitmap::kBitsPerCellLog2;
- enum PageFlag {
- IS_NORMAL_PAGE = 0,
- WAS_IN_USE_BEFORE_MC,
+ inline void ClearGCFields();
- // Page allocation watermark was bumped by preallocation during scavenge.
- // Correct watermark can be retrieved by CachedAllocationWatermark() method
- WATERMARK_INVALIDATED,
- IS_EXECUTABLE,
- NUM_PAGE_FLAGS // Must be last
- };
- static const int kPageFlagMask = (1 << NUM_PAGE_FLAGS) - 1;
-
- // To avoid an additional WATERMARK_INVALIDATED flag clearing pass during
- // scavenge we just invalidate the watermark on each old space page after
- // processing it. And then we flip the meaning of the WATERMARK_INVALIDATED
- // flag at the beginning of the next scavenge and each page becomes marked as
- // having a valid watermark.
- //
- // The following invariant must hold for pages in old pointer and map spaces:
- // If page is in use then page is marked as having invalid watermark at
- // the beginning and at the end of any GC.
- //
- // This invariant guarantees that after flipping flag meaning at the
- // beginning of scavenge all pages in use will be marked as having valid
- // watermark.
- static inline void FlipMeaningOfInvalidatedWatermarkFlag(Heap* heap);
-
- // Returns true if the page allocation watermark was not altered during
- // scavenge.
- inline bool IsWatermarkValid();
+ static inline Page* Initialize(Heap* heap,
+ MemoryChunk* chunk,
+ Executability executable,
+ PagedSpace* owner);
- inline void InvalidateWatermark(bool value);
+ void InitializeAsAnchor(PagedSpace* owner);
- inline bool GetPageFlag(PageFlag flag);
- inline void SetPageFlag(PageFlag flag, bool value);
- inline void ClearPageFlags();
+ bool WasSweptPrecisely() { return IsFlagSet(WAS_SWEPT_PRECISELY); }
+ bool WasSweptConservatively() { return IsFlagSet(WAS_SWEPT_CONSERVATIVELY); }
+ bool WasSwept() { return WasSweptPrecisely() || WasSweptConservatively(); }
- inline void ClearGCFields();
+ void MarkSweptPrecisely() { SetFlag(WAS_SWEPT_PRECISELY); }
+ void MarkSweptConservatively() { SetFlag(WAS_SWEPT_CONSERVATIVELY); }
- static const int kAllocationWatermarkOffsetShift = WATERMARK_INVALIDATED + 1;
- static const int kAllocationWatermarkOffsetBits = kPageSizeBits + 1;
- static const uint32_t kAllocationWatermarkOffsetMask =
- ((1 << kAllocationWatermarkOffsetBits) - 1) <<
- kAllocationWatermarkOffsetShift;
-
- static const uint32_t kFlagsMask =
- ((1 << kAllocationWatermarkOffsetShift) - 1);
-
- STATIC_CHECK(kBitsPerInt - kAllocationWatermarkOffsetShift >=
- kAllocationWatermarkOffsetBits);
-
- //---------------------------------------------------------------------------
- // Page header description.
- //
- // If a page is not in the large object space, the first word,
- // opaque_header, encodes the next page address (aligned to kPageSize 8K)
- // and the chunk number (0 ~ 8K-1). Only MemoryAllocator should use
- // opaque_header. The value range of the opaque_header is [0..kPageSize[,
- // or [next_page_start, next_page_end[. It cannot point to a valid address
- // in the current page. If a page is in the large object space, the first
- // word *may* (if the page start and large object chunk start are the
- // same) contain the address of the next large object chunk.
- intptr_t opaque_header;
-
- // If the page is not in the large object space, the low-order bit of the
- // second word is set. If the page is in the large object space, the
- // second word *may* (if the page start and large object chunk start are
- // the same) contain the large object chunk size. In either case, the
- // low-order bit for large object pages will be cleared.
- // For normal pages this word is used to store page flags and
- // offset of allocation top.
- intptr_t flags_;
+ void ClearSweptPrecisely() { ClearFlag(WAS_SWEPT_PRECISELY); }
+ void ClearSweptConservatively() { ClearFlag(WAS_SWEPT_CONSERVATIVELY); }
+
+#ifdef DEBUG
+ void Print();
+#endif // DEBUG
- // This field contains dirty marks for regions covering the page. Only dirty
- // regions might contain intergenerational references.
- // Only 32 dirty marks are supported so for large object pages several regions
- // might be mapped to a single dirty mark.
- uint32_t dirty_regions_;
+ friend class MemoryAllocator;
+};
- // The index of the page in its owner space.
- int mc_page_index;
- // During mark-compact collections this field contains the forwarding address
- // of the first live object in this page.
- // During scavenge collection this field is used to store allocation watermark
- // if it is altered during scavenge.
- Address mc_first_forwarded;
+STATIC_CHECK(sizeof(Page) <= MemoryChunk::kHeaderSize);
- Heap* heap_;
+
+class LargePage : public MemoryChunk {
+ public:
+ HeapObject* GetObject() {
+ return HeapObject::FromAddress(body());
+ }
+
+ inline LargePage* next_page() const {
+ return static_cast<LargePage*>(next_chunk());
+ }
+
+ inline void set_next_page(LargePage* page) {
+ set_next_chunk(page);
+ }
+ private:
+ static inline LargePage* Initialize(Heap* heap, MemoryChunk* chunk);
+
+ friend class MemoryAllocator;
};
+STATIC_CHECK(sizeof(LargePage) <= MemoryChunk::kHeaderSize);
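
Looking back at Page::FromAllocationTop above: the allocation top of a full page is page_start + kPageSize, which already lies in the next 1MB window, so subtracting one word before masking keeps the lookup inside the owning page. A hedged sketch of that arithmetic, under the same kPageSizeBits = 20 assumption as before:

    #include <cstdint>

    const uintptr_t kPageSizeBits = 20;  // Assumed, as in the sketch above.
    const uintptr_t kPageAlignmentMask = (uintptr_t(1) << kPageSizeBits) - 1;

    uintptr_t PageBaseFromAllocationTop(uintptr_t top) {
      // A top of exactly page_start + kPageSize steps back into its page
      // before the mask is applied, instead of resolving to the next page.
      return (top - sizeof(void*)) & ~kPageAlignmentMask;
    }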
// ----------------------------------------------------------------------------
// Space is the abstract superclass for all allocation spaces.
@@ -380,6 +773,14 @@ class Space : public Malloced {
// (e.g. see LargeObjectSpace).
virtual intptr_t SizeOfObjects() { return Size(); }
+ virtual int RoundSizeDownToObjectAlignment(int size) {
+ if (id_ == CODE_SPACE) {
+ return RoundDown(size, kCodeAlignment);
+ } else {
+ return RoundDown(size, kPointerSize);
+ }
+ }
+
#ifdef DEBUG
virtual void Print() = 0;
#endif
@@ -430,9 +831,9 @@ class CodeRange {
// Allocates a chunk of memory from the large-object portion of
// the code range. On platforms with no separate code range, should
// not be called.
- MUST_USE_RESULT void* AllocateRawMemory(const size_t requested,
- size_t* allocated);
- void FreeRawMemory(void* buf, size_t length);
+ MUST_USE_RESULT Address AllocateRawMemory(const size_t requested,
+ size_t* allocated);
+ void FreeRawMemory(Address buf, size_t length);
private:
Isolate* isolate_;
@@ -443,9 +844,15 @@ class CodeRange {
class FreeBlock {
public:
FreeBlock(Address start_arg, size_t size_arg)
- : start(start_arg), size(size_arg) {}
+ : start(start_arg), size(size_arg) {
+ ASSERT(IsAddressAligned(start, MemoryChunk::kAlignment));
+ ASSERT(size >= static_cast<size_t>(Page::kPageSize));
+ }
FreeBlock(void* start_arg, size_t size_arg)
- : start(static_cast<Address>(start_arg)), size(size_arg) {}
+ : start(static_cast<Address>(start_arg)), size(size_arg) {
+ ASSERT(IsAddressAligned(start, MemoryChunk::kAlignment));
+ ASSERT(size >= static_cast<size_t>(Page::kPageSize));
+ }
Address start;
size_t size;
@@ -473,30 +880,63 @@ class CodeRange {
};
+class SkipList {
+ public:
+ SkipList() {
+ Clear();
+ }
+
+ void Clear() {
+ for (int idx = 0; idx < kSize; idx++) {
+ starts_[idx] = reinterpret_cast<Address>(-1);
+ }
+ }
+
+ Address StartFor(Address addr) {
+ return starts_[RegionNumber(addr)];
+ }
+
+ void AddObject(Address addr, int size) {
+ int start_region = RegionNumber(addr);
+ int end_region = RegionNumber(addr + size - kPointerSize);
+ for (int idx = start_region; idx <= end_region; idx++) {
+ if (starts_[idx] > addr) starts_[idx] = addr;
+ }
+ }
+
+ static inline int RegionNumber(Address addr) {
+ return (OffsetFrom(addr) & Page::kPageAlignmentMask) >> kRegionSizeLog2;
+ }
+
+ static void Update(Address addr, int size) {
+ Page* page = Page::FromAddress(addr);
+ SkipList* list = page->skip_list();
+ if (list == NULL) {
+ list = new SkipList();
+ page->set_skip_list(list);
+ }
+
+ list->AddObject(addr, size);
+ }
+
+ private:
+ static const int kRegionSizeLog2 = 13;
+ static const int kRegionSize = 1 << kRegionSizeLog2;
+ static const int kSize = Page::kPageSize / kRegionSize;
+
+ STATIC_ASSERT(Page::kPageSize % kRegionSize == 0);
+
+ Address starts_[kSize];
+};
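
A short usage sketch of the skip-list semantics above: each 8KB region (kRegionSizeLog2 = 13) remembers the lowest object start that touches it, so a conservative scan can begin at an object boundary. The page offsets below are made up for illustration:

    #include <cstdint>
    #include <cstdio>

    const int kRegionSizeLog2 = 13;                   // 8KB regions, as above.
    const int kPageSize = 1 << 20;                    // Assumed 1MB pages.
    const int kRegions = kPageSize >> kRegionSizeLog2;

    int RegionNumber(uintptr_t page_offset) {
      return static_cast<int>(page_offset >> kRegionSizeLog2);
    }

    int main() {
      uintptr_t starts[kRegions];                     // Mirrors starts_[].
      for (int i = 0; i < kRegions; i++) starts[i] = ~uintptr_t(0);
      uintptr_t obj = 0x1ff8;                         // Object start (offset).
      int size = 0x40;                                // Crosses 0x2000 boundary.
      int first = RegionNumber(obj);                  // Region 0.
      int last = RegionNumber(obj + size - sizeof(void*));  // Region 1.
      for (int r = first; r <= last; r++)             // AddObject's loop.
        if (starts[r] > obj) starts[r] = obj;
      std::printf("regions %d..%d start at %#lx\n",
                  first, last, static_cast<unsigned long>(starts[first]));
      return 0;
    }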
+
+
// ----------------------------------------------------------------------------
// A space acquires chunks of memory from the operating system. The memory
-// allocator manages chunks for the paged heap spaces (old space and map
-// space). A paged chunk consists of pages. Pages in a chunk have contiguous
-// addresses and are linked as a list.
-//
-// The allocator keeps an initial chunk which is used for the new space. The
-// leftover regions of the initial chunk are used for the initial chunks of
-// old space and map space if they are big enough to hold at least one page.
-// The allocator assumes that there is one old space and one map space, each
-// expands the space by allocating kPagesPerChunk pages except the last
-// expansion (before running out of space). The first chunk may contain fewer
-// than kPagesPerChunk pages as well.
-//
-// The memory allocator also allocates chunks for the large object space, but
-// they are managed by the space itself. The new space does not expand.
+// allocator allocates and deallocates pages for the paged heap spaces and large
+// pages for large object space.
//
-// The fact that pages for paged spaces are allocated and deallocated in chunks
-// induces a constraint on the order of pages in a linked lists. We say that
-// pages are linked in the chunk-order if and only if every two consecutive
-// pages from the same chunk are consecutive in the linked list.
+// Each space has to manage its own pages.
//
-
-
class MemoryAllocator {
public:
explicit MemoryAllocator(Isolate* isolate);
@@ -505,91 +945,15 @@ class MemoryAllocator {
// Max capacity of the total space and executable memory limit.
bool Setup(intptr_t max_capacity, intptr_t capacity_executable);
- // Deletes valid chunks.
void TearDown();
- // Reserves an initial address range of virtual memory to be split between
- // the two new space semispaces, the old space, and the map space. The
- // memory is not yet committed or assigned to spaces and split into pages.
- // The initial chunk is unmapped when the memory allocator is torn down.
- // This function should only be called when there is not already a reserved
- // initial chunk (initial_chunk_ should be NULL). It returns the start
- // address of the initial chunk if successful, with the side effect of
- // setting the initial chunk, or else NULL if unsuccessful and leaves the
- // initial chunk NULL.
- void* ReserveInitialChunk(const size_t requested);
-
- // Commits pages from an as-yet-unmanaged block of virtual memory into a
- // paged space. The block should be part of the initial chunk reserved via
- // a call to ReserveInitialChunk. The number of pages is always returned in
- // the output parameter num_pages. This function assumes that the start
- // address is non-null and that it is big enough to hold at least one
- // page-aligned page. The call always succeeds, and num_pages is always
- // greater than zero.
- Page* CommitPages(Address start, size_t size, PagedSpace* owner,
- int* num_pages);
-
- // Commit a contiguous block of memory from the initial chunk. Assumes that
- // the address is not NULL, the size is greater than zero, and that the
- // block is contained in the initial chunk. Returns true if it succeeded
- // and false otherwise.
- bool CommitBlock(Address start, size_t size, Executability executable);
-
- // Uncommit a contiguous block of memory [start..(start+size)[.
- // start is not NULL, the size is greater than zero, and the
- // block is contained in the initial chunk. Returns true if it succeeded
- // and false otherwise.
- bool UncommitBlock(Address start, size_t size);
+ Page* AllocatePage(PagedSpace* owner, Executability executable);
- // Zaps a contiguous block of memory [start..(start+size)[ thus
- // filling it up with a recognizable non-NULL bit pattern.
- void ZapBlock(Address start, size_t size);
-
- // Attempts to allocate the requested (non-zero) number of pages from the
- // OS. Fewer pages might be allocated than requested. If it fails to
- // allocate memory for the OS or cannot allocate a single page, this
- // function returns an invalid page pointer (NULL). The caller must check
- // whether the returned page is valid (by calling Page::is_valid()). It is
- // guaranteed that allocated pages have contiguous addresses. The actual
- // number of allocated pages is returned in the output parameter
- // allocated_pages. If the PagedSpace owner is executable and there is
- // a code range, the pages are allocated from the code range.
- Page* AllocatePages(int requested_pages, int* allocated_pages,
- PagedSpace* owner);
-
- // Frees pages from a given page and after. Requires pages to be
- // linked in chunk-order (see comment for class).
- // If 'p' is the first page of a chunk, pages from 'p' are freed
- // and this function returns an invalid page pointer.
- // Otherwise, the function searches a page after 'p' that is
- // the first page of a chunk. Pages after the found page
- // are freed and the function returns 'p'.
- Page* FreePages(Page* p);
-
- // Frees all pages owned by given space.
- void FreeAllPages(PagedSpace* space);
-
- // Allocates and frees raw memory of certain size.
- // These are just thin wrappers around OS::Allocate and OS::Free,
- // but keep track of allocated bytes as part of heap.
- // If the flag is EXECUTABLE and a code range exists, the requested
- // memory is allocated from the code range. If a code range exists
- // and the freed memory is in it, the code range manages the freed memory.
- MUST_USE_RESULT void* AllocateRawMemory(const size_t requested,
- size_t* allocated,
- Executability executable);
- void FreeRawMemory(void* buf,
- size_t length,
- Executability executable);
- void PerformAllocationCallback(ObjectSpace space,
- AllocationAction action,
- size_t size);
+ LargePage* AllocateLargePage(intptr_t object_size,
+ Executability executable,
+ Space* owner);
- void AddMemoryAllocationCallback(MemoryAllocationCallback callback,
- ObjectSpace space,
- AllocationAction action);
- void RemoveMemoryAllocationCallback(MemoryAllocationCallback callback);
- bool MemoryAllocationCallbackRegistered(MemoryAllocationCallback callback);
+ void Free(MemoryChunk* chunk);
// Returns the maximum available bytes of heaps.
intptr_t Available() { return capacity_ < size_ ? 0 : capacity_ - size_; }
@@ -611,67 +975,68 @@ class MemoryAllocator {
return (Available() / Page::kPageSize) * Page::kObjectAreaSize;
}
- // Links two pages.
- inline void SetNextPage(Page* prev, Page* next);
+#ifdef DEBUG
+ // Reports statistic info of the space.
+ void ReportStatistics();
+#endif
- // Returns the next page of a given page.
- inline Page* GetNextPage(Page* p);
+ MemoryChunk* AllocateChunk(intptr_t body_size,
+ Executability executable,
+ Space* space);
- // Checks whether a page belongs to a space.
- inline bool IsPageInSpace(Page* p, PagedSpace* space);
+ Address ReserveAlignedMemory(size_t requested,
+ size_t alignment,
+ VirtualMemory* controller);
+ Address AllocateAlignedMemory(size_t requested,
+ size_t alignment,
+ Executability executable,
+ VirtualMemory* controller);
- // Returns the space that owns the given page.
- inline PagedSpace* PageOwner(Page* page);
+ void FreeMemory(VirtualMemory* reservation, Executability executable);
+ void FreeMemory(Address addr, size_t size, Executability executable);
- // Finds the first/last page in the same chunk as a given page.
- Page* FindFirstPageInSameChunk(Page* p);
- Page* FindLastPageInSameChunk(Page* p);
+ // Commit a contiguous block of memory from the initial chunk. Assumes that
+ // the address is not NULL, the size is greater than zero, and that the
+ // block is contained in the initial chunk. Returns true if it succeeded
+ // and false otherwise.
+ bool CommitBlock(Address start, size_t size, Executability executable);
- // Relinks list of pages owned by space to make it chunk-ordered.
- // Returns new first and last pages of space.
- // Also returns last page in relinked list which has WasInUsedBeforeMC
- // flag set.
- void RelinkPageListInChunkOrder(PagedSpace* space,
- Page** first_page,
- Page** last_page,
- Page** last_page_in_use);
+ // Uncommit a contiguous block of memory [start..(start+size)[.
+ // start is not NULL, the size is greater than zero, and the
+ // block is contained in the initial chunk. Returns true if it succeeded
+ // and false otherwise.
+ bool UncommitBlock(Address start, size_t size);
-#ifdef DEBUG
- // Reports statistic info of the space.
- void ReportStatistics();
-#endif
+ // Zaps a contiguous block of memory [start..(start+size)[ thus
+ // filling it up with a recognizable non-NULL bit pattern.
+ void ZapBlock(Address start, size_t size);
- // Due to encoding limitation, we can only have 8K chunks.
- static const int kMaxNofChunks = 1 << kPageSizeBits;
- // If a chunk has at least 16 pages, the maximum heap size is about
- // 8K * 8K * 16 = 1G bytes.
-#ifdef V8_TARGET_ARCH_X64
- static const int kPagesPerChunk = 32;
- // On 64 bit the chunk table consists of 4 levels of 4096-entry tables.
- static const int kChunkTableLevels = 4;
- static const int kChunkTableBitsPerLevel = 12;
-#else
- static const int kPagesPerChunk = 16;
- // On 32 bit the chunk table consists of 2 levels of 256-entry tables.
- static const int kChunkTableLevels = 2;
- static const int kChunkTableBitsPerLevel = 8;
-#endif
+ void PerformAllocationCallback(ObjectSpace space,
+ AllocationAction action,
+ size_t size);
- private:
- static const int kChunkSize = kPagesPerChunk * Page::kPageSize;
+ void AddMemoryAllocationCallback(MemoryAllocationCallback callback,
+ ObjectSpace space,
+ AllocationAction action);
+
+ void RemoveMemoryAllocationCallback(
+ MemoryAllocationCallback callback);
+
+ bool MemoryAllocationCallbackRegistered(
+ MemoryAllocationCallback callback);
+ private:
Isolate* isolate_;
// Maximum space size in bytes.
- intptr_t capacity_;
+ size_t capacity_;
// Maximum subset of capacity_ that can be executable
- intptr_t capacity_executable_;
+ size_t capacity_executable_;
// Allocated space size in bytes.
- intptr_t size_;
-
+ size_t size_;
// Allocated executable space size in bytes.
- intptr_t size_executable_;
+ size_t size_executable_;
struct MemoryAllocationCallbackRegistration {
MemoryAllocationCallbackRegistration(MemoryAllocationCallback callback,
@@ -683,64 +1048,11 @@ class MemoryAllocator {
ObjectSpace space;
AllocationAction action;
};
+
  // A list of callbacks that are triggered when memory is allocated or freed.
List<MemoryAllocationCallbackRegistration>
memory_allocation_callbacks_;
- // The initial chunk of virtual memory.
- VirtualMemory* initial_chunk_;
-
- // Allocated chunk info: chunk start address, chunk size, and owning space.
- class ChunkInfo BASE_EMBEDDED {
- public:
- ChunkInfo() : address_(NULL),
- size_(0),
- owner_(NULL),
- executable_(NOT_EXECUTABLE),
- owner_identity_(FIRST_SPACE) {}
- inline void init(Address a, size_t s, PagedSpace* o);
- Address address() { return address_; }
- size_t size() { return size_; }
- PagedSpace* owner() { return owner_; }
- // We save executability of the owner to allow using it
- // when collecting stats after the owner has been destroyed.
- Executability executable() const { return executable_; }
- AllocationSpace owner_identity() const { return owner_identity_; }
-
- private:
- Address address_;
- size_t size_;
- PagedSpace* owner_;
- Executability executable_;
- AllocationSpace owner_identity_;
- };
-
- // Chunks_, free_chunk_ids_ and top_ act as a stack of free chunk ids.
- List<ChunkInfo> chunks_;
- List<int> free_chunk_ids_;
- int max_nof_chunks_;
- int top_;
-
- // Push/pop a free chunk id onto/from the stack.
- void Push(int free_chunk_id);
- int Pop();
- bool OutOfChunkIds() { return top_ == 0; }
-
- // Frees a chunk.
- void DeleteChunk(int chunk_id);
-
- // Basic check whether a chunk id is in the valid range.
- inline bool IsValidChunkId(int chunk_id);
-
- // Checks whether a chunk id identifies an allocated chunk.
- inline bool IsValidChunk(int chunk_id);
-
- // Returns the chunk id that a page belongs to.
- inline int GetChunkId(Page* p);
-
- // True if the address lies in the initial chunk.
- inline bool InInitialChunk(Address address);
-
// Initializes pages in a chunk. Returns the first page address.
// This function and GetChunkId() are provided for the mark-compact
// collector to rebuild page headers in the from space, which is
@@ -748,13 +1060,7 @@ class MemoryAllocator {
Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk,
PagedSpace* owner);
- Page* RelinkPagesInChunk(int chunk_id,
- Address chunk_start,
- size_t chunk_size,
- Page* prev,
- Page** last_page_in_use);
-
- DISALLOW_COPY_AND_ASSIGN(MemoryAllocator);
+ DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryAllocator);
};
@@ -777,111 +1083,67 @@ class ObjectIterator : public Malloced {
// -----------------------------------------------------------------------------
// Heap object iterator in new/old/map spaces.
//
-// A HeapObjectIterator iterates objects from a given address to the
-// top of a space. The given address must be below the current
-// allocation pointer (space top). There are some caveats.
-//
-// (1) If the space top changes upward during iteration (because of
-// allocating new objects), the iterator does not iterate objects
-// above the original space top. The caller must create a new
-// iterator starting from the old top in order to visit these new
-// objects.
-//
-// (2) If new objects are allocated below the original allocation top
-// (e.g., free-list allocation in paged spaces), the new objects
-// may or may not be iterated depending on their position with
-// respect to the current point of iteration.
+// A HeapObjectIterator iterates objects from the bottom of the given space
+// to its top or from the bottom of the given page to its top.
//
-// (3) The space top should not change downward during iteration,
-// otherwise the iterator will return not-necessarily-valid
-// objects.
-
+// If objects are allocated in the page during iteration the iterator may
+// or may not iterate over those objects. The caller must create a new
+// iterator in order to be sure to visit these new objects.
class HeapObjectIterator: public ObjectIterator {
public:
- // Creates a new object iterator in a given space. If a start
- // address is not given, the iterator starts from the space bottom.
+ // Creates a new object iterator in a given space.
// If the size function is not given, the iterator calls the default
// Object::Size().
explicit HeapObjectIterator(PagedSpace* space);
HeapObjectIterator(PagedSpace* space, HeapObjectCallback size_func);
- HeapObjectIterator(PagedSpace* space, Address start);
- HeapObjectIterator(PagedSpace* space,
- Address start,
- HeapObjectCallback size_func);
HeapObjectIterator(Page* page, HeapObjectCallback size_func);
- inline HeapObject* next() {
- return (cur_addr_ < cur_limit_) ? FromCurrentPage() : FromNextPage();
+ // Advance to the next object, skipping free spaces and other fillers and
+ // skipping the special garbage section of which there is one per space.
+ // Returns NULL when the iteration has ended.
+ inline HeapObject* Next() {
+ do {
+ HeapObject* next_obj = FromCurrentPage();
+ if (next_obj != NULL) return next_obj;
+ } while (AdvanceToNextPage());
+ return NULL;
}
- // implementation of ObjectIterator.
- virtual HeapObject* next_object() { return next(); }
+ virtual HeapObject* next_object() {
+ return Next();
+ }
private:
- Address cur_addr_; // current iteration point
- Address end_addr_; // end iteration point
- Address cur_limit_; // current page limit
- HeapObjectCallback size_func_; // size function
- Page* end_page_; // caches the page of the end address
-
- HeapObject* FromCurrentPage() {
- ASSERT(cur_addr_ < cur_limit_);
+ enum PageMode { kOnePageOnly, kAllPagesInSpace };
- HeapObject* obj = HeapObject::FromAddress(cur_addr_);
- int obj_size = (size_func_ == NULL) ? obj->Size() : size_func_(obj);
- ASSERT_OBJECT_SIZE(obj_size);
-
- cur_addr_ += obj_size;
- ASSERT(cur_addr_ <= cur_limit_);
+ Address cur_addr_; // Current iteration point.
+ Address cur_end_; // End iteration point.
+ HeapObjectCallback size_func_; // Size function or NULL.
+ PagedSpace* space_;
+ PageMode page_mode_;
- return obj;
- }
+ // Fast (inlined) path of next().
+ inline HeapObject* FromCurrentPage();
- // Slow path of next, goes into the next page.
- HeapObject* FromNextPage();
+ // Slow path of next(), goes into the next page. Returns false if the
+ // iteration has ended.
+ bool AdvanceToNextPage();
// Initializes fields.
- void Initialize(Address start, Address end, HeapObjectCallback size_func);
-
-#ifdef DEBUG
- // Verifies whether fields have valid values.
- void Verify();
-#endif
+ inline void Initialize(PagedSpace* owner,
+ Address start,
+ Address end,
+ PageMode mode,
+ HeapObjectCallback size_func);
};
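
Typical use of the rewritten iterator mirrors the loops in spaces.cc above; this is a usage sketch assuming a valid PagedSpace* named 'space', not a complete program:

    // Walk every live object in a paged space; Next() skips fillers and the
    // special garbage section and returns NULL when iteration has ended.
    HeapObjectIterator it(space);
    for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
      obj->ShortPrint();
    }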
// -----------------------------------------------------------------------------
// A PageIterator iterates the pages in a paged space.
-//
-// The PageIterator class provides three modes for iterating pages in a space:
-// PAGES_IN_USE iterates pages containing allocated objects.
-// PAGES_USED_BY_MC iterates pages that hold relocated objects during a
-// mark-compact collection.
-// ALL_PAGES iterates all pages in the space.
-//
-// There are some caveats.
-//
-// (1) If the space expands during iteration, new pages will not be
-// returned by the iterator in any mode.
-//
-// (2) If new objects are allocated during iteration, they will appear
-// in pages returned by the iterator. Allocation may cause the
-// allocation pointer or MC allocation pointer in the last page to
-// change between constructing the iterator and iterating the last
-// page.
-//
-// (3) The space should not shrink during iteration, otherwise the
-// iterator will return deallocated pages.
class PageIterator BASE_EMBEDDED {
public:
- enum Mode {
- PAGES_IN_USE,
- PAGES_USED_BY_MC,
- ALL_PAGES
- };
-
- PageIterator(PagedSpace* space, Mode mode);
+ explicit inline PageIterator(PagedSpace* space);
inline bool has_next();
inline Page* next();
@@ -889,21 +1151,25 @@ class PageIterator BASE_EMBEDDED {
private:
PagedSpace* space_;
Page* prev_page_; // Previous page returned.
- Page* stop_page_; // Page to stop at (last page returned by the iterator).
+ // Next page that will be returned. Cached here so that we can use this
+ // iterator for operations that deallocate pages.
+ Page* next_page_;
};
// -----------------------------------------------------------------------------
-// A space has a list of pages. The next page can be accessed via
-// Page::next_page() call. The next page of the last page is an
-// invalid page pointer. A space can expand and shrink dynamically.
+// A space has a circular list of pages. The next page can be accessed via
+// Page::next_page() call.
// An abstraction of allocation and relocation pointers in a page-structured
// space.
class AllocationInfo {
public:
- Address top; // current allocation top
- Address limit; // current allocation limit
+ AllocationInfo() : top(NULL), limit(NULL) {
+ }
+
+ Address top; // Current allocation top.
+ Address limit; // Current allocation limit.
#ifdef DEBUG
bool VerifyPagedAllocation() {
@@ -935,70 +1201,210 @@ class AllocationStats BASE_EMBEDDED {
// Zero out all the allocation statistics (ie, no capacity).
void Clear() {
capacity_ = 0;
- available_ = 0;
size_ = 0;
waste_ = 0;
}
+ void ClearSizeWaste() {
+ size_ = capacity_;
+ waste_ = 0;
+ }
+
// Reset the allocation statistics (ie, available = capacity with no
// wasted or allocated bytes).
void Reset() {
- available_ = capacity_;
size_ = 0;
waste_ = 0;
}
// Accessors for the allocation statistics.
intptr_t Capacity() { return capacity_; }
- intptr_t Available() { return available_; }
intptr_t Size() { return size_; }
intptr_t Waste() { return waste_; }
- // Grow the space by adding available bytes.
+ // Grow the space by adding available bytes. They are initially marked as
+ // being in use (part of the size), but will normally be immediately freed,
+ // putting them on the free list and removing them from size_.
void ExpandSpace(int size_in_bytes) {
capacity_ += size_in_bytes;
- available_ += size_in_bytes;
+ size_ += size_in_bytes;
+ ASSERT(size_ >= 0);
}
- // Shrink the space by removing available bytes.
+ // Shrink the space by removing available bytes. Since shrinking is done
+ // during sweeping, bytes have been marked as being in use (part of the size)
+ // and are hereby freed.
void ShrinkSpace(int size_in_bytes) {
capacity_ -= size_in_bytes;
- available_ -= size_in_bytes;
+ size_ -= size_in_bytes;
+ ASSERT(size_ >= 0);
}
// Allocate from available bytes (available -> size).
void AllocateBytes(intptr_t size_in_bytes) {
- available_ -= size_in_bytes;
size_ += size_in_bytes;
+ ASSERT(size_ >= 0);
}
// Free allocated bytes, making them available (size -> available).
void DeallocateBytes(intptr_t size_in_bytes) {
size_ -= size_in_bytes;
- available_ += size_in_bytes;
+ ASSERT(size_ >= 0);
}
// Waste free bytes (available -> waste).
void WasteBytes(int size_in_bytes) {
- available_ -= size_in_bytes;
+ size_ -= size_in_bytes;
waste_ += size_in_bytes;
- }
-
- // Consider the wasted bytes to be allocated, as they contain filler
- // objects (waste -> size).
- void FillWastedBytes(intptr_t size_in_bytes) {
- waste_ -= size_in_bytes;
- size_ += size_in_bytes;
+ ASSERT(size_ >= 0);
}
private:
intptr_t capacity_;
- intptr_t available_;
intptr_t size_;
intptr_t waste_;
};
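
With available_ gone, the intended invariant is capacity = size + waste + bytes held by the free list. A small, self-contained trace of the accounting described in the comments above; the plain variables stand in for the class fields:

    #include <cassert>
    #include <cstdint>

    int main() {
      intptr_t capacity = 0, size = 0, waste = 0, free_bytes = 0;
      capacity += 4096; size += 4096;    // ExpandSpace: new bytes are in-use.
      size -= 4096; free_bytes += 4096;  // Sweeper frees them to the free list.
      free_bytes -= 256; size += 256;    // AllocateBytes: free list -> in-use.
      size -= 32; waste += 32;           // WasteBytes: unusable fragment.
      assert(capacity == size + waste + free_bytes);
      return 0;
    }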
+// -----------------------------------------------------------------------------
+// Free lists for old object spaces
+//
+// Free-list nodes are free blocks in the heap. They look like heap objects
+// (free-list node pointers have the heap object tag, and they have a map like
+// a heap object). They have a size and a next pointer. The next pointer is
+// the raw address of the next free list node (or NULL).
+class FreeListNode: public HeapObject {
+ public:
+ // Obtain a free-list node from a raw address. This is not a cast because
+ // it does not check nor require that the first word at the address is a map
+ // pointer.
+ static FreeListNode* FromAddress(Address address) {
+ return reinterpret_cast<FreeListNode*>(HeapObject::FromAddress(address));
+ }
+
+ static inline bool IsFreeListNode(HeapObject* object);
+
+ // Set the size in bytes, which can be read with HeapObject::Size(). This
+ // function also writes a map to the first word of the block so that it
+ // looks like a heap object to the garbage collector and heap iteration
+ // functions.
+ void set_size(Heap* heap, int size_in_bytes);
+
+ // Accessors for the next field.
+ inline FreeListNode* next();
+ inline FreeListNode** next_address();
+ inline void set_next(FreeListNode* next);
+
+ inline void Zap();
+
+ private:
+ static const int kNextOffset = POINTER_SIZE_ALIGN(FreeSpace::kHeaderSize);
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(FreeListNode);
+};
+
+
+// The free list for the old space. The free list is organized in such a way
+// as to encourage objects allocated around the same time to be near each
+// other. The normal way to allocate is intended to be by bumping a 'top'
+// pointer until it hits a 'limit' pointer. When the limit is hit we need to
+// find a new space to allocate from. This is done with the free list, which
+// is divided up into rough categories to cut down on waste. Having finer
+// categories would scatter allocation more.
+
+// The old space free list is organized in categories.
+// 1-31 words: Such small free areas are discarded for efficiency reasons.
+// They can be reclaimed by the compactor. However the distance between top
+// and limit may be this small.
+// 32-255 words: There is a list of spaces this large. It is used for top and
+// limit when the object we need to allocate is 1-31 words in size. These
+// spaces are called small.
+// 256-2047 words: There is a list of spaces this large. It is used for top and
+// limit when the object we need to allocate is 32-255 words in size. These
+// spaces are called medium.
+// 2048-16383 words: There is a list of spaces this large. It is used for top
+//   and limit when the object we need to allocate is 256-2047 words in size.
+//   These spaces are called large.
+// At least 16384 words. This list is for objects of 2048 words or larger.
+//   Empty pages are added to this list. These spaces are called huge. A
+//   sketch of this classification follows the class below.
+class FreeList BASE_EMBEDDED {
+ public:
+ explicit FreeList(PagedSpace* owner);
+
+ // Clear the free list.
+ void Reset();
+
+ // Return the number of bytes available on the free list.
+ intptr_t available() { return available_; }
+
+ // Place a node on the free list. The block of size 'size_in_bytes'
+ // starting at 'start' is placed on the free list. The return value is the
+ // number of bytes that have been lost due to internal fragmentation by
+ // freeing the block. Bookkeeping information will be written to the block,
+ // ie, its contents will be destroyed. The start address should be word
+ // aligned, and the size should be a non-zero multiple of the word size.
+ int Free(Address start, int size_in_bytes);
+
+ // Allocate a block of size 'size_in_bytes' from the free list. The block
+ // is uninitialized. A failure is returned if no block is available. The
+ // size should be a non-zero multiple of the word size.
+ MUST_USE_RESULT HeapObject* Allocate(int size_in_bytes);
+
+#ifdef DEBUG
+ void Zap();
+ static intptr_t SumFreeList(FreeListNode* node);
+ static int FreeListLength(FreeListNode* cur);
+ intptr_t SumFreeLists();
+ bool IsVeryLong();
+#endif
+
+ struct SizeStats {
+ intptr_t Total() {
+ return small_size_ + medium_size_ + large_size_ + huge_size_;
+ }
+
+ intptr_t small_size_;
+ intptr_t medium_size_;
+ intptr_t large_size_;
+ intptr_t huge_size_;
+ };
+
+ void CountFreeListItems(Page* p, SizeStats* sizes);
+
+ intptr_t EvictFreeListItems(Page* p);
+
+ private:
+ // The size range of blocks, in bytes.
+ static const int kMinBlockSize = 3 * kPointerSize;
+ static const int kMaxBlockSize = Page::kMaxHeapObjectSize;
+
+ FreeListNode* PickNodeFromList(FreeListNode** list, int* node_size);
+
+ FreeListNode* FindNodeFor(int size_in_bytes, int* node_size);
+
+ PagedSpace* owner_;
+ Heap* heap_;
+
+ // Total available bytes in all blocks on this free list.
+ int available_;
+
+ static const int kSmallListMin = 0x20 * kPointerSize;
+ static const int kSmallListMax = 0xff * kPointerSize;
+ static const int kMediumListMax = 0x7ff * kPointerSize;
+ static const int kLargeListMax = 0x3fff * kPointerSize;
+ static const int kSmallAllocationMax = kSmallListMin - kPointerSize;
+ static const int kMediumAllocationMax = kSmallListMax;
+ static const int kLargeAllocationMax = kMediumListMax;
+ FreeListNode* small_list_;
+ FreeListNode* medium_list_;
+ FreeListNode* large_list_;
+ FreeListNode* huge_list_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(FreeList);
+};
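+
+// A minimal sketch of the size classification described above (illustrative
+// only: this helper is hypothetical and not part of the header; the bounds
+// mirror kSmallListMin, kSmallListMax, kMediumListMax and kLargeListMax,
+// expressed in words).
+enum FreeListCategory { kDiscarded, kSmall, kMedium, kLarge, kHuge };
+
+inline FreeListCategory CategoryForSizeInWords(int words) {
+  if (words < 0x20) return kDiscarded;    // 1-31 words: thrown away.
+  if (words <= 0xff) return kSmall;       // 32-255 words.
+  if (words <= 0x7ff) return kMedium;     // 256-2047 words.
+  if (words <= 0x3fff) return kLarge;     // 2048-16383 words.
+  return kHuge;                           // At least 16384 words.
+}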
+
+
class PagedSpace : public Space {
public:
// Creates a space with a maximum capacity, and an id.
@@ -1013,7 +1419,7 @@ class PagedSpace : public Space {
// the memory allocator's initial chunk) if possible. If the block of
// addresses is not big enough to contain a single page-aligned page, a
// fresh chunk will be allocated.
- bool Setup(Address start, size_t size);
+ bool Setup();
// Returns true if the space has been successfully set up and not
// subsequently torn down.
@@ -1026,8 +1432,6 @@ class PagedSpace : public Space {
// Checks whether an object/address is in this space.
inline bool Contains(Address a);
bool Contains(HeapObject* o) { return Contains(o->address()); }
- // Never crashes even if a is not a valid pointer.
- inline bool SafeContains(Address a);
// Given an address occupied by a live object, return that object if it is
// in this space, or Failure::Exception() if it is not. The implementation
@@ -1035,104 +1439,91 @@ class PagedSpace : public Space {
// linear in the number of objects in the page. It may be slow.
MUST_USE_RESULT MaybeObject* FindObject(Address addr);
- // Checks whether page is currently in use by this space.
- bool IsUsed(Page* page);
-
- void MarkAllPagesClean();
-
// Prepares for a mark-compact GC.
- virtual void PrepareForMarkCompact(bool will_compact);
+ virtual void PrepareForMarkCompact();
- // The top of allocation in a page in this space. Undefined if page is unused.
- Address PageAllocationTop(Page* page) {
- return page == TopPageOf(allocation_info_) ? top()
- : PageAllocationLimit(page);
- }
-
- // The limit of allocation for a page in this space.
- virtual Address PageAllocationLimit(Page* page) = 0;
-
- void FlushTopPageWatermark() {
- AllocationTopPage()->SetCachedAllocationWatermark(top());
- AllocationTopPage()->InvalidateWatermark(true);
- }
-
- // Current capacity without growing (Size() + Available() + Waste()).
+ // Current capacity without growing (Size() + Available()).
intptr_t Capacity() { return accounting_stats_.Capacity(); }
// Total amount of memory committed for this space. For paged
// spaces this equals the capacity.
intptr_t CommittedMemory() { return Capacity(); }
- // Available bytes without growing.
- intptr_t Available() { return accounting_stats_.Available(); }
+ // Sets the allocated size and the wasted space to zero. The stats are
+ // rebuilt during sweeping by adding each page to the size as it is
+ // encountered. As free spaces are discovered during sweeping they are
+ // subtracted from the size; they end up on the free list or, if too
+ // small, in the wasted total.
+ void ClearStats() {
+ accounting_stats_.ClearSizeWaste();
+ }
+
+ // Available bytes without growing. These are the bytes on the free list.
+ // The bytes in the linear allocation area are not included in this total
+ // because updating the stats would slow down allocation. New pages are
+ // immediately added to the free list so they show up here.
+ intptr_t Available() { return free_list_.available(); }
- // Allocated bytes in this space.
+ // Allocated bytes in this space. Garbage bytes that were not found due to
+ // lazy sweeping are counted as being allocated! The bytes in the current
+ // linear allocation area (between top and limit) are also counted here.
virtual intptr_t Size() { return accounting_stats_.Size(); }
- // Wasted bytes due to fragmentation and not recoverable until the
- // next GC of this space.
- intptr_t Waste() { return accounting_stats_.Waste(); }
+ // As Size(), but the bytes in the current linear allocation area are not
+ // included.
+ virtual intptr_t SizeOfObjects() { return Size() - (limit() - top()); }
- // Returns the address of the first object in this space.
- Address bottom() { return first_page_->ObjectAreaStart(); }
+ // Wasted bytes in this space. These are just the bytes that were thrown away
+ // due to being too small to use for allocation. They do not include the
+ // free bytes that were not found at all due to lazy sweeping.
+ virtual intptr_t Waste() { return accounting_stats_.Waste(); }
// Returns the allocation pointer in this space.
- Address top() { return allocation_info_.top; }
+ Address top() {
+ return allocation_info_.top;
+ }
+ Address limit() { return allocation_info_.limit; }
// Allocate the requested number of bytes in the space if possible, return a
// failure object if not.
MUST_USE_RESULT inline MaybeObject* AllocateRaw(int size_in_bytes);
- // Allocate the requested number of bytes for relocation during mark-compact
- // collection.
- MUST_USE_RESULT inline MaybeObject* MCAllocateRaw(int size_in_bytes);
-
virtual bool ReserveSpace(int bytes);
- // Used by ReserveSpace.
- virtual void PutRestOfCurrentPageOnFreeList(Page* current_page) = 0;
-
- // Free all pages in range from prev (exclusive) to last (inclusive).
- // Freed pages are moved to the end of page list.
- void FreePages(Page* prev, Page* last);
-
- // Deallocates a block.
- virtual void DeallocateBlock(Address start,
- int size_in_bytes,
- bool add_to_freelist) = 0;
+ // Give a block of memory to the space's free list. It might be added to
+ // the free list or accounted as waste. Returns the number of bytes that
+ // were actually freed, i.e. the block size minus the wasted bytes.
+ int Free(Address start, int size_in_bytes) {
+ int wasted = free_list_.Free(start, size_in_bytes);
+ accounting_stats_.DeallocateBytes(size_in_bytes - wasted);
+ return size_in_bytes - wasted;
+ }
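+ // E.g. (hypothetical numbers): freeing a 64-byte block of which 16 bytes
+ // are too small for the free list to link returns 48, and only those 48
+ // bytes are subtracted from the allocated size here.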
// Set space allocation info.
- void SetTop(Address top) {
+ void SetTop(Address top, Address limit) {
+ ASSERT(top == limit ||
+ Page::FromAddress(top) == Page::FromAddress(limit - 1));
allocation_info_.top = top;
- allocation_info_.limit = PageAllocationLimit(Page::FromAllocationTop(top));
+ allocation_info_.limit = limit;
}
- // ---------------------------------------------------------------------------
- // Mark-compact collection support functions
-
- // Set the relocation point to the beginning of the space.
- void MCResetRelocationInfo();
-
- // Writes relocation info to the top page.
- void MCWriteRelocationInfoToPage() {
- TopPageOf(mc_forwarding_info_)->
- SetAllocationWatermark(mc_forwarding_info_.top);
+ void Allocate(int bytes) {
+ accounting_stats_.AllocateBytes(bytes);
}
- // Computes the offset of a given address in this space to the beginning
- // of the space.
- int MCSpaceOffsetForAddress(Address addr);
+ void IncreaseCapacity(int size) {
+ accounting_stats_.ExpandSpace(size);
+ }
- // Updates the allocation pointer to the relocation top after a mark-compact
- // collection.
- virtual void MCCommitRelocationInfo() = 0;
+ // Releases an unused page and shrinks the space.
+ void ReleasePage(Page* page);
- // Releases half of unused pages.
- void Shrink();
+ // Releases all of the unused pages.
+ void ReleaseAllUnusedPages();
- // Ensures that the capacity is at least 'capacity'. Returns false on failure.
- bool EnsureCapacity(int capacity);
+ // The dummy page that anchors the linked list of pages.
+ Page* anchor() { return &anchor_; }
#ifdef DEBUG
// Print meta info and objects in this space.
@@ -1141,6 +1532,9 @@ class PagedSpace : public Space {
// Verify integrity of this space.
virtual void Verify(ObjectVisitor* visitor);
+ // Reports statistics for the space
+ void ReportStatistics();
+
// Overridden by subclasses to verify space-specific object
// properties (e.g., only maps or free-list nodes are in map space).
virtual void VerifyObject(HeapObject* obj) {}
@@ -1151,10 +1545,83 @@ class PagedSpace : public Space {
static void ResetCodeStatistics();
#endif
- // Returns the page of the allocation pointer.
- Page* AllocationTopPage() { return TopPageOf(allocation_info_); }
+ bool was_swept_conservatively() { return was_swept_conservatively_; }
+ void set_was_swept_conservatively(bool b) { was_swept_conservatively_ = b; }
+
+ // Evacuation candidates are swept by the evacuator. This needs to return
+ // a valid result before _and_ after evacuation has finished.
+ static bool ShouldBeSweptLazily(Page* p) {
+ return !p->IsEvacuationCandidate() &&
+ !p->IsFlagSet(Page::RESCAN_ON_EVACUATION) &&
+ !p->WasSweptPrecisely();
+ }
+
+ void SetPagesToSweep(Page* first) {
+ if (first == &anchor_) first = NULL;
+ first_unswept_page_ = first;
+ }
+
+ bool AdvanceSweeper(intptr_t bytes_to_sweep);
+
+ bool IsSweepingComplete() {
+ return !first_unswept_page_->is_valid();
+ }
+
+ Page* FirstPage() { return anchor_.next_page(); }
+ Page* LastPage() { return anchor_.prev_page(); }
+
+ // Returns zero for pages that have so little fragmentation that it is not
+ // worth defragmenting them. Otherwise a positive integer that gives an
+ // estimate of fragmentation on an arbitrary scale.
+ int Fragmentation(Page* p) {
+ FreeList::SizeStats sizes;
+ free_list_.CountFreeListItems(p, &sizes);
+
+ intptr_t ratio;
+ intptr_t ratio_threshold;
+ if (identity() == CODE_SPACE) {
+ ratio = (sizes.medium_size_ * 10 + sizes.large_size_ * 2) * 100 /
+ Page::kObjectAreaSize;
+ ratio_threshold = 10;
+ } else {
+ ratio = (sizes.small_size_ * 5 + sizes.medium_size_) * 100 /
+ Page::kObjectAreaSize;
+ ratio_threshold = 15;
+ }
+
+ if (FLAG_trace_fragmentation) {
+ PrintF("%p [%d]: %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %s\n",
+ reinterpret_cast<void*>(p),
+ identity(),
+ static_cast<int>(sizes.small_size_),
+ static_cast<double>(sizes.small_size_ * 100) /
+ Page::kObjectAreaSize,
+ static_cast<int>(sizes.medium_size_),
+ static_cast<double>(sizes.medium_size_ * 100) /
+ Page::kObjectAreaSize,
+ static_cast<int>(sizes.large_size_),
+ static_cast<double>(sizes.large_size_ * 100) /
+ Page::kObjectAreaSize,
+ static_cast<int>(sizes.huge_size_),
+ static_cast<double>(sizes.huge_size_ * 100) /
+ Page::kObjectAreaSize,
+ (ratio > ratio_threshold) ? "[fragmented]" : "");
+ }
+
+ if (FLAG_always_compact && sizes.Total() != Page::kObjectAreaSize) {
+ return 1;
+ }
+ if (ratio <= ratio_threshold) return 0; // Not fragmented.
+
+ return static_cast<int>(ratio - ratio_threshold);
+ }
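+
+ // Worked example of the estimate above (all numbers hypothetical; assume
+ // an object area of 1 MB for round arithmetic): a CODE_SPACE page whose
+ // free lists hold medium_size_ == 8 KB and large_size_ == 32 KB gives
+ // ratio == (8192 * 10 + 32768 * 2) * 100 / (1 << 20) == 14, which is above
+ // the threshold of 10, so Fragmentation() returns 14 - 10 == 4.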
+
+ void EvictEvacuationCandidatesFromFreeLists();
- void RelinkPageListInChunkOrder(bool deallocate_blocks);
+ bool CanExpand();
+
+ // Returns the number of total pages in this space.
+ int CountTotalPages();
protected:
// Maximum capacity of this space.
@@ -1163,79 +1630,36 @@ class PagedSpace : public Space {
// Accounting information for this space.
AllocationStats accounting_stats_;
- // The first page in this space.
- Page* first_page_;
+ // The dummy page that anchors the double linked list of pages.
+ Page anchor_;
- // The last page in this space. Initially set in Setup, updated in
- // Expand and Shrink.
- Page* last_page_;
-
- // True if pages owned by this space are linked in chunk-order.
- // See comment for class MemoryAllocator for definition of chunk-order.
- bool page_list_is_chunk_ordered_;
+ // The space's free list.
+ FreeList free_list_;
// Normal allocation information.
AllocationInfo allocation_info_;
- // Relocation information during mark-compact collections.
- AllocationInfo mc_forwarding_info_;
-
// Bytes of each page that cannot be allocated. Possibly non-zero
// for pages in spaces with only fixed-size objects. Always zero
// for pages in spaces with variable sized objects (those pages are
// padded with free-list nodes).
int page_extra_;
- // Sets allocation pointer to a page bottom.
- static void SetAllocationInfo(AllocationInfo* alloc_info, Page* p);
+ bool was_swept_conservatively_;
- // Returns the top page specified by an allocation info structure.
- static Page* TopPageOf(AllocationInfo alloc_info) {
- return Page::FromAllocationTop(alloc_info.limit);
- }
-
- int CountPagesToTop() {
- Page* p = Page::FromAllocationTop(allocation_info_.top);
- PageIterator it(this, PageIterator::ALL_PAGES);
- int counter = 1;
- while (it.has_next()) {
- if (it.next() == p) return counter;
- counter++;
- }
- UNREACHABLE();
- return -1;
- }
+ Page* first_unswept_page_;
// Expands the space by allocating a fixed number of pages. Returns false if
- // it cannot allocate requested number of pages from OS. Newly allocated
- // pages are append to the last_page;
- bool Expand(Page* last_page);
-
- // Generic fast case allocation function that tries linear allocation in
- // the top page of 'alloc_info'. Returns NULL on failure.
- inline HeapObject* AllocateLinearly(AllocationInfo* alloc_info,
- int size_in_bytes);
+ // it cannot allocate requested number of pages from OS, or if the hard heap
+ // size limit has been hit.
+ bool Expand();
- // During normal allocation or deserialization, roll to the next page in
- // the space (there is assumed to be one) and allocate there. This
- // function is space-dependent.
- virtual HeapObject* AllocateInNextPage(Page* current_page,
- int size_in_bytes) = 0;
+ // Generic fast case allocation function that tries linear allocation at the
+ // address denoted by top in allocation_info_.
+ inline HeapObject* AllocateLinearly(int size_in_bytes);
// Slow path of AllocateRaw. This function is space-dependent.
- MUST_USE_RESULT virtual HeapObject* SlowAllocateRaw(int size_in_bytes) = 0;
-
- // Slow path of MCAllocateRaw.
- MUST_USE_RESULT HeapObject* SlowMCAllocateRaw(int size_in_bytes);
-
-#ifdef DEBUG
- // Returns the number of total pages in this space.
- int CountTotalPages();
-#endif
-
- private:
- // Returns a pointer to the page of the relocation pointer.
- Page* MCRelocationTopPage() { return TopPageOf(mc_forwarding_info_); }
+ MUST_USE_RESULT virtual HeapObject* SlowAllocateRaw(int size_in_bytes);
friend class PageIterator;
};
@@ -1276,20 +1700,112 @@ class HistogramInfo: public NumberAndSizeInfo {
};
+enum SemiSpaceId {
+ kFromSpace = 0,
+ kToSpace = 1
+};
+
+
+class SemiSpace;
+
+
+class NewSpacePage : public MemoryChunk {
+ public:
+ // GC related flags copied from from-space to to-space when
+ // flipping semispaces.
+ static const intptr_t kCopyOnFlipFlagsMask =
+ (1 << MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) |
+ (1 << MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING) |
+ (1 << MemoryChunk::SCAN_ON_SCAVENGE);
+
+ inline NewSpacePage* next_page() const {
+ return static_cast<NewSpacePage*>(next_chunk());
+ }
+
+ inline void set_next_page(NewSpacePage* page) {
+ set_next_chunk(page);
+ }
+
+ inline NewSpacePage* prev_page() const {
+ return static_cast<NewSpacePage*>(prev_chunk());
+ }
+
+ inline void set_prev_page(NewSpacePage* page) {
+ set_prev_chunk(page);
+ }
+
+ SemiSpace* semi_space() {
+ return reinterpret_cast<SemiSpace*>(owner());
+ }
+
+ bool is_anchor() { return !this->InNewSpace(); }
+
+ static bool IsAtStart(Address addr) {
+ return (reinterpret_cast<intptr_t>(addr) & Page::kPageAlignmentMask)
+ == kObjectStartOffset;
+ }
+
+ static bool IsAtEnd(Address addr) {
+ return (reinterpret_cast<intptr_t>(addr) & Page::kPageAlignmentMask) == 0;
+ }
+
+ Address address() {
+ return reinterpret_cast<Address>(this);
+ }
+
+ // Finds the NewSpacePage containing the given address.
+ static inline NewSpacePage* FromAddress(Address address_in_page) {
+ Address page_start =
+ reinterpret_cast<Address>(reinterpret_cast<uintptr_t>(address_in_page) &
+ ~Page::kPageAlignmentMask);
+ NewSpacePage* page = reinterpret_cast<NewSpacePage*>(page_start);
+ return page;
+ }
+
+ // Find the page for a limit address. A limit address is either an address
+ // inside a page, or the address right after the last byte of a page.
+ static inline NewSpacePage* FromLimit(Address address_limit) {
+ return NewSpacePage::FromAddress(address_limit - 1);
+ }
+
+ private:
+ // Create a NewSpacePage object that is only used as anchor
+ // for the doubly-linked list of real pages.
+ explicit NewSpacePage(SemiSpace* owner) {
+ InitializeAsAnchor(owner);
+ }
+
+ static NewSpacePage* Initialize(Heap* heap,
+ Address start,
+ SemiSpace* semi_space);
+
+ // Initialize a fake NewSpacePage used as a sentinel at the ends
+ // of a doubly-linked list of real NewSpacePages.
+ // Only uses the prev/next links, and sets flags to not be in new-space.
+ void InitializeAsAnchor(SemiSpace* owner);
+
+ friend class SemiSpace;
+ friend class SemiSpaceIterator;
+};
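+
+// Illustrative address arithmetic for the helpers above (hypothetical
+// numbers: assume 4 KB pages and a kObjectStartOffset of 0x100 purely for
+// the example):
+//   FromAddress(0x5010) -> the page at 0x5000 (low bits masked off).
+//   IsAtStart(0x5100)   -> true  (0x5100 & kPageAlignmentMask == 0x100).
+//   IsAtEnd(0x6000)     -> true  (0x6000 & kPageAlignmentMask == 0).
+//   FromLimit(0x6000)   -> the page at 0x5000, not the one at 0x6000.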
+
+
// -----------------------------------------------------------------------------
// SemiSpace in young generation
//
-// A semispace is a contiguous chunk of memory. The mark-compact collector
-// uses the memory in the from space as a marking stack when tracing live
-// objects.
+// A semispace is a contiguous chunk of memory holding page-like memory
+// chunks. The mark-compact collector uses the memory of the first page in
+// the from space as a marking stack when tracing live objects.
class SemiSpace : public Space {
public:
// Constructor.
- explicit SemiSpace(Heap* heap) : Space(heap, NEW_SPACE, NOT_EXECUTABLE) {
- start_ = NULL;
- age_mark_ = NULL;
- }
+ SemiSpace(Heap* heap, SemiSpaceId semispace)
+ : Space(heap, NEW_SPACE, NOT_EXECUTABLE),
+ start_(NULL),
+ age_mark_(NULL),
+ id_(semispace),
+ anchor_(this),
+ current_page_(NULL) { }
// Sets up the semispace using the given chunk.
bool Setup(Address start, int initial_capacity, int maximum_capacity);
@@ -1301,14 +1817,9 @@ class SemiSpace : public Space {
// True if the space has been set up but not torn down.
bool HasBeenSetup() { return start_ != NULL; }
- // Grow the size of the semispace by committing extra virtual memory.
- // Assumes that the caller has checked that the semispace has not reached
- // its maximum capacity (and thus there is space available in the reserved
- // address range to grow).
- bool Grow();
-
// Grow the semispace to the new capacity. The new capacity
- // requested must be larger than the current capacity.
+ // requested must be larger than the current capacity and less than
+ // the maximum capacity.
bool GrowTo(int new_capacity);
// Shrinks the semispace to the new capacity. The new capacity
@@ -1316,14 +1827,40 @@ class SemiSpace : public Space {
// semispace and less than the current capacity.
bool ShrinkTo(int new_capacity);
- // Returns the start address of the space.
- Address low() { return start_; }
+ // Returns the start address of the first page of the space.
+ Address space_start() {
+ ASSERT(anchor_.next_page() != &anchor_);
+ return anchor_.next_page()->body();
+ }
+
+ // Returns the start address of the current page of the space.
+ Address page_low() {
+ return current_page_->body();
+ }
+
// Returns one past the end address of the space.
- Address high() { return low() + capacity_; }
+ Address space_end() {
+ return anchor_.prev_page()->body_limit();
+ }
+
+ // Returns one past the end address of the current page of the space.
+ Address page_high() {
+ return current_page_->body_limit();
+ }
+
+ bool AdvancePage() {
+ NewSpacePage* next_page = current_page_->next_page();
+ if (next_page == anchor()) return false;
+ current_page_ = next_page;
+ return true;
+ }
+
+ // Resets the space to using the first page.
+ void Reset();
// Age mark accessors.
Address age_mark() { return age_mark_; }
- void set_age_mark(Address mark) { age_mark_ = mark; }
+ void set_age_mark(Address mark);
// True if the address is in the address range of this semispace (not
// necessarily below the allocation pointer).
@@ -1338,11 +1875,6 @@ class SemiSpace : public Space {
return (reinterpret_cast<uintptr_t>(o) & object_mask_) == object_expected_;
}
- // The offset of an address from the beginning of the space.
- int SpaceOffsetForAddress(Address addr) {
- return static_cast<int>(addr - low());
- }
-
// If we don't have these here then SemiSpace will be abstract. However
// they should never be called.
virtual intptr_t Size() {
@@ -1359,9 +1891,19 @@ class SemiSpace : public Space {
bool Commit();
bool Uncommit();
+ NewSpacePage* first_page() { return anchor_.next_page(); }
+ NewSpacePage* current_page() { return current_page_; }
+
#ifdef DEBUG
virtual void Print();
virtual void Verify();
+ // Validate a range of addresses in a SemiSpace.
+ // The "from" address must be on a page prior to the "to" address,
+ // in the linked page order, or it must be earlier on the same page.
+ static void AssertValidRange(Address from, Address to);
+#else
+ // Do nothing.
+ inline static void AssertValidRange(Address from, Address to) {}
#endif
// Returns the current capacity of the semi space.
@@ -1373,7 +1915,17 @@ class SemiSpace : public Space {
// Returns the initial capacity of the semi space.
int InitialCapacity() { return initial_capacity_; }
+ SemiSpaceId id() { return id_; }
+
+ static void Swap(SemiSpace* from, SemiSpace* to);
+
private:
+ // Flips the semispace between being from-space and to-space.
+ // Copies the flags into the masked positions on all pages in the space.
+ void FlipPages(intptr_t flags, intptr_t flag_mask);
+
+ NewSpacePage* anchor() { return &anchor_; }
+
// The current and maximum capacity of the space.
int capacity_;
int maximum_capacity_;
@@ -1390,7 +1942,13 @@ class SemiSpace : public Space {
uintptr_t object_expected_;
bool committed_;
+ SemiSpaceId id_;
+
+ NewSpacePage anchor_;
+ NewSpacePage* current_page_;
+ friend class SemiSpaceIterator;
+ friend class NewSpacePageIterator;
public:
TRACK_MEMORY("SemiSpace")
};
@@ -1406,12 +1964,26 @@ class SemiSpaceIterator : public ObjectIterator {
// Create an iterator over the objects in the given space. If no start
// address is given, the iterator starts from the bottom of the space. If
// no size function is given, the iterator calls Object::Size().
+
+ // Iterate over all of the allocated to-space.
explicit SemiSpaceIterator(NewSpace* space);
+ // Iterate over all of the allocated to-space, with a custom size function.
SemiSpaceIterator(NewSpace* space, HeapObjectCallback size_func);
+ // Iterate over part of allocated to-space, from start to the end
+ // of allocation.
SemiSpaceIterator(NewSpace* space, Address start);
+ // Iterate from one address to another in the same semi-space.
+ SemiSpaceIterator(Address from, Address to);
- HeapObject* next() {
+ HeapObject* Next() {
if (current_ == limit_) return NULL;
+ if (NewSpacePage::IsAtEnd(current_)) {
+ NewSpacePage* page = NewSpacePage::FromLimit(current_);
+ page = page->next_page();
+ ASSERT(!page->is_anchor());
+ current_ = page->body();
+ if (current_ == limit_) return NULL;
+ }
HeapObject* object = HeapObject::FromAddress(current_);
int size = (size_func_ == NULL) ? object->Size() : size_func_(object);
@@ -1421,14 +1993,13 @@ class SemiSpaceIterator : public ObjectIterator {
}
// Implementation of the ObjectIterator functions.
- virtual HeapObject* next_object() { return next(); }
+ virtual HeapObject* next_object() { return Next(); }
private:
- void Initialize(NewSpace* space, Address start, Address end,
+ void Initialize(Address start,
+ Address end,
HeapObjectCallback size_func);
- // The semispace.
- SemiSpace* space_;
// The current iteration point.
Address current_;
// The end of iteration.
@@ -1439,6 +2010,34 @@ class SemiSpaceIterator : public ObjectIterator {
// -----------------------------------------------------------------------------
+// A PageIterator iterates the pages in a semi-space.
+class NewSpacePageIterator BASE_EMBEDDED {
+ public:
+ // Make an iterator that runs over all pages in to-space.
+ explicit inline NewSpacePageIterator(NewSpace* space);
+
+ // Make an iterator that runs over all pages in the given semispace,
+ // even those not used in allocation.
+ explicit inline NewSpacePageIterator(SemiSpace* space);
+
+ // Make iterator that iterates from the page containing start
+ // to the page that contains limit in the same semispace.
+ inline NewSpacePageIterator(Address start, Address limit);
+
+ inline bool has_next();
+ inline NewSpacePage* next();
+
+ private:
+ NewSpacePage* prev_page_; // Previous page returned.
+ // Next page that will be returned. Cached here so that we can use this
+ // iterator for operations that deallocate pages.
+ NewSpacePage* next_page_;
+ // Last page that will be returned.
+ NewSpacePage* last_page_;
+};
+
+
+// -----------------------------------------------------------------------------
// The young generation space.
//
// The new space consists of a contiguous pair of semispaces. It simply
@@ -1449,11 +2048,13 @@ class NewSpace : public Space {
// Constructor.
explicit NewSpace(Heap* heap)
: Space(heap, NEW_SPACE, NOT_EXECUTABLE),
- to_space_(heap),
- from_space_(heap) {}
+ to_space_(heap, kToSpace),
+ from_space_(heap, kFromSpace),
+ reservation_(),
+ inline_allocation_limit_step_(0) {}
// Sets up the new space using the given chunk.
- bool Setup(Address start, int size);
+ bool Setup(int reserved_semispace_size, int max_semispace_size);
// Tears down the space. Heap memory was not allocated by the space, so it
// is not deallocated here.
@@ -1480,18 +2081,30 @@ class NewSpace : public Space {
return (reinterpret_cast<uintptr_t>(a) & address_mask_)
== reinterpret_cast<uintptr_t>(start_);
}
+
bool Contains(Object* o) {
- return (reinterpret_cast<uintptr_t>(o) & object_mask_) == object_expected_;
+ Address a = reinterpret_cast<Address>(o);
+ return (reinterpret_cast<uintptr_t>(a) & object_mask_) == object_expected_;
}
// Return the allocated bytes in the active semispace.
- virtual intptr_t Size() { return static_cast<int>(top() - bottom()); }
+ virtual intptr_t Size() {
+ return pages_used_ * Page::kObjectAreaSize +
+ static_cast<int>(top() - to_space_.page_low());
+ }
+
// The same, but returning an int. We have to have the one that returns
// intptr_t because it is inherited, but if we know we are dealing with the
// new space, which can't get as big as the other spaces then this is useful:
int SizeAsInt() { return static_cast<int>(Size()); }
  // Return the current capacity of a semispace, excluding the space taken
  // up by page headers.
+ intptr_t EffectiveCapacity() {
+ SLOW_ASSERT(to_space_.Capacity() == from_space_.Capacity());
+ return (to_space_.Capacity() / Page::kPageSize) * Page::kObjectAreaSize;
+ }
+
+ // Return the current capacity of a semispace.
intptr_t Capacity() {
ASSERT(to_space_.Capacity() == from_space_.Capacity());
return to_space_.Capacity();
@@ -1503,8 +2116,10 @@ class NewSpace : public Space {
return Capacity();
}
- // Return the available bytes without growing in the active semispace.
- intptr_t Available() { return Capacity() - Size(); }
+ // Return the available bytes without growing.
+ intptr_t Available() {
+ return Capacity() - Size();
+ }
// Return the maximum capacity of a semispace.
int MaximumCapacity() {
@@ -1519,9 +2134,12 @@ class NewSpace : public Space {
}
// Return the address of the allocation pointer in the active semispace.
- Address top() { return allocation_info_.top; }
+ Address top() {
+ ASSERT(to_space_.current_page()->ContainsLimit(allocation_info_.top));
+ return allocation_info_.top;
+ }
// Return the address of the first object in the active semispace.
- Address bottom() { return to_space_.low(); }
+ Address bottom() { return to_space_.space_start(); }
// Get the age mark of the inactive semispace.
Address age_mark() { return from_space_.age_mark(); }
@@ -1533,54 +2151,68 @@ class NewSpace : public Space {
Address start() { return start_; }
uintptr_t mask() { return address_mask_; }
+ INLINE(uint32_t AddressToMarkbitIndex(Address addr)) {
+ ASSERT(Contains(addr));
+ ASSERT(IsAligned(OffsetFrom(addr), kPointerSize) ||
+ IsAligned(OffsetFrom(addr) - 1, kPointerSize));
+ return static_cast<uint32_t>(addr - start_) >> kPointerSizeLog2;
+ }
+
+ INLINE(Address MarkbitIndexToAddress(uint32_t index)) {
+ return reinterpret_cast<Address>(index << kPointerSizeLog2);
+ }
+
// The allocation top and limit addresses.
Address* allocation_top_address() { return &allocation_info_.top; }
Address* allocation_limit_address() { return &allocation_info_.limit; }
- MUST_USE_RESULT MaybeObject* AllocateRaw(int size_in_bytes) {
- return AllocateRawInternal(size_in_bytes, &allocation_info_);
- }
-
- // Allocate the requested number of bytes for relocation during mark-compact
- // collection.
- MUST_USE_RESULT MaybeObject* MCAllocateRaw(int size_in_bytes) {
- return AllocateRawInternal(size_in_bytes, &mc_forwarding_info_);
- }
+ MUST_USE_RESULT INLINE(MaybeObject* AllocateRaw(int size_in_bytes));
// Reset the allocation pointer to the beginning of the active semispace.
void ResetAllocationInfo();
- // Reset the reloction pointer to the bottom of the inactive semispace in
- // preparation for mark-compact collection.
- void MCResetRelocationInfo();
- // Update the allocation pointer in the active semispace after a
- // mark-compact collection.
- void MCCommitRelocationInfo();
- // Get the extent of the inactive semispace (for use as a marking stack).
- Address FromSpaceLow() { return from_space_.low(); }
- Address FromSpaceHigh() { return from_space_.high(); }
+ void LowerInlineAllocationLimit(intptr_t step) {
+ inline_allocation_limit_step_ = step;
+ if (step == 0) {
+ allocation_info_.limit = to_space_.page_high();
+ } else {
+ allocation_info_.limit = Min(
+ allocation_info_.top + inline_allocation_limit_step_,
+ allocation_info_.limit);
+ }
+ top_on_previous_step_ = allocation_info_.top;
+ }
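+
+ // E.g. (hypothetical addresses): with top == 0x1000 and a real page limit
+ // of 0x10000, LowerInlineAllocationLimit(1024) sets the limit to 0x1400,
+ // so inlined allocation code hits the slow path after roughly 1 KB and an
+ // incremental marking step can be performed there.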
- // Get the extent of the active semispace (to sweep newly copied objects
- // during a scavenge collection).
- Address ToSpaceLow() { return to_space_.low(); }
- Address ToSpaceHigh() { return to_space_.high(); }
+ // Get the extent of the inactive semispace (for use as a marking stack,
+ // or to zap it). Notice: space-addresses are not necessarily on the
+ // same page, so FromSpaceStart() might be above FromSpaceEnd().
+ Address FromSpacePageLow() { return from_space_.page_low(); }
+ Address FromSpacePageHigh() { return from_space_.page_high(); }
+ Address FromSpaceStart() { return from_space_.space_start(); }
+ Address FromSpaceEnd() { return from_space_.space_end(); }
- // Offsets from the beginning of the semispaces.
- int ToSpaceOffsetForAddress(Address a) {
- return to_space_.SpaceOffsetForAddress(a);
+ // Get the extent of the active semispace's pages' memory.
+ Address ToSpaceStart() { return to_space_.space_start(); }
+ Address ToSpaceEnd() { return to_space_.space_end(); }
+
+ inline bool ToSpaceContains(Address address) {
+ return to_space_.Contains(address);
}
- int FromSpaceOffsetForAddress(Address a) {
- return from_space_.SpaceOffsetForAddress(a);
+ inline bool FromSpaceContains(Address address) {
+ return from_space_.Contains(address);
}
// True if the object is a heap object in the address range of the
// respective semispace (not necessarily below the allocation pointer of the
// semispace).
- bool ToSpaceContains(Object* o) { return to_space_.Contains(o); }
- bool FromSpaceContains(Object* o) { return from_space_.Contains(o); }
+ inline bool ToSpaceContains(Object* o) { return to_space_.Contains(o); }
+ inline bool FromSpaceContains(Object* o) { return from_space_.Contains(o); }
- bool ToSpaceContains(Address a) { return to_space_.Contains(a); }
- bool FromSpaceContains(Address a) { return from_space_.Contains(a); }
+ // Try to switch the active semispace to a new, empty, page.
+ // Returns false if this isn't possible or reasonable (i.e., there
+ // are no pages, or the current page is already empty), or true
+ // if successful.
+ bool AddFreshPage();
virtual bool ReserveSpace(int bytes);
@@ -1620,10 +2252,24 @@ class NewSpace : public Space {
return from_space_.Uncommit();
}
+ inline intptr_t inline_allocation_limit_step() {
+ return inline_allocation_limit_step_;
+ }
+
+ SemiSpace* active_space() { return &to_space_; }
+
private:
+ // Update allocation info to match the current to-space page.
+ void UpdateAllocationInfo();
+
+ Address chunk_base_;
+ uintptr_t chunk_size_;
+
// The semispaces.
SemiSpace to_space_;
SemiSpace from_space_;
+ VirtualMemory reservation_;
+ int pages_used_;
// Start address and bit mask for containment testing.
Address start_;
@@ -1634,15 +2280,19 @@ class NewSpace : public Space {
// Allocation pointer and limit for normal allocation and allocation during
// mark-compact collection.
AllocationInfo allocation_info_;
- AllocationInfo mc_forwarding_info_;
+
+ // When incremental marking is active we will set allocation_info_.limit
+ // to be lower than actual limit and then will gradually increase it
+ // in steps to guarantee that we do incremental marking steps even
+ // when all allocation is performed from inlined generated code.
+ intptr_t inline_allocation_limit_step_;
+
+ Address top_on_previous_step_;
HistogramInfo* allocated_histogram_;
HistogramInfo* promoted_histogram_;
- // Implementation of AllocateRaw and MCAllocateRaw.
- MUST_USE_RESULT inline MaybeObject* AllocateRawInternal(
- int size_in_bytes,
- AllocationInfo* alloc_info);
+ MUST_USE_RESULT MaybeObject* SlowAllocateRaw(int size_in_bytes);
friend class SemiSpaceIterator;
@@ -1652,193 +2302,6 @@ class NewSpace : public Space {
// -----------------------------------------------------------------------------
-// Free lists for old object spaces
-//
-// Free-list nodes are free blocks in the heap. They look like heap objects
-// (free-list node pointers have the heap object tag, and they have a map like
-// a heap object). They have a size and a next pointer. The next pointer is
-// the raw address of the next free list node (or NULL).
-class FreeListNode: public HeapObject {
- public:
- // Obtain a free-list node from a raw address. This is not a cast because
- // it does not check nor require that the first word at the address is a map
- // pointer.
- static FreeListNode* FromAddress(Address address) {
- return reinterpret_cast<FreeListNode*>(HeapObject::FromAddress(address));
- }
-
- static inline bool IsFreeListNode(HeapObject* object);
-
- // Set the size in bytes, which can be read with HeapObject::Size(). This
- // function also writes a map to the first word of the block so that it
- // looks like a heap object to the garbage collector and heap iteration
- // functions.
- void set_size(Heap* heap, int size_in_bytes);
-
- // Accessors for the next field.
- inline Address next(Heap* heap);
- inline void set_next(Heap* heap, Address next);
-
- private:
- static const int kNextOffset = POINTER_SIZE_ALIGN(ByteArray::kHeaderSize);
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(FreeListNode);
-};
-
-
-// The free list for the old space.
-class OldSpaceFreeList BASE_EMBEDDED {
- public:
- OldSpaceFreeList(Heap* heap, AllocationSpace owner);
-
- // Clear the free list.
- void Reset();
-
- // Return the number of bytes available on the free list.
- intptr_t available() { return available_; }
-
- // Place a node on the free list. The block of size 'size_in_bytes'
- // starting at 'start' is placed on the free list. The return value is the
- // number of bytes that have been lost due to internal fragmentation by
- // freeing the block. Bookkeeping information will be written to the block,
- // ie, its contents will be destroyed. The start address should be word
- // aligned, and the size should be a non-zero multiple of the word size.
- int Free(Address start, int size_in_bytes);
-
- // Allocate a block of size 'size_in_bytes' from the free list. The block
- // is unitialized. A failure is returned if no block is available. The
- // number of bytes lost to fragmentation is returned in the output parameter
- // 'wasted_bytes'. The size should be a non-zero multiple of the word size.
- MUST_USE_RESULT MaybeObject* Allocate(int size_in_bytes, int* wasted_bytes);
-
- void MarkNodes();
-
- private:
- // The size range of blocks, in bytes. (Smaller allocations are allowed, but
- // will always result in waste.)
- static const int kMinBlockSize = 2 * kPointerSize;
- static const int kMaxBlockSize = Page::kMaxHeapObjectSize;
-
- Heap* heap_;
-
- // The identity of the owning space, for building allocation Failure
- // objects.
- AllocationSpace owner_;
-
- // Total available bytes in all blocks on this free list.
- int available_;
-
- // Blocks are put on exact free lists in an array, indexed by size in words.
- // The available sizes are kept in an increasingly ordered list. Entries
- // corresponding to sizes < kMinBlockSize always have an empty free list
- // (but index kHead is used for the head of the size list).
- struct SizeNode {
- // Address of the head FreeListNode of the implied block size or NULL.
- Address head_node_;
- // Size (words) of the next larger available size if head_node_ != NULL.
- int next_size_;
- };
- static const int kFreeListsLength = kMaxBlockSize / kPointerSize + 1;
- SizeNode free_[kFreeListsLength];
-
- // Sentinel elements for the size list. Real elements are in ]kHead..kEnd[.
- static const int kHead = kMinBlockSize / kPointerSize - 1;
- static const int kEnd = kMaxInt;
-
- // We keep a "finger" in the size list to speed up a common pattern:
- // repeated requests for the same or increasing sizes.
- int finger_;
-
- // Starting from *prev, find and return the smallest size >= index (words),
- // or kEnd. Update *prev to be the largest size < index, or kHead.
- int FindSize(int index, int* prev) {
- int cur = free_[*prev].next_size_;
- while (cur < index) {
- *prev = cur;
- cur = free_[cur].next_size_;
- }
- return cur;
- }
-
- // Remove an existing element from the size list.
- void RemoveSize(int index) {
- int prev = kHead;
- int cur = FindSize(index, &prev);
- ASSERT(cur == index);
- free_[prev].next_size_ = free_[cur].next_size_;
- finger_ = prev;
- }
-
- // Insert a new element into the size list.
- void InsertSize(int index) {
- int prev = kHead;
- int cur = FindSize(index, &prev);
- ASSERT(cur != index);
- free_[prev].next_size_ = index;
- free_[index].next_size_ = cur;
- }
-
- // The size list is not updated during a sequence of calls to Free, but is
- // rebuilt before the next allocation.
- void RebuildSizeList();
- bool needs_rebuild_;
-
-#ifdef DEBUG
- // Does this free list contain a free block located at the address of 'node'?
- bool Contains(FreeListNode* node);
-#endif
-
- DISALLOW_COPY_AND_ASSIGN(OldSpaceFreeList);
-};
-
-
-// The free list for the map space.
-class FixedSizeFreeList BASE_EMBEDDED {
- public:
- FixedSizeFreeList(Heap* heap, AllocationSpace owner, int object_size);
-
- // Clear the free list.
- void Reset();
-
- // Return the number of bytes available on the free list.
- intptr_t available() { return available_; }
-
- // Place a node on the free list. The block starting at 'start' (assumed to
- // have size object_size_) is placed on the free list. Bookkeeping
- // information will be written to the block, ie, its contents will be
- // destroyed. The start address should be word aligned.
- void Free(Address start);
-
- // Allocate a fixed sized block from the free list. The block is unitialized.
- // A failure is returned if no block is available.
- MUST_USE_RESULT MaybeObject* Allocate();
-
- void MarkNodes();
-
- private:
- Heap* heap_;
-
- // Available bytes on the free list.
- intptr_t available_;
-
- // The head of the free list.
- Address head_;
-
- // The tail of the free list.
- Address tail_;
-
- // The identity of the owning space, for building allocation Failure
- // objects.
- AllocationSpace owner_;
-
- // The size of the objects in this space.
- int object_size_;
-
- DISALLOW_COPY_AND_ASSIGN(FixedSizeFreeList);
-};
-
-
-// -----------------------------------------------------------------------------
// Old object space (excluding map objects)
class OldSpace : public PagedSpace {
@@ -1849,71 +2312,28 @@ class OldSpace : public PagedSpace {
intptr_t max_capacity,
AllocationSpace id,
Executability executable)
- : PagedSpace(heap, max_capacity, id, executable),
- free_list_(heap, id) {
+ : PagedSpace(heap, max_capacity, id, executable) {
page_extra_ = 0;
}
- // The bytes available on the free list (ie, not above the linear allocation
- // pointer).
- intptr_t AvailableFree() { return free_list_.available(); }
-
// The limit of allocation for a page in this space.
virtual Address PageAllocationLimit(Page* page) {
return page->ObjectAreaEnd();
}
- // Give a block of memory to the space's free list. It might be added to
- // the free list or accounted as waste.
- // If add_to_freelist is false then just accounting stats are updated and
- // no attempt to add area to free list is made.
- void Free(Address start, int size_in_bytes, bool add_to_freelist) {
- accounting_stats_.DeallocateBytes(size_in_bytes);
-
- if (add_to_freelist) {
- int wasted_bytes = free_list_.Free(start, size_in_bytes);
- accounting_stats_.WasteBytes(wasted_bytes);
- }
- }
-
- virtual void DeallocateBlock(Address start,
- int size_in_bytes,
- bool add_to_freelist);
-
- // Prepare for full garbage collection. Resets the relocation pointer and
- // clears the free list.
- virtual void PrepareForMarkCompact(bool will_compact);
-
- // Updates the allocation pointer to the relocation top after a mark-compact
- // collection.
- virtual void MCCommitRelocationInfo();
-
- virtual void PutRestOfCurrentPageOnFreeList(Page* current_page);
-
- void MarkFreeListNodes() { free_list_.MarkNodes(); }
-
-#ifdef DEBUG
- // Reports statistics for the space
- void ReportStatistics();
-#endif
-
- protected:
- // Virtual function in the superclass. Slow path of AllocateRaw.
- MUST_USE_RESULT HeapObject* SlowAllocateRaw(int size_in_bytes);
-
- // Virtual function in the superclass. Allocate linearly at the start of
- // the page after current_page (there is assumed to be one).
- HeapObject* AllocateInNextPage(Page* current_page, int size_in_bytes);
-
- private:
- // The space's free list.
- OldSpaceFreeList free_list_;
-
public:
TRACK_MEMORY("OldSpace")
};
+// For contiguous spaces, top should be in the space (or at the end) and limit
+// should be the end of the space.
+#define ASSERT_SEMISPACE_ALLOCATION_INFO(info, space) \
+ SLOW_ASSERT((space).page_low() <= (info).top \
+ && (info).top <= (space).page_high() \
+ && (info).limit <= (space).page_high())
+
+
// -----------------------------------------------------------------------------
// Old space for objects of a fixed size
@@ -1926,8 +2346,7 @@ class FixedSpace : public PagedSpace {
const char* name)
: PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE),
object_size_in_bytes_(object_size_in_bytes),
- name_(name),
- free_list_(heap, id, object_size_in_bytes) {
+ name_(name) {
page_extra_ = Page::kObjectAreaSize % object_size_in_bytes;
}
@@ -1938,44 +2357,10 @@ class FixedSpace : public PagedSpace {
int object_size_in_bytes() { return object_size_in_bytes_; }
- // Give a fixed sized block of memory to the space's free list.
- // If add_to_freelist is false then just accounting stats are updated and
- // no attempt to add area to free list is made.
- void Free(Address start, bool add_to_freelist) {
- if (add_to_freelist) {
- free_list_.Free(start);
- }
- accounting_stats_.DeallocateBytes(object_size_in_bytes_);
- }
-
// Prepares for a mark-compact GC.
- virtual void PrepareForMarkCompact(bool will_compact);
-
- // Updates the allocation pointer to the relocation top after a mark-compact
- // collection.
- virtual void MCCommitRelocationInfo();
-
- virtual void PutRestOfCurrentPageOnFreeList(Page* current_page);
-
- virtual void DeallocateBlock(Address start,
- int size_in_bytes,
- bool add_to_freelist);
-
- void MarkFreeListNodes() { free_list_.MarkNodes(); }
-
-#ifdef DEBUG
- // Reports statistic info of the space
- void ReportStatistics();
-#endif
+ virtual void PrepareForMarkCompact();
protected:
- // Virtual function in the superclass. Slow path of AllocateRaw.
- MUST_USE_RESULT HeapObject* SlowAllocateRaw(int size_in_bytes);
-
- // Virtual function in the superclass. Allocate linearly at the start of
- // the page after current_page (there is assumed to be one).
- HeapObject* AllocateInNextPage(Page* current_page, int size_in_bytes);
-
void ResetFreeList() {
free_list_.Reset();
}
@@ -1986,9 +2371,6 @@ class FixedSpace : public PagedSpace {
// The name of this space.
const char* name_;
-
- // The space's free list.
- FixedSizeFreeList free_list_;
};
@@ -2004,83 +2386,18 @@ class MapSpace : public FixedSpace {
AllocationSpace id)
: FixedSpace(heap, max_capacity, id, Map::kSize, "map"),
max_map_space_pages_(max_map_space_pages) {
- ASSERT(max_map_space_pages < kMaxMapPageIndex);
}
- // Prepares for a mark-compact GC.
- virtual void PrepareForMarkCompact(bool will_compact);
-
// Given an index, returns the page address.
- Address PageAddress(int page_index) { return page_addresses_[page_index]; }
-
- static const int kMaxMapPageIndex = 1 << MapWord::kMapPageIndexBits;
-
- // Are map pointers encodable into map word?
- bool MapPointersEncodable() {
- if (!FLAG_use_big_map_space) {
- ASSERT(CountPagesToTop() <= kMaxMapPageIndex);
- return true;
- }
- return CountPagesToTop() <= max_map_space_pages_;
- }
-
- // Should be called after forced sweep to find out if map space needs
- // compaction.
- bool NeedsCompaction(int live_maps) {
- return !MapPointersEncodable() && live_maps <= CompactionThreshold();
- }
-
- Address TopAfterCompaction(int live_maps) {
- ASSERT(NeedsCompaction(live_maps));
-
- int pages_left = live_maps / kMapsPerPage;
- PageIterator it(this, PageIterator::ALL_PAGES);
- while (pages_left-- > 0) {
- ASSERT(it.has_next());
- it.next()->SetRegionMarks(Page::kAllRegionsCleanMarks);
- }
- ASSERT(it.has_next());
- Page* top_page = it.next();
- top_page->SetRegionMarks(Page::kAllRegionsCleanMarks);
- ASSERT(top_page->is_valid());
-
- int offset = live_maps % kMapsPerPage * Map::kSize;
- Address top = top_page->ObjectAreaStart() + offset;
- ASSERT(top < top_page->ObjectAreaEnd());
- ASSERT(Contains(top));
-
- return top;
- }
-
- void FinishCompaction(Address new_top, int live_maps) {
- Page* top_page = Page::FromAddress(new_top);
- ASSERT(top_page->is_valid());
-
- SetAllocationInfo(&allocation_info_, top_page);
- allocation_info_.top = new_top;
-
- int new_size = live_maps * Map::kSize;
- accounting_stats_.DeallocateBytes(accounting_stats_.Size());
- accounting_stats_.AllocateBytes(new_size);
-
- // Flush allocation watermarks.
- for (Page* p = first_page_; p != top_page; p = p->next_page()) {
- p->SetAllocationWatermark(p->AllocationTop());
+ // TODO(1600): this limit is artificial, just to keep the code compilable.
+ static const int kMaxMapPageIndex = 1 << 16;
+
+ virtual int RoundSizeDownToObjectAlignment(int size) {
+ if (IsPowerOf2(Map::kSize)) {
+ return RoundDown(size, Map::kSize);
+ } else {
+ return (size / Map::kSize) * Map::kSize;
}
- top_page->SetAllocationWatermark(new_top);
-
-#ifdef DEBUG
- if (FLAG_enable_slow_asserts) {
- intptr_t actual_size = 0;
- for (Page* p = first_page_; p != top_page; p = p->next_page())
- actual_size += kMapsPerPage * Map::kSize;
- actual_size += (new_top - top_page->ObjectAreaStart());
- ASSERT(accounting_stats_.Size() == actual_size);
- }
-#endif
-
- Shrink();
- ResetFreeList();
}
protected:
@@ -2098,9 +2415,6 @@ class MapSpace : public FixedSpace {
const int max_map_space_pages_;
- // An array of page start address in a map space.
- Address page_addresses_[kMaxMapPageIndex];
-
public:
TRACK_MEMORY("MapSpace")
};
@@ -2116,6 +2430,14 @@ class CellSpace : public FixedSpace {
: FixedSpace(heap, max_capacity, id, JSGlobalPropertyCell::kSize, "cell")
{}
+ virtual int RoundSizeDownToObjectAlignment(int size) {
+ if (IsPowerOf2(JSGlobalPropertyCell::kSize)) {
+ return RoundDown(size, JSGlobalPropertyCell::kSize);
+ } else {
+ return (size / JSGlobalPropertyCell::kSize) * JSGlobalPropertyCell::kSize;
+ }
+ }
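+
+ // E.g.: if JSGlobalPropertyCell::kSize were 88 bytes (a hypothetical,
+ // non-power-of-two value), RoundSizeDownToObjectAlignment(1000) would
+ // return (1000 / 88) * 88 == 968.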
+
protected:
#ifdef DEBUG
virtual void VerifyObject(HeapObject* obj);
@@ -2133,67 +2455,9 @@ class CellSpace : public FixedSpace {
// A large object always starts at Page::kObjectStartOffset in a page.
// Large objects do not move during garbage collections.
-// A LargeObjectChunk holds exactly one large object page with exactly one
-// large object.
-class LargeObjectChunk {
- public:
- // Allocates a new LargeObjectChunk that contains a large object page
- // (Page::kPageSize aligned) that has at least size_in_bytes (for a large
- // object) bytes after the object area start of that page.
- static LargeObjectChunk* New(int size_in_bytes, Executability executable);
-
- // Free the memory associated with the chunk.
- void Free(Executability executable);
-
- // Interpret a raw address as a large object chunk.
- static LargeObjectChunk* FromAddress(Address address) {
- return reinterpret_cast<LargeObjectChunk*>(address);
- }
-
- // Returns the address of this chunk.
- Address address() { return reinterpret_cast<Address>(this); }
-
- Page* GetPage() {
- return Page::FromAddress(RoundUp(address(), Page::kPageSize));
- }
-
- // Accessors for the fields of the chunk.
- LargeObjectChunk* next() { return next_; }
- void set_next(LargeObjectChunk* chunk) { next_ = chunk; }
- size_t size() { return size_ & ~Page::kPageFlagMask; }
-
- // Compute the start address in the chunk.
- Address GetStartAddress() { return GetPage()->ObjectAreaStart(); }
-
- // Returns the object in this chunk.
- HeapObject* GetObject() { return HeapObject::FromAddress(GetStartAddress()); }
-
- // Given a requested size returns the physical size of a chunk to be
- // allocated.
- static int ChunkSizeFor(int size_in_bytes);
-
- // Given a chunk size, returns the object size it can accommodate. Used by
- // LargeObjectSpace::Available.
- static intptr_t ObjectSizeFor(intptr_t chunk_size) {
- if (chunk_size <= (Page::kPageSize + Page::kObjectStartOffset)) return 0;
- return chunk_size - Page::kPageSize - Page::kObjectStartOffset;
- }
-
- private:
- // A pointer to the next large object chunk in the space or NULL.
- LargeObjectChunk* next_;
-
- // The total size of this chunk.
- size_t size_;
-
- public:
- TRACK_MEMORY("LargeObjectChunk")
-};
-
-
class LargeObjectSpace : public Space {
public:
- LargeObjectSpace(Heap* heap, AllocationSpace id);
+ LargeObjectSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id);
virtual ~LargeObjectSpace() {}
// Initializes internal data structures.
@@ -2202,12 +2466,15 @@ class LargeObjectSpace : public Space {
// Releases internal resources, frees objects in this space.
void TearDown();
- // Allocates a (non-FixedArray, non-Code) large object.
- MUST_USE_RESULT MaybeObject* AllocateRaw(int size_in_bytes);
- // Allocates a large Code object.
- MUST_USE_RESULT MaybeObject* AllocateRawCode(int size_in_bytes);
- // Allocates a large FixedArray.
- MUST_USE_RESULT MaybeObject* AllocateRawFixedArray(int size_in_bytes);
+ static intptr_t ObjectSizeFor(intptr_t chunk_size) {
+ if (chunk_size <= (Page::kPageSize + Page::kObjectStartOffset)) return 0;
+ return chunk_size - Page::kPageSize - Page::kObjectStartOffset;
+ }
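+
+ // E.g. (hypothetical sizes): with 4 KB pages and a kObjectStartOffset of
+ // 256, ObjectSizeFor(12288) == 12288 - 4096 - 256 == 7936, while any
+ // chunk of at most 4096 + 256 bytes yields 0.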
+
+ // Shared implementation of AllocateRaw, AllocateRawCode and
+ // AllocateRawFixedArray.
+ MUST_USE_RESULT MaybeObject* AllocateRaw(int object_size,
+ Executability executable);
// Available bytes for objects in this space.
inline intptr_t Available();
@@ -2231,10 +2498,7 @@ class LargeObjectSpace : public Space {
// Finds a large object page containing the given pc, returns NULL
// if such a page doesn't exist.
- LargeObjectChunk* FindChunkContainingPc(Address pc);
-
- // Iterates objects covered by dirty regions.
- void IterateDirtyRegions(ObjectSlotCallback func);
+ LargePage* FindPageContainingPc(Address pc);
// Frees unmarked objects.
void FreeUnmarkedObjects();
@@ -2243,13 +2507,15 @@ class LargeObjectSpace : public Space {
bool Contains(HeapObject* obj);
// Checks whether the space is empty.
- bool IsEmpty() { return first_chunk_ == NULL; }
+ bool IsEmpty() { return first_page_ == NULL; }
// See the comments for ReserveSpace in the Space class. This has to be
// called after ReserveSpace has been called on the paged spaces, since they
// may use some memory, leaving less for large objects.
virtual bool ReserveSpace(int bytes);
+ LargePage* first_page() { return first_page_; }
+
#ifdef DEBUG
virtual void Verify();
virtual void Print();
@@ -2261,18 +2527,13 @@ class LargeObjectSpace : public Space {
bool SlowContains(Address addr) { return !FindObject(addr)->IsFailure(); }
private:
+ intptr_t max_capacity_;
// The head of the linked list of large object chunks.
- LargeObjectChunk* first_chunk_;
+ LargePage* first_page_;
intptr_t size_; // allocated bytes
int page_count_; // number of chunks
intptr_t objects_size_; // size of objects
- // Shared implementation of AllocateRaw, AllocateRawCode and
- // AllocateRawFixedArray.
- MUST_USE_RESULT MaybeObject* AllocateRawInternal(int requested_size,
- int object_size,
- Executability executable);
-
friend class LargeObjectIterator;
public:
@@ -2285,17 +2546,78 @@ class LargeObjectIterator: public ObjectIterator {
explicit LargeObjectIterator(LargeObjectSpace* space);
LargeObjectIterator(LargeObjectSpace* space, HeapObjectCallback size_func);
- HeapObject* next();
+ HeapObject* Next();
// implementation of ObjectIterator.
- virtual HeapObject* next_object() { return next(); }
+ virtual HeapObject* next_object() { return Next(); }
private:
- LargeObjectChunk* current_;
+ LargePage* current_;
HeapObjectCallback size_func_;
};
+// Iterates over the chunks (pages and large object pages) that can contain
+// pointers to new space.
+class PointerChunkIterator BASE_EMBEDDED {
+ public:
+ inline explicit PointerChunkIterator(Heap* heap);
+
+ // Return NULL when the iterator is done.
+ MemoryChunk* next() {
+ switch (state_) {
+ case kOldPointerState: {
+ if (old_pointer_iterator_.has_next()) {
+ return old_pointer_iterator_.next();
+ }
+ state_ = kMapState;
+ // Fall through.
+ }
+ case kMapState: {
+ if (map_iterator_.has_next()) {
+ return map_iterator_.next();
+ }
+ state_ = kLargeObjectState;
+ // Fall through.
+ }
+ case kLargeObjectState: {
+ HeapObject* heap_object;
+ do {
+ heap_object = lo_iterator_.Next();
+ if (heap_object == NULL) {
+ state_ = kFinishedState;
+ return NULL;
+ }
+ // Fixed arrays are the only pointer-containing objects in large
+ // object space.
+ } while (!heap_object->IsFixedArray());
+ MemoryChunk* answer = MemoryChunk::FromAddress(heap_object->address());
+ return answer;
+ }
+ case kFinishedState:
+ return NULL;
+ default:
+ break;
+ }
+ UNREACHABLE();
+ return NULL;
+ }
+
+
+ private:
+ enum State {
+ kOldPointerState,
+ kMapState,
+ kLargeObjectState,
+ kFinishedState
+ };
+ State state_;
+ PageIterator old_pointer_iterator_;
+ PageIterator map_iterator_;
+ LargeObjectIterator lo_iterator_;
+};
+
+
#ifdef DEBUG
struct CommentStatistic {
const char* comment;
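
The PointerChunkIterator added above multiplexes three sub-iterators behind a single next() by recording the current phase in an enum and deliberately falling through the switch when one phase runs dry. A minimal standalone sketch of the same pattern, with hypothetical names and plain arrays instead of the V8 spaces:

    #include <cstddef>

    // Chains two arrays behind one Next(); returns NULL when exhausted,
    // mirroring the enum-state plus fall-through shape of the iterator above.
    class ChainedIterator {
     public:
      ChainedIterator(int* a, size_t a_len, int* b, size_t b_len)
          : state_(kFirst), a_(a), a_len_(a_len), b_(b), b_len_(b_len),
            pos_(0) {}

      int* Next() {
        switch (state_) {
          case kFirst:
            if (pos_ < a_len_) return &a_[pos_++];
            state_ = kSecond;
            pos_ = 0;
            // Fall through.
          case kSecond:
            if (pos_ < b_len_) return &b_[pos_++];
            state_ = kFinished;
            // Fall through.
          case kFinished:
            return NULL;
        }
        return NULL;
      }

     private:
      enum State { kFirst, kSecond, kFinished };
      State state_;
      int* a_;
      size_t a_len_;
      int* b_;
      size_t b_len_;
      size_t pos_;
    };

Because the phase survives in state_, a caller can drain everything with a single while ((p = it.Next()) != NULL) loop, which is exactly how the store-buffer code later in this diff consumes PointerChunkIterator.
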
diff --git a/deps/v8/src/splay-tree-inl.h b/deps/v8/src/splay-tree-inl.h
index 9c2287eab..4640ed5b0 100644
--- a/deps/v8/src/splay-tree-inl.h
+++ b/deps/v8/src/splay-tree-inl.h
@@ -45,7 +45,7 @@ template<typename Config, class Allocator>
bool SplayTree<Config, Allocator>::Insert(const Key& key, Locator* locator) {
if (is_empty()) {
// If the tree is empty, insert the new node.
- root_ = new Node(key, Config::kNoValue);
+ root_ = new Node(key, Config::NoValue());
} else {
// Splay on the key to move the last node on the search path
// for the key to the root of the tree.
@@ -57,7 +57,7 @@ bool SplayTree<Config, Allocator>::Insert(const Key& key, Locator* locator) {
return false;
}
// Insert the new node.
- Node* node = new Node(key, Config::kNoValue);
+ Node* node = new Node(key, Config::NoValue());
InsertInternal(cmp, node);
}
locator->bind(root_);
@@ -226,7 +226,7 @@ template<typename Config, class Allocator>
void SplayTree<Config, Allocator>::Splay(const Key& key) {
if (is_empty())
return;
- Node dummy_node(Config::kNoKey, Config::kNoValue);
+ Node dummy_node(Config::kNoKey, Config::NoValue());
// Create a dummy node. The use of the dummy node is a bit
// counter-intuitive: The right child of the dummy node will hold
// the L tree of the algorithm. The left child of the dummy node
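
The splay-tree change above looks mechanical, but it relaxes the Config contract: a static constant kNoValue must be a compile-time constant, while a NoValue() function can construct its sentinel at runtime (a handle-wrapped value, for instance). A hedged sketch of a Config supplying the members the tree visibly uses here (kNoKey and NoValue()); the names and the int-keyed use case are hypothetical:

    // Minimal Config for SplayTree<Config, Allocator>, inferred only from
    // the call sites in the hunks above.
    struct IntSplayConfig {
      typedef int Key;
      typedef int Value;
      static const Key kNoKey = -1;
      // A function rather than a constant, so the "no value" sentinel may
      // be built at runtime.
      static Value NoValue() { return 0; }
      static int Compare(const Key& a, const Key& b) {
        if (a < b) return -1;
        if (a > b) return 1;
        return 0;
      }
    };
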
diff --git a/deps/v8/src/store-buffer-inl.h b/deps/v8/src/store-buffer-inl.h
new file mode 100644
index 000000000..dd65cbcc9
--- /dev/null
+++ b/deps/v8/src/store-buffer-inl.h
@@ -0,0 +1,79 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_STORE_BUFFER_INL_H_
+#define V8_STORE_BUFFER_INL_H_
+
+#include "store-buffer.h"
+
+namespace v8 {
+namespace internal {
+
+Address StoreBuffer::TopAddress() {
+ return reinterpret_cast<Address>(heap_->store_buffer_top_address());
+}
+
+
+void StoreBuffer::Mark(Address addr) {
+ ASSERT(!heap_->cell_space()->Contains(addr));
+ ASSERT(!heap_->code_space()->Contains(addr));
+ Address* top = reinterpret_cast<Address*>(heap_->store_buffer_top());
+ *top++ = addr;
+ heap_->public_set_store_buffer_top(top);
+ if ((reinterpret_cast<uintptr_t>(top) & kStoreBufferOverflowBit) != 0) {
+ ASSERT(top == limit_);
+ Compact();
+ } else {
+ ASSERT(top < limit_);
+ }
+}
+
+
+void StoreBuffer::EnterDirectlyIntoStoreBuffer(Address addr) {
+ if (store_buffer_rebuilding_enabled_) {
+ SLOW_ASSERT(!heap_->cell_space()->Contains(addr) &&
+ !heap_->code_space()->Contains(addr) &&
+ !heap_->old_data_space()->Contains(addr) &&
+ !heap_->new_space()->Contains(addr));
+ Address* top = old_top_;
+ *top++ = addr;
+ old_top_ = top;
+ old_buffer_is_sorted_ = false;
+ old_buffer_is_filtered_ = false;
+ if (top >= old_limit_) {
+ ASSERT(callback_ != NULL);
+ (*callback_)(heap_,
+ MemoryChunk::FromAnyPointerAddress(addr),
+ kStoreBufferFullEvent);
+ }
+ }
+}
+
+
+} } // namespace v8::internal
+
+#endif // V8_STORE_BUFFER_INL_H_
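
Mark() above contains no explicit compare against limit_; it relies on an alignment trick set up in Setup(): the new buffer is kStoreBufferSize bytes placed at an address aligned to twice that size, so every slot inside the buffer has kStoreBufferOverflowBit clear, and the first address past the end is exactly where that bit flips to 1. A self-contained demonstration with plain numbers (small constants chosen for illustration only):

    #include <cassert>
    #include <cstdint>

    int main() {
      const uintptr_t kSize = 1 << 4;        // Stand-in for kStoreBufferSize.
      const uintptr_t kOverflowBit = kSize;  // The same value doubles as a mask.
      // Any base aligned to 2 * kSize works; Setup() achieves this by
      // over-reserving and rounding up.
      const uintptr_t start = 4 * kSize;
      const uintptr_t limit = start + kSize;
      // Inside [start, limit) the overflow bit is always clear...
      for (uintptr_t p = start; p < limit; p += sizeof(void*)) {
        assert((p & kOverflowBit) == 0);
      }
      // ...and it is set exactly at limit, so "top & kOverflowBit" detects
      // a full buffer without loading limit_ at all.
      assert((limit & kOverflowBit) != 0);
      return 0;
    }

This is also why Setup() asserts that limit_ has the overflow bit set while limit_ - 1 does not: both facts follow from the 2 * kStoreBufferSize alignment.
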
diff --git a/deps/v8/src/store-buffer.cc b/deps/v8/src/store-buffer.cc
new file mode 100644
index 000000000..7c8b5f207
--- /dev/null
+++ b/deps/v8/src/store-buffer.cc
@@ -0,0 +1,696 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "store-buffer.h"
+#include "store-buffer-inl.h"
+#include "v8-counters.h"
+
+namespace v8 {
+namespace internal {
+
+StoreBuffer::StoreBuffer(Heap* heap)
+ : heap_(heap),
+ start_(NULL),
+ limit_(NULL),
+ old_start_(NULL),
+ old_limit_(NULL),
+ old_top_(NULL),
+ old_buffer_is_sorted_(false),
+ old_buffer_is_filtered_(false),
+ during_gc_(false),
+ store_buffer_rebuilding_enabled_(false),
+ callback_(NULL),
+ may_move_store_buffer_entries_(true),
+ virtual_memory_(NULL),
+ hash_map_1_(NULL),
+ hash_map_2_(NULL) {
+}
+
+
+void StoreBuffer::Setup() {
+ virtual_memory_ = new VirtualMemory(kStoreBufferSize * 3);
+ uintptr_t start_as_int =
+ reinterpret_cast<uintptr_t>(virtual_memory_->address());
+ start_ =
+ reinterpret_cast<Address*>(RoundUp(start_as_int, kStoreBufferSize * 2));
+ limit_ = start_ + (kStoreBufferSize / sizeof(*start_));
+
+ old_top_ = old_start_ = new Address[kOldStoreBufferLength];
+ old_limit_ = old_start_ + kOldStoreBufferLength;
+
+ ASSERT(reinterpret_cast<Address>(start_) >= virtual_memory_->address());
+ ASSERT(reinterpret_cast<Address>(limit_) >= virtual_memory_->address());
+ Address* vm_limit = reinterpret_cast<Address*>(
+ reinterpret_cast<char*>(virtual_memory_->address()) +
+ virtual_memory_->size());
+ ASSERT(start_ <= vm_limit);
+ ASSERT(limit_ <= vm_limit);
+ USE(vm_limit);
+ ASSERT((reinterpret_cast<uintptr_t>(limit_) & kStoreBufferOverflowBit) != 0);
+ ASSERT((reinterpret_cast<uintptr_t>(limit_ - 1) & kStoreBufferOverflowBit) ==
+ 0);
+
+ virtual_memory_->Commit(reinterpret_cast<Address>(start_),
+ kStoreBufferSize,
+ false); // Not executable.
+ heap_->public_set_store_buffer_top(start_);
+
+ hash_map_1_ = new uintptr_t[kHashMapLength];
+ hash_map_2_ = new uintptr_t[kHashMapLength];
+
+ ZapHashTables();
+}
+
+
+void StoreBuffer::TearDown() {
+ delete virtual_memory_;
+ delete[] hash_map_1_;
+ delete[] hash_map_2_;
+ delete[] old_start_;
+ old_start_ = old_top_ = old_limit_ = NULL;
+ start_ = limit_ = NULL;
+ heap_->public_set_store_buffer_top(start_);
+}
+
+
+void StoreBuffer::StoreBufferOverflow(Isolate* isolate) {
+ isolate->heap()->store_buffer()->Compact();
+}
+
+
+#if V8_TARGET_ARCH_X64
+static int CompareAddresses(const void* void_a, const void* void_b) {
+ intptr_t a =
+ reinterpret_cast<intptr_t>(*reinterpret_cast<const Address*>(void_a));
+ intptr_t b =
+ reinterpret_cast<intptr_t>(*reinterpret_cast<const Address*>(void_b));
+ // Unfortunately if int is smaller than intptr_t there is no branch-free
+ // way to return a number with the same sign as the difference between the
+ // pointers.
+ if (a == b) return 0;
+ if (a < b) return -1;
+ ASSERT(a > b);
+ return 1;
+}
+#else
+static int CompareAddresses(const void* void_a, const void* void_b) {
+ intptr_t a =
+ reinterpret_cast<intptr_t>(*reinterpret_cast<const Address*>(void_a));
+ intptr_t b =
+ reinterpret_cast<intptr_t>(*reinterpret_cast<const Address*>(void_b));
+ ASSERT(sizeof(1) == sizeof(a));
+ // Shift down to avoid wraparound.
+ return (a >> kPointerSizeLog2) - (b >> kPointerSizeLog2);
+}
+#endif
+
+
+void StoreBuffer::Uniq() {
+ ASSERT(HashTablesAreZapped());
+ // Remove adjacent duplicates and cells that do not point at new space.
+ Address previous = NULL;
+ Address* write = old_start_;
+ ASSERT(may_move_store_buffer_entries_);
+ for (Address* read = old_start_; read < old_top_; read++) {
+ Address current = *read;
+ if (current != previous) {
+ if (heap_->InNewSpace(*reinterpret_cast<Object**>(current))) {
+ *write++ = current;
+ }
+ }
+ previous = current;
+ }
+ old_top_ = write;
+}
+
+
+void StoreBuffer::HandleFullness() {
+ if (old_buffer_is_filtered_) return;
+ ASSERT(may_move_store_buffer_entries_);
+ Compact();
+
+ old_buffer_is_filtered_ = true;
+ bool page_has_scan_on_scavenge_flag = false;
+
+ PointerChunkIterator it(heap_);
+ MemoryChunk* chunk;
+ while ((chunk = it.next()) != NULL) {
+ if (chunk->scan_on_scavenge()) page_has_scan_on_scavenge_flag = true;
+ }
+
+ if (page_has_scan_on_scavenge_flag) {
+ Filter(MemoryChunk::SCAN_ON_SCAVENGE);
+ }
+
+ // If filtering out the entries from scan_on_scavenge pages got us down to
+ // less than half full, then we are satisfied with that.
+ if (old_limit_ - old_top_ > old_top_ - old_start_) return;
+
+ // Sample 1 entry in 97 and filter out the pages where we estimate that more
+ // than 1 in 8 pointers are to new space.
+ static const int kSampleFinenesses = 5;
+ static const struct Samples {
+ int prime_sample_step;
+ int threshold;
+ } samples[kSampleFinenesses] = {
+ { 97, ((Page::kPageSize / kPointerSize) / 97) / 8 },
+ { 23, ((Page::kPageSize / kPointerSize) / 23) / 16 },
+ { 7, ((Page::kPageSize / kPointerSize) / 7) / 32 },
+ { 3, ((Page::kPageSize / kPointerSize) / 3) / 256 },
+ { 1, 0}
+ };
+ for (int i = kSampleFinenesses - 1; i >= 0; i--) {
+ ExemptPopularPages(samples[i].prime_sample_step, samples[i].threshold);
+ // As a last resort we mark all pages as being exempt from the store buffer.
+ ASSERT(i != 0 || old_top_ == old_start_);
+ if (old_limit_ - old_top_ > old_top_ - old_start_) return;
+ }
+ UNREACHABLE();
+}
+
+
+// Sample the store buffer to see if a few pages account for a large share
+// of its entries.
+void StoreBuffer::ExemptPopularPages(int prime_sample_step, int threshold) {
+ PointerChunkIterator it(heap_);
+ MemoryChunk* chunk;
+ while ((chunk = it.next()) != NULL) {
+ chunk->set_store_buffer_counter(0);
+ }
+ bool created_new_scan_on_scavenge_pages = false;
+ MemoryChunk* previous_chunk = NULL;
+ for (Address* p = old_start_; p < old_top_; p += prime_sample_step) {
+ Address addr = *p;
+ MemoryChunk* containing_chunk = NULL;
+ if (previous_chunk != NULL && previous_chunk->Contains(addr)) {
+ containing_chunk = previous_chunk;
+ } else {
+ containing_chunk = MemoryChunk::FromAnyPointerAddress(addr);
+ }
+ int old_counter = containing_chunk->store_buffer_counter();
+ if (old_counter == threshold) {
+ containing_chunk->set_scan_on_scavenge(true);
+ created_new_scan_on_scavenge_pages = true;
+ }
+ containing_chunk->set_store_buffer_counter(old_counter + 1);
+ previous_chunk = containing_chunk;
+ }
+ if (created_new_scan_on_scavenge_pages) {
+ Filter(MemoryChunk::SCAN_ON_SCAVENGE);
+ }
+ old_buffer_is_filtered_ = true;
+}
+
+
+void StoreBuffer::Filter(int flag) {
+ Address* new_top = old_start_;
+ MemoryChunk* previous_chunk = NULL;
+ for (Address* p = old_start_; p < old_top_; p++) {
+ Address addr = *p;
+ MemoryChunk* containing_chunk = NULL;
+ if (previous_chunk != NULL && previous_chunk->Contains(addr)) {
+ containing_chunk = previous_chunk;
+ } else {
+ containing_chunk = MemoryChunk::FromAnyPointerAddress(addr);
+ previous_chunk = containing_chunk;
+ }
+ if (!containing_chunk->IsFlagSet(flag)) {
+ *new_top++ = addr;
+ }
+ }
+ old_top_ = new_top;
+}
+
+
+void StoreBuffer::SortUniq() {
+ Compact();
+ if (old_buffer_is_sorted_) return;
+ ZapHashTables();
+ qsort(reinterpret_cast<void*>(old_start_),
+ old_top_ - old_start_,
+ sizeof(*old_top_),
+ &CompareAddresses);
+ Uniq();
+
+ old_buffer_is_sorted_ = true;
+}
+
+
+bool StoreBuffer::PrepareForIteration() {
+ Compact();
+ PointerChunkIterator it(heap_);
+ MemoryChunk* chunk;
+ bool page_has_scan_on_scavenge_flag = false;
+ while ((chunk = it.next()) != NULL) {
+ if (chunk->scan_on_scavenge()) page_has_scan_on_scavenge_flag = true;
+ }
+
+ if (page_has_scan_on_scavenge_flag) {
+ Filter(MemoryChunk::SCAN_ON_SCAVENGE);
+ }
+ ZapHashTables();
+ return page_has_scan_on_scavenge_flag;
+}
+
+
+#ifdef DEBUG
+void StoreBuffer::Clean() {
+ ZapHashTables();
+ Uniq(); // Also removes things that no longer point to new space.
+ CheckForFullBuffer();
+}
+
+
+static bool Zapped(char* start, int size) {
+ for (int i = 0; i < size; i++) {
+ if (start[i] != 0) return false;
+ }
+ return true;
+}
+
+
+bool StoreBuffer::HashTablesAreZapped() {
+ return Zapped(reinterpret_cast<char*>(hash_map_1_),
+ sizeof(uintptr_t) * kHashMapLength) &&
+ Zapped(reinterpret_cast<char*>(hash_map_2_),
+ sizeof(uintptr_t) * kHashMapLength);
+}
+
+
+static Address* in_store_buffer_1_element_cache = NULL;
+
+
+bool StoreBuffer::CellIsInStoreBuffer(Address cell_address) {
+ if (!FLAG_enable_slow_asserts) return true;
+ if (in_store_buffer_1_element_cache != NULL &&
+ *in_store_buffer_1_element_cache == cell_address) {
+ return true;
+ }
+ Address* top = reinterpret_cast<Address*>(heap_->store_buffer_top());
+ for (Address* current = top - 1; current >= start_; current--) {
+ if (*current == cell_address) {
+ in_store_buffer_1_element_cache = current;
+ return true;
+ }
+ }
+ for (Address* current = old_top_ - 1; current >= old_start_; current--) {
+ if (*current == cell_address) {
+ in_store_buffer_1_element_cache = current;
+ return true;
+ }
+ }
+ return false;
+}
+#endif
+
+
+void StoreBuffer::ZapHashTables() {
+ memset(reinterpret_cast<void*>(hash_map_1_),
+ 0,
+ sizeof(uintptr_t) * kHashMapLength);
+ memset(reinterpret_cast<void*>(hash_map_2_),
+ 0,
+ sizeof(uintptr_t) * kHashMapLength);
+}
+
+
+void StoreBuffer::GCPrologue() {
+ ZapHashTables();
+ during_gc_ = true;
+}
+
+
+#ifdef DEBUG
+static void DummyScavengePointer(HeapObject** p, HeapObject* o) {
+ // Do nothing.
+}
+
+
+void StoreBuffer::VerifyPointers(PagedSpace* space,
+ RegionCallback region_callback) {
+ PageIterator it(space);
+
+ while (it.has_next()) {
+ Page* page = it.next();
+ FindPointersToNewSpaceOnPage(
+ reinterpret_cast<PagedSpace*>(page->owner()),
+ page,
+ region_callback,
+ &DummyScavengePointer);
+ }
+}
+
+
+void StoreBuffer::VerifyPointers(LargeObjectSpace* space) {
+ LargeObjectIterator it(space);
+ for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
+ if (object->IsFixedArray()) {
+ Address slot_address = object->address();
+ Address end = object->address() + object->Size();
+
+ while (slot_address < end) {
+ HeapObject** slot = reinterpret_cast<HeapObject**>(slot_address);
+ // When we are not in GC, the Heap::InNewSpace() predicate
+ // checks that pointers that satisfy it point into
+ // the active semispace.
+ heap_->InNewSpace(*slot);
+ slot_address += kPointerSize;
+ }
+ }
+ }
+}
+#endif
+
+
+void StoreBuffer::Verify() {
+#ifdef DEBUG
+ VerifyPointers(heap_->old_pointer_space(),
+ &StoreBuffer::FindPointersToNewSpaceInRegion);
+ VerifyPointers(heap_->map_space(),
+ &StoreBuffer::FindPointersToNewSpaceInMapsRegion);
+ VerifyPointers(heap_->lo_space());
+#endif
+}
+
+
+void StoreBuffer::GCEpilogue() {
+ during_gc_ = false;
+ if (FLAG_verify_heap) {
+ Verify();
+ }
+}
+
+
+void StoreBuffer::FindPointersToNewSpaceInRegion(
+ Address start, Address end, ObjectSlotCallback slot_callback) {
+ for (Address slot_address = start;
+ slot_address < end;
+ slot_address += kPointerSize) {
+ Object** slot = reinterpret_cast<Object**>(slot_address);
+ if (heap_->InNewSpace(*slot)) {
+ HeapObject* object = reinterpret_cast<HeapObject*>(*slot);
+ ASSERT(object->IsHeapObject());
+ slot_callback(reinterpret_cast<HeapObject**>(slot), object);
+ if (heap_->InNewSpace(*slot)) {
+ EnterDirectlyIntoStoreBuffer(slot_address);
+ }
+ }
+ }
+}
+
+
+// Compute the start address of the first map following the given addr.
+static inline Address MapStartAlign(Address addr) {
+ Address page = Page::FromAddress(addr)->ObjectAreaStart();
+ return page + (((addr - page) + (Map::kSize - 1)) / Map::kSize * Map::kSize);
+}
+
+
+// Compute the end address of the first map preceding the given addr.
+static inline Address MapEndAlign(Address addr) {
+ Address page = Page::FromAllocationTop(addr)->ObjectAreaStart();
+ return page + ((addr - page) / Map::kSize * Map::kSize);
+}
+
+
+void StoreBuffer::FindPointersToNewSpaceInMaps(
+ Address start,
+ Address end,
+ ObjectSlotCallback slot_callback) {
+ ASSERT(MapStartAlign(start) == start);
+ ASSERT(MapEndAlign(end) == end);
+
+ Address map_address = start;
+ while (map_address < end) {
+ ASSERT(!heap_->InNewSpace(Memory::Object_at(map_address)));
+ ASSERT(Memory::Object_at(map_address)->IsMap());
+
+ Address pointer_fields_start = map_address + Map::kPointerFieldsBeginOffset;
+ Address pointer_fields_end = map_address + Map::kPointerFieldsEndOffset;
+
+ FindPointersToNewSpaceInRegion(pointer_fields_start,
+ pointer_fields_end,
+ slot_callback);
+ map_address += Map::kSize;
+ }
+}
+
+
+void StoreBuffer::FindPointersToNewSpaceInMapsRegion(
+ Address start,
+ Address end,
+ ObjectSlotCallback slot_callback) {
+ Address map_aligned_start = MapStartAlign(start);
+ Address map_aligned_end = MapEndAlign(end);
+
+ ASSERT(map_aligned_start == start);
+ ASSERT(map_aligned_end == end);
+
+ FindPointersToNewSpaceInMaps(map_aligned_start,
+ map_aligned_end,
+ slot_callback);
+}
+
+
+// This function iterates over all the pointers in a paged space in the heap,
+// looking for pointers into new space. Within the pages there may be dead
+// objects that have not been overwritten by free spaces or fillers because of
+// lazy sweeping. These dead objects may not contain pointers to new space.
+// The garbage areas that have been swept properly (these will normally be the
+// large ones) will be marked with free space and filler map words. In
+// addition any area that has never been used at all for object allocation must
+// be marked with a free space or filler. Because the free space and filler
+// maps do not move we can always recognize these even after a compaction.
+// Normal objects like FixedArrays and JSObjects should not contain references
+// to these maps. The special garbage section (see comment in spaces.h) is
+// skipped since it can contain absolutely anything. Any objects that are
+// allocated during iteration may or may not be visited by the iteration, but
+// they will not be partially visited.
+void StoreBuffer::FindPointersToNewSpaceOnPage(
+ PagedSpace* space,
+ Page* page,
+ RegionCallback region_callback,
+ ObjectSlotCallback slot_callback) {
+ Address visitable_start = page->ObjectAreaStart();
+ Address end_of_page = page->ObjectAreaEnd();
+
+ Address visitable_end = visitable_start;
+
+ Object* free_space_map = heap_->free_space_map();
+ Object* two_pointer_filler_map = heap_->two_pointer_filler_map();
+
+ while (visitable_end < end_of_page) {
+ Object* o = *reinterpret_cast<Object**>(visitable_end);
+ // Skip fillers but not things that look like fillers in the special
+ // garbage section which can contain anything.
+ if (o == free_space_map ||
+ o == two_pointer_filler_map ||
+ (visitable_end == space->top() && visitable_end != space->limit())) {
+ if (visitable_start != visitable_end) {
+ // After calling this the special garbage section may have moved.
+ (this->*region_callback)(visitable_start,
+ visitable_end,
+ slot_callback);
+ if (visitable_end >= space->top() && visitable_end < space->limit()) {
+ visitable_end = space->limit();
+ visitable_start = visitable_end;
+ continue;
+ }
+ }
+ if (visitable_end == space->top() && visitable_end != space->limit()) {
+ visitable_start = visitable_end = space->limit();
+ } else {
+ // At this point we are either at the start of a filler or we are at
+ // the point where the space->top() used to be before the
+ // visit_pointer_region call above. Either way we can skip the
+ // object at the current spot: We don't promise to visit objects
+ // allocated during heap traversal, and if space->top() moved then it
+ // must be because an object was allocated at this point.
+ visitable_start =
+ visitable_end + HeapObject::FromAddress(visitable_end)->Size();
+ visitable_end = visitable_start;
+ }
+ } else {
+ ASSERT(o != free_space_map);
+ ASSERT(o != two_pointer_filler_map);
+ ASSERT(visitable_end < space->top() || visitable_end >= space->limit());
+ visitable_end += kPointerSize;
+ }
+ }
+ ASSERT(visitable_end == end_of_page);
+ if (visitable_start != visitable_end) {
+ (this->*region_callback)(visitable_start,
+ visitable_end,
+ slot_callback);
+ }
+}
+
+
+void StoreBuffer::IteratePointersInStoreBuffer(
+ ObjectSlotCallback slot_callback) {
+ Address* limit = old_top_;
+ old_top_ = old_start_;
+ {
+ DontMoveStoreBufferEntriesScope scope(this);
+ for (Address* current = old_start_; current < limit; current++) {
+#ifdef DEBUG
+ Address* saved_top = old_top_;
+#endif
+ Object** slot = reinterpret_cast<Object**>(*current);
+ Object* object = *slot;
+ if (heap_->InFromSpace(object)) {
+ HeapObject* heap_object = reinterpret_cast<HeapObject*>(object);
+ slot_callback(reinterpret_cast<HeapObject**>(slot), heap_object);
+ if (heap_->InNewSpace(*slot)) {
+ EnterDirectlyIntoStoreBuffer(reinterpret_cast<Address>(slot));
+ }
+ }
+ ASSERT(old_top_ == saved_top + 1 || old_top_ == saved_top);
+ }
+ }
+}
+
+
+void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback slot_callback) {
+ // We do not sort or remove duplicated entries from the store buffer because
+ // we expect that the callback will rebuild the store buffer, thus removing
+ // all duplicates and pointers to old space.
+ bool some_pages_to_scan = PrepareForIteration();
+
+ // TODO(gc): we want to skip slots on evacuation candidates
+ // but we can't simply figure that out from the slot address
+ // because the slot can belong to a large object.
+ IteratePointersInStoreBuffer(slot_callback);
+
+ // We are done scanning all the pointers that were in the store buffer, but
+ // there may be some pages marked scan_on_scavenge that have pointers to new
+ // space that are not in the store buffer. We must scan them now. As we
+ // scan, the surviving pointers to new space will be added to the store
+ // buffer. If there are still a lot of pointers to new space then we will
+ // keep the scan_on_scavenge flag on the page and discard the pointers that
+ // were added to the store buffer. If there are not many pointers to new
+ // space left on the page we will keep the pointers in the store buffer and
+ // remove the flag from the page.
+ if (some_pages_to_scan) {
+ if (callback_ != NULL) {
+ (*callback_)(heap_, NULL, kStoreBufferStartScanningPagesEvent);
+ }
+ PointerChunkIterator it(heap_);
+ MemoryChunk* chunk;
+ while ((chunk = it.next()) != NULL) {
+ if (chunk->scan_on_scavenge()) {
+ chunk->set_scan_on_scavenge(false);
+ if (callback_ != NULL) {
+ (*callback_)(heap_, chunk, kStoreBufferScanningPageEvent);
+ }
+ if (chunk->owner() == heap_->lo_space()) {
+ LargePage* large_page = reinterpret_cast<LargePage*>(chunk);
+ HeapObject* array = large_page->GetObject();
+ ASSERT(array->IsFixedArray());
+ Address start = array->address();
+ Address end = start + array->Size();
+ FindPointersToNewSpaceInRegion(start, end, slot_callback);
+ } else {
+ Page* page = reinterpret_cast<Page*>(chunk);
+ PagedSpace* owner = reinterpret_cast<PagedSpace*>(page->owner());
+ FindPointersToNewSpaceOnPage(
+ owner,
+ page,
+ (owner == heap_->map_space() ?
+ &StoreBuffer::FindPointersToNewSpaceInMapsRegion :
+ &StoreBuffer::FindPointersToNewSpaceInRegion),
+ slot_callback);
+ }
+ }
+ }
+ if (callback_ != NULL) {
+ (*callback_)(heap_, NULL, kStoreBufferScanningPageEvent);
+ }
+ }
+}
+
+
+void StoreBuffer::Compact() {
+ Address* top = reinterpret_cast<Address*>(heap_->store_buffer_top());
+
+ if (top == start_) return;
+
+ // There's no check of the limit in the loop below so we check here for
+ // the worst case (compaction doesn't eliminate any pointers).
+ ASSERT(top <= limit_);
+ heap_->public_set_store_buffer_top(start_);
+ if (top - start_ > old_limit_ - old_top_) {
+ HandleFullness();
+ }
+ ASSERT(may_move_store_buffer_entries_);
+ // Goes through the addresses in the store buffer attempting to remove
+ // duplicates. In the interest of speed this is a lossy operation. Some
+ // duplicates will remain. We have two hash tables with different hash
+ // functions to reduce the number of unnecessary clashes.
+ for (Address* current = start_; current < top; current++) {
+ ASSERT(!heap_->cell_space()->Contains(*current));
+ ASSERT(!heap_->code_space()->Contains(*current));
+ ASSERT(!heap_->old_data_space()->Contains(*current));
+ uintptr_t int_addr = reinterpret_cast<uintptr_t>(*current);
+ // Shift out the last bits including any tags.
+ int_addr >>= kPointerSizeLog2;
+ int hash1 =
+ ((int_addr ^ (int_addr >> kHashMapLengthLog2)) & (kHashMapLength - 1));
+ if (hash_map_1_[hash1] == int_addr) continue;
+ int hash2 =
+ ((int_addr - (int_addr >> kHashMapLengthLog2)) & (kHashMapLength - 1));
+ hash2 ^= hash2 >> (kHashMapLengthLog2 * 2);
+ if (hash_map_2_[hash2] == int_addr) continue;
+ if (hash_map_1_[hash1] == 0) {
+ hash_map_1_[hash1] = int_addr;
+ } else if (hash_map_2_[hash2] == 0) {
+ hash_map_2_[hash2] = int_addr;
+ } else {
+ // Rather than slowing down we just throw away some entries. This will
+ // cause some duplicates to remain undetected.
+ hash_map_1_[hash1] = int_addr;
+ hash_map_2_[hash2] = 0;
+ }
+ old_buffer_is_sorted_ = false;
+ old_buffer_is_filtered_ = false;
+ *old_top_++ = reinterpret_cast<Address>(int_addr << kPointerSizeLog2);
+ ASSERT(old_top_ <= old_limit_);
+ }
+ heap_->isolate()->counters()->store_buffer_compactions()->Increment();
+ CheckForFullBuffer();
+}
+
+
+void StoreBuffer::CheckForFullBuffer() {
+ if (old_limit_ - old_top_ < kStoreBufferSize * 2) {
+ HandleFullness();
+ }
+}
+
+} } // namespace v8::internal
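
The hash scheme in Compact() above is worth pulling out: two direct-mapped tables with different mixing functions act as a lossy duplicate filter, and on a double collision the code simply evicts rather than probing further, accepting that some duplicates survive. A distilled sketch of the same filter over plain integers; the mixing mirrors the lines above, while the driver and names are illustrative:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    static const int kLenLog2 = 12;
    static const int kLen = 1 << kLenLog2;
    static uintptr_t table1[kLen];
    static uintptr_t table2[kLen];

    // Returns true if v was probably seen recently. Zero is reserved as
    // the empty-slot marker, and eviction means the filter can miss.
    bool ProbablySeen(uintptr_t v) {
      int h1 = static_cast<int>((v ^ (v >> kLenLog2)) & (kLen - 1));
      if (table1[h1] == v) return true;
      int h2 = static_cast<int>((v - (v >> kLenLog2)) & (kLen - 1));
      h2 ^= h2 >> (kLenLog2 * 2);
      if (table2[h2] == v) return true;
      if (table1[h1] == 0) {
        table1[h1] = v;
      } else if (table2[h2] == 0) {
        table2[h2] = v;
      } else {
        table1[h1] = v;  // Evict instead of probing further.
        table2[h2] = 0;
      }
      return false;
    }

    int main() {
      memset(table1, 0, sizeof table1);
      memset(table2, 0, sizeof table2);
      printf("%d %d %d\n", ProbablySeen(0x1230), ProbablySeen(0x1230),
             ProbablySeen(0x4560));  // Prints: 0 1 0
      return 0;
    }

One detail the sketch preserves: since h2 starts below 2^12, the shift by kLenLog2 * 2 yields zero and the XOR is a no-op, but keeping the expression shaped like the original makes the correspondence obvious.
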
diff --git a/deps/v8/src/store-buffer.h b/deps/v8/src/store-buffer.h
new file mode 100644
index 000000000..e5e50aeb7
--- /dev/null
+++ b/deps/v8/src/store-buffer.h
@@ -0,0 +1,248 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_STORE_BUFFER_H_
+#define V8_STORE_BUFFER_H_
+
+#include "allocation.h"
+#include "checks.h"
+#include "globals.h"
+#include "platform.h"
+#include "v8globals.h"
+
+namespace v8 {
+namespace internal {
+
+class StoreBuffer;
+
+typedef void (*ObjectSlotCallback)(HeapObject** from, HeapObject* to);
+
+typedef void (StoreBuffer::*RegionCallback)(
+ Address start, Address end, ObjectSlotCallback slot_callback);
+
+// Used to implement the write barrier by collecting addresses of pointers
+// between spaces.
+class StoreBuffer {
+ public:
+ explicit StoreBuffer(Heap* heap);
+
+ static void StoreBufferOverflow(Isolate* isolate);
+
+ inline Address TopAddress();
+
+ void Setup();
+ void TearDown();
+
+ // This is used by the mutator to enter addresses into the store buffer.
+ inline void Mark(Address addr);
+
+ // This is used by the heap traversal to enter the addresses into the store
+ // buffer that should still be in the store buffer after GC. It enters
+ // addresses directly into the old buffer because the GC starts by wiping the
+ // old buffer and thereafter only visits each cell once so there is no need
+ // to attempt to remove any dupes. During the first part of a GC we
+ // are using the store buffer to access the old spaces and at the same time
+ // we are rebuilding the store buffer using this function. There is, however,
+ // no issue of overwriting the buffer we are iterating over, because this
+ // stage of the scavenge can only reduce the number of addresses in the store
+ // buffer (some objects are promoted so pointers to them do not need to be in
+ // the store buffer). The later parts of the GC scan the pages that are
+ // exempt from the store buffer and process the promotion queue. These steps
+ // can overflow this buffer. We check for this and on overflow we call the
+ // callback set up with the StoreBufferRebuildScope object.
+ inline void EnterDirectlyIntoStoreBuffer(Address addr);
+
+ // Iterates over all pointers that go from old space to new space. It will
+ // empty the store buffer as it starts, so the callback should reenter
+ // surviving old-to-new pointers into the store buffer to rebuild it.
+ void IteratePointersToNewSpace(ObjectSlotCallback callback);
+
+ static const int kStoreBufferOverflowBit = 1 << (14 + kPointerSizeLog2);
+ static const int kStoreBufferSize = kStoreBufferOverflowBit;
+ static const int kStoreBufferLength = kStoreBufferSize / sizeof(Address);
+ static const int kOldStoreBufferLength = kStoreBufferLength * 16;
+ static const int kHashMapLengthLog2 = 12;
+ static const int kHashMapLength = 1 << kHashMapLengthLog2;
+
+ void Compact();
+
+ void GCPrologue();
+ void GCEpilogue();
+
+ Object*** Limit() { return reinterpret_cast<Object***>(old_limit_); }
+ Object*** Start() { return reinterpret_cast<Object***>(old_start_); }
+ Object*** Top() { return reinterpret_cast<Object***>(old_top_); }
+ void SetTop(Object*** top) {
+ ASSERT(top >= Start());
+ ASSERT(top <= Limit());
+ old_top_ = reinterpret_cast<Address*>(top);
+ }
+
+ bool old_buffer_is_sorted() { return old_buffer_is_sorted_; }
+ bool old_buffer_is_filtered() { return old_buffer_is_filtered_; }
+
+ // Goes through the store buffer removing pointers to things that have
+ // been promoted. Rebuilds the store buffer completely if it overflowed.
+ void SortUniq();
+
+ void HandleFullness();
+ void Verify();
+
+ bool PrepareForIteration();
+
+#ifdef DEBUG
+ void Clean();
+ // Slow, for asserts only.
+ bool CellIsInStoreBuffer(Address cell);
+#endif
+
+ void Filter(int flag);
+
+ private:
+ Heap* heap_;
+
+ // The store buffer is divided up into a new buffer that is constantly being
+ // filled by mutator activity and an old buffer that is filled with the data
+ // from the new buffer after compaction.
+ Address* start_;
+ Address* limit_;
+
+ Address* old_start_;
+ Address* old_limit_;
+ Address* old_top_;
+
+ bool old_buffer_is_sorted_;
+ bool old_buffer_is_filtered_;
+ bool during_gc_;
+ // The garbage collector iterates over many pointers to new space that
+ // are not handled by the store buffer. This flag indicates whether the
+ // pointers found by the callbacks should be added to the store buffer
+ // or not.
+ bool store_buffer_rebuilding_enabled_;
+ StoreBufferCallback callback_;
+ bool may_move_store_buffer_entries_;
+
+ VirtualMemory* virtual_memory_;
+ uintptr_t* hash_map_1_;
+ uintptr_t* hash_map_2_;
+
+ void CheckForFullBuffer();
+ void Uniq();
+ void ZapHashTables();
+ bool HashTablesAreZapped();
+ void ExemptPopularPages(int prime_sample_step, int threshold);
+
+ void FindPointersToNewSpaceInRegion(Address start,
+ Address end,
+ ObjectSlotCallback slot_callback);
+
+ // For each region of pointers on a page in use from an old space, call the
+ // visit_pointer_region callback.
+ // If either visit_pointer_region or the slot callback can cause an
+ // allocation in old space and change the allocation watermark, then
+ // can_preallocate_during_iteration should be set to true.
+ void IteratePointersOnPage(
+ PagedSpace* space,
+ Page* page,
+ RegionCallback region_callback,
+ ObjectSlotCallback slot_callback);
+
+ void FindPointersToNewSpaceInMaps(
+ Address start,
+ Address end,
+ ObjectSlotCallback slot_callback);
+
+ void FindPointersToNewSpaceInMapsRegion(
+ Address start,
+ Address end,
+ ObjectSlotCallback slot_callback);
+
+ void FindPointersToNewSpaceOnPage(
+ PagedSpace* space,
+ Page* page,
+ RegionCallback region_callback,
+ ObjectSlotCallback slot_callback);
+
+ void IteratePointersInStoreBuffer(ObjectSlotCallback slot_callback);
+
+#ifdef DEBUG
+ void VerifyPointers(PagedSpace* space, RegionCallback region_callback);
+ void VerifyPointers(LargeObjectSpace* space);
+#endif
+
+ friend class StoreBufferRebuildScope;
+ friend class DontMoveStoreBufferEntriesScope;
+};
+
+
+class StoreBufferRebuildScope {
+ public:
+ explicit StoreBufferRebuildScope(Heap* heap,
+ StoreBuffer* store_buffer,
+ StoreBufferCallback callback)
+ : heap_(heap),
+ store_buffer_(store_buffer),
+ stored_state_(store_buffer->store_buffer_rebuilding_enabled_),
+ stored_callback_(store_buffer->callback_) {
+ store_buffer_->store_buffer_rebuilding_enabled_ = true;
+ store_buffer_->callback_ = callback;
+ (*callback)(heap, NULL, kStoreBufferStartScanningPagesEvent);
+ }
+
+ ~StoreBufferRebuildScope() {
+ store_buffer_->callback_ = stored_callback_;
+ store_buffer_->store_buffer_rebuilding_enabled_ = stored_state_;
+ store_buffer_->CheckForFullBuffer();
+ }
+
+ private:
+ Heap* heap_;
+ StoreBuffer* store_buffer_;
+ bool stored_state_;
+ StoreBufferCallback stored_callback_;
+};
+
+
+class DontMoveStoreBufferEntriesScope {
+ public:
+ explicit DontMoveStoreBufferEntriesScope(StoreBuffer* store_buffer)
+ : store_buffer_(store_buffer),
+ stored_state_(store_buffer->may_move_store_buffer_entries_) {
+ store_buffer_->may_move_store_buffer_entries_ = false;
+ }
+
+ ~DontMoveStoreBufferEntriesScope() {
+ store_buffer_->may_move_store_buffer_entries_ = stored_state_;
+ }
+
+ private:
+ StoreBuffer* store_buffer_;
+ bool stored_state_;
+};
+
+} } // namespace v8::internal
+
+#endif // V8_STORE_BUFFER_H_
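
Both scope classes above are the standard save-and-restore RAII idiom: the constructor stashes the current state and installs the new one, and the destructor puts the old state back on every exit path. A generic sketch of the idiom, with hypothetical names:

    // Temporarily forces a bool to a value; the previous value is restored
    // when the scope ends, regardless of how control leaves it.
    class ScopedFlag {
     public:
      ScopedFlag(bool* flag, bool value) : flag_(flag), saved_(*flag) {
        *flag_ = value;
      }
      ~ScopedFlag() { *flag_ = saved_; }

     private:
      bool* flag_;
      bool saved_;
    };

    // Usage:
    //   bool rebuilding = false;
    //   {
    //     ScopedFlag scope(&rebuilding, true);
    //     // rebuilding is true for this block only.
    //   }

StoreBufferRebuildScope layers two extras on top of this shape: it also swaps in a callback, and its destructor calls CheckForFullBuffer() so an overflow accumulated during the scope is handled as the scope closes.
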
diff --git a/deps/v8/src/string-search.h b/deps/v8/src/string-search.h
index 1223db0f9..f5405833f 100644
--- a/deps/v8/src/string-search.h
+++ b/deps/v8/src/string-search.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -242,9 +242,9 @@ int StringSearch<PatternChar, SubjectChar>::SingleCharSearch(
template <typename PatternChar, typename SubjectChar>
-static inline bool CharCompare(const PatternChar* pattern,
- const SubjectChar* subject,
- int length) {
+inline bool CharCompare(const PatternChar* pattern,
+ const SubjectChar* subject,
+ int length) {
ASSERT(length > 0);
int pos = 0;
do {
@@ -555,10 +555,10 @@ int StringSearch<PatternChar, SubjectChar>::InitialSearch(
// object should be constructed once and the Search function then called
// for each search.
template <typename SubjectChar, typename PatternChar>
-static int SearchString(Isolate* isolate,
- Vector<const SubjectChar> subject,
- Vector<const PatternChar> pattern,
- int start_index) {
+int SearchString(Isolate* isolate,
+ Vector<const SubjectChar> subject,
+ Vector<const PatternChar> pattern,
+ int start_index) {
StringSearch<PatternChar, SubjectChar> search(isolate, pattern);
return search.Search(subject, start_index);
}
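
Dropping static from these header-defined templates changes linkage, not behavior: a static function template gives each translation unit its own internal-linkage copy of every instantiation, while a plain inline template has external linkage, so the linker can fold identical instantiations together. The diff itself does not state the motivation; avoiding per-TU copies and compiler complaints about internal-linkage definitions in headers are the usual reasons. A tiny sketch of the distinction, with hypothetical names:

    // some_header.h -- sketch only.
    template <typename T>
    static T TwiceInternal(T v) { return v + v; }  // Internal linkage: each
                                                   // including .cc gets its own
                                                   // copy per instantiation.

    template <typename T>
    inline T TwiceInline(T v) { return v + v; }    // External linkage: identical
                                                   // instantiations in different
                                                   // .cc files are merged.
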
diff --git a/deps/v8/src/string-stream.cc b/deps/v8/src/string-stream.cc
index 8086cf951..35f7be541 100644
--- a/deps/v8/src/string-stream.cc
+++ b/deps/v8/src/string-stream.cc
@@ -350,29 +350,24 @@ void StringStream::PrintUsingMap(JSObject* js_object) {
}
DescriptorArray* descs = map->instance_descriptors();
for (int i = 0; i < descs->number_of_descriptors(); i++) {
- switch (descs->GetType(i)) {
- case FIELD: {
- Object* key = descs->GetKey(i);
- if (key->IsString() || key->IsNumber()) {
- int len = 3;
- if (key->IsString()) {
- len = String::cast(key)->length();
- }
- for (; len < 18; len++)
- Put(' ');
- if (key->IsString()) {
- Put(String::cast(key));
- } else {
- key->ShortPrint();
- }
- Add(": ");
- Object* value = js_object->FastPropertyAt(descs->GetFieldIndex(i));
- Add("%o\n", value);
+ if (descs->GetType(i) == FIELD) {
+ Object* key = descs->GetKey(i);
+ if (key->IsString() || key->IsNumber()) {
+ int len = 3;
+ if (key->IsString()) {
+ len = String::cast(key)->length();
}
+ for (; len < 18; len++)
+ Put(' ');
+ if (key->IsString()) {
+ Put(String::cast(key));
+ } else {
+ key->ShortPrint();
+ }
+ Add(": ");
+ Object* value = js_object->FastPropertyAt(descs->GetFieldIndex(i));
+ Add("%o\n", value);
}
- break;
- default:
- break;
}
}
}
diff --git a/deps/v8/src/string.js b/deps/v8/src/string.js
index 297105d04..3608bac8f 100644
--- a/deps/v8/src/string.js
+++ b/deps/v8/src/string.js
@@ -46,16 +46,18 @@
// ECMA-262 section 15.5.4.2
function StringToString() {
- if (!IS_STRING(this) && !IS_STRING_WRAPPER(this))
+ if (!IS_STRING(this) && !IS_STRING_WRAPPER(this)) {
throw new $TypeError('String.prototype.toString is not generic');
+ }
return %_ValueOf(this);
}
// ECMA-262 section 15.5.4.3
function StringValueOf() {
- if (!IS_STRING(this) && !IS_STRING_WRAPPER(this))
+ if (!IS_STRING(this) && !IS_STRING_WRAPPER(this)) {
throw new $TypeError('String.prototype.valueOf is not generic');
+ }
return %_ValueOf(this);
}
@@ -91,7 +93,8 @@ function StringCharCodeAt(pos) {
// ECMA-262, section 15.5.4.6
function StringConcat() {
if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined", ["String.prototype.concat"]);
+ throw MakeTypeError("called_on_null_or_undefined",
+ ["String.prototype.concat"]);
}
var len = %_ArgumentsLength();
var this_as_string = TO_STRING_INLINE(this);
@@ -358,7 +361,7 @@ function ExpandReplacement(string, subject, matchInfo, builder) {
builder_elements.push(SubString(string, position, next));
}
}
-};
+}
// Compute the string of a given regular expression capture.
@@ -371,7 +374,7 @@ function CaptureString(string, lastCaptureInfo, index) {
if (start < 0) return;
var end = lastCaptureInfo[CAPTURE(scaled + 1)];
return SubString(string, start, end);
-};
+}
// Add the string of a given regular expression capture to the
@@ -384,7 +387,7 @@ function addCaptureString(builder, matchInfo, index) {
if (start < 0) return;
var end = matchInfo[CAPTURE(scaled + 1)];
builder.addSpecialSlice(start, end);
-};
+}
// TODO(lrn): This array will survive indefinitely if replace is never
// called again. However, it will be empty, since the contents are cleared
@@ -531,30 +534,36 @@ function StringSlice(start, end) {
var s_len = s.length;
var start_i = TO_INTEGER(start);
var end_i = s_len;
- if (end !== void 0)
+ if (end !== void 0) {
end_i = TO_INTEGER(end);
+ }
if (start_i < 0) {
start_i += s_len;
- if (start_i < 0)
+ if (start_i < 0) {
start_i = 0;
+ }
} else {
- if (start_i > s_len)
+ if (start_i > s_len) {
start_i = s_len;
+ }
}
if (end_i < 0) {
end_i += s_len;
- if (end_i < 0)
+ if (end_i < 0) {
end_i = 0;
+ }
} else {
- if (end_i > s_len)
+ if (end_i > s_len) {
end_i = s_len;
+ }
}
var num_c = end_i - start_i;
- if (num_c < 0)
+ if (num_c < 0) {
num_c = 0;
+ }
return SubString(s, start_i, start_i + num_c);
}
@@ -568,7 +577,6 @@ function StringSplit(separator, limit) {
}
var subject = TO_STRING_INLINE(this);
limit = (IS_UNDEFINED(limit)) ? 0xffffffff : TO_UINT32(limit);
- if (limit === 0) return [];
// ECMA-262 says that if separator is undefined, the result should
// be an array of size 1 containing the entire string. SpiderMonkey
@@ -582,6 +590,9 @@ function StringSplit(separator, limit) {
var length = subject.length;
if (!IS_REGEXP(separator)) {
separator = TO_STRING_INLINE(separator);
+
+ if (limit === 0) return [];
+
var separator_length = separator.length;
// If the separator string is empty then return the elements in the subject.
@@ -592,6 +603,8 @@ function StringSplit(separator, limit) {
return result;
}
+ if (limit === 0) return [];
+
%_Log('regexp', 'regexp-split,%0S,%1r', [subject, separator]);
if (length === 0) {
@@ -688,7 +701,7 @@ function StringSubstring(start, end) {
}
}
- return (start_i + 1 == end_i
+ return ((start_i + 1 == end_i)
? %_StringCharAt(s, start_i)
: %_SubString(s, start_i, end_i));
}
@@ -732,7 +745,7 @@ function StringSubstr(start, n) {
var end = start + len;
if (end > s.length) end = s.length;
- return (start + 1 == end
+ return ((start + 1 == end)
? %_StringCharAt(s, start)
: %_SubString(s, start, end));
}
@@ -832,7 +845,7 @@ function HtmlEscape(str) {
.replace(/>/g, "&gt;")
.replace(/"/g, "&quot;")
.replace(/'/g, "&#039;");
-};
+}
// Compatibility support for KJS.
@@ -953,7 +966,7 @@ function SetUpString() {
// Set up the non-enumerable functions on the String prototype object.
- InstallFunctionsOnHiddenPrototype($String.prototype, DONT_ENUM, $Array(
+ InstallFunctions($String.prototype, DONT_ENUM, $Array(
"valueOf", StringValueOf,
"toString", StringToString,
"charAt", StringCharAt,
diff --git a/deps/v8/src/strtod.cc b/deps/v8/src/strtod.cc
index c89c8f333..be79c8008 100644
--- a/deps/v8/src/strtod.cc
+++ b/deps/v8/src/strtod.cc
@@ -27,7 +27,6 @@
#include <stdarg.h>
#include <math.h>
-#include <limits>
#include "globals.h"
#include "utils.h"
diff --git a/deps/v8/src/stub-cache.cc b/deps/v8/src/stub-cache.cc
index 55963303c..8b6e28f9f 100644
--- a/deps/v8/src/stub-cache.cc
+++ b/deps/v8/src/stub-cache.cc
@@ -55,7 +55,15 @@ void StubCache::Initialize(bool create_heap_objects) {
ASSERT(IsPowerOf2(kSecondaryTableSize));
if (create_heap_objects) {
HandleScope scope;
- Clear();
+ Code* empty = isolate_->builtins()->builtin(Builtins::kIllegal);
+ for (int i = 0; i < kPrimaryTableSize; i++) {
+ primary_[i].key = heap()->empty_string();
+ primary_[i].value = empty;
+ }
+ for (int j = 0; j < kSecondaryTableSize; j++) {
+ secondary_[j].key = heap()->empty_string();
+ secondary_[j].value = empty;
+ }
}
}
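
Every stub-cache conversion below follows the same recipe: the raw MaybeObject* protocol, where each allocation could return a retry-after-GC failure that had to be checked and propagated by hand, becomes Handle<Code>, so allocation inside the stub compiler can trigger GC without invalidating what the caller holds, and each function body collapses to probe the map's code cache, compile on miss, log, record, return. A distilled sketch of that probe-compile-cache flow with stand-in types (std::string for the name, std::shared_ptr for Handle<Code>); none of this is the V8 API:

    #include <map>
    #include <memory>
    #include <string>

    // Stand-ins: shared_ptr plays the role of Handle<Code> only in the
    // sense that the object stays valid while someone holds it.
    struct Code { std::string body; };
    typedef std::shared_ptr<Code> CodeHandle;

    static std::map<std::string, CodeHandle> code_cache;

    CodeHandle CompileLoadStub(const std::string& name) {
      CodeHandle code(new Code());
      code->body = "load:" + name;  // Placeholder for real stub compilation.
      return code;
    }

    // Probe first, compile only on a miss, then record the result so the
    // next request for the same name hits the cache.
    CodeHandle ComputeLoad(const std::string& name) {
      std::map<std::string, CodeHandle>::const_iterator it =
          code_cache.find(name);
      if (it != code_cache.end()) return it->second;
      CodeHandle code = CompileLoadStub(name);
      code_cache[name] = code;
      return code;
    }
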
@@ -101,8 +109,8 @@ Code* StubCache::Set(String* name, Map* map, Code* code) {
}
-MaybeObject* StubCache::ComputeLoadNonexistent(String* name,
- JSObject* receiver) {
+Handle<Code> StubCache::ComputeLoadNonexistent(Handle<String> name,
+ Handle<JSObject> receiver) {
ASSERT(receiver->IsGlobalObject() || receiver->HasFastProperties());
// If no global objects are present in the prototype chain, the load
// nonexistent IC stub can be shared for all names for a given map
@@ -110,558 +118,431 @@ MaybeObject* StubCache::ComputeLoadNonexistent(String* name,
// there are global objects involved, we need to check global
// property cells in the stub and therefore the stub will be
// specific to the name.
- String* cache_name = heap()->empty_string();
+ Handle<String> cache_name = factory()->empty_string();
if (receiver->IsGlobalObject()) cache_name = name;
- JSObject* last = receiver;
+ Handle<JSObject> last = receiver;
while (last->GetPrototype() != heap()->null_value()) {
- last = JSObject::cast(last->GetPrototype());
+ last = Handle<JSObject>(JSObject::cast(last->GetPrototype()));
if (last->IsGlobalObject()) cache_name = name;
}
// Compile the stub that is either shared for all names or
// name specific if there are global objects involved.
Code::Flags flags =
Code::ComputeMonomorphicFlags(Code::LOAD_IC, NONEXISTENT);
- Object* code = receiver->map()->FindInCodeCache(cache_name, flags);
- if (code->IsUndefined()) {
- LoadStubCompiler compiler;
- { MaybeObject* maybe_code =
- compiler.CompileLoadNonexistent(cache_name, receiver, last);
- if (!maybe_code->ToObject(&code)) return maybe_code;
- }
- PROFILE(isolate_,
- CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), cache_name));
- GDBJIT(AddCode(GDBJITInterface::LOAD_IC, cache_name, Code::cast(code)));
- Object* result;
- { MaybeObject* maybe_result =
- receiver->UpdateMapCodeCache(cache_name, Code::cast(code));
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- }
+ Handle<Object> probe(receiver->map()->FindInCodeCache(*cache_name, flags));
+ if (probe->IsCode()) return Handle<Code>::cast(probe);
+
+ LoadStubCompiler compiler(isolate_);
+ Handle<Code> code =
+ compiler.CompileLoadNonexistent(cache_name, receiver, last);
+ PROFILE(isolate_, CodeCreateEvent(Logger::LOAD_IC_TAG, *code, *cache_name));
+ GDBJIT(AddCode(GDBJITInterface::LOAD_IC, *cache_name, *code));
+ JSObject::UpdateMapCodeCache(receiver, cache_name, code);
return code;
}
-MaybeObject* StubCache::ComputeLoadField(String* name,
- JSObject* receiver,
- JSObject* holder,
+Handle<Code> StubCache::ComputeLoadField(Handle<String> name,
+ Handle<JSObject> receiver,
+ Handle<JSObject> holder,
int field_index) {
- ASSERT(IC::GetCodeCacheForObject(receiver, holder) == OWN_MAP);
+ ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP);
Code::Flags flags = Code::ComputeMonomorphicFlags(Code::LOAD_IC, FIELD);
- Object* code = receiver->map()->FindInCodeCache(name, flags);
- if (code->IsUndefined()) {
- LoadStubCompiler compiler;
- { MaybeObject* maybe_code =
- compiler.CompileLoadField(receiver, holder, field_index, name);
- if (!maybe_code->ToObject(&code)) return maybe_code;
- }
- PROFILE(isolate_,
- CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
- GDBJIT(AddCode(GDBJITInterface::LOAD_IC, name, Code::cast(code)));
- Object* result;
- { MaybeObject* maybe_result =
- receiver->UpdateMapCodeCache(name, Code::cast(code));
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- }
+ Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
+ if (probe->IsCode()) return Handle<Code>::cast(probe);
+
+ LoadStubCompiler compiler(isolate_);
+ Handle<Code> code =
+ compiler.CompileLoadField(receiver, holder, field_index, name);
+ PROFILE(isolate_, CodeCreateEvent(Logger::LOAD_IC_TAG, *code, *name));
+ GDBJIT(AddCode(GDBJITInterface::LOAD_IC, *name, *code));
+ JSObject::UpdateMapCodeCache(receiver, name, code);
return code;
}
-MaybeObject* StubCache::ComputeLoadCallback(String* name,
- JSObject* receiver,
- JSObject* holder,
- AccessorInfo* callback) {
+Handle<Code> StubCache::ComputeLoadCallback(Handle<String> name,
+ Handle<JSObject> receiver,
+ Handle<JSObject> holder,
+ Handle<AccessorInfo> callback) {
ASSERT(v8::ToCData<Address>(callback->getter()) != 0);
- ASSERT(IC::GetCodeCacheForObject(receiver, holder) == OWN_MAP);
+ ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP);
Code::Flags flags = Code::ComputeMonomorphicFlags(Code::LOAD_IC, CALLBACKS);
- Object* code = receiver->map()->FindInCodeCache(name, flags);
- if (code->IsUndefined()) {
- LoadStubCompiler compiler;
- { MaybeObject* maybe_code =
- compiler.CompileLoadCallback(name, receiver, holder, callback);
- if (!maybe_code->ToObject(&code)) return maybe_code;
- }
- PROFILE(isolate_,
- CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
- GDBJIT(AddCode(GDBJITInterface::LOAD_IC, name, Code::cast(code)));
- Object* result;
- { MaybeObject* maybe_result =
- receiver->UpdateMapCodeCache(name, Code::cast(code));
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- }
+ Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
+ if (probe->IsCode()) return Handle<Code>::cast(probe);
+
+ LoadStubCompiler compiler(isolate_);
+ Handle<Code> code =
+ compiler.CompileLoadCallback(name, receiver, holder, callback);
+ PROFILE(isolate_, CodeCreateEvent(Logger::LOAD_IC_TAG, *code, *name));
+ GDBJIT(AddCode(GDBJITInterface::LOAD_IC, *name, *code));
+ JSObject::UpdateMapCodeCache(receiver, name, code);
return code;
}
-MaybeObject* StubCache::ComputeLoadConstant(String* name,
- JSObject* receiver,
- JSObject* holder,
- Object* value) {
- ASSERT(IC::GetCodeCacheForObject(receiver, holder) == OWN_MAP);
+Handle<Code> StubCache::ComputeLoadConstant(Handle<String> name,
+ Handle<JSObject> receiver,
+ Handle<JSObject> holder,
+ Handle<Object> value) {
+ ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP);
Code::Flags flags =
Code::ComputeMonomorphicFlags(Code::LOAD_IC, CONSTANT_FUNCTION);
- Object* code = receiver->map()->FindInCodeCache(name, flags);
- if (code->IsUndefined()) {
- LoadStubCompiler compiler;
- { MaybeObject* maybe_code =
- compiler.CompileLoadConstant(receiver, holder, value, name);
- if (!maybe_code->ToObject(&code)) return maybe_code;
- }
- PROFILE(isolate_,
- CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
- GDBJIT(AddCode(GDBJITInterface::LOAD_IC, name, Code::cast(code)));
- Object* result;
- { MaybeObject* maybe_result =
- receiver->UpdateMapCodeCache(name, Code::cast(code));
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- }
+ Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
+ if (probe->IsCode()) return Handle<Code>::cast(probe);
+
+ LoadStubCompiler compiler(isolate_);
+ Handle<Code> code =
+ compiler.CompileLoadConstant(receiver, holder, value, name);
+ PROFILE(isolate_, CodeCreateEvent(Logger::LOAD_IC_TAG, *code, *name));
+ GDBJIT(AddCode(GDBJITInterface::LOAD_IC, *name, *code));
+ JSObject::UpdateMapCodeCache(receiver, name, code);
return code;
}
-MaybeObject* StubCache::ComputeLoadInterceptor(String* name,
- JSObject* receiver,
- JSObject* holder) {
- ASSERT(IC::GetCodeCacheForObject(receiver, holder) == OWN_MAP);
+Handle<Code> StubCache::ComputeLoadInterceptor(Handle<String> name,
+ Handle<JSObject> receiver,
+ Handle<JSObject> holder) {
+ ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP);
Code::Flags flags = Code::ComputeMonomorphicFlags(Code::LOAD_IC, INTERCEPTOR);
- Object* code = receiver->map()->FindInCodeCache(name, flags);
- if (code->IsUndefined()) {
- LoadStubCompiler compiler;
- { MaybeObject* maybe_code =
- compiler.CompileLoadInterceptor(receiver, holder, name);
- if (!maybe_code->ToObject(&code)) return maybe_code;
- }
- PROFILE(isolate_,
- CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
- GDBJIT(AddCode(GDBJITInterface::LOAD_IC, name, Code::cast(code)));
- Object* result;
- { MaybeObject* maybe_result =
- receiver->UpdateMapCodeCache(name, Code::cast(code));
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- }
+ Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
+ if (probe->IsCode()) return Handle<Code>::cast(probe);
+
+ LoadStubCompiler compiler(isolate_);
+ Handle<Code> code =
+ compiler.CompileLoadInterceptor(receiver, holder, name);
+ PROFILE(isolate_, CodeCreateEvent(Logger::LOAD_IC_TAG, *code, *name));
+ GDBJIT(AddCode(GDBJITInterface::LOAD_IC, *name, *code));
+ JSObject::UpdateMapCodeCache(receiver, name, code);
return code;
}
-MaybeObject* StubCache::ComputeLoadNormal() {
- return isolate_->builtins()->builtin(Builtins::kLoadIC_Normal);
+Handle<Code> StubCache::ComputeLoadNormal() {
+ return isolate_->builtins()->LoadIC_Normal();
}
-MaybeObject* StubCache::ComputeLoadGlobal(String* name,
- JSObject* receiver,
- GlobalObject* holder,
- JSGlobalPropertyCell* cell,
+Handle<Code> StubCache::ComputeLoadGlobal(Handle<String> name,
+ Handle<JSObject> receiver,
+ Handle<GlobalObject> holder,
+ Handle<JSGlobalPropertyCell> cell,
bool is_dont_delete) {
- ASSERT(IC::GetCodeCacheForObject(receiver, holder) == OWN_MAP);
+ ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP);
Code::Flags flags = Code::ComputeMonomorphicFlags(Code::LOAD_IC, NORMAL);
- Object* code = receiver->map()->FindInCodeCache(name, flags);
- if (code->IsUndefined()) {
- LoadStubCompiler compiler;
- { MaybeObject* maybe_code = compiler.CompileLoadGlobal(receiver,
- holder,
- cell,
- name,
- is_dont_delete);
- if (!maybe_code->ToObject(&code)) return maybe_code;
- }
- PROFILE(isolate_,
- CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
- GDBJIT(AddCode(GDBJITInterface::LOAD_IC, name, Code::cast(code)));
- Object* result;
- { MaybeObject* maybe_result =
- receiver->UpdateMapCodeCache(name, Code::cast(code));
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- }
+ Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
+ if (probe->IsCode()) return Handle<Code>::cast(probe);
+
+ LoadStubCompiler compiler(isolate_);
+ Handle<Code> code =
+ compiler.CompileLoadGlobal(receiver, holder, cell, name, is_dont_delete);
+ PROFILE(isolate_, CodeCreateEvent(Logger::LOAD_IC_TAG, *code, *name));
+ GDBJIT(AddCode(GDBJITInterface::LOAD_IC, *name, *code));
+ JSObject::UpdateMapCodeCache(receiver, name, code);
return code;
}
-MaybeObject* StubCache::ComputeKeyedLoadField(String* name,
- JSObject* receiver,
- JSObject* holder,
+Handle<Code> StubCache::ComputeKeyedLoadField(Handle<String> name,
+ Handle<JSObject> receiver,
+ Handle<JSObject> holder,
int field_index) {
- ASSERT(IC::GetCodeCacheForObject(receiver, holder) == OWN_MAP);
+ ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP);
Code::Flags flags = Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, FIELD);
- Object* code = receiver->map()->FindInCodeCache(name, flags);
- if (code->IsUndefined()) {
- KeyedLoadStubCompiler compiler;
- { MaybeObject* maybe_code =
- compiler.CompileLoadField(name, receiver, holder, field_index);
- if (!maybe_code->ToObject(&code)) return maybe_code;
- }
- PROFILE(isolate_,
- CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
- GDBJIT(AddCode(GDBJITInterface::KEYED_LOAD_IC, name, Code::cast(code)));
- Object* result;
- { MaybeObject* maybe_result =
- receiver->UpdateMapCodeCache(name, Code::cast(code));
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- }
+ Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
+ if (probe->IsCode()) return Handle<Code>::cast(probe);
+
+ KeyedLoadStubCompiler compiler(isolate_);
+ Handle<Code> code =
+ compiler.CompileLoadField(name, receiver, holder, field_index);
+ PROFILE(isolate_, CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, *code, *name));
+ GDBJIT(AddCode(GDBJITInterface::KEYED_LOAD_IC, *name, *code));
+ JSObject::UpdateMapCodeCache(receiver, name, code);
return code;
}
-MaybeObject* StubCache::ComputeKeyedLoadConstant(String* name,
- JSObject* receiver,
- JSObject* holder,
- Object* value) {
- ASSERT(IC::GetCodeCacheForObject(receiver, holder) == OWN_MAP);
+Handle<Code> StubCache::ComputeKeyedLoadConstant(Handle<String> name,
+ Handle<JSObject> receiver,
+ Handle<JSObject> holder,
+ Handle<Object> value) {
+ ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP);
Code::Flags flags =
Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, CONSTANT_FUNCTION);
- Object* code = receiver->map()->FindInCodeCache(name, flags);
- if (code->IsUndefined()) {
- KeyedLoadStubCompiler compiler;
- { MaybeObject* maybe_code =
- compiler.CompileLoadConstant(name, receiver, holder, value);
- if (!maybe_code->ToObject(&code)) return maybe_code;
- }
- PROFILE(isolate_,
- CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
- GDBJIT(AddCode(GDBJITInterface::KEYED_LOAD_IC, name, Code::cast(code)));
- Object* result;
- { MaybeObject* maybe_result =
- receiver->UpdateMapCodeCache(name, Code::cast(code));
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- }
+ Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
+ if (probe->IsCode()) return Handle<Code>::cast(probe);
+
+ KeyedLoadStubCompiler compiler(isolate_);
+ Handle<Code> code =
+ compiler.CompileLoadConstant(name, receiver, holder, value);
+ PROFILE(isolate_, CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, *code, *name));
+ GDBJIT(AddCode(GDBJITInterface::KEYED_LOAD_IC, *name, *code));
+ JSObject::UpdateMapCodeCache(receiver, name, code);
return code;
}
-MaybeObject* StubCache::ComputeKeyedLoadInterceptor(String* name,
- JSObject* receiver,
- JSObject* holder) {
- ASSERT(IC::GetCodeCacheForObject(receiver, holder) == OWN_MAP);
+Handle<Code> StubCache::ComputeKeyedLoadInterceptor(Handle<String> name,
+ Handle<JSObject> receiver,
+ Handle<JSObject> holder) {
+ ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP);
Code::Flags flags =
Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, INTERCEPTOR);
- Object* code = receiver->map()->FindInCodeCache(name, flags);
- if (code->IsUndefined()) {
- KeyedLoadStubCompiler compiler;
- { MaybeObject* maybe_code =
- compiler.CompileLoadInterceptor(receiver, holder, name);
- if (!maybe_code->ToObject(&code)) return maybe_code;
- }
- PROFILE(isolate_,
- CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
- GDBJIT(AddCode(GDBJITInterface::KEYED_LOAD_IC, name, Code::cast(code)));
- Object* result;
- { MaybeObject* maybe_result =
- receiver->UpdateMapCodeCache(name, Code::cast(code));
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- }
+ Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
+ if (probe->IsCode()) return Handle<Code>::cast(probe);
+
+ KeyedLoadStubCompiler compiler(isolate_);
+ Handle<Code> code = compiler.CompileLoadInterceptor(receiver, holder, name);
+ PROFILE(isolate_, CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, *code, *name));
+ GDBJIT(AddCode(GDBJITInterface::KEYED_LOAD_IC, *name, *code));
+ JSObject::UpdateMapCodeCache(receiver, name, code);
return code;
}
-MaybeObject* StubCache::ComputeKeyedLoadCallback(String* name,
- JSObject* receiver,
- JSObject* holder,
- AccessorInfo* callback) {
- ASSERT(IC::GetCodeCacheForObject(receiver, holder) == OWN_MAP);
+Handle<Code> StubCache::ComputeKeyedLoadCallback(
+ Handle<String> name,
+ Handle<JSObject> receiver,
+ Handle<JSObject> holder,
+ Handle<AccessorInfo> callback) {
+ ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP);
Code::Flags flags =
Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, CALLBACKS);
- Object* code = receiver->map()->FindInCodeCache(name, flags);
- if (code->IsUndefined()) {
- KeyedLoadStubCompiler compiler;
- { MaybeObject* maybe_code =
- compiler.CompileLoadCallback(name, receiver, holder, callback);
- if (!maybe_code->ToObject(&code)) return maybe_code;
- }
- PROFILE(isolate_,
- CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
- GDBJIT(AddCode(GDBJITInterface::KEYED_LOAD_IC, name, Code::cast(code)));
- Object* result;
- { MaybeObject* maybe_result =
- receiver->UpdateMapCodeCache(name, Code::cast(code));
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- }
+ Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
+ if (probe->IsCode()) return Handle<Code>::cast(probe);
+
+ KeyedLoadStubCompiler compiler(isolate_);
+ Handle<Code> code =
+ compiler.CompileLoadCallback(name, receiver, holder, callback);
+ PROFILE(isolate_, CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, *code, *name));
+ GDBJIT(AddCode(GDBJITInterface::KEYED_LOAD_IC, *name, *code));
+ JSObject::UpdateMapCodeCache(receiver, name, code);
return code;
}
-
-MaybeObject* StubCache::ComputeKeyedLoadArrayLength(String* name,
- JSArray* receiver) {
+Handle<Code> StubCache::ComputeKeyedLoadArrayLength(Handle<String> name,
+ Handle<JSArray> receiver) {
Code::Flags flags =
Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, CALLBACKS);
- ASSERT(receiver->IsJSObject());
- Object* code = receiver->map()->FindInCodeCache(name, flags);
- if (code->IsUndefined()) {
- KeyedLoadStubCompiler compiler;
- { MaybeObject* maybe_code = compiler.CompileLoadArrayLength(name);
- if (!maybe_code->ToObject(&code)) return maybe_code;
- }
- PROFILE(isolate_,
- CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
- GDBJIT(AddCode(GDBJITInterface::KEYED_LOAD_IC, name, Code::cast(code)));
- Object* result;
- { MaybeObject* maybe_result =
- receiver->UpdateMapCodeCache(name, Code::cast(code));
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- }
+ Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
+ if (probe->IsCode()) return Handle<Code>::cast(probe);
+
+ KeyedLoadStubCompiler compiler(isolate_);
+ Handle<Code> code = compiler.CompileLoadArrayLength(name);
+ PROFILE(isolate_, CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, *code, *name));
+ GDBJIT(AddCode(GDBJITInterface::KEYED_LOAD_IC, *name, *code));
+ JSObject::UpdateMapCodeCache(receiver, name, code);
return code;
}
-MaybeObject* StubCache::ComputeKeyedLoadStringLength(String* name,
- String* receiver) {
+Handle<Code> StubCache::ComputeKeyedLoadStringLength(Handle<String> name,
+ Handle<String> receiver) {
Code::Flags flags =
Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, CALLBACKS);
- Map* map = receiver->map();
- Object* code = map->FindInCodeCache(name, flags);
- if (code->IsUndefined()) {
- KeyedLoadStubCompiler compiler;
- { MaybeObject* maybe_code = compiler.CompileLoadStringLength(name);
- if (!maybe_code->ToObject(&code)) return maybe_code;
- }
- PROFILE(isolate_,
- CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
- GDBJIT(AddCode(GDBJITInterface::KEYED_LOAD_IC, name, Code::cast(code)));
- Object* result;
- { MaybeObject* maybe_result = map->UpdateCodeCache(name, Code::cast(code));
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- }
+ Handle<Map> map(receiver->map());
+ Handle<Object> probe(map->FindInCodeCache(*name, flags));
+ if (probe->IsCode()) return Handle<Code>::cast(probe);
+
+ KeyedLoadStubCompiler compiler(isolate_);
+ Handle<Code> code = compiler.CompileLoadStringLength(name);
+ PROFILE(isolate_, CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, *code, *name));
+ GDBJIT(AddCode(GDBJITInterface::KEYED_LOAD_IC, *name, *code));
+ Map::UpdateCodeCache(map, name, code);
return code;
}
-MaybeObject* StubCache::ComputeKeyedLoadFunctionPrototype(
- String* name,
- JSFunction* receiver) {
+Handle<Code> StubCache::ComputeKeyedLoadFunctionPrototype(
+ Handle<String> name,
+ Handle<JSFunction> receiver) {
Code::Flags flags =
Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, CALLBACKS);
- Object* code = receiver->map()->FindInCodeCache(name, flags);
- if (code->IsUndefined()) {
- KeyedLoadStubCompiler compiler;
- { MaybeObject* maybe_code = compiler.CompileLoadFunctionPrototype(name);
- if (!maybe_code->ToObject(&code)) return maybe_code;
- }
- PROFILE(isolate_,
- CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
- GDBJIT(AddCode(GDBJITInterface::KEYED_LOAD_IC, name, Code::cast(code)));
- Object* result;
- { MaybeObject* maybe_result =
- receiver->UpdateMapCodeCache(name, Code::cast(code));
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- }
+ Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
+ if (probe->IsCode()) return Handle<Code>::cast(probe);
+
+ KeyedLoadStubCompiler compiler(isolate_);
+ Handle<Code> code = compiler.CompileLoadFunctionPrototype(name);
+ PROFILE(isolate_, CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, *code, *name));
+ GDBJIT(AddCode(GDBJITInterface::KEYED_LOAD_IC, *name, *code));
+ JSObject::UpdateMapCodeCache(receiver, name, code);
return code;
}
-MaybeObject* StubCache::ComputeStoreField(String* name,
- JSObject* receiver,
+Handle<Code> StubCache::ComputeStoreField(Handle<String> name,
+ Handle<JSObject> receiver,
int field_index,
- Map* transition,
+ Handle<Map> transition,
StrictModeFlag strict_mode) {
- PropertyType type = (transition == NULL) ? FIELD : MAP_TRANSITION;
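+ // A non-null transition means the store adds a property and installs a
+ // new map, so the stub is typed MAP_TRANSITION rather than FIELD.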
+ PropertyType type = (transition.is_null()) ? FIELD : MAP_TRANSITION;
Code::Flags flags = Code::ComputeMonomorphicFlags(
Code::STORE_IC, type, strict_mode);
- Object* code = receiver->map()->FindInCodeCache(name, flags);
- if (code->IsUndefined()) {
- StoreStubCompiler compiler(strict_mode);
- { MaybeObject* maybe_code =
- compiler.CompileStoreField(receiver, field_index, transition, name);
- if (!maybe_code->ToObject(&code)) return maybe_code;
- }
- PROFILE(isolate_,
- CodeCreateEvent(Logger::STORE_IC_TAG, Code::cast(code), name));
- GDBJIT(AddCode(GDBJITInterface::STORE_IC, name, Code::cast(code)));
- Object* result;
- { MaybeObject* maybe_result =
- receiver->UpdateMapCodeCache(name, Code::cast(code));
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- }
+ Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
+ if (probe->IsCode()) return Handle<Code>::cast(probe);
+
+ StoreStubCompiler compiler(isolate_, strict_mode);
+ Handle<Code> code =
+ compiler.CompileStoreField(receiver, field_index, transition, name);
+ PROFILE(isolate_, CodeCreateEvent(Logger::STORE_IC_TAG, *code, *name));
+ GDBJIT(AddCode(GDBJITInterface::STORE_IC, *name, *code));
+ JSObject::UpdateMapCodeCache(receiver, name, code);
return code;
}
-MaybeObject* StubCache::ComputeKeyedLoadOrStoreElement(
- JSObject* receiver,
- bool is_store,
+Handle<Code> StubCache::ComputeKeyedLoadOrStoreElement(
+ Handle<JSObject> receiver,
+ KeyedIC::StubKind stub_kind,
StrictModeFlag strict_mode) {
Code::Flags flags =
Code::ComputeMonomorphicFlags(
- is_store ? Code::KEYED_STORE_IC :
- Code::KEYED_LOAD_IC,
+ stub_kind == KeyedIC::LOAD ? Code::KEYED_LOAD_IC
+ : Code::KEYED_STORE_IC,
NORMAL,
strict_mode);
- String* name = is_store
- ? isolate()->heap()->KeyedStoreElementMonomorphic_symbol()
- : isolate()->heap()->KeyedLoadElementMonomorphic_symbol();
- Object* maybe_code = receiver->map()->FindInCodeCache(name, flags);
- if (!maybe_code->IsUndefined()) return Code::cast(maybe_code);
-
- MaybeObject* maybe_new_code = NULL;
- Map* receiver_map = receiver->map();
- if (is_store) {
- KeyedStoreStubCompiler compiler(strict_mode);
- maybe_new_code = compiler.CompileStoreElement(receiver_map);
- } else {
- KeyedLoadStubCompiler compiler;
- maybe_new_code = compiler.CompileLoadElement(receiver_map);
+ Handle<String> name;
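+ // Each stub kind is cached under its own symbol so that load and store
+ // entries stay distinct in the receiver map's code cache.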
+ switch (stub_kind) {
+ case KeyedIC::LOAD:
+ name = isolate()->factory()->KeyedLoadElementMonomorphic_symbol();
+ break;
+ case KeyedIC::STORE_NO_TRANSITION:
+ name = isolate()->factory()->KeyedStoreElementMonomorphic_symbol();
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ Handle<Map> receiver_map(receiver->map());
+ Handle<Object> probe(receiver_map->FindInCodeCache(*name, flags));
+ if (probe->IsCode()) return Handle<Code>::cast(probe);
+
+ Handle<Code> code;
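+ // Cache miss: compile the monomorphic element stub that matches the
+ // receiver's map.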
+ switch (stub_kind) {
+ case KeyedIC::LOAD: {
+ KeyedLoadStubCompiler compiler(isolate_);
+ code = compiler.CompileLoadElement(receiver_map);
+ break;
+ }
+ case KeyedIC::STORE_NO_TRANSITION: {
+ KeyedStoreStubCompiler compiler(isolate_, strict_mode);
+ code = compiler.CompileStoreElement(receiver_map);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
}
- Code* code;
- if (!maybe_new_code->To(&code)) return maybe_new_code;
- if (is_store) {
- PROFILE(isolate_,
- CodeCreateEvent(Logger::KEYED_STORE_IC_TAG,
- Code::cast(code), 0));
+
+ ASSERT(!code.is_null());
+
+ if (stub_kind == KeyedIC::LOAD) {
+ PROFILE(isolate_, CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, *code, 0));
} else {
- PROFILE(isolate_,
- CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG,
- Code::cast(code), 0));
- }
- ASSERT(code->IsCode());
- Object* result;
- { MaybeObject* maybe_result =
- receiver->UpdateMapCodeCache(name, Code::cast(code));
- if (!maybe_result->ToObject(&result)) return maybe_result;
+ PROFILE(isolate_, CodeCreateEvent(Logger::KEYED_STORE_IC_TAG, *code, 0));
}
+ JSObject::UpdateMapCodeCache(receiver, name, code);
return code;
}
-MaybeObject* StubCache::ComputeStoreNormal(StrictModeFlag strict_mode) {
- return isolate_->builtins()->builtin((strict_mode == kStrictMode)
- ? Builtins::kStoreIC_Normal_Strict
- : Builtins::kStoreIC_Normal);
+Handle<Code> StubCache::ComputeStoreNormal(StrictModeFlag strict_mode) {
+ return (strict_mode == kStrictMode)
+ ? isolate_->builtins()->Builtins::StoreIC_Normal_Strict()
+ : isolate_->builtins()->Builtins::StoreIC_Normal();
}
-MaybeObject* StubCache::ComputeStoreGlobal(String* name,
- GlobalObject* receiver,
- JSGlobalPropertyCell* cell,
+Handle<Code> StubCache::ComputeStoreGlobal(Handle<String> name,
+ Handle<GlobalObject> receiver,
+ Handle<JSGlobalPropertyCell> cell,
StrictModeFlag strict_mode) {
Code::Flags flags = Code::ComputeMonomorphicFlags(
Code::STORE_IC, NORMAL, strict_mode);
- Object* code = receiver->map()->FindInCodeCache(name, flags);
- if (code->IsUndefined()) {
- StoreStubCompiler compiler(strict_mode);
- { MaybeObject* maybe_code =
- compiler.CompileStoreGlobal(receiver, cell, name);
- if (!maybe_code->ToObject(&code)) return maybe_code;
- }
- PROFILE(isolate_,
- CodeCreateEvent(Logger::STORE_IC_TAG, Code::cast(code), name));
- GDBJIT(AddCode(GDBJITInterface::STORE_IC, name, Code::cast(code)));
- Object* result;
- { MaybeObject* maybe_result =
- receiver->UpdateMapCodeCache(name, Code::cast(code));
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- }
+ Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
+ if (probe->IsCode()) return Handle<Code>::cast(probe);
+
+ StoreStubCompiler compiler(isolate_, strict_mode);
+ Handle<Code> code = compiler.CompileStoreGlobal(receiver, cell, name);
+ PROFILE(isolate_, CodeCreateEvent(Logger::STORE_IC_TAG, *code, *name));
+ GDBJIT(AddCode(GDBJITInterface::STORE_IC, *name, *code));
+ JSObject::UpdateMapCodeCache(receiver, name, code);
return code;
}
-MaybeObject* StubCache::ComputeStoreCallback(
- String* name,
- JSObject* receiver,
- AccessorInfo* callback,
- StrictModeFlag strict_mode) {
+Handle<Code> StubCache::ComputeStoreCallback(Handle<String> name,
+ Handle<JSObject> receiver,
+ Handle<AccessorInfo> callback,
+ StrictModeFlag strict_mode) {
ASSERT(v8::ToCData<Address>(callback->setter()) != 0);
Code::Flags flags = Code::ComputeMonomorphicFlags(
Code::STORE_IC, CALLBACKS, strict_mode);
- Object* code = receiver->map()->FindInCodeCache(name, flags);
- if (code->IsUndefined()) {
- StoreStubCompiler compiler(strict_mode);
- { MaybeObject* maybe_code =
- compiler.CompileStoreCallback(receiver, callback, name);
- if (!maybe_code->ToObject(&code)) return maybe_code;
- }
- PROFILE(isolate_,
- CodeCreateEvent(Logger::STORE_IC_TAG, Code::cast(code), name));
- GDBJIT(AddCode(GDBJITInterface::STORE_IC, name, Code::cast(code)));
- Object* result;
- { MaybeObject* maybe_result =
- receiver->UpdateMapCodeCache(name, Code::cast(code));
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- }
+ Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
+ if (probe->IsCode()) return Handle<Code>::cast(probe);
+
+ StoreStubCompiler compiler(isolate_, strict_mode);
+ Handle<Code> code = compiler.CompileStoreCallback(receiver, callback, name);
+ PROFILE(isolate_, CodeCreateEvent(Logger::STORE_IC_TAG, *code, *name));
+ GDBJIT(AddCode(GDBJITInterface::STORE_IC, *name, *code));
+ JSObject::UpdateMapCodeCache(receiver, name, code);
return code;
}
-MaybeObject* StubCache::ComputeStoreInterceptor(
- String* name,
- JSObject* receiver,
- StrictModeFlag strict_mode) {
+Handle<Code> StubCache::ComputeStoreInterceptor(Handle<String> name,
+ Handle<JSObject> receiver,
+ StrictModeFlag strict_mode) {
Code::Flags flags = Code::ComputeMonomorphicFlags(
Code::STORE_IC, INTERCEPTOR, strict_mode);
- Object* code = receiver->map()->FindInCodeCache(name, flags);
- if (code->IsUndefined()) {
- StoreStubCompiler compiler(strict_mode);
- { MaybeObject* maybe_code =
- compiler.CompileStoreInterceptor(receiver, name);
- if (!maybe_code->ToObject(&code)) return maybe_code;
- }
- PROFILE(isolate_,
- CodeCreateEvent(Logger::STORE_IC_TAG, Code::cast(code), name));
- GDBJIT(AddCode(GDBJITInterface::STORE_IC, name, Code::cast(code)));
- Object* result;
- { MaybeObject* maybe_result =
- receiver->UpdateMapCodeCache(name, Code::cast(code));
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- }
+ Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
+ if (probe->IsCode()) return Handle<Code>::cast(probe);
+
+ StoreStubCompiler compiler(isolate_, strict_mode);
+ Handle<Code> code = compiler.CompileStoreInterceptor(receiver, name);
+ PROFILE(isolate_, CodeCreateEvent(Logger::STORE_IC_TAG, *code, *name));
+ GDBJIT(AddCode(GDBJITInterface::STORE_IC, *name, *code));
+ JSObject::UpdateMapCodeCache(receiver, name, code);
return code;
}
-
-MaybeObject* StubCache::ComputeKeyedStoreField(String* name,
- JSObject* receiver,
+Handle<Code> StubCache::ComputeKeyedStoreField(Handle<String> name,
+ Handle<JSObject> receiver,
int field_index,
- Map* transition,
+ Handle<Map> transition,
StrictModeFlag strict_mode) {
- PropertyType type = (transition == NULL) ? FIELD : MAP_TRANSITION;
+ PropertyType type = (transition.is_null()) ? FIELD : MAP_TRANSITION;
Code::Flags flags = Code::ComputeMonomorphicFlags(
Code::KEYED_STORE_IC, type, strict_mode);
- Object* code = receiver->map()->FindInCodeCache(name, flags);
- if (code->IsUndefined()) {
- KeyedStoreStubCompiler compiler(strict_mode);
- { MaybeObject* maybe_code =
- compiler.CompileStoreField(receiver, field_index, transition, name);
- if (!maybe_code->ToObject(&code)) return maybe_code;
- }
- PROFILE(isolate(),
- CodeCreateEvent(Logger::KEYED_STORE_IC_TAG,
- Code::cast(code), name));
- GDBJIT(AddCode(GDBJITInterface::KEYED_STORE_IC, name, Code::cast(code)));
- Object* result;
- { MaybeObject* maybe_result =
- receiver->UpdateMapCodeCache(name, Code::cast(code));
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- }
+ Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
+ if (probe->IsCode()) return Handle<Code>::cast(probe);
+
+ KeyedStoreStubCompiler compiler(isolate(), strict_mode);
+ Handle<Code> code =
+ compiler.CompileStoreField(receiver, field_index, transition, name);
+ PROFILE(isolate_, CodeCreateEvent(Logger::KEYED_STORE_IC_TAG, *code, *name));
+ GDBJIT(AddCode(GDBJITInterface::KEYED_STORE_IC, *name, *code));
+ JSObject::UpdateMapCodeCache(receiver, name, code);
return code;
}
+
#define CALL_LOGGER_TAG(kind, type) \
(kind == Code::CALL_IC ? Logger::type : Logger::KEYED_##type)
-MaybeObject* StubCache::ComputeCallConstant(int argc,
+Handle<Code> StubCache::ComputeCallConstant(int argc,
Code::Kind kind,
- Code::ExtraICState extra_ic_state,
- String* name,
- Object* object,
- JSObject* holder,
- JSFunction* function) {
+ Code::ExtraICState extra_state,
+ Handle<String> name,
+ Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<JSFunction> function) {
// Compute the check type and the map.
InlineCacheHolderFlag cache_holder =
- IC::GetCodeCacheForObject(object, holder);
- JSObject* map_holder = IC::GetCodeCacheHolder(object, cache_holder);
+ IC::GetCodeCacheForObject(*object, *holder);
+ Handle<JSObject> map_holder(IC::GetCodeCacheHolder(*object, cache_holder));
// Compute check type based on receiver/holder.
CheckType check = RECEIVER_MAP_CHECK;
@@ -673,51 +554,36 @@ MaybeObject* StubCache::ComputeCallConstant(int argc,
check = BOOLEAN_CHECK;
}
- Code::Flags flags = Code::ComputeMonomorphicFlags(kind,
- CONSTANT_FUNCTION,
- extra_ic_state,
- cache_holder,
- argc);
- Object* code = map_holder->map()->FindInCodeCache(name, flags);
- if (code->IsUndefined()) {
- // If the function hasn't been compiled yet, we cannot do it now
- // because it may cause GC. To avoid this issue, we return an
- // internal error which will make sure we do not update any
- // caches.
- if (!function->is_compiled()) return Failure::InternalError();
- // Compile the stub - only create stubs for fully compiled functions.
- CallStubCompiler compiler(argc, kind, extra_ic_state, cache_holder);
- { MaybeObject* maybe_code =
- compiler.CompileCallConstant(object, holder, function, name, check);
- if (!maybe_code->ToObject(&code)) return maybe_code;
- }
- Code::cast(code)->set_check_type(check);
- ASSERT_EQ(flags, Code::cast(code)->flags());
- PROFILE(isolate_,
- CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG),
- Code::cast(code), name));
- GDBJIT(AddCode(GDBJITInterface::CALL_IC, name, Code::cast(code)));
- Object* result;
- { MaybeObject* maybe_result =
- map_holder->UpdateMapCodeCache(name, Code::cast(code));
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- }
+ Code::Flags flags =
+ Code::ComputeMonomorphicFlags(kind, CONSTANT_FUNCTION, extra_state,
+ cache_holder, argc);
+ Handle<Object> probe(map_holder->map()->FindInCodeCache(*name, flags));
+ if (probe->IsCode()) return Handle<Code>::cast(probe);
+
+ CallStubCompiler compiler(isolate_, argc, kind, extra_state, cache_holder);
+ Handle<Code> code =
+ compiler.CompileCallConstant(object, holder, function, name, check);
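+ // Record on the code object which receiver check the stub was
+ // specialized for.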
+ code->set_check_type(check);
+ ASSERT_EQ(flags, code->flags());
+ PROFILE(isolate_,
+ CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG), *code, *name));
+ GDBJIT(AddCode(GDBJITInterface::CALL_IC, *name, *code));
+ JSObject::UpdateMapCodeCache(map_holder, name, code);
return code;
}
-MaybeObject* StubCache::ComputeCallField(int argc,
+Handle<Code> StubCache::ComputeCallField(int argc,
Code::Kind kind,
- Code::ExtraICState extra_ic_state,
- String* name,
- Object* object,
- JSObject* holder,
+ Code::ExtraICState extra_state,
+ Handle<String> name,
+ Handle<Object> object,
+ Handle<JSObject> holder,
int index) {
// Compute the check type and the map.
InlineCacheHolderFlag cache_holder =
- IC::GetCodeCacheForObject(object, holder);
- JSObject* map_holder = IC::GetCodeCacheHolder(object, cache_holder);
+ IC::GetCodeCacheForObject(*object, *holder);
+ Handle<JSObject> map_holder(IC::GetCodeCacheHolder(*object, cache_holder));
// TODO(1233596): We cannot do receiver map check for non-JS objects
// because they may be represented as immediates without a
@@ -726,47 +592,35 @@ MaybeObject* StubCache::ComputeCallField(int argc,
object = holder;
}
- Code::Flags flags = Code::ComputeMonomorphicFlags(kind,
- FIELD,
- extra_ic_state,
- cache_holder,
- argc);
- Object* code = map_holder->map()->FindInCodeCache(name, flags);
- if (code->IsUndefined()) {
- CallStubCompiler compiler(argc, kind, extra_ic_state, cache_holder);
- { MaybeObject* maybe_code =
- compiler.CompileCallField(JSObject::cast(object),
- holder,
- index,
- name);
- if (!maybe_code->ToObject(&code)) return maybe_code;
- }
- ASSERT_EQ(flags, Code::cast(code)->flags());
- PROFILE(isolate_,
- CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG),
- Code::cast(code), name));
- GDBJIT(AddCode(GDBJITInterface::CALL_IC, name, Code::cast(code)));
- Object* result;
- { MaybeObject* maybe_result =
- map_holder->UpdateMapCodeCache(name, Code::cast(code));
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- }
+ Code::Flags flags =
+ Code::ComputeMonomorphicFlags(kind, FIELD, extra_state,
+ cache_holder, argc);
+ Handle<Object> probe(map_holder->map()->FindInCodeCache(*name, flags));
+ if (probe->IsCode()) return Handle<Code>::cast(probe);
+
+ CallStubCompiler compiler(isolate_, argc, kind, extra_state, cache_holder);
+ Handle<Code> code =
+ compiler.CompileCallField(Handle<JSObject>::cast(object),
+ holder, index, name);
+ ASSERT_EQ(flags, code->flags());
+ PROFILE(isolate_,
+ CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG), *code, *name));
+ GDBJIT(AddCode(GDBJITInterface::CALL_IC, *name, *code));
+ JSObject::UpdateMapCodeCache(map_holder, name, code);
return code;
}
-MaybeObject* StubCache::ComputeCallInterceptor(
- int argc,
- Code::Kind kind,
- Code::ExtraICState extra_ic_state,
- String* name,
- Object* object,
- JSObject* holder) {
+Handle<Code> StubCache::ComputeCallInterceptor(int argc,
+ Code::Kind kind,
+ Code::ExtraICState extra_state,
+ Handle<String> name,
+ Handle<Object> object,
+ Handle<JSObject> holder) {
// Compute the check type and the map.
InlineCacheHolderFlag cache_holder =
- IC::GetCodeCacheForObject(object, holder);
- JSObject* map_holder = IC::GetCodeCacheHolder(object, cache_holder);
+ IC::GetCodeCacheForObject(*object, *holder);
+ Handle<JSObject> map_holder(IC::GetCodeCacheHolder(*object, cache_holder));
// TODO(1233596): We cannot do receiver map check for non-JS objects
// because they may be represented as immediates without a
@@ -775,134 +629,61 @@ MaybeObject* StubCache::ComputeCallInterceptor(
object = holder;
}
- Code::Flags flags = Code::ComputeMonomorphicFlags(kind,
- INTERCEPTOR,
- extra_ic_state,
- cache_holder,
- argc);
- Object* code = map_holder->map()->FindInCodeCache(name, flags);
- if (code->IsUndefined()) {
- CallStubCompiler compiler(argc, kind, extra_ic_state, cache_holder);
- { MaybeObject* maybe_code =
- compiler.CompileCallInterceptor(JSObject::cast(object), holder, name);
- if (!maybe_code->ToObject(&code)) return maybe_code;
- }
- ASSERT_EQ(flags, Code::cast(code)->flags());
- PROFILE(isolate(),
- CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG),
- Code::cast(code), name));
- GDBJIT(AddCode(GDBJITInterface::CALL_IC, name, Code::cast(code)));
- Object* result;
- { MaybeObject* maybe_result =
- map_holder->UpdateMapCodeCache(name, Code::cast(code));
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- }
- return code;
-}
-
-
-MaybeObject* StubCache::ComputeCallNormal(int argc,
- Code::Kind kind,
- Code::ExtraICState extra_ic_state,
- String* name,
- JSObject* receiver) {
- Object* code;
- { MaybeObject* maybe_code = ComputeCallNormal(argc, kind, extra_ic_state);
- if (!maybe_code->ToObject(&code)) return maybe_code;
- }
+ Code::Flags flags =
+ Code::ComputeMonomorphicFlags(kind, INTERCEPTOR, extra_state,
+ cache_holder, argc);
+ Handle<Object> probe(map_holder->map()->FindInCodeCache(*name, flags));
+ if (probe->IsCode()) return Handle<Code>::cast(probe);
+
+ CallStubCompiler compiler(isolate(), argc, kind, extra_state, cache_holder);
+ Handle<Code> code =
+ compiler.CompileCallInterceptor(Handle<JSObject>::cast(object),
+ holder, name);
+ ASSERT_EQ(flags, code->flags());
+ PROFILE(isolate(),
+ CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG), *code, *name));
+ GDBJIT(AddCode(GDBJITInterface::CALL_IC, *name, *code));
+ JSObject::UpdateMapCodeCache(map_holder, name, code);
return code;
}
-MaybeObject* StubCache::ComputeCallGlobal(int argc,
+Handle<Code> StubCache::ComputeCallGlobal(int argc,
Code::Kind kind,
- Code::ExtraICState extra_ic_state,
- String* name,
- JSObject* receiver,
- GlobalObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function) {
+ Code::ExtraICState extra_state,
+ Handle<String> name,
+ Handle<JSObject> receiver,
+ Handle<GlobalObject> holder,
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<JSFunction> function) {
InlineCacheHolderFlag cache_holder =
- IC::GetCodeCacheForObject(receiver, holder);
- JSObject* map_holder = IC::GetCodeCacheHolder(receiver, cache_holder);
- Code::Flags flags = Code::ComputeMonomorphicFlags(kind,
- NORMAL,
- extra_ic_state,
- cache_holder,
- argc);
- Object* code = map_holder->map()->FindInCodeCache(name, flags);
- if (code->IsUndefined()) {
- // If the function hasn't been compiled yet, we cannot do it now
- // because it may cause GC. To avoid this issue, we return an
- // internal error which will make sure we do not update any
- // caches.
- if (!function->is_compiled()) return Failure::InternalError();
- CallStubCompiler compiler(argc, kind, extra_ic_state, cache_holder);
- { MaybeObject* maybe_code =
- compiler.CompileCallGlobal(receiver, holder, cell, function, name);
- if (!maybe_code->ToObject(&code)) return maybe_code;
- }
- ASSERT_EQ(flags, Code::cast(code)->flags());
- PROFILE(isolate(),
- CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG),
- Code::cast(code), name));
- GDBJIT(AddCode(GDBJITInterface::CALL_IC, name, Code::cast(code)));
- Object* result;
- { MaybeObject* maybe_result =
- map_holder->UpdateMapCodeCache(name, Code::cast(code));
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- }
+ IC::GetCodeCacheForObject(*receiver, *holder);
+ Handle<JSObject> map_holder(IC::GetCodeCacheHolder(*receiver, cache_holder));
+ Code::Flags flags =
+ Code::ComputeMonomorphicFlags(kind, NORMAL, extra_state,
+ cache_holder, argc);
+ Handle<Object> probe(map_holder->map()->FindInCodeCache(*name, flags));
+ if (probe->IsCode()) return Handle<Code>::cast(probe);
+
+ CallStubCompiler compiler(isolate(), argc, kind, extra_state, cache_holder);
+ Handle<Code> code =
+ compiler.CompileCallGlobal(receiver, holder, cell, function, name);
+ ASSERT_EQ(flags, code->flags());
+ PROFILE(isolate(),
+ CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG), *code, *name));
+ GDBJIT(AddCode(GDBJITInterface::CALL_IC, *name, *code));
+ JSObject::UpdateMapCodeCache(map_holder, name, code);
return code;
}
-static Object* GetProbeValue(Isolate* isolate, Code::Flags flags) {
- // Use raw_unchecked... so we don't get assert failures during GC.
- NumberDictionary* dictionary =
- isolate->heap()->raw_unchecked_non_monomorphic_cache();
- int entry = dictionary->FindEntry(isolate, flags);
- if (entry != -1) return dictionary->ValueAt(entry);
- return isolate->heap()->raw_unchecked_undefined_value();
-}
-
-
-MUST_USE_RESULT static MaybeObject* ProbeCache(Isolate* isolate,
- Code::Flags flags) {
- Heap* heap = isolate->heap();
- Object* probe = GetProbeValue(isolate, flags);
- if (probe != heap->undefined_value()) return probe;
- // Seed the cache with an undefined value to make sure that any
- // generated code object can always be inserted into the cache
- // without causing allocation failures.
- Object* result;
- { MaybeObject* maybe_result =
- heap->non_monomorphic_cache()->AtNumberPut(flags,
- heap->undefined_value());
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- heap->public_set_non_monomorphic_cache(NumberDictionary::cast(result));
- return probe;
-}
-
-
-static MaybeObject* FillCache(Isolate* isolate, MaybeObject* maybe_code) {
- Object* code;
- if (maybe_code->ToObject(&code)) {
- if (code->IsCode()) {
- Heap* heap = isolate->heap();
- int entry = heap->non_monomorphic_cache()->FindEntry(
- Code::cast(code)->flags());
- // The entry must be present see comment in ProbeCache.
- ASSERT(entry != -1);
- ASSERT(heap->non_monomorphic_cache()->ValueAt(entry) ==
- heap->undefined_value());
- heap->non_monomorphic_cache()->ValueAtPut(entry, code);
- CHECK(GetProbeValue(isolate, Code::cast(code)->flags()) == code);
- }
- }
- return maybe_code;
+static void FillCache(Isolate* isolate, Handle<Code> code) {
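+ // Enter the code in the non-monomorphic cache under its flags. The set
+ // operation may reallocate the dictionary, so the heap's reference is
+ // refreshed with the handle it returns.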
+ Handle<NumberDictionary> dictionary =
+ NumberDictionarySet(isolate->factory()->non_monomorphic_cache(),
+ code->flags(),
+ code,
+ PropertyDetails(NONE, NORMAL));
+ isolate->heap()->public_set_non_monomorphic_cache(*dictionary);
}
@@ -912,202 +693,184 @@ Code* StubCache::FindCallInitialize(int argc,
Code::ExtraICState extra_state =
CallICBase::StringStubState::encode(DEFAULT_STRING_STUB) |
CallICBase::Contextual::encode(mode == RelocInfo::CODE_TARGET_CONTEXT);
- Code::Flags flags = Code::ComputeFlags(kind,
- UNINITIALIZED,
- extra_state,
- NORMAL,
- argc);
- Object* result = ProbeCache(isolate(), flags)->ToObjectUnchecked();
- ASSERT(result != heap()->undefined_value());
+ Code::Flags flags =
+ Code::ComputeFlags(kind, UNINITIALIZED, extra_state, NORMAL, argc);
+
+ // Use raw_unchecked... so we don't get assert failures during GC.
+ NumberDictionary* dictionary =
+ isolate()->heap()->raw_unchecked_non_monomorphic_cache();
+ int entry = dictionary->FindEntry(isolate(), flags);
+ ASSERT(entry != -1);
+ Object* code = dictionary->ValueAt(entry);
// This might be called during the marking phase of the collector
// hence the unchecked cast.
- return reinterpret_cast<Code*>(result);
+ return reinterpret_cast<Code*>(code);
}
-MaybeObject* StubCache::ComputeCallInitialize(int argc,
+Handle<Code> StubCache::ComputeCallInitialize(int argc,
RelocInfo::Mode mode,
Code::Kind kind) {
Code::ExtraICState extra_state =
CallICBase::StringStubState::encode(DEFAULT_STRING_STUB) |
CallICBase::Contextual::encode(mode == RelocInfo::CODE_TARGET_CONTEXT);
- Code::Flags flags = Code::ComputeFlags(kind,
- UNINITIALIZED,
- extra_state,
- NORMAL,
- argc);
- Object* probe;
- { MaybeObject* maybe_probe = ProbeCache(isolate_, flags);
- if (!maybe_probe->ToObject(&probe)) return maybe_probe;
- }
- if (!probe->IsUndefined()) return probe;
- StubCompiler compiler;
- return FillCache(isolate_, compiler.CompileCallInitialize(flags));
+ Code::Flags flags =
+ Code::ComputeFlags(kind, UNINITIALIZED, extra_state, NORMAL, argc);
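+ // Non-monomorphic stubs are keyed purely by their flags and shared through
+ // the non_monomorphic_cache dictionary instead of a map's code cache.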
+ Handle<NumberDictionary> cache = isolate_->factory()->non_monomorphic_cache();
+ int entry = cache->FindEntry(isolate_, flags);
+ if (entry != -1) return Handle<Code>(Code::cast(cache->ValueAt(entry)));
+
+ StubCompiler compiler(isolate_);
+ Handle<Code> code = compiler.CompileCallInitialize(flags);
+ FillCache(isolate_, code);
+ return code;
}
-Handle<Code> StubCache::ComputeCallInitialize(int argc,
- RelocInfo::Mode mode) {
- CALL_HEAP_FUNCTION(isolate_,
- ComputeCallInitialize(argc, mode, Code::CALL_IC),
- Code);
+Handle<Code> StubCache::ComputeCallInitialize(int argc, RelocInfo::Mode mode) {
+ return ComputeCallInitialize(argc, mode, Code::CALL_IC);
}
Handle<Code> StubCache::ComputeKeyedCallInitialize(int argc) {
- CALL_HEAP_FUNCTION(
- isolate_,
- ComputeCallInitialize(argc, RelocInfo::CODE_TARGET, Code::KEYED_CALL_IC),
- Code);
+ return ComputeCallInitialize(argc, RelocInfo::CODE_TARGET,
+ Code::KEYED_CALL_IC);
}
-MaybeObject* StubCache::ComputeCallPreMonomorphic(
+Handle<Code> StubCache::ComputeCallPreMonomorphic(
int argc,
Code::Kind kind,
- Code::ExtraICState extra_ic_state) {
- Code::Flags flags = Code::ComputeFlags(kind,
- PREMONOMORPHIC,
- extra_ic_state,
- NORMAL,
- argc);
- Object* probe;
- { MaybeObject* maybe_probe = ProbeCache(isolate_, flags);
- if (!maybe_probe->ToObject(&probe)) return maybe_probe;
- }
- if (!probe->IsUndefined()) return probe;
- StubCompiler compiler;
- return FillCache(isolate_, compiler.CompileCallPreMonomorphic(flags));
+ Code::ExtraICState extra_state) {
+ Code::Flags flags =
+ Code::ComputeFlags(kind, PREMONOMORPHIC, extra_state, NORMAL, argc);
+ Handle<NumberDictionary> cache = isolate_->factory()->non_monomorphic_cache();
+ int entry = cache->FindEntry(isolate_, flags);
+ if (entry != -1) return Handle<Code>(Code::cast(cache->ValueAt(entry)));
+
+ StubCompiler compiler(isolate_);
+ Handle<Code> code = compiler.CompileCallPreMonomorphic(flags);
+ FillCache(isolate_, code);
+ return code;
}
-MaybeObject* StubCache::ComputeCallNormal(int argc,
+Handle<Code> StubCache::ComputeCallNormal(int argc,
Code::Kind kind,
- Code::ExtraICState extra_ic_state) {
- Code::Flags flags = Code::ComputeFlags(kind,
- MONOMORPHIC,
- extra_ic_state,
- NORMAL,
- argc);
- Object* probe;
- { MaybeObject* maybe_probe = ProbeCache(isolate_, flags);
- if (!maybe_probe->ToObject(&probe)) return maybe_probe;
- }
- if (!probe->IsUndefined()) return probe;
- StubCompiler compiler;
- return FillCache(isolate_, compiler.CompileCallNormal(flags));
+ Code::ExtraICState extra_state) {
+ Code::Flags flags =
+ Code::ComputeFlags(kind, MONOMORPHIC, extra_state, NORMAL, argc);
+ Handle<NumberDictionary> cache = isolate_->factory()->non_monomorphic_cache();
+ int entry = cache->FindEntry(isolate_, flags);
+ if (entry != -1) return Handle<Code>(Code::cast(cache->ValueAt(entry)));
+
+ StubCompiler compiler(isolate_);
+ Handle<Code> code = compiler.CompileCallNormal(flags);
+ FillCache(isolate_, code);
+ return code;
}
-MaybeObject* StubCache::ComputeCallArguments(int argc, Code::Kind kind) {
+Handle<Code> StubCache::ComputeCallArguments(int argc, Code::Kind kind) {
ASSERT(kind == Code::KEYED_CALL_IC);
- Code::Flags flags = Code::ComputeFlags(kind,
- MEGAMORPHIC,
- Code::kNoExtraICState,
- NORMAL,
- argc);
- Object* probe;
- { MaybeObject* maybe_probe = ProbeCache(isolate_, flags);
- if (!maybe_probe->ToObject(&probe)) return maybe_probe;
- }
- if (!probe->IsUndefined()) return probe;
- StubCompiler compiler;
- return FillCache(isolate_, compiler.CompileCallArguments(flags));
+ Code::Flags flags =
+ Code::ComputeFlags(kind, MEGAMORPHIC, Code::kNoExtraICState,
+ NORMAL, argc);
+ Handle<NumberDictionary> cache = isolate_->factory()->non_monomorphic_cache();
+ int entry = cache->FindEntry(isolate_, flags);
+ if (entry != -1) return Handle<Code>(Code::cast(cache->ValueAt(entry)));
+
+ StubCompiler compiler(isolate_);
+ Handle<Code> code = compiler.CompileCallArguments(flags);
+ FillCache(isolate_, code);
+ return code;
}
-MaybeObject* StubCache::ComputeCallMegamorphic(
+Handle<Code> StubCache::ComputeCallMegamorphic(
int argc,
Code::Kind kind,
- Code::ExtraICState extra_ic_state) {
- Code::Flags flags = Code::ComputeFlags(kind,
- MEGAMORPHIC,
- extra_ic_state,
- NORMAL,
- argc);
- Object* probe;
- { MaybeObject* maybe_probe = ProbeCache(isolate_, flags);
- if (!maybe_probe->ToObject(&probe)) return maybe_probe;
- }
- if (!probe->IsUndefined()) return probe;
- StubCompiler compiler;
- return FillCache(isolate_, compiler.CompileCallMegamorphic(flags));
+ Code::ExtraICState extra_state) {
+ Code::Flags flags =
+ Code::ComputeFlags(kind, MEGAMORPHIC, extra_state,
+ NORMAL, argc);
+ Handle<NumberDictionary> cache = isolate_->factory()->non_monomorphic_cache();
+ int entry = cache->FindEntry(isolate_, flags);
+ if (entry != -1) return Handle<Code>(Code::cast(cache->ValueAt(entry)));
+
+ StubCompiler compiler(isolate_);
+ Handle<Code> code = compiler.CompileCallMegamorphic(flags);
+ FillCache(isolate_, code);
+ return code;
}
-MaybeObject* StubCache::ComputeCallMiss(int argc,
+Handle<Code> StubCache::ComputeCallMiss(int argc,
Code::Kind kind,
- Code::ExtraICState extra_ic_state) {
+ Code::ExtraICState extra_state) {
// MONOMORPHIC_PROTOTYPE_FAILURE state is used to make sure that miss stubs
// and monomorphic stubs are not mixed up together in the stub cache.
- Code::Flags flags = Code::ComputeFlags(kind,
- MONOMORPHIC_PROTOTYPE_FAILURE,
- extra_ic_state,
- NORMAL,
- argc,
- OWN_MAP);
- Object* probe;
- { MaybeObject* maybe_probe = ProbeCache(isolate_, flags);
- if (!maybe_probe->ToObject(&probe)) return maybe_probe;
- }
- if (!probe->IsUndefined()) return probe;
- StubCompiler compiler;
- return FillCache(isolate_, compiler.CompileCallMiss(flags));
+ Code::Flags flags =
+ Code::ComputeFlags(kind, MONOMORPHIC_PROTOTYPE_FAILURE, extra_state,
+ NORMAL, argc, OWN_MAP);
+ Handle<NumberDictionary> cache = isolate_->factory()->non_monomorphic_cache();
+ int entry = cache->FindEntry(isolate_, flags);
+ if (entry != -1) return Handle<Code>(Code::cast(cache->ValueAt(entry)));
+
+ StubCompiler compiler(isolate_);
+ Handle<Code> code = compiler.CompileCallMiss(flags);
+ FillCache(isolate_, code);
+ return code;
}
#ifdef ENABLE_DEBUGGER_SUPPORT
-MaybeObject* StubCache::ComputeCallDebugBreak(
- int argc,
- Code::Kind kind) {
+Handle<Code> StubCache::ComputeCallDebugBreak(int argc,
+ Code::Kind kind) {
// Extra IC state is irrelevant for debug break ICs. They jump to
// the actual call ic to carry out the work.
- Code::Flags flags = Code::ComputeFlags(kind,
- DEBUG_BREAK,
- Code::kNoExtraICState,
- NORMAL,
- argc);
- Object* probe;
- { MaybeObject* maybe_probe = ProbeCache(isolate_, flags);
- if (!maybe_probe->ToObject(&probe)) return maybe_probe;
- }
- if (!probe->IsUndefined()) return probe;
- StubCompiler compiler;
- return FillCache(isolate_, compiler.CompileCallDebugBreak(flags));
+ Code::Flags flags =
+ Code::ComputeFlags(kind, DEBUG_BREAK, Code::kNoExtraICState,
+ NORMAL, argc);
+ Handle<NumberDictionary> cache = isolate_->factory()->non_monomorphic_cache();
+ int entry = cache->FindEntry(isolate_, flags);
+ if (entry != -1) return Handle<Code>(Code::cast(cache->ValueAt(entry)));
+
+ StubCompiler compiler(isolate_);
+ Handle<Code> code = compiler.CompileCallDebugBreak(flags);
+ FillCache(isolate_, code);
+ return code;
}
-MaybeObject* StubCache::ComputeCallDebugPrepareStepIn(
- int argc,
- Code::Kind kind) {
+Handle<Code> StubCache::ComputeCallDebugPrepareStepIn(int argc,
+ Code::Kind kind) {
// Extra IC state is irrelevant for debug break ICs. They jump to
// the actual call ic to carry out the work.
- Code::Flags flags = Code::ComputeFlags(kind,
- DEBUG_PREPARE_STEP_IN,
- Code::kNoExtraICState,
- NORMAL,
- argc);
- Object* probe;
- { MaybeObject* maybe_probe = ProbeCache(isolate_, flags);
- if (!maybe_probe->ToObject(&probe)) return maybe_probe;
- }
- if (!probe->IsUndefined()) return probe;
- StubCompiler compiler;
- return FillCache(isolate_, compiler.CompileCallDebugPrepareStepIn(flags));
+ Code::Flags flags =
+ Code::ComputeFlags(kind, DEBUG_PREPARE_STEP_IN, Code::kNoExtraICState,
+ NORMAL, argc);
+ Handle<NumberDictionary> cache = isolate_->factory()->non_monomorphic_cache();
+ int entry = cache->FindEntry(isolate_, flags);
+ if (entry != -1) return Handle<Code>(Code::cast(cache->ValueAt(entry)));
+
+ StubCompiler compiler(isolate_);
+ Handle<Code> code = compiler.CompileCallDebugPrepareStepIn(flags);
+ FillCache(isolate_, code);
+ return code;
}
#endif
void StubCache::Clear() {
+ Code* empty = isolate_->builtins()->builtin(Builtins::kIllegal);
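+ // Look the illegal builtin up once and reset every entry to it, so no
+ // stale stub can be hit after the cache is cleared.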
for (int i = 0; i < kPrimaryTableSize; i++) {
primary_[i].key = heap()->empty_string();
- primary_[i].value = isolate_->builtins()->builtin(
- Builtins::kIllegal);
+ primary_[i].value = empty;
}
for (int j = 0; j < kSecondaryTableSize; j++) {
secondary_[j].key = heap()->empty_string();
- secondary_[j].value = isolate_->builtins()->builtin(
- Builtins::kIllegal);
+ secondary_[j].value = empty;
}
}
@@ -1341,8 +1104,8 @@ RUNTIME_FUNCTION(MaybeObject*, StoreInterceptorProperty) {
JSObject* recv = JSObject::cast(args[0]);
String* name = String::cast(args[1]);
Object* value = args[2];
+ ASSERT(args.smi_at(3) == kStrictMode || args.smi_at(3) == kNonStrictMode);
StrictModeFlag strict_mode = static_cast<StrictModeFlag>(args.smi_at(3));
- ASSERT(strict_mode == kStrictMode || strict_mode == kNonStrictMode);
ASSERT(recv->HasNamedInterceptor());
PropertyAttributes attr = NONE;
MaybeObject* result = recv->SetPropertyWithInterceptor(
@@ -1359,62 +1122,47 @@ RUNTIME_FUNCTION(MaybeObject*, KeyedLoadPropertyWithInterceptor) {
}
-MaybeObject* StubCompiler::CompileCallInitialize(Code::Flags flags) {
- HandleScope scope(isolate());
+Handle<Code> StubCompiler::CompileCallInitialize(Code::Flags flags) {
int argc = Code::ExtractArgumentsCountFromFlags(flags);
Code::Kind kind = Code::ExtractKindFromFlags(flags);
- Code::ExtraICState extra_ic_state = Code::ExtractExtraICStateFromFlags(flags);
+ Code::ExtraICState extra_state = Code::ExtractExtraICStateFromFlags(flags);
if (kind == Code::CALL_IC) {
- CallIC::GenerateInitialize(masm(), argc, extra_ic_state);
+ CallIC::GenerateInitialize(masm(), argc, extra_state);
} else {
KeyedCallIC::GenerateInitialize(masm(), argc);
}
- Object* result;
- { MaybeObject* maybe_result =
- GetCodeWithFlags(flags, "CompileCallInitialize");
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
+ Handle<Code> code = GetCodeWithFlags(flags, "CompileCallInitialize");
isolate()->counters()->call_initialize_stubs()->Increment();
- Code* code = Code::cast(result);
- USE(code);
PROFILE(isolate(),
CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_INITIALIZE_TAG),
- code, code->arguments_count()));
- GDBJIT(AddCode(GDBJITInterface::CALL_INITIALIZE, Code::cast(code)));
- return result;
+ *code, code->arguments_count()));
+ GDBJIT(AddCode(GDBJITInterface::CALL_INITIALIZE, *code));
+ return code;
}
-MaybeObject* StubCompiler::CompileCallPreMonomorphic(Code::Flags flags) {
- HandleScope scope(isolate());
+Handle<Code> StubCompiler::CompileCallPreMonomorphic(Code::Flags flags) {
int argc = Code::ExtractArgumentsCountFromFlags(flags);
// The code of the PreMonomorphic stub is the same as the code
// of the Initialized stub. They just differ on the code object flags.
Code::Kind kind = Code::ExtractKindFromFlags(flags);
- Code::ExtraICState extra_ic_state = Code::ExtractExtraICStateFromFlags(flags);
+ Code::ExtraICState extra_state = Code::ExtractExtraICStateFromFlags(flags);
if (kind == Code::CALL_IC) {
- CallIC::GenerateInitialize(masm(), argc, extra_ic_state);
+ CallIC::GenerateInitialize(masm(), argc, extra_state);
} else {
KeyedCallIC::GenerateInitialize(masm(), argc);
}
- Object* result;
- { MaybeObject* maybe_result =
- GetCodeWithFlags(flags, "CompileCallPreMonomorphic");
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
+ Handle<Code> code = GetCodeWithFlags(flags, "CompileCallPreMonomorphic");
isolate()->counters()->call_premonomorphic_stubs()->Increment();
- Code* code = Code::cast(result);
- USE(code);
PROFILE(isolate(),
CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_PRE_MONOMORPHIC_TAG),
- code, code->arguments_count()));
- GDBJIT(AddCode(GDBJITInterface::CALL_PRE_MONOMORPHIC, Code::cast(code)));
- return result;
+ *code, code->arguments_count()));
+ GDBJIT(AddCode(GDBJITInterface::CALL_PRE_MONOMORPHIC, *code));
+ return code;
}
-MaybeObject* StubCompiler::CompileCallNormal(Code::Flags flags) {
- HandleScope scope(isolate());
+Handle<Code> StubCompiler::CompileCallNormal(Code::Flags flags) {
int argc = Code::ExtractArgumentsCountFromFlags(flags);
Code::Kind kind = Code::ExtractKindFromFlags(flags);
if (kind == Code::CALL_IC) {
@@ -1425,116 +1173,82 @@ MaybeObject* StubCompiler::CompileCallNormal(Code::Flags flags) {
} else {
KeyedCallIC::GenerateNormal(masm(), argc);
}
- Object* result;
- { MaybeObject* maybe_result = GetCodeWithFlags(flags, "CompileCallNormal");
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
+ Handle<Code> code = GetCodeWithFlags(flags, "CompileCallNormal");
isolate()->counters()->call_normal_stubs()->Increment();
- Code* code = Code::cast(result);
- USE(code);
PROFILE(isolate(),
CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_NORMAL_TAG),
- code, code->arguments_count()));
- GDBJIT(AddCode(GDBJITInterface::CALL_NORMAL, Code::cast(code)));
- return result;
+ *code, code->arguments_count()));
+ GDBJIT(AddCode(GDBJITInterface::CALL_NORMAL, *code));
+ return code;
}
-MaybeObject* StubCompiler::CompileCallMegamorphic(Code::Flags flags) {
- HandleScope scope(isolate());
+Handle<Code> StubCompiler::CompileCallMegamorphic(Code::Flags flags) {
int argc = Code::ExtractArgumentsCountFromFlags(flags);
Code::Kind kind = Code::ExtractKindFromFlags(flags);
- Code::ExtraICState extra_ic_state = Code::ExtractExtraICStateFromFlags(flags);
+ Code::ExtraICState extra_state = Code::ExtractExtraICStateFromFlags(flags);
if (kind == Code::CALL_IC) {
- CallIC::GenerateMegamorphic(masm(), argc, extra_ic_state);
+ CallIC::GenerateMegamorphic(masm(), argc, extra_state);
} else {
KeyedCallIC::GenerateMegamorphic(masm(), argc);
}
- Object* result;
- { MaybeObject* maybe_result =
- GetCodeWithFlags(flags, "CompileCallMegamorphic");
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
+ Handle<Code> code = GetCodeWithFlags(flags, "CompileCallMegamorphic");
isolate()->counters()->call_megamorphic_stubs()->Increment();
- Code* code = Code::cast(result);
- USE(code);
PROFILE(isolate(),
CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_MEGAMORPHIC_TAG),
- code, code->arguments_count()));
- GDBJIT(AddCode(GDBJITInterface::CALL_MEGAMORPHIC, Code::cast(code)));
- return result;
+ *code, code->arguments_count()));
+ GDBJIT(AddCode(GDBJITInterface::CALL_MEGAMORPHIC, *code));
+ return code;
}
-MaybeObject* StubCompiler::CompileCallArguments(Code::Flags flags) {
- HandleScope scope(isolate());
+Handle<Code> StubCompiler::CompileCallArguments(Code::Flags flags) {
int argc = Code::ExtractArgumentsCountFromFlags(flags);
KeyedCallIC::GenerateNonStrictArguments(masm(), argc);
- Code::Kind kind = Code::ExtractKindFromFlags(flags);
- Object* result;
- { MaybeObject* maybe_result =
- GetCodeWithFlags(flags, "CompileCallArguments");
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- Code* code = Code::cast(result);
- USE(code);
+ Handle<Code> code = GetCodeWithFlags(flags, "CompileCallArguments");
PROFILE(isolate(),
- CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_MEGAMORPHIC_TAG),
- code, code->arguments_count()));
- GDBJIT(AddCode(GDBJITInterface::CALL_MEGAMORPHIC, Code::cast(code)));
- return result;
+ CodeCreateEvent(CALL_LOGGER_TAG(Code::ExtractKindFromFlags(flags),
+ CALL_MEGAMORPHIC_TAG),
+ *code, code->arguments_count()));
+ GDBJIT(AddCode(GDBJITInterface::CALL_MEGAMORPHIC, *code));
+ return code;
}
-MaybeObject* StubCompiler::CompileCallMiss(Code::Flags flags) {
- HandleScope scope(isolate());
+Handle<Code> StubCompiler::CompileCallMiss(Code::Flags flags) {
int argc = Code::ExtractArgumentsCountFromFlags(flags);
Code::Kind kind = Code::ExtractKindFromFlags(flags);
- Code::ExtraICState extra_ic_state = Code::ExtractExtraICStateFromFlags(flags);
+ Code::ExtraICState extra_state = Code::ExtractExtraICStateFromFlags(flags);
if (kind == Code::CALL_IC) {
- CallIC::GenerateMiss(masm(), argc, extra_ic_state);
+ CallIC::GenerateMiss(masm(), argc, extra_state);
} else {
KeyedCallIC::GenerateMiss(masm(), argc);
}
- Object* result;
- { MaybeObject* maybe_result = GetCodeWithFlags(flags, "CompileCallMiss");
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
+ Handle<Code> code = GetCodeWithFlags(flags, "CompileCallMiss");
isolate()->counters()->call_megamorphic_stubs()->Increment();
- Code* code = Code::cast(result);
- USE(code);
PROFILE(isolate(),
CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_MISS_TAG),
- code, code->arguments_count()));
- GDBJIT(AddCode(GDBJITInterface::CALL_MISS, Code::cast(code)));
- return result;
+ *code, code->arguments_count()));
+ GDBJIT(AddCode(GDBJITInterface::CALL_MISS, *code));
+ return code;
}
#ifdef ENABLE_DEBUGGER_SUPPORT
-MaybeObject* StubCompiler::CompileCallDebugBreak(Code::Flags flags) {
- HandleScope scope(isolate());
+Handle<Code> StubCompiler::CompileCallDebugBreak(Code::Flags flags) {
Debug::GenerateCallICDebugBreak(masm());
- Object* result;
- { MaybeObject* maybe_result =
- GetCodeWithFlags(flags, "CompileCallDebugBreak");
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- Code* code = Code::cast(result);
- USE(code);
- Code::Kind kind = Code::ExtractKindFromFlags(flags);
- USE(kind);
+ Handle<Code> code = GetCodeWithFlags(flags, "CompileCallDebugBreak");
PROFILE(isolate(),
- CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_DEBUG_BREAK_TAG),
- code, code->arguments_count()));
- return result;
+ CodeCreateEvent(CALL_LOGGER_TAG(Code::ExtractKindFromFlags(flags),
+ CALL_DEBUG_BREAK_TAG),
+ *code, code->arguments_count()));
+ return code;
}
-MaybeObject* StubCompiler::CompileCallDebugPrepareStepIn(Code::Flags flags) {
- HandleScope scope(isolate());
- // Use the same code for the the step in preparations as we do for
- // the miss case.
+Handle<Code> StubCompiler::CompileCallDebugPrepareStepIn(Code::Flags flags) {
+ // Use the same code for the step-in preparations as we do for the
+ // miss case.
int argc = Code::ExtractArgumentsCountFromFlags(flags);
Code::Kind kind = Code::ExtractKindFromFlags(flags);
if (kind == Code::CALL_IC) {
@@ -1543,133 +1257,94 @@ MaybeObject* StubCompiler::CompileCallDebugPrepareStepIn(Code::Flags flags) {
} else {
KeyedCallIC::GenerateMiss(masm(), argc);
}
- Object* result;
- { MaybeObject* maybe_result =
- GetCodeWithFlags(flags, "CompileCallDebugPrepareStepIn");
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- Code* code = Code::cast(result);
- USE(code);
+ Handle<Code> code = GetCodeWithFlags(flags, "CompileCallDebugPrepareStepIn");
PROFILE(isolate(),
CodeCreateEvent(
CALL_LOGGER_TAG(kind, CALL_DEBUG_PREPARE_STEP_IN_TAG),
- code,
+ *code,
code->arguments_count()));
- return result;
+ return code;
}
-#endif
+#endif // ENABLE_DEBUGGER_SUPPORT
#undef CALL_LOGGER_TAG
-MaybeObject* StubCompiler::GetCodeWithFlags(Code::Flags flags,
- const char* name) {
- // Check for allocation failures during stub compilation.
- if (failure_->IsFailure()) return failure_;
+Handle<Code> StubCompiler::GetCodeWithFlags(Code::Flags flags,
+ const char* name) {
// Create code object in the heap.
CodeDesc desc;
masm_.GetCode(&desc);
- MaybeObject* result = heap()->CreateCode(desc, flags, masm_.CodeObject());
+ Handle<Code> code = factory()->NewCode(desc, flags, masm_.CodeObject());
#ifdef ENABLE_DISASSEMBLER
- if (FLAG_print_code_stubs && !result->IsFailure()) {
- Code::cast(result->ToObjectUnchecked())->Disassemble(name);
- }
+ if (FLAG_print_code_stubs) code->Disassemble(name);
#endif
- return result;
+ return code;
}
-MaybeObject* StubCompiler::GetCodeWithFlags(Code::Flags flags, String* name) {
- if (FLAG_print_code_stubs && (name != NULL)) {
- return GetCodeWithFlags(flags, *name->ToCString());
- }
- return GetCodeWithFlags(flags, reinterpret_cast<char*>(NULL));
+Handle<Code> StubCompiler::GetCodeWithFlags(Code::Flags flags,
+ Handle<String> name) {
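+ // The name is only used to label the disassembly when --print-code-stubs
+ // is enabled.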
+ return (FLAG_print_code_stubs && !name.is_null())
+ ? GetCodeWithFlags(flags, *name->ToCString())
+ : GetCodeWithFlags(flags, reinterpret_cast<char*>(NULL));
}
-void StubCompiler::LookupPostInterceptor(JSObject* holder,
- String* name,
+void StubCompiler::LookupPostInterceptor(Handle<JSObject> holder,
+ Handle<String> name,
LookupResult* lookup) {
- holder->LocalLookupRealNamedProperty(name, lookup);
- if (!lookup->IsProperty()) {
- lookup->NotFound();
- Object* proto = holder->GetPrototype();
- if (!proto->IsNull()) {
- proto->Lookup(name, lookup);
- }
- }
-}
+ holder->LocalLookupRealNamedProperty(*name, lookup);
+ if (lookup->IsProperty()) return;
+
+ lookup->NotFound();
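+ // Nothing was found on the holder itself; continue the lookup on the
+ // prototype chain if there is one.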
+ if (holder->GetPrototype()->IsNull()) return;
+ holder->GetPrototype()->Lookup(*name, lookup);
+}
-MaybeObject* LoadStubCompiler::GetCode(PropertyType type, String* name) {
+Handle<Code> LoadStubCompiler::GetCode(PropertyType type, Handle<String> name) {
Code::Flags flags = Code::ComputeMonomorphicFlags(Code::LOAD_IC, type);
- MaybeObject* result = GetCodeWithFlags(flags, name);
- if (!result->IsFailure()) {
- PROFILE(isolate(),
- CodeCreateEvent(Logger::LOAD_IC_TAG,
- Code::cast(result->ToObjectUnchecked()),
- name));
- GDBJIT(AddCode(GDBJITInterface::LOAD_IC,
- name,
- Code::cast(result->ToObjectUnchecked())));
- }
- return result;
+ Handle<Code> code = GetCodeWithFlags(flags, name);
+ PROFILE(isolate(), CodeCreateEvent(Logger::LOAD_IC_TAG, *code, *name));
+ GDBJIT(AddCode(GDBJITInterface::LOAD_IC, *name, *code));
+ return code;
}
-MaybeObject* KeyedLoadStubCompiler::GetCode(PropertyType type,
- String* name,
+Handle<Code> KeyedLoadStubCompiler::GetCode(PropertyType type,
+ Handle<String> name,
InlineCacheState state) {
Code::Flags flags = Code::ComputeFlags(
Code::KEYED_LOAD_IC, state, Code::kNoExtraICState, type);
- MaybeObject* result = GetCodeWithFlags(flags, name);
- if (!result->IsFailure()) {
- PROFILE(isolate(),
- CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG,
- Code::cast(result->ToObjectUnchecked()),
- name));
- GDBJIT(AddCode(GDBJITInterface::LOAD_IC,
- name,
- Code::cast(result->ToObjectUnchecked())));
- }
- return result;
+ Handle<Code> code = GetCodeWithFlags(flags, name);
+ PROFILE(isolate(), CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, *code, *name));
+ GDBJIT(AddCode(GDBJITInterface::LOAD_IC, *name, *code));
+ return code;
}
-MaybeObject* StoreStubCompiler::GetCode(PropertyType type, String* name) {
+Handle<Code> StoreStubCompiler::GetCode(PropertyType type,
+ Handle<String> name) {
Code::Flags flags =
Code::ComputeMonomorphicFlags(Code::STORE_IC, type, strict_mode_);
- MaybeObject* result = GetCodeWithFlags(flags, name);
- if (!result->IsFailure()) {
- PROFILE(isolate(),
- CodeCreateEvent(Logger::STORE_IC_TAG,
- Code::cast(result->ToObjectUnchecked()),
- name));
- GDBJIT(AddCode(GDBJITInterface::STORE_IC,
- name,
- Code::cast(result->ToObjectUnchecked())));
- }
- return result;
+ Handle<Code> code = GetCodeWithFlags(flags, name);
+ PROFILE(isolate(), CodeCreateEvent(Logger::STORE_IC_TAG, *code, *name));
+ GDBJIT(AddCode(GDBJITInterface::STORE_IC, *name, *code));
+ return code;
}
-MaybeObject* KeyedStoreStubCompiler::GetCode(PropertyType type,
- String* name,
+Handle<Code> KeyedStoreStubCompiler::GetCode(PropertyType type,
+ Handle<String> name,
InlineCacheState state) {
Code::Flags flags =
Code::ComputeFlags(Code::KEYED_STORE_IC, state, strict_mode_, type);
- MaybeObject* result = GetCodeWithFlags(flags, name);
- if (!result->IsFailure()) {
- PROFILE(isolate(),
- CodeCreateEvent(Logger::KEYED_STORE_IC_TAG,
- Code::cast(result->ToObjectUnchecked()),
- name));
- GDBJIT(AddCode(GDBJITInterface::KEYED_STORE_IC,
- name,
- Code::cast(result->ToObjectUnchecked())));
- }
- return result;
+ Handle<Code> code = GetCodeWithFlags(flags, name);
+ PROFILE(isolate(), CodeCreateEvent(Logger::KEYED_STORE_IC_TAG, *code, *name));
+ GDBJIT(AddCode(GDBJITInterface::KEYED_STORE_IC, *name, *code));
+ return code;
}
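The transformation above is the pattern for the whole file: the old API returned MaybeObject* and forced every caller to test for allocation failure before casting to Code, while the handlified API returns Handle<Code> and reserves the empty handle for the documented "no code" cases. A standalone sketch of the two calling conventions, using toy stand-ins rather than the real V8 classes:

#include <cstdio>

// Toy stand-ins for V8's MaybeObject* / Handle<T>; illustrative only.
struct Code { const char* name; };

struct MaybeCode {             // old style: value or failure in one result
  Code* value;                 // NULL plays the role of a Failure object
  bool ToCode(Code** out) { if (value == 0) return false; *out = value; return true; }
};

template <typename T>
struct Handle {                // new style: possibly-empty GC-safe handle
  T* location;
  bool is_null() const { return location == 0; }
  T* operator->() const { return location; }
};

static Code kStub = { "stub" };
MaybeCode OldGetCode() { MaybeCode r = { &kStub }; return r; }
Handle<Code> NewGetCode() { Handle<Code> r = { &kStub }; return r; }

int main() {
  // Old: every call site unpacks the result and propagates failures by hand.
  Code* code;
  if (!OldGetCode().ToCode(&code)) return 1;
  std::printf("old: %s\n", code->name);

  // New: the handle is used directly; emptiness is tested only where the
  // API documents an empty handle as a meaningful "no code" answer.
  Handle<Code> handle = NewGetCode();
  if (!handle.is_null()) std::printf("new: %s\n", handle->name);
  return 0;
}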
@@ -1679,50 +1354,49 @@ void KeyedStoreStubCompiler::GenerateStoreDictionaryElement(
}
-CallStubCompiler::CallStubCompiler(int argc,
+CallStubCompiler::CallStubCompiler(Isolate* isolate,
+ int argc,
Code::Kind kind,
- Code::ExtraICState extra_ic_state,
+ Code::ExtraICState extra_state,
InlineCacheHolderFlag cache_holder)
- : arguments_(argc),
+ : StubCompiler(isolate),
+ arguments_(argc),
kind_(kind),
- extra_ic_state_(extra_ic_state),
+ extra_state_(extra_state),
cache_holder_(cache_holder) {
}
-bool CallStubCompiler::HasCustomCallGenerator(JSFunction* function) {
- SharedFunctionInfo* info = function->shared();
- if (info->HasBuiltinFunctionId()) {
- BuiltinFunctionId id = info->builtin_function_id();
+bool CallStubCompiler::HasCustomCallGenerator(Handle<JSFunction> function) {
+ if (function->shared()->HasBuiltinFunctionId()) {
+ BuiltinFunctionId id = function->shared()->builtin_function_id();
#define CALL_GENERATOR_CASE(name) if (id == k##name) return true;
CUSTOM_CALL_IC_GENERATORS(CALL_GENERATOR_CASE)
#undef CALL_GENERATOR_CASE
}
+
CallOptimization optimization(function);
- if (optimization.is_simple_api_call()) {
- return true;
- }
- return false;
+ return optimization.is_simple_api_call();
}
-MaybeObject* CallStubCompiler::CompileCustomCall(Object* object,
- JSObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* fname) {
+Handle<Code> CallStubCompiler::CompileCustomCall(
+ Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<JSFunction> function,
+ Handle<String> fname) {
ASSERT(HasCustomCallGenerator(function));
- SharedFunctionInfo* info = function->shared();
- if (info->HasBuiltinFunctionId()) {
- BuiltinFunctionId id = info->builtin_function_id();
-#define CALL_GENERATOR_CASE(name) \
- if (id == k##name) { \
- return CallStubCompiler::Compile##name##Call(object, \
- holder, \
- cell, \
- function, \
- fname); \
+ if (function->shared()->HasBuiltinFunctionId()) {
+ BuiltinFunctionId id = function->shared()->builtin_function_id();
+#define CALL_GENERATOR_CASE(name) \
+ if (id == k##name) { \
+ return CallStubCompiler::Compile##name##Call(object, \
+ holder, \
+ cell, \
+ function, \
+ fname); \
}
CUSTOM_CALL_IC_GENERATORS(CALL_GENERATOR_CASE)
#undef CALL_GENERATOR_CASE
@@ -1738,100 +1412,99 @@ MaybeObject* CallStubCompiler::CompileCustomCall(Object* object,
}
-MaybeObject* CallStubCompiler::GetCode(PropertyType type, String* name) {
+Handle<Code> CallStubCompiler::GetCode(PropertyType type, Handle<String> name) {
int argc = arguments_.immediate();
Code::Flags flags = Code::ComputeMonomorphicFlags(kind_,
type,
- extra_ic_state_,
+ extra_state_,
cache_holder_,
argc);
return GetCodeWithFlags(flags, name);
}
-MaybeObject* CallStubCompiler::GetCode(JSFunction* function) {
- String* function_name = NULL;
+Handle<Code> CallStubCompiler::GetCode(Handle<JSFunction> function) {
+ Handle<String> function_name;
if (function->shared()->name()->IsString()) {
- function_name = String::cast(function->shared()->name());
+ function_name = Handle<String>(String::cast(function->shared()->name()));
}
return GetCode(CONSTANT_FUNCTION, function_name);
}
-MaybeObject* ConstructStubCompiler::GetCode() {
+Handle<Code> ConstructStubCompiler::GetCode() {
Code::Flags flags = Code::ComputeFlags(Code::STUB);
- Object* result;
- { MaybeObject* maybe_result = GetCodeWithFlags(flags, "ConstructStub");
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- Code* code = Code::cast(result);
- USE(code);
- PROFILE(isolate(), CodeCreateEvent(Logger::STUB_TAG, code, "ConstructStub"));
- GDBJIT(AddCode(GDBJITInterface::STUB, "ConstructStub", Code::cast(code)));
- return result;
+ Handle<Code> code = GetCodeWithFlags(flags, "ConstructStub");
+ PROFILE(isolate(), CodeCreateEvent(Logger::STUB_TAG, *code, "ConstructStub"));
+ GDBJIT(AddCode(GDBJITInterface::STUB, "ConstructStub", *code));
+ return code;
}
CallOptimization::CallOptimization(LookupResult* lookup) {
- if (!lookup->IsProperty() || !lookup->IsCacheable() ||
+ if (!lookup->IsProperty() ||
+ !lookup->IsCacheable() ||
lookup->type() != CONSTANT_FUNCTION) {
- Initialize(NULL);
+ Initialize(Handle<JSFunction>::null());
} else {
// We only optimize constant function calls.
- Initialize(lookup->GetConstantFunction());
+ Initialize(Handle<JSFunction>(lookup->GetConstantFunction()));
}
}
-CallOptimization::CallOptimization(JSFunction* function) {
+CallOptimization::CallOptimization(Handle<JSFunction> function) {
Initialize(function);
}
-int CallOptimization::GetPrototypeDepthOfExpectedType(JSObject* object,
- JSObject* holder) const {
- ASSERT(is_simple_api_call_);
- if (expected_receiver_type_ == NULL) return 0;
+int CallOptimization::GetPrototypeDepthOfExpectedType(
+ Handle<JSObject> object,
+ Handle<JSObject> holder) const {
+ ASSERT(is_simple_api_call());
+ if (expected_receiver_type_.is_null()) return 0;
int depth = 0;
- while (object != holder) {
- if (object->IsInstanceOf(expected_receiver_type_)) return depth;
- object = JSObject::cast(object->GetPrototype());
+ while (!object.is_identical_to(holder)) {
+ if (object->IsInstanceOf(*expected_receiver_type_)) return depth;
+ object = Handle<JSObject>(JSObject::cast(object->GetPrototype()));
++depth;
}
- if (holder->IsInstanceOf(expected_receiver_type_)) return depth;
+ if (holder->IsInstanceOf(*expected_receiver_type_)) return depth;
return kInvalidProtoDepth;
}
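GetPrototypeDepthOfExpectedType walks the receiver toward the holder, counting hops until an object satisfies the expected receiver template, and falls back to kInvalidProtoDepth when nothing on the chain (including the holder) matches; callers use that sentinel to abandon the fast API path. A toy walk-through of the same loop with plain pointers:

#include <cstdio>

// Toy model of the prototype-depth walk; stands in for the JSObject handles.
struct Obj { const Obj* proto; bool matches_expected_type; };

static const int kInvalidProtoDepth = -1;  // assumed sentinel value

int GetPrototypeDepthOfExpectedType(const Obj* object, const Obj* holder) {
  int depth = 0;
  while (object != holder) {
    if (object->matches_expected_type) return depth;
    object = object->proto;  // the real code re-wraps this in a Handle
    ++depth;
  }
  if (holder->matches_expected_type) return depth;
  return kInvalidProtoDepth;
}

int main() {
  Obj holder = { 0, true };
  Obj mid    = { &holder, false };
  Obj recv   = { &mid, false };
  std::printf("%d\n", GetPrototypeDepthOfExpectedType(&recv, &holder));  // 2
  return 0;
}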
-void CallOptimization::Initialize(JSFunction* function) {
- constant_function_ = NULL;
+void CallOptimization::Initialize(Handle<JSFunction> function) {
+ constant_function_ = Handle<JSFunction>::null();
is_simple_api_call_ = false;
- expected_receiver_type_ = NULL;
- api_call_info_ = NULL;
+ expected_receiver_type_ = Handle<FunctionTemplateInfo>::null();
+ api_call_info_ = Handle<CallHandlerInfo>::null();
- if (function == NULL || !function->is_compiled()) return;
+ if (function.is_null() || !function->is_compiled()) return;
constant_function_ = function;
AnalyzePossibleApiFunction(function);
}
-void CallOptimization::AnalyzePossibleApiFunction(JSFunction* function) {
- SharedFunctionInfo* sfi = function->shared();
- if (!sfi->IsApiFunction()) return;
- FunctionTemplateInfo* info = sfi->get_api_func_data();
+void CallOptimization::AnalyzePossibleApiFunction(Handle<JSFunction> function) {
+ if (!function->shared()->IsApiFunction()) return;
+ Handle<FunctionTemplateInfo> info(function->shared()->get_api_func_data());
// Require a C++ callback.
if (info->call_code()->IsUndefined()) return;
- api_call_info_ = CallHandlerInfo::cast(info->call_code());
+ api_call_info_ =
+ Handle<CallHandlerInfo>(CallHandlerInfo::cast(info->call_code()));
// Accept signatures that either have no restrictions at all or
// only have restrictions on the receiver.
if (!info->signature()->IsUndefined()) {
- SignatureInfo* signature = SignatureInfo::cast(info->signature());
+ Handle<SignatureInfo> signature =
+ Handle<SignatureInfo>(SignatureInfo::cast(info->signature()));
if (!signature->args()->IsUndefined()) return;
if (!signature->receiver()->IsUndefined()) {
expected_receiver_type_ =
- FunctionTemplateInfo::cast(signature->receiver());
+ Handle<FunctionTemplateInfo>(
+ FunctionTemplateInfo::cast(signature->receiver()));
}
}
diff --git a/deps/v8/src/stub-cache.h b/deps/v8/src/stub-cache.h
index 18c157b16..cc42e05fa 100644
--- a/deps/v8/src/stub-cache.h
+++ b/deps/v8/src/stub-cache.h
@@ -30,6 +30,7 @@
#include "allocation.h"
#include "arguments.h"
+#include "ic-inl.h"
#include "macro-assembler.h"
#include "objects.h"
#include "zone-inl.h"
@@ -75,207 +76,167 @@ class StubCache {
// Computes the right stub matching. Inserts the result in the
// cache before returning. This might compile a stub if needed.
- MUST_USE_RESULT MaybeObject* ComputeLoadNonexistent(
- String* name,
- JSObject* receiver);
+ Handle<Code> ComputeLoadNonexistent(Handle<String> name,
+ Handle<JSObject> receiver);
- MUST_USE_RESULT MaybeObject* ComputeLoadField(String* name,
- JSObject* receiver,
- JSObject* holder,
- int field_index);
+ Handle<Code> ComputeLoadField(Handle<String> name,
+ Handle<JSObject> receiver,
+ Handle<JSObject> holder,
+ int field_index);
- MUST_USE_RESULT MaybeObject* ComputeLoadCallback(
- String* name,
- JSObject* receiver,
- JSObject* holder,
- AccessorInfo* callback);
+ Handle<Code> ComputeLoadCallback(Handle<String> name,
+ Handle<JSObject> receiver,
+ Handle<JSObject> holder,
+ Handle<AccessorInfo> callback);
- MUST_USE_RESULT MaybeObject* ComputeLoadConstant(String* name,
- JSObject* receiver,
- JSObject* holder,
- Object* value);
+ Handle<Code> ComputeLoadConstant(Handle<String> name,
+ Handle<JSObject> receiver,
+ Handle<JSObject> holder,
+ Handle<Object> value);
- MUST_USE_RESULT MaybeObject* ComputeLoadInterceptor(
- String* name,
- JSObject* receiver,
- JSObject* holder);
+ Handle<Code> ComputeLoadInterceptor(Handle<String> name,
+ Handle<JSObject> receiver,
+ Handle<JSObject> holder);
- MUST_USE_RESULT MaybeObject* ComputeLoadNormal();
-
-
- MUST_USE_RESULT MaybeObject* ComputeLoadGlobal(
- String* name,
- JSObject* receiver,
- GlobalObject* holder,
- JSGlobalPropertyCell* cell,
- bool is_dont_delete);
+ Handle<Code> ComputeLoadNormal();
+ Handle<Code> ComputeLoadGlobal(Handle<String> name,
+ Handle<JSObject> receiver,
+ Handle<GlobalObject> holder,
+ Handle<JSGlobalPropertyCell> cell,
+ bool is_dont_delete);
// ---
- MUST_USE_RESULT MaybeObject* ComputeKeyedLoadField(String* name,
- JSObject* receiver,
- JSObject* holder,
- int field_index);
+ Handle<Code> ComputeKeyedLoadField(Handle<String> name,
+ Handle<JSObject> receiver,
+ Handle<JSObject> holder,
+ int field_index);
- MUST_USE_RESULT MaybeObject* ComputeKeyedLoadCallback(
- String* name,
- JSObject* receiver,
- JSObject* holder,
- AccessorInfo* callback);
+ Handle<Code> ComputeKeyedLoadCallback(Handle<String> name,
+ Handle<JSObject> receiver,
+ Handle<JSObject> holder,
+ Handle<AccessorInfo> callback);
- MUST_USE_RESULT MaybeObject* ComputeKeyedLoadConstant(
- String* name,
- JSObject* receiver,
- JSObject* holder,
- Object* value);
+ Handle<Code> ComputeKeyedLoadConstant(Handle<String> name,
+ Handle<JSObject> receiver,
+ Handle<JSObject> holder,
+ Handle<Object> value);
- MUST_USE_RESULT MaybeObject* ComputeKeyedLoadInterceptor(
- String* name,
- JSObject* receiver,
- JSObject* holder);
+ Handle<Code> ComputeKeyedLoadInterceptor(Handle<String> name,
+ Handle<JSObject> receiver,
+ Handle<JSObject> holder);
- MUST_USE_RESULT MaybeObject* ComputeKeyedLoadArrayLength(
- String* name,
- JSArray* receiver);
+ Handle<Code> ComputeKeyedLoadArrayLength(Handle<String> name,
+ Handle<JSArray> receiver);
- MUST_USE_RESULT MaybeObject* ComputeKeyedLoadStringLength(
- String* name,
- String* receiver);
+ Handle<Code> ComputeKeyedLoadStringLength(Handle<String> name,
+ Handle<String> receiver);
- MUST_USE_RESULT MaybeObject* ComputeKeyedLoadFunctionPrototype(
- String* name,
- JSFunction* receiver);
+ Handle<Code> ComputeKeyedLoadFunctionPrototype(Handle<String> name,
+ Handle<JSFunction> receiver);
// ---
- MUST_USE_RESULT MaybeObject* ComputeStoreField(
- String* name,
- JSObject* receiver,
- int field_index,
- Map* transition,
- StrictModeFlag strict_mode);
-
- MUST_USE_RESULT MaybeObject* ComputeStoreNormal(
- StrictModeFlag strict_mode);
-
- MUST_USE_RESULT MaybeObject* ComputeStoreGlobal(
- String* name,
- GlobalObject* receiver,
- JSGlobalPropertyCell* cell,
- StrictModeFlag strict_mode);
-
- MUST_USE_RESULT MaybeObject* ComputeStoreCallback(
- String* name,
- JSObject* receiver,
- AccessorInfo* callback,
- StrictModeFlag strict_mode);
-
- MUST_USE_RESULT MaybeObject* ComputeStoreInterceptor(
- String* name,
- JSObject* receiver,
- StrictModeFlag strict_mode);
+ Handle<Code> ComputeStoreField(Handle<String> name,
+ Handle<JSObject> receiver,
+ int field_index,
+ Handle<Map> transition,
+ StrictModeFlag strict_mode);
- // ---
+ Handle<Code> ComputeStoreNormal(StrictModeFlag strict_mode);
+
+ Handle<Code> ComputeStoreGlobal(Handle<String> name,
+ Handle<GlobalObject> receiver,
+ Handle<JSGlobalPropertyCell> cell,
+ StrictModeFlag strict_mode);
- MUST_USE_RESULT MaybeObject* ComputeKeyedStoreField(
- String* name,
- JSObject* receiver,
- int field_index,
- Map* transition,
- StrictModeFlag strict_mode);
+ Handle<Code> ComputeStoreCallback(Handle<String> name,
+ Handle<JSObject> receiver,
+ Handle<AccessorInfo> callback,
+ StrictModeFlag strict_mode);
- MUST_USE_RESULT MaybeObject* ComputeKeyedLoadOrStoreElement(
- JSObject* receiver,
- bool is_store,
- StrictModeFlag strict_mode);
+ Handle<Code> ComputeStoreInterceptor(Handle<String> name,
+ Handle<JSObject> receiver,
+ StrictModeFlag strict_mode);
// ---
- MUST_USE_RESULT MaybeObject* ComputeCallField(
- int argc,
- Code::Kind,
- Code::ExtraICState extra_ic_state,
- String* name,
- Object* object,
- JSObject* holder,
- int index);
-
- MUST_USE_RESULT MaybeObject* ComputeCallConstant(
- int argc,
- Code::Kind,
- Code::ExtraICState extra_ic_state,
- String* name,
- Object* object,
- JSObject* holder,
- JSFunction* function);
-
- MUST_USE_RESULT MaybeObject* ComputeCallNormal(
- int argc,
- Code::Kind,
- Code::ExtraICState extra_ic_state,
- String* name,
- JSObject* receiver);
-
- MUST_USE_RESULT MaybeObject* ComputeCallInterceptor(
- int argc,
- Code::Kind,
- Code::ExtraICState extra_ic_state,
- String* name,
- Object* object,
- JSObject* holder);
-
- MUST_USE_RESULT MaybeObject* ComputeCallGlobal(
- int argc,
- Code::Kind,
- Code::ExtraICState extra_ic_state,
- String* name,
- JSObject* receiver,
- GlobalObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function);
+ Handle<Code> ComputeKeyedStoreField(Handle<String> name,
+ Handle<JSObject> receiver,
+ int field_index,
+ Handle<Map> transition,
+ StrictModeFlag strict_mode);
+
+ Handle<Code> ComputeKeyedLoadOrStoreElement(Handle<JSObject> receiver,
+ KeyedIC::StubKind stub_kind,
+ StrictModeFlag strict_mode);
// ---
- MUST_USE_RESULT MaybeObject* ComputeCallInitialize(int argc,
- RelocInfo::Mode mode,
- Code::Kind kind);
+ Handle<Code> ComputeCallField(int argc,
+ Code::Kind,
+ Code::ExtraICState extra_state,
+ Handle<String> name,
+ Handle<Object> object,
+ Handle<JSObject> holder,
+ int index);
+
+ Handle<Code> ComputeCallConstant(int argc,
+ Code::Kind,
+ Code::ExtraICState extra_state,
+ Handle<String> name,
+ Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<JSFunction> function);
+
+ Handle<Code> ComputeCallInterceptor(int argc,
+ Code::Kind,
+ Code::ExtraICState extra_state,
+ Handle<String> name,
+ Handle<Object> object,
+ Handle<JSObject> holder);
+
+ Handle<Code> ComputeCallGlobal(int argc,
+ Code::Kind,
+ Code::ExtraICState extra_state,
+ Handle<String> name,
+ Handle<JSObject> receiver,
+ Handle<GlobalObject> holder,
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<JSFunction> function);
- Handle<Code> ComputeCallInitialize(int argc,
- RelocInfo::Mode mode);
+ // ---
+
+ Handle<Code> ComputeCallInitialize(int argc, RelocInfo::Mode mode);
Handle<Code> ComputeKeyedCallInitialize(int argc);
- MUST_USE_RESULT MaybeObject* ComputeCallPreMonomorphic(
- int argc,
- Code::Kind kind,
- Code::ExtraICState extra_ic_state);
+ Handle<Code> ComputeCallPreMonomorphic(int argc,
+ Code::Kind kind,
+ Code::ExtraICState extra_state);
- MUST_USE_RESULT MaybeObject* ComputeCallNormal(int argc,
- Code::Kind kind,
- Code::ExtraICState state);
+ Handle<Code> ComputeCallNormal(int argc,
+ Code::Kind kind,
+ Code::ExtraICState state);
- MUST_USE_RESULT MaybeObject* ComputeCallArguments(int argc,
- Code::Kind kind);
+ Handle<Code> ComputeCallArguments(int argc, Code::Kind kind);
- MUST_USE_RESULT MaybeObject* ComputeCallMegamorphic(int argc,
- Code::Kind kind,
- Code::ExtraICState state);
+ Handle<Code> ComputeCallMegamorphic(int argc,
+ Code::Kind kind,
+ Code::ExtraICState state);
- MUST_USE_RESULT MaybeObject* ComputeCallMiss(int argc,
- Code::Kind kind,
- Code::ExtraICState state);
+ Handle<Code> ComputeCallMiss(int argc,
+ Code::Kind kind,
+ Code::ExtraICState state);
// Finds the Code object stored in the Heap::non_monomorphic_cache().
- MUST_USE_RESULT Code* FindCallInitialize(int argc,
- RelocInfo::Mode mode,
- Code::Kind kind);
+ Code* FindCallInitialize(int argc, RelocInfo::Mode mode, Code::Kind kind);
#ifdef ENABLE_DEBUGGER_SUPPORT
- MUST_USE_RESULT MaybeObject* ComputeCallDebugBreak(int argc, Code::Kind kind);
+ Handle<Code> ComputeCallDebugBreak(int argc, Code::Kind kind);
- MUST_USE_RESULT MaybeObject* ComputeCallDebugPrepareStepIn(int argc,
- Code::Kind kind);
+ Handle<Code> ComputeCallDebugPrepareStepIn(int argc, Code::Kind kind);
#endif
// Update cache for entry hash(name, map).
@@ -329,16 +290,14 @@ class StubCache {
Isolate* isolate() { return isolate_; }
Heap* heap() { return isolate()->heap(); }
+ Factory* factory() { return isolate()->factory(); }
private:
explicit StubCache(Isolate* isolate);
- friend class Isolate;
- friend class SCTableReference;
- static const int kPrimaryTableSize = 2048;
- static const int kSecondaryTableSize = 512;
- Entry primary_[kPrimaryTableSize];
- Entry secondary_[kSecondaryTableSize];
+ Handle<Code> ComputeCallInitialize(int argc,
+ RelocInfo::Mode mode,
+ Code::Kind kind);
// Computes the hashed offsets for primary and secondary caches.
static int PrimaryOffset(String* name, Code::Flags flags, Map* map) {
@@ -383,8 +342,16 @@ class StubCache {
reinterpret_cast<Address>(table) + (offset << shift_amount));
}
+ static const int kPrimaryTableSize = 2048;
+ static const int kSecondaryTableSize = 512;
+
+ Entry primary_[kPrimaryTableSize];
+ Entry secondary_[kSecondaryTableSize];
Isolate* isolate_;
+ friend class Isolate;
+ friend class SCTableReference;
+
DISALLOW_COPY_AND_ASSIGN(StubCache);
};
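With the members regrouped, the cache layout reads directly off the class: a 2048-entry primary table backed by a 512-entry secondary table, both indexed by a hash of (name, flags, map), with a double miss falling through to stub compilation. The real PrimaryOffset/SecondaryOffset mixing is elided from this hunk, so the hash below is a placeholder, not V8's:

#include <cstdint>

// Toy two-level stub cache. Table sizes match the declarations above;
// the hash does not (the real mixing functions are elided from this hunk).
struct Entry { const char* name; uint32_t flags; const void* map; const void* code; };

static const int kPrimaryTableSize = 2048;
static const int kSecondaryTableSize = 512;
static Entry primary_[kPrimaryTableSize];
static Entry secondary_[kSecondaryTableSize];

static uint32_t Mix(const char* name, uint32_t flags, const void* map) {
  // Placeholder: V8 combines the string's hash field, the code flags and
  // the map address with adds/xors, then masks to the table size.
  return static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name)) ^ flags ^
         static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map));
}

const void* Probe(const char* name, uint32_t flags, const void* map) {
  uint32_t h = Mix(name, flags, map);
  Entry& p = primary_[h % kPrimaryTableSize];
  if (p.name == name && p.flags == flags && p.map == map) return p.code;
  Entry& s = secondary_[h % kSecondaryTableSize];
  if (s.name == name && s.flags == flags && s.map == map) return s.code;
  return 0;  // double miss: the Compute* functions compile and insert
}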
@@ -406,21 +373,24 @@ DECLARE_RUNTIME_FUNCTION(MaybeObject*, CallInterceptorProperty);
DECLARE_RUNTIME_FUNCTION(MaybeObject*, KeyedLoadPropertyWithInterceptor);
-// The stub compiler compiles stubs for the stub cache.
+// The stub compilers compile stubs for the stub cache.
class StubCompiler BASE_EMBEDDED {
public:
- StubCompiler()
- : scope_(), masm_(Isolate::Current(), NULL, 256), failure_(NULL) { }
-
- MUST_USE_RESULT MaybeObject* CompileCallInitialize(Code::Flags flags);
- MUST_USE_RESULT MaybeObject* CompileCallPreMonomorphic(Code::Flags flags);
- MUST_USE_RESULT MaybeObject* CompileCallNormal(Code::Flags flags);
- MUST_USE_RESULT MaybeObject* CompileCallMegamorphic(Code::Flags flags);
- MUST_USE_RESULT MaybeObject* CompileCallArguments(Code::Flags flags);
- MUST_USE_RESULT MaybeObject* CompileCallMiss(Code::Flags flags);
+ explicit StubCompiler(Isolate* isolate)
+ : isolate_(isolate), masm_(isolate, NULL, 256), failure_(NULL) { }
+
+ // Functions to compile either CallIC or KeyedCallIC. The specific kind
+ // is extracted from the code flags.
+ Handle<Code> CompileCallInitialize(Code::Flags flags);
+ Handle<Code> CompileCallPreMonomorphic(Code::Flags flags);
+ Handle<Code> CompileCallNormal(Code::Flags flags);
+ Handle<Code> CompileCallMegamorphic(Code::Flags flags);
+ Handle<Code> CompileCallArguments(Code::Flags flags);
+ Handle<Code> CompileCallMiss(Code::Flags flags);
+
#ifdef ENABLE_DEBUGGER_SUPPORT
- MUST_USE_RESULT MaybeObject* CompileCallDebugBreak(Code::Flags flags);
- MUST_USE_RESULT MaybeObject* CompileCallDebugPrepareStepIn(Code::Flags flags);
+ Handle<Code> CompileCallDebugBreak(Code::Flags flags);
+ Handle<Code> CompileCallDebugPrepareStepIn(Code::Flags flags);
#endif
// Static functions for generating parts of stubs.
@@ -440,8 +410,10 @@ class StubCompiler BASE_EMBEDDED {
Label* miss);
static void GenerateFastPropertyLoad(MacroAssembler* masm,
- Register dst, Register src,
- JSObject* holder, int index);
+ Register dst,
+ Register src,
+ Handle<JSObject> holder,
+ int index);
static void GenerateLoadArrayLength(MacroAssembler* masm,
Register receiver,
@@ -462,9 +434,9 @@ class StubCompiler BASE_EMBEDDED {
Label* miss_label);
static void GenerateStoreField(MacroAssembler* masm,
- JSObject* object,
+ Handle<JSObject> object,
int index,
- Map* transition,
+ Handle<Map> transition,
Register receiver_reg,
Register name_reg,
Register scratch,
@@ -490,88 +462,87 @@ class StubCompiler BASE_EMBEDDED {
// The function can optionally (when save_at_depth !=
// kInvalidProtoDepth) save the object at the given depth by moving
// it to [esp + kPointerSize].
-
- Register CheckPrototypes(JSObject* object,
+ Register CheckPrototypes(Handle<JSObject> object,
Register object_reg,
- JSObject* holder,
+ Handle<JSObject> holder,
Register holder_reg,
Register scratch1,
Register scratch2,
- String* name,
+ Handle<String> name,
Label* miss) {
return CheckPrototypes(object, object_reg, holder, holder_reg, scratch1,
scratch2, name, kInvalidProtoDepth, miss);
}
- Register CheckPrototypes(JSObject* object,
+ Register CheckPrototypes(Handle<JSObject> object,
Register object_reg,
- JSObject* holder,
+ Handle<JSObject> holder,
Register holder_reg,
Register scratch1,
Register scratch2,
- String* name,
+ Handle<String> name,
int save_at_depth,
Label* miss);
protected:
- MaybeObject* GetCodeWithFlags(Code::Flags flags, const char* name);
- MaybeObject* GetCodeWithFlags(Code::Flags flags, String* name);
+ Handle<Code> GetCodeWithFlags(Code::Flags flags, const char* name);
+ Handle<Code> GetCodeWithFlags(Code::Flags flags, Handle<String> name);
MacroAssembler* masm() { return &masm_; }
void set_failure(Failure* failure) { failure_ = failure; }
- void GenerateLoadField(JSObject* object,
- JSObject* holder,
+ void GenerateLoadField(Handle<JSObject> object,
+ Handle<JSObject> holder,
Register receiver,
Register scratch1,
Register scratch2,
Register scratch3,
int index,
- String* name,
+ Handle<String> name,
Label* miss);
- MaybeObject* GenerateLoadCallback(JSObject* object,
- JSObject* holder,
- Register receiver,
- Register name_reg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- AccessorInfo* callback,
- String* name,
- Label* miss);
+ void GenerateLoadCallback(Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Register receiver,
+ Register name_reg,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Handle<AccessorInfo> callback,
+ Handle<String> name,
+ Label* miss);
- void GenerateLoadConstant(JSObject* object,
- JSObject* holder,
+ void GenerateLoadConstant(Handle<JSObject> object,
+ Handle<JSObject> holder,
Register receiver,
Register scratch1,
Register scratch2,
Register scratch3,
- Object* value,
- String* name,
+ Handle<Object> value,
+ Handle<String> name,
Label* miss);
- void GenerateLoadInterceptor(JSObject* object,
- JSObject* holder,
+ void GenerateLoadInterceptor(Handle<JSObject> object,
+ Handle<JSObject> holder,
LookupResult* lookup,
Register receiver,
Register name_reg,
Register scratch1,
Register scratch2,
Register scratch3,
- String* name,
+ Handle<String> name,
Label* miss);
- static void LookupPostInterceptor(JSObject* holder,
- String* name,
+ static void LookupPostInterceptor(Handle<JSObject> holder,
+ Handle<String> name,
LookupResult* lookup);
- Isolate* isolate() { return scope_.isolate(); }
+ Isolate* isolate() { return isolate_; }
Heap* heap() { return isolate()->heap(); }
Factory* factory() { return isolate()->factory(); }
private:
- HandleScope scope_;
+ Isolate* isolate_;
MacroAssembler masm_;
Failure* failure_;
};
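Dropping the HandleScope member and threading the Isolate through the constructor shifts both concerns to the caller. A sketch of the new call shape, assuming V8-internal context; the variable names are invented, and only the constructor and CompileLoadField signatures come from the declarations in this file:

// Not standalone: assumes the V8-internal types declared above.
HandleScope scope(isolate);              // the caller now owns the scope
LoadStubCompiler compiler(isolate);      // isolate passed in explicitly
Handle<Code> code =
    compiler.CompileLoadField(receiver, holder, field_index, name);
// No failure check: the result is a live Handle<Code>, not a MaybeObject*.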
@@ -579,70 +550,75 @@ class StubCompiler BASE_EMBEDDED {
class LoadStubCompiler: public StubCompiler {
public:
- MUST_USE_RESULT MaybeObject* CompileLoadNonexistent(String* name,
- JSObject* object,
- JSObject* last);
-
- MUST_USE_RESULT MaybeObject* CompileLoadField(JSObject* object,
- JSObject* holder,
- int index,
- String* name);
-
- MUST_USE_RESULT MaybeObject* CompileLoadCallback(String* name,
- JSObject* object,
- JSObject* holder,
- AccessorInfo* callback);
-
- MUST_USE_RESULT MaybeObject* CompileLoadConstant(JSObject* object,
- JSObject* holder,
- Object* value,
- String* name);
-
- MUST_USE_RESULT MaybeObject* CompileLoadInterceptor(JSObject* object,
- JSObject* holder,
- String* name);
-
- MUST_USE_RESULT MaybeObject* CompileLoadGlobal(JSObject* object,
- GlobalObject* holder,
- JSGlobalPropertyCell* cell,
- String* name,
- bool is_dont_delete);
+ explicit LoadStubCompiler(Isolate* isolate) : StubCompiler(isolate) { }
+
+ Handle<Code> CompileLoadNonexistent(Handle<String> name,
+ Handle<JSObject> object,
+ Handle<JSObject> last);
+
+ Handle<Code> CompileLoadField(Handle<JSObject> object,
+ Handle<JSObject> holder,
+ int index,
+ Handle<String> name);
+
+ Handle<Code> CompileLoadCallback(Handle<String> name,
+ Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<AccessorInfo> callback);
+
+ Handle<Code> CompileLoadConstant(Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<Object> value,
+ Handle<String> name);
+
+ Handle<Code> CompileLoadInterceptor(Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<String> name);
+
+ Handle<Code> CompileLoadGlobal(Handle<JSObject> object,
+ Handle<GlobalObject> holder,
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<String> name,
+ bool is_dont_delete);
private:
- MUST_USE_RESULT MaybeObject* GetCode(PropertyType type, String* name);
+ Handle<Code> GetCode(PropertyType type, Handle<String> name);
};
class KeyedLoadStubCompiler: public StubCompiler {
public:
- MUST_USE_RESULT MaybeObject* CompileLoadField(String* name,
- JSObject* object,
- JSObject* holder,
- int index);
+ explicit KeyedLoadStubCompiler(Isolate* isolate) : StubCompiler(isolate) { }
+
+ Handle<Code> CompileLoadField(Handle<String> name,
+ Handle<JSObject> object,
+ Handle<JSObject> holder,
+ int index);
- MUST_USE_RESULT MaybeObject* CompileLoadCallback(String* name,
- JSObject* object,
- JSObject* holder,
- AccessorInfo* callback);
+ Handle<Code> CompileLoadCallback(Handle<String> name,
+ Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<AccessorInfo> callback);
- MUST_USE_RESULT MaybeObject* CompileLoadConstant(String* name,
- JSObject* object,
- JSObject* holder,
- Object* value);
+ Handle<Code> CompileLoadConstant(Handle<String> name,
+ Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<Object> value);
- MUST_USE_RESULT MaybeObject* CompileLoadInterceptor(JSObject* object,
- JSObject* holder,
- String* name);
+ Handle<Code> CompileLoadInterceptor(Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<String> name);
-  MUST_USE_RESULT MaybeObject* CompileLoadArrayLength(String* name);
-  MUST_USE_RESULT MaybeObject* CompileLoadStringLength(String* name);
-  MUST_USE_RESULT MaybeObject* CompileLoadFunctionPrototype(String* name);
-  MUST_USE_RESULT MaybeObject* CompileLoadElement(Map* receiver_map);
-  MUST_USE_RESULT MaybeObject* CompileLoadMegamorphic(
-      MapList* receiver_maps,
-      CodeList* handler_ics);
+  Handle<Code> CompileLoadArrayLength(Handle<String> name);
+  Handle<Code> CompileLoadStringLength(Handle<String> name);
+  Handle<Code> CompileLoadFunctionPrototype(Handle<String> name);
+
+  Handle<Code> CompileLoadElement(Handle<Map> receiver_map);
+
+  Handle<Code> CompileLoadPolymorphic(MapHandleList* receiver_maps,
+                                      CodeHandleList* handler_ics);
static void GenerateLoadExternalArray(MacroAssembler* masm,
ElementsKind elements_kind);
@@ -654,34 +630,36 @@ class KeyedLoadStubCompiler: public StubCompiler {
static void GenerateLoadDictionaryElement(MacroAssembler* masm);
private:
- MaybeObject* GetCode(PropertyType type,
- String* name,
+ Handle<Code> GetCode(PropertyType type,
+ Handle<String> name,
InlineCacheState state = MONOMORPHIC);
};
class StoreStubCompiler: public StubCompiler {
public:
- explicit StoreStubCompiler(StrictModeFlag strict_mode)
- : strict_mode_(strict_mode) { }
+ StoreStubCompiler(Isolate* isolate, StrictModeFlag strict_mode)
+ : StubCompiler(isolate), strict_mode_(strict_mode) { }
- MUST_USE_RESULT MaybeObject* CompileStoreField(JSObject* object,
- int index,
- Map* transition,
- String* name);
- MUST_USE_RESULT MaybeObject* CompileStoreCallback(JSObject* object,
- AccessorInfo* callbacks,
- String* name);
- MUST_USE_RESULT MaybeObject* CompileStoreInterceptor(JSObject* object,
- String* name);
- MUST_USE_RESULT MaybeObject* CompileStoreGlobal(GlobalObject* object,
- JSGlobalPropertyCell* holder,
- String* name);
+ Handle<Code> CompileStoreField(Handle<JSObject> object,
+ int index,
+ Handle<Map> transition,
+ Handle<String> name);
+
+ Handle<Code> CompileStoreCallback(Handle<JSObject> object,
+ Handle<AccessorInfo> callback,
+ Handle<String> name);
+ Handle<Code> CompileStoreInterceptor(Handle<JSObject> object,
+ Handle<String> name);
+
+ Handle<Code> CompileStoreGlobal(Handle<GlobalObject> object,
+ Handle<JSGlobalPropertyCell> holder,
+ Handle<String> name);
private:
- MaybeObject* GetCode(PropertyType type, String* name);
+ Handle<Code> GetCode(PropertyType type, Handle<String> name);
StrictModeFlag strict_mode_;
};
@@ -689,22 +667,23 @@ class StoreStubCompiler: public StubCompiler {
class KeyedStoreStubCompiler: public StubCompiler {
public:
- explicit KeyedStoreStubCompiler(StrictModeFlag strict_mode)
- : strict_mode_(strict_mode) { }
+ KeyedStoreStubCompiler(Isolate* isolate, StrictModeFlag strict_mode)
+ : StubCompiler(isolate), strict_mode_(strict_mode) { }
- MUST_USE_RESULT MaybeObject* CompileStoreField(JSObject* object,
- int index,
- Map* transition,
- String* name);
+ Handle<Code> CompileStoreField(Handle<JSObject> object,
+ int index,
+ Handle<Map> transition,
+ Handle<String> name);
- MUST_USE_RESULT MaybeObject* CompileStoreElement(Map* receiver_map);
+ Handle<Code> CompileStoreElement(Handle<Map> receiver_map);
- MUST_USE_RESULT MaybeObject* CompileStoreMegamorphic(
- MapList* receiver_maps,
- CodeList* handler_ics);
+ Handle<Code> CompileStorePolymorphic(MapHandleList* receiver_maps,
+ CodeHandleList* handler_stubs,
+ MapHandleList* transitioned_maps);
static void GenerateStoreFastElement(MacroAssembler* masm,
- bool is_js_array);
+ bool is_js_array,
+ ElementsKind element_kind);
static void GenerateStoreFastDoubleElement(MacroAssembler* masm,
bool is_js_array);
@@ -715,8 +694,8 @@ class KeyedStoreStubCompiler: public StubCompiler {
static void GenerateStoreDictionaryElement(MacroAssembler* masm);
private:
- MaybeObject* GetCode(PropertyType type,
- String* name,
+ Handle<Code> GetCode(PropertyType type,
+ Handle<String> name,
InlineCacheState state = MONOMORPHIC);
StrictModeFlag strict_mode_;
@@ -739,105 +718,97 @@ class CallOptimization;
class CallStubCompiler: public StubCompiler {
public:
- CallStubCompiler(int argc,
+ CallStubCompiler(Isolate* isolate,
+ int argc,
Code::Kind kind,
- Code::ExtraICState extra_ic_state,
+ Code::ExtraICState extra_state,
InlineCacheHolderFlag cache_holder);
- MUST_USE_RESULT MaybeObject* CompileCallField(
- JSObject* object,
- JSObject* holder,
- int index,
- String* name);
-
- MUST_USE_RESULT MaybeObject* CompileCallConstant(
- Object* object,
- JSObject* holder,
- JSFunction* function,
- String* name,
- CheckType check);
-
- MUST_USE_RESULT MaybeObject* CompileCallInterceptor(
- JSObject* object,
- JSObject* holder,
- String* name);
-
- MUST_USE_RESULT MaybeObject* CompileCallGlobal(
- JSObject* object,
- GlobalObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name);
-
- static bool HasCustomCallGenerator(JSFunction* function);
+ Handle<Code> CompileCallField(Handle<JSObject> object,
+ Handle<JSObject> holder,
+ int index,
+ Handle<String> name);
+
+ Handle<Code> CompileCallConstant(Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<JSFunction> function,
+ Handle<String> name,
+ CheckType check);
+
+ Handle<Code> CompileCallInterceptor(Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<String> name);
+
+ Handle<Code> CompileCallGlobal(Handle<JSObject> object,
+ Handle<GlobalObject> holder,
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<JSFunction> function,
+ Handle<String> name);
+
+ static bool HasCustomCallGenerator(Handle<JSFunction> function);
private:
- // Compiles a custom call constant/global IC. For constant calls
- // cell is NULL. Returns undefined if there is no custom call code
- // for the given function or it can't be generated.
- MUST_USE_RESULT MaybeObject* CompileCustomCall(Object* object,
- JSObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name);
-
-#define DECLARE_CALL_GENERATOR(name) \
- MUST_USE_RESULT MaybeObject* Compile##name##Call(Object* object, \
- JSObject* holder, \
- JSGlobalPropertyCell* cell, \
- JSFunction* function, \
- String* fname);
+  // Compiles a custom call constant/global IC. For constant calls the cell
+  // is the null handle. Returns an empty handle if there is no custom call
+  // code for the given function.
+ Handle<Code> CompileCustomCall(Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<JSFunction> function,
+ Handle<String> name);
+
+#define DECLARE_CALL_GENERATOR(name) \
+ Handle<Code> Compile##name##Call(Handle<Object> object, \
+ Handle<JSObject> holder, \
+ Handle<JSGlobalPropertyCell> cell, \
+ Handle<JSFunction> function, \
+ Handle<String> fname);
CUSTOM_CALL_IC_GENERATORS(DECLARE_CALL_GENERATOR)
#undef DECLARE_CALL_GENERATOR
- MUST_USE_RESULT MaybeObject* CompileFastApiCall(
- const CallOptimization& optimization,
- Object* object,
- JSObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name);
+ Handle<Code> CompileFastApiCall(const CallOptimization& optimization,
+ Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<JSFunction> function,
+ Handle<String> name);
- const ParameterCount arguments_;
- const Code::Kind kind_;
- const Code::ExtraICState extra_ic_state_;
- const InlineCacheHolderFlag cache_holder_;
+ Handle<Code> GetCode(PropertyType type, Handle<String> name);
+ Handle<Code> GetCode(Handle<JSFunction> function);
const ParameterCount& arguments() { return arguments_; }
- MUST_USE_RESULT MaybeObject* GetCode(PropertyType type, String* name);
-
- // Convenience function. Calls GetCode above passing
- // CONSTANT_FUNCTION type and the name of the given function.
- MUST_USE_RESULT MaybeObject* GetCode(JSFunction* function);
+ void GenerateNameCheck(Handle<String> name, Label* miss);
- void GenerateNameCheck(String* name, Label* miss);
-
- void GenerateGlobalReceiverCheck(JSObject* object,
- JSObject* holder,
- String* name,
+ void GenerateGlobalReceiverCheck(Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<String> name,
Label* miss);
// Generates code to load the function from the cell checking that
// it still contains the same function.
- void GenerateLoadFunctionFromCell(JSGlobalPropertyCell* cell,
- JSFunction* function,
+ void GenerateLoadFunctionFromCell(Handle<JSGlobalPropertyCell> cell,
+ Handle<JSFunction> function,
Label* miss);
- // Generates a jump to CallIC miss stub. Returns Failure if the jump cannot
- // be generated.
- MUST_USE_RESULT MaybeObject* GenerateMissBranch();
+ // Generates a jump to CallIC miss stub.
+ void GenerateMissBranch();
+
+ const ParameterCount arguments_;
+ const Code::Kind kind_;
+ const Code::ExtraICState extra_state_;
+ const InlineCacheHolderFlag cache_holder_;
};
class ConstructStubCompiler: public StubCompiler {
public:
- explicit ConstructStubCompiler() {}
+ explicit ConstructStubCompiler(Isolate* isolate) : StubCompiler(isolate) { }
- MUST_USE_RESULT MaybeObject* CompileConstructStub(JSFunction* function);
+ Handle<Code> CompileConstructStub(Handle<JSFunction> function);
private:
- MaybeObject* GetCode();
+ Handle<Code> GetCode();
};
@@ -846,14 +817,14 @@ class CallOptimization BASE_EMBEDDED {
public:
explicit CallOptimization(LookupResult* lookup);
- explicit CallOptimization(JSFunction* function);
+ explicit CallOptimization(Handle<JSFunction> function);
bool is_constant_call() const {
- return constant_function_ != NULL;
+ return !constant_function_.is_null();
}
- JSFunction* constant_function() const {
- ASSERT(constant_function_ != NULL);
+ Handle<JSFunction> constant_function() const {
+ ASSERT(is_constant_call());
return constant_function_;
}
@@ -861,32 +832,32 @@ class CallOptimization BASE_EMBEDDED {
return is_simple_api_call_;
}
- FunctionTemplateInfo* expected_receiver_type() const {
- ASSERT(is_simple_api_call_);
+ Handle<FunctionTemplateInfo> expected_receiver_type() const {
+ ASSERT(is_simple_api_call());
return expected_receiver_type_;
}
- CallHandlerInfo* api_call_info() const {
- ASSERT(is_simple_api_call_);
+ Handle<CallHandlerInfo> api_call_info() const {
+ ASSERT(is_simple_api_call());
return api_call_info_;
}
// Returns the depth of the object having the expected type in the
// prototype chain between the two arguments.
- int GetPrototypeDepthOfExpectedType(JSObject* object,
- JSObject* holder) const;
+ int GetPrototypeDepthOfExpectedType(Handle<JSObject> object,
+ Handle<JSObject> holder) const;
private:
- void Initialize(JSFunction* function);
+ void Initialize(Handle<JSFunction> function);
// Determines whether the given function can be called using the
// fast api call builtin.
- void AnalyzePossibleApiFunction(JSFunction* function);
+ void AnalyzePossibleApiFunction(Handle<JSFunction> function);
- JSFunction* constant_function_;
+ Handle<JSFunction> constant_function_;
bool is_simple_api_call_;
- FunctionTemplateInfo* expected_receiver_type_;
- CallHandlerInfo* api_call_info_;
+ Handle<FunctionTemplateInfo> expected_receiver_type_;
+ Handle<CallHandlerInfo> api_call_info_;
};
diff --git a/deps/v8/src/token.h b/deps/v8/src/token.h
index eb825c1a7..7a2156c95 100644
--- a/deps/v8/src/token.h
+++ b/deps/v8/src/token.h
@@ -73,6 +73,7 @@ namespace internal {
T(INIT_VAR, "=init_var", 2) /* AST-use only. */ \
T(INIT_LET, "=init_let", 2) /* AST-use only. */ \
T(INIT_CONST, "=init_const", 2) /* AST-use only. */ \
+ T(INIT_CONST_HARMONY, "=init_const_harmony", 2) /* AST-use only. */ \
T(ASSIGN, "=", 2) \
T(ASSIGN_BIT_OR, "|=", 2) \
T(ASSIGN_BIT_XOR, "^=", 2) \
@@ -216,6 +217,10 @@ class Token {
return op == LT || op == LTE || op == GT || op == GTE;
}
+ static bool IsEqualityOp(Value op) {
+ return op == EQ || op == EQ_STRICT;
+ }
+
static Value NegateCompareOp(Value op) {
ASSERT(IsCompareOp(op));
switch (op) {
diff --git a/deps/v8/src/type-info.cc b/deps/v8/src/type-info.cc
index c64368e59..c781c615a 100644
--- a/deps/v8/src/type-info.cc
+++ b/deps/v8/src/type-info.cc
@@ -60,8 +60,10 @@ TypeInfo TypeInfo::TypeFromValue(Handle<Object> value) {
TypeFeedbackOracle::TypeFeedbackOracle(Handle<Code> code,
- Handle<Context> global_context) {
+ Handle<Context> global_context,
+ Isolate* isolate) {
global_context_ = global_context;
+ isolate_ = isolate;
BuildDictionary(code);
ASSERT(reinterpret_cast<Address>(*dictionary_.location()) != kHandleZapValue);
}
@@ -71,12 +73,12 @@ Handle<Object> TypeFeedbackOracle::GetInfo(unsigned ast_id) {
int entry = dictionary_->FindEntry(ast_id);
return entry != NumberDictionary::kNotFound
? Handle<Object>(dictionary_->ValueAt(entry))
- : Isolate::Current()->factory()->undefined_value();
+ : Handle<Object>::cast(isolate_->factory()->undefined_value());
}
bool TypeFeedbackOracle::LoadIsMonomorphicNormal(Property* expr) {
- Handle<Object> map_or_code(GetInfo(expr->id()));
+ Handle<Object> map_or_code = GetInfo(expr->id());
if (map_or_code->IsMap()) return true;
if (map_or_code->IsCode()) {
Handle<Code> code = Handle<Code>::cast(map_or_code);
@@ -90,10 +92,10 @@ bool TypeFeedbackOracle::LoadIsMonomorphicNormal(Property* expr) {
bool TypeFeedbackOracle::LoadIsMegamorphicWithTypeInfo(Property* expr) {
- Handle<Object> map_or_code(GetInfo(expr->id()));
+ Handle<Object> map_or_code = GetInfo(expr->id());
if (map_or_code->IsCode()) {
Handle<Code> code = Handle<Code>::cast(map_or_code);
- Builtins* builtins = Isolate::Current()->builtins();
+ Builtins* builtins = isolate_->builtins();
return code->is_keyed_load_stub() &&
*code != builtins->builtin(Builtins::kKeyedLoadIC_Generic) &&
code->ic_state() == MEGAMORPHIC;
@@ -103,7 +105,7 @@ bool TypeFeedbackOracle::LoadIsMegamorphicWithTypeInfo(Property* expr) {
bool TypeFeedbackOracle::StoreIsMonomorphicNormal(Expression* expr) {
- Handle<Object> map_or_code(GetInfo(expr->id()));
+ Handle<Object> map_or_code = GetInfo(expr->id());
if (map_or_code->IsMap()) return true;
if (map_or_code->IsCode()) {
Handle<Code> code = Handle<Code>::cast(map_or_code);
@@ -116,10 +118,10 @@ bool TypeFeedbackOracle::StoreIsMonomorphicNormal(Expression* expr) {
bool TypeFeedbackOracle::StoreIsMegamorphicWithTypeInfo(Expression* expr) {
- Handle<Object> map_or_code(GetInfo(expr->id()));
+ Handle<Object> map_or_code = GetInfo(expr->id());
if (map_or_code->IsCode()) {
Handle<Code> code = Handle<Code>::cast(map_or_code);
- Builtins* builtins = Isolate::Current()->builtins();
+ Builtins* builtins = isolate_->builtins();
return code->is_keyed_store_stub() &&
*code != builtins->builtin(Builtins::kKeyedStoreIC_Generic) &&
*code != builtins->builtin(Builtins::kKeyedStoreIC_Generic_Strict) &&
@@ -131,13 +133,13 @@ bool TypeFeedbackOracle::StoreIsMegamorphicWithTypeInfo(Expression* expr) {
bool TypeFeedbackOracle::CallIsMonomorphic(Call* expr) {
Handle<Object> value = GetInfo(expr->id());
- return value->IsMap() || value->IsSmi();
+ return value->IsMap() || value->IsSmi() || value->IsJSFunction();
}
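The new IsJSFunction() arm pairs with the Code::STUB case added to ProcessRelocInfos further down: per AST id the feedback dictionary can now hold a Map (a monomorphic receiver map), a Smi (a CheckType for primitive receivers), a Code object (megamorphic and operator ICs), or a JSFunction fished out of a CallFunctionStub's cache. A toy rendering of the read side:

#include <cstdio>

// Toy tag standing in for the oracle's Handle<Object>; the real code
// tests IsMap()/IsSmi()/IsJSFunction() on the recorded heap object.
enum Kind { kUndefined, kMap, kSmi, kJSFunction, kCode };

bool CallIsMonomorphic(Kind recorded) {
  // Monomorphic if feedback recorded a receiver map, a primitive-receiver
  // CheckType (Smi), or -- new in this commit -- a concrete target
  // function cached by a CallFunctionStub.
  return recorded == kMap || recorded == kSmi || recorded == kJSFunction;
}

int main() {
  std::printf("%d %d\n", CallIsMonomorphic(kJSFunction),  // 1
                         CallIsMonomorphic(kCode));       // 0
  return 0;
}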
Handle<Map> TypeFeedbackOracle::LoadMonomorphicReceiverType(Property* expr) {
ASSERT(LoadIsMonomorphicNormal(expr));
- Handle<Object> map_or_code(GetInfo(expr->id()));
+ Handle<Object> map_or_code = GetInfo(expr->id());
if (map_or_code->IsCode()) {
Handle<Code> code = Handle<Code>::cast(map_or_code);
Map* first_map = code->FindFirstMap();
@@ -150,7 +152,7 @@ Handle<Map> TypeFeedbackOracle::LoadMonomorphicReceiverType(Property* expr) {
Handle<Map> TypeFeedbackOracle::StoreMonomorphicReceiverType(Expression* expr) {
ASSERT(StoreIsMonomorphicNormal(expr));
- Handle<Object> map_or_code(GetInfo(expr->id()));
+ Handle<Object> map_or_code = GetInfo(expr->id());
if (map_or_code->IsCode()) {
Handle<Code> code = Handle<Code>::cast(map_or_code);
return Handle<Map>(code->FindFirstMap());
@@ -203,6 +205,7 @@ CheckType TypeFeedbackOracle::GetCallCheckType(Call* expr) {
return check;
}
+
Handle<JSObject> TypeFeedbackOracle::GetPrototypeForPrimitiveCheck(
CheckType check) {
JSFunction* function = NULL;
@@ -225,9 +228,14 @@ Handle<JSObject> TypeFeedbackOracle::GetPrototypeForPrimitiveCheck(
}
+Handle<JSFunction> TypeFeedbackOracle::GetCallTarget(Call* expr) {
+ return Handle<JSFunction>::cast(GetInfo(expr->id()));
+}
+
+
bool TypeFeedbackOracle::LoadIsBuiltin(Property* expr, Builtins::Name id) {
return *GetInfo(expr->id()) ==
- Isolate::Current()->builtins()->builtin(id);
+ isolate_->builtins()->builtin(id);
}
@@ -352,6 +360,10 @@ TypeInfo TypeFeedbackOracle::SwitchType(CaseClause* clause) {
return unknown;
case CompareIC::SMIS:
return TypeInfo::Smi();
+ case CompareIC::STRINGS:
+ return TypeInfo::String();
+ case CompareIC::SYMBOLS:
+ return TypeInfo::Symbol();
case CompareIC::HEAP_NUMBERS:
return TypeInfo::Number();
case CompareIC::OBJECTS:
@@ -397,24 +409,33 @@ void TypeFeedbackOracle::CollectReceiverTypes(unsigned ast_id,
Handle<String> name,
Code::Flags flags,
SmallMapList* types) {
- Isolate* isolate = Isolate::Current();
Handle<Object> object = GetInfo(ast_id);
if (object->IsUndefined() || object->IsSmi()) return;
- if (*object == isolate->builtins()->builtin(Builtins::kStoreIC_GlobalProxy)) {
+ if (*object ==
+ isolate_->builtins()->builtin(Builtins::kStoreIC_GlobalProxy)) {
// TODO(fschneider): We could collect the maps and signal that
// we need a generic store (or load) here.
ASSERT(Handle<Code>::cast(object)->ic_state() == MEGAMORPHIC);
} else if (object->IsMap()) {
types->Add(Handle<Map>::cast(object));
- } else if (Handle<Code>::cast(object)->ic_state() == MEGAMORPHIC) {
+ } else if (FLAG_collect_megamorphic_maps_from_stub_cache &&
+ Handle<Code>::cast(object)->ic_state() == MEGAMORPHIC) {
types->Reserve(4);
ASSERT(object->IsCode());
- isolate->stub_cache()->CollectMatchingMaps(types, *name, flags);
+ isolate_->stub_cache()->CollectMatchingMaps(types, *name, flags);
}
}
+static void AddMapIfMissing(Handle<Map> map, SmallMapList* list) {
+ for (int i = 0; i < list->length(); ++i) {
+ if (list->at(i).is_identical_to(map)) return;
+ }
+ list->Add(map);
+}
+
+
void TypeFeedbackOracle::CollectKeyedReceiverTypes(unsigned ast_id,
SmallMapList* types) {
Handle<Object> object = GetInfo(ast_id);
@@ -428,7 +449,7 @@ void TypeFeedbackOracle::CollectKeyedReceiverTypes(unsigned ast_id,
RelocInfo* info = it.rinfo();
Object* object = info->target_object();
if (object->IsMap()) {
- types->Add(Handle<Map>(Map::cast(object)));
+ AddMapIfMissing(Handle<Map>(Map::cast(object)), types);
}
}
}
@@ -488,49 +509,56 @@ void TypeFeedbackOracle::RelocateRelocInfos(ZoneList<RelocInfo>* infos,
void TypeFeedbackOracle::ProcessRelocInfos(ZoneList<RelocInfo>* infos) {
for (int i = 0; i < infos->length(); i++) {
+ RelocInfo reloc_entry = (*infos)[i];
+ Address target_address = reloc_entry.target_address();
unsigned ast_id = static_cast<unsigned>((*infos)[i].data());
- Code* target = Code::GetCodeFromTargetAddress((*infos)[i].target_address());
- ProcessTarget(ast_id, target);
- }
-}
-
+ Code* target = Code::GetCodeFromTargetAddress(target_address);
+ switch (target->kind()) {
+ case Code::LOAD_IC:
+ case Code::STORE_IC:
+ case Code::CALL_IC:
+ case Code::KEYED_CALL_IC:
+ if (target->ic_state() == MONOMORPHIC) {
+ if (target->kind() == Code::CALL_IC &&
+ target->check_type() != RECEIVER_MAP_CHECK) {
+ SetInfo(ast_id, Smi::FromInt(target->check_type()));
+ } else {
+ Object* map = target->FindFirstMap();
+ SetInfo(ast_id, map == NULL ? static_cast<Object*>(target) : map);
+ }
+ } else if (target->ic_state() == MEGAMORPHIC) {
+ SetInfo(ast_id, target);
+ }
+ break;
-void TypeFeedbackOracle::ProcessTarget(unsigned ast_id, Code* target) {
- switch (target->kind()) {
- case Code::LOAD_IC:
- case Code::STORE_IC:
- case Code::CALL_IC:
- case Code::KEYED_CALL_IC:
- if (target->ic_state() == MONOMORPHIC) {
- if (target->kind() == Code::CALL_IC &&
- target->check_type() != RECEIVER_MAP_CHECK) {
- SetInfo(ast_id, Smi::FromInt(target->check_type()));
- } else {
- Object* map = target->FindFirstMap();
- SetInfo(ast_id, map == NULL ? static_cast<Object*>(target) : map);
+ case Code::KEYED_LOAD_IC:
+ case Code::KEYED_STORE_IC:
+ if (target->ic_state() == MONOMORPHIC ||
+ target->ic_state() == MEGAMORPHIC) {
+ SetInfo(ast_id, target);
}
- } else if (target->ic_state() == MEGAMORPHIC) {
- SetInfo(ast_id, target);
- }
- break;
+ break;
- case Code::KEYED_LOAD_IC:
- case Code::KEYED_STORE_IC:
- if (target->ic_state() == MONOMORPHIC ||
- target->ic_state() == MEGAMORPHIC) {
+ case Code::UNARY_OP_IC:
+ case Code::BINARY_OP_IC:
+ case Code::COMPARE_IC:
+ case Code::TO_BOOLEAN_IC:
SetInfo(ast_id, target);
- }
- break;
-
- case Code::UNARY_OP_IC:
- case Code::BINARY_OP_IC:
- case Code::COMPARE_IC:
- case Code::TO_BOOLEAN_IC:
- SetInfo(ast_id, target);
- break;
+ break;
+
+ case Code::STUB:
+ if (target->major_key() == CodeStub::CallFunction &&
+ target->has_function_cache()) {
+ Object* value = CallFunctionStub::GetCachedValue(reloc_entry.pc());
+ if (value->IsJSFunction()) {
+ SetInfo(ast_id, value);
+ }
+ }
+ break;
- default:
- break;
+ default:
+ break;
+ }
}
}
diff --git a/deps/v8/src/type-info.h b/deps/v8/src/type-info.h
index 448e4c94e..7c9c05ef0 100644
--- a/deps/v8/src/type-info.h
+++ b/deps/v8/src/type-info.h
@@ -64,6 +64,8 @@ class TypeInfo {
static TypeInfo Integer32() { return TypeInfo(kInteger32); }
// We know it's a Smi.
static TypeInfo Smi() { return TypeInfo(kSmi); }
+ // We know it's a Symbol.
+ static TypeInfo Symbol() { return TypeInfo(kSymbol); }
// We know it's a heap number.
static TypeInfo Double() { return TypeInfo(kDouble); }
// We know it's a string.
@@ -137,6 +139,16 @@ class TypeInfo {
return ((type_ & kSmi) == kSmi);
}
+ inline bool IsSymbol() {
+ ASSERT(type_ != kUninitialized);
+ return ((type_ & kSymbol) == kSymbol);
+ }
+
+ inline bool IsNonSymbol() {
+ ASSERT(type_ != kUninitialized);
+ return ((type_ & kSymbol) == kString);
+ }
+
inline bool IsInteger32() {
ASSERT(type_ != kUninitialized);
return ((type_ & kInteger32) == kInteger32);
@@ -168,6 +180,7 @@ class TypeInfo {
case kNumber: return "Number";
case kInteger32: return "Integer32";
case kSmi: return "Smi";
+ case kSymbol: return "Symbol";
case kDouble: return "Double";
case kString: return "String";
case kNonPrimitive: return "Object";
@@ -186,6 +199,7 @@ class TypeInfo {
kSmi = 0x17, // 0010111
kDouble = 0x19, // 0011001
kString = 0x30, // 0110000
+ kSymbol = 0x32, // 0110010
kNonPrimitive = 0x40, // 1000000
kUninitialized = 0x7f // 1111111
};
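The choice of 0x32 makes kSymbol a strict refinement of kString (0x30): both string bits stay set and one extra bit distinguishes symbols, so a symbol still passes the IsString() subtype test while IsSymbol() and IsNonSymbol() discriminate on the extra bit. A standalone check of that bit arithmetic:

#include <cassert>

// The bit patterns from the enum above.
const int kString = 0x30;   // 0110000
const int kSymbol = 0x32;   // 0110010 == kString plus one extra bit

int main() {
  // A symbol still satisfies the string subtype test...
  assert((kSymbol & kString) == kString);
  // ...IsSymbol() passes only when the extra bit is present...
  assert((kSymbol & kSymbol) == kSymbol);
  assert((kString & kSymbol) != kSymbol);
  // ...and IsNonSymbol() means "string bits set, symbol bit clear".
  assert((kString & kSymbol) == kString);
  return 0;
}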
@@ -216,7 +230,9 @@ class UnaryOperation;
class TypeFeedbackOracle BASE_EMBEDDED {
public:
- TypeFeedbackOracle(Handle<Code> code, Handle<Context> global_context);
+ TypeFeedbackOracle(Handle<Code> code,
+ Handle<Context> global_context,
+ Isolate* isolate);
bool LoadIsMonomorphicNormal(Property* expr);
bool LoadIsMegamorphicWithTypeInfo(Property* expr);
@@ -243,6 +259,8 @@ class TypeFeedbackOracle BASE_EMBEDDED {
CheckType GetCallCheckType(Call* expr);
Handle<JSObject> GetPrototypeForPrimitiveCheck(CheckType check);
+ Handle<JSFunction> GetCallTarget(Call* expr);
+
bool LoadIsBuiltin(Property* expr, Builtins::Name id);
// TODO(1571) We can't use ToBooleanStub::Types as the return value because
@@ -273,13 +291,13 @@ class TypeFeedbackOracle BASE_EMBEDDED {
byte* old_start,
byte* new_start);
void ProcessRelocInfos(ZoneList<RelocInfo>* infos);
- void ProcessTarget(unsigned ast_id, Code* target);
// Returns an element from the backing store. Returns undefined if
// there is no information.
Handle<Object> GetInfo(unsigned ast_id);
Handle<Context> global_context_;
+ Isolate* isolate_;
Handle<NumberDictionary> dictionary_;
DISALLOW_COPY_AND_ASSIGN(TypeFeedbackOracle);
diff --git a/deps/v8/src/unicode.h b/deps/v8/src/unicode.h
index 39fc34968..fb9e6339e 100644
--- a/deps/v8/src/unicode.h
+++ b/deps/v8/src/unicode.h
@@ -1,4 +1,4 @@
-// Copyright 2007-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -44,7 +44,7 @@ typedef unsigned char byte;
* The max length of the result of converting the case of a single
* character.
*/
-static const int kMaxMappingSize = 4;
+const int kMaxMappingSize = 4;
template <class T, int size = 256>
class Predicate {
diff --git a/deps/v8/src/uri.js b/deps/v8/src/uri.js
index c910d756b..e76104a70 100644
--- a/deps/v8/src/uri.js
+++ b/deps/v8/src/uri.js
@@ -111,47 +111,59 @@ function URIDecodeOctets(octets, result, index) {
var o1 = octets[1];
if (o0 < 0xe0) {
var a = o0 & 0x1f;
- if ((o1 < 0x80) || (o1 > 0xbf))
+ if ((o1 < 0x80) || (o1 > 0xbf)) {
throw new $URIError("URI malformed");
+ }
var b = o1 & 0x3f;
value = (a << 6) + b;
- if (value < 0x80 || value > 0x7ff)
+ if (value < 0x80 || value > 0x7ff) {
throw new $URIError("URI malformed");
+ }
} else {
var o2 = octets[2];
if (o0 < 0xf0) {
var a = o0 & 0x0f;
- if ((o1 < 0x80) || (o1 > 0xbf))
+ if ((o1 < 0x80) || (o1 > 0xbf)) {
throw new $URIError("URI malformed");
+ }
var b = o1 & 0x3f;
- if ((o2 < 0x80) || (o2 > 0xbf))
+ if ((o2 < 0x80) || (o2 > 0xbf)) {
throw new $URIError("URI malformed");
+ }
var c = o2 & 0x3f;
value = (a << 12) + (b << 6) + c;
- if ((value < 0x800) || (value > 0xffff))
+ if ((value < 0x800) || (value > 0xffff)) {
throw new $URIError("URI malformed");
+ }
} else {
var o3 = octets[3];
if (o0 < 0xf8) {
var a = (o0 & 0x07);
- if ((o1 < 0x80) || (o1 > 0xbf))
+ if ((o1 < 0x80) || (o1 > 0xbf)) {
throw new $URIError("URI malformed");
+ }
var b = (o1 & 0x3f);
- if ((o2 < 0x80) || (o2 > 0xbf))
+ if ((o2 < 0x80) || (o2 > 0xbf)) {
throw new $URIError("URI malformed");
+ }
var c = (o2 & 0x3f);
- if ((o3 < 0x80) || (o3 > 0xbf))
+ if ((o3 < 0x80) || (o3 > 0xbf)) {
throw new $URIError("URI malformed");
+ }
var d = (o3 & 0x3f);
value = (a << 18) + (b << 12) + (c << 6) + d;
- if ((value < 0x10000) || (value > 0x10ffff))
+ if ((value < 0x10000) || (value > 0x10ffff)) {
throw new $URIError("URI malformed");
+ }
} else {
throw new $URIError("URI malformed");
}
}
}
}
+ if (0xD800 <= value && value <= 0xDFFF) {
+ throw new $URIError("URI malformed");
+ }
if (value < 0x10000) {
result[index++] = value;
return index;
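Beyond the brace-style cleanup, the substantive change in URIDecodeOctets is the new rejection of decoded values in the surrogate range 0xD800..0xDFFF, which are not valid code points and previously slipped through; the Decode() hunk below also shows the lead-byte trick, shifting until the top bit clears to count the sequence length. A small C++ rendering of the two-octet case with the same validity rules (the JavaScript above is the authoritative logic):

#include <cstdio>

// Decode one two-byte UTF-8 sequence with URIDecodeOctets' rules:
// continuation byte in [0x80, 0xBF], value in the two-byte range
// [0x80, 0x7FF], and never a surrogate. Returns -1 for "URI malformed".
int DecodeTwoByte(int o0, int o1) {
  if (o1 < 0x80 || o1 > 0xbf) return -1;
  int value = ((o0 & 0x1f) << 6) + (o1 & 0x3f);
  if (value < 0x80 || value > 0x7ff) return -1;
  // Can't fire for two bytes (max 0x7FF); kept for parity with the JS,
  // which applies the surrogate check to every multi-byte value.
  if (0xd800 <= value && value <= 0xdfff) return -1;
  return value;
}

int main() {
  std::printf("%X\n", DecodeTwoByte(0xC3, 0xA9));  // E9, i.e. U+00E9
  return 0;
}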
@@ -207,14 +219,15 @@ function Decode(uri, reserved) {
var cc = URIHexCharsToCharCode(uri.charCodeAt(++k), uri.charCodeAt(++k));
if (cc >> 7) {
var n = 0;
- while (((cc << ++n) & 0x80) != 0) ;
+ while (((cc << ++n) & 0x80) != 0) { }
if (n == 1 || n > 4) throw new $URIError("URI malformed");
var octets = new $Array(n);
octets[0] = cc;
if (k + 3 * (n - 1) >= uriLength) throw new $URIError("URI malformed");
for (var i = 1; i < n; i++) {
if (uri.charAt(++k) != '%') throw new $URIError("URI malformed");
- octets[i] = URIHexCharsToCharCode(uri.charCodeAt(++k), uri.charCodeAt(++k));
+ octets[i] = URIHexCharsToCharCode(uri.charCodeAt(++k),
+ uri.charCodeAt(++k));
}
index = URIDecodeOctets(octets, result, index);
} else {
@@ -254,7 +267,7 @@ function URIDecode(uri) {
if (63 <= cc && cc <= 64) return true;
return false;
- };
+ }
var string = ToString(uri);
return Decode(string, reservedPredicate);
}
@@ -262,7 +275,7 @@ function URIDecode(uri) {
// ECMA-262 - 15.1.3.2.
function URIDecodeComponent(component) {
- function reservedPredicate(cc) { return false; };
+ function reservedPredicate(cc) { return false; }
var string = ToString(component);
return Decode(string, reservedPredicate);
}
@@ -303,7 +316,7 @@ function URIEncode(uri) {
if (cc == 126) return true;
return false;
- };
+ }
var string = ToString(uri);
return Encode(string, unescapePredicate);
@@ -326,7 +339,7 @@ function URIEncodeComponent(component) {
if (cc == 126) return true;
return false;
- };
+ }
var string = ToString(component);
return Encode(string, unescapePredicate);
@@ -366,7 +379,9 @@ function CharCodeToHex4Str(cc) {
function IsValidHex(s) {
for (var i = 0; i < s.length; ++i) {
var cc = s.charCodeAt(i);
- if ((48 <= cc && cc <= 57) || (65 <= cc && cc <= 70) || (97 <= cc && cc <= 102)) {
+ if ((48 <= cc && cc <= 57) ||
+ (65 <= cc && cc <= 70) ||
+ (97 <= cc && cc <= 102)) {
// '0'..'9', 'A'..'F' and 'a' .. 'f'.
} else {
return false;
diff --git a/deps/v8/src/utils.h b/deps/v8/src/utils.h
index 26c522b89..68b1517de 100644
--- a/deps/v8/src/utils.h
+++ b/deps/v8/src/utils.h
@@ -47,13 +47,13 @@ namespace internal {
// Returns true iff x is a power of 2 (or zero). Cannot be used with the
// maximally negative value of the type T (the -1 overflows).
template <typename T>
-static inline bool IsPowerOf2(T x) {
+inline bool IsPowerOf2(T x) {
return IS_POWER_OF_TWO(x);
}
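The recurring edit across this utils.h diff is dropping `static` from header-defined functions. In a header, `static inline` gives every translation unit its own internal-linkage copy (distinct addresses, possible code bloat), while plain `inline` keeps external linkage and lets the linker fold the copies; presumably that is the motivation, though the commit itself doesn't say. In miniature:

// some_header.h (illustrative)
static inline int f() { return 1; }  // internal linkage: one copy per .cc,
                                     // &f differs between translation units
inline int g() { return 1; }         // external linkage: definitions merged,
                                     // &g identical everywhere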
// X must be a power of 2. Returns the number of trailing zeros.
-static inline int WhichPowerOf2(uint32_t x) {
+inline int WhichPowerOf2(uint32_t x) {
ASSERT(IsPowerOf2(x));
ASSERT(x != 0);
int bits = 0;
@@ -88,7 +88,7 @@ static inline int WhichPowerOf2(uint32_t x) {
// The C++ standard leaves the semantics of '>>' undefined for
// negative signed operands. Most implementations do the right thing,
// though.
-static inline int ArithmeticShiftRight(int x, int s) {
+inline int ArithmeticShiftRight(int x, int s) {
return x >> s;
}
@@ -97,7 +97,7 @@ static inline int ArithmeticShiftRight(int x, int s) {
// This allows conversion of Addresses and integral types into
// 0-relative int offsets.
template <typename T>
-static inline intptr_t OffsetFrom(T x) {
+inline intptr_t OffsetFrom(T x) {
return x - static_cast<T>(0);
}
@@ -106,14 +106,14 @@ static inline intptr_t OffsetFrom(T x) {
// This allows conversion of 0-relative int offsets into Addresses and
// integral types.
template <typename T>
-static inline T AddressFrom(intptr_t x) {
+inline T AddressFrom(intptr_t x) {
return static_cast<T>(static_cast<T>(0) + x);
}
// Return the largest multiple of m which is <= x.
template <typename T>
-static inline T RoundDown(T x, int m) {
+inline T RoundDown(T x, intptr_t m) {
ASSERT(IsPowerOf2(m));
return AddressFrom<T>(OffsetFrom(x) & -m);
}
@@ -121,13 +121,13 @@ static inline T RoundDown(T x, int m) {
// Return the smallest multiple of m which is >= x.
template <typename T>
-static inline T RoundUp(T x, int m) {
- return RoundDown(x + m - 1, m);
+inline T RoundUp(T x, intptr_t m) {
+ return RoundDown<T>(static_cast<T>(x + m - 1), m);
}
template <typename T>
-static int Compare(const T& a, const T& b) {
+int Compare(const T& a, const T& b) {
if (a == b)
return 0;
else if (a < b)
@@ -138,16 +138,26 @@ static int Compare(const T& a, const T& b) {
template <typename T>
-static int PointerValueCompare(const T* a, const T* b) {
+int PointerValueCompare(const T* a, const T* b) {
return Compare<T>(*a, *b);
}
+// Comparison function for the object pointer values of two handlified
+// objects. The handles are passed as pointers to the handles.
+template<typename T> class Handle; // Forward declaration.
+template <typename T>
+int HandleObjectPointerCompare(const Handle<T>* a, const Handle<T>* b) {
+ return Compare<T*>(*(*a), *(*b));
+}
+
+
// Returns the smallest power of two which is >= x. If you pass in a
// number that is already a power of two, it is returned as is.
// Implementation is from "Hacker's Delight" by Henry S. Warren, Jr.,
// figure 3-3, page 48, where the function is called clp2.
-static inline uint32_t RoundUpToPowerOf2(uint32_t x) {
+inline uint32_t RoundUpToPowerOf2(uint32_t x) {
ASSERT(x <= 0x80000000u);
x = x - 1;
x = x | (x >> 1);
@@ -159,18 +169,23 @@ static inline uint32_t RoundUpToPowerOf2(uint32_t x) {
}
+inline uint32_t RoundDownToPowerOf2(uint32_t x) {
+ uint32_t rounded_up = RoundUpToPowerOf2(x);
+ if (rounded_up > x) return rounded_up >> 1;
+ return rounded_up;
+}
+
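
RoundUpToPowerOf2 is the "clp2" routine cited above from Hacker's Delight: OR-ing x with shifted copies of itself smears the highest set bit into every lower position, after which adding one carries into the next power of two. A standalone sketch:

#include <cassert>
#include <cstdint>

uint32_t NextPowerOf2(uint32_t x) {
  x = x - 1;       // So that exact powers of two map to themselves.
  x |= x >> 1;     // After these five ORs, every bit below the highest
  x |= x >> 2;     // set bit is 1 ...
  x |= x >> 4;
  x |= x >> 8;
  x |= x >> 16;
  return x + 1;    // ... so adding 1 yields the next power of two.
}

int main() {
  assert(NextPowerOf2(33) == 64);
  assert(NextPowerOf2(64) == 64);
  assert(NextPowerOf2(1) == 1);
  return 0;
}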
-template <typename T>
-static inline bool IsAligned(T value, T alignment) {
- ASSERT(IsPowerOf2(alignment));
+template <typename T, typename U>
+inline bool IsAligned(T value, U alignment) {
return (value & (alignment - 1)) == 0;
}
// Returns true if (addr + offset) is aligned.
-static inline bool IsAddressAligned(Address addr,
- intptr_t alignment,
- int offset) {
+inline bool IsAddressAligned(Address addr,
+ intptr_t alignment,
+ int offset = 0) {
intptr_t offs = OffsetFrom(addr + offset);
return IsAligned(offs, alignment);
}
@@ -178,14 +193,14 @@ static inline bool IsAddressAligned(Address addr,
// Returns the maximum of the two parameters.
template <typename T>
-static T Max(T a, T b) {
+T Max(T a, T b) {
return a < b ? b : a;
}
// Returns the minimum of the two parameters.
template <typename T>
-static T Min(T a, T b) {
+T Min(T a, T b) {
return a < b ? a : b;
}
@@ -239,7 +254,7 @@ class BitField {
// Thomas Wang, Integer Hash Functions.
// http://www.concentric.net/~Ttwang/tech/inthash.htm
-static inline uint32_t ComputeIntegerHash(uint32_t key) {
+inline uint32_t ComputeIntegerHash(uint32_t key) {
uint32_t hash = key;
hash = ~hash + (hash << 15); // hash = (hash << 15) - hash - 1;
hash = hash ^ (hash >> 12);
@@ -251,7 +266,19 @@ static inline uint32_t ComputeIntegerHash(uint32_t key) {
}
-static inline uint32_t ComputePointerHash(void* ptr) {
+inline uint32_t ComputeLongHash(uint64_t key) {
+ uint64_t hash = key;
+ hash = ~hash + (hash << 18); // hash = (hash << 18) - hash - 1;
+ hash = hash ^ (hash >> 31);
+ hash = hash * 21; // hash = (hash + (hash << 2)) + (hash << 4);
+ hash = hash ^ (hash >> 11);
+ hash = hash + (hash << 6);
+ hash = hash ^ (hash >> 22);
+ return (uint32_t) hash;
+}
+
+
+inline uint32_t ComputePointerHash(void* ptr) {
return ComputeIntegerHash(
static_cast<uint32_t>(reinterpret_cast<intptr_t>(ptr)));
}
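
The new ComputeLongHash follows the same Thomas Wang construction as ComputeIntegerHash above, folding 64 bits down to 32 with alternating shifts, XORs, and multiplies. A sketch of how such a mixer is typically used to pick a bucket in a power-of-two-sized table (standalone copy, illustrative names):

#include <cstdint>
#include <cstdio>

uint32_t Mix64To32(uint64_t hash) {
  hash = ~hash + (hash << 18);  // hash = (hash << 18) - hash - 1;
  hash = hash ^ (hash >> 31);
  hash = hash * 21;             // hash = (hash + (hash << 2)) + (hash << 4);
  hash = hash ^ (hash >> 11);
  hash = hash + (hash << 6);
  hash = hash ^ (hash >> 22);
  return static_cast<uint32_t>(hash);
}

int main() {
  const uint32_t kBuckets = 16;  // Power of two, so '&' works as modulo.
  for (uint64_t key = 1; key <= 4; key++) {
    // Sequential keys land in scattered buckets thanks to the mixing.
    printf("key %llu -> bucket %u\n",
           static_cast<unsigned long long>(key),
           Mix64To32(key) & (kBuckets - 1));
  }
  return 0;
}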
@@ -707,7 +734,7 @@ class SequenceCollector : public Collector<T, growth_factor, max_growth> {
// Compare ASCII/16bit chars to ASCII/16bit chars.
template <typename lchar, typename rchar>
-static inline int CompareChars(const lchar* lhs, const rchar* rhs, int chars) {
+inline int CompareChars(const lchar* lhs, const rchar* rhs, int chars) {
const lchar* limit = lhs + chars;
#ifdef V8_HOST_CAN_READ_UNALIGNED
if (sizeof(*lhs) == sizeof(*rhs)) {
@@ -734,7 +761,7 @@ static inline int CompareChars(const lchar* lhs, const rchar* rhs, int chars) {
// Calculate 10^exponent.
-static inline int TenToThe(int exponent) {
+inline int TenToThe(int exponent) {
ASSERT(exponent <= 9);
ASSERT(exponent >= 1);
int answer = 10;
diff --git a/deps/v8/src/v8-counters.h b/deps/v8/src/v8-counters.h
index 2de830300..47341e72c 100644
--- a/deps/v8/src/v8-counters.h
+++ b/deps/v8/src/v8-counters.h
@@ -107,7 +107,10 @@ namespace internal {
SC(contexts_created_by_snapshot, V8.ContextsCreatedBySnapshot) \
/* Number of code objects found from pc. */ \
SC(pc_to_code, V8.PcToCode) \
- SC(pc_to_code_cached, V8.PcToCodeCached)
+ SC(pc_to_code_cached, V8.PcToCodeCached) \
+ /* The store-buffer implementation of the write barrier. */ \
+ SC(store_buffer_compactions, V8.StoreBufferCompactions) \
+ SC(store_buffer_overflows, V8.StoreBufferOverflows)
#define STATS_COUNTER_LIST_2(SC) \
@@ -126,10 +129,6 @@ namespace internal {
V8.GCCompactorCausedByWeakHandles) \
SC(gc_last_resort_from_js, V8.GCLastResortFromJS) \
SC(gc_last_resort_from_handles, V8.GCLastResortFromHandles) \
- SC(map_to_fast_elements, V8.MapToFastElements) \
- SC(map_to_fast_double_elements, V8.MapToFastDoubleElements) \
- SC(map_to_slow_elements, V8.MapToSlowElements) \
- SC(map_to_external_array_elements, V8.MapToExternalArrayElements) \
/* How is the generic keyed-load stub used? */ \
SC(keyed_load_generic_smi, V8.KeyedLoadGenericSmi) \
SC(keyed_load_generic_symbol, V8.KeyedLoadGenericSymbol) \
diff --git a/deps/v8/src/v8.cc b/deps/v8/src/v8.cc
index 1e9b5dc14..634c5c3e7 100644
--- a/deps/v8/src/v8.cc
+++ b/deps/v8/src/v8.cc
@@ -38,6 +38,7 @@
#include "log.h"
#include "runtime-profiler.h"
#include "serialize.h"
+#include "store-buffer.h"
namespace v8 {
namespace internal {
@@ -56,6 +57,15 @@ static EntropySource entropy_source;
bool V8::Initialize(Deserializer* des) {
+ // Setting --harmony implies all other harmony flags.
+ // TODO(rossberg): Is there a better place to put this?
+ if (FLAG_harmony) {
+ FLAG_harmony_typeof = true;
+ FLAG_harmony_scoping = true;
+ FLAG_harmony_proxies = true;
+ FLAG_harmony_collections = true;
+ }
+
InitializeOncePerProcess();
// The current thread may not have entered an isolate to run yet.
@@ -140,9 +150,10 @@ void V8::SetEntropySource(EntropySource source) {
// Used by JavaScript APIs
-uint32_t V8::Random(Isolate* isolate) {
- ASSERT(isolate == Isolate::Current());
- return random_base(isolate->random_seed());
+uint32_t V8::Random(Context* context) {
+ ASSERT(context->IsGlobalContext());
+ ByteArray* seed = context->random_seed();
+ return random_base(reinterpret_cast<uint32_t*>(seed->GetDataStartAddress()));
}
@@ -155,13 +166,13 @@ uint32_t V8::RandomPrivate(Isolate* isolate) {
}
-bool V8::IdleNotification() {
+bool V8::IdleNotification(int hint) {
// Returning true tells the caller that there is no need to call
// IdleNotification again.
if (!FLAG_use_idle_notification) return true;
// Tell the heap that it may want to adjust.
- return HEAP->IdleNotification();
+ return HEAP->IdleNotification(hint);
}
@@ -172,8 +183,9 @@ typedef union {
} double_int_union;
-Object* V8::FillHeapNumberWithRandom(Object* heap_number, Isolate* isolate) {
- uint64_t random_bits = Random(isolate);
+Object* V8::FillHeapNumberWithRandom(Object* heap_number,
+ Context* context) {
+ uint64_t random_bits = Random(context);
// Make a double* from address (heap_number + sizeof(double)).
double_int_union* r = reinterpret_cast<double_int_union*>(
reinterpret_cast<char*>(heap_number) +
@@ -215,6 +227,12 @@ void V8::InitializeOncePerProcess() {
FLAG_peephole_optimization = !use_crankshaft_;
ElementsAccessor::InitializeOncePerProcess();
+
+ if (FLAG_stress_compaction) {
+ FLAG_force_marking_deque_overflows = true;
+ FLAG_gc_global = true;
+ FLAG_max_new_space_size = (1 << (kPageSizeBits - 10)) * 2;
+ }
}
} } // namespace v8::internal
diff --git a/deps/v8/src/v8.h b/deps/v8/src/v8.h
index e565ca5ae..71e7fe4bf 100644
--- a/deps/v8/src/v8.h
+++ b/deps/v8/src/v8.h
@@ -60,10 +60,11 @@
#include "objects-inl.h"
#include "spaces-inl.h"
#include "heap-inl.h"
+#include "incremental-marking-inl.h"
+#include "mark-compact-inl.h"
#include "log-inl.h"
#include "cpu-profiler-inl.h"
#include "handles-inl.h"
-#include "isolate-inl.h"
namespace v8 {
namespace internal {
@@ -95,17 +96,17 @@ class V8 : public AllStatic {
// generation.
static void SetEntropySource(EntropySource source);
// Random number generation support. Not cryptographically safe.
- static uint32_t Random(Isolate* isolate);
+ static uint32_t Random(Context* context);
// We use random numbers internally in memory allocation and in the
// compilers for security. In order to prevent information leaks we
// use a separate random state for internal random number
// generation.
static uint32_t RandomPrivate(Isolate* isolate);
static Object* FillHeapNumberWithRandom(Object* heap_number,
- Isolate* isolate);
+ Context* context);
// Idle notification directly from the API.
- static bool IdleNotification();
+ static bool IdleNotification(int hint);
private:
static void InitializeOncePerProcess();
@@ -124,6 +125,15 @@ class V8 : public AllStatic {
static bool use_crankshaft_;
};
+
+// JavaScript defines two kinds of 'nil'.
+enum NilValue { kNullValue, kUndefinedValue };
+
+
+// JavaScript defines two kinds of equality.
+enum EqualityKind { kStrictEquality, kNonStrictEquality };
+
+
} } // namespace v8::internal
namespace i = v8::internal;
diff --git a/deps/v8/src/v8conversions.h b/deps/v8/src/v8conversions.h
index 1840e3a34..0147d8c37 100644
--- a/deps/v8/src/v8conversions.h
+++ b/deps/v8/src/v8conversions.h
@@ -34,13 +34,13 @@ namespace v8 {
namespace internal {
// Convert from Number object to C integer.
-static inline int32_t NumberToInt32(Object* number) {
+inline int32_t NumberToInt32(Object* number) {
if (number->IsSmi()) return Smi::cast(number)->value();
return DoubleToInt32(number->Number());
}
-static inline uint32_t NumberToUint32(Object* number) {
+inline uint32_t NumberToUint32(Object* number) {
if (number->IsSmi()) return Smi::cast(number)->value();
return DoubleToUint32(number->Number());
}
diff --git a/deps/v8/src/v8globals.h b/deps/v8/src/v8globals.h
index eb5c49d75..005cdbdae 100644
--- a/deps/v8/src/v8globals.h
+++ b/deps/v8/src/v8globals.h
@@ -29,6 +29,7 @@
#define V8_V8GLOBALS_H_
#include "globals.h"
+#include "checks.h"
namespace v8 {
namespace internal {
@@ -79,18 +80,20 @@ const Address kFromSpaceZapValue =
reinterpret_cast<Address>(V8_UINT64_C(0x1beefdad0beefdaf));
const uint64_t kDebugZapValue = V8_UINT64_C(0xbadbaddbbadbaddb);
const uint64_t kSlotsZapValue = V8_UINT64_C(0xbeefdeadbeefdeef);
+const uint64_t kFreeListZapValue = 0xfeed1eaffeed1eaf;
#else
const Address kZapValue = reinterpret_cast<Address>(0xdeadbeef);
const Address kHandleZapValue = reinterpret_cast<Address>(0xbaddeaf);
const Address kFromSpaceZapValue = reinterpret_cast<Address>(0xbeefdaf);
const uint32_t kSlotsZapValue = 0xbeefdeef;
const uint32_t kDebugZapValue = 0xbadbaddb;
+const uint32_t kFreeListZapValue = 0xfeed1eaf;
#endif
-// Number of bits to represent the page size for paged spaces. The value of 13
-// gives 8K bytes per page.
-const int kPageSizeBits = 13;
+// Number of bits to represent the page size for paged spaces. The value of 20
+// gives 1M bytes per page.
+const int kPageSizeBits = 20;
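
With kPageSizeBits raised from 13 to 20, pages grow from 8 KB to 1 MB. A sketch of how such a constant typically drives derived page arithmetic (constants re-declared here for illustration; the derived names are assumptions, not V8's):

#include <cassert>
#include <cstdint>

const int kPageSizeBits = 20;
const intptr_t kPageSize = static_cast<intptr_t>(1) << kPageSizeBits;  // 1 MB.
const intptr_t kPageAlignmentMask = kPageSize - 1;

int main() {
  assert(kPageSize == 1024 * 1024);
  // Masking off the low bits maps any address to the start of its page.
  intptr_t addr = (static_cast<intptr_t>(3) << kPageSizeBits) + 0x1234;
  intptr_t page_start = addr & ~kPageAlignmentMask;
  assert(page_start == static_cast<intptr_t>(3) << kPageSizeBits);
  return 0;
}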
// On Intel architecture, cache line size is 64 bytes.
// On ARM it may be less (32 bytes), but as far as this constant is
@@ -98,10 +101,6 @@ const int kPageSizeBits = 13;
const int kProcessorCacheLineSize = 64;
// Constants relevant to double precision floating point numbers.
-
-// Quiet NaNs have bits 51 to 62 set, possibly the sign bit, and no
-// other bits set.
-const uint64_t kQuietNaNMask = static_cast<uint64_t>(0xfff) << 51;
// If looking only at the top 32 bits, the QNaN mask is bits 19 to 30.
const uint32_t kQuietNaNHighBitsMask = 0xfff << (51 - 32);
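
kQuietNaNHighBitsMask packs the quiet bit (bit 51) together with the 11 exponent bits into positions 19..30 of the high word. A standalone sketch of testing a double against such a mask:

#include <cassert>
#include <cmath>
#include <cstdint>
#include <cstring>

bool HighBitsAreQuietNaN(double d) {
  uint64_t bits;
  memcpy(&bits, &d, sizeof bits);  // Well-defined way to read the bits.
  uint32_t high = static_cast<uint32_t>(bits >> 32);
  const uint32_t kMask = 0xfff << (51 - 32);  // Bits 19..30 of the high word.
  return (high & kMask) == kMask;
}

int main() {
  assert(HighBitsAreQuietNaN(std::nan("")));  // Quiet NaN sets all mask bits.
  assert(!HighBitsAreQuietNaN(1.0));          // Normal exponent does not.
  return 0;
}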
@@ -128,9 +127,9 @@ class DescriptorArray;
class Expression;
class ExternalReference;
class FixedArray;
-class FunctionEntry;
class FunctionLiteral;
class FunctionTemplateInfo;
+class MemoryChunk;
class NumberDictionary;
class StringDictionary;
template <typename T> class Handle;
@@ -161,8 +160,7 @@ class RegExpTree;
class RegExpCompiler;
class RegExpVisitor;
class Scope;
-template<class Allocator = FreeStoreAllocationPolicy> class ScopeInfo;
-class SerializedScopeInfo;
+class ScopeInfo;
class Script;
class Slot;
class Smi;
@@ -254,12 +252,6 @@ struct CodeDesc {
};
-// Callback function on object slots, used for iterating heap object slots in
-// HeapObjects, global pointers to heap objects, etc. The callback allows the
-// callback function to change the value of the slot.
-typedef void (*ObjectSlotCallback)(HeapObject** pointer);
-
-
// Callback function used for iterating objects in heap spaces,
// for example, scanning heap objects.
typedef int (*HeapObjectCallback)(HeapObject* obj);
@@ -306,7 +298,9 @@ enum CallFunctionFlags {
NO_CALL_FUNCTION_FLAGS = 0,
// Receiver might implicitly be the global object. If it is, the
// hole is passed to the call function stub.
- RECEIVER_MIGHT_BE_IMPLICIT = 1 << 0
+ RECEIVER_MIGHT_BE_IMPLICIT = 1 << 0,
+ // The call target is cached in the instruction stream.
+ RECORD_CALL_TARGET = 1 << 1
};
@@ -316,28 +310,17 @@ enum InlineCacheHolderFlag {
};
-// Type of properties.
-// Order of properties is significant.
-// Must fit in the BitField PropertyDetails::TypeField.
-// A copy of this is in mirror-debugger.js.
-enum PropertyType {
- NORMAL = 0, // only in slow mode
- FIELD = 1, // only in fast mode
- CONSTANT_FUNCTION = 2, // only in fast mode
- CALLBACKS = 3,
- HANDLER = 4, // only in lookup results, not in descriptors
- INTERCEPTOR = 5, // only in lookup results, not in descriptors
- MAP_TRANSITION = 6, // only in fast mode
- ELEMENTS_TRANSITION = 7,
- CONSTANT_TRANSITION = 8, // only in fast mode
- NULL_DESCRIPTOR = 9, // only in fast mode
- // All properties before MAP_TRANSITION are real.
- FIRST_PHANTOM_PROPERTY_TYPE = MAP_TRANSITION,
- // There are no IC stubs for NULL_DESCRIPTORS. Therefore,
- // NULL_DESCRIPTOR can be used as the type flag for IC stubs for
- // nonexistent properties.
- NONEXISTENT = NULL_DESCRIPTOR
-};
+// The Store Buffer (GC).
+typedef enum {
+ kStoreBufferFullEvent,
+ kStoreBufferStartScanningPagesEvent,
+ kStoreBufferScanningPageEvent
+} StoreBufferEvent;
+
+
+typedef void (*StoreBufferCallback)(Heap* heap,
+ MemoryChunk* page,
+ StoreBufferEvent event);
// Whether to remove map transitions and constant transitions from a
@@ -474,21 +457,11 @@ enum CpuFeature { SSE4_1 = 32 + 19, // x86
SAHF = 0, // x86
FPU = 1}; // MIPS
-// The Strict Mode (ECMA-262 5th edition, 4.2.2).
-enum StrictModeFlag {
- kNonStrictMode,
- kStrictMode,
- // This value is never used, but is needed to prevent GCC 4.5 from failing
- // to compile when we assert that a flag is either kNonStrictMode or
- // kStrictMode.
- kInvalidStrictFlag
-};
-
// Used to specify if a macro instruction must perform a smi check on tagged
// values.
enum SmiCheckType {
- DONT_DO_SMI_CHECK = 0,
+ DONT_DO_SMI_CHECK,
DO_SMI_CHECK
};
@@ -496,20 +469,105 @@ enum SmiCheckType {
// Used to specify whether a receiver is implicitly or explicitly
// provided to a call.
enum CallKind {
- CALL_AS_METHOD = 0,
+ CALL_AS_METHOD,
CALL_AS_FUNCTION
};
-static const uint32_t kHoleNanUpper32 = 0x7FFFFFFF;
-static const uint32_t kHoleNanLower32 = 0xFFFFFFFF;
-static const uint32_t kNaNOrInfinityLowerBoundUpper32 = 0x7FF00000;
+enum ScopeType {
+ EVAL_SCOPE, // The top-level scope for an eval source.
+ FUNCTION_SCOPE, // The top-level scope for a function.
+ GLOBAL_SCOPE, // The top-level scope for a program or a top-level eval.
+ CATCH_SCOPE, // The scope introduced by catch.
+ BLOCK_SCOPE, // The scope introduced by a new block.
+ WITH_SCOPE // The scope introduced by with.
+};
+
+
+const uint32_t kHoleNanUpper32 = 0x7FFFFFFF;
+const uint32_t kHoleNanLower32 = 0xFFFFFFFF;
+const uint32_t kNaNOrInfinityLowerBoundUpper32 = 0x7FF00000;
const uint64_t kHoleNanInt64 =
(static_cast<uint64_t>(kHoleNanUpper32) << 32) | kHoleNanLower32;
const uint64_t kLastNonNaNInt64 =
(static_cast<uint64_t>(kNaNOrInfinityLowerBoundUpper32) << 32);
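
kHoleNanInt64 (sign bit clear, every other bit set) is the NaN bit pattern V8 uses to mark holes in fast double arrays; it is a NaN, but not the one ordinary arithmetic produces. A sketch verifying that on a typical IEEE-754 platform (standalone, not V8 API):

#include <cassert>
#include <cmath>
#include <cstdint>
#include <cstring>
#include <limits>

int main() {
  const uint64_t kHoleNanInt64 =
      (static_cast<uint64_t>(0x7FFFFFFF) << 32) | 0xFFFFFFFF;
  double hole;
  memcpy(&hole, &kHoleNanInt64, sizeof hole);
  assert(std::isnan(hole));  // Exponent all ones, non-zero mantissa.

  // The standard quiet NaN (usually 0x7FF8000000000000) has different bits,
  // so the hole pattern stays distinguishable from computed NaNs.
  uint64_t quiet_bits;
  double quiet = std::numeric_limits<double>::quiet_NaN();
  memcpy(&quiet_bits, &quiet, sizeof quiet);
  assert(quiet_bits != kHoleNanInt64);
  return 0;
}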
+
+enum VariableMode {
+ // User declared variables:
+ VAR, // declared via 'var', and 'function' declarations
+
+ CONST, // declared via 'const' declarations
+
+ CONST_HARMONY, // declared via 'const' declarations in harmony mode
+
+ LET, // declared via 'let' declarations
+
+ // Variables introduced by the compiler:
+ DYNAMIC, // always require dynamic lookup (we don't know
+ // the declaration)
+
+ DYNAMIC_GLOBAL, // requires dynamic lookup, but we know that the
+ // variable is global unless it has been shadowed
+ // by an eval-introduced variable
+
+ DYNAMIC_LOCAL, // requires dynamic lookup, but we know that the
+ // variable is local and where it is unless it
+ // has been shadowed by an eval-introduced
+ // variable
+
+ INTERNAL, // like VAR, but not user-visible (may or may not
+ // be in a context)
+
+ TEMPORARY // temporary variables (not user-visible), never
+ // in a context
+};
+
+
+// ES6 Draft Rev3 10.2 specifies declarative environment records with mutable
+// and immutable bindings that can be in two states: initialized and
+// uninitialized. In ES5 only immutable bindings have these two states. When
+// accessing a binding, it needs to be checked for initialization. However in
+// the following cases the binding is initialized immediately after creation
+// so the initialization check can always be skipped:
+// 1. Var declared local variables.
+// var foo;
+// 2. A local variable introduced by a function declaration.
+// function foo() {}
+// 3. Parameters
+// function x(foo) {}
+// 4. Catch bound variables.
+// try {} catch (foo) {}
+// 5. Function variables of named function expressions.
+// var x = function foo() {}
+// 6. Implicit binding of 'this'.
+// 7. Implicit binding of 'arguments' in functions.
+//
+// ES5 specifies object environment records, which are introduced by elements
+// such as Program and WithStatement and associate identifier bindings with
+// the properties of some object. In the specification only mutable bindings
+// exist (which may be non-writable) and have no distinct initialization step.
+// However V8 allows const declarations in global code with distinct creation
+// and initialization steps, which are represented by non-writable properties
+// in the global object. As a result, these bindings also need to be checked
+// for initialization.
+//
+// The following enum specifies a flag that indicates if the binding needs a
+// distinct initialization step (kNeedsInitialization) or if the binding is
+// immediately initialized upon creation (kCreatedInitialized).
+enum InitializationFlag {
+ kNeedsInitialization,
+ kCreatedInitialized
+};
+
+
+enum ClearExceptionFlag {
+ KEEP_EXCEPTION,
+ CLEAR_EXCEPTION
+};
+
+
} } // namespace v8::internal
#endif // V8_V8GLOBALS_H_
diff --git a/deps/v8/src/v8memory.h b/deps/v8/src/v8memory.h
index 901e78d29..f71de8207 100644
--- a/deps/v8/src/v8memory.h
+++ b/deps/v8/src/v8memory.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -60,6 +60,10 @@ class Memory {
return *reinterpret_cast<int*>(addr);
}
+ static unsigned& unsigned_at(Address addr) {
+ return *reinterpret_cast<unsigned*>(addr);
+ }
+
static double& double_at(Address addr) {
return *reinterpret_cast<double*>(addr);
}
diff --git a/deps/v8/src/v8natives.js b/deps/v8/src/v8natives.js
index 588bdb21b..11b1a7eeb 100644
--- a/deps/v8/src/v8natives.js
+++ b/deps/v8/src/v8natives.js
@@ -60,18 +60,6 @@ function InstallFunctions(object, attributes, functions) {
%ToFastProperties(object);
}
-// Emulates JSC by installing functions on a hidden prototype that
-// lies above the current object/prototype. This lets you override
-// functions on String.prototype etc. and then restore the old function
-// with delete. See http://code.google.com/p/chromium/issues/detail?id=1717
-function InstallFunctionsOnHiddenPrototype(object, attributes, functions) {
- %CheckIsBootstrapping();
- var hidden_prototype = new $Object();
- %SetHiddenPrototype(object, hidden_prototype);
- InstallFunctions(hidden_prototype, attributes, functions);
-}
-
-
// Prevents changes to the prototype of a built-in function.
// The "prototype" property of the function object is made non-configurable,
// and the prototype object is made non-extensible. The latter prevents
@@ -139,8 +127,9 @@ function GlobalParseInt(string, radix) {
// The spec says ToString should be evaluated before ToInt32.
string = TO_STRING_INLINE(string);
radix = TO_INT32(radix);
- if (!(radix == 0 || (2 <= radix && radix <= 36)))
+ if (!(radix == 0 || (2 <= radix && radix <= 36))) {
return $NaN;
+ }
}
if (%_HasCachedArrayIndex(string) &&
@@ -162,28 +151,23 @@ function GlobalParseFloat(string) {
function GlobalEval(x) {
if (!IS_STRING(x)) return x;
- var receiver = this;
var global_receiver = %GlobalReceiver(global);
-
- if (receiver == null && !IS_UNDETECTABLE(receiver)) {
- receiver = global_receiver;
- }
-
- var this_is_global_receiver = (receiver === global_receiver);
var global_is_detached = (global === global_receiver);
// For consistency with JSC we require the global object passed to
// eval to be the global object from which 'eval' originated. This
// is not mandated by the spec.
- if (!this_is_global_receiver || global_is_detached) {
- throw new $EvalError('The "this" object passed to eval must ' +
+ // We only throw if the global has been detached, since we need the
+ // receiver as this-value for the call.
+ if (global_is_detached) {
+ throw new $EvalError('The "this" value passed to eval must ' +
'be the global object from which eval originated');
}
var f = %CompileString(x);
if (!IS_FUNCTION(f)) return f;
- return %_CallFunction(receiver, f);
+ return %_CallFunction(global_receiver, f);
}
@@ -193,13 +177,14 @@ function GlobalEval(x) {
function SetUpGlobal() {
%CheckIsBootstrapping();
// ECMA 262 - 15.1.1.1.
- %SetProperty(global, "NaN", $NaN, DONT_ENUM | DONT_DELETE);
+ %SetProperty(global, "NaN", $NaN, DONT_ENUM | DONT_DELETE | READ_ONLY);
// ECMA-262 - 15.1.1.2.
- %SetProperty(global, "Infinity", 1/0, DONT_ENUM | DONT_DELETE);
+ %SetProperty(global, "Infinity", 1/0, DONT_ENUM | DONT_DELETE | READ_ONLY);
// ECMA-262 - 15.1.1.3.
- %SetProperty(global, "undefined", void 0, DONT_ENUM | DONT_DELETE);
+ %SetProperty(global, "undefined", void 0,
+ DONT_ENUM | DONT_DELETE | READ_ONLY);
// Set up non-enumerable function on the global object.
InstallFunctions(global, DONT_ENUM, $Array(
@@ -299,7 +284,8 @@ function ObjectDefineGetter(name, fun) {
receiver = %GlobalReceiver(global);
}
if (!IS_SPEC_FUNCTION(fun)) {
- throw new $TypeError('Object.prototype.__defineGetter__: Expecting function');
+ throw new $TypeError(
+ 'Object.prototype.__defineGetter__: Expecting function');
}
var desc = new PropertyDescriptor();
desc.setGet(fun);
@@ -345,8 +331,9 @@ function ObjectLookupSetter(name) {
function ObjectKeys(obj) {
- if (!IS_SPEC_OBJECT(obj))
+ if (!IS_SPEC_OBJECT(obj)) {
throw MakeTypeError("obj_ctor_property_non_object", ["keys"]);
+ }
if (%IsJSProxy(obj)) {
var handler = %GetHandler(obj);
var names = CallTrap0(handler, "keys", DerivedKeysTrap);
@@ -372,6 +359,7 @@ function IsDataDescriptor(desc) {
// ES5 8.10.3.
function IsGenericDescriptor(desc) {
+ if (IS_UNDEFINED(desc)) return false;
return !(IsAccessorDescriptor(desc) || IsDataDescriptor(desc));
}
@@ -476,7 +464,7 @@ function ToPropertyDescriptor(obj) {
// For Harmony proxies.
function ToCompletePropertyDescriptor(obj) {
- var desc = ToPropertyDescriptor(obj)
+ var desc = ToPropertyDescriptor(obj);
if (IsGenericDescriptor(desc) || IsDataDescriptor(desc)) {
if (!desc.hasValue()) desc.setValue(void 0);
if (!desc.hasWritable()) desc.setWritable(false);
@@ -708,7 +696,7 @@ function DefineOwnProperty(obj, p, desc, should_throw) {
if (should_throw) {
throw MakeTypeError("define_disallowed", [p]);
} else {
- return;
+ return false;
}
}
@@ -738,7 +726,7 @@ function DefineOwnProperty(obj, p, desc, should_throw) {
if (should_throw) {
throw MakeTypeError("redefine_disallowed", [p]);
} else {
- return;
+ return false;
}
}
// Step 8
@@ -748,7 +736,7 @@ function DefineOwnProperty(obj, p, desc, should_throw) {
if (should_throw) {
throw MakeTypeError("redefine_disallowed", [p]);
} else {
- return;
+ return false;
}
}
// Step 10a
@@ -757,7 +745,7 @@ function DefineOwnProperty(obj, p, desc, should_throw) {
if (should_throw) {
throw MakeTypeError("redefine_disallowed", [p]);
} else {
- return;
+ return false;
}
}
if (!current.isWritable() && desc.hasValue() &&
@@ -765,7 +753,7 @@ function DefineOwnProperty(obj, p, desc, should_throw) {
if (should_throw) {
throw MakeTypeError("redefine_disallowed", [p]);
} else {
- return;
+ return false;
}
}
}
@@ -775,14 +763,14 @@ function DefineOwnProperty(obj, p, desc, should_throw) {
if (should_throw) {
throw MakeTypeError("redefine_disallowed", [p]);
} else {
- return;
+ return false;
}
}
if (desc.hasGetter() && !SameValue(desc.getGet(),current.getGet())) {
if (should_throw) {
throw MakeTypeError("redefine_disallowed", [p]);
} else {
- return;
+ return false;
}
}
}
@@ -860,17 +848,19 @@ function DefineOwnProperty(obj, p, desc, should_throw) {
// ES5 section 15.2.3.2.
function ObjectGetPrototypeOf(obj) {
- if (!IS_SPEC_OBJECT(obj))
+ if (!IS_SPEC_OBJECT(obj)) {
throw MakeTypeError("obj_ctor_property_non_object", ["getPrototypeOf"]);
+ }
return %GetPrototype(obj);
}
// ES5 section 15.2.3.3
function ObjectGetOwnPropertyDescriptor(obj, p) {
- if (!IS_SPEC_OBJECT(obj))
+ if (!IS_SPEC_OBJECT(obj)) {
throw MakeTypeError("obj_ctor_property_non_object",
["getOwnPropertyDescriptor"]);
+ }
var desc = GetOwnProperty(obj, p);
return FromPropertyDescriptor(desc);
}
@@ -883,14 +873,14 @@ function ToStringArray(obj, trap) {
}
var n = ToUint32(obj.length);
var array = new $Array(n);
- var names = {}
+ var names = {}; // TODO(rossberg): use sets once they are ready.
for (var index = 0; index < n; index++) {
var s = ToString(obj[index]);
if (s in names) {
- throw MakeTypeError("proxy_repeated_prop_name", [obj, trap, s])
+ throw MakeTypeError("proxy_repeated_prop_name", [obj, trap, s]);
}
array[index] = s;
- names.s = 0;
+ names[s] = 0;
}
return array;
}
@@ -898,9 +888,10 @@ function ToStringArray(obj, trap) {
// ES5 section 15.2.3.4.
function ObjectGetOwnPropertyNames(obj) {
- if (!IS_SPEC_OBJECT(obj))
- throw MakeTypeError("obj_ctor_property_non_object", ["getOwnPropertyNames"]);
-
+ if (!IS_SPEC_OBJECT(obj)) {
+ throw MakeTypeError("obj_ctor_property_non_object",
+ ["getOwnPropertyNames"]);
+ }
// Special handling for proxies.
if (%IsJSProxy(obj)) {
var handler = %GetHandler(obj);
@@ -917,8 +908,9 @@ function ObjectGetOwnPropertyNames(obj) {
if (%GetInterceptorInfo(obj) & 1) {
var indexedInterceptorNames =
%GetIndexedInterceptorElementNames(obj);
- if (indexedInterceptorNames)
+ if (indexedInterceptorNames) {
propertyNames = propertyNames.concat(indexedInterceptorNames);
+ }
}
// Find all the named properties.
@@ -944,8 +936,9 @@ function ObjectGetOwnPropertyNames(obj) {
// We need to check for the exact property value since for intrinsic
// properties like toString if(propertySet["toString"]) will always
// succeed.
- if (propertySet[name] === true)
+ if (propertySet[name] === true) {
continue;
+ }
propertySet[name] = true;
propertyNames[j++] = name;
}
@@ -1021,14 +1014,17 @@ function GetOwnEnumerablePropertyNames(properties) {
// ES5 section 15.2.3.7.
function ObjectDefineProperties(obj, properties) {
- if (!IS_SPEC_OBJECT(obj))
+ if (!IS_SPEC_OBJECT(obj)) {
throw MakeTypeError("obj_ctor_property_non_object", ["defineProperties"]);
+ }
var props = ToObject(properties);
var names = GetOwnEnumerablePropertyNames(props);
+ var descriptors = new InternalArray();
for (var i = 0; i < names.length; i++) {
- var name = names[i];
- var desc = ToPropertyDescriptor(props[name]);
- DefineOwnProperty(obj, name, desc, true);
+ descriptors.push(ToPropertyDescriptor(props[names[i]]));
+ }
+ for (var i = 0; i < names.length; i++) {
+ DefineOwnProperty(obj, names[i], descriptors[i], true);
}
return obj;
}
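
The rewrite above converts every descriptor before defining any property, matching the two-phase order ES5 15.2.3.7 prescribes: all reads (and their observable side effects) happen before the first mutation. A minimal C++ sketch of the same validate-then-apply pattern (illustrative types, not V8 code):

#include <map>
#include <string>
#include <utility>
#include <vector>

typedef std::pair<std::string, std::string> RawProp;

void DefineAll(std::map<std::string, int>* target,
               const std::vector<RawProp>& props) {
  // Phase 1: convert (and validate) everything up front; std::stoi throws
  // on bad input, and at this point nothing has been written yet.
  std::vector<std::pair<std::string, int> > converted;
  for (size_t i = 0; i < props.size(); i++) {
    converted.push_back(std::make_pair(props[i].first,
                                       std::stoi(props[i].second)));
  }
  // Phase 2: apply. A conversion failure can no longer leave *target
  // half-updated.
  for (size_t i = 0; i < converted.size(); i++) {
    (*target)[converted[i].first] = converted[i].second;
  }
}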
@@ -1042,12 +1038,20 @@ function ProxyFix(obj) {
throw MakeTypeError("handler_returned_undefined", [handler, "fix"]);
}
- if (IS_SPEC_FUNCTION(obj)) {
+ if (%IsJSFunctionProxy(obj)) {
var callTrap = %GetCallTrap(obj);
var constructTrap = %GetConstructTrap(obj);
var code = DelegateCallAndConstruct(callTrap, constructTrap);
%Fix(obj); // becomes a regular function
%SetCode(obj, code);
+ // TODO(rossberg): What about length and other properties? Not specified.
+ // We just put in some half-reasonable defaults for now.
+ var prototype = new $Object();
+ $Object.defineProperty(prototype, "constructor",
+ {value: obj, writable: true, enumerable: false, configurable: true});
+ // TODO(v8:1530): defineProperty does not handle prototype and length.
+ %FunctionSetPrototype(obj, prototype);
+ obj.length = 0;
} else {
%Fix(obj);
}
@@ -1237,8 +1241,9 @@ function BooleanToString() {
function BooleanValueOf() {
// NOTE: Both Boolean objects and values can enter here as
// 'this'. This is not as dictated by ECMA-262.
- if (!IS_BOOLEAN(this) && !IS_BOOLEAN_WRAPPER(this))
+ if (!IS_BOOLEAN(this) && !IS_BOOLEAN_WRAPPER(this)) {
throw new $TypeError('Boolean.prototype.valueOf is not generic');
+ }
return %_ValueOf(this);
}
@@ -1278,8 +1283,9 @@ function NumberToString(radix) {
// 'this'. This is not as dictated by ECMA-262.
var number = this;
if (!IS_NUMBER(this)) {
- if (!IS_NUMBER_WRAPPER(this))
+ if (!IS_NUMBER_WRAPPER(this)) {
throw new $TypeError('Number.prototype.toString is not generic');
+ }
// Get the value of this number in case it's an object.
number = %_ValueOf(this);
}
@@ -1312,8 +1318,9 @@ function NumberToLocaleString() {
function NumberValueOf() {
// NOTE: Both Number objects and values can enter here as
// 'this'. This is not as dictated by ECMA-262.
- if (!IS_NUMBER(this) && !IS_NUMBER_WRAPPER(this))
+ if (!IS_NUMBER(this) && !IS_NUMBER_WRAPPER(this)) {
throw new $TypeError('Number.prototype.valueOf is not generic');
+ }
return %_ValueOf(this);
}
@@ -1339,7 +1346,8 @@ function NumberToExponential(fractionDigits) {
if (!IS_UNDEFINED(fractionDigits)) {
f = TO_INTEGER(fractionDigits);
if (f < 0 || f > 20) {
- throw new $RangeError("toExponential() argument must be between 0 and 20");
+ throw new $RangeError(
+ "toExponential() argument must be between 0 and 20");
}
}
if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
@@ -1383,7 +1391,8 @@ function SetUpNumber() {
DONT_ENUM | DONT_DELETE | READ_ONLY);
// ECMA-262 section 15.7.3.2.
- %SetProperty($Number, "MIN_VALUE", 5e-324, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($Number, "MIN_VALUE", 5e-324,
+ DONT_ENUM | DONT_DELETE | READ_ONLY);
// ECMA-262 section 15.7.3.3.
%SetProperty($Number, "NaN", $NaN, DONT_ENUM | DONT_DELETE | READ_ONLY);
@@ -1455,53 +1464,54 @@ function FunctionToString() {
// ES5 15.3.4.5
function FunctionBind(this_arg) { // Length is 1.
if (!IS_SPEC_FUNCTION(this)) {
- throw new $TypeError('Bind must be called on a function');
- }
- // this_arg is not an argument that should be bound.
- var argc_bound = (%_ArgumentsLength() || 1) - 1;
- var fn = this;
-
- if (argc_bound == 0) {
- var result = function() {
- if (%_IsConstructCall()) {
- // %NewObjectFromBound implicitly uses arguments passed to this
- // function. We do not pass the arguments object explicitly to avoid
- // materializing it and guarantee that this function will be optimized.
- return %NewObjectFromBound(fn, null);
- }
- return %Apply(fn, this_arg, arguments, 0, %_ArgumentsLength());
- };
- } else {
- var bound_args = new InternalArray(argc_bound);
- for(var i = 0; i < argc_bound; i++) {
- bound_args[i] = %_Arguments(i+1);
+ throw new $TypeError('Bind must be called on a function');
+ }
+ var boundFunction = function () {
+    // 'use strict' poisons .arguments and .caller, but is otherwise not
+    // detectable.
+ "use strict";
+ // This function must not use any object literals (Object, Array, RegExp),
+ // since the literals-array is being used to store the bound data.
+ if (%_IsConstructCall()) {
+ return %NewObjectFromBound(boundFunction);
}
+ var bindings = %BoundFunctionGetBindings(boundFunction);
- var result = function() {
- // If this is a construct call we use a special runtime method
- // to generate the actual object using the bound function.
- if (%_IsConstructCall()) {
- // %NewObjectFromBound implicitly uses arguments passed to this
- // function. We do not pass the arguments object explicitly to avoid
- // materializing it and guarantee that this function will be optimized.
- return %NewObjectFromBound(fn, bound_args);
- }
-
- // Combine the args we got from the bind call with the args
- // given as argument to the invocation.
+ var argc = %_ArgumentsLength();
+ if (argc == 0) {
+ return %Apply(bindings[0], bindings[1], bindings, 2, bindings.length - 2);
+ }
+ if (bindings.length === 2) {
+ return %Apply(bindings[0], bindings[1], arguments, 0, argc);
+ }
+ var bound_argc = bindings.length - 2;
+ var argv = new InternalArray(bound_argc + argc);
+ for (var i = 0; i < bound_argc; i++) {
+ argv[i] = bindings[i + 2];
+ }
+ for (var j = 0; j < argc; j++) {
+ argv[i++] = %_Arguments(j);
+ }
+ return %Apply(bindings[0], bindings[1], argv, 0, bound_argc + argc);
+ };
+
+ %FunctionRemovePrototype(boundFunction);
+ var new_length = 0;
+ if (%_ClassOf(this) == "Function") {
+ // Function or FunctionProxy.
+ var old_length = this.length;
+ // FunctionProxies might provide a non-UInt32 value. If so, ignore it.
+ if ((typeof old_length === "number") &&
+ ((old_length >>> 0) === old_length)) {
var argc = %_ArgumentsLength();
- var args = new InternalArray(argc + argc_bound);
- // Add bound arguments.
- for (var i = 0; i < argc_bound; i++) {
- args[i] = bound_args[i];
- }
- // Add arguments from call.
- for (var i = 0; i < argc; i++) {
- args[argc_bound + i] = %_Arguments(i);
- }
- return %Apply(fn, this_arg, args, 0, argc + argc_bound);
- };
+ if (argc > 0) argc--; // Don't count the thisArg as parameter.
+ new_length = old_length - argc;
+ if (new_length < 0) new_length = 0;
+ }
}
+ // This runtime function finds any remaining arguments on the stack,
+ // so we don't pass the arguments object.
+ var result = %FunctionBindArguments(boundFunction, this,
+ this_arg, new_length);
// We already have caller and arguments properties on functions,
// which are non-configurable. It therefore makes no sense to
@@ -1509,17 +1519,7 @@ function FunctionBind(this_arg) { // Length is 1.
// that bind should make these throw a TypeError if get or set
// is called and make them non-enumerable and non-configurable.
// To be consistent with our normal functions we leave this as it is.
-
- %FunctionRemovePrototype(result);
- %FunctionSetBound(result);
- // Set the correct length. If this is a function proxy, this.length might
- // throw, or return a bogus result. Leave length alone in that case.
- // TODO(rossberg): This is underspecified in the current proxy proposal.
- try {
- var old_length = ToInteger(this.length);
- var length = (old_length - argc_bound) > 0 ? old_length - argc_bound : 0;
- %BoundFunctionSetLength(result, length);
- } catch(x) {}
+ // TODO(lrn): Do set these to be thrower.
return result;
}
diff --git a/deps/v8/src/v8utils.h b/deps/v8/src/v8utils.h
index aada521e4..c73222a29 100644
--- a/deps/v8/src/v8utils.h
+++ b/deps/v8/src/v8utils.h
@@ -142,8 +142,14 @@ inline void CopyWords(T* dst, T* src, int num_words) {
}
-template <typename T>
-static inline void MemsetPointer(T** dest, T* value, int counter) {
+template <typename T, typename U>
+inline void MemsetPointer(T** dest, U* value, int counter) {
+#ifdef DEBUG
+ T* a = NULL;
+ U* b = NULL;
+ a = b; // Fake assignment to check assignability.
+ USE(a);
+#endif // DEBUG
#if defined(V8_HOST_ARCH_IA32)
#define STOS "stosl"
#elif defined(V8_HOST_ARCH_X64)
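
The DEBUG-only dead assignment added above is a compile-time guard: it forces the compiler to prove U* converts to T* even though the fill itself treats the pointers as raw words. A standalone sketch of the idiom:

#include <cstring>

struct Base {};
struct Derived : Base {};

template <typename T, typename U>
void FillPointers(T** dest, U* value, int count) {
  // Dead code, but it fails to compile unless U* is assignable to T*.
  T* a = 0;
  U* b = 0;
  a = b;
  (void)a;
  // The fill itself is untyped, so without the check above it would
  // silently accept any pointer type.
  for (int i = 0; i < count; i++) {
    memcpy(&dest[i], &value, sizeof value);
  }
}

int main() {
  Derived d;
  Base* slots[4];
  FillPointers(slots, &d, 4);  // OK: Derived* converts to Base*.
  // FillPointers(slots, "no", 1) would be rejected at compile time.
  return 0;
}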
@@ -196,7 +202,7 @@ Vector<const char> ReadFile(FILE* file,
// Copy from ASCII/16bit chars to ASCII/16bit chars.
template <typename sourcechar, typename sinkchar>
-static inline void CopyChars(sinkchar* dest, const sourcechar* src, int chars) {
+inline void CopyChars(sinkchar* dest, const sourcechar* src, int chars) {
sinkchar* limit = dest + chars;
#ifdef V8_HOST_CAN_READ_UNALIGNED
if (sizeof(*dest) == sizeof(*src)) {
diff --git a/deps/v8/src/variables.cc b/deps/v8/src/variables.cc
index 971061b05..aa6a010fa 100644
--- a/deps/v8/src/variables.cc
+++ b/deps/v8/src/variables.cc
@@ -37,10 +37,11 @@ namespace internal {
// ----------------------------------------------------------------------------
// Implementation Variable.
-const char* Variable::Mode2String(Mode mode) {
+const char* Variable::Mode2String(VariableMode mode) {
switch (mode) {
case VAR: return "VAR";
case CONST: return "CONST";
+ case CONST_HARMONY: return "CONST";
case LET: return "LET";
case DYNAMIC: return "DYNAMIC";
case DYNAMIC_GLOBAL: return "DYNAMIC_GLOBAL";
@@ -55,21 +56,26 @@ const char* Variable::Mode2String(Mode mode) {
Variable::Variable(Scope* scope,
Handle<String> name,
- Mode mode,
+ VariableMode mode,
bool is_valid_LHS,
- Kind kind)
+ Kind kind,
+ InitializationFlag initialization_flag)
: scope_(scope),
name_(name),
mode_(mode),
kind_(kind),
location_(UNALLOCATED),
index_(-1),
+ initializer_position_(RelocInfo::kNoPosition),
local_if_not_shadowed_(NULL),
is_valid_LHS_(is_valid_LHS),
- is_accessed_from_inner_scope_(false),
- is_used_(false) {
- // names must be canonicalized for fast equality checks
+ force_context_allocation_(false),
+ is_used_(false),
+ initialization_flag_(initialization_flag) {
+ // Names must be canonicalized for fast equality checks.
ASSERT(name->IsSymbol());
+ // Var declared variables never need initialization.
+ ASSERT(!(mode == VAR && initialization_flag == kNeedsInitialization));
}
@@ -79,4 +85,12 @@ bool Variable::is_global() const {
return mode_ != TEMPORARY && scope_ != NULL && scope_->is_global_scope();
}
+
+int Variable::CompareIndex(Variable* const* v, Variable* const* w) {
+ int x = (*v)->index();
+ int y = (*w)->index();
+ // Consider sorting them according to type as well?
+ return x - y;
+}
+
} } // namespace v8::internal
diff --git a/deps/v8/src/variables.h b/deps/v8/src/variables.h
index 56c8dabd3..f20bd399c 100644
--- a/deps/v8/src/variables.h
+++ b/deps/v8/src/variables.h
@@ -40,34 +40,6 @@ namespace internal {
class Variable: public ZoneObject {
public:
- enum Mode {
- // User declared variables:
- VAR, // declared via 'var', and 'function' declarations
-
- CONST, // declared via 'const' declarations
-
- LET, // declared via 'let' declarations
-
- // Variables introduced by the compiler:
- DYNAMIC, // always require dynamic lookup (we don't know
- // the declaration)
-
- DYNAMIC_GLOBAL, // requires dynamic lookup, but we know that the
- // variable is global unless it has been shadowed
- // by an eval-introduced variable
-
- DYNAMIC_LOCAL, // requires dynamic lookup, but we know that the
- // variable is local and where it is unless it
- // has been shadowed by an eval-introduced
- // variable
-
- INTERNAL, // like VAR, but not user-visible (may or may not
- // be in a context)
-
- TEMPORARY // temporary variables (not user-visible), never
- // in a context
- };
-
enum Kind {
NORMAL,
THIS,
@@ -103,12 +75,13 @@ class Variable: public ZoneObject {
Variable(Scope* scope,
Handle<String> name,
- Mode mode,
+ VariableMode mode,
bool is_valid_lhs,
- Kind kind);
+ Kind kind,
+ InitializationFlag initialization_flag);
// Printing support
- static const char* Mode2String(Mode mode);
+ static const char* Mode2String(VariableMode mode);
bool IsValidLeftHandSide() { return is_valid_LHS_; }
@@ -119,17 +92,20 @@ class Variable: public ZoneObject {
Scope* scope() const { return scope_; }
Handle<String> name() const { return name_; }
- Mode mode() const { return mode_; }
- bool is_accessed_from_inner_scope() const {
- return is_accessed_from_inner_scope_;
+ VariableMode mode() const { return mode_; }
+ bool has_forced_context_allocation() const {
+ return force_context_allocation_;
}
- void MarkAsAccessedFromInnerScope() {
+ void ForceContextAllocation() {
ASSERT(mode_ != TEMPORARY);
- is_accessed_from_inner_scope_ = true;
+ force_context_allocation_ = true;
}
bool is_used() { return is_used_; }
void set_is_used(bool flag) { is_used_ = flag; }
+ int initializer_position() { return initializer_position_; }
+ void set_initializer_position(int pos) { initializer_position_ = pos; }
+
bool IsVariable(Handle<String> n) const {
return !is_this() && name().is_identical_to(n);
}
@@ -146,6 +122,13 @@ class Variable: public ZoneObject {
mode_ == DYNAMIC_GLOBAL ||
mode_ == DYNAMIC_LOCAL);
}
+ bool is_const_mode() const {
+ return (mode_ == CONST ||
+ mode_ == CONST_HARMONY);
+ }
+ bool binding_needs_init() const {
+ return initialization_flag_ == kNeedsInitialization;
+ }
bool is_global() const;
bool is_this() const { return kind_ == THIS; }
@@ -153,8 +136,7 @@ class Variable: public ZoneObject {
// True if the variable is named eval and not known to be shadowed.
bool is_possibly_eval() const {
- return IsVariable(FACTORY->eval_symbol()) &&
- (mode_ == DYNAMIC || mode_ == DYNAMIC_GLOBAL);
+ return IsVariable(FACTORY->eval_symbol());
}
Variable* local_if_not_shadowed() const {
@@ -168,28 +150,39 @@ class Variable: public ZoneObject {
Location location() const { return location_; }
int index() const { return index_; }
+ InitializationFlag initialization_flag() const {
+ return initialization_flag_;
+ }
void AllocateTo(Location location, int index) {
location_ = location;
index_ = index;
}
+ static int CompareIndex(Variable* const* v, Variable* const* w);
+
private:
Scope* scope_;
Handle<String> name_;
- Mode mode_;
+ VariableMode mode_;
Kind kind_;
Location location_;
int index_;
+ int initializer_position_;
+ // If this field is set, this variable references the stored locally bound
+ // variable, but it might be shadowed by variable bindings introduced by
+ // non-strict 'eval' calls between the reference scope (inclusive) and the
+ // binding scope (exclusive).
Variable* local_if_not_shadowed_;
// Valid as a LHS? (const and this are not valid LHS, for example)
bool is_valid_LHS_;
// Usage info.
- bool is_accessed_from_inner_scope_; // set by variable resolver
+ bool force_context_allocation_; // set by variable resolver
bool is_used_;
+ InitializationFlag initialization_flag_;
};
diff --git a/deps/v8/src/version.cc b/deps/v8/src/version.cc
index 2865502c1..3352735dc 100644
--- a/deps/v8/src/version.cc
+++ b/deps/v8/src/version.cc
@@ -33,9 +33,9 @@
// NOTE these macros are used by the SCons build script so their names
// cannot be changed without changing the SCons build script.
#define MAJOR_VERSION 3
-#define MINOR_VERSION 6
-#define BUILD_NUMBER 6
-#define PATCH_LEVEL 8
+#define MINOR_VERSION 7
+#define BUILD_NUMBER 12
+#define PATCH_LEVEL 0
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
#define IS_CANDIDATE_VERSION 0
diff --git a/deps/v8/src/win32-headers.h b/deps/v8/src/win32-headers.h
index fca5c137e..0ee330668 100644
--- a/deps/v8/src/win32-headers.h
+++ b/deps/v8/src/win32-headers.h
@@ -75,6 +75,7 @@
// makes it impossible to have them elsewhere.
#include <winsock2.h>
#include <ws2tcpip.h>
+#include <wspiapi.h>
#include <process.h> // for _beginthreadex()
#include <stdlib.h>
#endif // V8_WIN32_HEADERS_FULL
diff --git a/deps/v8/src/x64/assembler-x64-inl.h b/deps/v8/src/x64/assembler-x64-inl.h
index 8db54f075..ab387d6d0 100644
--- a/deps/v8/src/x64/assembler-x64-inl.h
+++ b/deps/v8/src/x64/assembler-x64-inl.h
@@ -224,7 +224,9 @@ Address RelocInfo::target_address() {
Address RelocInfo::target_address_address() {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
+ ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY
+ || rmode_ == EMBEDDED_OBJECT
+ || rmode_ == EXTERNAL_REFERENCE);
return reinterpret_cast<Address>(pc_);
}
@@ -238,10 +240,15 @@ int RelocInfo::target_address_size() {
}
-void RelocInfo::set_target_address(Address target) {
+void RelocInfo::set_target_address(Address target, WriteBarrierMode mode) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
if (IsCodeTarget(rmode_)) {
Assembler::set_target_address_at(pc_, target);
+ Object* target_code = Code::GetCodeFromTargetAddress(target);
+ if (mode == UPDATE_WRITE_BARRIER && host() != NULL) {
+ host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
+ host(), this, HeapObject::cast(target_code));
+ }
} else {
Memory::Address_at(pc_) = target;
CPU::FlushICache(pc_, sizeof(Address));
@@ -277,10 +284,16 @@ Address* RelocInfo::target_reference_address() {
}
-void RelocInfo::set_target_object(Object* target) {
+void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- *reinterpret_cast<Object**>(pc_) = target;
+ Memory::Object_at(pc_) = target;
CPU::FlushICache(pc_, sizeof(Address));
+ if (mode == UPDATE_WRITE_BARRIER &&
+ host() != NULL &&
+ target->IsHeapObject()) {
+ host()->GetHeap()->incremental_marking()->RecordWrite(
+ host(), &Memory::Object_at(pc_), HeapObject::cast(target));
+ }
}
@@ -301,11 +314,19 @@ JSGlobalPropertyCell* RelocInfo::target_cell() {
}
-void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell) {
+void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell,
+ WriteBarrierMode mode) {
ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
Address address = cell->address() + JSGlobalPropertyCell::kValueOffset;
Memory::Address_at(pc_) = address;
CPU::FlushICache(pc_, sizeof(Address));
+ if (mode == UPDATE_WRITE_BARRIER &&
+ host() != NULL) {
+    // TODO(1550) We are passing NULL as a slot because a cell can never be
+    // on an evacuation candidate.
+ host()->GetHeap()->incremental_marking()->RecordWrite(
+ host(), NULL, cell);
+ }
}
@@ -344,6 +365,11 @@ void RelocInfo::set_call_address(Address target) {
target;
CPU::FlushICache(pc_ + Assembler::kRealPatchReturnSequenceAddressOffset,
sizeof(Address));
+ if (host() != NULL) {
+ Object* target_code = Code::GetCodeFromTargetAddress(target);
+ host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
+ host(), this, HeapObject::cast(target_code));
+ }
}
@@ -368,14 +394,14 @@ Object** RelocInfo::call_object_address() {
void RelocInfo::Visit(ObjectVisitor* visitor) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
- visitor->VisitPointer(target_object_address());
+ visitor->VisitEmbeddedPointer(this);
CPU::FlushICache(pc_, sizeof(Address));
} else if (RelocInfo::IsCodeTarget(mode)) {
visitor->VisitCodeTarget(this);
} else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
visitor->VisitGlobalPropertyCell(this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
- visitor->VisitExternalReference(target_reference_address());
+ visitor->VisitExternalReference(this);
CPU::FlushICache(pc_, sizeof(Address));
#ifdef ENABLE_DEBUGGER_SUPPORT
// TODO(isolates): Get a cached isolate below.
@@ -396,14 +422,14 @@ template<typename StaticVisitor>
void RelocInfo::Visit(Heap* heap) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
- StaticVisitor::VisitPointer(heap, target_object_address());
+ StaticVisitor::VisitEmbeddedPointer(heap, this);
CPU::FlushICache(pc_, sizeof(Address));
} else if (RelocInfo::IsCodeTarget(mode)) {
StaticVisitor::VisitCodeTarget(heap, this);
} else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
StaticVisitor::VisitGlobalPropertyCell(heap, this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
- StaticVisitor::VisitExternalReference(target_reference_address());
+ StaticVisitor::VisitExternalReference(this);
CPU::FlushICache(pc_, sizeof(Address));
#ifdef ENABLE_DEBUGGER_SUPPORT
} else if (heap->isolate()->debug()->has_break_points() &&
diff --git a/deps/v8/src/x64/assembler-x64.cc b/deps/v8/src/x64/assembler-x64.cc
index 745fdaeb8..d578bf9c5 100644
--- a/deps/v8/src/x64/assembler-x64.cc
+++ b/deps/v8/src/x64/assembler-x64.cc
@@ -47,7 +47,7 @@ uint64_t CpuFeatures::found_by_runtime_probing_ = 0;
void CpuFeatures::Probe() {
- ASSERT(!initialized_);
+ ASSERT(supported_ == CpuFeatures::kDefaultCpuFeatures);
#ifdef DEBUG
initialized_ = true;
#endif
@@ -2299,6 +2299,13 @@ void Assembler::fsin() {
}
+void Assembler::fptan() {
+ EnsureSpace ensure_space(this);
+ emit(0xD9);
+ emit(0xF2);
+}
+
+
void Assembler::fyl2x() {
EnsureSpace ensure_space(this);
emit(0xD9);
@@ -2983,7 +2990,7 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
return;
}
}
- RelocInfo rinfo(pc_, rmode, data);
+ RelocInfo rinfo(pc_, rmode, data, NULL);
reloc_info_writer.Write(&rinfo);
}
diff --git a/deps/v8/src/x64/assembler-x64.h b/deps/v8/src/x64/assembler-x64.h
index 2e373faac..1db5273b2 100644
--- a/deps/v8/src/x64/assembler-x64.h
+++ b/deps/v8/src/x64/assembler-x64.h
@@ -45,22 +45,22 @@ namespace internal {
// Utility functions
// Test whether a 64-bit value is in a specific range.
-static inline bool is_uint32(int64_t x) {
+inline bool is_uint32(int64_t x) {
static const uint64_t kMaxUInt32 = V8_UINT64_C(0xffffffff);
return static_cast<uint64_t>(x) <= kMaxUInt32;
}
-static inline bool is_int32(int64_t x) {
+inline bool is_int32(int64_t x) {
static const int64_t kMinInt32 = -V8_INT64_C(0x80000000);
return is_uint32(x - kMinInt32);
}
-static inline bool uint_is_int32(uint64_t x) {
+inline bool uint_is_int32(uint64_t x) {
static const uint64_t kMaxInt32 = V8_UINT64_C(0x7fffffff);
return x <= kMaxInt32;
}
-static inline bool is_uint32(uint64_t x) {
+inline bool is_uint32(uint64_t x) {
static const uint64_t kMaxUInt32 = V8_UINT64_C(0xffffffff);
return x <= kMaxUInt32;
}
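
These helpers use the classic bias-to-unsigned trick: is_int32 shifts the signed range [-2^31, 2^31) onto [0, 2^32) so a single unsigned comparison replaces two signed ones. A standalone sketch (with the bias done in unsigned arithmetic, which keeps it free of signed overflow):

#include <cassert>
#include <cstdint>

inline bool FitsUint32(int64_t x) {
  return static_cast<uint64_t>(x) <= UINT64_C(0xffffffff);
}

inline bool FitsInt32(int64_t x) {
  // x is in [-2^31, 2^31) exactly when x + 2^31 is in [0, 2^32).
  return static_cast<uint64_t>(x) + UINT64_C(0x80000000)
      <= UINT64_C(0xffffffff);
}

int main() {
  assert(FitsInt32(-1) && FitsInt32(0x7fffffff));
  assert(!FitsInt32(INT64_C(0x80000000)));   // One past INT32_MAX.
  assert(!FitsUint32(-1));                   // Negative values never fit.
  return 0;
}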
@@ -215,6 +215,12 @@ struct XMMRegister {
return names[index];
}
+ static XMMRegister from_code(int code) {
+ ASSERT(code >= 0);
+ ASSERT(code < kNumRegisters);
+ XMMRegister r = { code };
+ return r;
+ }
bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; }
bool is(XMMRegister reg) const { return code_ == reg.code_; }
int code() const {
@@ -643,7 +649,6 @@ class Assembler : public AssemblerBase {
void push_imm32(int32_t imm32);
void push(Register src);
void push(const Operand& src);
- void push(Handle<Object> handle);
void pop(Register dst);
void pop(const Operand& dst);
@@ -735,6 +740,10 @@ class Assembler : public AssemblerBase {
immediate_arithmetic_op_32(0x0, dst, src);
}
+ void addl(const Operand& dst, Register src) {
+ arithmetic_op_32(0x01, src, dst);
+ }
+
void addq(Register dst, Register src) {
arithmetic_op(0x03, dst, src);
}
@@ -1266,6 +1275,7 @@ class Assembler : public AssemblerBase {
void fsin();
void fcos();
+ void fptan();
void fyl2x();
void frndint();
@@ -1394,13 +1404,14 @@ class Assembler : public AssemblerBase {
static const int kMaximalBufferSize = 512*MB;
static const int kMinimalBufferSize = 4*KB;
+ byte byte_at(int pos) { return buffer_[pos]; }
+ void set_byte_at(int pos, byte value) { buffer_[pos] = value; }
+
protected:
bool emit_debug_code() const { return emit_debug_code_; }
private:
byte* addr_at(int pos) { return buffer_ + pos; }
- byte byte_at(int pos) { return buffer_[pos]; }
- void set_byte_at(int pos, byte value) { buffer_[pos] = value; }
uint32_t long_at(int pos) {
return *reinterpret_cast<uint32_t*>(addr_at(pos));
}
diff --git a/deps/v8/src/x64/builtins-x64.cc b/deps/v8/src/x64/builtins-x64.cc
index db06909da..e423ae3a4 100644
--- a/deps/v8/src/x64/builtins-x64.cc
+++ b/deps/v8/src/x64/builtins-x64.cc
@@ -79,12 +79,12 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
// -- rdi: constructor function
// -----------------------------------
- Label non_function_call;
+ Label slow, non_function_call;
// Check that function is not a smi.
__ JumpIfSmi(rdi, &non_function_call);
// Check that function is a JSFunction.
__ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
- __ j(not_equal, &non_function_call);
+ __ j(not_equal, &slow);
// Jump to the function-specific construct stub.
__ movq(rbx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
@@ -94,10 +94,19 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
// rdi: called object
// rax: number of arguments
+ // rcx: object map
+ Label do_call;
+ __ bind(&slow);
+ __ CmpInstanceType(rcx, JS_FUNCTION_PROXY_TYPE);
+ __ j(not_equal, &non_function_call);
+ __ GetBuiltinEntry(rdx, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
+ __ jmp(&do_call);
+
__ bind(&non_function_call);
+ __ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
+ __ bind(&do_call);
// Set expected number of arguments to zero (not changing rax).
__ Set(rbx, 0);
- __ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
__ SetCallKind(rcx, CALL_AS_METHOD);
__ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
RelocInfo::CODE_TARGET);
@@ -110,272 +119,278 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Should never count constructions for api objects.
ASSERT(!is_api_function || !count_constructions);
- // Enter a construct frame.
- __ EnterConstructFrame();
+ // Enter a construct frame.
+ {
+ FrameScope scope(masm, StackFrame::CONSTRUCT);
- // Store a smi-tagged arguments count on the stack.
- __ Integer32ToSmi(rax, rax);
- __ push(rax);
+ // Store a smi-tagged arguments count on the stack.
+ __ Integer32ToSmi(rax, rax);
+ __ push(rax);
- // Push the function to invoke on the stack.
- __ push(rdi);
+ // Push the function to invoke on the stack.
+ __ push(rdi);
- // Try to allocate the object without transitioning into C code. If any of the
- // preconditions is not met, the code bails out to the runtime call.
- Label rt_call, allocated;
- if (FLAG_inline_new) {
- Label undo_allocation;
+ // Try to allocate the object without transitioning into C code. If any of
+ // the preconditions is not met, the code bails out to the runtime call.
+ Label rt_call, allocated;
+ if (FLAG_inline_new) {
+ Label undo_allocation;
#ifdef ENABLE_DEBUGGER_SUPPORT
- ExternalReference debug_step_in_fp =
- ExternalReference::debug_step_in_fp_address(masm->isolate());
- __ movq(kScratchRegister, debug_step_in_fp);
- __ cmpq(Operand(kScratchRegister, 0), Immediate(0));
- __ j(not_equal, &rt_call);
+ ExternalReference debug_step_in_fp =
+ ExternalReference::debug_step_in_fp_address(masm->isolate());
+ __ movq(kScratchRegister, debug_step_in_fp);
+ __ cmpq(Operand(kScratchRegister, 0), Immediate(0));
+ __ j(not_equal, &rt_call);
#endif
- // Verified that the constructor is a JSFunction.
- // Load the initial map and verify that it is in fact a map.
- // rdi: constructor
- __ movq(rax, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
- // Will both indicate a NULL and a Smi
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfSmi(rax, &rt_call);
- // rdi: constructor
- // rax: initial map (if proven valid below)
- __ CmpObjectType(rax, MAP_TYPE, rbx);
- __ j(not_equal, &rt_call);
-
- // Check that the constructor is not constructing a JSFunction (see comments
- // in Runtime_NewObject in runtime.cc). In which case the initial map's
- // instance type would be JS_FUNCTION_TYPE.
- // rdi: constructor
- // rax: initial map
- __ CmpInstanceType(rax, JS_FUNCTION_TYPE);
- __ j(equal, &rt_call);
-
- if (count_constructions) {
- Label allocate;
- // Decrease generous allocation count.
- __ movq(rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- __ decb(FieldOperand(rcx, SharedFunctionInfo::kConstructionCountOffset));
- __ j(not_zero, &allocate);
+ // Verified that the constructor is a JSFunction.
+ // Load the initial map and verify that it is in fact a map.
+ // rdi: constructor
+ __ movq(rax, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
+ // Will both indicate a NULL and a Smi
+ ASSERT(kSmiTag == 0);
+ __ JumpIfSmi(rax, &rt_call);
+ // rdi: constructor
+ // rax: initial map (if proven valid below)
+ __ CmpObjectType(rax, MAP_TYPE, rbx);
+ __ j(not_equal, &rt_call);
+
+ // Check that the constructor is not constructing a JSFunction (see
+ // comments in Runtime_NewObject in runtime.cc). In which case the
+ // initial map's instance type would be JS_FUNCTION_TYPE.
+ // rdi: constructor
+ // rax: initial map
+ __ CmpInstanceType(rax, JS_FUNCTION_TYPE);
+ __ j(equal, &rt_call);
- __ push(rax);
- __ push(rdi);
+ if (count_constructions) {
+ Label allocate;
+ // Decrease generous allocation count.
+ __ movq(rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+ __ decb(FieldOperand(rcx,
+ SharedFunctionInfo::kConstructionCountOffset));
+ __ j(not_zero, &allocate);
- __ push(rdi); // constructor
- // The call will replace the stub, so the countdown is only done once.
- __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
+ __ push(rax);
+ __ push(rdi);
- __ pop(rdi);
- __ pop(rax);
+ __ push(rdi); // constructor
+ // The call will replace the stub, so the countdown is only done once.
+ __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
- __ bind(&allocate);
- }
+ __ pop(rdi);
+ __ pop(rax);
- // Now allocate the JSObject on the heap.
- __ movzxbq(rdi, FieldOperand(rax, Map::kInstanceSizeOffset));
- __ shl(rdi, Immediate(kPointerSizeLog2));
- // rdi: size of new object
- __ AllocateInNewSpace(rdi,
- rbx,
- rdi,
- no_reg,
- &rt_call,
- NO_ALLOCATION_FLAGS);
- // Allocated the JSObject, now initialize the fields.
- // rax: initial map
- // rbx: JSObject (not HeapObject tagged - the actual address).
- // rdi: start of next object
- __ movq(Operand(rbx, JSObject::kMapOffset), rax);
- __ LoadRoot(rcx, Heap::kEmptyFixedArrayRootIndex);
- __ movq(Operand(rbx, JSObject::kPropertiesOffset), rcx);
- __ movq(Operand(rbx, JSObject::kElementsOffset), rcx);
- // Set extra fields in the newly allocated object.
- // rax: initial map
- // rbx: JSObject
- // rdi: start of next object
- { Label loop, entry;
- // To allow for truncation.
+ __ bind(&allocate);
+ }
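The decb countdown implements the generous-allocation heuristic: objects are allocated with slack for expected properties, and once a fixed number of constructions have happened the runtime shrinks the instance size to what was actually used. A sketch of the decision (names are illustrative):

// Standalone model of the countdown taken above.
struct SharedInfoModel { unsigned char construction_count; };

bool ShouldFinalizeInstanceSize(SharedInfoModel* shared) {
  // decb + j(not_zero, &allocate): finalize exactly when the count hits
  // zero. As noted above, the runtime call then replaces the stub, so
  // the countdown is only done once.
  return --shared->construction_count == 0;
}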
+
+ // Now allocate the JSObject on the heap.
+ __ movzxbq(rdi, FieldOperand(rax, Map::kInstanceSizeOffset));
+ __ shl(rdi, Immediate(kPointerSizeLog2));
+ // rdi: size of new object
+ __ AllocateInNewSpace(rdi,
+ rbx,
+ rdi,
+ no_reg,
+ &rt_call,
+ NO_ALLOCATION_FLAGS);
+    // The JSObject is allocated; now initialize the fields.
+ // rax: initial map
+ // rbx: JSObject (not HeapObject tagged - the actual address).
+ // rdi: start of next object
+ __ movq(Operand(rbx, JSObject::kMapOffset), rax);
+ __ LoadRoot(rcx, Heap::kEmptyFixedArrayRootIndex);
+ __ movq(Operand(rbx, JSObject::kPropertiesOffset), rcx);
+ __ movq(Operand(rbx, JSObject::kElementsOffset), rcx);
+ // Set extra fields in the newly allocated object.
+ // rax: initial map
+ // rbx: JSObject
+ // rdi: start of next object
+ __ lea(rcx, Operand(rbx, JSObject::kHeaderSize));
+ __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
if (count_constructions) {
+ __ movzxbq(rsi,
+ FieldOperand(rax, Map::kPreAllocatedPropertyFieldsOffset));
+ __ lea(rsi,
+ Operand(rbx, rsi, times_pointer_size, JSObject::kHeaderSize));
+ // rsi: offset of first field after pre-allocated fields
+ if (FLAG_debug_code) {
+ __ cmpq(rsi, rdi);
+ __ Assert(less_equal,
+ "Unexpected number of pre-allocated property fields.");
+ }
+ __ InitializeFieldsWithFiller(rcx, rsi, rdx);
__ LoadRoot(rdx, Heap::kOnePointerFillerMapRootIndex);
- } else {
+ }
+ __ InitializeFieldsWithFiller(rcx, rdi, rdx);
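InitializeFieldsWithFiller presumably does a plain word-by-word fill between two addresses; pre-allocated fields get undefined, while during the countdown the slack beyond them is filled with the one-pointer filler map so the heap stays walkable. Assumed behavior:

// Assumed behavior of InitializeFieldsWithFiller(current, end, filler).
void ModelInitializeFieldsWithFiller(void** current, void** end, void* filler) {
  while (current < end) *current++ = filler;  // one word per field
}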
+
+ // Add the object tag to make the JSObject real, so that we can continue
+ // and jump into the continuation code at any time from now on. Any
+ // failures need to undo the allocation, so that the heap is in a
+ // consistent state and verifiable.
+ // rax: initial map
+ // rbx: JSObject
+ // rdi: start of next object
+ __ or_(rbx, Immediate(kHeapObjectTag));
+
+ // Check if a non-empty properties array is needed.
+ // Allocate and initialize a FixedArray if it is.
+ // rax: initial map
+ // rbx: JSObject
+ // rdi: start of next object
+      // Calculate the total number of properties described by the map.
+ __ movzxbq(rdx, FieldOperand(rax, Map::kUnusedPropertyFieldsOffset));
+ __ movzxbq(rcx,
+ FieldOperand(rax, Map::kPreAllocatedPropertyFieldsOffset));
+ __ addq(rdx, rcx);
+ // Calculate unused properties past the end of the in-object properties.
+ __ movzxbq(rcx, FieldOperand(rax, Map::kInObjectPropertiesOffset));
+ __ subq(rdx, rcx);
+ // Done if no extra properties are to be allocated.
+ __ j(zero, &allocated);
+ __ Assert(positive, "Property allocation count failed.");
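The rdx computation above amounts to: extra slots = unused + pre-allocated - in-object. Only a positive count needs a backing FixedArray; zero means everything fits in-object, and a negative count would be a bug, hence the Assert. As plain arithmetic:

// Restatement of the slot computation held in rdx above.
int ExtraPropertySlots(int unused_fields,
                       int pre_allocated_fields,
                       int in_object_fields) {
  return unused_fields + pre_allocated_fields - in_object_fields;
}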
+
+ // Scale the number of elements by pointer size and add the header for
+ // FixedArrays to the start of the next object calculation from above.
+ // rbx: JSObject
+ // rdi: start of next object (will be start of FixedArray)
+ // rdx: number of elements in properties array
+ __ AllocateInNewSpace(FixedArray::kHeaderSize,
+ times_pointer_size,
+ rdx,
+ rdi,
+ rax,
+ no_reg,
+ &undo_allocation,
+ RESULT_CONTAINS_TOP);
+
+ // Initialize the FixedArray.
+ // rbx: JSObject
+ // rdi: FixedArray
+ // rdx: number of elements
+ // rax: start of next object
+ __ LoadRoot(rcx, Heap::kFixedArrayMapRootIndex);
+    __ movq(Operand(rdi, HeapObject::kMapOffset), rcx);  // set up the map
+ __ Integer32ToSmi(rdx, rdx);
+ __ movq(Operand(rdi, FixedArray::kLengthOffset), rdx); // and length
+
+ // Initialize the fields to undefined.
+ // rbx: JSObject
+ // rdi: FixedArray
+ // rax: start of next object
+ // rdx: number of elements
+ { Label loop, entry;
__ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
+ __ lea(rcx, Operand(rdi, FixedArray::kHeaderSize));
+ __ jmp(&entry);
+ __ bind(&loop);
+ __ movq(Operand(rcx, 0), rdx);
+ __ addq(rcx, Immediate(kPointerSize));
+ __ bind(&entry);
+ __ cmpq(rcx, rax);
+ __ j(below, &loop);
}
- __ lea(rcx, Operand(rbx, JSObject::kHeaderSize));
- __ jmp(&entry);
- __ bind(&loop);
- __ movq(Operand(rcx, 0), rdx);
- __ addq(rcx, Immediate(kPointerSize));
- __ bind(&entry);
- __ cmpq(rcx, rdi);
- __ j(less, &loop);
- }
-
- // Add the object tag to make the JSObject real, so that we can continue and
- // jump into the continuation code at any time from now on. Any failures
- // need to undo the allocation, so that the heap is in a consistent state
- // and verifiable.
- // rax: initial map
- // rbx: JSObject
- // rdi: start of next object
- __ or_(rbx, Immediate(kHeapObjectTag));
-
- // Check if a non-empty properties array is needed.
- // Allocate and initialize a FixedArray if it is.
- // rax: initial map
- // rbx: JSObject
- // rdi: start of next object
- // Calculate total properties described map.
- __ movzxbq(rdx, FieldOperand(rax, Map::kUnusedPropertyFieldsOffset));
- __ movzxbq(rcx, FieldOperand(rax, Map::kPreAllocatedPropertyFieldsOffset));
- __ addq(rdx, rcx);
- // Calculate unused properties past the end of the in-object properties.
- __ movzxbq(rcx, FieldOperand(rax, Map::kInObjectPropertiesOffset));
- __ subq(rdx, rcx);
- // Done if no extra properties are to be allocated.
- __ j(zero, &allocated);
- __ Assert(positive, "Property allocation count failed.");
-
- // Scale the number of elements by pointer size and add the header for
- // FixedArrays to the start of the next object calculation from above.
- // rbx: JSObject
- // rdi: start of next object (will be start of FixedArray)
- // rdx: number of elements in properties array
- __ AllocateInNewSpace(FixedArray::kHeaderSize,
- times_pointer_size,
- rdx,
- rdi,
- rax,
- no_reg,
- &undo_allocation,
- RESULT_CONTAINS_TOP);
-
- // Initialize the FixedArray.
- // rbx: JSObject
- // rdi: FixedArray
- // rdx: number of elements
- // rax: start of next object
- __ LoadRoot(rcx, Heap::kFixedArrayMapRootIndex);
- __ movq(Operand(rdi, HeapObject::kMapOffset), rcx); // setup the map
- __ Integer32ToSmi(rdx, rdx);
- __ movq(Operand(rdi, FixedArray::kLengthOffset), rdx); // and length
-
- // Initialize the fields to undefined.
- // rbx: JSObject
- // rdi: FixedArray
- // rax: start of next object
- // rdx: number of elements
- { Label loop, entry;
- __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
- __ lea(rcx, Operand(rdi, FixedArray::kHeaderSize));
- __ jmp(&entry);
- __ bind(&loop);
- __ movq(Operand(rcx, 0), rdx);
- __ addq(rcx, Immediate(kPointerSize));
- __ bind(&entry);
- __ cmpq(rcx, rax);
- __ j(below, &loop);
- }
- // Store the initialized FixedArray into the properties field of
- // the JSObject
- // rbx: JSObject
- // rdi: FixedArray
- __ or_(rdi, Immediate(kHeapObjectTag)); // add the heap tag
- __ movq(FieldOperand(rbx, JSObject::kPropertiesOffset), rdi);
+ // Store the initialized FixedArray into the properties field of
+ // the JSObject
+ // rbx: JSObject
+ // rdi: FixedArray
+ __ or_(rdi, Immediate(kHeapObjectTag)); // add the heap tag
+ __ movq(FieldOperand(rbx, JSObject::kPropertiesOffset), rdi);
- // Continue with JSObject being successfully allocated
- // rbx: JSObject
- __ jmp(&allocated);
+ // Continue with JSObject being successfully allocated
+ // rbx: JSObject
+ __ jmp(&allocated);
- // Undo the setting of the new top so that the heap is verifiable. For
- // example, the map's unused properties potentially do not match the
- // allocated objects unused properties.
- // rbx: JSObject (previous new top)
- __ bind(&undo_allocation);
- __ UndoAllocationInNewSpace(rbx);
- }
+ // Undo the setting of the new top so that the heap is verifiable. For
+ // example, the map's unused properties potentially do not match the
+    // allocated object's unused properties.
+ // rbx: JSObject (previous new top)
+ __ bind(&undo_allocation);
+ __ UndoAllocationInNewSpace(rbx);
+ }
- // Allocate the new receiver object using the runtime call.
- // rdi: function (constructor)
- __ bind(&rt_call);
- // Must restore rdi (constructor) before calling runtime.
- __ movq(rdi, Operand(rsp, 0));
- __ push(rdi);
- __ CallRuntime(Runtime::kNewObject, 1);
- __ movq(rbx, rax); // store result in rbx
+ // Allocate the new receiver object using the runtime call.
+ // rdi: function (constructor)
+ __ bind(&rt_call);
+ // Must restore rdi (constructor) before calling runtime.
+ __ movq(rdi, Operand(rsp, 0));
+ __ push(rdi);
+ __ CallRuntime(Runtime::kNewObject, 1);
+ __ movq(rbx, rax); // store result in rbx
- // New object allocated.
- // rbx: newly allocated object
- __ bind(&allocated);
- // Retrieve the function from the stack.
- __ pop(rdi);
+ // New object allocated.
+ // rbx: newly allocated object
+ __ bind(&allocated);
+ // Retrieve the function from the stack.
+ __ pop(rdi);
- // Retrieve smi-tagged arguments count from the stack.
- __ movq(rax, Operand(rsp, 0));
- __ SmiToInteger32(rax, rax);
+ // Retrieve smi-tagged arguments count from the stack.
+ __ movq(rax, Operand(rsp, 0));
+ __ SmiToInteger32(rax, rax);
- // Push the allocated receiver to the stack. We need two copies
- // because we may have to return the original one and the calling
- // conventions dictate that the called function pops the receiver.
- __ push(rbx);
- __ push(rbx);
+ // Push the allocated receiver to the stack. We need two copies
+ // because we may have to return the original one and the calling
+ // conventions dictate that the called function pops the receiver.
+ __ push(rbx);
+ __ push(rbx);
- // Setup pointer to last argument.
- __ lea(rbx, Operand(rbp, StandardFrameConstants::kCallerSPOffset));
+    // Set up a pointer to the last argument.
+ __ lea(rbx, Operand(rbp, StandardFrameConstants::kCallerSPOffset));
- // Copy arguments and receiver to the expression stack.
- Label loop, entry;
- __ movq(rcx, rax);
- __ jmp(&entry);
- __ bind(&loop);
- __ push(Operand(rbx, rcx, times_pointer_size, 0));
- __ bind(&entry);
- __ decq(rcx);
- __ j(greater_equal, &loop);
+ // Copy arguments and receiver to the expression stack.
+ Label loop, entry;
+ __ movq(rcx, rax);
+ __ jmp(&entry);
+ __ bind(&loop);
+ __ push(Operand(rbx, rcx, times_pointer_size, 0));
+ __ bind(&entry);
+ __ decq(rcx);
+ __ j(greater_equal, &loop);
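The loop walks rcx from argc down to 0, so argc + 1 caller slots (the arguments plus the receiver slot) are re-pushed onto the expression stack in their original order. A standalone model:

#include <cstdint>
#include <vector>

// Model of the copy loop above; rbx plays the role of caller_slots.
void CopyArgsAndReceiver(std::vector<intptr_t>* stack,
                         const intptr_t* caller_slots, int argc) {
  for (int i = argc; i >= 0; --i) {
    stack->push_back(caller_slots[i]);
  }
}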
+
+ // Call the function.
+ if (is_api_function) {
+ __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+ Handle<Code> code =
+ masm->isolate()->builtins()->HandleApiCallConstruct();
+ ParameterCount expected(0);
+ __ InvokeCode(code, expected, expected, RelocInfo::CODE_TARGET,
+ CALL_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
+ } else {
+ ParameterCount actual(rax);
+ __ InvokeFunction(rdi, actual, CALL_FUNCTION,
+ NullCallWrapper(), CALL_AS_METHOD);
+ }
- // Call the function.
- if (is_api_function) {
- __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
- Handle<Code> code =
- masm->isolate()->builtins()->HandleApiCallConstruct();
- ParameterCount expected(0);
- __ InvokeCode(code, expected, expected, RelocInfo::CODE_TARGET,
- CALL_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
- } else {
- ParameterCount actual(rax);
- __ InvokeFunction(rdi, actual, CALL_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
- }
+ // Restore context from the frame.
+ __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- // Restore context from the frame.
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ // If the result is an object (in the ECMA sense), we should get rid
+ // of the receiver and use the result; see ECMA-262 section 13.2.2-7
+ // on page 74.
+ Label use_receiver, exit;
+ // If the result is a smi, it is *not* an object in the ECMA sense.
+ __ JumpIfSmi(rax, &use_receiver);
- // If the result is an object (in the ECMA sense), we should get rid
- // of the receiver and use the result; see ECMA-262 section 13.2.2-7
- // on page 74.
- Label use_receiver, exit;
- // If the result is a smi, it is *not* an object in the ECMA sense.
- __ JumpIfSmi(rax, &use_receiver);
+ // If the type of the result (stored in its map) is less than
+ // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense.
+ STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+ __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rcx);
+ __ j(above_equal, &exit);
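Together with the use_receiver fallback bound just below, this implements the [[Construct]] result rule of ECMA-262 section 13.2.2: only a spec-object result replaces the freshly allocated receiver. A standalone model with illustrative types:

// Model of the result selection; not the V8 API.
enum class Kind { kSmi, kOtherPrimitive, kSpecObject };
struct Value { Kind kind; };

Value SelectConstructResult(Value call_result, Value receiver) {
  if (call_result.kind == Kind::kSmi) return receiver;           // JumpIfSmi
  if (call_result.kind == Kind::kSpecObject) return call_result;  // &exit
  return receiver;                                                // &use_receiver
}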
- // If the type of the result (stored in its map) is less than
- // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense.
- STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
- __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rcx);
- __ j(above_equal, &exit);
+ // Throw away the result of the constructor invocation and use the
+ // on-stack receiver as the result.
+ __ bind(&use_receiver);
+ __ movq(rax, Operand(rsp, 0));
- // Throw away the result of the constructor invocation and use the
- // on-stack receiver as the result.
- __ bind(&use_receiver);
- __ movq(rax, Operand(rsp, 0));
+ // Restore the arguments count and leave the construct frame.
+ __ bind(&exit);
+ __ movq(rbx, Operand(rsp, kPointerSize)); // Get arguments count.
- // Restore the arguments count and leave the construct frame.
- __ bind(&exit);
- __ movq(rbx, Operand(rsp, kPointerSize)); // get arguments count
- __ LeaveConstructFrame();
+ // Leave construct frame.
+ }
// Remove caller arguments from the stack and return.
__ pop(rcx);
@@ -413,104 +428,108 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// - Object*** argv
// (see Handle::Invoke in execution.cc).
- // Platform specific argument handling. After this, the stack contains
- // an internal frame and the pushed function and receiver, and
- // register rax and rbx holds the argument count and argument array,
- // while rdi holds the function pointer and rsi the context.
-#ifdef _WIN64
- // MSVC parameters in:
- // rcx : entry (ignored)
- // rdx : function
- // r8 : receiver
- // r9 : argc
- // [rsp+0x20] : argv
-
- // Clear the context before we push it when entering the JS frame.
- __ Set(rsi, 0);
- __ EnterInternalFrame();
-
- // Load the function context into rsi.
- __ movq(rsi, FieldOperand(rdx, JSFunction::kContextOffset));
-
- // Push the function and the receiver onto the stack.
- __ push(rdx);
- __ push(r8);
+ // Open a C++ scope for the FrameScope.
+ {
+    // Platform-specific argument handling. After this, the stack contains
+    // an internal frame and the pushed function and receiver; registers
+    // rax and rbx hold the argument count and the argument array, while
+    // rdi holds the function pointer and rsi the context.
- // Load the number of arguments and setup pointer to the arguments.
- __ movq(rax, r9);
- // Load the previous frame pointer to access C argument on stack
- __ movq(kScratchRegister, Operand(rbp, 0));
- __ movq(rbx, Operand(kScratchRegister, EntryFrameConstants::kArgvOffset));
- // Load the function pointer into rdi.
- __ movq(rdi, rdx);
+#ifdef _WIN64
+ // MSVC parameters in:
+ // rcx : entry (ignored)
+ // rdx : function
+ // r8 : receiver
+ // r9 : argc
+ // [rsp+0x20] : argv
+
+ // Clear the context before we push it when entering the internal frame.
+ __ Set(rsi, 0);
+ // Enter an internal frame.
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Load the function context into rsi.
+ __ movq(rsi, FieldOperand(rdx, JSFunction::kContextOffset));
+
+ // Push the function and the receiver onto the stack.
+ __ push(rdx);
+ __ push(r8);
+
+    // Load the number of arguments and set up a pointer to the arguments.
+    __ movq(rax, r9);
+    // Load the previous frame pointer to access C arguments on the stack.
+ __ movq(kScratchRegister, Operand(rbp, 0));
+ __ movq(rbx, Operand(kScratchRegister, EntryFrameConstants::kArgvOffset));
+ // Load the function pointer into rdi.
+ __ movq(rdi, rdx);
#else // _WIN64
- // GCC parameters in:
- // rdi : entry (ignored)
- // rsi : function
- // rdx : receiver
- // rcx : argc
- // r8 : argv
-
- __ movq(rdi, rsi);
- // rdi : function
-
- // Clear the context before we push it when entering the JS frame.
- __ Set(rsi, 0);
- // Enter an internal frame.
- __ EnterInternalFrame();
-
- // Push the function and receiver and setup the context.
- __ push(rdi);
- __ push(rdx);
- __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+ // GCC parameters in:
+ // rdi : entry (ignored)
+ // rsi : function
+ // rdx : receiver
+ // rcx : argc
+ // r8 : argv
+
+ __ movq(rdi, rsi);
+ // rdi : function
+
+ // Clear the context before we push it when entering the internal frame.
+ __ Set(rsi, 0);
+ // Enter an internal frame.
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Push the function and receiver and setup the context.
+ __ push(rdi);
+ __ push(rdx);
+ __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
- // Load the number of arguments and setup pointer to the arguments.
- __ movq(rax, rcx);
- __ movq(rbx, r8);
+    // Load the number of arguments and set up a pointer to the arguments.
+ __ movq(rax, rcx);
+ __ movq(rbx, r8);
#endif // _WIN64
- // Current stack contents:
- // [rsp + 2 * kPointerSize ... ]: Internal frame
- // [rsp + kPointerSize] : function
- // [rsp] : receiver
- // Current register contents:
- // rax : argc
- // rbx : argv
- // rsi : context
- // rdi : function
-
- // Copy arguments to the stack in a loop.
- // Register rbx points to array of pointers to handle locations.
- // Push the values of these handles.
- Label loop, entry;
- __ Set(rcx, 0); // Set loop variable to 0.
- __ jmp(&entry);
- __ bind(&loop);
- __ movq(kScratchRegister, Operand(rbx, rcx, times_pointer_size, 0));
- __ push(Operand(kScratchRegister, 0)); // dereference handle
- __ addq(rcx, Immediate(1));
- __ bind(&entry);
- __ cmpq(rcx, rax);
- __ j(not_equal, &loop);
-
- // Invoke the code.
- if (is_construct) {
- // Expects rdi to hold function pointer.
- __ Call(masm->isolate()->builtins()->JSConstructCall(),
- RelocInfo::CODE_TARGET);
- } else {
- ParameterCount actual(rax);
- // Function must be in rdi.
- __ InvokeFunction(rdi, actual, CALL_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
+ // Current stack contents:
+ // [rsp + 2 * kPointerSize ... ]: Internal frame
+ // [rsp + kPointerSize] : function
+ // [rsp] : receiver
+ // Current register contents:
+ // rax : argc
+ // rbx : argv
+ // rsi : context
+ // rdi : function
+
+ // Copy arguments to the stack in a loop.
+ // Register rbx points to array of pointers to handle locations.
+ // Push the values of these handles.
+ Label loop, entry;
+ __ Set(rcx, 0); // Set loop variable to 0.
+ __ jmp(&entry);
+ __ bind(&loop);
+ __ movq(kScratchRegister, Operand(rbx, rcx, times_pointer_size, 0));
+ __ push(Operand(kScratchRegister, 0)); // dereference handle
+ __ addq(rcx, Immediate(1));
+ __ bind(&entry);
+ __ cmpq(rcx, rax);
+ __ j(not_equal, &loop);
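Here argv is an array of handle locations (Object**), so each slot is dereferenced once before the value is pushed as a raw argument. In effect:

#include <vector>

// Standalone model of the loop above; ObjectPtr stands in for Object*.
using ObjectPtr = void*;
void PushDereferencedHandles(std::vector<ObjectPtr>* stack,
                             ObjectPtr* argv[], int argc) {
  for (int i = 0; i < argc; ++i) {
    ObjectPtr* handle = argv[i];  // kScratchRegister <- argv[i]
    stack->push_back(*handle);    // push the dereferenced handle
  }
}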
+
+ // Invoke the code.
+ if (is_construct) {
+ // Expects rdi to hold function pointer.
+ __ Call(masm->isolate()->builtins()->JSConstructCall(),
+ RelocInfo::CODE_TARGET);
+ } else {
+ ParameterCount actual(rax);
+ // Function must be in rdi.
+ __ InvokeFunction(rdi, actual, CALL_FUNCTION,
+ NullCallWrapper(), CALL_AS_METHOD);
+ }
+ // Exit the internal frame. Notice that this also removes the empty
+ // context and the function left on the stack by the code
+ // invocation.
}
- // Exit the JS frame. Notice that this also removes the empty
- // context and the function left on the stack by the code
- // invocation.
- __ LeaveInternalFrame();
// TODO(X64): Is argument correct? Is there a receiver to remove?
- __ ret(1 * kPointerSize); // remove receiver
+ __ ret(1 * kPointerSize); // Remove receiver.
}
@@ -526,23 +545,24 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
void Builtins::Generate_LazyCompile(MacroAssembler* masm) {
// Enter an internal frame.
- __ EnterInternalFrame();
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
- // Push a copy of the function onto the stack.
- __ push(rdi);
- // Push call kind information.
- __ push(rcx);
+ // Push a copy of the function onto the stack.
+ __ push(rdi);
+ // Push call kind information.
+ __ push(rcx);
- __ push(rdi); // Function is also the parameter to the runtime call.
- __ CallRuntime(Runtime::kLazyCompile, 1);
+ __ push(rdi); // Function is also the parameter to the runtime call.
+ __ CallRuntime(Runtime::kLazyCompile, 1);
- // Restore call kind information.
- __ pop(rcx);
- // Restore receiver.
- __ pop(rdi);
+ // Restore call kind information.
+ __ pop(rcx);
+ // Restore receiver.
+ __ pop(rdi);
- // Tear down temporary frame.
- __ LeaveInternalFrame();
+ // Tear down internal frame.
+ }
// Do a tail-call of the compiled function.
__ lea(rax, FieldOperand(rax, Code::kHeaderSize));
@@ -552,23 +572,24 @@ void Builtins::Generate_LazyCompile(MacroAssembler* masm) {
void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
// Enter an internal frame.
- __ EnterInternalFrame();
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
- // Push a copy of the function onto the stack.
- __ push(rdi);
- // Push call kind information.
- __ push(rcx);
+ // Push a copy of the function onto the stack.
+ __ push(rdi);
+ // Push call kind information.
+ __ push(rcx);
- __ push(rdi); // Function is also the parameter to the runtime call.
- __ CallRuntime(Runtime::kLazyRecompile, 1);
+ __ push(rdi); // Function is also the parameter to the runtime call.
+ __ CallRuntime(Runtime::kLazyRecompile, 1);
- // Restore call kind information.
- __ pop(rcx);
- // Restore function.
- __ pop(rdi);
+ // Restore call kind information.
+ __ pop(rcx);
+ // Restore function.
+ __ pop(rdi);
- // Tear down temporary frame.
- __ LeaveInternalFrame();
+ // Tear down internal frame.
+ }
// Do a tail-call of the compiled function.
__ lea(rax, FieldOperand(rax, Code::kHeaderSize));
@@ -579,14 +600,15 @@ void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
Deoptimizer::BailoutType type) {
// Enter an internal frame.
- __ EnterInternalFrame();
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
- // Pass the deoptimization type to the runtime system.
- __ Push(Smi::FromInt(static_cast<int>(type)));
+ // Pass the deoptimization type to the runtime system.
+ __ Push(Smi::FromInt(static_cast<int>(type)));
- __ CallRuntime(Runtime::kNotifyDeoptimized, 1);
- // Tear down temporary frame.
- __ LeaveInternalFrame();
+ __ CallRuntime(Runtime::kNotifyDeoptimized, 1);
+ // Tear down internal frame.
+ }
// Get the full codegen state from the stack and untag it.
__ SmiToInteger32(rcx, Operand(rsp, 1 * kPointerSize));
@@ -623,9 +645,10 @@ void Builtins::Generate_NotifyOSR(MacroAssembler* masm) {
// the registers without worrying about which of them contain
// pointers. This seems a bit fragile.
__ Pushad();
- __ EnterInternalFrame();
- __ CallRuntime(Runtime::kNotifyOSR, 0);
- __ LeaveInternalFrame();
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kNotifyOSR, 0);
+ }
__ Popad();
__ ret(0);
}
@@ -647,7 +670,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ testq(rax, rax);
__ j(not_zero, &done);
__ pop(rbx);
- __ Push(FACTORY->undefined_value());
+ __ Push(masm->isolate()->factory()->undefined_value());
__ push(rbx);
__ incq(rax);
__ bind(&done);
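The FACTORY macro presumably expands through Isolate::Current(), i.e. a thread-local lookup (an assumption; the macro itself is outside this diff). Reading the factory off the MacroAssembler's isolate avoids that lookup and keeps the stub bound to the right isolate, the pattern applied throughout this patch:

// Assumed expansion: #define FACTORY (Isolate::Current()->factory())
// The replacement binds to the assembling isolate instead:
Factory* factory = masm->isolate()->factory();
__ Push(factory->undefined_value());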
@@ -695,18 +718,21 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ j(above_equal, &shift_arguments);
__ bind(&convert_to_object);
- __ EnterInternalFrame(); // In order to preserve argument count.
- __ Integer32ToSmi(rax, rax);
- __ push(rax);
+ {
+ // Enter an internal frame in order to preserve argument count.
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Integer32ToSmi(rax, rax);
+ __ push(rax);
- __ push(rbx);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
- __ movq(rbx, rax);
- __ Set(rdx, 0); // indicate regular JS_FUNCTION
+ __ push(rbx);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ movq(rbx, rax);
+ __ Set(rdx, 0); // indicate regular JS_FUNCTION
+
+ __ pop(rax);
+ __ SmiToInteger32(rax, rax);
+ }
- __ pop(rax);
- __ SmiToInteger32(rax, rax);
- __ LeaveInternalFrame();
// Restore the function to rdi.
__ movq(rdi, Operand(rsp, rax, times_pointer_size, 1 * kPointerSize));
__ jmp(&patch_receiver, Label::kNear);
@@ -807,166 +833,164 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
// rsp+8: arguments
// rsp+16: receiver ("this")
// rsp+24: function
- __ EnterInternalFrame();
- // Stack frame:
- // rbp: Old base pointer
- // rbp[1]: return address
- // rbp[2]: function arguments
- // rbp[3]: receiver
- // rbp[4]: function
- static const int kArgumentsOffset = 2 * kPointerSize;
- static const int kReceiverOffset = 3 * kPointerSize;
- static const int kFunctionOffset = 4 * kPointerSize;
-
- __ push(Operand(rbp, kFunctionOffset));
- __ push(Operand(rbp, kArgumentsOffset));
- __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
-
- // Check the stack for overflow. We are not trying to catch
- // interruptions (e.g. debug break and preemption) here, so the "real stack
- // limit" is checked.
- Label okay;
- __ LoadRoot(kScratchRegister, Heap::kRealStackLimitRootIndex);
- __ movq(rcx, rsp);
- // Make rcx the space we have left. The stack might already be overflowed
- // here which will cause rcx to become negative.
- __ subq(rcx, kScratchRegister);
- // Make rdx the space we need for the array when it is unrolled onto the
- // stack.
- __ PositiveSmiTimesPowerOfTwoToInteger64(rdx, rax, kPointerSizeLog2);
- // Check if the arguments will overflow the stack.
- __ cmpq(rcx, rdx);
- __ j(greater, &okay); // Signed comparison.
-
- // Out of stack space.
- __ push(Operand(rbp, kFunctionOffset));
- __ push(rax);
- __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
- __ bind(&okay);
- // End of stack check.
-
- // Push current index and limit.
- const int kLimitOffset =
- StandardFrameConstants::kExpressionsOffset - 1 * kPointerSize;
- const int kIndexOffset = kLimitOffset - 1 * kPointerSize;
- __ push(rax); // limit
- __ push(Immediate(0)); // index
-
- // Get the receiver.
- __ movq(rbx, Operand(rbp, kReceiverOffset));
-
- // Check that the function is a JS function (otherwise it must be a proxy).
- Label push_receiver;
- __ movq(rdi, Operand(rbp, kFunctionOffset));
- __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
- __ j(not_equal, &push_receiver);
+ {
+ FrameScope frame_scope(masm, StackFrame::INTERNAL);
+ // Stack frame:
+ // rbp: Old base pointer
+ // rbp[1]: return address
+ // rbp[2]: function arguments
+ // rbp[3]: receiver
+ // rbp[4]: function
+ static const int kArgumentsOffset = 2 * kPointerSize;
+ static const int kReceiverOffset = 3 * kPointerSize;
+ static const int kFunctionOffset = 4 * kPointerSize;
+
+ __ push(Operand(rbp, kFunctionOffset));
+ __ push(Operand(rbp, kArgumentsOffset));
+ __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
+
+ // Check the stack for overflow. We are not trying to catch
+ // interruptions (e.g. debug break and preemption) here, so the "real stack
+ // limit" is checked.
+ Label okay;
+ __ LoadRoot(kScratchRegister, Heap::kRealStackLimitRootIndex);
+ __ movq(rcx, rsp);
+    // Make rcx the space we have left. The stack might already have
+    // overflowed here, which will cause rcx to become negative.
+ __ subq(rcx, kScratchRegister);
+ // Make rdx the space we need for the array when it is unrolled onto the
+ // stack.
+ __ PositiveSmiTimesPowerOfTwoToInteger64(rdx, rax, kPointerSizeLog2);
+ // Check if the arguments will overflow the stack.
+ __ cmpq(rcx, rdx);
+ __ j(greater, &okay); // Signed comparison.
+
+ // Out of stack space.
+ __ push(Operand(rbp, kFunctionOffset));
+ __ push(rax);
+ __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
+ __ bind(&okay);
+ // End of stack check.
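The check computes the space remaining below rsp (possibly negative if the stack has already overflowed) and compares it, signed, with the space the unrolled arguments will need; anything but a strictly greater margin takes the APPLY_OVERFLOW path. As arithmetic:

#include <cstdint>

// Restatement of the signed stack check above.
bool ArgumentsFitOnStack(intptr_t rsp, intptr_t real_stack_limit,
                         intptr_t argc, int pointer_size_log2) {
  intptr_t space_left = rsp - real_stack_limit;       // may be negative
  intptr_t space_needed = argc << pointer_size_log2;  // unrolled array size
  return space_left > space_needed;                   // j(greater, &okay)
}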
+
+ // Push current index and limit.
+ const int kLimitOffset =
+ StandardFrameConstants::kExpressionsOffset - 1 * kPointerSize;
+ const int kIndexOffset = kLimitOffset - 1 * kPointerSize;
+ __ push(rax); // limit
+ __ push(Immediate(0)); // index
+
+ // Get the receiver.
+ __ movq(rbx, Operand(rbp, kReceiverOffset));
+
+ // Check that the function is a JS function (otherwise it must be a proxy).
+ Label push_receiver;
+ __ movq(rdi, Operand(rbp, kFunctionOffset));
+ __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
+ __ j(not_equal, &push_receiver);
+
+ // Change context eagerly to get the right global object if necessary.
+ __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
- // Change context eagerly to get the right global object if necessary.
- __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+ // Do not transform the receiver for strict mode functions.
+ Label call_to_object, use_global_receiver;
+ __ movq(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+ __ testb(FieldOperand(rdx, SharedFunctionInfo::kStrictModeByteOffset),
+ Immediate(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
+ __ j(not_equal, &push_receiver);
- // Do not transform the receiver for strict mode functions.
- Label call_to_object, use_global_receiver;
- __ movq(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- __ testb(FieldOperand(rdx, SharedFunctionInfo::kStrictModeByteOffset),
- Immediate(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
- __ j(not_equal, &push_receiver);
-
- // Do not transform the receiver for natives.
- __ testb(FieldOperand(rdx, SharedFunctionInfo::kNativeByteOffset),
- Immediate(1 << SharedFunctionInfo::kNativeBitWithinByte));
- __ j(not_equal, &push_receiver);
-
- // Compute the receiver in non-strict mode.
- __ JumpIfSmi(rbx, &call_to_object, Label::kNear);
- __ CompareRoot(rbx, Heap::kNullValueRootIndex);
- __ j(equal, &use_global_receiver);
- __ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
- __ j(equal, &use_global_receiver);
-
- // If given receiver is already a JavaScript object then there's no
- // reason for converting it.
- STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
- __ CmpObjectType(rbx, FIRST_SPEC_OBJECT_TYPE, rcx);
- __ j(above_equal, &push_receiver);
-
- // Convert the receiver to an object.
- __ bind(&call_to_object);
- __ push(rbx);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
- __ movq(rbx, rax);
- __ jmp(&push_receiver, Label::kNear);
-
- // Use the current global receiver object as the receiver.
- __ bind(&use_global_receiver);
- const int kGlobalOffset =
- Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
- __ movq(rbx, FieldOperand(rsi, kGlobalOffset));
- __ movq(rbx, FieldOperand(rbx, GlobalObject::kGlobalContextOffset));
- __ movq(rbx, FieldOperand(rbx, kGlobalOffset));
- __ movq(rbx, FieldOperand(rbx, GlobalObject::kGlobalReceiverOffset));
-
- // Push the receiver.
- __ bind(&push_receiver);
- __ push(rbx);
-
- // Copy all arguments from the array to the stack.
- Label entry, loop;
- __ movq(rax, Operand(rbp, kIndexOffset));
- __ jmp(&entry);
- __ bind(&loop);
- __ movq(rdx, Operand(rbp, kArgumentsOffset)); // load arguments
-
- // Use inline caching to speed up access to arguments.
- Handle<Code> ic =
- masm->isolate()->builtins()->KeyedLoadIC_Initialize();
- __ Call(ic, RelocInfo::CODE_TARGET);
- // It is important that we do not have a test instruction after the
- // call. A test instruction after the call is used to indicate that
- // we have generated an inline version of the keyed load. In this
- // case, we know that we are not generating a test instruction next.
-
- // Push the nth argument.
- __ push(rax);
+ // Do not transform the receiver for natives.
+ __ testb(FieldOperand(rdx, SharedFunctionInfo::kNativeByteOffset),
+ Immediate(1 << SharedFunctionInfo::kNativeBitWithinByte));
+ __ j(not_equal, &push_receiver);
- // Update the index on the stack and in register rax.
- __ movq(rax, Operand(rbp, kIndexOffset));
- __ SmiAddConstant(rax, rax, Smi::FromInt(1));
- __ movq(Operand(rbp, kIndexOffset), rax);
+ // Compute the receiver in non-strict mode.
+ __ JumpIfSmi(rbx, &call_to_object, Label::kNear);
+ __ CompareRoot(rbx, Heap::kNullValueRootIndex);
+ __ j(equal, &use_global_receiver);
+ __ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
+ __ j(equal, &use_global_receiver);
- __ bind(&entry);
- __ cmpq(rax, Operand(rbp, kLimitOffset));
- __ j(not_equal, &loop);
+    // If the given receiver is already a JavaScript object, there is no
+    // reason to convert it.
+ STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+ __ CmpObjectType(rbx, FIRST_SPEC_OBJECT_TYPE, rcx);
+ __ j(above_equal, &push_receiver);
- // Invoke the function.
- Label call_proxy;
- ParameterCount actual(rax);
- __ SmiToInteger32(rax, rax);
- __ movq(rdi, Operand(rbp, kFunctionOffset));
- __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
- __ j(not_equal, &call_proxy);
- __ InvokeFunction(rdi, actual, CALL_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
+ // Convert the receiver to an object.
+ __ bind(&call_to_object);
+ __ push(rbx);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ movq(rbx, rax);
+ __ jmp(&push_receiver, Label::kNear);
- __ LeaveInternalFrame();
- __ ret(3 * kPointerSize); // remove this, receiver, and arguments
+ // Use the current global receiver object as the receiver.
+ __ bind(&use_global_receiver);
+ const int kGlobalOffset =
+ Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
+ __ movq(rbx, FieldOperand(rsi, kGlobalOffset));
+ __ movq(rbx, FieldOperand(rbx, GlobalObject::kGlobalContextOffset));
+ __ movq(rbx, FieldOperand(rbx, kGlobalOffset));
+ __ movq(rbx, FieldOperand(rbx, GlobalObject::kGlobalReceiverOffset));
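Taken together, the branches above are the non-strict receiver rules: strict-mode and native functions keep their receiver as-is, null and undefined become the global receiver, other primitives are boxed via TO_OBJECT, and spec objects pass through. A standalone model with illustrative types:

// Model only; not the V8 API.
enum class Kind { kNull, kUndefined, kSmi, kOtherPrimitive, kSpecObject };
struct Value { Kind kind; };

Value ToObjectModel(Value v) { return Value{Kind::kSpecObject}; }  // boxes primitives

Value TransformReceiver(Value receiver, Value global_receiver,
                        bool is_strict, bool is_native) {
  if (is_strict || is_native) return receiver;  // straight to push_receiver
  if (receiver.kind == Kind::kNull ||
      receiver.kind == Kind::kUndefined) {
    return global_receiver;                     // &use_global_receiver
  }
  if (receiver.kind == Kind::kSpecObject) return receiver;
  return ToObjectModel(receiver);               // &call_to_object
}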
- // Invoke the function proxy.
- __ bind(&call_proxy);
- __ push(rdi); // add function proxy as last argument
- __ incq(rax);
- __ Set(rbx, 0);
- __ SetCallKind(rcx, CALL_AS_METHOD);
- __ GetBuiltinEntry(rdx, Builtins::CALL_FUNCTION_PROXY);
- __ call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
- RelocInfo::CODE_TARGET);
+ // Push the receiver.
+ __ bind(&push_receiver);
+ __ push(rbx);
- __ LeaveInternalFrame();
- __ ret(3 * kPointerSize); // remove this, receiver, and arguments
-}
+ // Copy all arguments from the array to the stack.
+ Label entry, loop;
+ __ movq(rax, Operand(rbp, kIndexOffset));
+ __ jmp(&entry);
+ __ bind(&loop);
+ __ movq(rdx, Operand(rbp, kArgumentsOffset)); // load arguments
+
+ // Use inline caching to speed up access to arguments.
+ Handle<Code> ic =
+ masm->isolate()->builtins()->KeyedLoadIC_Initialize();
+ __ Call(ic, RelocInfo::CODE_TARGET);
+ // It is important that we do not have a test instruction after the
+ // call. A test instruction after the call is used to indicate that
+ // we have generated an inline version of the keyed load. In this
+ // case, we know that we are not generating a test instruction next.
+
+ // Push the nth argument.
+ __ push(rax);
+
+ // Update the index on the stack and in register rax.
+ __ movq(rax, Operand(rbp, kIndexOffset));
+ __ SmiAddConstant(rax, rax, Smi::FromInt(1));
+ __ movq(Operand(rbp, kIndexOffset), rax);
+
+ __ bind(&entry);
+ __ cmpq(rax, Operand(rbp, kLimitOffset));
+ __ j(not_equal, &loop);
+
+ // Invoke the function.
+ Label call_proxy;
+ ParameterCount actual(rax);
+ __ SmiToInteger32(rax, rax);
+ __ movq(rdi, Operand(rbp, kFunctionOffset));
+ __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
+ __ j(not_equal, &call_proxy);
+ __ InvokeFunction(rdi, actual, CALL_FUNCTION,
+ NullCallWrapper(), CALL_AS_METHOD);
+ frame_scope.GenerateLeaveFrame();
+ __ ret(3 * kPointerSize); // remove this, receiver, and arguments
+
+ // Invoke the function proxy.
+ __ bind(&call_proxy);
+ __ push(rdi); // add function proxy as last argument
+ __ incq(rax);
+ __ Set(rbx, 0);
+ __ SetCallKind(rcx, CALL_AS_METHOD);
+ __ GetBuiltinEntry(rdx, Builtins::CALL_FUNCTION_PROXY);
+ __ call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+ RelocInfo::CODE_TARGET);
-// Number of empty elements to allocate for an empty array.
-static const int kPreallocatedArrayElements = 4;
+ // Leave internal frame.
+ }
+ __ ret(3 * kPointerSize); // remove this, receiver, and arguments
+}
// Allocate an empty JSArray. The allocated array is put into the result
@@ -979,9 +1003,9 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
Register scratch1,
Register scratch2,
Register scratch3,
- int initial_capacity,
Label* gc_required) {
- ASSERT(initial_capacity >= 0);
+ const int initial_capacity = JSArray::kPreallocatedArrayElements;
+ STATIC_ASSERT(initial_capacity >= 0);
// Load the initial map from the array function.
__ movq(scratch1, FieldOperand(array_function,
@@ -1005,9 +1029,10 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
// result: JSObject
// scratch1: initial map
// scratch2: start of next object
+ Factory* factory = masm->isolate()->factory();
__ movq(FieldOperand(result, JSObject::kMapOffset), scratch1);
__ Move(FieldOperand(result, JSArray::kPropertiesOffset),
- FACTORY->empty_fixed_array());
+ factory->empty_fixed_array());
// Field JSArray::kElementsOffset is initialized later.
__ Move(FieldOperand(result, JSArray::kLengthOffset), Smi::FromInt(0));
@@ -1015,7 +1040,7 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
// fixed array.
if (initial_capacity == 0) {
__ Move(FieldOperand(result, JSArray::kElementsOffset),
- FACTORY->empty_fixed_array());
+ factory->empty_fixed_array());
return;
}
@@ -1032,15 +1057,14 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
// scratch1: elements array
// scratch2: start of next object
__ Move(FieldOperand(scratch1, HeapObject::kMapOffset),
- FACTORY->fixed_array_map());
+ factory->fixed_array_map());
__ Move(FieldOperand(scratch1, FixedArray::kLengthOffset),
Smi::FromInt(initial_capacity));
// Fill the FixedArray with the hole value. Inline the code if short.
// Reconsider loop unfolding if kPreallocatedArrayElements gets changed.
static const int kLoopUnfoldLimit = 4;
- ASSERT(kPreallocatedArrayElements <= kLoopUnfoldLimit);
- __ Move(scratch3, FACTORY->the_hole_value());
+ __ LoadRoot(scratch3, Heap::kTheHoleValueRootIndex);
if (initial_capacity <= kLoopUnfoldLimit) {
// Use a scratch register here to have only one reloc info when unfolding
// the loop.
@@ -1051,13 +1075,17 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
}
} else {
Label loop, entry;
+ __ movq(scratch2, Immediate(initial_capacity));
__ jmp(&entry);
__ bind(&loop);
- __ movq(Operand(scratch1, 0), scratch3);
- __ addq(scratch1, Immediate(kPointerSize));
+ __ movq(FieldOperand(scratch1,
+ scratch2,
+ times_pointer_size,
+ FixedArray::kHeaderSize),
+ scratch3);
__ bind(&entry);
- __ cmpq(scratch1, scratch2);
- __ j(below, &loop);
+ __ decq(scratch2);
+ __ j(not_sign, &loop);
}
}
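The rewritten loop replaces the bumped pointer with a single index register counting down, addressing each slot via FieldOperand; j(not_sign) exits once the decrement wraps below zero. Equivalent logic:

// Standalone model of the down-counting fill above.
void FillWithHole(void** elements, int initial_capacity, void* the_hole) {
  for (int i = initial_capacity - 1; i >= 0; --i) {  // decq + j(not_sign, &loop)
    elements[i] = the_hole;
  }
}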
@@ -1073,38 +1101,25 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
// register elements_array is scratched.
static void AllocateJSArray(MacroAssembler* masm,
Register array_function, // Array function.
- Register array_size, // As a smi.
+ Register array_size, // As a smi, cannot be 0.
Register result,
Register elements_array,
Register elements_array_end,
Register scratch,
bool fill_with_hole,
Label* gc_required) {
- Label not_empty, allocated;
-
// Load the initial map from the array function.
__ movq(elements_array,
FieldOperand(array_function,
JSFunction::kPrototypeOrInitialMapOffset));
- // Check whether an empty sized array is requested.
- __ testq(array_size, array_size);
- __ j(not_zero, &not_empty);
-
- // If an empty array is requested allocate a small elements array anyway. This
- // keeps the code below free of special casing for the empty array.
- int size = JSArray::kSize + FixedArray::SizeFor(kPreallocatedArrayElements);
- __ AllocateInNewSpace(size,
- result,
- elements_array_end,
- scratch,
- gc_required,
- TAG_OBJECT);
- __ jmp(&allocated);
+ if (FLAG_debug_code) { // Assert that array size is not zero.
+ __ testq(array_size, array_size);
+ __ Assert(not_zero, "array size is unexpectedly 0");
+ }
// Allocate the JSArray object together with space for a FixedArray with the
// requested elements.
- __ bind(&not_empty);
SmiIndex index =
masm->SmiToIndex(kScratchRegister, array_size, kPointerSizeLog2);
__ AllocateInNewSpace(JSArray::kSize + FixedArray::kHeaderSize,
@@ -1122,9 +1137,9 @@ static void AllocateJSArray(MacroAssembler* masm,
// elements_array: initial map
// elements_array_end: start of next object
// array_size: size of array (smi)
- __ bind(&allocated);
+ Factory* factory = masm->isolate()->factory();
__ movq(FieldOperand(result, JSObject::kMapOffset), elements_array);
- __ Move(elements_array, FACTORY->empty_fixed_array());
+ __ Move(elements_array, factory->empty_fixed_array());
__ movq(FieldOperand(result, JSArray::kPropertiesOffset), elements_array);
// Field JSArray::kElementsOffset is initialized later.
__ movq(FieldOperand(result, JSArray::kLengthOffset), array_size);
@@ -1143,16 +1158,7 @@ static void AllocateJSArray(MacroAssembler* masm,
// elements_array_end: start of next object
// array_size: size of array (smi)
__ Move(FieldOperand(elements_array, JSObject::kMapOffset),
- FACTORY->fixed_array_map());
- Label not_empty_2, fill_array;
- __ SmiTest(array_size);
- __ j(not_zero, &not_empty_2);
- // Length of the FixedArray is the number of pre-allocated elements even
- // though the actual JSArray has length 0.
- __ Move(FieldOperand(elements_array, FixedArray::kLengthOffset),
- Smi::FromInt(kPreallocatedArrayElements));
- __ jmp(&fill_array);
- __ bind(&not_empty_2);
+ factory->fixed_array_map());
// For non-empty JSArrays the length of the FixedArray and the JSArray is the
// same.
__ movq(FieldOperand(elements_array, FixedArray::kLengthOffset), array_size);
@@ -1161,10 +1167,9 @@ static void AllocateJSArray(MacroAssembler* masm,
// result: JSObject
// elements_array: elements array
// elements_array_end: start of next object
- __ bind(&fill_array);
if (fill_with_hole) {
Label loop, entry;
- __ Move(scratch, FACTORY->the_hole_value());
+ __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
__ lea(elements_array, Operand(elements_array,
FixedArray::kHeaderSize - kHeapObjectTag));
__ jmp(&entry);
@@ -1194,12 +1199,13 @@ static void AllocateJSArray(MacroAssembler* masm,
// a construct call and a normal call.
static void ArrayNativeCode(MacroAssembler* masm,
Label *call_generic_code) {
- Label argc_one_or_more, argc_two_or_more;
+ Label argc_one_or_more, argc_two_or_more, empty_array, not_empty_array;
// Check for array construction with zero arguments.
__ testq(rax, rax);
__ j(not_zero, &argc_one_or_more);
+ __ bind(&empty_array);
// Handle construction of an empty array.
AllocateEmptyJSArray(masm,
rdi,
@@ -1207,7 +1213,6 @@ static void ArrayNativeCode(MacroAssembler* masm,
rcx,
rdx,
r8,
- kPreallocatedArrayElements,
call_generic_code);
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->array_function_native(), 1);
@@ -1220,6 +1225,16 @@ static void ArrayNativeCode(MacroAssembler* masm,
__ cmpq(rax, Immediate(1));
__ j(not_equal, &argc_two_or_more);
__ movq(rdx, Operand(rsp, kPointerSize)); // Get the argument from the stack.
+
+ __ SmiTest(rdx);
+ __ j(not_zero, &not_empty_array);
+ __ pop(r8); // Adjust stack.
+ __ Drop(1);
+ __ push(r8);
+ __ movq(rax, Immediate(0)); // Treat this as a call with argc of zero.
+ __ jmp(&empty_array);
+
+ __ bind(&not_empty_array);
__ JumpUnlessNonNegativeSmi(rdx, call_generic_code);
// Handle construction of an empty array of a certain size. Bail out if size
@@ -1520,10 +1535,11 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
// Pass the function to optimize as the argument to the on-stack
// replacement runtime function.
- __ EnterInternalFrame();
- __ push(rax);
- __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
- __ LeaveInternalFrame();
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ push(rax);
+ __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
+ }
// If the result was -1 it means that we couldn't optimize the
// function. Just return and continue in the unoptimized version.
@@ -1541,7 +1557,9 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
StackCheckStub stub;
__ TailCallStub(&stub);
- __ Abort("Unreachable code: returned from tail call.");
+ if (FLAG_debug_code) {
+ __ Abort("Unreachable code: returned from tail call.");
+ }
__ bind(&ok);
__ ret(0);
diff --git a/deps/v8/src/x64/code-stubs-x64.cc b/deps/v8/src/x64/code-stubs-x64.cc
index df4438b73..96f70bfa9 100644
--- a/deps/v8/src/x64/code-stubs-x64.cc
+++ b/deps/v8/src/x64/code-stubs-x64.cc
@@ -68,9 +68,9 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) {
// Get the function info from the stack.
__ movq(rdx, Operand(rsp, 1 * kPointerSize));
- int map_index = strict_mode_ == kStrictMode
- ? Context::STRICT_MODE_FUNCTION_MAP_INDEX
- : Context::FUNCTION_MAP_INDEX;
+ int map_index = (language_mode_ == CLASSIC_MODE)
+ ? Context::FUNCTION_MAP_INDEX
+ : Context::STRICT_MODE_FUNCTION_MAP_INDEX;
// Compute the function map in the current global context and set that
// as the map of the allocated object.
@@ -155,6 +155,131 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
}
+void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
+ // Stack layout on entry:
+ //
+ // [rsp + (1 * kPointerSize)]: function
+ // [rsp + (2 * kPointerSize)]: serialized scope info
+
+ // Try to allocate the context in new space.
+ Label gc;
+ int length = slots_ + Context::MIN_CONTEXT_SLOTS;
+ __ AllocateInNewSpace(FixedArray::SizeFor(length),
+ rax, rbx, rcx, &gc, TAG_OBJECT);
+
+ // Get the function from the stack.
+ __ movq(rcx, Operand(rsp, 1 * kPointerSize));
+
+ // Get the serialized scope info from the stack.
+ __ movq(rbx, Operand(rsp, 2 * kPointerSize));
+
+  // Set up the object header.
+ __ LoadRoot(kScratchRegister, Heap::kBlockContextMapRootIndex);
+ __ movq(FieldOperand(rax, HeapObject::kMapOffset), kScratchRegister);
+ __ Move(FieldOperand(rax, FixedArray::kLengthOffset), Smi::FromInt(length));
+
+  // If this block context is nested in the global context, we get a smi
+  // sentinel instead of a function. The block context should get the
+  // canonical empty function of the global context as its closure, which
+  // we still have to look up.
+ Label after_sentinel;
+ __ JumpIfNotSmi(rcx, &after_sentinel, Label::kNear);
+ if (FLAG_debug_code) {
+ const char* message = "Expected 0 as a Smi sentinel";
+ __ cmpq(rcx, Immediate(0));
+ __ Assert(equal, message);
+ }
+ __ movq(rcx, GlobalObjectOperand());
+ __ movq(rcx, FieldOperand(rcx, GlobalObject::kGlobalContextOffset));
+ __ movq(rcx, ContextOperand(rcx, Context::CLOSURE_INDEX));
+ __ bind(&after_sentinel);
+
+  // Set up the fixed slots.
+ __ movq(ContextOperand(rax, Context::CLOSURE_INDEX), rcx);
+ __ movq(ContextOperand(rax, Context::PREVIOUS_INDEX), rsi);
+ __ movq(ContextOperand(rax, Context::EXTENSION_INDEX), rbx);
+
+ // Copy the global object from the previous context.
+ __ movq(rbx, ContextOperand(rsi, Context::GLOBAL_INDEX));
+ __ movq(ContextOperand(rax, Context::GLOBAL_INDEX), rbx);
+
+ // Initialize the rest of the slots to the hole value.
+ __ LoadRoot(rbx, Heap::kTheHoleValueRootIndex);
+ for (int i = 0; i < slots_; i++) {
+ __ movq(ContextOperand(rax, i + Context::MIN_CONTEXT_SLOTS), rbx);
+ }
+
+ // Return and remove the on-stack parameter.
+ __ movq(rsi, rax);
+ __ ret(2 * kPointerSize);
+
+ // Need to collect. Call into runtime system.
+ __ bind(&gc);
+ __ TailCallRuntime(Runtime::kPushBlockContext, 2, 1);
+}
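For orientation, the slot layout the stub writes out, with the Context:: index names used above (exact index values are an assumption of this sketch, not stated in the diff):

// Illustrative layout of the allocated block context.
struct BlockContextLayout {
  void* map;        // Heap::kBlockContextMapRootIndex
  void* length;     // smi: slots_ + Context::MIN_CONTEXT_SLOTS
  void* closure;    // Context::CLOSURE_INDEX   (rcx, possibly looked up)
  void* previous;   // Context::PREVIOUS_INDEX  (rsi, the enclosing context)
  void* extension;  // Context::EXTENSION_INDEX (rbx, the serialized scope info)
  void* global;     // Context::GLOBAL_INDEX    (copied from the previous context)
  // Followed by slots_ slots, initialized to the hole value by the loop above.
};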
+
+
+static void GenerateFastCloneShallowArrayCommon(
+ MacroAssembler* masm,
+ int length,
+ FastCloneShallowArrayStub::Mode mode,
+ Label* fail) {
+ // Registers on entry:
+ //
+ // rcx: boilerplate literal array.
+ ASSERT(mode != FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS);
+
+ // All sizes here are multiples of kPointerSize.
+ int elements_size = 0;
+ if (length > 0) {
+ elements_size = mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
+ ? FixedDoubleArray::SizeFor(length)
+ : FixedArray::SizeFor(length);
+ }
+ int size = JSArray::kSize + elements_size;
+
+ // Allocate both the JS array and the elements array in one big
+ // allocation. This avoids multiple limit checks.
+ __ AllocateInNewSpace(size, rax, rbx, rdx, fail, TAG_OBJECT);
+
+ // Copy the JS array part.
+ for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
+ if ((i != JSArray::kElementsOffset) || (length == 0)) {
+ __ movq(rbx, FieldOperand(rcx, i));
+ __ movq(FieldOperand(rax, i), rbx);
+ }
+ }
+
+ if (length > 0) {
+    // Get hold of the elements array of the boilerplate and set up the
+ // elements pointer in the resulting object.
+ __ movq(rcx, FieldOperand(rcx, JSArray::kElementsOffset));
+ __ lea(rdx, Operand(rax, JSArray::kSize));
+ __ movq(FieldOperand(rax, JSArray::kElementsOffset), rdx);
+
+ // Copy the elements array.
+ if (mode == FastCloneShallowArrayStub::CLONE_ELEMENTS) {
+ for (int i = 0; i < elements_size; i += kPointerSize) {
+ __ movq(rbx, FieldOperand(rcx, i));
+ __ movq(FieldOperand(rdx, i), rbx);
+ }
+ } else {
+ ASSERT(mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS);
+ int i;
+ for (i = 0; i < FixedDoubleArray::kHeaderSize; i += kPointerSize) {
+ __ movq(rbx, FieldOperand(rcx, i));
+ __ movq(FieldOperand(rdx, i), rbx);
+ }
+ while (i < elements_size) {
+ __ movsd(xmm0, FieldOperand(rcx, i));
+ __ movsd(FieldOperand(rdx, i), xmm0);
+ i += kDoubleSize;
+ }
+ ASSERT(i == elements_size);
+ }
+ }
+}
+
void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
// Stack layout on entry:
//
@@ -162,29 +287,54 @@ void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
// [rsp + (2 * kPointerSize)]: literal index.
// [rsp + (3 * kPointerSize)]: literals array.
- // All sizes here are multiples of kPointerSize.
- int elements_size = (length_ > 0) ? FixedArray::SizeFor(length_) : 0;
- int size = JSArray::kSize + elements_size;
-
// Load boilerplate object into rcx and check if we need to create a
// boilerplate.
- Label slow_case;
__ movq(rcx, Operand(rsp, 3 * kPointerSize));
__ movq(rax, Operand(rsp, 2 * kPointerSize));
SmiIndex index = masm->SmiToIndex(rax, rax, kPointerSizeLog2);
__ movq(rcx,
FieldOperand(rcx, index.reg, index.scale, FixedArray::kHeaderSize));
__ CompareRoot(rcx, Heap::kUndefinedValueRootIndex);
+ Label slow_case;
__ j(equal, &slow_case);
+ FastCloneShallowArrayStub::Mode mode = mode_;
+ // rcx is boilerplate object.
+ Factory* factory = masm->isolate()->factory();
+ if (mode == CLONE_ANY_ELEMENTS) {
+ Label double_elements, check_fast_elements;
+ __ movq(rbx, FieldOperand(rcx, JSArray::kElementsOffset));
+ __ Cmp(FieldOperand(rbx, HeapObject::kMapOffset),
+ factory->fixed_cow_array_map());
+ __ j(not_equal, &check_fast_elements);
+ GenerateFastCloneShallowArrayCommon(masm, 0,
+ COPY_ON_WRITE_ELEMENTS, &slow_case);
+ __ ret(3 * kPointerSize);
+
+ __ bind(&check_fast_elements);
+ __ Cmp(FieldOperand(rbx, HeapObject::kMapOffset),
+ factory->fixed_array_map());
+ __ j(not_equal, &double_elements);
+ GenerateFastCloneShallowArrayCommon(masm, length_,
+ CLONE_ELEMENTS, &slow_case);
+ __ ret(3 * kPointerSize);
+
+ __ bind(&double_elements);
+ mode = CLONE_DOUBLE_ELEMENTS;
+ // Fall through to generate the code to handle double elements.
+ }
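The CLONE_ANY_ELEMENTS case is resolved by inspecting the boilerplate's elements map: copy-on-write elements are shared (only a length-0 header clone), plain FixedArrays take the word-copy path, and anything else falls through as a double array. As a decision function:

// Standalone model of the mode selection above; illustrative enums.
enum class ElementsMap { kFixedCow, kFixedArray, kFixedDouble };
enum class CloneMode { kCopyOnWrite, kCloneElements, kCloneDoubleElements };

CloneMode SelectCloneMode(ElementsMap map) {
  if (map == ElementsMap::kFixedCow) return CloneMode::kCopyOnWrite;
  if (map == ElementsMap::kFixedArray) return CloneMode::kCloneElements;
  return CloneMode::kCloneDoubleElements;  // the fall-through path above
}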
+
if (FLAG_debug_code) {
const char* message;
Heap::RootListIndex expected_map_index;
- if (mode_ == CLONE_ELEMENTS) {
+ if (mode == CLONE_ELEMENTS) {
message = "Expected (writable) fixed array";
expected_map_index = Heap::kFixedArrayMapRootIndex;
+ } else if (mode == CLONE_DOUBLE_ELEMENTS) {
+ message = "Expected (writable) fixed double array";
+ expected_map_index = Heap::kFixedDoubleArrayMapRootIndex;
} else {
- ASSERT(mode_ == COPY_ON_WRITE_ELEMENTS);
+ ASSERT(mode == COPY_ON_WRITE_ELEMENTS);
message = "Expected copy-on-write fixed array";
expected_map_index = Heap::kFixedCOWArrayMapRootIndex;
}
@@ -196,43 +346,62 @@ void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
__ pop(rcx);
}
- // Allocate both the JS array and the elements array in one big
- // allocation. This avoids multiple limit checks.
- __ AllocateInNewSpace(size, rax, rbx, rdx, &slow_case, TAG_OBJECT);
+ GenerateFastCloneShallowArrayCommon(masm, length_, mode, &slow_case);
+ __ ret(3 * kPointerSize);
- // Copy the JS array part.
- for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
- if ((i != JSArray::kElementsOffset) || (length_ == 0)) {
- __ movq(rbx, FieldOperand(rcx, i));
- __ movq(FieldOperand(rax, i), rbx);
- }
- }
+ __ bind(&slow_case);
+ __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1);
+}
- if (length_ > 0) {
- // Get hold of the elements array of the boilerplate and setup the
- // elements pointer in the resulting object.
- __ movq(rcx, FieldOperand(rcx, JSArray::kElementsOffset));
- __ lea(rdx, Operand(rax, JSArray::kSize));
- __ movq(FieldOperand(rax, JSArray::kElementsOffset), rdx);
- // Copy the elements array.
- for (int i = 0; i < elements_size; i += kPointerSize) {
- __ movq(rbx, FieldOperand(rcx, i));
- __ movq(FieldOperand(rdx, i), rbx);
- }
+void FastCloneShallowObjectStub::Generate(MacroAssembler* masm) {
+ // Stack layout on entry:
+ //
+ // [rsp + kPointerSize]: object literal flags.
+ // [rsp + (2 * kPointerSize)]: constant properties.
+ // [rsp + (3 * kPointerSize)]: literal index.
+ // [rsp + (4 * kPointerSize)]: literals array.
+
+  // Load the boilerplate object into rcx and check if we need to create a
+ // boilerplate.
+ Label slow_case;
+ __ movq(rcx, Operand(rsp, 4 * kPointerSize));
+ __ movq(rax, Operand(rsp, 3 * kPointerSize));
+ SmiIndex index = masm->SmiToIndex(rax, rax, kPointerSizeLog2);
+ __ movq(rcx,
+ FieldOperand(rcx, index.reg, index.scale, FixedArray::kHeaderSize));
+ __ CompareRoot(rcx, Heap::kUndefinedValueRootIndex);
+ __ j(equal, &slow_case);
+
+ // Check that the boilerplate contains only fast properties and we can
+ // statically determine the instance size.
+ int size = JSObject::kHeaderSize + length_ * kPointerSize;
+ __ movq(rax, FieldOperand(rcx, HeapObject::kMapOffset));
+ __ movzxbq(rax, FieldOperand(rax, Map::kInstanceSizeOffset));
+ __ cmpq(rax, Immediate(size >> kPointerSizeLog2));
+ __ j(not_equal, &slow_case);
+
+ // Allocate the JS object and copy header together with all in-object
+ // properties from the boilerplate.
+ __ AllocateInNewSpace(size, rax, rbx, rdx, &slow_case, TAG_OBJECT);
+ for (int i = 0; i < size; i += kPointerSize) {
+ __ movq(rbx, FieldOperand(rcx, i));
+ __ movq(FieldOperand(rax, i), rbx);
}
// Return and remove the on-stack parameters.
- __ ret(3 * kPointerSize);
+ __ ret(4 * kPointerSize);
__ bind(&slow_case);
- __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1);
+ __ TailCallRuntime(Runtime::kCreateObjectLiteralShallow, 4, 1);
}
// The stub expects its argument on the stack and returns its result in tos_:
// zero for false, and a non-zero value for true.
void ToBooleanStub::Generate(MacroAssembler* masm) {
+ // This stub overrides SometimesSetsUpAFrame() to return false. That means
+ // we cannot call anything that could cause a GC from this stub.
Label patch;
const Register argument = rax;
const Register map = rdx;
@@ -328,6 +497,25 @@ void ToBooleanStub::Generate(MacroAssembler* masm) {
}
+void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
+ __ PushCallerSaved(save_doubles_);
+ const int argument_count = 1;
+ __ PrepareCallCFunction(argument_count);
+#ifdef _WIN64
+ __ LoadAddress(rcx, ExternalReference::isolate_address());
+#else
+ __ LoadAddress(rdi, ExternalReference::isolate_address());
+#endif
+
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ CallCFunction(
+ ExternalReference::store_buffer_overflow_function(masm->isolate()),
+ argument_count);
+ __ PopCallerSaved(save_doubles_);
+ __ ret(0);
+}
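The #ifdef selects the first integer-argument register of the host C ABI (rcx on Win64, rdi under the System V AMD64 convention) so the isolate pointer reaches the C function either way, while PushCallerSaved/PopCallerSaved preserve JS-visible register state across the call. A hypothetical C-side view (the real entry point is only reachable through the ExternalReference):

// Hypothetical declaration for illustration; not the actual V8 signature.
struct Isolate;  // opaque here
extern "C" void StoreBufferOverflowEntry(Isolate* isolate);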
+
+
void ToBooleanStub::CheckOddball(MacroAssembler* masm,
Type type,
Heap::RootListIndex value,
@@ -622,12 +810,13 @@ void UnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm,
__ jmp(&heapnumber_allocated);
__ bind(&slow_allocate_heapnumber);
- __ EnterInternalFrame();
- __ push(rax);
- __ CallRuntime(Runtime::kNumberAlloc, 0);
- __ movq(rcx, rax);
- __ pop(rax);
- __ LeaveInternalFrame();
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ push(rax);
+ __ CallRuntime(Runtime::kNumberAlloc, 0);
+ __ movq(rcx, rax);
+ __ pop(rax);
+ }
__ bind(&heapnumber_allocated);
// rcx: allocated 'empty' number
@@ -751,6 +940,10 @@ void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
void BinaryOpStub::Generate(MacroAssembler* masm) {
+ // Explicitly allow generation of nested stubs. It is safe here because
+ // generation code does not use any raw pointers.
+ AllowStubCallsScope allow_stub_calls(masm, true);
+
switch (operands_type_) {
case BinaryOpIC::UNINITIALIZED:
GenerateTypeTransition(masm);
@@ -1414,6 +1607,8 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
__ cmpq(rbx, Operand(rcx, 0));
__ j(not_equal, &cache_miss, Label::kNear);
// Cache hit!
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->transcendental_cache_hit(), 1);
__ movq(rax, Operand(rcx, 2 * kIntSize));
if (tagged) {
__ fstp(0); // Clear FPU stack.
@@ -1424,6 +1619,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
}
__ bind(&cache_miss);
+ __ IncrementCounter(counters->transcendental_cache_miss(), 1);
// Update cache with new value.
if (tagged) {
__ AllocateHeapNumber(rax, rdi, &runtime_call_clear_stack);
@@ -1453,11 +1649,12 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
__ addq(rsp, Immediate(kDoubleSize));
// We return the value in xmm1 without adding it to the cache, but
// we cause a scavenging GC so that future allocations will succeed.
- __ EnterInternalFrame();
- // Allocate an unused object bigger than a HeapNumber.
- __ Push(Smi::FromInt(2 * kDoubleSize));
- __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
- __ LeaveInternalFrame();
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ // Allocate an unused object bigger than a HeapNumber.
+ __ Push(Smi::FromInt(2 * kDoubleSize));
+ __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
+ }
__ Ret();
}
@@ -1473,10 +1670,11 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
__ bind(&runtime_call);
__ AllocateHeapNumber(rax, rdi, &skip_cache);
__ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm1);
- __ EnterInternalFrame();
- __ push(rax);
- __ CallRuntime(RuntimeFunction(), 1);
- __ LeaveInternalFrame();
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ push(rax);
+ __ CallRuntime(RuntimeFunction(), 1);
+ }
__ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
__ Ret();
}
@@ -1488,6 +1686,7 @@ Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
// Add more cases when necessary.
case TranscendentalCache::SIN: return Runtime::kMath_sin;
case TranscendentalCache::COS: return Runtime::kMath_cos;
+ case TranscendentalCache::TAN: return Runtime::kMath_tan;
case TranscendentalCache::LOG: return Runtime::kMath_log;
default:
UNIMPLEMENTED();
@@ -1503,7 +1702,9 @@ void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm) {
// rcx: Pointer to cache entry. Must be preserved.
// st(0): Input double
Label done;
- if (type_ == TranscendentalCache::SIN || type_ == TranscendentalCache::COS) {
+ if (type_ == TranscendentalCache::SIN ||
+ type_ == TranscendentalCache::COS ||
+ type_ == TranscendentalCache::TAN) {
// Both fsin and fcos require arguments in the range +/-2^63 and
// return NaN for infinities and NaN. They can share all code except
// the actual fsin/fcos operation.
@@ -1573,6 +1774,12 @@ void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm) {
case TranscendentalCache::COS:
__ fcos();
break;
+ case TranscendentalCache::TAN:
+ // FPTAN calculates the tangent of st(0) and pushes 1.0 onto the
+ // FP register stack.
+ __ fptan();
+ __ fstp(0); // Pop FP register stack.
+ break;
default:
UNREACHABLE();
}
@@ -2346,10 +2553,6 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
#ifdef V8_INTERPRETED_REGEXP
__ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
#else // V8_INTERPRETED_REGEXP
- if (!FLAG_regexp_entry_native) {
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
- return;
- }
// Stack frame on entry.
// rsp[0]: return address
@@ -2455,26 +2658,40 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ movq(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
__ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
// First check for flat two byte string.
- __ andb(rbx, Immediate(
- kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask));
+ __ andb(rbx, Immediate(kIsNotStringMask |
+ kStringRepresentationMask |
+ kStringEncodingMask |
+ kShortExternalStringMask));
STATIC_ASSERT((kStringTag | kSeqStringTag | kTwoByteStringTag) == 0);
__ j(zero, &seq_two_byte_string, Label::kNear);
- // Any other flat string must be a flat ascii string.
- __ andb(rbx, Immediate(kIsNotStringMask | kStringRepresentationMask));
+ // Any other flat string must be a flat ascii string. None of the following
+ // string type tests will succeed if the subject is not a string or is a
+ // short external string.
+ __ andb(rbx, Immediate(kIsNotStringMask |
+ kStringRepresentationMask |
+ kShortExternalStringMask));
__ j(zero, &seq_ascii_string, Label::kNear);
+ // rbx: whether the subject is a string and, if so, its string representation
// Check for flat cons string or sliced string.
// A flat cons string is a cons string where the second part is the empty
// string. In that case the subject string is just the first part of the cons
// string. Also in this case the first part of the cons string is known to be
// a sequential string or an external string.
// In the case of a sliced string its offset has to be taken into account.
- Label cons_string, check_encoding;
+ Label cons_string, external_string, check_encoding;
STATIC_ASSERT(kConsStringTag < kExternalStringTag);
STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
+ STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
+ STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
__ cmpq(rbx, Immediate(kExternalStringTag));
__ j(less, &cons_string, Label::kNear);
- __ j(equal, &runtime);
+ __ j(equal, &external_string);
+
+ // Catch non-string subject or short external string.
+ STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag != 0);
+ __ testb(rbx, Immediate(kIsNotStringMask | kShortExternalStringMask));
+ __ j(not_zero, &runtime);
// String is sliced.
__ SmiToInteger32(r14, FieldOperand(rdi, SlicedString::kOffsetOffset));
@@ -2498,10 +2715,10 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
Immediate(kStringRepresentationMask | kStringEncodingMask));
STATIC_ASSERT((kSeqStringTag | kTwoByteStringTag) == 0);
__ j(zero, &seq_two_byte_string, Label::kNear);
- // Any other flat string must be ascii.
+ // Any other flat string must be sequential ascii or external.
__ testb(FieldOperand(rbx, Map::kInstanceTypeOffset),
Immediate(kStringRepresentationMask));
- __ j(not_zero, &runtime);
+ __ j(not_zero, &external_string);
__ bind(&seq_ascii_string);
// rdi: subject string (sequential ascii)
@@ -2670,12 +2887,18 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Store last subject and last input.
__ movq(rax, Operand(rsp, kSubjectOffset));
__ movq(FieldOperand(rbx, RegExpImpl::kLastSubjectOffset), rax);
- __ movq(rcx, rbx);
- __ RecordWrite(rcx, RegExpImpl::kLastSubjectOffset, rax, rdi);
+ __ RecordWriteField(rbx,
+ RegExpImpl::kLastSubjectOffset,
+ rax,
+ rdi,
+ kDontSaveFPRegs);
__ movq(rax, Operand(rsp, kSubjectOffset));
__ movq(FieldOperand(rbx, RegExpImpl::kLastInputOffset), rax);
- __ movq(rcx, rbx);
- __ RecordWrite(rcx, RegExpImpl::kLastInputOffset, rax, rdi);
+ __ RecordWriteField(rbx,
+ RegExpImpl::kLastInputOffset,
+ rax,
+ rdi,
+ kDontSaveFPRegs);
// Get the static offsets vector filled by the native regexp code.
__ LoadAddress(rcx,
@@ -2729,6 +2952,27 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ bind(&termination_exception);
__ ThrowUncatchable(TERMINATION, rax);
+ // External string. Short external strings have already been ruled out.
+ // rdi: subject string (expected to be external)
+ // rbx: scratch
+ __ bind(&external_string);
+ __ movq(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
+ __ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
+ if (FLAG_debug_code) {
+ // Assert that we do not have a cons or slice (indirect strings) here.
+ // Sequential strings have already been ruled out.
+ __ testb(rbx, Immediate(kIsIndirectStringMask));
+ __ Assert(zero, "external string expected, but not found");
+ }
+ __ movq(rdi, FieldOperand(rdi, ExternalString::kResourceDataOffset));
+ // Move the pointer so that offset-wise, it looks like a sequential string.
+ STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
+ __ subq(rdi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+ STATIC_ASSERT(kTwoByteStringTag == 0);
+ __ testb(rbx, Immediate(kStringEncodingMask));
+ __ j(not_zero, &seq_ascii_string);
+ __ jmp(&seq_two_byte_string);
+
// Do the runtime call to execute the regexp.
__ bind(&runtime);
__ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
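
As a reading aid, the dispatch above reduces every subject to one of the sequential fast paths or the runtime. A sketch of the decision table (mask and tag names are the V8 constants used by the assembly):

    //   (type & (kIsNotStringMask | kStringRepresentationMask |
    //            kStringEncodingMask | kShortExternalStringMask)) == 0
    //       -> sequential two-byte: use the subject directly
    //   (type & (kIsNotStringMask | kStringRepresentationMask |
    //            kShortExternalStringMask)) == 0
    //       -> sequential ascii: use the subject directly
    //   cons string, second part empty  -> take the first part and re-test
    //   cons string, second part not empty -> runtime (needs flattening)
    //   sliced string -> record the offset, take the parent, and re-test
    //   long external string -> adjust the data pointer so it looks
    //                           sequential, then take the matching path
    //   non-string or short external string -> runtime
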
@@ -3231,7 +3475,24 @@ void StackCheckStub::Generate(MacroAssembler* masm) {
}
+void CallFunctionStub::FinishCode(Handle<Code> code) {
+ code->set_has_function_cache(false);
+}
+
+
+void CallFunctionStub::Clear(Heap* heap, Address address) {
+ UNREACHABLE();
+}
+
+
+Object* CallFunctionStub::GetCachedValue(Address address) {
+ UNREACHABLE();
+ return NULL;
+}
+
+
void CallFunctionStub::Generate(MacroAssembler* masm) {
+ // rdi : the function to call
Label slow, non_function;
// The receiver might implicitly be the global object. This is
@@ -3252,10 +3513,6 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
__ bind(&call);
}
- // Get the function to call from the stack.
- // +2 ~ receiver, return address
- __ movq(rdi, Operand(rsp, (argc_ + 2) * kPointerSize));
-
// Check that the function really is a JavaScript function.
__ JumpIfSmi(rdi, &non_function);
// Goto slow case if we do not have a function.
@@ -3292,7 +3549,7 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
__ push(rcx);
__ Set(rax, argc_ + 1);
__ Set(rbx, 0);
- __ SetCallKind(rcx, CALL_AS_FUNCTION);
+ __ SetCallKind(rcx, CALL_AS_METHOD);
__ GetBuiltinEntry(rdx, Builtins::CALL_FUNCTION_PROXY);
{
Handle<Code> adaptor =
@@ -3319,6 +3576,35 @@ bool CEntryStub::NeedsImmovableCode() {
}
+bool CEntryStub::IsPregenerated() {
+#ifdef _WIN64
+ return result_size_ == 1;
+#else
+ return true;
+#endif
+}
+
+
+void CodeStub::GenerateStubsAheadOfTime() {
+ CEntryStub::GenerateAheadOfTime();
+ StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime();
+ // It is important that the store buffer overflow stubs are generated first.
+ RecordWriteStub::GenerateFixedRegStubsAheadOfTime();
+}
+
+
+void CodeStub::GenerateFPStubs() {
+}
+
+
+void CEntryStub::GenerateAheadOfTime() {
+ CEntryStub stub(1, kDontSaveFPRegs);
+ stub.GetCode()->set_is_pregenerated(true);
+ CEntryStub save_doubles(1, kSaveFPRegs);
+ save_doubles.GetCode()->set_is_pregenerated(true);
+}
+
+
void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
// Throw exception in eax.
__ Throw(rax);
@@ -3545,7 +3831,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
- Label invoke, exit;
+ Label invoke, handler_entry, exit;
Label not_outermost_js, not_outermost_js_2;
{ // NOLINT. Scope block confuses linter.
MacroAssembler::NoRootArrayScope uninitialized_root_register(masm);
@@ -3605,20 +3891,23 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
__ Push(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME));
__ bind(&cont);
- // Call a faked try-block that does the invoke.
- __ call(&invoke);
-
- // Caught exception: Store result (exception) in the pending
- // exception field in the JSEnv and return a failure sentinel.
+ // Jump to a faked try block that does the invoke, with a faked catch
+ // block that sets the pending exception.
+ __ jmp(&invoke);
+ __ bind(&handler_entry);
+ handler_offset_ = handler_entry.pos();
+ // Caught exception: Store result (exception) in the pending exception
+ // field in the JSEnv and return a failure sentinel.
ExternalReference pending_exception(Isolate::kPendingExceptionAddress,
isolate);
__ Store(pending_exception, rax);
__ movq(rax, Failure::Exception(), RelocInfo::NONE);
__ jmp(&exit);
- // Invoke: Link this frame into the handler chain.
+ // Invoke: Link this frame into the handler chain. There's only one
+ // handler block in this code object, so its index is 0.
__ bind(&invoke);
- __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER);
+ __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER, 0);
// Clear any pending exceptions.
__ LoadRoot(rax, Heap::kTheHoleValueRootIndex);
@@ -3627,11 +3916,11 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// Fake a receiver (NULL).
__ push(Immediate(0)); // receiver
- // Invoke the function by calling through JS entry trampoline
- // builtin and pop the faked function when we return. We load the address
- // from an external reference instead of inlining the call target address
- // directly in the code, because the builtin stubs may not have been
- // generated yet at the time this code is generated.
+ // Invoke the function by calling through JS entry trampoline builtin and
+ // pop the faked function when we return. We load the address from an
+ // external reference instead of inlining the call target address directly
+ // in the code, because the builtin stubs may not have been generated yet
+ // at the time this code is generated.
if (is_construct) {
ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
isolate);
@@ -3740,7 +4029,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ bind(&miss);
}
- __ TryGetFunctionPrototype(rdx, rbx, &slow);
+ __ TryGetFunctionPrototype(rdx, rbx, &slow, true);
// Check that the function prototype is a JS object.
__ JumpIfSmi(rbx, &slow);
@@ -3757,6 +4046,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ StoreRoot(rdx, Heap::kInstanceofCacheFunctionRootIndex);
__ StoreRoot(rax, Heap::kInstanceofCacheMapRootIndex);
} else {
+ // Get return address and delta to inlined map check.
__ movq(kScratchRegister, Operand(rsp, 0 * kPointerSize));
__ subq(kScratchRegister, Operand(rsp, 1 * kPointerSize));
__ movq(Operand(kScratchRegister, kOffsetToMapCheckValue), rax);
@@ -3791,9 +4081,11 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ StoreRoot(rax, Heap::kInstanceofCacheAnswerRootIndex);
} else {
// Store offset of true in the root array at the inline check site.
- ASSERT((Heap::kTrueValueRootIndex << kPointerSizeLog2) - kRootRegisterBias
- == 0xB0 - 0x100);
- __ movl(rax, Immediate(0xB0)); // TrueValue is at -10 * kPointerSize.
+ int true_offset = 0x100 +
+ (Heap::kTrueValueRootIndex << kPointerSizeLog2) - kRootRegisterBias;
+ // Assert it is a 1-byte signed value.
+ ASSERT(true_offset >= 0 && true_offset < 0x100);
+ __ movl(rax, Immediate(true_offset));
__ movq(kScratchRegister, Operand(rsp, 0 * kPointerSize));
__ subq(kScratchRegister, Operand(rsp, 1 * kPointerSize));
__ movb(Operand(kScratchRegister, kOffsetToResultValue), rax);
@@ -3812,9 +4104,11 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ StoreRoot(kScratchRegister, Heap::kInstanceofCacheAnswerRootIndex);
} else {
// Store offset of false in the root array at the inline check site.
- ASSERT((Heap::kFalseValueRootIndex << kPointerSizeLog2) - kRootRegisterBias
- == 0xB8 - 0x100);
- __ movl(rax, Immediate(0xB8)); // FalseValue is at -9 * kPointerSize.
+ int false_offset = 0x100 +
+ (Heap::kFalseValueRootIndex << kPointerSizeLog2) - kRootRegisterBias;
+ // Assert it is a 1-byte signed value.
+ ASSERT(false_offset >= 0 && false_offset < 0x100);
+ __ movl(rax, Immediate(false_offset));
__ movq(kScratchRegister, Operand(rsp, 0 * kPointerSize));
__ subq(kScratchRegister, Operand(rsp, 1 * kPointerSize));
__ movb(Operand(kScratchRegister, kOffsetToResultValue), rax);
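
To see what the new computation yields, plug in the values from the old hard-coded asserts: (Heap::kTrueValueRootIndex << kPointerSizeLog2) - kRootRegisterBias was 0xB0 - 0x100 = -0x50, so true_offset = 0x100 + (-0x50) = 0xB0; read as a signed byte that is -80 = -10 * kPointerSize, matching the old comment. Likewise false_offset = 0xB8, i.e. -72 = -9 * kPointerSize. The new ASSERTs simply guarantee that the offset still fits in the single byte written by movb.
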
@@ -3904,85 +4198,25 @@ void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
// If the index is non-smi trigger the non-smi case.
__ JumpIfNotSmi(index_, &index_not_smi_);
-
- // Put smi-tagged index into scratch register.
- __ movq(scratch_, index_);
__ bind(&got_smi_index_);
// Check for index out of range.
- __ SmiCompare(scratch_, FieldOperand(object_, String::kLengthOffset));
+ __ SmiCompare(index_, FieldOperand(object_, String::kLengthOffset));
__ j(above_equal, index_out_of_range_);
- // We need special handling for non-flat strings.
- STATIC_ASSERT(kSeqStringTag == 0);
- __ testb(result_, Immediate(kStringRepresentationMask));
- __ j(zero, &flat_string);
+ __ SmiToInteger32(index_, index_);
- // Handle non-flat strings.
- __ and_(result_, Immediate(kStringRepresentationMask));
- STATIC_ASSERT(kConsStringTag < kExternalStringTag);
- STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
- __ cmpb(result_, Immediate(kExternalStringTag));
- __ j(greater, &sliced_string);
- __ j(equal, &call_runtime_);
-
- // ConsString.
- // Check whether the right hand side is the empty string (i.e. if
- // this is really a flat string in a cons string). If that is not
- // the case we would rather go to the runtime system now to flatten
- // the string.
- Label assure_seq_string;
- __ CompareRoot(FieldOperand(object_, ConsString::kSecondOffset),
- Heap::kEmptyStringRootIndex);
- __ j(not_equal, &call_runtime_);
- // Get the first of the two strings and load its instance type.
- __ movq(object_, FieldOperand(object_, ConsString::kFirstOffset));
- __ jmp(&assure_seq_string, Label::kNear);
-
- // SlicedString, unpack and add offset.
- __ bind(&sliced_string);
- __ addq(scratch_, FieldOperand(object_, SlicedString::kOffsetOffset));
- __ movq(object_, FieldOperand(object_, SlicedString::kParentOffset));
+ StringCharLoadGenerator::Generate(
+ masm, object_, index_, result_, &call_runtime_);
- __ bind(&assure_seq_string);
- __ movq(result_, FieldOperand(object_, HeapObject::kMapOffset));
- __ movzxbl(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
- // If the first cons component is also non-flat, then go to runtime.
- STATIC_ASSERT(kSeqStringTag == 0);
- __ testb(result_, Immediate(kStringRepresentationMask));
- __ j(not_zero, &call_runtime_);
- __ jmp(&flat_string);
-
- // Check for 1-byte or 2-byte string.
- __ bind(&flat_string);
- STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
- STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
- __ testb(result_, Immediate(kStringEncodingMask));
- __ j(not_zero, &ascii_string);
-
- // 2-byte string.
- // Load the 2-byte character code into the result register.
- __ SmiToInteger32(scratch_, scratch_);
- __ movzxwl(result_, FieldOperand(object_,
- scratch_, times_2,
- SeqTwoByteString::kHeaderSize));
- __ jmp(&got_char_code);
-
- // ASCII string.
- // Load the byte into the result register.
- __ bind(&ascii_string);
- __ SmiToInteger32(scratch_, scratch_);
- __ movzxbl(result_, FieldOperand(object_,
- scratch_, times_1,
- SeqAsciiString::kHeaderSize));
- __ bind(&got_char_code);
__ Integer32ToSmi(result_, result_);
__ bind(&exit_);
}
void StringCharCodeAtGenerator::GenerateSlow(
- MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+ MacroAssembler* masm,
+ const RuntimeCallHelper& call_helper) {
__ Abort("Unexpected fallthrough to CharCodeAt slow case");
Factory* factory = masm->isolate()->factory();
@@ -3995,7 +4229,6 @@ void StringCharCodeAtGenerator::GenerateSlow(
DONT_DO_SMI_CHECK);
call_helper.BeforeCall(masm);
__ push(object_);
- __ push(index_);
__ push(index_); // Consumed by runtime conversion function.
if (index_flags_ == STRING_INDEX_IS_NUMBER) {
__ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
@@ -4004,19 +4237,18 @@ void StringCharCodeAtGenerator::GenerateSlow(
// NumberToSmi discards numbers that are not exact integers.
__ CallRuntime(Runtime::kNumberToSmi, 1);
}
- if (!scratch_.is(rax)) {
+ if (!index_.is(rax)) {
// Save the conversion result before the pop instructions below
// have a chance to overwrite it.
- __ movq(scratch_, rax);
+ __ movq(index_, rax);
}
- __ pop(index_);
__ pop(object_);
// Reload the instance type.
__ movq(result_, FieldOperand(object_, HeapObject::kMapOffset));
__ movzxbl(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
call_helper.AfterCall(masm);
// If index is still not a smi, it must be out of range.
- __ JumpIfNotSmi(scratch_, index_out_of_range_);
+ __ JumpIfNotSmi(index_, index_out_of_range_);
// Otherwise, return to the fast path.
__ jmp(&got_smi_index_);
@@ -4026,6 +4258,7 @@ void StringCharCodeAtGenerator::GenerateSlow(
__ bind(&call_runtime_);
call_helper.BeforeCall(masm);
__ push(object_);
+ __ Integer32ToSmi(index_, index_);
__ push(index_);
__ CallRuntime(Runtime::kStringCharCodeAt, 2);
if (!result_.is(rax)) {
@@ -4058,7 +4291,8 @@ void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
void StringCharFromCodeGenerator::GenerateSlow(
- MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+ MacroAssembler* masm,
+ const RuntimeCallHelper& call_helper) {
__ Abort("Unexpected fallthrough to CharFromCode slow case");
__ bind(&slow_case_);
@@ -4085,7 +4319,8 @@ void StringCharAtGenerator::GenerateFast(MacroAssembler* masm) {
void StringCharAtGenerator::GenerateSlow(
- MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+ MacroAssembler* masm,
+ const RuntimeCallHelper& call_helper) {
char_code_at_generator_.GenerateSlow(masm, call_helper);
char_from_code_generator_.GenerateSlow(masm, call_helper);
}
@@ -4542,6 +4777,7 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
static const int kProbes = 4;
Label found_in_symbol_table;
Label next_probe[kProbes];
+ Register candidate = scratch; // Scratch register contains candidate.
for (int i = 0; i < kProbes; i++) {
// Calculate entry in symbol table.
__ movl(scratch, hash);
@@ -4551,7 +4787,6 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
__ andl(scratch, mask);
// Load the entry from the symbol table.
- Register candidate = scratch; // Scratch register contains candidate.
STATIC_ASSERT(SymbolTable::kEntrySize == 1);
__ movq(candidate,
FieldOperand(symbol_table,
@@ -4566,7 +4801,12 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
__ CompareRoot(candidate, Heap::kUndefinedValueRootIndex);
__ j(equal, not_found);
- // Must be null (deleted entry).
+ // Must be the hole (deleted entry).
+ if (FLAG_debug_code) {
+ __ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
+ __ cmpq(kScratchRegister, candidate);
+ __ Assert(equal, "oddball in symbol table is not undefined or the hole");
+ }
__ jmp(&next_probe[i]);
__ bind(&is_string);
@@ -4597,7 +4837,7 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
__ jmp(not_found);
// Scratch register contains result when we fall through to here.
- Register result = scratch;
+ Register result = candidate;
__ bind(&found_in_symbol_table);
if (!result.is(rax)) {
__ movq(rax, result);
@@ -4615,7 +4855,7 @@ void StringHelper::GenerateHashInit(MacroAssembler* masm,
__ addl(hash, character);
// hash ^= hash >> 6;
__ movl(scratch, hash);
- __ sarl(scratch, Immediate(6));
+ __ shrl(scratch, Immediate(6));
__ xorl(hash, scratch);
}
@@ -4632,7 +4872,7 @@ void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
__ addl(hash, scratch);
// hash ^= hash >> 6;
__ movl(scratch, hash);
- __ sarl(scratch, Immediate(6));
+ __ shrl(scratch, Immediate(6));
__ xorl(hash, scratch);
}
@@ -4644,13 +4884,16 @@ void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
__ leal(hash, Operand(hash, hash, times_8, 0));
// hash ^= hash >> 11;
__ movl(scratch, hash);
- __ sarl(scratch, Immediate(11));
+ __ shrl(scratch, Immediate(11));
__ xorl(hash, scratch);
// hash += hash << 15;
__ movl(scratch, hash);
__ shll(scratch, Immediate(15));
__ addl(hash, scratch);
+ uint32_t kHashShiftCutOffMask = (1 << (32 - String::kHashShift)) - 1;
+ __ andl(hash, Immediate(kHashShiftCutOffMask));
+
// if (hash == 0) hash = 27;
Label hash_not_zero;
__ j(not_zero, &hash_not_zero);
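
The sarl-to-shrl changes are a correctness fix: the running string hash is an unsigned 32-bit quantity, so every `hash >> n` step must be a logical shift. A scalar sketch of the finalization above (kHashShift stands in for V8's String::kHashShift; the value 2 is illustrative):

    #include <cstdint>

    const uint32_t kHashShift = 2;  // illustrative; V8 uses String::kHashShift

    uint32_t GetHash(uint32_t hash) {
      hash += hash << 3;                      // leal(hash, [hash + hash*8])
      hash ^= hash >> 11;                     // logical shift, hence shrl
      hash += hash << 15;
      hash &= (1u << (32 - kHashShift)) - 1;  // kHashShiftCutOffMask
      if (hash == 0) hash = 27;               // 0 is reserved to mean
      return hash;                            // "hash not yet computed"
    }
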
@@ -4738,18 +4981,15 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// rbx: instance type
// rcx: sub string length
// rdx: from index (smi)
- Label allocate_slice, sliced_string, seq_string;
+ Label allocate_slice, sliced_string, seq_or_external_string;
__ cmpq(rcx, Immediate(SlicedString::kMinLength));
// Short slice. Copy instead of slicing.
__ j(less, &copy_routine);
- STATIC_ASSERT(kSeqStringTag == 0);
- __ testb(rbx, Immediate(kStringRepresentationMask));
- __ j(zero, &seq_string, Label::kNear);
+ // If the string is not indirect, it can only be sequential or external.
STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
STATIC_ASSERT(kIsIndirectStringMask != 0);
__ testb(rbx, Immediate(kIsIndirectStringMask));
- // External string. Jump to runtime.
- __ j(zero, &runtime);
+ __ j(zero, &seq_or_external_string, Label::kNear);
__ testb(rbx, Immediate(kSlicedNotConsMask));
__ j(not_zero, &sliced_string, Label::kNear);
@@ -4766,8 +5006,8 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ movq(rdi, FieldOperand(rax, SlicedString::kParentOffset));
__ jmp(&allocate_slice, Label::kNear);
- __ bind(&seq_string);
- // Sequential string. Just move string to the right register.
+ __ bind(&seq_or_external_string);
+ // Sequential or external string. Just move string to the correct register.
__ movq(rdi, rax);
__ bind(&allocate_slice);
@@ -5271,12 +5511,13 @@ void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
// Call the runtime system in a fresh internal frame.
ExternalReference miss =
ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate());
- __ EnterInternalFrame();
- __ push(rdx);
- __ push(rax);
- __ Push(Smi::FromInt(op_));
- __ CallExternalReference(miss, 3);
- __ LeaveInternalFrame();
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ push(rdx);
+ __ push(rax);
+ __ Push(Smi::FromInt(op_));
+ __ CallExternalReference(miss, 3);
+ }
// Compute the entry point of the rewritten stub.
__ lea(rdi, FieldOperand(rax, Code::kHeaderSize));
@@ -5292,13 +5533,12 @@ void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
}
-MaybeObject* StringDictionaryLookupStub::GenerateNegativeLookup(
- MacroAssembler* masm,
- Label* miss,
- Label* done,
- Register properties,
- String* name,
- Register r0) {
+void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
+ Label* miss,
+ Label* done,
+ Register properties,
+ Handle<String> name,
+ Register r0) {
// If names of slots in range from 1 to kProbes - 1 for the hash value are
// not equal to the name and kProbes-th slot is not used (its name is the
// undefined value), it guarantees the hash table doesn't contain the
@@ -5345,12 +5585,10 @@ MaybeObject* StringDictionaryLookupStub::GenerateNegativeLookup(
StringDictionaryLookupStub::NEGATIVE_LOOKUP);
__ Push(Handle<Object>(name));
__ push(Immediate(name->Hash()));
- MaybeObject* result = masm->TryCallStub(&stub);
- if (result->IsFailure()) return result;
+ __ CallStub(&stub);
__ testq(r0, r0);
__ j(not_zero, miss);
__ jmp(done);
- return result;
}
@@ -5365,6 +5603,11 @@ void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
Register name,
Register r0,
Register r1) {
+ ASSERT(!elements.is(r0));
+ ASSERT(!elements.is(r1));
+ ASSERT(!name.is(r0));
+ ASSERT(!name.is(r1));
+
// Assert that name contains a string.
if (FLAG_debug_code) __ AbortIfNotString(name);
@@ -5407,6 +5650,8 @@ void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
+ // This stub overrides SometimesSetsUpAFrame() to return false. That means
+ // we cannot call anything that could cause a GC from this stub.
// Stack frame on entry:
// rsp[0 * kPointerSize]: return address.
// rsp[1 * kPointerSize]: key's hash.
@@ -5492,6 +5737,364 @@ void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
}
+struct AheadOfTimeWriteBarrierStubList {
+ Register object, value, address;
+ RememberedSetAction action;
+};
+
+
+struct AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
+ // Used in RegExpExecStub.
+ { rbx, rax, rdi, EMIT_REMEMBERED_SET },
+ // Used in CompileArrayPushCall.
+ { rbx, rcx, rdx, EMIT_REMEMBERED_SET },
+ // Used in CompileStoreGlobal.
+ { rbx, rcx, rdx, OMIT_REMEMBERED_SET },
+ // Used in StoreStubCompiler::CompileStoreField and
+ // KeyedStoreStubCompiler::CompileStoreField via GenerateStoreField.
+ { rdx, rcx, rbx, EMIT_REMEMBERED_SET },
+ // GenerateStoreField calls the stub with two different permutations of
+ // registers. This is the second.
+ { rbx, rcx, rdx, EMIT_REMEMBERED_SET },
+ // StoreIC::GenerateNormal via GenerateDictionaryStore.
+ { rbx, r8, r9, EMIT_REMEMBERED_SET },
+ // KeyedStoreIC::GenerateGeneric.
+ { rbx, rdx, rcx, EMIT_REMEMBERED_SET },
+ // KeyedStoreStubCompiler::GenerateStoreFastElement.
+ { rdi, rdx, rcx, EMIT_REMEMBERED_SET },
+ // ElementsTransitionGenerator::GenerateSmiOnlyToObject
+ // and ElementsTransitionGenerator::GenerateSmiOnlyToDouble
+ // and ElementsTransitionGenerator::GenerateDoubleToObject
+ { rdx, rbx, rdi, EMIT_REMEMBERED_SET },
+ // ElementsTransitionGenerator::GenerateSmiOnlyToDouble
+ // and ElementsTransitionGenerator::GenerateDoubleToObject
+ { rdx, r11, r15, EMIT_REMEMBERED_SET },
+ // ElementsTransitionGenerator::GenerateDoubleToObject
+ { r11, rax, r15, EMIT_REMEMBERED_SET },
+ // StoreArrayLiteralElementStub::Generate
+ { rbx, rax, rcx, EMIT_REMEMBERED_SET },
+ // Null termination.
+ { no_reg, no_reg, no_reg, EMIT_REMEMBERED_SET}
+};
+
+
+bool RecordWriteStub::IsPregenerated() {
+ for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
+ !entry->object.is(no_reg);
+ entry++) {
+ if (object_.is(entry->object) &&
+ value_.is(entry->value) &&
+ address_.is(entry->address) &&
+ remembered_set_action_ == entry->action &&
+ save_fp_regs_mode_ == kDontSaveFPRegs) {
+ return true;
+ }
+ }
+ return false;
+}
+
+
+void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime() {
+ StoreBufferOverflowStub stub1(kDontSaveFPRegs);
+ stub1.GetCode()->set_is_pregenerated(true);
+ StoreBufferOverflowStub stub2(kSaveFPRegs);
+ stub2.GetCode()->set_is_pregenerated(true);
+}
+
+
+void RecordWriteStub::GenerateFixedRegStubsAheadOfTime() {
+ for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
+ !entry->object.is(no_reg);
+ entry++) {
+ RecordWriteStub stub(entry->object,
+ entry->value,
+ entry->address,
+ entry->action,
+ kDontSaveFPRegs);
+ stub.GetCode()->set_is_pregenerated(true);
+ }
+}
+
+
+// Takes its input in three registers: address_, value_, and object_. A
+// pointer to the value has just been written into the object; this stub
+// makes sure the GC is kept informed. The word in the object where the
+// value was written is in the address register.
+void RecordWriteStub::Generate(MacroAssembler* masm) {
+ Label skip_to_incremental_noncompacting;
+ Label skip_to_incremental_compacting;
+
+ // The first two instructions are generated with labels so as to get the
+ // offset fixed up correctly by the bind(Label*) call. We patch each of them
+ // back and forth between a compare instruction (a nop in this position) and
+ // the real branch when we start and stop incremental heap marking.
+ // See RecordWriteStub::Patch for details.
+ __ jmp(&skip_to_incremental_noncompacting, Label::kNear);
+ __ jmp(&skip_to_incremental_compacting, Label::kFar);
+
+ if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
+ __ RememberedSetHelper(object_,
+ address_,
+ value_,
+ save_fp_regs_mode_,
+ MacroAssembler::kReturnAtEnd);
+ } else {
+ __ ret(0);
+ }
+
+ __ bind(&skip_to_incremental_noncompacting);
+ GenerateIncremental(masm, INCREMENTAL);
+
+ __ bind(&skip_to_incremental_compacting);
+ GenerateIncremental(masm, INCREMENTAL_COMPACTION);
+
+ // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
+ // Will be checked in IncrementalMarking::ActivateGeneratedStub.
+ masm->set_byte_at(0, kTwoByteNopInstruction);
+ masm->set_byte_at(2, kFiveByteNopInstruction);
+}
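
From the opcode constants declared in code-stubs-x64.h (see the header diff below), the stub's first seven bytes encode its current mode; xx marks immediate/offset bytes:

    //   STORE_BUFFER_ONLY:       3c xx  3d xx xx xx xx   cmpb al,imm8 + cmpl
    //                                                    eax,imm32 (both nops)
    //   INCREMENTAL:             eb xx  ..               jmp rel8 to the first
    //                                                    skip label
    //   INCREMENTAL_COMPACTION:  3c xx  e9 xx xx xx xx   nop + jmp rel32 to
    //                                                    the second skip label
    //
    // GetMode() reads bytes 0 and 2 to recover the mode; Patch() rewrites
    // them and flushes the first seven bytes from the instruction cache.
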
+
+
+void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
+ regs_.Save(masm);
+
+ if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
+ Label dont_need_remembered_set;
+
+ __ movq(regs_.scratch0(), Operand(regs_.address(), 0));
+ __ JumpIfNotInNewSpace(regs_.scratch0(),
+ regs_.scratch0(),
+ &dont_need_remembered_set);
+
+ __ CheckPageFlag(regs_.object(),
+ regs_.scratch0(),
+ 1 << MemoryChunk::SCAN_ON_SCAVENGE,
+ not_zero,
+ &dont_need_remembered_set);
+
+ // First notify the incremental marker if necessary, then update the
+ // remembered set.
+ CheckNeedsToInformIncrementalMarker(
+ masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
+ InformIncrementalMarker(masm, mode);
+ regs_.Restore(masm);
+ __ RememberedSetHelper(object_,
+ address_,
+ value_,
+ save_fp_regs_mode_,
+ MacroAssembler::kReturnAtEnd);
+
+ __ bind(&dont_need_remembered_set);
+ }
+
+ CheckNeedsToInformIncrementalMarker(
+ masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
+ InformIncrementalMarker(masm, mode);
+ regs_.Restore(masm);
+ __ ret(0);
+}
+
+
+void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
+ regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
+#ifdef _WIN64
+ Register arg3 = r8;
+ Register arg2 = rdx;
+ Register arg1 = rcx;
+#else
+ Register arg3 = rdx;
+ Register arg2 = rsi;
+ Register arg1 = rdi;
+#endif
+ Register address =
+ arg1.is(regs_.address()) ? kScratchRegister : regs_.address();
+ ASSERT(!address.is(regs_.object()));
+ ASSERT(!address.is(arg1));
+ __ Move(address, regs_.address());
+ __ Move(arg1, regs_.object());
+ if (mode == INCREMENTAL_COMPACTION) {
+ // TODO(gc) Can we just set address arg2 in the beginning?
+ __ Move(arg2, address);
+ } else {
+ ASSERT(mode == INCREMENTAL);
+ __ movq(arg2, Operand(address, 0));
+ }
+ __ LoadAddress(arg3, ExternalReference::isolate_address());
+ int argument_count = 3;
+
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ PrepareCallCFunction(argument_count);
+ if (mode == INCREMENTAL_COMPACTION) {
+ __ CallCFunction(
+ ExternalReference::incremental_evacuation_record_write_function(
+ masm->isolate()),
+ argument_count);
+ } else {
+ ASSERT(mode == INCREMENTAL);
+ __ CallCFunction(
+ ExternalReference::incremental_marking_record_write_function(
+ masm->isolate()),
+ argument_count);
+ }
+ regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
+}
+
+
+void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
+ MacroAssembler* masm,
+ OnNoNeedToInformIncrementalMarker on_no_need,
+ Mode mode) {
+ Label on_black;
+ Label need_incremental;
+ Label need_incremental_pop_object;
+
+ // Let's look at the color of the object: If it is not black we don't have
+ // to inform the incremental marker.
+ __ JumpIfBlack(regs_.object(),
+ regs_.scratch0(),
+ regs_.scratch1(),
+ &on_black,
+ Label::kNear);
+
+ regs_.Restore(masm);
+ if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
+ __ RememberedSetHelper(object_,
+ address_,
+ value_,
+ save_fp_regs_mode_,
+ MacroAssembler::kReturnAtEnd);
+ } else {
+ __ ret(0);
+ }
+
+ __ bind(&on_black);
+
+ // Get the value from the slot.
+ __ movq(regs_.scratch0(), Operand(regs_.address(), 0));
+
+ if (mode == INCREMENTAL_COMPACTION) {
+ Label ensure_not_white;
+
+ __ CheckPageFlag(regs_.scratch0(), // Contains value.
+ regs_.scratch1(), // Scratch.
+ MemoryChunk::kEvacuationCandidateMask,
+ zero,
+ &ensure_not_white,
+ Label::kNear);
+
+ __ CheckPageFlag(regs_.object(),
+ regs_.scratch1(), // Scratch.
+ MemoryChunk::kSkipEvacuationSlotsRecordingMask,
+ zero,
+ &need_incremental);
+
+ __ bind(&ensure_not_white);
+ }
+
+ // We need an extra register for this, so we push the object register
+ // temporarily.
+ __ push(regs_.object());
+ __ EnsureNotWhite(regs_.scratch0(), // The value.
+ regs_.scratch1(), // Scratch.
+ regs_.object(), // Scratch.
+ &need_incremental_pop_object,
+ Label::kNear);
+ __ pop(regs_.object());
+
+ regs_.Restore(masm);
+ if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
+ __ RememberedSetHelper(object_,
+ address_,
+ value_,
+ save_fp_regs_mode_,
+ MacroAssembler::kReturnAtEnd);
+ } else {
+ __ ret(0);
+ }
+
+ __ bind(&need_incremental_pop_object);
+ __ pop(regs_.object());
+
+ __ bind(&need_incremental);
+
+ // Fall through when we need to inform the incremental marker.
+}
+
+
+void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- rax : element value to store
+ // -- rbx : array literal
+ // -- rdi : map of array literal
+ // -- rcx : element index as smi
+ // -- rdx : array literal index in function
+ // -- rsp[0] : return address
+ // -----------------------------------
+
+ Label element_done;
+ Label double_elements;
+ Label smi_element;
+ Label slow_elements;
+ Label fast_elements;
+
+ __ CheckFastElements(rdi, &double_elements);
+
+ // FAST_SMI_ONLY_ELEMENTS or FAST_ELEMENTS
+ __ JumpIfSmi(rax, &smi_element);
+ __ CheckFastSmiOnlyElements(rdi, &fast_elements);
+
+ // Storing into the array literal requires an elements transition. Call
+ // into the runtime.
+
+ __ bind(&slow_elements);
+ __ pop(rdi); // Pop the return address; it is pushed back below for
+ // the tail call.
+ __ push(rbx);
+ __ push(rcx);
+ __ push(rax);
+ __ movq(rbx, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ __ push(FieldOperand(rbx, JSFunction::kLiteralsOffset));
+ __ push(rdx);
+ __ push(rdi); // Put the return address back so that the tail call
+ // returns to the right place.
+ __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
+
+ // Array literal has ElementsKind of FAST_ELEMENTS and value is an object.
+ __ bind(&fast_elements);
+ __ SmiToInteger32(kScratchRegister, rcx);
+ __ movq(rbx, FieldOperand(rbx, JSObject::kElementsOffset));
+ __ lea(rcx, FieldOperand(rbx, kScratchRegister, times_pointer_size,
+ FixedArrayBase::kHeaderSize));
+ __ movq(Operand(rcx, 0), rax);
+ // Update the write barrier for the array store.
+ __ RecordWrite(rbx, rcx, rax,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ __ ret(0);
+
+ // Array literal has ElementsKind of FAST_SMI_ONLY_ELEMENTS or
+ // FAST_ELEMENTS, and value is Smi.
+ __ bind(&smi_element);
+ __ SmiToInteger32(kScratchRegister, rcx);
+ __ movq(rbx, FieldOperand(rbx, JSObject::kElementsOffset));
+ __ movq(FieldOperand(rbx, kScratchRegister, times_pointer_size,
+ FixedArrayBase::kHeaderSize), rax);
+ __ ret(0);
+
+ // Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS.
+ __ bind(&double_elements);
+
+ __ movq(r9, FieldOperand(rbx, JSObject::kElementsOffset));
+ __ SmiToInteger32(r11, rcx);
+ __ StoreNumberToDoubleElements(rax,
+ r9,
+ r11,
+ xmm0,
+ &slow_elements);
+ __ ret(0);
+}
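
In summary, the stub above dispatches on the value and the literal's ElementsKind; a sketch of the decision table (kind names are V8's):

    //   kind FAST_DOUBLE_ELEMENTS          -> unbox and store the number as a
    //                                         raw double (else slow case)
    //   value is a smi (smi-only or fast)  -> plain tagged store, no write
    //                                         barrier needed
    //   heap object, kind FAST_ELEMENTS    -> store + RecordWrite barrier
    //   heap object, kind FAST_SMI_ONLY_ELEMENTS
    //                                      -> elements transition via the
    //                                         runtime (slow case)
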
+
#undef __
} } // namespace v8::internal
diff --git a/deps/v8/src/x64/code-stubs-x64.h b/deps/v8/src/x64/code-stubs-x64.h
index 4058118ee..30ef3e8c5 100644
--- a/deps/v8/src/x64/code-stubs-x64.h
+++ b/deps/v8/src/x64/code-stubs-x64.h
@@ -59,6 +59,32 @@ class TranscendentalCacheStub: public CodeStub {
};
+class StoreBufferOverflowStub: public CodeStub {
+ public:
+ explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp)
+ : save_doubles_(save_fp) { }
+
+ void Generate(MacroAssembler* masm);
+
+ virtual bool IsPregenerated() { return true; }
+ static void GenerateFixedRegStubsAheadOfTime();
+ virtual bool SometimesSetsUpAFrame() { return false; }
+
+ private:
+ SaveFPRegsMode save_doubles_;
+
+ Major MajorKey() { return StoreBufferOverflow; }
+ int MinorKey() { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; }
+};
+
+
+// Flag that indicates how to generate code for the stub GenericBinaryOpStub.
+enum GenericBinaryFlags {
+ NO_GENERIC_BINARY_FLAGS = 0,
+ NO_SMI_CODE_IN_STUB = 1 << 0 // Omit smi code in stub.
+};
+
+
class UnaryOpStub: public CodeStub {
public:
UnaryOpStub(Token::Value op,
@@ -124,7 +150,7 @@ class UnaryOpStub: public CodeStub {
return UnaryOpIC::ToState(operand_type_);
}
- virtual void FinishCode(Code* code) {
+ virtual void FinishCode(Handle<Code> code) {
code->set_unary_op_type(operand_type_);
}
};
@@ -210,7 +236,7 @@ class BinaryOpStub: public CodeStub {
return BinaryOpIC::ToState(operands_type_);
}
- virtual void FinishCode(Code* code) {
+ virtual void FinishCode(Handle<Code> code) {
code->set_binary_op_type(operands_type_);
code->set_binary_op_result_type(result_type_);
}
@@ -397,13 +423,12 @@ class StringDictionaryLookupStub: public CodeStub {
void Generate(MacroAssembler* masm);
- MUST_USE_RESULT static MaybeObject* GenerateNegativeLookup(
- MacroAssembler* masm,
- Label* miss,
- Label* done,
- Register properties,
- String* name,
- Register r0);
+ static void GenerateNegativeLookup(MacroAssembler* masm,
+ Label* miss,
+ Label* done,
+ Register properties,
+ Handle<String> name,
+ Register r0);
static void GeneratePositiveLookup(MacroAssembler* masm,
Label* miss,
@@ -413,6 +438,8 @@ class StringDictionaryLookupStub: public CodeStub {
Register r0,
Register r1);
+ virtual bool SometimesSetsUpAFrame() { return false; }
+
private:
static const int kInlinedProbes = 4;
static const int kTotalProbes = 20;
@@ -425,7 +452,7 @@ class StringDictionaryLookupStub: public CodeStub {
StringDictionary::kHeaderSize +
StringDictionary::kElementsStartIndex * kPointerSize;
- Major MajorKey() { return StringDictionaryNegativeLookup; }
+ Major MajorKey() { return StringDictionaryLookup; }
int MinorKey() {
return DictionaryBits::encode(dictionary_.code()) |
@@ -446,6 +473,246 @@ class StringDictionaryLookupStub: public CodeStub {
};
+class RecordWriteStub: public CodeStub {
+ public:
+ RecordWriteStub(Register object,
+ Register value,
+ Register address,
+ RememberedSetAction remembered_set_action,
+ SaveFPRegsMode fp_mode)
+ : object_(object),
+ value_(value),
+ address_(address),
+ remembered_set_action_(remembered_set_action),
+ save_fp_regs_mode_(fp_mode),
+ regs_(object, // An input reg.
+ address, // An input reg.
+ value) { // One scratch reg.
+ }
+
+ enum Mode {
+ STORE_BUFFER_ONLY,
+ INCREMENTAL,
+ INCREMENTAL_COMPACTION
+ };
+
+ virtual bool IsPregenerated();
+ static void GenerateFixedRegStubsAheadOfTime();
+ virtual bool SometimesSetsUpAFrame() { return false; }
+
+ static const byte kTwoByteNopInstruction = 0x3c; // Cmpb al, #imm8.
+ static const byte kTwoByteJumpInstruction = 0xeb; // Jmp #imm8.
+
+ static const byte kFiveByteNopInstruction = 0x3d; // Cmpl eax, #imm32.
+ static const byte kFiveByteJumpInstruction = 0xe9; // Jmp #imm32.
+
+ static Mode GetMode(Code* stub) {
+ byte first_instruction = stub->instruction_start()[0];
+ byte second_instruction = stub->instruction_start()[2];
+
+ if (first_instruction == kTwoByteJumpInstruction) {
+ return INCREMENTAL;
+ }
+
+ ASSERT(first_instruction == kTwoByteNopInstruction);
+
+ if (second_instruction == kFiveByteJumpInstruction) {
+ return INCREMENTAL_COMPACTION;
+ }
+
+ ASSERT(second_instruction == kFiveByteNopInstruction);
+
+ return STORE_BUFFER_ONLY;
+ }
+
+ static void Patch(Code* stub, Mode mode) {
+ switch (mode) {
+ case STORE_BUFFER_ONLY:
+ ASSERT(GetMode(stub) == INCREMENTAL ||
+ GetMode(stub) == INCREMENTAL_COMPACTION);
+ stub->instruction_start()[0] = kTwoByteNopInstruction;
+ stub->instruction_start()[2] = kFiveByteNopInstruction;
+ break;
+ case INCREMENTAL:
+ ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
+ stub->instruction_start()[0] = kTwoByteJumpInstruction;
+ break;
+ case INCREMENTAL_COMPACTION:
+ ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
+ stub->instruction_start()[0] = kTwoByteNopInstruction;
+ stub->instruction_start()[2] = kFiveByteJumpInstruction;
+ break;
+ }
+ ASSERT(GetMode(stub) == mode);
+ CPU::FlushICache(stub->instruction_start(), 7);
+ }
+
+ private:
+ // This is a helper class for freeing up 3 scratch registers, where the third
+ // is always rcx (needed for shift operations). The input is two registers
+ // that must be preserved and one scratch register provided by the caller.
+ class RegisterAllocation {
+ public:
+ RegisterAllocation(Register object,
+ Register address,
+ Register scratch0)
+ : object_orig_(object),
+ address_orig_(address),
+ scratch0_orig_(scratch0),
+ object_(object),
+ address_(address),
+ scratch0_(scratch0) {
+ ASSERT(!AreAliased(scratch0, object, address, no_reg));
+ scratch1_ = GetRegThatIsNotRcxOr(object_, address_, scratch0_);
+ if (scratch0.is(rcx)) {
+ scratch0_ = GetRegThatIsNotRcxOr(object_, address_, scratch1_);
+ }
+ if (object.is(rcx)) {
+ object_ = GetRegThatIsNotRcxOr(address_, scratch0_, scratch1_);
+ }
+ if (address.is(rcx)) {
+ address_ = GetRegThatIsNotRcxOr(object_, scratch0_, scratch1_);
+ }
+ ASSERT(!AreAliased(scratch0_, object_, address_, rcx));
+ }
+
+ void Save(MacroAssembler* masm) {
+ ASSERT(!address_orig_.is(object_));
+ ASSERT(object_.is(object_orig_) || address_.is(address_orig_));
+ ASSERT(!AreAliased(object_, address_, scratch1_, scratch0_));
+ ASSERT(!AreAliased(object_orig_, address_, scratch1_, scratch0_));
+ ASSERT(!AreAliased(object_, address_orig_, scratch1_, scratch0_));
+ // We don't have to save scratch0_orig_ because it was given to us as
+ // a scratch register. But if we had to switch to a different reg then
+ // we should save the new scratch0_.
+ if (!scratch0_.is(scratch0_orig_)) masm->push(scratch0_);
+ if (!rcx.is(scratch0_orig_) &&
+ !rcx.is(object_orig_) &&
+ !rcx.is(address_orig_)) {
+ masm->push(rcx);
+ }
+ masm->push(scratch1_);
+ if (!address_.is(address_orig_)) {
+ masm->push(address_);
+ masm->movq(address_, address_orig_);
+ }
+ if (!object_.is(object_orig_)) {
+ masm->push(object_);
+ masm->movq(object_, object_orig_);
+ }
+ }
+
+ void Restore(MacroAssembler* masm) {
+ // These will have been preserved the entire time, so we just need to move
+ // them back. Only in one case is the orig_ reg different from the plain
+ // one, since only one of them can alias with rcx.
+ if (!object_.is(object_orig_)) {
+ masm->movq(object_orig_, object_);
+ masm->pop(object_);
+ }
+ if (!address_.is(address_orig_)) {
+ masm->movq(address_orig_, address_);
+ masm->pop(address_);
+ }
+ masm->pop(scratch1_);
+ if (!rcx.is(scratch0_orig_) &&
+ !rcx.is(object_orig_) &&
+ !rcx.is(address_orig_)) {
+ masm->pop(rcx);
+ }
+ if (!scratch0_.is(scratch0_orig_)) masm->pop(scratch0_);
+ }
+
+ // If we have to call into C then we need to save and restore all caller-
+ // saved registers that were not already preserved.
+
+ // The three scratch registers (incl. rcx) will be restored by other means,
+ // so we don't bother pushing them here. rbx, rbp, and r12-r15 are
+ // callee-saved and don't need to be preserved.
+ void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
+ masm->PushCallerSaved(mode, scratch0_, scratch1_, rcx);
+ }
+
+ inline void RestoreCallerSaveRegisters(MacroAssembler* masm,
+ SaveFPRegsMode mode) {
+ masm->PopCallerSaved(mode, scratch0_, scratch1_, rcx);
+ }
+
+ inline Register object() { return object_; }
+ inline Register address() { return address_; }
+ inline Register scratch0() { return scratch0_; }
+ inline Register scratch1() { return scratch1_; }
+
+ private:
+ Register object_orig_;
+ Register address_orig_;
+ Register scratch0_orig_;
+ Register object_;
+ Register address_;
+ Register scratch0_;
+ Register scratch1_;
+ // Third scratch register is always rcx.
+
+ Register GetRegThatIsNotRcxOr(Register r1,
+ Register r2,
+ Register r3) {
+ for (int i = 0; i < Register::kNumAllocatableRegisters; i++) {
+ Register candidate = Register::FromAllocationIndex(i);
+ if (candidate.is(rcx)) continue;
+ if (candidate.is(r1)) continue;
+ if (candidate.is(r2)) continue;
+ if (candidate.is(r3)) continue;
+ return candidate;
+ }
+ UNREACHABLE();
+ return no_reg;
+ }
+ friend class RecordWriteStub;
+ };
+
+ enum OnNoNeedToInformIncrementalMarker {
+ kReturnOnNoNeedToInformIncrementalMarker,
+ kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
+ };
+
+ void Generate(MacroAssembler* masm);
+ void GenerateIncremental(MacroAssembler* masm, Mode mode);
+ void CheckNeedsToInformIncrementalMarker(
+ MacroAssembler* masm,
+ OnNoNeedToInformIncrementalMarker on_no_need,
+ Mode mode);
+ void InformIncrementalMarker(MacroAssembler* masm, Mode mode);
+
+ Major MajorKey() { return RecordWrite; }
+
+ int MinorKey() {
+ return ObjectBits::encode(object_.code()) |
+ ValueBits::encode(value_.code()) |
+ AddressBits::encode(address_.code()) |
+ RememberedSetActionBits::encode(remembered_set_action_) |
+ SaveFPRegsModeBits::encode(save_fp_regs_mode_);
+ }
+
+ void Activate(Code* code) {
+ code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
+ }
+
+ class ObjectBits: public BitField<int, 0, 4> {};
+ class ValueBits: public BitField<int, 4, 4> {};
+ class AddressBits: public BitField<int, 8, 4> {};
+ class RememberedSetActionBits: public BitField<RememberedSetAction, 12, 1> {};
+ class SaveFPRegsModeBits: public BitField<SaveFPRegsMode, 13, 1> {};
+
+ Register object_;
+ Register value_;
+ Register address_;
+ RememberedSetAction remembered_set_action_;
+ SaveFPRegsMode save_fp_regs_mode_;
+ Label slow_;
+ RegisterAllocation regs_;
+};
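
For orientation, the BitField declarations above pack the stub's minor key as:

    //   bits 0..3   object register code
    //   bits 4..7   value register code
    //   bits 8..11  address register code
    //   bit  12     remembered-set action (EMIT vs. OMIT)
    //   bit  13     save-FP-regs mode
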
+
+
} } // namespace v8::internal
#endif // V8_X64_CODE_STUBS_X64_H_
diff --git a/deps/v8/src/x64/codegen-x64.cc b/deps/v8/src/x64/codegen-x64.cc
index 507bbd44c..f7e8fc114 100644
--- a/deps/v8/src/x64/codegen-x64.cc
+++ b/deps/v8/src/x64/codegen-x64.cc
@@ -30,6 +30,7 @@
#if defined(V8_TARGET_ARCH_X64)
#include "codegen.h"
+#include "macro-assembler.h"
namespace v8 {
namespace internal {
@@ -38,12 +39,16 @@ namespace internal {
// Platform-specific RuntimeCallHelper functions.
void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
- masm->EnterInternalFrame();
+ masm->EnterFrame(StackFrame::INTERNAL);
+ ASSERT(!masm->has_frame());
+ masm->set_has_frame(true);
}
void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
- masm->LeaveInternalFrame();
+ masm->LeaveFrame(StackFrame::INTERNAL);
+ ASSERT(masm->has_frame());
+ masm->set_has_frame(false);
}
@@ -139,6 +144,331 @@ ModuloFunction CreateModuloFunction() {
#endif
+#undef __
+
+// -------------------------------------------------------------------------
+// Code generators
+
+#define __ ACCESS_MASM(masm)
+
+void ElementsTransitionGenerator::GenerateSmiOnlyToObject(
+ MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- rax : value
+ // -- rbx : target map
+ // -- rcx : key
+ // -- rdx : receiver
+ // -- rsp[0] : return address
+ // -----------------------------------
+ // Set transitioned map.
+ __ movq(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
+ __ RecordWriteField(rdx,
+ HeapObject::kMapOffset,
+ rbx,
+ rdi,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+}
+
+
+void ElementsTransitionGenerator::GenerateSmiOnlyToDouble(
+ MacroAssembler* masm, Label* fail) {
+ // ----------- S t a t e -------------
+ // -- rax : value
+ // -- rbx : target map
+ // -- rcx : key
+ // -- rdx : receiver
+ // -- rsp[0] : return address
+ // -----------------------------------
+ // The fail label is used only when a new backing store must be allocated
+ // (the COW case below); otherwise we convert the array in place.
+ Label allocated, cow_array;
+
+ // Check the backing store for COW-ness. If it is not COW, we do not have
+ // to allocate a new array, since FixedArray and FixedDoubleArray do not
+ // differ in size.
+ __ movq(r8, FieldOperand(rdx, JSObject::kElementsOffset));
+ __ SmiToInteger32(r9, FieldOperand(r8, FixedDoubleArray::kLengthOffset));
+ __ CompareRoot(FieldOperand(r8, HeapObject::kMapOffset),
+ Heap::kFixedCOWArrayMapRootIndex);
+ __ j(equal, &cow_array);
+ __ movq(r14, r8); // Destination array equals source array.
+
+ __ bind(&allocated);
+ // r8 : source FixedArray
+ // r9 : elements array length
+ // r14: destination FixedDoubleArray
+ // Set backing store's map
+ __ LoadRoot(rdi, Heap::kFixedDoubleArrayMapRootIndex);
+ __ movq(FieldOperand(r14, HeapObject::kMapOffset), rdi);
+
+ // Set transitioned map.
+ __ movq(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
+ __ RecordWriteField(rdx,
+ HeapObject::kMapOffset,
+ rbx,
+ rdi,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+
+ // Convert smis to doubles and holes to hole NaNs. The array's length
+ // remains unchanged.
+ STATIC_ASSERT(FixedDoubleArray::kLengthOffset == FixedArray::kLengthOffset);
+ STATIC_ASSERT(FixedDoubleArray::kHeaderSize == FixedArray::kHeaderSize);
+
+ Label loop, entry, convert_hole;
+ __ movq(r15, BitCast<int64_t, uint64_t>(kHoleNanInt64), RelocInfo::NONE);
+ // r15: the-hole NaN
+ __ jmp(&entry);
+
+ // Allocate new array if the source array is a COW array.
+ __ bind(&cow_array);
+ __ lea(rdi, Operand(r9, times_pointer_size, FixedArray::kHeaderSize));
+ __ AllocateInNewSpace(rdi, r14, r11, r15, fail, TAG_OBJECT);
+ // Set receiver's backing store.
+ __ movq(FieldOperand(rdx, JSObject::kElementsOffset), r14);
+ __ movq(r11, r14);
+ __ RecordWriteField(rdx,
+ JSObject::kElementsOffset,
+ r11,
+ r15,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ // Set backing store's length.
+ __ Integer32ToSmi(r11, r9);
+ __ movq(FieldOperand(r14, FixedDoubleArray::kLengthOffset), r11);
+ __ jmp(&allocated);
+
+ // Conversion loop.
+ __ bind(&loop);
+ __ movq(rbx,
+ FieldOperand(r8, r9, times_8, FixedArray::kHeaderSize));
+ // r9 : current element's index
+ // rbx: current element (smi-tagged)
+ __ JumpIfNotSmi(rbx, &convert_hole);
+ __ SmiToInteger32(rbx, rbx);
+ __ cvtlsi2sd(xmm0, rbx);
+ __ movsd(FieldOperand(r14, r9, times_8, FixedDoubleArray::kHeaderSize),
+ xmm0);
+ __ jmp(&entry);
+ __ bind(&convert_hole);
+
+ if (FLAG_debug_code) {
+ __ CompareRoot(rbx, Heap::kTheHoleValueRootIndex);
+ __ Assert(equal, "object found in smi-only array");
+ }
+
+ __ movq(FieldOperand(r14, r9, times_8, FixedDoubleArray::kHeaderSize), r15);
+ __ bind(&entry);
+ __ decq(r9);
+ __ j(not_sign, &loop);
+}
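
The conversion loop above walks the array backwards (decq/not_sign), converting in place in the non-COW case. A scalar sketch with hypothetical smi helpers (x64 smis keep a 32-bit payload in the upper word with tag bit 0; kHoleNanInt64 is V8's canonical hole-NaN bit pattern, passed in here rather than hard-coded):

    #include <cstdint>
    #include <cstring>

    static inline bool IsSmi(uint64_t v) { return (v & 1) == 0; }
    static inline int32_t SmiValue(uint64_t v) {
      return static_cast<int32_t>(v >> 32);
    }

    void SmisToDoubles(const uint64_t* src, uint64_t* dst, int length,
                       uint64_t hole_nan /* kHoleNanInt64 */) {
      for (int i = length - 1; i >= 0; i--) {
        if (IsSmi(src[i])) {
          double d = static_cast<double>(SmiValue(src[i]));  // cvtlsi2sd
          std::memcpy(&dst[i], &d, sizeof d);                // movsd
        } else {
          dst[i] = hole_nan;  // the hole value becomes the hole NaN
        }
      }
    }
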
+
+
+void ElementsTransitionGenerator::GenerateDoubleToObject(
+ MacroAssembler* masm, Label* fail) {
+ // ----------- S t a t e -------------
+ // -- rax : value
+ // -- rbx : target map
+ // -- rcx : key
+ // -- rdx : receiver
+ // -- rsp[0] : return address
+ // -----------------------------------
+ Label loop, entry, convert_hole, gc_required;
+ __ push(rax);
+
+ __ movq(r8, FieldOperand(rdx, JSObject::kElementsOffset));
+ __ SmiToInteger32(r9, FieldOperand(r8, FixedDoubleArray::kLengthOffset));
+ // r8 : source FixedDoubleArray
+ // r9 : number of elements
+ __ lea(rdi, Operand(r9, times_pointer_size, FixedArray::kHeaderSize));
+ __ AllocateInNewSpace(rdi, r11, r14, r15, &gc_required, TAG_OBJECT);
+ // r11: destination FixedArray
+ __ LoadRoot(rdi, Heap::kFixedArrayMapRootIndex);
+ __ movq(FieldOperand(r11, HeapObject::kMapOffset), rdi);
+ __ Integer32ToSmi(r14, r9);
+ __ movq(FieldOperand(r11, FixedArray::kLengthOffset), r14);
+
+ // Prepare for conversion loop.
+ __ movq(rsi, BitCast<int64_t, uint64_t>(kHoleNanInt64), RelocInfo::NONE);
+ __ LoadRoot(rdi, Heap::kTheHoleValueRootIndex);
+ // rsi: the-hole NaN
+ // rdi: pointer to the-hole
+ __ jmp(&entry);
+
+ // Call into runtime if GC is required.
+ __ bind(&gc_required);
+ __ pop(rax);
+ __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ jmp(fail);
+
+ // Box doubles into heap numbers.
+ __ bind(&loop);
+ __ movq(r14, FieldOperand(r8,
+ r9,
+ times_pointer_size,
+ FixedDoubleArray::kHeaderSize));
+ // r9 : current element's index
+ // r14: current element
+ __ cmpq(r14, rsi);
+ __ j(equal, &convert_hole);
+
+ // Non-hole double, copy value into a heap number.
+ __ AllocateHeapNumber(rax, r15, &gc_required);
+ // rax: new heap number
+ __ movq(FieldOperand(rax, HeapNumber::kValueOffset), r14);
+ __ movq(FieldOperand(r11,
+ r9,
+ times_pointer_size,
+ FixedArray::kHeaderSize),
+ rax);
+ __ movq(r15, r9);
+ __ RecordWriteArray(r11,
+ rax,
+ r15,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ __ jmp(&entry, Label::kNear);
+
+ // Replace the-hole NaN with the-hole pointer.
+ __ bind(&convert_hole);
+ __ movq(FieldOperand(r11,
+ r9,
+ times_pointer_size,
+ FixedArray::kHeaderSize),
+ rdi);
+
+ __ bind(&entry);
+ __ decq(r9);
+ __ j(not_sign, &loop);
+
+ // Set transitioned map.
+ __ movq(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
+ __ RecordWriteField(rdx,
+ HeapObject::kMapOffset,
+ rbx,
+ rdi,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ // Replace receiver's backing store with newly created and filled FixedArray.
+ __ movq(FieldOperand(rdx, JSObject::kElementsOffset), r11);
+ __ RecordWriteField(rdx,
+ JSObject::kElementsOffset,
+ r11,
+ r15,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ __ pop(rax);
+ __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+}
+
+
+void StringCharLoadGenerator::Generate(MacroAssembler* masm,
+ Register string,
+ Register index,
+ Register result,
+ Label* call_runtime) {
+ // Fetch the instance type of the receiver into result register.
+ __ movq(result, FieldOperand(string, HeapObject::kMapOffset));
+ __ movzxbl(result, FieldOperand(result, Map::kInstanceTypeOffset));
+
+ // We need special handling for indirect strings.
+ Label check_sequential;
+ __ testb(result, Immediate(kIsIndirectStringMask));
+ __ j(zero, &check_sequential, Label::kNear);
+
+ // Dispatch on the indirect string shape: slice or cons.
+ Label cons_string;
+ __ testb(result, Immediate(kSlicedNotConsMask));
+ __ j(zero, &cons_string, Label::kNear);
+
+ // Handle slices.
+ Label indirect_string_loaded;
+ __ SmiToInteger32(result, FieldOperand(string, SlicedString::kOffsetOffset));
+ __ addq(index, result);
+ __ movq(string, FieldOperand(string, SlicedString::kParentOffset));
+ __ jmp(&indirect_string_loaded, Label::kNear);
+
+ // Handle cons strings.
+ // Check whether the right hand side is the empty string (i.e. if
+ // this is really a flat string in a cons string). If that is not
+ // the case we would rather go to the runtime system now to flatten
+ // the string.
+ __ bind(&cons_string);
+ __ CompareRoot(FieldOperand(string, ConsString::kSecondOffset),
+ Heap::kEmptyStringRootIndex);
+ __ j(not_equal, call_runtime);
+ __ movq(string, FieldOperand(string, ConsString::kFirstOffset));
+
+ __ bind(&indirect_string_loaded);
+ __ movq(result, FieldOperand(string, HeapObject::kMapOffset));
+ __ movzxbl(result, FieldOperand(result, Map::kInstanceTypeOffset));
+
+ // Distinguish sequential and external strings. Only these two string
+ // representations can reach here (slices and flat cons strings have been
+ // reduced to the underlying sequential or external string).
+ Label seq_string;
+ __ bind(&check_sequential);
+ STATIC_ASSERT(kSeqStringTag == 0);
+ __ testb(result, Immediate(kStringRepresentationMask));
+ __ j(zero, &seq_string, Label::kNear);
+
+ // Handle external strings.
+ Label ascii_external, done;
+ if (FLAG_debug_code) {
+ // Assert that we do not have a cons or slice (indirect strings) here.
+ // Sequential strings have already been ruled out.
+ __ testb(result, Immediate(kIsIndirectStringMask));
+ __ Assert(zero, "external string expected, but not found");
+ }
+ // Rule out short external strings.
+ STATIC_CHECK(kShortExternalStringTag != 0);
+ __ testb(result, Immediate(kShortExternalStringTag));
+ __ j(not_zero, call_runtime);
+ // Check encoding.
+ STATIC_ASSERT(kTwoByteStringTag == 0);
+ __ testb(result, Immediate(kStringEncodingMask));
+ __ movq(result, FieldOperand(string, ExternalString::kResourceDataOffset));
+ __ j(not_equal, &ascii_external, Label::kNear);
+ // Two-byte string.
+ __ movzxwl(result, Operand(result, index, times_2, 0));
+ __ jmp(&done, Label::kNear);
+ __ bind(&ascii_external);
 +  // ASCII string.
+ __ movzxbl(result, Operand(result, index, times_1, 0));
+ __ jmp(&done, Label::kNear);
+
+ // Dispatch on the encoding: ASCII or two-byte.
+ Label ascii;
+ __ bind(&seq_string);
+ STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
+ STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
+ __ testb(result, Immediate(kStringEncodingMask));
+ __ j(not_zero, &ascii, Label::kNear);
+
+ // Two-byte string.
+ // Load the two-byte character code into the result register.
+ STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
+ __ movzxwl(result, FieldOperand(string,
+ index,
+ times_2,
+ SeqTwoByteString::kHeaderSize));
+ __ jmp(&done, Label::kNear);
+
+ // ASCII string.
+ // Load the byte into the result register.
+ __ bind(&ascii);
+ __ movzxbl(result, FieldOperand(string,
+ index,
+ times_1,
+ SeqAsciiString::kHeaderSize));
+ __ bind(&done);
+}
#undef __
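
The element-transition generators above pivot on one bit pattern: inside a
FixedDoubleArray the hole is stored as a reserved quiet-NaN (kHoleNanInt64),
so the smi-to-double path writes that pattern for hole slots and the
double-to-object path detects holes by comparing raw 64-bit element bits.
A minimal standalone sketch of the per-element logic follows; the NaN
constant here is a placeholder stand-in (V8 defines the real kHoleNanInt64),
and the x64 smi convention of a 32-bit payload in the upper word is assumed.

#include <cstdint>
#include <cstring>

// Placeholder quiet-NaN bit pattern standing in for V8's kHoleNanInt64.
const uint64_t kHoleNanBits = 0x7FF7FFFFFFFFFFFFULL;

// Smi -> double: untag (x64 keeps the 32-bit payload in the upper word,
// hence the arithmetic shift by 32) and convert, mirroring the
// SmiToInteger32 + cvtlsi2sd pair in the smi-to-double generator.
uint64_t SmiElementToDoubleBits(int64_t element) {
  double d = static_cast<double>(static_cast<int32_t>(element >> 32));
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof bits);
  return bits;
}

// Double -> object: the hole is found by a raw bit comparison, exactly as
// the cmpq against the preloaded hole-NaN register in GenerateDoubleToObject.
bool IsHoleBits(uint64_t double_bits) { return double_bits == kHoleNanBits; }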
diff --git a/deps/v8/src/x64/codegen-x64.h b/deps/v8/src/x64/codegen-x64.h
index a0648cec6..2e8075103 100644
--- a/deps/v8/src/x64/codegen-x64.h
+++ b/deps/v8/src/x64/codegen-x64.h
@@ -69,6 +69,21 @@ class CodeGenerator: public AstVisitor {
};
+class StringCharLoadGenerator : public AllStatic {
+ public:
+ // Generates the code for handling different string types and loading the
+ // indexed character into |result|. We expect |index| as untagged input and
+ // |result| as untagged output.
+ static void Generate(MacroAssembler* masm,
+ Register string,
+ Register index,
+ Register result,
+ Label* call_runtime);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(StringCharLoadGenerator);
+};
+
} } // namespace v8::internal
#endif // V8_X64_CODEGEN_X64_H_
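
The declaration above is the contract for the generator emitted in
codegen-x64.cc; as plain C++ over a toy string model, the dispatch it
generates looks roughly like this. Types and fields are illustrative
stand-ins, not V8's real string layout, and the short-external-string
bailout is folded into the -1 ("call runtime") result.

#include <cstdint>

// Toy model of the string shapes the stub distinguishes.
struct Str {
  enum Kind { SEQ_ONE_BYTE, SEQ_TWO_BYTE, EXT_ONE_BYTE, EXT_TWO_BYTE,
              CONS, SLICED } kind;
  const uint8_t* data8;    // sequential/external one-byte payload
  const uint16_t* data16;  // sequential/external two-byte payload
  Str* first;              // cons: left part
  bool second_is_empty;    // cons: true iff the string is already flat
  Str* parent;             // sliced: underlying string
  int offset;              // sliced: start offset into the parent
};

// Character code at |index|, or -1 where the stub jumps to call_runtime.
int CharAt(const Str* s, int index) {
  if (s->kind == Str::SLICED) {          // slice: adjust index, follow parent
    index += s->offset;
    s = s->parent;
  } else if (s->kind == Str::CONS) {     // cons: only flat cons are handled
    if (!s->second_is_empty) return -1;  // needs flattening: runtime
    s = s->first;
  }
  switch (s->kind) {                     // sequential or external by now
    case Str::SEQ_ONE_BYTE:
    case Str::EXT_ONE_BYTE:  return s->data8[index];
    case Str::SEQ_TWO_BYTE:
    case Str::EXT_TWO_BYTE:  return s->data16[index];
    default:                 return -1;  // nested indirection: runtime
  }
}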
diff --git a/deps/v8/src/x64/debug-x64.cc b/deps/v8/src/x64/debug-x64.cc
index 423e6f244..339b961fe 100644
--- a/deps/v8/src/x64/debug-x64.cc
+++ b/deps/v8/src/x64/debug-x64.cc
@@ -100,64 +100,65 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
RegList non_object_regs,
bool convert_call_to_jmp) {
// Enter an internal frame.
- __ EnterInternalFrame();
-
- // Store the registers containing live values on the expression stack to
 -  // Store the registers containing live values on the expression stack to
 -  // make sure that these are correctly updated during GC. Non-object values
 -  // are stored as two smis so that they are untouched by the GC.
- ASSERT((object_regs & ~kJSCallerSaved) == 0);
- ASSERT((non_object_regs & ~kJSCallerSaved) == 0);
- ASSERT((object_regs & non_object_regs) == 0);
- for (int i = 0; i < kNumJSCallerSaved; i++) {
- int r = JSCallerSavedCode(i);
- Register reg = { r };
- ASSERT(!reg.is(kScratchRegister));
- if ((object_regs & (1 << r)) != 0) {
- __ push(reg);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Store the registers containing live values on the expression stack to
 +    // Store the registers containing live values on the expression stack to
 +    // make sure that these are correctly updated during GC. Non-object values
 +    // are stored as two smis so that they are untouched by the GC.
+ ASSERT((object_regs & ~kJSCallerSaved) == 0);
+ ASSERT((non_object_regs & ~kJSCallerSaved) == 0);
+ ASSERT((object_regs & non_object_regs) == 0);
+ for (int i = 0; i < kNumJSCallerSaved; i++) {
+ int r = JSCallerSavedCode(i);
+ Register reg = { r };
+ ASSERT(!reg.is(kScratchRegister));
+ if ((object_regs & (1 << r)) != 0) {
+ __ push(reg);
+ }
+ // Store the 64-bit value as two smis.
+ if ((non_object_regs & (1 << r)) != 0) {
+ __ movq(kScratchRegister, reg);
+ __ Integer32ToSmi(reg, reg);
+ __ push(reg);
+ __ sar(kScratchRegister, Immediate(32));
+ __ Integer32ToSmi(kScratchRegister, kScratchRegister);
+ __ push(kScratchRegister);
+ }
}
- // Store the 64-bit value as two smis.
- if ((non_object_regs & (1 << r)) != 0) {
- __ movq(kScratchRegister, reg);
- __ Integer32ToSmi(reg, reg);
- __ push(reg);
- __ sar(kScratchRegister, Immediate(32));
- __ Integer32ToSmi(kScratchRegister, kScratchRegister);
- __ push(kScratchRegister);
- }
- }
#ifdef DEBUG
- __ RecordComment("// Calling from debug break to runtime - come in - over");
+ __ RecordComment("// Calling from debug break to runtime - come in - over");
#endif
- __ Set(rax, 0); // No arguments (argc == 0).
- __ movq(rbx, ExternalReference::debug_break(masm->isolate()));
-
- CEntryStub ceb(1);
- __ CallStub(&ceb);
-
- // Restore the register values from the expression stack.
- for (int i = kNumJSCallerSaved - 1; i >= 0; i--) {
- int r = JSCallerSavedCode(i);
- Register reg = { r };
- if (FLAG_debug_code) {
- __ Set(reg, kDebugZapValue);
- }
- if ((object_regs & (1 << r)) != 0) {
- __ pop(reg);
+ __ Set(rax, 0); // No arguments (argc == 0).
+ __ movq(rbx, ExternalReference::debug_break(masm->isolate()));
+
+ CEntryStub ceb(1);
+ __ CallStub(&ceb);
+
+ // Restore the register values from the expression stack.
+ for (int i = kNumJSCallerSaved - 1; i >= 0; i--) {
+ int r = JSCallerSavedCode(i);
+ Register reg = { r };
+ if (FLAG_debug_code) {
+ __ Set(reg, kDebugZapValue);
+ }
+ if ((object_regs & (1 << r)) != 0) {
+ __ pop(reg);
+ }
+ // Reconstruct the 64-bit value from two smis.
+ if ((non_object_regs & (1 << r)) != 0) {
+ __ pop(kScratchRegister);
+ __ SmiToInteger32(kScratchRegister, kScratchRegister);
+ __ shl(kScratchRegister, Immediate(32));
+ __ pop(reg);
+ __ SmiToInteger32(reg, reg);
+ __ or_(reg, kScratchRegister);
+ }
}
- // Reconstruct the 64-bit value from two smis.
- if ((non_object_regs & (1 << r)) != 0) {
- __ pop(kScratchRegister);
- __ SmiToInteger32(kScratchRegister, kScratchRegister);
- __ shl(kScratchRegister, Immediate(32));
- __ pop(reg);
- __ SmiToInteger32(reg, reg);
- __ or_(reg, kScratchRegister);
- }
- }
- // Get rid of the internal frame.
- __ LeaveInternalFrame();
+ // Get rid of the internal frame.
+ }
// If this call did not replace a call but patched other code then there will
// be an unwanted return address left on the stack. Here we get rid of that.
@@ -249,12 +250,12 @@ void Debug::GenerateReturnDebugBreak(MacroAssembler* masm) {
}
-void Debug::GenerateStubNoRegistersDebugBreak(MacroAssembler* masm) {
+void Debug::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) {
// Register state for stub CallFunction (from CallFunctionStub in ic-x64.cc).
// ----------- S t a t e -------------
- // No registers used on entry.
+ // -- rdi : function
// -----------------------------------
- Generate_DebugBreakCallHelper(masm, 0, 0, false);
+ Generate_DebugBreakCallHelper(masm, rdi.bit(), 0, false);
}
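
The rewritten helper keeps the old trick for raw register values: a 64-bit
value cannot sit on the expression stack as-is (the GC would read it as a
pointer), so it is split into two smis on the way in and reassembled on the
way out. A standalone sketch of that round trip, assuming the x64 smi
convention of a 32-bit payload in the upper word:

#include <cstdint>
#include <cassert>

// x64 smi (un)tagging: the payload lives in the upper 32 bits of the word.
intptr_t Integer32ToSmi(int32_t v) {
  return static_cast<intptr_t>(static_cast<int64_t>(v) << 32);
}
int32_t SmiToInteger32(intptr_t smi) {
  return static_cast<int32_t>(static_cast<int64_t>(smi) >> 32);
}

// Split: the helper pushes the low half first, then the high half
// (Integer32ToSmi/push, then sar 32/Integer32ToSmi/push).
void SaveAsTwoSmis(uint64_t value, intptr_t out[2]) {
  out[0] = Integer32ToSmi(static_cast<int32_t>(value));
  out[1] = Integer32ToSmi(static_cast<int32_t>(value >> 32));
}

// Reconstruct: take the high half, shift, then or in the low half,
// mirroring the pop/SmiToInteger32/shl/or sequence above.
uint64_t RestoreFromTwoSmis(const intptr_t in[2]) {
  uint64_t high = static_cast<uint32_t>(SmiToInteger32(in[1]));
  uint64_t low = static_cast<uint32_t>(SmiToInteger32(in[0]));
  return (high << 32) | low;
}

void RoundTripCheck(uint64_t value) {
  intptr_t tmp[2];
  SaveAsTwoSmis(value, tmp);
  assert(RestoreFromTwoSmis(tmp) == value);  // holds for every 64-bit value
}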
diff --git a/deps/v8/src/x64/deoptimizer-x64.cc b/deps/v8/src/x64/deoptimizer-x64.cc
index b52e65932..1fd78fc57 100644
--- a/deps/v8/src/x64/deoptimizer-x64.cc
+++ b/deps/v8/src/x64/deoptimizer-x64.cc
@@ -42,67 +42,7 @@ const int Deoptimizer::table_entry_size_ = 10;
int Deoptimizer::patch_size() {
- return MacroAssembler::kCallInstructionLength;
-}
-
-
-#ifdef DEBUG
-// Overwrites code with int3 instructions.
-static void ZapCodeRange(Address from, Address to) {
- CHECK(from <= to);
- int length = static_cast<int>(to - from);
- CodePatcher destroyer(from, length);
- while (length-- > 0) {
- destroyer.masm()->int3();
- }
-}
-#endif
-
-
-// Iterate through the entries of a SafepointTable that corresponds to
-// deoptimization points.
-class SafepointTableDeoptimiztionEntryIterator {
- public:
- explicit SafepointTableDeoptimiztionEntryIterator(Code* code)
- : code_(code), table_(code), index_(-1), limit_(table_.length()) {
- FindNextIndex();
- }
-
- SafepointEntry Next(Address* pc) {
- if (index_ >= limit_) {
- *pc = NULL;
- return SafepointEntry(); // Invalid entry.
- }
- *pc = code_->instruction_start() + table_.GetPcOffset(index_);
- SafepointEntry entry = table_.GetEntry(index_);
- FindNextIndex();
- return entry;
- }
-
- private:
- void FindNextIndex() {
- ASSERT(index_ < limit_);
- while (++index_ < limit_) {
- if (table_.GetEntry(index_).deoptimization_index() !=
- Safepoint::kNoDeoptimizationIndex) {
- return;
- }
- }
- }
-
- Code* code_;
- SafepointTable table_;
- // Index of next deoptimization entry. If negative after calling
- // FindNextIndex, there are no more, and Next will return an invalid
- // SafepointEntry.
- int index_;
- // Table length.
- int limit_;
-};
-
-
-void Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code) {
- // TODO(1276): Implement.
+ return Assembler::kCallInstructionLength;
}
@@ -119,91 +59,47 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
// code patching below, and is not needed any more.
code->InvalidateRelocation();
 -  // For each return after a safepoint insert an absolute call to the
 +  // For each LLazyBailout instruction insert an absolute call to the
// corresponding deoptimization entry, or a short call to an absolute
// jump if space is short. The absolute jumps are put in a table just
// before the safepoint table (space was allocated there when the Code
// object was created, if necessary).
Address instruction_start = function->code()->instruction_start();
- Address jump_table_address =
- instruction_start + function->code()->safepoint_table_offset();
#ifdef DEBUG
- Address previous_pc = instruction_start;
-#endif
-
- SafepointTableDeoptimiztionEntryIterator deoptimizations(function->code());
- Address entry_pc = NULL;
-
- SafepointEntry current_entry = deoptimizations.Next(&entry_pc);
- while (current_entry.is_valid()) {
- int gap_code_size = current_entry.gap_code_size();
- unsigned deoptimization_index = current_entry.deoptimization_index();
-
-#ifdef DEBUG
- // Destroy the code which is not supposed to run again.
- ZapCodeRange(previous_pc, entry_pc);
+ Address prev_call_address = NULL;
#endif
+ DeoptimizationInputData* deopt_data =
+ DeoptimizationInputData::cast(code->deoptimization_data());
+ for (int i = 0; i < deopt_data->DeoptCount(); i++) {
+ if (deopt_data->Pc(i)->value() == -1) continue;
// Position where Call will be patched in.
- Address call_address = entry_pc + gap_code_size;
- // End of call instruction, if using a direct call to a 64-bit address.
- Address call_end_address =
- call_address + MacroAssembler::kCallInstructionLength;
-
- // Find next deoptimization entry, if any.
- Address next_pc = NULL;
- SafepointEntry next_entry = deoptimizations.Next(&next_pc);
-
- if (!next_entry.is_valid() || next_pc >= call_end_address) {
- // Room enough to write a long call instruction.
- CodePatcher patcher(call_address, Assembler::kCallInstructionLength);
- patcher.masm()->Call(GetDeoptimizationEntry(deoptimization_index, LAZY),
- RelocInfo::NONE);
-#ifdef DEBUG
- previous_pc = call_end_address;
-#endif
- } else {
- // Not room enough for a long Call instruction. Write a short call
- // instruction to a long jump placed elsewhere in the code.
+ Address call_address = instruction_start + deopt_data->Pc(i)->value();
+ // There is room enough to write a long call instruction because we pad
+ // LLazyBailout instructions with nops if necessary.
+ CodePatcher patcher(call_address, Assembler::kCallInstructionLength);
+ patcher.masm()->Call(GetDeoptimizationEntry(i, LAZY), RelocInfo::NONE);
+ ASSERT(prev_call_address == NULL ||
+ call_address >= prev_call_address + patch_size());
+ ASSERT(call_address + patch_size() <= code->instruction_end());
#ifdef DEBUG
- Address short_call_end_address =
- call_address + MacroAssembler::kShortCallInstructionLength;
+ prev_call_address = call_address;
#endif
- ASSERT(next_pc >= short_call_end_address);
-
- // Write jump in jump-table.
- jump_table_address -= MacroAssembler::kJumpInstructionLength;
- CodePatcher jump_patcher(jump_table_address,
- MacroAssembler::kJumpInstructionLength);
- jump_patcher.masm()->Jump(
- GetDeoptimizationEntry(deoptimization_index, LAZY),
- RelocInfo::NONE);
-
- // Write call to jump at call_offset.
- CodePatcher call_patcher(call_address,
- MacroAssembler::kShortCallInstructionLength);
- call_patcher.masm()->call(jump_table_address);
-#ifdef DEBUG
- previous_pc = short_call_end_address;
-#endif
- }
-
- // Continue with next deoptimization entry.
- current_entry = next_entry;
- entry_pc = next_pc;
}
-#ifdef DEBUG
- // Destroy the code which is not supposed to run again.
- ZapCodeRange(previous_pc, jump_table_address);
-#endif
+ Isolate* isolate = code->GetIsolate();
// Add the deoptimizing code to the list.
DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code);
- DeoptimizerData* data = code->GetIsolate()->deoptimizer_data();
+ DeoptimizerData* data = isolate->deoptimizer_data();
node->set_next(data->deoptimizing_code_list_);
data->deoptimizing_code_list_ = node;
+ // We might be in the middle of incremental marking with compaction.
+ // Tell collector to treat this code object in a special way and
+ // ignore all slots that might have been recorded on it.
+ isolate->heap()->mark_compact_collector()->InvalidateCode(code);
+
// Set the code for the function to non-optimized version.
function->ReplaceCode(function->shared()->code());
@@ -211,16 +107,12 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
PrintF("[forced deoptimization: ");
function->PrintName();
PrintF(" / %" V8PRIxPTR "]\n", reinterpret_cast<intptr_t>(function));
-#ifdef DEBUG
- if (FLAG_print_code) {
- code->PrintLn();
- }
-#endif
}
}
-void Deoptimizer::PatchStackCheckCodeAt(Address pc_after,
+void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code,
+ Address pc_after,
Code* check_code,
Code* replacement_code) {
Address call_target_address = pc_after - kIntSize;
@@ -250,10 +142,14 @@ void Deoptimizer::PatchStackCheckCodeAt(Address pc_after,
*(call_target_address - 2) = 0x90; // nop
Assembler::set_target_address_at(call_target_address,
replacement_code->entry());
+
+ unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
+ unoptimized_code, call_target_address, replacement_code);
}
-void Deoptimizer::RevertStackCheckCodeAt(Address pc_after,
+void Deoptimizer::RevertStackCheckCodeAt(Code* unoptimized_code,
+ Address pc_after,
Code* check_code,
Code* replacement_code) {
Address call_target_address = pc_after - kIntSize;
@@ -268,6 +164,9 @@ void Deoptimizer::RevertStackCheckCodeAt(Address pc_after,
*(call_target_address - 2) = 0x07; // offset
Assembler::set_target_address_at(call_target_address,
check_code->entry());
+
+ check_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
+ unoptimized_code, call_target_address, check_code);
}
@@ -713,7 +612,10 @@ void Deoptimizer::EntryGenerator::Generate() {
Isolate* isolate = masm()->isolate();
- __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 6);
+ {
+ AllowExternalCallThatCantCauseGC scope(masm());
+ __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 6);
+ }
// Preserve deoptimizer object in register rax and get the input
// frame descriptor pointer.
__ movq(rbx, Operand(rax, Deoptimizer::input_offset()));
@@ -759,8 +661,11 @@ void Deoptimizer::EntryGenerator::Generate() {
__ PrepareCallCFunction(2);
__ movq(arg1, rax);
__ LoadAddress(arg2, ExternalReference::isolate_address());
- __ CallCFunction(
- ExternalReference::compute_output_frames_function(isolate), 2);
+ {
+ AllowExternalCallThatCantCauseGC scope(masm());
+ __ CallCFunction(
+ ExternalReference::compute_output_frames_function(isolate), 2);
+ }
__ pop(rax);
// Replace the current frame with the output frames.
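
The new DeoptimizeFunction body replaces the safepoint-table walk with a
direct pass over the code's deoptimization input data: every entry with a
recorded pc gets a full call patched in, which is safe because LLazyBailout
sites are nop-padded to call-instruction size. A compact sketch of that
shape, with stand-in types for the V8 classes involved:

#include <cstdint>
#include <vector>

// Stand-in for one DeoptimizationInputData entry.
struct DeoptEntry {
  int pc_offset;  // -1 means the entry has no lazy-deopt call site
};

// write_call stands in for CodePatcher + masm()->Call(entry, ...).
void PatchLazyBailouts(uint8_t* instruction_start,
                       const std::vector<DeoptEntry>& entries,
                       void (*write_call)(uint8_t* site, int entry_index)) {
  for (int i = 0; i < static_cast<int>(entries.size()); i++) {
    if (entries[i].pc_offset == -1) continue;  // never bails out lazily
    // A long call always fits: the site was padded with nops at codegen.
    write_call(instruction_start + entries[i].pc_offset, i);
  }
}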
diff --git a/deps/v8/src/x64/frames-x64.h b/deps/v8/src/x64/frames-x64.h
index 7012c76f0..2626954ac 100644
--- a/deps/v8/src/x64/frames-x64.h
+++ b/deps/v8/src/x64/frames-x64.h
@@ -31,32 +31,32 @@
namespace v8 {
namespace internal {
-static const int kNumRegs = 16;
-static const RegList kJSCallerSaved =
+const int kNumRegs = 16;
+const RegList kJSCallerSaved =
1 << 0 | // rax
1 << 1 | // rcx
1 << 2 | // rdx
1 << 3 | // rbx - used as a caller-saved register in JavaScript code
1 << 7; // rdi - callee function
-static const int kNumJSCallerSaved = 5;
+const int kNumJSCallerSaved = 5;
typedef Object* JSCallerSavedBuffer[kNumJSCallerSaved];
// Number of registers for which space is reserved in safepoints.
-static const int kNumSafepointRegisters = 16;
+const int kNumSafepointRegisters = 16;
// ----------------------------------------------------
class StackHandlerConstants : public AllStatic {
public:
- static const int kNextOffset = 0 * kPointerSize;
- static const int kContextOffset = 1 * kPointerSize;
- static const int kFPOffset = 2 * kPointerSize;
- static const int kStateOffset = 3 * kPointerSize;
- static const int kPCOffset = 4 * kPointerSize;
+ static const int kNextOffset = 0 * kPointerSize;
+ static const int kCodeOffset = 1 * kPointerSize;
+ static const int kStateOffset = 2 * kPointerSize;
+ static const int kContextOffset = 3 * kPointerSize;
+ static const int kFPOffset = 4 * kPointerSize;
- static const int kSize = kPCOffset + kPointerSize;
+ static const int kSize = kFPOffset + kPointerSize;
};
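
The reshuffled handler constants drop the saved pc in favour of a code slot
and reorder the remaining fields. Mirrored as a struct purely for
illustration (V8 addresses these slots through the offset constants above,
never through a real struct):

struct StackHandlerLayout {
  void* next;     // kNextOffset    = 0 * kPointerSize
  void* code;     // kCodeOffset    = 1 * kPointerSize
  void* state;    // kStateOffset   = 2 * kPointerSize
  void* context;  // kContextOffset = 3 * kPointerSize
  void* fp;       // kFPOffset      = 4 * kPointerSize
};
static_assert(sizeof(StackHandlerLayout) == 5 * sizeof(void*),
              "matches kSize = kFPOffset + kPointerSize");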
diff --git a/deps/v8/src/x64/full-codegen-x64.cc b/deps/v8/src/x64/full-codegen-x64.cc
index 556523fad..963912f66 100644
--- a/deps/v8/src/x64/full-codegen-x64.cc
+++ b/deps/v8/src/x64/full-codegen-x64.cc
@@ -44,11 +44,6 @@ namespace internal {
#define __ ACCESS_MASM(masm_)
-static unsigned GetPropertyId(Property* property) {
- return property->id();
-}
-
-
class JumpPatchSite BASE_EMBEDDED {
public:
explicit JumpPatchSite(MacroAssembler* masm) : masm_(masm) {
@@ -122,6 +117,8 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
ASSERT(info_ == NULL);
info_ = info;
scope_ = info->scope();
+ handler_table_ =
+ isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED);
SetFunctionPosition(function());
Comment cmnt(masm_, "[ function compiled by full code generator");
@@ -136,7 +133,7 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
// with undefined when called as functions (without an explicit
// receiver object). rcx is zero for method calls and non-zero for
// function calls.
- if (info->is_strict_mode() || info->is_native()) {
+ if (!info->is_classic_mode() || info->is_native()) {
Label ok;
__ testq(rcx, rcx);
__ j(zero, &ok, Label::kNear);
@@ -147,6 +144,11 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
__ bind(&ok);
}
+ // Open a frame scope to indicate that there is a frame on the stack. The
+ // MANUAL indicates that the scope shouldn't actually generate code to set up
+ // the frame (that is done below).
+ FrameScope frame_scope(masm_, StackFrame::MANUAL);
+
__ push(rbp); // Caller's frame pointer.
__ movq(rbp, rsp);
__ push(rsi); // Callee's context.
@@ -195,11 +197,9 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
// Store it in the context.
int context_offset = Context::SlotOffset(var->index());
__ movq(Operand(rsi, context_offset), rax);
- // Update the write barrier. This clobbers all involved
- // registers, so we have use a third register to avoid
- // clobbering rsi.
- __ movq(rcx, rsi);
- __ RecordWrite(rcx, context_offset, rax, rbx);
+ // Update the write barrier. This clobbers rax and rbx.
+ __ RecordWriteContextSlot(
+ rsi, context_offset, rax, rbx, kDontSaveFPRegs);
}
}
}
@@ -227,8 +227,8 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
// The stub will rewrite receiver and parameter count if the previous
// stack frame was an arguments adapter frame.
ArgumentsAccessStub stub(
- is_strict_mode() ? ArgumentsAccessStub::NEW_STRICT
- : ArgumentsAccessStub::NEW_NON_STRICT_SLOW);
+ is_classic_mode() ? ArgumentsAccessStub::NEW_NON_STRICT_SLOW
+ : ArgumentsAccessStub::NEW_STRICT);
__ CallStub(&stub);
SetVar(arguments, rax, rbx, rdx);
@@ -251,7 +251,10 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
// constant.
if (scope()->is_function_scope() && scope()->function() != NULL) {
int ignored = 0;
- EmitDeclaration(scope()->function(), Variable::CONST, NULL, &ignored);
+ VariableProxy* proxy = scope()->function();
+ ASSERT(proxy->var()->mode() == CONST ||
+ proxy->var()->mode() == CONST_HARMONY);
+ EmitDeclaration(proxy, proxy->var()->mode(), NULL, &ignored);
}
VisitDeclarations(scope()->declarations());
}
@@ -377,7 +380,7 @@ void FullCodeGenerator::StackValueContext::Plug(Variable* var) const {
void FullCodeGenerator::TestContext::Plug(Variable* var) const {
codegen()->GetVar(result_register(), var);
- codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
+ codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
codegen()->DoTest(this);
}
@@ -399,7 +402,7 @@ void FullCodeGenerator::StackValueContext::Plug(
void FullCodeGenerator::TestContext::Plug(Heap::RootListIndex index) const {
- codegen()->PrepareForBailoutBeforeSplit(TOS_REG,
+ codegen()->PrepareForBailoutBeforeSplit(condition(),
true,
true_label_,
false_label_);
@@ -432,7 +435,7 @@ void FullCodeGenerator::StackValueContext::Plug(Handle<Object> lit) const {
void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
- codegen()->PrepareForBailoutBeforeSplit(TOS_REG,
+ codegen()->PrepareForBailoutBeforeSplit(condition(),
true,
true_label_,
false_label_);
@@ -491,7 +494,7 @@ void FullCodeGenerator::TestContext::DropAndPlug(int count,
// For simplicity we always test the accumulator register.
__ Drop(count);
__ Move(result_register(), reg);
- codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
+ codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
codegen()->DoTest(this);
}
@@ -555,7 +558,7 @@ void FullCodeGenerator::StackValueContext::Plug(bool flag) const {
void FullCodeGenerator::TestContext::Plug(bool flag) const {
- codegen()->PrepareForBailoutBeforeSplit(TOS_REG,
+ codegen()->PrepareForBailoutBeforeSplit(condition(),
true,
true_label_,
false_label_);
@@ -638,15 +641,16 @@ void FullCodeGenerator::SetVar(Variable* var,
ASSERT(!scratch1.is(src));
MemOperand location = VarOperand(var, scratch0);
__ movq(location, src);
+
// Emit the write barrier code if the location is in the heap.
if (var->IsContextSlot()) {
int offset = Context::SlotOffset(var->index());
- __ RecordWrite(scratch0, offset, src, scratch1);
+ __ RecordWriteContextSlot(scratch0, offset, src, scratch1, kDontSaveFPRegs);
}
}
-void FullCodeGenerator::PrepareForBailoutBeforeSplit(State state,
+void FullCodeGenerator::PrepareForBailoutBeforeSplit(Expression* expr,
bool should_normalize,
Label* if_true,
Label* if_false) {
@@ -657,13 +661,7 @@ void FullCodeGenerator::PrepareForBailoutBeforeSplit(State state,
Label skip;
if (should_normalize) __ jmp(&skip, Label::kNear);
-
- ForwardBailoutStack* current = forward_bailout_stack_;
- while (current != NULL) {
- PrepareForBailout(current->expr(), state);
- current = current->parent();
- }
-
+ PrepareForBailout(expr, TOS_REG);
if (should_normalize) {
__ CompareRoot(rax, Heap::kTrueValueRootIndex);
Split(equal, if_true, if_false, NULL);
@@ -673,13 +671,15 @@ void FullCodeGenerator::PrepareForBailoutBeforeSplit(State state,
void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
- Variable::Mode mode,
+ VariableMode mode,
FunctionLiteral* function,
int* global_count) {
// If it was not possible to allocate the variable at compile time, we
// need to "declare" it at runtime to make sure it actually exists in the
// local context.
Variable* variable = proxy->var();
+ bool binding_needs_init = (function == NULL) &&
+ (mode == CONST || mode == CONST_HARMONY || mode == LET);
switch (variable->location()) {
case Variable::UNALLOCATED:
++(*global_count);
@@ -691,7 +691,7 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
Comment cmnt(masm_, "[ Declaration");
VisitForAccumulatorValue(function);
__ movq(StackOperand(variable), result_register());
- } else if (mode == Variable::CONST || mode == Variable::LET) {
+ } else if (binding_needs_init) {
Comment cmnt(masm_, "[ Declaration");
__ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
__ movq(StackOperand(variable), kScratchRegister);
@@ -715,10 +715,16 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
VisitForAccumulatorValue(function);
__ movq(ContextOperand(rsi, variable->index()), result_register());
int offset = Context::SlotOffset(variable->index());
- __ movq(rbx, rsi);
- __ RecordWrite(rbx, offset, result_register(), rcx);
+ // We know that we have written a function, which is not a smi.
+ __ RecordWriteContextSlot(rsi,
+ offset,
+ result_register(),
+ rcx,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
- } else if (mode == Variable::CONST || mode == Variable::LET) {
+ } else if (binding_needs_init) {
Comment cmnt(masm_, "[ Declaration");
__ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
__ movq(ContextOperand(rsi, variable->index()), kScratchRegister);
@@ -731,11 +737,13 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
Comment cmnt(masm_, "[ Declaration");
__ push(rsi);
__ Push(variable->name());
- // Declaration nodes are always introduced in one of three modes.
- ASSERT(mode == Variable::VAR ||
- mode == Variable::CONST ||
- mode == Variable::LET);
- PropertyAttributes attr = (mode == Variable::CONST) ? READ_ONLY : NONE;
+ // Declaration nodes are always introduced in one of four modes.
+ ASSERT(mode == VAR ||
+ mode == CONST ||
+ mode == CONST_HARMONY ||
+ mode == LET);
+ PropertyAttributes attr =
+ (mode == CONST || mode == CONST_HARMONY) ? READ_ONLY : NONE;
__ Push(Smi::FromInt(attr));
// Push initial value, if any.
// Note: For variables we must not push an initial value (such as
@@ -743,7 +751,7 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
// must not destroy the current value.
if (function != NULL) {
VisitForStackValue(function);
- } else if (mode == Variable::CONST || mode == Variable::LET) {
+ } else if (binding_needs_init) {
__ PushRoot(Heap::kTheHoleValueRootIndex);
} else {
__ Push(Smi::FromInt(0)); // Indicates no initial value.
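
The recurring binding_needs_init predicate compresses the declaration rules:
a function declaration gets its value immediately, while let/const (and
harmony const) bindings are seeded with the-hole so later reads can detect
use before initialization. As a plain predicate (a sketch; the mode values
mirror the VariableMode constants used above):

enum VariableMode { VAR, CONST, CONST_HARMONY, LET /* ... */ };

// True when the slot must be seeded with the-hole instead of a value.
bool BindingNeedsInit(VariableMode mode, bool has_function_value) {
  return !has_function_value &&
         (mode == CONST || mode == CONST_HARMONY || mode == LET);
}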
@@ -882,11 +890,17 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ bind(&done_convert);
__ push(rax);
+ // Check for proxies.
+ Label call_runtime;
+ STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
+ __ CmpObjectType(rax, LAST_JS_PROXY_TYPE, rcx);
+ __ j(below_equal, &call_runtime);
+
// Check cache validity in generated code. This is a fast case for
// the JSObject::IsSimpleEnum cache validity checks. If we cannot
// guarantee cache validity, call the runtime system to check cache
// validity or get the property names in a fixed array.
- Label next, call_runtime;
+ Label next;
Register empty_fixed_array_value = r8;
__ LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
Register empty_descriptor_array_value = r9;
@@ -962,9 +976,17 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ jmp(&loop);
// We got a fixed array in register rax. Iterate through that.
+ Label non_proxy;
__ bind(&fixed_array);
- __ Push(Smi::FromInt(0)); // Map (0) - force slow check.
- __ push(rax);
+ __ Move(rbx, Smi::FromInt(1)); // Smi indicates slow check
+ __ movq(rcx, Operand(rsp, 0 * kPointerSize)); // Get enumerated object
+ STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
+ __ CmpObjectType(rcx, LAST_JS_PROXY_TYPE, rcx);
+ __ j(above, &non_proxy);
+ __ Move(rbx, Smi::FromInt(0)); // Zero indicates proxy
+ __ bind(&non_proxy);
+ __ push(rbx); // Smi
+ __ push(rax); // Array
__ movq(rax, FieldOperand(rax, FixedArray::kLengthOffset));
__ push(rax); // Fixed array length (as smi).
__ Push(Smi::FromInt(0)); // Initial index.
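
The for-in prologue above leaves five words on the stack that the loop body
addresses by fixed rsp offsets. Named slots for them (hypothetical names,
chosen for this sketch only; offsets are in pointer-sized slots from rsp):

enum ForInStackSlot {
  kIndexSlot     = 0,  // current index (smi)
  kLengthSlot    = 1,  // number of keys (smi)
  kKeyArraySlot  = 2,  // fixed array holding the keys
  kMapOrFlagSlot = 3,  // expected map; Smi(0) = proxy, Smi(1) = slow check
  kObjectSlot    = 4   // the enumerated object
};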
@@ -983,17 +1005,22 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
index.scale,
FixedArray::kHeaderSize));
- // Get the expected map from the stack or a zero map in the
+ // Get the expected map from the stack or a smi in the
// permanent slow case into register rdx.
__ movq(rdx, Operand(rsp, 3 * kPointerSize));
// Check if the expected map still matches that of the enumerable.
- // If not, we have to filter the key.
+ // If not, we may have to filter the key.
Label update_each;
__ movq(rcx, Operand(rsp, 4 * kPointerSize));
__ cmpq(rdx, FieldOperand(rcx, HeapObject::kMapOffset));
__ j(equal, &update_each, Label::kNear);
+ // For proxies, no filtering is done.
+ // TODO(rossberg): What if only a prototype is a proxy? Not specified yet.
+ __ Cmp(rdx, Smi::FromInt(0));
+ __ j(equal, &update_each, Label::kNear);
+
// Convert the entry to a string or null if it isn't a property
// anymore. If the property has been removed while iterating, we
// just skip it.
@@ -1047,7 +1074,7 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
!pretenure &&
scope()->is_function_scope() &&
info->num_literals() == 0) {
- FastNewClosureStub stub(info->strict_mode() ? kStrictMode : kNonStrictMode);
+ FastNewClosureStub stub(info->language_mode());
__ Push(info);
__ CallStub(&stub);
} else {
@@ -1077,7 +1104,7 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
Scope* s = scope();
while (s != NULL) {
if (s->num_heap_slots() > 0) {
- if (s->calls_eval()) {
+ if (s->calls_non_strict_eval()) {
// Check that extension is NULL.
__ cmpq(ContextOperand(context, Context::EXTENSION_INDEX),
Immediate(0));
@@ -1091,7 +1118,7 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
// If no outer scope calls eval, we do not need to check more
// context extensions. If we have reached an eval scope, we check
// all extensions from this point.
- if (!s->outer_scope_calls_eval() || s->is_eval_scope()) break;
+ if (!s->outer_scope_calls_non_strict_eval() || s->is_eval_scope()) break;
s = s->outer_scope();
}
@@ -1137,7 +1164,7 @@ MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) {
if (s->num_heap_slots() > 0) {
- if (s->calls_eval()) {
+ if (s->calls_non_strict_eval()) {
// Check that extension is NULL.
__ cmpq(ContextOperand(context, Context::EXTENSION_INDEX),
Immediate(0));
@@ -1168,16 +1195,23 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(Variable* var,
// introducing variables. In those cases, we do not want to
// perform a runtime call for all variables in the scope
// containing the eval.
- if (var->mode() == Variable::DYNAMIC_GLOBAL) {
+ if (var->mode() == DYNAMIC_GLOBAL) {
EmitLoadGlobalCheckExtensions(var, typeof_state, slow);
__ jmp(done);
- } else if (var->mode() == Variable::DYNAMIC_LOCAL) {
+ } else if (var->mode() == DYNAMIC_LOCAL) {
Variable* local = var->local_if_not_shadowed();
__ movq(rax, ContextSlotOperandCheckExtensions(local, slow));
- if (local->mode() == Variable::CONST) {
+ if (local->mode() == CONST ||
+ local->mode() == CONST_HARMONY ||
+ local->mode() == LET) {
__ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
__ j(not_equal, done);
- __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
+ if (local->mode() == CONST) {
+ __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
+ } else { // LET || CONST_HARMONY
+ __ Push(var->name());
+ __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ }
}
__ jmp(done);
}
@@ -1208,23 +1242,63 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
case Variable::LOCAL:
case Variable::CONTEXT: {
Comment cmnt(masm_, var->IsContextSlot() ? "Context slot" : "Stack slot");
- if (var->mode() != Variable::LET && var->mode() != Variable::CONST) {
- context()->Plug(var);
- } else {
- // Let and const need a read barrier.
- Label done;
- GetVar(rax, var);
- __ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
- __ j(not_equal, &done, Label::kNear);
- if (var->mode() == Variable::LET) {
- __ Push(var->name());
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
- } else { // Variable::CONST
- __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
+ if (var->binding_needs_init()) {
+ // var->scope() may be NULL when the proxy is located in eval code and
+ // refers to a potential outside binding. Currently those bindings are
+ // always looked up dynamically, i.e. in that case
 +        //     var->location() == LOOKUP
 +        // always holds.
+ ASSERT(var->scope() != NULL);
+
+ // Check if the binding really needs an initialization check. The check
+ // can be skipped in the following situation: we have a LET or CONST
+ // binding in harmony mode, both the Variable and the VariableProxy have
+ // the same declaration scope (i.e. they are both in global code, in the
+ // same function or in the same eval code) and the VariableProxy is in
+ // the source physically located after the initializer of the variable.
+ //
+ // We cannot skip any initialization checks for CONST in non-harmony
+ // mode because const variables may be declared but never initialized:
+ // if (false) { const x; }; var y = x;
+ //
+ // The condition on the declaration scopes is a conservative check for
+ // nested functions that access a binding and are called before the
+ // binding is initialized:
+ // function() { f(); let x = 1; function f() { x = 2; } }
+ //
+ bool skip_init_check;
+ if (var->scope()->DeclarationScope() != scope()->DeclarationScope()) {
+ skip_init_check = false;
+ } else {
+ // Check that we always have valid source position.
+ ASSERT(var->initializer_position() != RelocInfo::kNoPosition);
+ ASSERT(proxy->position() != RelocInfo::kNoPosition);
+ skip_init_check = var->mode() != CONST &&
+ var->initializer_position() < proxy->position();
+ }
+
+ if (!skip_init_check) {
+ // Let and const need a read barrier.
+ Label done;
+ GetVar(rax, var);
+ __ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
+ __ j(not_equal, &done, Label::kNear);
+ if (var->mode() == LET || var->mode() == CONST_HARMONY) {
+ // Throw a reference error when using an uninitialized let/const
+ // binding in harmony mode.
+ __ Push(var->name());
+ __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ } else {
 +          // Uninitialized const bindings outside of harmony mode are unholed.
+ ASSERT(var->mode() == CONST);
+ __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
+ }
+ __ bind(&done);
+ context()->Plug(rax);
+ break;
}
- __ bind(&done);
- context()->Plug(rax);
}
+ context()->Plug(var);
break;
}
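
The skip_init_check computation reads naturally as a predicate: the hole
check on a load is dropped only for a harmony let/const whose use site sits
in the same declaration scope and textually follows the initializer. Sketch
(positions are plain ints standing in for source positions):

// True when the read barrier on a let/const load can be omitted.
bool CanSkipInitCheck(bool same_declaration_scope, bool is_legacy_const,
                      int initializer_position, int use_position) {
  if (!same_declaration_scope) return false;   // conservative for closures
  if (is_legacy_const) return false;           // const may stay uninitialized
  return initializer_position < use_position;  // use after initialization
}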
@@ -1302,10 +1376,11 @@ void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Comment cmnt(masm_, "[ ObjectLiteral");
+ Handle<FixedArray> constant_properties = expr->constant_properties();
__ movq(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
__ push(FieldOperand(rdi, JSFunction::kLiteralsOffset));
__ Push(Smi::FromInt(expr->literal_index()));
- __ Push(expr->constant_properties());
+ __ Push(constant_properties);
int flags = expr->fast_elements()
? ObjectLiteral::kFastElements
: ObjectLiteral::kNoFlags;
@@ -1313,10 +1388,15 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
? ObjectLiteral::kHasFunction
: ObjectLiteral::kNoFlags;
__ Push(Smi::FromInt(flags));
+ int properties_count = constant_properties->length() / 2;
if (expr->depth() > 1) {
__ CallRuntime(Runtime::kCreateObjectLiteral, 4);
- } else {
+ } else if (flags != ObjectLiteral::kFastElements ||
+ properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
__ CallRuntime(Runtime::kCreateObjectLiteralShallow, 4);
+ } else {
+ FastCloneShallowObjectStub stub(properties_count);
+ __ CallStub(&stub);
}
// If result_saved is true the result is on top of the stack. If
@@ -1350,9 +1430,9 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
VisitForAccumulatorValue(value);
__ Move(rcx, key->handle());
__ movq(rdx, Operand(rsp, 0));
- Handle<Code> ic = is_strict_mode()
- ? isolate()->builtins()->StoreIC_Initialize_Strict()
- : isolate()->builtins()->StoreIC_Initialize();
+ Handle<Code> ic = is_classic_mode()
+ ? isolate()->builtins()->StoreIC_Initialize()
+ : isolate()->builtins()->StoreIC_Initialize_Strict();
__ call(ic, RelocInfo::CODE_TARGET, key->id());
PrepareForBailoutForId(key->id(), NO_REGISTERS);
} else {
@@ -1404,24 +1484,42 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
ZoneList<Expression*>* subexprs = expr->values();
int length = subexprs->length();
+ Handle<FixedArray> constant_elements = expr->constant_elements();
+ ASSERT_EQ(2, constant_elements->length());
+ ElementsKind constant_elements_kind =
+ static_cast<ElementsKind>(Smi::cast(constant_elements->get(0))->value());
+ bool has_constant_fast_elements = constant_elements_kind == FAST_ELEMENTS;
+ Handle<FixedArrayBase> constant_elements_values(
+ FixedArrayBase::cast(constant_elements->get(1)));
__ movq(rbx, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
__ push(FieldOperand(rbx, JSFunction::kLiteralsOffset));
__ Push(Smi::FromInt(expr->literal_index()));
- __ Push(expr->constant_elements());
- if (expr->constant_elements()->map() ==
- isolate()->heap()->fixed_cow_array_map()) {
+ __ Push(constant_elements);
+ Heap* heap = isolate()->heap();
+ if (has_constant_fast_elements &&
+ constant_elements_values->map() == heap->fixed_cow_array_map()) {
+ // If the elements are already FAST_ELEMENTS, the boilerplate cannot
+ // change, so it's possible to specialize the stub in advance.
+ __ IncrementCounter(isolate()->counters()->cow_arrays_created_stub(), 1);
FastCloneShallowArrayStub stub(
- FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS, length);
+ FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS,
+ length);
__ CallStub(&stub);
- __ IncrementCounter(isolate()->counters()->cow_arrays_created_stub(), 1);
} else if (expr->depth() > 1) {
__ CallRuntime(Runtime::kCreateArrayLiteral, 3);
} else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
__ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
} else {
- FastCloneShallowArrayStub stub(
- FastCloneShallowArrayStub::CLONE_ELEMENTS, length);
+ ASSERT(constant_elements_kind == FAST_ELEMENTS ||
+ constant_elements_kind == FAST_SMI_ONLY_ELEMENTS ||
+ FLAG_smi_only_arrays);
+ // If the elements are already FAST_ELEMENTS, the boilerplate cannot
+ // change, so it's possible to specialize the stub in advance.
+ FastCloneShallowArrayStub::Mode mode = has_constant_fast_elements
+ ? FastCloneShallowArrayStub::CLONE_ELEMENTS
+ : FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS;
+ FastCloneShallowArrayStub stub(mode, length);
__ CallStub(&stub);
}
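
The chain above picks between a copy-on-write stub, two runtime entries and
a clone stub. Written out as a decision function (a sketch; max_length
stands in for FastCloneShallowArrayStub::kMaximumClonedLength):

enum ArrayLiteralPath {
  kCowStub,         // FastCloneShallowArrayStub, COPY_ON_WRITE_ELEMENTS
  kRuntimeDeep,     // Runtime::kCreateArrayLiteral
  kRuntimeShallow,  // Runtime::kCreateArrayLiteralShallow
  kCloneStub        // FastCloneShallowArrayStub, CLONE_[ANY_]ELEMENTS
};

ArrayLiteralPath ChoosePath(bool has_constant_fast_elements, bool cow_map,
                            int depth, int length, int max_length) {
  if (has_constant_fast_elements && cow_map) return kCowStub;
  if (depth > 1) return kRuntimeDeep;
  if (length > max_length) return kRuntimeShallow;
  return kCloneStub;  // CLONE_ELEMENTS for fast elements, else CLONE_ANY
}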
@@ -1444,14 +1542,28 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
}
VisitForAccumulatorValue(subexpr);
- // Store the subexpression value in the array's elements.
- __ movq(rbx, Operand(rsp, 0)); // Copy of array literal.
- __ movq(rbx, FieldOperand(rbx, JSObject::kElementsOffset));
- int offset = FixedArray::kHeaderSize + (i * kPointerSize);
- __ movq(FieldOperand(rbx, offset), result_register());
-
- // Update the write barrier for the array store.
- __ RecordWrite(rbx, offset, result_register(), rcx);
+ if (constant_elements_kind == FAST_ELEMENTS) {
 +      // Fast-case array literals with ElementsKind of FAST_ELEMENTS cannot
 +      // transition, so they don't need to call the runtime stub.
+ int offset = FixedArray::kHeaderSize + (i * kPointerSize);
+ __ movq(rbx, Operand(rsp, 0)); // Copy of array literal.
+ __ movq(rbx, FieldOperand(rbx, JSObject::kElementsOffset));
+ // Store the subexpression value in the array's elements.
+ __ movq(FieldOperand(rbx, offset), result_register());
+ // Update the write barrier for the array store.
+ __ RecordWriteField(rbx, offset, result_register(), rcx,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ INLINE_SMI_CHECK);
+ } else {
+ // Store the subexpression value in the array's elements.
+ __ movq(rbx, Operand(rsp, 0)); // Copy of array literal.
+ __ movq(rdi, FieldOperand(rbx, JSObject::kMapOffset));
+ __ Move(rcx, Smi::FromInt(i));
+ __ Move(rdx, Smi::FromInt(expr->literal_index()));
+ StoreArrayLiteralElementStub stub;
+ __ CallStub(&stub);
+ }
PrepareForBailoutForId(expr->GetIdForElement(i), NO_REGISTERS);
}
@@ -1582,14 +1694,14 @@ void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
Literal* key = prop->key()->AsLiteral();
__ Move(rcx, key->handle());
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- __ call(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
+ __ call(ic, RelocInfo::CODE_TARGET, prop->id());
}
void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- __ call(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
+ __ call(ic, RelocInfo::CODE_TARGET, prop->id());
}
@@ -1698,9 +1810,9 @@ void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) {
__ movq(rdx, rax);
__ pop(rax); // Restore value.
__ Move(rcx, prop->key()->AsLiteral()->handle());
- Handle<Code> ic = is_strict_mode()
- ? isolate()->builtins()->StoreIC_Initialize_Strict()
- : isolate()->builtins()->StoreIC_Initialize();
+ Handle<Code> ic = is_classic_mode()
+ ? isolate()->builtins()->StoreIC_Initialize()
+ : isolate()->builtins()->StoreIC_Initialize_Strict();
__ call(ic);
break;
}
@@ -1711,9 +1823,9 @@ void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) {
__ movq(rcx, rax);
__ pop(rdx);
__ pop(rax); // Restore value.
- Handle<Code> ic = is_strict_mode()
- ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
- : isolate()->builtins()->KeyedStoreIC_Initialize();
+ Handle<Code> ic = is_classic_mode()
+ ? isolate()->builtins()->KeyedStoreIC_Initialize()
+ : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
__ call(ic);
break;
}
@@ -1729,9 +1841,9 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
// Global var, const, or let.
__ Move(rcx, var->name());
__ movq(rdx, GlobalObjectOperand());
- Handle<Code> ic = is_strict_mode()
- ? isolate()->builtins()->StoreIC_Initialize_Strict()
- : isolate()->builtins()->StoreIC_Initialize();
+ Handle<Code> ic = is_classic_mode()
+ ? isolate()->builtins()->StoreIC_Initialize()
+ : isolate()->builtins()->StoreIC_Initialize_Strict();
__ call(ic, RelocInfo::CODE_TARGET_CONTEXT);
} else if (op == Token::INIT_CONST) {
// Const initializers need a write barrier.
@@ -1756,13 +1868,13 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
__ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
}
- } else if (var->mode() == Variable::LET && op != Token::INIT_LET) {
+ } else if (var->mode() == LET && op != Token::INIT_LET) {
// Non-initializing assignment to let variable needs a write barrier.
if (var->IsLookupSlot()) {
__ push(rax); // Value.
__ push(rsi); // Context.
__ Push(var->name());
- __ Push(Smi::FromInt(strict_mode_flag()));
+ __ Push(Smi::FromInt(language_mode()));
__ CallRuntime(Runtime::kStoreContextSlot, 4);
} else {
ASSERT(var->IsStackAllocated() || var->IsContextSlot());
@@ -1777,12 +1889,14 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
__ movq(location, rax);
if (var->IsContextSlot()) {
__ movq(rdx, rax);
- __ RecordWrite(rcx, Context::SlotOffset(var->index()), rdx, rbx);
+ __ RecordWriteContextSlot(
+ rcx, Context::SlotOffset(var->index()), rdx, rbx, kDontSaveFPRegs);
}
}
- } else if (var->mode() != Variable::CONST) {
- // Assignment to var or initializing assignment to let.
+ } else if (!var->is_const_mode() || op == Token::INIT_CONST_HARMONY) {
+ // Assignment to var or initializing assignment to let/const
+ // in harmony mode.
if (var->IsStackAllocated() || var->IsContextSlot()) {
MemOperand location = VarOperand(var, rcx);
if (FLAG_debug_code && op == Token::INIT_LET) {
@@ -1795,14 +1909,15 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
__ movq(location, rax);
if (var->IsContextSlot()) {
__ movq(rdx, rax);
- __ RecordWrite(rcx, Context::SlotOffset(var->index()), rdx, rbx);
+ __ RecordWriteContextSlot(
+ rcx, Context::SlotOffset(var->index()), rdx, rbx, kDontSaveFPRegs);
}
} else {
ASSERT(var->IsLookupSlot());
__ push(rax); // Value.
__ push(rsi); // Context.
__ Push(var->name());
- __ Push(Smi::FromInt(strict_mode_flag()));
+ __ Push(Smi::FromInt(language_mode()));
__ CallRuntime(Runtime::kStoreContextSlot, 4);
}
}
@@ -1834,9 +1949,9 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
} else {
__ pop(rdx);
}
- Handle<Code> ic = is_strict_mode()
- ? isolate()->builtins()->StoreIC_Initialize_Strict()
- : isolate()->builtins()->StoreIC_Initialize();
+ Handle<Code> ic = is_classic_mode()
+ ? isolate()->builtins()->StoreIC_Initialize()
+ : isolate()->builtins()->StoreIC_Initialize_Strict();
__ call(ic, RelocInfo::CODE_TARGET, expr->id());
// If the assignment ends an initialization block, revert to fast case.
@@ -1874,9 +1989,9 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
}
// Record source code position before IC call.
SetSourcePosition(expr->position());
- Handle<Code> ic = is_strict_mode()
- ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
- : isolate()->builtins()->KeyedStoreIC_Initialize();
+ Handle<Code> ic = is_classic_mode()
+ ? isolate()->builtins()->KeyedStoreIC_Initialize()
+ : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
__ call(ic, RelocInfo::CODE_TARGET, expr->id());
// If the assignment ends an initialization block, revert to fast case.
@@ -1981,6 +2096,7 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr, CallFunctionFlags flags) {
// Record source position for debugger.
SetSourcePosition(expr->position());
CallFunctionStub stub(arg_count, flags);
+ __ movq(rdi, Operand(rsp, (arg_count + 1) * kPointerSize));
__ CallStub(&stub);
RecordJSReturnSite(expr);
// Restore context register.
@@ -1990,8 +2106,7 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr, CallFunctionFlags flags) {
}
-void FullCodeGenerator::EmitResolvePossiblyDirectEval(ResolveEvalFlag flag,
- int arg_count) {
+void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
// Push copy of the first argument or undefined if it doesn't exist.
if (arg_count > 0) {
__ push(Operand(rsp, arg_count * kPointerSize));
@@ -2002,17 +2117,14 @@ void FullCodeGenerator::EmitResolvePossiblyDirectEval(ResolveEvalFlag flag,
// Push the receiver of the enclosing function and do runtime call.
__ push(Operand(rbp, (2 + info_->scope()->num_parameters()) * kPointerSize));
- // Push the strict mode flag. In harmony mode every eval call
- // is a strict mode eval call.
- StrictModeFlag strict_mode = strict_mode_flag();
- if (FLAG_harmony_block_scoping) {
- strict_mode = kStrictMode;
- }
- __ Push(Smi::FromInt(strict_mode));
+ // Push the language mode.
+ __ Push(Smi::FromInt(language_mode()));
+
 +  // Push the start position of the scope the call resides in.
+ __ Push(Smi::FromInt(scope()->start_position()));
- __ CallRuntime(flag == SKIP_CONTEXT_LOOKUP
- ? Runtime::kResolvePossiblyDirectEvalNoLookup
- : Runtime::kResolvePossiblyDirectEval, 4);
+ // Do the runtime call.
+ __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 5);
}
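
After this change eval resolution always funnels through a single runtime
call taking five arguments. In push order (descriptive names only, not
V8's):

// Arguments to Runtime::kResolvePossiblyDirectEval, bottom to top.
enum ResolveEvalArg {
  kFunctionCopy = 0,     // copy of the callee, pushed before this helper
  kFirstArgOrUndefined,  // the eval'd source, or undefined if absent
  kEnclosingReceiver,    // receiver of the enclosing function
  kLanguageMode,         // smi-encoded language mode
  kScopeStartPosition    // smi: start position of the calling scope
};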
@@ -2043,27 +2155,10 @@ void FullCodeGenerator::VisitCall(Call* expr) {
VisitForStackValue(args->at(i));
}
- // If we know that eval can only be shadowed by eval-introduced
- // variables we attempt to load the global eval function directly in
- // generated code. If we succeed, there is no need to perform a
- // context lookup in the runtime system.
- Label done;
- Variable* var = proxy->var();
- if (!var->IsUnallocated() && var->mode() == Variable::DYNAMIC_GLOBAL) {
- Label slow;
- EmitLoadGlobalCheckExtensions(var, NOT_INSIDE_TYPEOF, &slow);
- // Push the function and resolve eval.
- __ push(rax);
- EmitResolvePossiblyDirectEval(SKIP_CONTEXT_LOOKUP, arg_count);
- __ jmp(&done);
- __ bind(&slow);
- }
-
// Push a copy of the function (found below the arguments) and resolve
// eval.
__ push(Operand(rsp, (arg_count + 1) * kPointerSize));
- EmitResolvePossiblyDirectEval(PERFORM_CONTEXT_LOOKUP, arg_count);
- __ bind(&done);
+ EmitResolvePossiblyDirectEval(arg_count);
// The runtime call returns a pair of values in rax (function) and
// rdx (receiver). Touch up the stack with the right values.
@@ -2073,6 +2168,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
// Record source position for debugger.
SetSourcePosition(expr->position());
CallFunctionStub stub(arg_count, RECEIVER_MIGHT_BE_IMPLICIT);
+ __ movq(rdi, Operand(rsp, (arg_count + 1) * kPointerSize));
__ CallStub(&stub);
RecordJSReturnSite(expr);
// Restore context register.
@@ -2182,7 +2278,8 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
}
-void FullCodeGenerator::EmitIsSmi(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -2194,7 +2291,7 @@ void FullCodeGenerator::EmitIsSmi(ZoneList<Expression*>* args) {
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
__ JumpIfSmi(rax, if_true);
__ jmp(if_false);
@@ -2202,7 +2299,8 @@ void FullCodeGenerator::EmitIsSmi(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitIsNonNegativeSmi(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsNonNegativeSmi(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -2214,7 +2312,7 @@ void FullCodeGenerator::EmitIsNonNegativeSmi(ZoneList<Expression*>* args) {
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Condition non_negative_smi = masm()->CheckNonNegativeSmi(rax);
Split(non_negative_smi, if_true, if_false, fall_through);
@@ -2222,7 +2320,8 @@ void FullCodeGenerator::EmitIsNonNegativeSmi(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitIsObject(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsObject(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -2246,14 +2345,15 @@ void FullCodeGenerator::EmitIsObject(ZoneList<Expression*>* args) {
__ cmpq(rbx, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
__ j(below, if_false);
__ cmpq(rbx, Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(below_equal, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
}
-void FullCodeGenerator::EmitIsSpecObject(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -2267,14 +2367,15 @@ void FullCodeGenerator::EmitIsSpecObject(ZoneList<Expression*>* args) {
__ JumpIfSmi(rax, if_false);
__ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rbx);
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(above_equal, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
}
-void FullCodeGenerator::EmitIsUndetectableObject(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsUndetectableObject(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -2290,7 +2391,7 @@ void FullCodeGenerator::EmitIsUndetectableObject(ZoneList<Expression*>* args) {
__ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
__ testb(FieldOperand(rbx, Map::kBitFieldOffset),
Immediate(1 << Map::kIsUndetectable));
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(not_zero, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
@@ -2298,7 +2399,8 @@ void FullCodeGenerator::EmitIsUndetectableObject(ZoneList<Expression*>* args) {
void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
- ZoneList<Expression*>* args) {
+ CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -2374,12 +2476,13 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
Immediate(1 << Map::kStringWrapperSafeForDefaultValueOf));
__ jmp(if_true);
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
context()->Plug(if_true, if_false);
}
-void FullCodeGenerator::EmitIsFunction(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -2393,14 +2496,15 @@ void FullCodeGenerator::EmitIsFunction(ZoneList<Expression*>* args) {
__ JumpIfSmi(rax, if_false);
__ CmpObjectType(rax, JS_FUNCTION_TYPE, rbx);
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(equal, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
}
-void FullCodeGenerator::EmitIsArray(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsArray(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -2414,14 +2518,15 @@ void FullCodeGenerator::EmitIsArray(ZoneList<Expression*>* args) {
__ JumpIfSmi(rax, if_false);
__ CmpObjectType(rax, JS_ARRAY_TYPE, rbx);
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(equal, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
}
-void FullCodeGenerator::EmitIsRegExp(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsRegExp(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -2435,7 +2540,7 @@ void FullCodeGenerator::EmitIsRegExp(ZoneList<Expression*>* args) {
__ JumpIfSmi(rax, if_false);
__ CmpObjectType(rax, JS_REGEXP_TYPE, rbx);
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(equal, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
@@ -2443,8 +2548,8 @@ void FullCodeGenerator::EmitIsRegExp(ZoneList<Expression*>* args) {
-void FullCodeGenerator::EmitIsConstructCall(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 0);
+void FullCodeGenerator::EmitIsConstructCall(CallRuntime* expr) {
+ ASSERT(expr->arguments()->length() == 0);
Label materialize_true, materialize_false;
Label* if_true = NULL;
@@ -2467,14 +2572,15 @@ void FullCodeGenerator::EmitIsConstructCall(ZoneList<Expression*>* args) {
__ bind(&check_frame_marker);
__ Cmp(Operand(rax, StandardFrameConstants::kMarkerOffset),
Smi::FromInt(StackFrame::CONSTRUCT));
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(equal, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
}
-void FullCodeGenerator::EmitObjectEquals(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitObjectEquals(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 2);
// Load the two objects into registers and perform the comparison.
@@ -2490,14 +2596,15 @@ void FullCodeGenerator::EmitObjectEquals(ZoneList<Expression*>* args) {
__ pop(rbx);
__ cmpq(rax, rbx);
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(equal, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
}
-void FullCodeGenerator::EmitArguments(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitArguments(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
// ArgumentsAccessStub expects the key in rdx and the formal
@@ -2511,8 +2618,8 @@ void FullCodeGenerator::EmitArguments(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitArgumentsLength(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 0);
+void FullCodeGenerator::EmitArgumentsLength(CallRuntime* expr) {
+ ASSERT(expr->arguments()->length() == 0);
Label exit;
// Get the number of formal parameters.
@@ -2534,7 +2641,8 @@ void FullCodeGenerator::EmitArgumentsLength(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitClassOf(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
Label done, null, function, non_function_constructor;
@@ -2545,20 +2653,24 @@ void FullCodeGenerator::EmitClassOf(ZoneList<Expression*>* args) {
// Check that the object is a JS object but take special care of JS
// functions to make sure they have 'Function' as their class.
+ // Assume that there are only two callable types, and one of them is at
+ // either end of the type range for JS object types. Saves extra comparisons.
+ STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
__ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rax);
// Map is now in rax.
__ j(below, &null);
-
- // As long as LAST_CALLABLE_SPEC_OBJECT_TYPE is the last instance type, and
- // FIRST_CALLABLE_SPEC_OBJECT_TYPE comes right after
- // LAST_NONCALLABLE_SPEC_OBJECT_TYPE, we can avoid checking for the latter.
- STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
- STATIC_ASSERT(FIRST_CALLABLE_SPEC_OBJECT_TYPE ==
- LAST_NONCALLABLE_SPEC_OBJECT_TYPE + 1);
- __ CmpInstanceType(rax, FIRST_CALLABLE_SPEC_OBJECT_TYPE);
- __ j(above_equal, &function);
-
- // Check if the constructor in the map is a function.
+ STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
+ FIRST_SPEC_OBJECT_TYPE + 1);
+ __ j(equal, &function);
+
+ __ CmpInstanceType(rax, LAST_SPEC_OBJECT_TYPE);
+ STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
+ LAST_SPEC_OBJECT_TYPE - 1);
+ __ j(equal, &function);
+ // Assume that there is no larger type.
+ STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == LAST_TYPE - 1);
+
+ // Check if the constructor in the map is a JS function.
__ movq(rax, FieldOperand(rax, Map::kConstructorOffset));
__ CmpObjectType(rax, JS_FUNCTION_TYPE, rbx);
__ j(not_equal, &non_function_constructor);
@@ -2590,7 +2702,7 @@ void FullCodeGenerator::EmitClassOf(ZoneList<Expression*>* args) {
}
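The rewritten class-of check leans on the layout of the instance-type enum: there are exactly two callable spec-object types, and they sit at the two ends of the spec-object range, which is why two equality checks (plus the initial range check) suffice where the old code needed an open-ended above_equal comparison. A self-contained model of the ordering the STATIC_ASSERTs pin down; the numeric values are invented for illustration:

    enum InstanceType {
      FIRST_SPEC_OBJECT_TYPE = 100,            // callable (JS_FUNCTION_PROXY_TYPE)
      FIRST_NONCALLABLE_SPEC_OBJECT_TYPE,      // == FIRST_SPEC_OBJECT_TYPE + 1
      // ... every non-callable JS object type ...
      LAST_NONCALLABLE_SPEC_OBJECT_TYPE = 110,
      LAST_SPEC_OBJECT_TYPE,                   // callable (JS_FUNCTION_TYPE)
      LAST_TYPE = LAST_SPEC_OBJECT_TYPE
    };

    bool IsCallableSpecObject(InstanceType t) {
      // Only the two endpoints of the spec-object range are callable.
      return t == FIRST_SPEC_OBJECT_TYPE || t == LAST_SPEC_OBJECT_TYPE;
    }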
-void FullCodeGenerator::EmitLog(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitLog(CallRuntime* expr) {
// Conditionally generate a log call.
// Args:
// 0 (literal string): The type of logging (corresponds to the flags).
@@ -2598,6 +2710,7 @@ void FullCodeGenerator::EmitLog(ZoneList<Expression*>* args) {
// 1 (string): Format string. Access the string at argument index 2
// with '%2s' (see Logger::LogRuntime for all the formats).
// 2 (array): Arguments to the format string.
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(args->length(), 3);
if (CodeGenerator::ShouldGenerateLog(args->at(0))) {
VisitForStackValue(args->at(1));
@@ -2610,8 +2723,8 @@ void FullCodeGenerator::EmitLog(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitRandomHeapNumber(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 0);
+void FullCodeGenerator::EmitRandomHeapNumber(CallRuntime* expr) {
+ ASSERT(expr->arguments()->length() == 0);
Label slow_allocate_heapnumber;
Label heapnumber_allocated;
@@ -2630,9 +2743,12 @@ void FullCodeGenerator::EmitRandomHeapNumber(ZoneList<Expression*>* args) {
// The fresh HeapNumber is in rbx, which is callee-save on both x64 ABIs.
__ PrepareCallCFunction(1);
#ifdef _WIN64
- __ LoadAddress(rcx, ExternalReference::isolate_address());
+ __ movq(rcx, ContextOperand(context_register(), Context::GLOBAL_INDEX));
+ __ movq(rcx, FieldOperand(rcx, GlobalObject::kGlobalContextOffset));
+
#else
- __ LoadAddress(rdi, ExternalReference::isolate_address());
+ __ movq(rdi, ContextOperand(context_register(), Context::GLOBAL_INDEX));
+ __ movq(rdi, FieldOperand(rdi, GlobalObject::kGlobalContextOffset));
#endif
__ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
@@ -2652,9 +2768,10 @@ void FullCodeGenerator::EmitRandomHeapNumber(ZoneList<Expression*>* args) {
}
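The interesting change here is the argument to the C++ random function: instead of the isolate address, the stub performs two dependent loads (the global object out of the current context, then the global context out of the global object) and passes that pointer in the first-argument register, rcx on Win64 and rdi on the System V x64 ABI, so random state becomes per-context. A runnable sketch of the kind of generator this feeds, mirroring V8's two-MWC scheme; the field names are invented and the constants should be treated as illustrative:

    #include <cstdint>

    struct GlobalContext { uint32_t seed0, seed1; };  // invented stand-in

    uint32_t random_uint32(GlobalContext* ctx) {
      // Two 16-bit multiply-with-carry generators, combined into 32 bits.
      ctx->seed0 = 18273 * (ctx->seed0 & 0xFFFF) + (ctx->seed0 >> 16);
      ctx->seed1 = 36969 * (ctx->seed1 & 0xFFFF) + (ctx->seed1 >> 16);
      return (ctx->seed0 << 16) + (ctx->seed1 & 0xFFFF);
    }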
-void FullCodeGenerator::EmitSubString(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitSubString(CallRuntime* expr) {
// Load the arguments on the stack and call the stub.
SubStringStub stub;
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 3);
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
@@ -2664,9 +2781,10 @@ void FullCodeGenerator::EmitSubString(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitRegExpExec(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitRegExpExec(CallRuntime* expr) {
// Load the arguments on the stack and call the stub.
RegExpExecStub stub;
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 4);
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
@@ -2677,7 +2795,8 @@ void FullCodeGenerator::EmitRegExpExec(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitValueOf(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitValueOf(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0)); // Load the object.
@@ -2695,8 +2814,9 @@ void FullCodeGenerator::EmitValueOf(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitMathPow(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
// Load the arguments on the stack and call the runtime function.
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 2);
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
@@ -2706,7 +2826,8 @@ void FullCodeGenerator::EmitMathPow(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitSetValueOf(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitSetValueOf(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 2);
VisitForStackValue(args->at(0)); // Load the object.
@@ -2726,14 +2847,15 @@ void FullCodeGenerator::EmitSetValueOf(ZoneList<Expression*>* args) {
// Update the write barrier. Save the value as it will be
// overwritten by the write barrier code and is needed afterward.
__ movq(rdx, rax);
- __ RecordWrite(rbx, JSValue::kValueOffset, rdx, rcx);
+ __ RecordWriteField(rbx, JSValue::kValueOffset, rdx, rcx, kDontSaveFPRegs);
__ bind(&done);
context()->Plug(rax);
}
-void FullCodeGenerator::EmitNumberToString(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(args->length(), 1);
// Load the argument on the stack and call the stub.
@@ -2745,7 +2867,8 @@ void FullCodeGenerator::EmitNumberToString(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitStringCharFromCode(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -2763,7 +2886,8 @@ void FullCodeGenerator::EmitStringCharFromCode(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitStringCharCodeAt(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 2);
VisitForStackValue(args->at(0));
@@ -2771,7 +2895,6 @@ void FullCodeGenerator::EmitStringCharCodeAt(ZoneList<Expression*>* args) {
Register object = rbx;
Register index = rax;
- Register scratch = rcx;
Register result = rdx;
__ pop(object);
@@ -2781,7 +2904,6 @@ void FullCodeGenerator::EmitStringCharCodeAt(ZoneList<Expression*>* args) {
Label done;
StringCharCodeAtGenerator generator(object,
index,
- scratch,
result,
&need_conversion,
&need_conversion,
@@ -2810,7 +2932,8 @@ void FullCodeGenerator::EmitStringCharCodeAt(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitStringCharAt(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 2);
VisitForStackValue(args->at(0));
@@ -2818,8 +2941,7 @@ void FullCodeGenerator::EmitStringCharAt(ZoneList<Expression*>* args) {
Register object = rbx;
Register index = rax;
- Register scratch1 = rcx;
- Register scratch2 = rdx;
+ Register scratch = rdx;
Register result = rax;
__ pop(object);
@@ -2829,8 +2951,7 @@ void FullCodeGenerator::EmitStringCharAt(ZoneList<Expression*>* args) {
Label done;
StringCharAtGenerator generator(object,
index,
- scratch1,
- scratch2,
+ scratch,
result,
&need_conversion,
&need_conversion,
@@ -2859,7 +2980,8 @@ void FullCodeGenerator::EmitStringCharAt(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitStringAdd(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(2, args->length());
VisitForStackValue(args->at(0));
@@ -2871,7 +2993,8 @@ void FullCodeGenerator::EmitStringAdd(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitStringCompare(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitStringCompare(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(2, args->length());
VisitForStackValue(args->at(0));
@@ -2883,10 +3006,11 @@ void FullCodeGenerator::EmitStringCompare(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitMathSin(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitMathSin(CallRuntime* expr) {
// Load the argument on the stack and call the stub.
TranscendentalCacheStub stub(TranscendentalCache::SIN,
TranscendentalCacheStub::TAGGED);
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForStackValue(args->at(0));
__ CallStub(&stub);
@@ -2894,10 +3018,11 @@ void FullCodeGenerator::EmitMathSin(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitMathCos(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitMathCos(CallRuntime* expr) {
// Load the argument on the stack and call the stub.
TranscendentalCacheStub stub(TranscendentalCache::COS,
TranscendentalCacheStub::TAGGED);
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForStackValue(args->at(0));
__ CallStub(&stub);
@@ -2905,10 +3030,23 @@ void FullCodeGenerator::EmitMathCos(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitMathLog(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitMathTan(CallRuntime* expr) {
+ // Load the argument on the stack and call the stub.
+ TranscendentalCacheStub stub(TranscendentalCache::TAN,
+ TranscendentalCacheStub::TAGGED);
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+ VisitForStackValue(args->at(0));
+ __ CallStub(&stub);
+ context()->Plug(rax);
+}
+
+
+void FullCodeGenerator::EmitMathLog(CallRuntime* expr) {
// Load the argument on the stack and call the stub.
TranscendentalCacheStub stub(TranscendentalCache::LOG,
TranscendentalCacheStub::TAGGED);
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForStackValue(args->at(0));
__ CallStub(&stub);
@@ -2916,8 +3054,9 @@ void FullCodeGenerator::EmitMathLog(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitMathSqrt(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitMathSqrt(CallRuntime* expr) {
// Load the argument on the stack and call the runtime function.
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForStackValue(args->at(0));
__ CallRuntime(Runtime::kMath_sqrt, 1);
@@ -2925,7 +3064,8 @@ void FullCodeGenerator::EmitMathSqrt(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitCallFunction(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() >= 2);
int arg_count = args->length() - 2; // 2 ~ receiver and function.
@@ -2934,18 +3074,31 @@ void FullCodeGenerator::EmitCallFunction(ZoneList<Expression*>* args) {
}
VisitForAccumulatorValue(args->last()); // Function.
+ // Check for proxy.
+ Label proxy, done;
+ __ CmpObjectType(rax, JS_FUNCTION_PROXY_TYPE, rbx);
+ __ j(equal, &proxy);
+
// InvokeFunction requires the function in rdi. Move it in there.
__ movq(rdi, result_register());
ParameterCount count(arg_count);
__ InvokeFunction(rdi, count, CALL_FUNCTION,
NullCallWrapper(), CALL_AS_METHOD);
__ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ jmp(&done);
+
+ __ bind(&proxy);
+ __ push(rax);
+ __ CallRuntime(Runtime::kCall, args->length());
+ __ bind(&done);
+
context()->Plug(rax);
}
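%_CallFunction may now be handed a JS function proxy, which InvokeFunction cannot invoke directly, so the stub grows a two-way dispatch: ordinary functions take the fast InvokeFunction path as before, while a proxy is pushed back onto the argument stack and routed to Runtime::kCall, which knows how to fetch and invoke the proxy's call trap. The added control flow, shown as hedged pseudo-C++ rather than literal V8 code:

    if (target_instance_type == JS_FUNCTION_PROXY_TYPE) {
      Push(target);                                  // complete the runtime call's args
      result = CallRuntime(Runtime::kCall, length);  // runtime handles the call trap
    } else {
      result = InvokeFunction(target, arg_count);    // unchanged fast path
    }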
-void FullCodeGenerator::EmitRegExpConstructResult(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
RegExpConstructResultStub stub;
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 3);
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
@@ -2955,7 +3108,8 @@ void FullCodeGenerator::EmitRegExpConstructResult(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitSwapElements(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitSwapElements(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 3);
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
@@ -3010,14 +3164,33 @@ void FullCodeGenerator::EmitSwapElements(ZoneList<Expression*>* args) {
__ movq(Operand(index_2, 0), object);
__ movq(Operand(index_1, 0), temp);
- Label new_space;
- __ InNewSpace(elements, temp, equal, &new_space);
-
- __ movq(object, elements);
- __ RecordWriteHelper(object, index_1, temp);
- __ RecordWriteHelper(elements, index_2, temp);
+ Label no_remembered_set;
+ __ CheckPageFlag(elements,
+ temp,
+ 1 << MemoryChunk::SCAN_ON_SCAVENGE,
+ not_zero,
+ &no_remembered_set,
+ Label::kNear);
+ // Possible optimization: do a check that both values are Smis
+ // (OR them together and test the result against the Smi mask).
+
+ // We are swapping two objects in an array and the incremental marker never
+ // pauses in the middle of scanning a single object. Therefore the
+ // incremental marker is not disturbed, so we don't need to call the
+ // RecordWrite stub that notifies the incremental marker.
+ __ RememberedSetHelper(elements,
+ index_1,
+ temp,
+ kDontSaveFPRegs,
+ MacroAssembler::kFallThroughAtEnd);
+ __ RememberedSetHelper(elements,
+ index_2,
+ temp,
+ kDontSaveFPRegs,
+ MacroAssembler::kFallThroughAtEnd);
+
+ __ bind(&no_remembered_set);
- __ bind(&new_space);
// We are done. Drop elements from the stack, and return undefined.
__ addq(rsp, Immediate(3 * kPointerSize));
__ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
@@ -3031,7 +3204,8 @@ void FullCodeGenerator::EmitSwapElements(ZoneList<Expression*>* args) {
}
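The swap's write barrier changes character: instead of two RecordWriteHelper calls, the code first tests the elements page's SCAN_ON_SCAVENGE flag (a page that is rescanned wholesale during scavenge needs no per-slot remembered-set entries) and only otherwise records the two slot addresses, deliberately skipping the incremental-marking barrier for the reason stated in the comment above. A self-contained model of that fast path, with invented types:

    #include <cstdint>
    #include <vector>

    struct Page {
      static const uint32_t kScanOnScavenge = 1u << 1;  // illustrative flag bit
      uint32_t flags;
    };
    struct StoreBuffer { std::vector<void**> slots; };

    void RecordSwap(Page* page, StoreBuffer* sb, void** slot1, void** slot2) {
      if (page->flags & Page::kScanOnScavenge) return;  // page rescanned anyway
      sb->slots.push_back(slot1);  // remembered-set entry for index_1
      sb->slots.push_back(slot2);  // remembered-set entry for index_2
    }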
-void FullCodeGenerator::EmitGetFromCache(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(2, args->length());
ASSERT_NE(NULL, args->at(0)->AsLiteral());
@@ -3087,7 +3261,8 @@ void FullCodeGenerator::EmitGetFromCache(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitIsRegExpEquivalent(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsRegExpEquivalent(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(2, args->length());
Register right = rax;
@@ -3125,7 +3300,8 @@ void FullCodeGenerator::EmitIsRegExpEquivalent(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitHasCachedArrayIndex(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -3139,7 +3315,7 @@ void FullCodeGenerator::EmitHasCachedArrayIndex(ZoneList<Expression*>* args) {
__ testl(FieldOperand(rax, String::kHashFieldOffset),
Immediate(String::kContainsCachedArrayIndexMask));
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
__ j(zero, if_true);
__ jmp(if_false);
@@ -3147,7 +3323,8 @@ void FullCodeGenerator::EmitHasCachedArrayIndex(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitGetCachedArrayIndex(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -3163,10 +3340,11 @@ void FullCodeGenerator::EmitGetCachedArrayIndex(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
Label bailout, return_result, done, one_char_separator, long_separator,
non_trivial_array, not_size_one_array, loop,
loop_1, loop_1_condition, loop_2, loop_2_entry, loop_3, loop_3_entry;
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 2);
// We will leave the separator on the stack until the end of the function.
VisitForStackValue(args->at(1));
@@ -3496,14 +3674,16 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
if (property != NULL) {
VisitForStackValue(property->obj());
VisitForStackValue(property->key());
- __ Push(Smi::FromInt(strict_mode_flag()));
+ StrictModeFlag strict_mode_flag = (language_mode() == CLASSIC_MODE)
+ ? kNonStrictMode : kStrictMode;
+ __ Push(Smi::FromInt(strict_mode_flag));
__ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
context()->Plug(rax);
} else if (proxy != NULL) {
Variable* var = proxy->var();
// Delete of an unqualified identifier is disallowed in strict mode
// but "delete this" is allowed.
- ASSERT(strict_mode_flag() == kNonStrictMode || var->is_this());
+ ASSERT(language_mode() == CLASSIC_MODE || var->is_this());
if (var->IsUnallocated()) {
__ push(GlobalObjectOperand());
__ Push(var->name());
@@ -3545,17 +3725,41 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
// Unary NOT has no side effects so it's only necessary to visit the
// subexpression. Match the optimizing compiler by not branching.
VisitForEffect(expr->expression());
+ } else if (context()->IsTest()) {
+ const TestContext* test = TestContext::cast(context());
+ // The labels are swapped for the recursive call.
+ VisitForControl(expr->expression(),
+ test->false_label(),
+ test->true_label(),
+ test->fall_through());
+ context()->Plug(test->true_label(), test->false_label());
} else {
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- // Notice that the labels are swapped.
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_false, &if_true, &fall_through);
- if (context()->IsTest()) ForwardBailoutToChild(expr);
- VisitForControl(expr->expression(), if_true, if_false, fall_through);
- context()->Plug(if_false, if_true); // Labels swapped.
+ // We handle value contexts explicitly rather than simply visiting
+ // for control and plugging the control flow into the context,
+ // because we need to prepare a pair of extra administrative AST ids
+ // for the optimizing compiler.
+ ASSERT(context()->IsAccumulatorValue() || context()->IsStackValue());
+ Label materialize_true, materialize_false, done;
+ VisitForControl(expr->expression(),
+ &materialize_false,
+ &materialize_true,
+ &materialize_true);
+ __ bind(&materialize_true);
+ PrepareForBailoutForId(expr->MaterializeTrueId(), NO_REGISTERS);
+ if (context()->IsAccumulatorValue()) {
+ __ LoadRoot(rax, Heap::kTrueValueRootIndex);
+ } else {
+ __ PushRoot(Heap::kTrueValueRootIndex);
+ }
+ __ jmp(&done, Label::kNear);
+ __ bind(&materialize_false);
+ PrepareForBailoutForId(expr->MaterializeFalseId(), NO_REGISTERS);
+ if (context()->IsAccumulatorValue()) {
+ __ LoadRoot(rax, Heap::kFalseValueRootIndex);
+ } else {
+ __ PushRoot(Heap::kFalseValueRootIndex);
+ }
+ __ bind(&done);
}
break;
}
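This rewrite is also why the AST grew MaterializeTrueId and MaterializeFalseId: in a value context, '!x' becomes a branch followed by two materialization points, and optimized code may deoptimize into either one, so each point needs its own bailout id in the unoptimized code. Schematically, with the labels used above:

    //   VisitForControl(x, if_true = &materialize_false,
    //                      if_false = &materialize_true)   // labels swapped for NOT
    // materialize_true:                                    // x was falsy
    //   PrepareForBailoutForId(MaterializeTrueId());  result = true;  goto done
    // materialize_false:                                   // x was truthy
    //   PrepareForBailoutForId(MaterializeFalseId()); result = false;
    // done: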
@@ -3760,9 +3964,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
case NAMED_PROPERTY: {
__ Move(rcx, prop->key()->AsLiteral()->handle());
__ pop(rdx);
- Handle<Code> ic = is_strict_mode()
- ? isolate()->builtins()->StoreIC_Initialize_Strict()
- : isolate()->builtins()->StoreIC_Initialize();
+ Handle<Code> ic = is_classic_mode()
+ ? isolate()->builtins()->StoreIC_Initialize()
+ : isolate()->builtins()->StoreIC_Initialize_Strict();
__ call(ic, RelocInfo::CODE_TARGET, expr->id());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
@@ -3777,9 +3981,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
case KEYED_PROPERTY: {
__ pop(rcx);
__ pop(rdx);
- Handle<Code> ic = is_strict_mode()
- ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
- : isolate()->builtins()->KeyedStoreIC_Initialize();
+ Handle<Code> ic = is_classic_mode()
+ ? isolate()->builtins()->KeyedStoreIC_Initialize()
+ : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
__ call(ic, RelocInfo::CODE_TARGET, expr->id());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
@@ -3827,20 +4031,25 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
context()->Plug(rax);
} else {
// This expression cannot throw a reference error at the top level.
- VisitInCurrentContext(expr);
+ VisitInDuplicateContext(expr);
}
}
void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
- Handle<String> check,
- Label* if_true,
- Label* if_false,
- Label* fall_through) {
+ Expression* sub_expr,
+ Handle<String> check) {
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
{ AccumulatorValueContext context(this);
- VisitForTypeofValue(expr);
+ VisitForTypeofValue(sub_expr);
}
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
if (check->Equals(isolate()->heap()->number_symbol())) {
__ JumpIfSmi(rax, if_true);
@@ -3875,9 +4084,11 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
Split(not_zero, if_true, if_false, fall_through);
} else if (check->Equals(isolate()->heap()->function_symbol())) {
__ JumpIfSmi(rax, if_false);
- STATIC_ASSERT(LAST_CALLABLE_SPEC_OBJECT_TYPE == LAST_TYPE);
- __ CmpObjectType(rax, FIRST_CALLABLE_SPEC_OBJECT_TYPE, rdx);
- Split(above_equal, if_true, if_false, fall_through);
+ STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
+ __ CmpObjectType(rax, JS_FUNCTION_TYPE, rdx);
+ __ j(equal, if_true);
+ __ CmpInstanceType(rdx, JS_FUNCTION_PROXY_TYPE);
+ Split(equal, if_true, if_false, fall_through);
} else if (check->Equals(isolate()->heap()->object_symbol())) {
__ JumpIfSmi(rax, if_false);
if (!FLAG_harmony_typeof) {
@@ -3895,18 +4106,7 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
} else {
if (if_false != fall_through) __ jmp(if_false);
}
-}
-
-
-void FullCodeGenerator::EmitLiteralCompareUndefined(Expression* expr,
- Label* if_true,
- Label* if_false,
- Label* fall_through) {
- VisitForAccumulatorValue(expr);
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
-
- __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
- Split(equal, if_true, if_false, fall_through);
+ context()->Plug(if_true, if_false);
}
@@ -3914,6 +4114,10 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
Comment cmnt(masm_, "[ CompareOperation");
SetSourcePosition(expr->position());
+ // First we try a fast inlined version of the compare when one of
+ // the operands is a literal.
+ if (TryLiteralCompare(expr)) return;
+
// Always perform the comparison for its control flow. Pack the result
// into the expression's context after the comparison is performed.
Label materialize_true, materialize_false;
@@ -3923,20 +4127,13 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
- // First we try a fast inlined version of the compare when one of
- // the operands is a literal.
- if (TryLiteralCompare(expr, if_true, if_false, fall_through)) {
- context()->Plug(if_true, if_false);
- return;
- }
-
Token::Value op = expr->op();
VisitForStackValue(expr->left());
switch (op) {
case Token::IN:
VisitForStackValue(expr->right());
__ InvokeBuiltin(Builtins::IN, CALL_FUNCTION);
- PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
+ PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
__ CompareRoot(rax, Heap::kTrueValueRootIndex);
Split(equal, if_true, if_false, fall_through);
break;
@@ -3945,7 +4142,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
VisitForStackValue(expr->right());
InstanceofStub stub(InstanceofStub::kNoFlags);
__ CallStub(&stub);
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
__ testq(rax, rax);
// The stub returns 0 for true.
Split(zero, if_true, if_false, fall_through);
@@ -3959,33 +4156,25 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
case Token::EQ_STRICT:
case Token::EQ:
cc = equal;
- __ pop(rdx);
break;
case Token::LT:
cc = less;
- __ pop(rdx);
break;
case Token::GT:
- // Reverse left and right sizes to obtain ECMA-262 conversion order.
- cc = less;
- __ movq(rdx, result_register());
- __ pop(rax);
+ cc = greater;
break;
case Token::LTE:
- // Reverse left and right sizes to obtain ECMA-262 conversion order.
- cc = greater_equal;
- __ movq(rdx, result_register());
- __ pop(rax);
+ cc = less_equal;
break;
case Token::GTE:
cc = greater_equal;
- __ pop(rdx);
break;
case Token::IN:
case Token::INSTANCEOF:
default:
UNREACHABLE();
}
+ __ pop(rdx);
bool inline_smi_code = ShouldInlineSmiCase(op);
JumpPatchSite patch_site(masm_);
@@ -4005,7 +4194,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
__ call(ic, RelocInfo::CODE_TARGET, expr->id());
patch_site.EmitPatchInfo();
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
__ testq(rax, rax);
Split(cc, if_true, if_false, fall_through);
}
@@ -4017,8 +4206,9 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
}
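Note the disappearance of the operand swap for GT and LTE: ES5 requires the left operand to be evaluated and coerced first, which the old code preserved by swapping registers and reusing 'less'/'greater_equal'. The compare IC now implements all four orderings directly (see CompareIC::ComputeCondition near the end of ic-x64.cc below), so each token maps straight to a condition and the left operand is popped into rdx uniformly. The mapping, as a self-contained sketch:

    enum Condition { less, greater, less_equal, greater_equal, equal };
    enum class Token { EQ, EQ_STRICT, LT, GT, LTE, GTE };

    Condition TokenToCondition(Token op) {
      switch (op) {
        case Token::EQ:
        case Token::EQ_STRICT: return equal;
        case Token::LT:  return less;
        case Token::GT:  return greater;      // was: swap operands + 'less'
        case Token::LTE: return less_equal;   // was: swap operands + 'greater_equal'
        case Token::GTE: return greater_equal;
      }
      return equal;  // unreachable
    }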
-void FullCodeGenerator::VisitCompareToNull(CompareToNull* expr) {
- Comment cmnt(masm_, "[ CompareToNull");
+void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
+ Expression* sub_expr,
+ NilValue nil) {
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
@@ -4026,14 +4216,20 @@ void FullCodeGenerator::VisitCompareToNull(CompareToNull* expr) {
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
- VisitForAccumulatorValue(expr->expression());
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
- __ CompareRoot(rax, Heap::kNullValueRootIndex);
- if (expr->is_strict()) {
+ VisitForAccumulatorValue(sub_expr);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Heap::RootListIndex nil_value = nil == kNullValue ?
+ Heap::kNullValueRootIndex :
+ Heap::kUndefinedValueRootIndex;
+ __ CompareRoot(rax, nil_value);
+ if (expr->op() == Token::EQ_STRICT) {
Split(equal, if_true, if_false, fall_through);
} else {
+ Heap::RootListIndex other_nil_value = nil == kNullValue ?
+ Heap::kUndefinedValueRootIndex :
+ Heap::kNullValueRootIndex;
__ j(equal, if_true);
- __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
+ __ CompareRoot(rax, other_nil_value);
__ j(equal, if_true);
__ JumpIfSmi(rax, if_false);
// It can be an undetectable object.
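CompareToNull, previously its own AST node, is folded into the general literal-compare path: EmitLiteralCompareNil handles 'x == null', 'x == undefined', and their strict variants, parameterized by NilValue. For the non-strict operators the other nil value compares equal too, as does any undetectable object. The decision procedure, modeled as self-contained C++ with an invented tagged-value stand-in:

    enum NilValue { kNullValue, kUndefinedValue };

    struct Value {  // illustrative stand-in for a tagged JS value
      bool is_null, is_undefined, is_smi, is_undetectable;
    };

    bool CompareNil(const Value& v, NilValue nil, bool strict) {
      if (strict) {  // x === null  /  x === undefined
        return nil == kNullValue ? v.is_null : v.is_undefined;
      }
      if (v.is_null || v.is_undefined) return true;  // either nil matches '=='
      if (v.is_smi) return false;
      return v.is_undetectable;  // e.g. document.all compares == null
    }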
diff --git a/deps/v8/src/x64/ic-x64.cc b/deps/v8/src/x64/ic-x64.cc
index 9d55594dc..3a577530d 100644
--- a/deps/v8/src/x64/ic-x64.cc
+++ b/deps/v8/src/x64/ic-x64.cc
@@ -221,7 +221,7 @@ static void GenerateDictionaryStore(MacroAssembler* masm,
// Update write barrier. Make sure not to clobber the value.
__ movq(scratch0, value);
- __ RecordWrite(elements, scratch1, scratch0);
+ __ RecordWrite(elements, scratch1, scratch0, kDontSaveFPRegs);
}
@@ -531,14 +531,12 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
Register receiver = rdx;
Register index = rax;
- Register scratch1 = rbx;
- Register scratch2 = rcx;
+ Register scratch = rcx;
Register result = rax;
StringCharAtGenerator char_at_generator(receiver,
index,
- scratch1,
- scratch2,
+ scratch,
result,
&miss, // When not a string.
&miss, // When not a number.
@@ -606,45 +604,40 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
// -- rdx : receiver
// -- rsp[0] : return address
// -----------------------------------
- Label slow, slow_with_tagged_index, fast, array, extra;
+ Label slow, slow_with_tagged_index, fast, array, extra, check_extra_double;
+ Label fast_object_with_map_check, fast_object_without_map_check;
+ Label fast_double_with_map_check, fast_double_without_map_check;
// Check that the object isn't a smi.
__ JumpIfSmi(rdx, &slow_with_tagged_index);
// Get the map from the receiver.
- __ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
+ __ movq(r9, FieldOperand(rdx, HeapObject::kMapOffset));
// Check that the receiver does not require access checks. We need
// to do this because this generic stub does not perform map checks.
- __ testb(FieldOperand(rbx, Map::kBitFieldOffset),
+ __ testb(FieldOperand(r9, Map::kBitFieldOffset),
Immediate(1 << Map::kIsAccessCheckNeeded));
__ j(not_zero, &slow_with_tagged_index);
// Check that the key is a smi.
__ JumpIfNotSmi(rcx, &slow_with_tagged_index);
__ SmiToInteger32(rcx, rcx);
- __ CmpInstanceType(rbx, JS_ARRAY_TYPE);
+ __ CmpInstanceType(r9, JS_ARRAY_TYPE);
__ j(equal, &array);
// Check that the object is some kind of JSObject.
- __ CmpInstanceType(rbx, FIRST_JS_RECEIVER_TYPE);
+ __ CmpInstanceType(r9, FIRST_JS_OBJECT_TYPE);
__ j(below, &slow);
- __ CmpInstanceType(rbx, JS_PROXY_TYPE);
- __ j(equal, &slow);
- __ CmpInstanceType(rbx, JS_FUNCTION_PROXY_TYPE);
- __ j(equal, &slow);
// Object case: Check key against length in the elements array.
// rax: value
// rdx: JSObject
// rcx: index
__ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
- // Check that the object is in fast mode and writable.
- __ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
- Heap::kFixedArrayMapRootIndex);
- __ j(not_equal, &slow);
+ // Check array bounds.
__ SmiCompareInteger32(FieldOperand(rbx, FixedArray::kLengthOffset), rcx);
// rax: value
// rbx: FixedArray
// rcx: index
- __ j(above, &fast);
+ __ j(above, &fast_object_with_map_check);
// Slow case: call runtime.
__ bind(&slow);
@@ -666,9 +659,20 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
__ SmiCompareInteger32(FieldOperand(rbx, FixedArray::kLengthOffset), rcx);
__ j(below_equal, &slow);
// Increment index to get new length.
+ __ movq(rdi, FieldOperand(rbx, HeapObject::kMapOffset));
+ __ CompareRoot(rdi, Heap::kFixedArrayMapRootIndex);
+ __ j(not_equal, &check_extra_double);
__ leal(rdi, Operand(rcx, 1));
__ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rdi);
- __ jmp(&fast);
+ __ jmp(&fast_object_without_map_check);
+
+ __ bind(&check_extra_double);
+ // rdi: elements array's map
+ __ CompareRoot(rdi, Heap::kFixedDoubleArrayMapRootIndex);
+ __ j(not_equal, &slow);
+ __ leal(rdi, Operand(rcx, 1));
+ __ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rdi);
+ __ jmp(&fast_double_without_map_check);
// Array case: Get the length and the elements array from the JS
// array. Check that the array is in fast mode (and writable); if it
@@ -678,9 +682,6 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
// rdx: receiver (a JSArray)
// rcx: index
__ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
- __ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
- Heap::kFixedArrayMapRootIndex);
- __ j(not_equal, &slow);
// Check the key against the length in the array, compute the
// address to store into and fall through to fast case.
@@ -688,30 +689,54 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
__ j(below_equal, &extra);
// Fast case: Do the store.
- __ bind(&fast);
+ __ bind(&fast_object_with_map_check);
// rax: value
// rbx: receiver's elements array (a FixedArray)
// rcx: index
+ // rdx: receiver (a JSArray)
+ __ movq(rdi, FieldOperand(rbx, HeapObject::kMapOffset));
+ __ CompareRoot(rdi, Heap::kFixedArrayMapRootIndex);
+ __ j(not_equal, &fast_double_with_map_check);
+ __ bind(&fast_object_without_map_check);
+ // Smi stores don't require further checks.
Label non_smi_value;
+ __ JumpIfNotSmi(rax, &non_smi_value);
+ // It's irrelevant whether the array is smi-only or not when writing a smi.
__ movq(FieldOperand(rbx, rcx, times_pointer_size, FixedArray::kHeaderSize),
rax);
- __ JumpIfNotSmi(rax, &non_smi_value, Label::kNear);
__ ret(0);
+
__ bind(&non_smi_value);
- // Slow case that needs to retain rcx for use by RecordWrite.
- // Update write barrier for the elements array address.
- __ movq(rdx, rax);
- __ RecordWriteNonSmi(rbx, 0, rdx, rcx);
+ // Writing a non-smi, check whether array allows non-smi elements.
+ // r9: receiver's map
+ __ CheckFastObjectElements(r9, &slow, Label::kNear);
+ __ movq(FieldOperand(rbx, rcx, times_pointer_size, FixedArray::kHeaderSize),
+ rax);
+ __ movq(rdx, rax); // Preserve the value which is returned.
+ __ RecordWriteArray(
+ rbx, rdx, rcx, kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+ __ ret(0);
+
+ __ bind(&fast_double_with_map_check);
+ // Check for fast double array case. If this fails, call through to the
+ // runtime.
+ // rdi: elements array's map
+ __ CompareRoot(rdi, Heap::kFixedDoubleArrayMapRootIndex);
+ __ j(not_equal, &slow);
+ __ bind(&fast_double_without_map_check);
+ // If the value is a number, store it as a double in the FastDoubleElements
+ // array.
+ __ StoreNumberToDoubleElements(rax, rbx, rcx, xmm0, &slow);
__ ret(0);
}
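With smi-only and double elements kinds in play, the generic keyed store can no longer assume a FixedArray backing store, so the single fast path splits into a dispatch on the elements map and the stored value. The decision table the rewritten stub implements, summarized:

    // value is a smi                       -> store directly, no barrier,
    //                                         whatever the elements kind
    // value is a heap object and
    //   elements map is FixedArray         -> receiver map must allow fast
    //                                         object elements (else &slow);
    //                                         store + RecordWriteArray barrier
    // elements map is FixedDoubleArray and
    //   value is a number                  -> store unboxed through
    //                                         StoreNumberToDoubleElements
    // anything else                        -> miss to the runtime (&slow)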
// The generated code does not accept smi keys.
// The generated code falls through if both probes miss.
-static void GenerateMonomorphicCacheProbe(MacroAssembler* masm,
- int argc,
- Code::Kind kind,
- Code::ExtraICState extra_ic_state) {
+void CallICBase::GenerateMonomorphicCacheProbe(MacroAssembler* masm,
+ int argc,
+ Code::Kind kind,
+ Code::ExtraICState extra_state) {
// ----------- S t a t e -------------
// rcx : function name
// rdx : receiver
@@ -721,7 +746,7 @@ static void GenerateMonomorphicCacheProbe(MacroAssembler* masm,
// Probe the stub cache.
Code::Flags flags = Code::ComputeFlags(kind,
MONOMORPHIC,
- extra_ic_state,
+ extra_state,
NORMAL,
argc);
Isolate::Current()->stub_cache()->GenerateProbe(masm, flags, rdx, rcx, rbx,
@@ -794,7 +819,7 @@ static void GenerateFunctionTailCall(MacroAssembler* masm,
// The generated code falls through if the call should be handled by runtime.
-static void GenerateCallNormal(MacroAssembler* masm, int argc) {
+void CallICBase::GenerateNormal(MacroAssembler* masm, int argc) {
// ----------- S t a t e -------------
// rcx : function name
// rsp[0] : return address
@@ -821,10 +846,10 @@ static void GenerateCallNormal(MacroAssembler* masm, int argc) {
}
-static void GenerateCallMiss(MacroAssembler* masm,
- int argc,
- IC::UtilityId id,
- Code::ExtraICState extra_ic_state) {
+void CallICBase::GenerateMiss(MacroAssembler* masm,
+ int argc,
+ IC::UtilityId id,
+ Code::ExtraICState extra_state) {
// ----------- S t a t e -------------
// rcx : function name
// rsp[0] : return address
@@ -846,21 +871,22 @@ static void GenerateCallMiss(MacroAssembler* masm,
__ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
// Enter an internal frame.
- __ EnterInternalFrame();
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
- // Push the receiver and the name of the function.
- __ push(rdx);
- __ push(rcx);
+ // Push the receiver and the name of the function.
+ __ push(rdx);
+ __ push(rcx);
- // Call the entry.
- CEntryStub stub(1);
- __ Set(rax, 2);
- __ LoadAddress(rbx, ExternalReference(IC_Utility(id), masm->isolate()));
- __ CallStub(&stub);
+ // Call the entry.
+ CEntryStub stub(1);
+ __ Set(rax, 2);
+ __ LoadAddress(rbx, ExternalReference(IC_Utility(id), masm->isolate()));
+ __ CallStub(&stub);
- // Move result to rdi and exit the internal frame.
- __ movq(rdi, rax);
- __ LeaveInternalFrame();
+ // Move result to rdi and exit the internal frame.
+ __ movq(rdi, rax);
+ }
// Check if the receiver is a global object of some sort.
// This can happen only for regular CallIC but not KeyedCallIC.
@@ -881,7 +907,7 @@ static void GenerateCallMiss(MacroAssembler* masm,
}
// Invoke the function.
- CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state)
+ CallKind call_kind = CallICBase::Contextual::decode(extra_state)
? CALL_AS_FUNCTION
: CALL_AS_METHOD;
ParameterCount actual(argc);
@@ -913,39 +939,6 @@ void CallIC::GenerateMegamorphic(MacroAssembler* masm,
}
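The EnterInternalFrame/LeaveInternalFrame pairs throughout this file become FrameScope blocks: the frame is opened in a constructor and closed in a destructor, so it cannot be leaked on any exit path, and the assembler can assert that frame-dependent code is only emitted inside some scope. A minimal self-contained sketch of the idiom, with simplified types:

    struct MacroAssembler {  // illustrative stand-in
      void EnterFrame(int /*type*/) { /* emit frame setup code */ }
      void LeaveFrame() { /* emit frame teardown code */ }
    };

    class FrameScope {
     public:
      FrameScope(MacroAssembler* masm, int type) : masm_(masm) {
        masm_->EnterFrame(type);  // emitted on scope entry
      }
      ~FrameScope() { masm_->LeaveFrame(); }  // emitted when the block closes
     private:
      MacroAssembler* masm_;
    };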
-void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
- // ----------- S t a t e -------------
- // rcx : function name
- // rsp[0] : return address
- // rsp[8] : argument argc
- // rsp[16] : argument argc - 1
- // ...
- // rsp[argc * 8] : argument 1
- // rsp[(argc + 1) * 8] : argument 0 = receiver
- // -----------------------------------
-
- GenerateCallNormal(masm, argc);
- GenerateMiss(masm, argc, Code::kNoExtraICState);
-}
-
-
-void CallIC::GenerateMiss(MacroAssembler* masm,
- int argc,
- Code::ExtraICState extra_ic_state) {
- // ----------- S t a t e -------------
- // rcx : function name
- // rsp[0] : return address
- // rsp[8] : argument argc
- // rsp[16] : argument argc - 1
- // ...
- // rsp[argc * 8] : argument 1
- // rsp[(argc + 1) * 8] : argument 0 = receiver
- // -----------------------------------
-
- GenerateCallMiss(masm, argc, IC::kCallIC_Miss, extra_ic_state);
-}
-
-
void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
// ----------- S t a t e -------------
// rcx : function name
@@ -1002,13 +995,14 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
// This branch is taken when calling KeyedCallIC_Miss is neither required
// nor beneficial.
__ IncrementCounter(counters->keyed_call_generic_slow_load(), 1);
- __ EnterInternalFrame();
- __ push(rcx); // save the key
- __ push(rdx); // pass the receiver
- __ push(rcx); // pass the key
- __ CallRuntime(Runtime::kKeyedGetProperty, 2);
- __ pop(rcx); // restore the key
- __ LeaveInternalFrame();
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ push(rcx); // save the key
+ __ push(rdx); // pass the receiver
+ __ push(rcx); // pass the key
+ __ CallRuntime(Runtime::kKeyedGetProperty, 2);
+ __ pop(rcx); // restore the key
+ }
__ movq(rdi, rax);
__ jmp(&do_call);
@@ -1072,27 +1066,12 @@ void KeyedCallIC::GenerateNormal(MacroAssembler* masm, int argc) {
__ JumpIfSmi(rcx, &miss);
Condition cond = masm->IsObjectStringType(rcx, rax, rax);
__ j(NegateCondition(cond), &miss);
- GenerateCallNormal(masm, argc);
+ CallICBase::GenerateNormal(masm, argc);
__ bind(&miss);
GenerateMiss(masm, argc);
}
-void KeyedCallIC::GenerateMiss(MacroAssembler* masm, int argc) {
- // ----------- S t a t e -------------
- // rcx : function name
- // rsp[0] : return address
- // rsp[8] : argument argc
- // rsp[16] : argument argc - 1
- // ...
- // rsp[argc * 8] : argument 1
- // rsp[(argc + 1) * 8] : argument 0 = receiver
- // -----------------------------------
-
- GenerateCallMiss(masm, argc, IC::kKeyedCallIC_Miss, Code::kNoExtraICState);
-}
-
-
static Operand GenerateMappedArgumentsLookup(MacroAssembler* masm,
Register object,
Register key,
@@ -1212,7 +1191,12 @@ void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
__ movq(mapped_location, rax);
__ lea(r9, mapped_location);
__ movq(r8, rax);
- __ RecordWrite(rbx, r9, r8);
+ __ RecordWrite(rbx,
+ r9,
+ r8,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ INLINE_SMI_CHECK);
__ Ret();
__ bind(&notin);
// The unmapped lookup expects that the parameter map is in rbx.
@@ -1221,7 +1205,12 @@ void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
__ movq(unmapped_location, rax);
__ lea(r9, unmapped_location);
__ movq(r8, rax);
- __ RecordWrite(rbx, r9, r8);
+ __ RecordWrite(rbx,
+ r9,
+ r8,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ INLINE_SMI_CHECK);
__ Ret();
__ bind(&slow);
GenerateMiss(masm, false);
@@ -1562,6 +1551,51 @@ void KeyedStoreIC::GenerateMiss(MacroAssembler* masm, bool force_generic) {
}
+void KeyedStoreIC::GenerateTransitionElementsSmiToDouble(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- rbx : target map
+ // -- rdx : receiver
+ // -- rsp[0] : return address
+ // -----------------------------------
+ // Must return the modified receiver in rax.
+ if (!FLAG_trace_elements_transitions) {
+ Label fail;
+ ElementsTransitionGenerator::GenerateSmiOnlyToDouble(masm, &fail);
+ __ movq(rax, rdx);
+ __ Ret();
+ __ bind(&fail);
+ }
+
+ __ pop(rbx);
+ __ push(rdx);
+ __ push(rbx); // return address
+ __ TailCallRuntime(Runtime::kTransitionElementsSmiToDouble, 1, 1);
+}
+
+
+void KeyedStoreIC::GenerateTransitionElementsDoubleToObject(
+ MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- rbx : target map
+ // -- rdx : receiver
+ // -- rsp[0] : return address
+ // -----------------------------------
+ // Must return the modified receiver in rax.
+ if (!FLAG_trace_elements_transitions) {
+ Label fail;
+ ElementsTransitionGenerator::GenerateDoubleToObject(masm, &fail);
+ __ movq(rax, rdx);
+ __ Ret();
+ __ bind(&fail);
+ }
+
+ __ pop(rbx);
+ __ push(rdx);
+ __ push(rbx); // return address
+ __ TailCallRuntime(Runtime::kTransitionElementsDoubleToObject, 1, 1);
+}
+
+
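These two stubs cover the one-way transitions of the new elements-kind lattice; each tries the inline transition generator first and, on failure (or when transition tracing is on), re-shuffles the return address around the receiver and tail-calls the runtime. The lattice, as this commit sets it up; transitions only go toward the more general kind, never back:

    // FAST_SMI_ONLY_ELEMENTS --store a heap number--> FAST_DOUBLE_ELEMENTS
    // FAST_DOUBLE_ELEMENTS   --store other object---> FAST_ELEMENTS
    // FAST_SMI_ONLY_ELEMENTS --store other object---> FAST_ELEMENTS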
#undef __
@@ -1573,11 +1607,9 @@ Condition CompareIC::ComputeCondition(Token::Value op) {
case Token::LT:
return less;
case Token::GT:
- // Reverse left and right operands to obtain ECMA-262 conversion order.
- return less;
+ return greater;
case Token::LTE:
- // Reverse left and right operands to obtain ECMA-262 conversion order.
- return greater_equal;
+ return less_equal;
case Token::GTE:
return greater_equal;
default:
diff --git a/deps/v8/src/x64/lithium-codegen-x64.cc b/deps/v8/src/x64/lithium-codegen-x64.cc
index 9064a266e..cbbe65f0c 100644
--- a/deps/v8/src/x64/lithium-codegen-x64.cc
+++ b/deps/v8/src/x64/lithium-codegen-x64.cc
@@ -43,35 +43,22 @@ class SafepointGenerator : public CallWrapper {
public:
SafepointGenerator(LCodeGen* codegen,
LPointerMap* pointers,
- int deoptimization_index)
+ Safepoint::DeoptMode mode)
: codegen_(codegen),
pointers_(pointers),
- deoptimization_index_(deoptimization_index) { }
+ deopt_mode_(mode) { }
virtual ~SafepointGenerator() { }
- virtual void BeforeCall(int call_size) const {
- ASSERT(call_size >= 0);
- // Ensure that we have enough space after the previous safepoint position
- // for the jump generated there.
- int call_end = codegen_->masm()->pc_offset() + call_size;
- int prev_jump_end = codegen_->LastSafepointEnd() + kMinSafepointSize;
- if (call_end < prev_jump_end) {
- int padding_size = prev_jump_end - call_end;
- STATIC_ASSERT(kMinSafepointSize <= 9); // One multibyte nop is enough.
- codegen_->masm()->nop(padding_size);
- }
- }
+ virtual void BeforeCall(int call_size) const { }
virtual void AfterCall() const {
- codegen_->RecordSafepoint(pointers_, deoptimization_index_);
+ codegen_->RecordSafepoint(pointers_, deopt_mode_);
}
private:
- static const int kMinSafepointSize =
- MacroAssembler::kShortCallInstructionLength;
LCodeGen* codegen_;
LPointerMap* pointers_;
- int deoptimization_index_;
+ Safepoint::DeoptMode deopt_mode_;
};
@@ -81,6 +68,12 @@ bool LCodeGen::GenerateCode() {
HPhase phase("Code generation", chunk());
ASSERT(is_unused());
status_ = GENERATING;
+
+ // Open a frame scope to indicate that there is a frame on the stack. The
+ // MANUAL indicates that the scope shouldn't actually generate code to set up
+ // the frame (that is done in GeneratePrologue).
+ FrameScope frame_scope(masm_, StackFrame::MANUAL);
+
return GeneratePrologue() &&
GenerateBody() &&
GenerateDeferredCode() &&
@@ -94,7 +87,6 @@ void LCodeGen::FinishCode(Handle<Code> code) {
code->set_stack_slots(GetStackSlotCount());
code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
PopulateDeoptimizationData(code);
- Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(code);
}
@@ -145,7 +137,7 @@ bool LCodeGen::GeneratePrologue() {
// when called as functions (without an explicit receiver
// object). rcx is zero for method calls and non-zero for function
// calls.
- if (info_->is_strict_mode() || info_->is_native()) {
+ if (!info_->is_classic_mode() || info_->is_native()) {
Label ok;
__ testq(rcx, rcx);
__ j(zero, &ok, Label::kNear);
@@ -200,7 +192,7 @@ bool LCodeGen::GeneratePrologue() {
} else {
__ CallRuntime(Runtime::kNewFunctionContext, 1);
}
- RecordSafepoint(Safepoint::kNoDeoptimizationIndex);
+ RecordSafepoint(Safepoint::kNoLazyDeopt);
// Context is returned in both rax and rsi. It replaces the context
// passed to us. It's saved in the stack and kept live in rsi.
__ movq(Operand(rbp, StandardFrameConstants::kContextOffset), rsi);
@@ -217,11 +209,8 @@ bool LCodeGen::GeneratePrologue() {
// Store it in the context.
int context_offset = Context::SlotOffset(var->index());
__ movq(Operand(rsi, context_offset), rax);
- // Update the write barrier. This clobbers all involved
- // registers, so we have use a third register to avoid
- // clobbering rsi.
- __ movq(rcx, rsi);
- __ RecordWrite(rcx, context_offset, rax, rbx);
+ // Update the write barrier. This clobbers rax and rbx.
+ __ RecordWriteContextSlot(rsi, context_offset, rax, rbx, kSaveFPRegs);
}
}
Comment(";;; End allocate local context");
@@ -252,19 +241,11 @@ bool LCodeGen::GenerateBody() {
instr->CompileToNative(this);
}
}
+ EnsureSpaceForLazyDeopt();
return !is_aborted();
}
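The old scheme padded the code stream between safepoints (and after deferred code) so the lazy-deopt patcher always had room; it is replaced by one EnsureSpaceForLazyDeopt() call at the end of the body plus recording each lazy-deopt pc in the deopt data (SetPc below). A sketch of what the helper must guarantee; the helper name is from the diff, while the last_lazy_deopt_pc_ member and the body are reconstructed assumptions, not quoted source:

    void LCodeGen::EnsureSpaceForLazyDeopt() {
      // Patching a lazy deopt overwrites Deoptimizer::patch_size() bytes at
      // the last recorded site; pad with nops until that much room exists.
      int patch_size = Deoptimizer::patch_size();
      int current_pc = masm()->pc_offset();
      if (current_pc < last_lazy_deopt_pc_ + patch_size) {
        int padding = last_lazy_deopt_pc_ + patch_size - current_pc;
        while (padding > 0) {
          int nop_size = padding > 9 ? 9 : padding;  // longest x64 nop: 9 bytes
          masm()->nop(nop_size);
          padding -= nop_size;
        }
      }
      last_lazy_deopt_pc_ = masm()->pc_offset();
    }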
-LInstruction* LCodeGen::GetNextInstruction() {
- if (current_instruction_ < instructions_->length() - 1) {
- return instructions_->at(current_instruction_ + 1);
- } else {
- return NULL;
- }
-}
-
-
bool LCodeGen::GenerateJumpTable() {
for (int i = 0; i < jump_table_.length(); i++) {
__ bind(&jump_table_[i].label);
@@ -280,21 +261,12 @@ bool LCodeGen::GenerateDeferredCode() {
for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
LDeferredCode* code = deferred_[i];
__ bind(code->entry());
+ Comment(";;; Deferred code @%d: %s.",
+ code->instruction_index(),
+ code->instr()->Mnemonic());
code->Generate();
__ jmp(code->exit());
}
-
- // Pad code to ensure that the last piece of deferred code have
- // room for lazy bailout.
- while ((masm()->pc_offset() - LastSafepointEnd())
- < Deoptimizer::patch_size()) {
- int padding = masm()->pc_offset() - LastSafepointEnd();
- if (padding > 9) {
- __ nop(9);
- } else {
- __ nop(padding);
- }
- }
}
// Deferred code is the last part of the instruction sequence. Mark
@@ -306,20 +278,6 @@ bool LCodeGen::GenerateDeferredCode() {
bool LCodeGen::GenerateSafepointTable() {
ASSERT(is_done());
- // Ensure that there is space at the end of the code to write a number
- // of jump instructions, as well as to afford writing a call near the end
- // of the code.
- // The jumps are used when there isn't room in the code stream to write
- // a long call instruction. Instead it writes a shorter call to a
- // jump instruction in the same code object.
- // The calls are used when lazy deoptimizing a function and calls to a
- // deoptimization function.
- int short_deopts = safepoints_.CountShortDeoptimizationIntervals(
- static_cast<unsigned>(MacroAssembler::kJumpInstructionLength));
- int byte_count = (short_deopts) * MacroAssembler::kJumpInstructionLength;
- while (byte_count-- > 0) {
- __ int3();
- }
safepoints_.Emit(masm(), GetStackSlotCount());
return !is_aborted();
}
@@ -368,6 +326,12 @@ int LCodeGen::ToInteger32(LConstantOperand* op) const {
}
+double LCodeGen::ToDouble(LConstantOperand* op) const {
+ Handle<Object> value = chunk_->LookupLiteral(op);
+ return value->Number();
+}
+
+
Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
Handle<Object> literal = chunk_->LookupLiteral(op);
ASSERT(chunk_->LookupLiteralRepresentation(op).IsTagged());
@@ -479,7 +443,7 @@ void LCodeGen::CallCodeGeneric(Handle<Code> code,
LPointerMap* pointers = instr->pointer_map();
RecordPosition(pointers->position());
__ call(code, mode);
- RegisterLazyDeoptimization(instr, safepoint_mode, argc);
+ RecordSafepointWithLazyDeopt(instr, safepoint_mode, argc);
// Signal that we don't inline smi code before these stubs in the
// optimizing code generator.
@@ -506,7 +470,7 @@ void LCodeGen::CallRuntime(const Runtime::Function* function,
RecordPosition(pointers->position());
__ CallRuntime(function, num_arguments);
- RegisterLazyDeoptimization(instr, RECORD_SIMPLE_SAFEPOINT, 0);
+ RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT, 0);
}
@@ -516,39 +480,12 @@ void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
__ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
__ CallRuntimeSaveDoubles(id);
RecordSafepointWithRegisters(
- instr->pointer_map(), argc, Safepoint::kNoDeoptimizationIndex);
+ instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
}
-void LCodeGen::RegisterLazyDeoptimization(LInstruction* instr,
- SafepointMode safepoint_mode,
- int argc) {
- // Create the environment to bailout to. If the call has side effects
- // execution has to continue after the call otherwise execution can continue
- // from a previous bailout point repeating the call.
- LEnvironment* deoptimization_environment;
- if (instr->HasDeoptimizationEnvironment()) {
- deoptimization_environment = instr->deoptimization_environment();
- } else {
- deoptimization_environment = instr->environment();
- }
-
- RegisterEnvironmentForDeoptimization(deoptimization_environment);
- if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
- ASSERT(argc == 0);
- RecordSafepoint(instr->pointer_map(),
- deoptimization_environment->deoptimization_index());
- } else {
- ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS);
- RecordSafepointWithRegisters(
- instr->pointer_map(),
- argc,
- deoptimization_environment->deoptimization_index());
- }
-}
-
-
-void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment) {
+void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
+ Safepoint::DeoptMode mode) {
if (!environment->HasBeenRegistered()) {
// Physical stack frame layout:
// -x ............. -4 0 ..................................... y
@@ -570,14 +507,17 @@ void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment) {
Translation translation(&translations_, frame_count);
WriteTranslation(environment, &translation);
int deoptimization_index = deoptimizations_.length();
- environment->Register(deoptimization_index, translation.index());
+ int pc_offset = masm()->pc_offset();
+ environment->Register(deoptimization_index,
+ translation.index(),
+ (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
deoptimizations_.Add(environment);
}
}
void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
- RegisterEnvironmentForDeoptimization(environment);
+ RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
ASSERT(environment->HasBeenRegistered());
int id = environment->deoptimization_index();
Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER);
@@ -629,6 +569,7 @@ void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
data->SetArgumentsStackHeight(i,
Smi::FromInt(env->arguments_stack_height()));
+ data->SetPc(i, Smi::FromInt(env->pc_offset()));
}
code->set_deoptimization_data(*data);
}
@@ -660,17 +601,29 @@ void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
}
+void LCodeGen::RecordSafepointWithLazyDeopt(
+ LInstruction* instr, SafepointMode safepoint_mode, int argc) {
+ if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
+ RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
+ } else {
+ ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), argc, Safepoint::kLazyDeopt);
+ }
+}
+
+
void LCodeGen::RecordSafepoint(
LPointerMap* pointers,
Safepoint::Kind kind,
int arguments,
- int deoptimization_index) {
+ Safepoint::DeoptMode deopt_mode) {
ASSERT(kind == expected_safepoint_kind_);
- const ZoneList<LOperand*>* operands = pointers->operands();
+ const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
- kind, arguments, deoptimization_index);
+ kind, arguments, deopt_mode);
for (int i = 0; i < operands->length(); i++) {
LOperand* pointer = operands->at(i);
if (pointer->IsStackSlot()) {
@@ -687,22 +640,21 @@ void LCodeGen::RecordSafepoint(
void LCodeGen::RecordSafepoint(LPointerMap* pointers,
- int deoptimization_index) {
- RecordSafepoint(pointers, Safepoint::kSimple, 0, deoptimization_index);
+ Safepoint::DeoptMode deopt_mode) {
+ RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
}
-void LCodeGen::RecordSafepoint(int deoptimization_index) {
+void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
LPointerMap empty_pointers(RelocInfo::kNoPosition);
- RecordSafepoint(&empty_pointers, deoptimization_index);
+ RecordSafepoint(&empty_pointers, deopt_mode);
}
void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
int arguments,
- int deoptimization_index) {
- RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments,
- deoptimization_index);
+ Safepoint::DeoptMode deopt_mode) {
+ RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
}
@@ -737,12 +689,6 @@ void LCodeGen::DoGap(LGap* gap) {
LParallelMove* move = gap->GetParallelMove(inner_pos);
if (move != NULL) DoParallelMove(move);
}
-
- LInstruction* next = GetNextInstruction();
- if (next != NULL && next->IsLazyBailout()) {
- int pc = masm()->pc_offset();
- safepoints_.SetPcAfterGap(pc);
- }
}
@@ -1520,39 +1466,51 @@ inline Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
}
-void LCodeGen::EmitCmpI(LOperand* left, LOperand* right) {
- if (right->IsConstantOperand()) {
- int32_t value = ToInteger32(LConstantOperand::cast(right));
- if (left->IsRegister()) {
- __ cmpl(ToRegister(left), Immediate(value));
- } else {
- __ cmpl(ToOperand(left), Immediate(value));
- }
- } else if (right->IsRegister()) {
- __ cmpl(ToRegister(left), ToRegister(right));
- } else {
- __ cmpl(ToRegister(left), ToOperand(right));
- }
-}
-
-
void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
LOperand* left = instr->InputAt(0);
LOperand* right = instr->InputAt(1);
int false_block = chunk_->LookupDestination(instr->false_block_id());
int true_block = chunk_->LookupDestination(instr->true_block_id());
+ Condition cc = TokenToCondition(instr->op(), instr->is_double());
- if (instr->is_double()) {
- // Don't base result on EFLAGS when a NaN is involved. Instead
- // jump to the false block.
- __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
- __ j(parity_even, chunk_->GetAssemblyLabel(false_block));
+ if (left->IsConstantOperand() && right->IsConstantOperand()) {
+ // We can statically evaluate the comparison.
+ double left_val = ToDouble(LConstantOperand::cast(left));
+ double right_val = ToDouble(LConstantOperand::cast(right));
+ int next_block =
+ EvalComparison(instr->op(), left_val, right_val) ? true_block
+ : false_block;
+ EmitGoto(next_block);
} else {
- EmitCmpI(left, right);
+ if (instr->is_double()) {
+ // Don't base result on EFLAGS when a NaN is involved. Instead
+ // jump to the false block.
+ __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
+ __ j(parity_even, chunk_->GetAssemblyLabel(false_block));
+ } else {
+ int32_t value;
+ if (right->IsConstantOperand()) {
+ value = ToInteger32(LConstantOperand::cast(right));
+ __ cmpl(ToRegister(left), Immediate(value));
+ } else if (left->IsConstantOperand()) {
+ value = ToInteger32(LConstantOperand::cast(left));
+ if (right->IsRegister()) {
+ __ cmpl(ToRegister(right), Immediate(value));
+ } else {
+ __ cmpl(ToOperand(right), Immediate(value));
+ }
+ // We transposed the operands. Reverse the condition.
+ cc = ReverseCondition(cc);
+ } else {
+ if (right->IsRegister()) {
+ __ cmpl(ToRegister(left), ToRegister(right));
+ } else {
+ __ cmpl(ToRegister(left), ToOperand(right));
+ }
+ }
+ }
+ EmitBranch(true_block, false_block, cc);
}
-
- Condition cc = TokenToCondition(instr->op(), instr->is_double());
- EmitBranch(true_block, false_block, cc);
}
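// A minimal standalone sketch (not V8 code) of why the transposed compare
// above reverses the condition instead of negating it: with a constant left
// operand the emitted instruction is cmp(right, imm), so `left < right` must
// be tested as `right > left`; negating to `right >= left` would wrongly
// accept equality.
#include <cassert>
int main() {
  for (int left = 0; left < 3; ++left) {
    for (int right = 0; right < 3; ++right) {
      assert((left < right) == (right > left));  // ReverseCondition(less)
      if (left == right) {
        assert((left < right) != (right >= left));  // negation is wrong here
      }
    }
  }
}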
@@ -1577,30 +1535,33 @@ void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) {
}
-void LCodeGen::DoIsNullAndBranch(LIsNullAndBranch* instr) {
+void LCodeGen::DoIsNilAndBranch(LIsNilAndBranch* instr) {
Register reg = ToRegister(instr->InputAt(0));
-
int false_block = chunk_->LookupDestination(instr->false_block_id());
+ // If the expression is known to be untagged or a smi, then it's definitely
+ // not null, and it can't be an undetectable object.
if (instr->hydrogen()->representation().IsSpecialization() ||
instr->hydrogen()->type().IsSmi()) {
- // If the expression is known to untagged or smi, then it's definitely
- // not null, and it can't be a an undetectable object.
- // Jump directly to the false block.
EmitGoto(false_block);
return;
}
int true_block = chunk_->LookupDestination(instr->true_block_id());
-
- __ CompareRoot(reg, Heap::kNullValueRootIndex);
- if (instr->is_strict()) {
+ Heap::RootListIndex nil_value = instr->nil() == kNullValue ?
+ Heap::kNullValueRootIndex :
+ Heap::kUndefinedValueRootIndex;
+ __ CompareRoot(reg, nil_value);
+ if (instr->kind() == kStrictEquality) {
EmitBranch(true_block, false_block, equal);
} else {
+ Heap::RootListIndex other_nil_value = instr->nil() == kNullValue ?
+ Heap::kUndefinedValueRootIndex :
+ Heap::kNullValueRootIndex;
Label* true_label = chunk_->GetAssemblyLabel(true_block);
Label* false_label = chunk_->GetAssemblyLabel(false_block);
__ j(equal, true_label);
- __ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
+ __ CompareRoot(reg, other_nil_value);
__ j(equal, true_label);
__ JumpIfSmi(reg, false_label);
// Check for undetectable objects by looking in the bit field in
@@ -1653,6 +1614,30 @@ void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
}
+Condition LCodeGen::EmitIsString(Register input,
+ Register temp1,
+ Label* is_not_string) {
+ __ JumpIfSmi(input, is_not_string);
+ Condition cond = masm_->IsObjectStringType(input, temp1, temp1);
+
+ return cond;
+}
+
+
+void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
+ Register reg = ToRegister(instr->InputAt(0));
+ Register temp = ToRegister(instr->TempAt(0));
+
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
+ Label* false_label = chunk_->GetAssemblyLabel(false_block);
+
+ Condition true_cond = EmitIsString(reg, temp, false_label);
+
+ EmitBranch(true_block, false_block, true_cond);
+}
+
+
void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
@@ -1684,6 +1669,21 @@ void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
}
+void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
+ Token::Value op = instr->op();
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+ Handle<Code> ic = CompareIC::GetUninitialized(op);
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+
+ Condition condition = TokenToCondition(op, false);
+ __ testq(rax, rax);
+
+ EmitBranch(true_block, false_block, condition);
+}
+
+
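// A minimal sketch of the calling convention assumed above (the hypothetical
// Compare() stands in for the CompareIC stub): the stub leaves a value in rax
// whose sign and zeroness encode the result, so `testq rax, rax` followed by
// the token's own condition (e.g. `less` for Token::LT) decides the branch.
#include <cassert>
int Compare(int a, int b) { return (a > b) - (a < b); }  // <0, 0, >0
int main() {
  assert(Compare(1, 2) < 0);   // "less"    -> sign flag set after the test
  assert(Compare(2, 2) == 0);  // "equal"   -> zero flag set after the test
  assert(Compare(3, 2) > 0);   // "greater" -> neither flag set
}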
static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
InstanceType from = instr->from();
InstanceType to = instr->to();
@@ -1752,30 +1752,40 @@ void LCodeGen::EmitClassOfTest(Label* is_true,
Label* is_false,
Handle<String> class_name,
Register input,
- Register temp) {
+ Register temp,
+ Register scratch) {
__ JumpIfSmi(input, is_false);
- __ CmpObjectType(input, FIRST_SPEC_OBJECT_TYPE, temp);
- __ j(below, is_false);
- // Map is now in temp.
- // Functions have class 'Function'.
- __ CmpInstanceType(temp, FIRST_CALLABLE_SPEC_OBJECT_TYPE);
if (class_name->IsEqualTo(CStrVector("Function"))) {
- __ j(above_equal, is_true);
+ // Assuming the following assertions, we can use the same compares to test
+ // for both being a function type and being in the object type range.
+ STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
+ STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
+ FIRST_SPEC_OBJECT_TYPE + 1);
+ STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
+ LAST_SPEC_OBJECT_TYPE - 1);
+ STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+ __ CmpObjectType(input, FIRST_SPEC_OBJECT_TYPE, temp);
+ __ j(below, is_false);
+ __ j(equal, is_true);
+ __ CmpInstanceType(temp, LAST_SPEC_OBJECT_TYPE);
+ __ j(equal, is_true);
} else {
- __ j(above_equal, is_false);
+ // Faster code path to avoid two compares: subtract lower bound from the
+ // actual type and do an unsigned compare with the width of the type range.
+ __ movq(temp, FieldOperand(input, HeapObject::kMapOffset));
+ __ movq(scratch, FieldOperand(temp, Map::kInstanceTypeOffset));
+ __ subb(scratch, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+ __ cmpb(scratch,
+ Immediate(static_cast<int8_t>(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
+ FIRST_NONCALLABLE_SPEC_OBJECT_TYPE)));
+ __ j(above, is_false);
}
+ // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
// Check if the constructor in the map is a function.
__ movq(temp, FieldOperand(temp, Map::kConstructorOffset));
- // As long as LAST_CALLABLE_SPEC_OBJECT_TYPE is the last type and
- // FIRST_CALLABLE_SPEC_OBJECT_TYPE comes right after
- // LAST_NONCALLABLE_SPEC_OBJECT_TYPE, we can avoid checking for the latter.
- STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
- STATIC_ASSERT(FIRST_CALLABLE_SPEC_OBJECT_TYPE ==
- LAST_NONCALLABLE_SPEC_OBJECT_TYPE + 1);
-
// Objects with a non-function constructor have class 'Object'.
__ CmpObjectType(temp, JS_FUNCTION_TYPE, kScratchRegister);
if (class_name->IsEqualTo(CStrVector("Object"))) {
@@ -1804,6 +1814,7 @@ void LCodeGen::EmitClassOfTest(Label* is_true,
void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
Register input = ToRegister(instr->InputAt(0));
Register temp = ToRegister(instr->TempAt(0));
+ Register temp2 = ToRegister(instr->TempAt(1));
Handle<String> class_name = instr->hydrogen()->class_name();
int true_block = chunk_->LookupDestination(instr->true_block_id());
@@ -1812,7 +1823,7 @@ void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
Label* true_label = chunk_->GetAssemblyLabel(true_block);
Label* false_label = chunk_->GetAssemblyLabel(false_block);
- EmitClassOfTest(true_label, false_label, class_name, input, temp);
+ EmitClassOfTest(true_label, false_label, class_name, input, temp, temp2);
EmitBranch(true_block, false_block, equal);
}
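// A standalone sketch (not V8 code) of the one-compare range check used in
// EmitClassOfTest above: subtracting the lower bound makes types below FIRST
// wrap around to large unsigned values, so a single unsigned compare against
// the range width rejects both out-of-range sides at once.
#include <cassert>
#include <cstdint>
bool InRange(uint8_t type, uint8_t first, uint8_t last) {
  return static_cast<uint8_t>(type - first) <=
         static_cast<uint8_t>(last - first);
}
int main() {
  assert(InRange(5, 4, 7));
  assert(!InRange(3, 4, 7));  // 3 - 4 wraps to 255 -> "above" the width
  assert(!InRange(8, 4, 7));
}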
@@ -1851,11 +1862,10 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
LInstanceOfKnownGlobal* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() {
- codegen()->DoDeferredLInstanceOfKnownGlobal(instr_, &map_check_);
+ codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
}
-
+ virtual LInstruction* instr() { return instr_; }
Label* map_check() { return &map_check_; }
-
private:
LInstanceOfKnownGlobal* instr_;
Label map_check_;
@@ -1910,8 +1920,8 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
}
-void LCodeGen::DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
- Label* map_check) {
+void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
+ Label* map_check) {
{
PushSafepointRegistersScope scope(this);
InstanceofStub::Flags flags = static_cast<InstanceofStub::Flags>(
@@ -1937,6 +1947,9 @@ void LCodeGen::DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
RECORD_SAFEPOINT_WITH_REGISTERS,
2);
ASSERT(delta == masm_->SizeOfCodeGeneratedSince(map_check));
+ ASSERT(instr->HasDeoptimizationEnvironment());
+ LEnvironment* env = instr->deoptimization_environment();
+ safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
// Move result to a register that survives the end of the
// PushSafepointRegisterScope.
__ movq(kScratchRegister, rax);
@@ -1960,9 +1973,6 @@ void LCodeGen::DoCmpT(LCmpT* instr) {
CallCode(ic, RelocInfo::CODE_TARGET, instr);
Condition condition = TokenToCondition(op, false);
- if (op == Token::GT || op == Token::LTE) {
- condition = ReverseCondition(condition);
- }
Label true_value, done;
__ testq(rax, rax);
__ j(condition, &true_value, Label::kNear);
@@ -1996,7 +2006,7 @@ void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
__ movq(result, instr->hydrogen()->cell(), RelocInfo::GLOBAL_PROPERTY_CELL);
__ movq(result, Operand(result, 0));
}
- if (instr->hydrogen()->check_hole_value()) {
+ if (instr->hydrogen()->RequiresHoleCheck()) {
__ CompareRoot(result, Heap::kTheHoleValueRootIndex);
DeoptimizeIf(equal, instr->environment());
}
@@ -2016,25 +2026,44 @@ void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
+ Register object = ToRegister(instr->TempAt(0));
+ Register address = ToRegister(instr->TempAt(1));
Register value = ToRegister(instr->InputAt(0));
- Register temp = ToRegister(instr->TempAt(0));
- ASSERT(!value.is(temp));
- bool check_hole = instr->hydrogen()->check_hole_value();
- if (!check_hole && value.is(rax)) {
- __ store_rax(instr->hydrogen()->cell().location(),
- RelocInfo::GLOBAL_PROPERTY_CELL);
- return;
- }
+ ASSERT(!value.is(object));
+ Handle<JSGlobalPropertyCell> cell_handle(instr->hydrogen()->cell());
+
+ __ movq(address, cell_handle, RelocInfo::GLOBAL_PROPERTY_CELL);
+
// If the cell we are storing to contains the hole it could have
// been deleted from the property dictionary. In that case, we need
// to update the property details in the property dictionary to mark
// it as no longer deleted. We deoptimize in that case.
- __ movq(temp, instr->hydrogen()->cell(), RelocInfo::GLOBAL_PROPERTY_CELL);
- if (check_hole) {
- __ CompareRoot(Operand(temp, 0), Heap::kTheHoleValueRootIndex);
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ __ CompareRoot(Operand(address, 0), Heap::kTheHoleValueRootIndex);
DeoptimizeIf(equal, instr->environment());
}
- __ movq(Operand(temp, 0), value);
+
+ // Store the value.
+ __ movq(Operand(address, 0), value);
+
+ if (instr->hydrogen()->NeedsWriteBarrier()) {
+ Label smi_store;
+ HType type = instr->hydrogen()->value()->type();
+ if (!type.IsHeapNumber() && !type.IsString() && !type.IsNonPrimitive()) {
+ __ JumpIfSmi(value, &smi_store, Label::kNear);
+ }
+
+ int offset = JSGlobalPropertyCell::kValueOffset - kHeapObjectTag;
+ __ lea(object, Operand(address, -offset));
+ // Cells are always in the remembered set.
+ __ RecordWrite(object,
+ address,
+ value,
+ kSaveFPRegs,
+ OMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ __ bind(&smi_store);
+ }
}
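// A minimal sketch, assuming V8's pointer tagging (small integers carry tag
// bit 0, heap objects tag bit 1): smis are immediate values the GC never
// traces, so DoStoreGlobalCell can jump over the RecordWrite call for them
// unless the value's type statically rules a smi out.
#include <cstdint>
inline bool IsSmi(uintptr_t tagged) { return (tagged & 1) == 0; }
void StoreWithBarrier(uintptr_t* slot, uintptr_t value) {
  *slot = value;
  if (IsSmi(value)) return;  // the &smi_store label: no barrier needed
  // RecordWrite(slot, value) would run here for heap objects.
}
int main() { uintptr_t cell = 0; StoreWithBarrier(&cell, 42u << 1); }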
@@ -2043,7 +2072,7 @@ void LCodeGen::DoStoreGlobalGeneric(LStoreGlobalGeneric* instr) {
ASSERT(ToRegister(instr->value()).is(rax));
__ Move(rcx, instr->name());
- Handle<Code> ic = instr->strict_mode()
+ Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
? isolate()->builtins()->StoreIC_Initialize_Strict()
: isolate()->builtins()->StoreIC_Initialize();
CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
@@ -2061,10 +2090,19 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
Register context = ToRegister(instr->context());
Register value = ToRegister(instr->value());
__ movq(ContextOperand(context, instr->slot_index()), value);
- if (instr->needs_write_barrier()) {
+ if (instr->hydrogen()->NeedsWriteBarrier()) {
+ HType type = instr->hydrogen()->value()->type();
+ SmiCheck check_needed =
+ type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
int offset = Context::SlotOffset(instr->slot_index());
Register scratch = ToRegister(instr->TempAt(0));
- __ RecordWrite(context, offset, value, scratch);
+ __ RecordWriteContextSlot(context,
+ offset,
+ value,
+ scratch,
+ kSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ check_needed);
}
}
@@ -2085,7 +2123,7 @@ void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
Register object,
Handle<Map> type,
Handle<String> name) {
- LookupResult lookup;
+ LookupResult lookup(isolate());
type->LookupInDescriptors(NULL, *name, &lookup);
ASSERT(lookup.IsProperty() &&
(lookup.type() == FIELD || lookup.type() == CONSTANT_FUNCTION));
@@ -2283,17 +2321,15 @@ void LCodeGen::DoLoadKeyedFastDoubleElement(
LLoadKeyedFastDoubleElement* instr) {
XMMRegister result(ToDoubleRegister(instr->result()));
- if (instr->hydrogen()->RequiresHoleCheck()) {
- int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag +
- sizeof(kHoleNanLower32);
- Operand hole_check_operand = BuildFastArrayOperand(
- instr->elements(),
- instr->key(),
- FAST_DOUBLE_ELEMENTS,
- offset);
- __ cmpl(hole_check_operand, Immediate(kHoleNanUpper32));
- DeoptimizeIf(equal, instr->environment());
- }
+ int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag +
+ sizeof(kHoleNanLower32);
+ Operand hole_check_operand = BuildFastArrayOperand(
+ instr->elements(),
+ instr->key(),
+ FAST_DOUBLE_ELEMENTS,
+ offset);
+ __ cmpl(hole_check_operand, Immediate(kHoleNanUpper32));
+ DeoptimizeIf(equal, instr->environment());
Operand double_load_operand = BuildFastArrayOperand(
instr->elements(), instr->key(), FAST_DOUBLE_ELEMENTS,
@@ -2365,6 +2401,7 @@ void LCodeGen::DoLoadKeyedSpecializedArrayElement(
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
+ case FAST_SMI_ONLY_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
@@ -2508,12 +2545,9 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
__ bind(&invoke);
ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
LPointerMap* pointers = instr->pointer_map();
- LEnvironment* env = instr->deoptimization_environment();
RecordPosition(pointers->position());
- RegisterEnvironmentForDeoptimization(env);
- SafepointGenerator safepoint_generator(this,
- pointers,
- env->deoptimization_index());
+ SafepointGenerator safepoint_generator(
+ this, pointers, Safepoint::kLazyDeopt);
v8::internal::ParameterCount actual(rax);
__ InvokeFunction(function, actual, CALL_FUNCTION,
safepoint_generator, CALL_AS_METHOD);
@@ -2529,7 +2563,7 @@ void LCodeGen::DoPushArgument(LPushArgument* instr) {
void LCodeGen::DoThisFunction(LThisFunction* instr) {
Register result = ToRegister(instr->result());
- __ movq(result, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ LoadHeapObject(result, instr->hydrogen()->closure());
}
@@ -2591,7 +2625,7 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
}
// Setup deoptimization.
- RegisterLazyDeoptimization(instr, RECORD_SIMPLE_SAFEPOINT, 0);
+ RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT, 0);
// Restore context.
__ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
@@ -2681,6 +2715,7 @@ void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
virtual void Generate() {
codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
}
+ virtual LInstruction* instr() { return instr_; }
private:
LUnaryMathOperation* instr_;
};
@@ -2884,6 +2919,14 @@ void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
}
+void LCodeGen::DoMathTan(LUnaryMathOperation* instr) {
+ ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
+ TranscendentalCacheStub stub(TranscendentalCache::TAN,
+ TranscendentalCacheStub::UNTAGGED);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+}
+
+
void LCodeGen::DoMathCos(LUnaryMathOperation* instr) {
ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
TranscendentalCacheStub stub(TranscendentalCache::COS,
@@ -2923,6 +2966,9 @@ void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) {
case kMathSin:
DoMathSin(instr);
break;
+ case kMathTan:
+ DoMathTan(instr);
+ break;
case kMathLog:
DoMathLog(instr);
break;
@@ -2938,10 +2984,8 @@ void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
ASSERT(instr->HasPointerMap());
ASSERT(instr->HasDeoptimizationEnvironment());
LPointerMap* pointers = instr->pointer_map();
- LEnvironment* env = instr->deoptimization_environment();
RecordPosition(pointers->position());
- RegisterEnvironmentForDeoptimization(env);
- SafepointGenerator generator(this, pointers, env->deoptimization_index());
+ SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
ParameterCount count(instr->arity());
__ InvokeFunction(rdi, count, CALL_FUNCTION, generator, CALL_AS_METHOD);
__ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
@@ -2974,13 +3018,13 @@ void LCodeGen::DoCallNamed(LCallNamed* instr) {
void LCodeGen::DoCallFunction(LCallFunction* instr) {
+ ASSERT(ToRegister(instr->function()).is(rdi));
ASSERT(ToRegister(instr->result()).is(rax));
int arity = instr->arity();
- CallFunctionStub stub(arity, RECEIVER_MIGHT_BE_IMPLICIT);
+ CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
__ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- __ Drop(1);
}
@@ -3028,21 +3072,36 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
}
// Do the store.
+ HType type = instr->hydrogen()->value()->type();
+ SmiCheck check_needed =
+ type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
if (instr->is_in_object()) {
__ movq(FieldOperand(object, offset), value);
- if (instr->needs_write_barrier()) {
+ if (instr->hydrogen()->NeedsWriteBarrier()) {
Register temp = ToRegister(instr->TempAt(0));
// Update the write barrier for the object for in-object properties.
- __ RecordWrite(object, offset, value, temp);
+ __ RecordWriteField(object,
+ offset,
+ value,
+ temp,
+ kSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ check_needed);
}
} else {
Register temp = ToRegister(instr->TempAt(0));
__ movq(temp, FieldOperand(object, JSObject::kPropertiesOffset));
__ movq(FieldOperand(temp, offset), value);
- if (instr->needs_write_barrier()) {
+ if (instr->hydrogen()->NeedsWriteBarrier()) {
// Update the write barrier for the properties array.
// object is used as a scratch register.
- __ RecordWrite(temp, offset, value, object);
+ __ RecordWriteField(temp,
+ offset,
+ value,
+ object,
+ kSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ check_needed);
}
}
}
@@ -3053,7 +3112,7 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
ASSERT(ToRegister(instr->value()).is(rax));
__ Move(rcx, instr->hydrogen()->name());
- Handle<Code> ic = instr->strict_mode()
+ Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
? isolate()->builtins()->StoreIC_Initialize_Strict()
: isolate()->builtins()->StoreIC_Initialize();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
@@ -3090,6 +3149,7 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement(
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
+ case FAST_SMI_ONLY_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
@@ -3125,6 +3185,13 @@ void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
Register elements = ToRegister(instr->object());
Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
+ // This instruction cannot handle the FAST_SMI_ONLY_ELEMENTS -> FAST_ELEMENTS
+ // conversion, so it deopts in that case.
+ if (instr->hydrogen()->ValueNeedsSmiCheck()) {
+ Condition cc = masm()->CheckSmi(value);
+ DeoptimizeIf(NegateCondition(cc), instr->environment());
+ }
+
// Do the store.
if (instr->key()->IsConstantOperand()) {
ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
@@ -3141,12 +3208,20 @@ void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
}
if (instr->hydrogen()->NeedsWriteBarrier()) {
+ HType type = instr->hydrogen()->value()->type();
+ SmiCheck check_needed =
+ type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
// Compute address of modified element and store it into key register.
__ lea(key, FieldOperand(elements,
key,
times_pointer_size,
FixedArray::kHeaderSize));
- __ RecordWrite(elements, key, value);
+ __ RecordWrite(elements,
+ key,
+ value,
+ kSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ check_needed);
}
}
@@ -3175,13 +3250,54 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
ASSERT(ToRegister(instr->key()).is(rcx));
ASSERT(ToRegister(instr->value()).is(rax));
- Handle<Code> ic = instr->strict_mode()
+ Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
: isolate()->builtins()->KeyedStoreIC_Initialize();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
+void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
+ Register object_reg = ToRegister(instr->object());
+ Register new_map_reg = ToRegister(instr->new_map_reg());
+
+ Handle<Map> from_map = instr->original_map();
+ Handle<Map> to_map = instr->transitioned_map();
+ ElementsKind from_kind = from_map->elements_kind();
+ ElementsKind to_kind = to_map->elements_kind();
+
+ Label not_applicable;
+ __ Cmp(FieldOperand(object_reg, HeapObject::kMapOffset), from_map);
+ __ j(not_equal, &not_applicable);
+ __ movq(new_map_reg, to_map, RelocInfo::EMBEDDED_OBJECT);
+ if (from_kind == FAST_SMI_ONLY_ELEMENTS && to_kind == FAST_ELEMENTS) {
+ __ movq(FieldOperand(object_reg, HeapObject::kMapOffset), new_map_reg);
+ // Write barrier.
+ ASSERT_NE(instr->temp_reg(), NULL);
+ __ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
+ ToRegister(instr->temp_reg()), kDontSaveFPRegs);
+ } else if (from_kind == FAST_SMI_ONLY_ELEMENTS &&
+ to_kind == FAST_DOUBLE_ELEMENTS) {
+ Register fixed_object_reg = ToRegister(instr->temp_reg());
+ ASSERT(fixed_object_reg.is(rdx));
+ ASSERT(new_map_reg.is(rbx));
+ __ movq(fixed_object_reg, object_reg);
+ CallCode(isolate()->builtins()->TransitionElementsSmiToDouble(),
+ RelocInfo::CODE_TARGET, instr);
+ } else if (from_kind == FAST_DOUBLE_ELEMENTS && to_kind == FAST_ELEMENTS) {
+ Register fixed_object_reg = ToRegister(instr->temp_reg());
+ ASSERT(fixed_object_reg.is(rdx));
+ ASSERT(new_map_reg.is(rbx));
+ __ movq(fixed_object_reg, object_reg);
+ CallCode(isolate()->builtins()->TransitionElementsDoubleToObject(),
+ RelocInfo::CODE_TARGET, instr);
+ } else {
+ UNREACHABLE();
+ }
+ __ bind(&not_applicable);
+}
+
+
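// A standalone sketch of the dispatch above: smi-only -> object only needs a
// new map, since every smi in the backing store is already a valid tagged
// value, while transitions involving double elements must rewrite the backing
// store itself and therefore call out to a builtin.
enum Kind { kSmiOnly, kObject, kDouble };
enum Action { kSwapMap, kCallBuiltin, kUnreachable };
Action Classify(Kind from, Kind to) {
  if (from == kSmiOnly && to == kObject) return kSwapMap;
  if (from == kSmiOnly && to == kDouble) return kCallBuiltin;
  if (from == kDouble && to == kObject) return kCallBuiltin;
  return kUnreachable;
}
int main() { return Classify(kSmiOnly, kObject) == kSwapMap ? 0 : 1; }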
void LCodeGen::DoStringAdd(LStringAdd* instr) {
EmitPushTaggedOperand(instr->left());
EmitPushTaggedOperand(instr->right());
@@ -3196,85 +3312,19 @@ void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
+ virtual LInstruction* instr() { return instr_; }
private:
LStringCharCodeAt* instr_;
};
- Register string = ToRegister(instr->string());
- Register index = ToRegister(instr->index());
- Register result = ToRegister(instr->result());
-
DeferredStringCharCodeAt* deferred =
new DeferredStringCharCodeAt(this, instr);
- // Fetch the instance type of the receiver into result register.
- __ movq(result, FieldOperand(string, HeapObject::kMapOffset));
- __ movzxbl(result, FieldOperand(result, Map::kInstanceTypeOffset));
-
- // We need special handling for indirect strings.
- Label check_sequential;
- __ testb(result, Immediate(kIsIndirectStringMask));
- __ j(zero, &check_sequential, Label::kNear);
-
- // Dispatch on the indirect string shape: slice or cons.
- Label cons_string;
- __ testb(result, Immediate(kSlicedNotConsMask));
- __ j(zero, &cons_string, Label::kNear);
-
- // Handle slices.
- Label indirect_string_loaded;
- __ SmiToInteger32(result, FieldOperand(string, SlicedString::kOffsetOffset));
- __ addq(index, result);
- __ movq(string, FieldOperand(string, SlicedString::kParentOffset));
- __ jmp(&indirect_string_loaded, Label::kNear);
-
- // Handle conses.
- // Check whether the right hand side is the empty string (i.e. if
- // this is really a flat string in a cons string). If that is not
- // the case we would rather go to the runtime system now to flatten
- // the string.
- __ bind(&cons_string);
- __ CompareRoot(FieldOperand(string, ConsString::kSecondOffset),
- Heap::kEmptyStringRootIndex);
- __ j(not_equal, deferred->entry());
- __ movq(string, FieldOperand(string, ConsString::kFirstOffset));
-
- __ bind(&indirect_string_loaded);
- __ movq(result, FieldOperand(string, HeapObject::kMapOffset));
- __ movzxbl(result, FieldOperand(result, Map::kInstanceTypeOffset));
-
- // Check whether the string is sequential. The only non-sequential
- // shapes we support have just been unwrapped above.
- __ bind(&check_sequential);
- STATIC_ASSERT(kSeqStringTag == 0);
- __ testb(result, Immediate(kStringRepresentationMask));
- __ j(not_zero, deferred->entry());
-
- // Dispatch on the encoding: ASCII or two-byte.
- Label ascii_string;
- STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
- STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
- __ testb(result, Immediate(kStringEncodingMask));
- __ j(not_zero, &ascii_string, Label::kNear);
-
- // Two-byte string.
- // Load the two-byte character code into the result register.
- Label done;
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
- __ movzxwl(result, FieldOperand(string,
- index,
- times_2,
- SeqTwoByteString::kHeaderSize));
- __ jmp(&done, Label::kNear);
-
- // ASCII string.
- // Load the byte into the result register.
- __ bind(&ascii_string);
- __ movzxbl(result, FieldOperand(string,
- index,
- times_1,
- SeqAsciiString::kHeaderSize));
- __ bind(&done);
+ StringCharLoadGenerator::Generate(masm(),
+ ToRegister(instr->string()),
+ ToRegister(instr->index()),
+ ToRegister(instr->result()),
+ deferred->entry());
__ bind(deferred->exit());
}
@@ -3316,6 +3366,7 @@ void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); }
+ virtual LInstruction* instr() { return instr_; }
private:
LStringCharFromCode* instr_;
};
@@ -3392,6 +3443,7 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
+ virtual LInstruction* instr() { return instr_; }
private:
LNumberTagD* instr_;
};
@@ -3487,16 +3539,6 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
}
-class DeferredTaggedToI: public LDeferredCode {
- public:
- DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
- private:
- LTaggedToI* instr_;
-};
-
-
void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
Label done, heap_number;
Register input_reg = ToRegister(instr->InputAt(0));
@@ -3545,6 +3587,16 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
+ class DeferredTaggedToI: public LDeferredCode {
+ public:
+ DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
+ virtual LInstruction* instr() { return instr_; }
+ private:
+ LTaggedToI* instr_;
+ };
+
LOperand* input = instr->InputAt(0);
ASSERT(input->IsRegister());
ASSERT(input->Equals(instr->result()));
@@ -3781,6 +3833,11 @@ void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
+ Handle<FixedArray> constant_elements = instr->hydrogen()->constant_elements();
+ ASSERT_EQ(2, constant_elements->length());
+ ElementsKind constant_elements_kind =
+ static_cast<ElementsKind>(Smi::cast(constant_elements->get(0))->value());
+
// Setup the parameters to the stub/runtime call.
__ movq(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
__ push(FieldOperand(rax, JSFunction::kLiteralsOffset));
@@ -3801,26 +3858,108 @@ void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr);
} else {
FastCloneShallowArrayStub::Mode mode =
- FastCloneShallowArrayStub::CLONE_ELEMENTS;
+ constant_elements_kind == FAST_DOUBLE_ELEMENTS
+ ? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
+ : FastCloneShallowArrayStub::CLONE_ELEMENTS;
FastCloneShallowArrayStub stub(mode, length);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
}
-void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
+void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
+ Register result,
+ Register source,
+ int* offset) {
+ ASSERT(!source.is(rcx));
+ ASSERT(!result.is(rcx));
+
+ // Increase the offset so that subsequent objects end up right after
+ // this one.
+ int current_offset = *offset;
+ int size = object->map()->instance_size();
+ *offset += size;
+
+ // Copy object header.
+ ASSERT(object->properties()->length() == 0);
+ ASSERT(object->elements()->length() == 0 ||
+ object->elements()->map() == isolate()->heap()->fixed_cow_array_map());
+ int inobject_properties = object->map()->inobject_properties();
+ int header_size = size - inobject_properties * kPointerSize;
+ for (int i = 0; i < header_size; i += kPointerSize) {
+ __ movq(rcx, FieldOperand(source, i));
+ __ movq(FieldOperand(result, current_offset + i), rcx);
+ }
+
+ // Copy in-object properties.
+ for (int i = 0; i < inobject_properties; i++) {
+ int total_offset = current_offset + object->GetInObjectPropertyOffset(i);
+ Handle<Object> value = Handle<Object>(object->InObjectPropertyAt(i));
+ if (value->IsJSObject()) {
+ Handle<JSObject> value_object = Handle<JSObject>::cast(value);
+ __ lea(rcx, Operand(result, *offset));
+ __ movq(FieldOperand(result, total_offset), rcx);
+ LoadHeapObject(source, value_object);
+ EmitDeepCopy(value_object, result, source, offset);
+ } else if (value->IsHeapObject()) {
+ LoadHeapObject(rcx, Handle<HeapObject>::cast(value));
+ __ movq(FieldOperand(result, total_offset), rcx);
+ } else {
+ __ movq(rcx, value, RelocInfo::NONE);
+ __ movq(FieldOperand(result, total_offset), rcx);
+ }
+ }
+}
+
+
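// A minimal standalone sketch (plain buffers instead of V8 handles) of
// EmitDeepCopy's offset bookkeeping: each object reserves its slot by bumping
// *offset before its fields are visited, so nested objects are laid out
// back-to-back inside the single allocation made by the caller.
#include <vector>
struct Obj { int size; std::vector<const Obj*> children; };
void DeepCopy(const Obj* object, char* result, int* offset) {
  int current_offset = *offset;
  *offset += object->size;       // children land right after this object
  (void)current_offset;          // header and fields would be copied here
  for (const Obj* child : object->children) {
    // a pointer to `result + *offset` would be stored into this object
    DeepCopy(child, result, offset);
  }
}
int main() {
  Obj leaf{16, {}};
  Obj root{32, {&leaf}};
  char buffer[48];
  int offset = 0;
  DeepCopy(&root, buffer, &offset);
  return offset == 48 ? 0 : 1;   // total size, like the ASSERT_EQ above
}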
+void LCodeGen::DoObjectLiteralFast(LObjectLiteralFast* instr) {
+ int size = instr->hydrogen()->total_size();
+
+ // Allocate all objects that are part of the literal in one big
+ // allocation. This avoids multiple limit checks.
+ Label allocated, runtime_allocate;
+ __ AllocateInNewSpace(size, rax, rcx, rdx, &runtime_allocate, TAG_OBJECT);
+ __ jmp(&allocated);
+
+ __ bind(&runtime_allocate);
+ __ Push(Smi::FromInt(size));
+ CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
+
+ __ bind(&allocated);
+ int offset = 0;
+ LoadHeapObject(rbx, instr->hydrogen()->boilerplate());
+ EmitDeepCopy(instr->hydrogen()->boilerplate(), rax, rbx, &offset);
+ ASSERT_EQ(size, offset);
+}
+
+
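// A standalone sketch of the allocation pattern above: AllocateInNewSpace is
// assumed to be a bump allocation against a limit that branches to
// runtime_allocate when space runs out, so the common path makes no call.
#include <cstddef>
char* TryBumpAllocate(char** top, char* limit, std::size_t size) {
  if (static_cast<std::size_t>(limit - *top) < size) return nullptr;
  char* result = *top;
  *top += size;  // the next allocation starts after this object
  return result;
}
int main() {
  char space[64];
  char* top = space;
  return TryBumpAllocate(&top, space + 64, 48) != nullptr ? 0 : 1;
}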
+void LCodeGen::DoObjectLiteralGeneric(LObjectLiteralGeneric* instr) {
+ Handle<FixedArray> constant_properties =
+ instr->hydrogen()->constant_properties();
+
// Setup the parameters to the stub/runtime call.
__ movq(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
__ push(FieldOperand(rax, JSFunction::kLiteralsOffset));
__ Push(Smi::FromInt(instr->hydrogen()->literal_index()));
- __ Push(instr->hydrogen()->constant_properties());
- __ Push(Smi::FromInt(instr->hydrogen()->fast_elements() ? 1 : 0));
+ __ Push(constant_properties);
+ int flags = instr->hydrogen()->fast_elements()
+ ? ObjectLiteral::kFastElements
+ : ObjectLiteral::kNoFlags;
+ flags |= instr->hydrogen()->has_function()
+ ? ObjectLiteral::kHasFunction
+ : ObjectLiteral::kNoFlags;
+ __ Push(Smi::FromInt(flags));
// Pick the right runtime function to call.
+ int properties_count = constant_properties->length() / 2;
if (instr->hydrogen()->depth() > 1) {
CallRuntime(Runtime::kCreateObjectLiteral, 4, instr);
- } else {
+ } else if (flags != ObjectLiteral::kFastElements ||
+ properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
CallRuntime(Runtime::kCreateObjectLiteralShallow, 4, instr);
+ } else {
+ FastCloneShallowObjectStub stub(properties_count);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
}
@@ -3890,8 +4029,7 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
Handle<SharedFunctionInfo> shared_info = instr->shared_info();
bool pretenure = instr->hydrogen()->pretenure();
if (!pretenure && shared_info->num_literals() == 0) {
- FastNewClosureStub stub(
- shared_info->strict_mode() ? kStrictMode : kNonStrictMode);
+ FastNewClosureStub stub(shared_info->language_mode());
__ Push(shared_info);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
} else {
@@ -3931,12 +4069,11 @@ void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
Label* true_label = chunk_->GetAssemblyLabel(true_block);
Label* false_label = chunk_->GetAssemblyLabel(false_block);
- Condition final_branch_condition = EmitTypeofIs(true_label,
- false_label,
- input,
- instr->type_literal());
-
- EmitBranch(true_block, false_block, final_branch_condition);
+ Condition final_branch_condition =
+ EmitTypeofIs(true_label, false_label, input, instr->type_literal());
+ if (final_branch_condition != no_condition) {
+ EmitBranch(true_block, false_block, final_branch_condition);
+ }
}
@@ -3981,9 +4118,12 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
final_branch_condition = not_zero;
} else if (type_name->Equals(heap()->function_symbol())) {
+ STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
__ JumpIfSmi(input, false_label);
- __ CmpObjectType(input, FIRST_CALLABLE_SPEC_OBJECT_TYPE, input);
- final_branch_condition = above_equal;
+ __ CmpObjectType(input, JS_FUNCTION_TYPE, input);
+ __ j(equal, true_label);
+ __ CmpInstanceType(input, JS_FUNCTION_PROXY_TYPE);
+ final_branch_condition = equal;
} else if (type_name->Equals(heap()->object_symbol())) {
__ JumpIfSmi(input, false_label);
@@ -4001,7 +4141,6 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
final_branch_condition = zero;
} else {
- final_branch_condition = never;
__ jmp(false_label);
}
@@ -4037,9 +4176,29 @@ void LCodeGen::EmitIsConstructCall(Register temp) {
}
+void LCodeGen::EnsureSpaceForLazyDeopt() {
+ // Ensure that we have enough space after the previous lazy-bailout
+ // instruction for patching the code here.
+ int current_pc = masm()->pc_offset();
+ int patch_size = Deoptimizer::patch_size();
+ if (current_pc < last_lazy_deopt_pc_ + patch_size) {
+ int padding_size = last_lazy_deopt_pc_ + patch_size - current_pc;
+ while (padding_size > 0) {
+ int nop_size = padding_size > 9 ? 9 : padding_size;
+ __ nop(nop_size);
+ padding_size -= nop_size;
+ }
+ }
+ last_lazy_deopt_pc_ = masm()->pc_offset();
+}
+
+
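// A standalone sketch of the padding loop in EnsureSpaceForLazyDeopt: the
// longest x64 nop encoding is 9 bytes, so a larger gap is filled with several
// nops.
#include <cassert>
int PaddingNops(int padding_size) {
  int nops = 0;
  while (padding_size > 0) {
    int nop_size = padding_size > 9 ? 9 : padding_size;
    padding_size -= nop_size;
    ++nops;
  }
  return nops;
}
int main() {
  assert(PaddingNops(0) == 0);
  assert(PaddingNops(9) == 1);
  assert(PaddingNops(20) == 3);  // 9 + 9 + 2
}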
void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
- // No code for lazy bailout instruction. Used to capture environment after a
- // call for populating the safepoint data with deoptimization data.
+ EnsureSpaceForLazyDeopt();
+ ASSERT(instr->HasEnvironment());
+ LEnvironment* env = instr->environment();
+ RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
+ safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}
@@ -4055,15 +4214,12 @@ void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) {
EmitPushTaggedOperand(key);
ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
LPointerMap* pointers = instr->pointer_map();
- LEnvironment* env = instr->deoptimization_environment();
RecordPosition(pointers->position());
- RegisterEnvironmentForDeoptimization(env);
// Create safepoint generator that will also ensure enough space in the
// reloc info for patching in deoptimization (since this is invoking a
// builtin)
- SafepointGenerator safepoint_generator(this,
- pointers,
- env->deoptimization_index());
+ SafepointGenerator safepoint_generator(
+ this, pointers, Safepoint::kLazyDeopt);
__ Push(Smi::FromInt(strict_mode_flag()));
__ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, safepoint_generator);
}
@@ -4076,30 +4232,21 @@ void LCodeGen::DoIn(LIn* instr) {
EmitPushTaggedOperand(obj);
ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
LPointerMap* pointers = instr->pointer_map();
- LEnvironment* env = instr->deoptimization_environment();
RecordPosition(pointers->position());
- RegisterEnvironmentForDeoptimization(env);
- // Create safepoint generator that will also ensure enough space in the
- // reloc info for patching in deoptimization (since this is invoking a
- // builtin)
- SafepointGenerator safepoint_generator(this,
- pointers,
- env->deoptimization_index());
+ SafepointGenerator safepoint_generator(
+ this, pointers, Safepoint::kLazyDeopt);
__ InvokeBuiltin(Builtins::IN, CALL_FUNCTION, safepoint_generator);
}
void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
- {
- PushSafepointRegistersScope scope(this);
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
- RegisterLazyDeoptimization(instr, RECORD_SAFEPOINT_WITH_REGISTERS, 0);
- }
-
- // The gap code includes the restoring of the safepoint registers.
- int pc = masm()->pc_offset();
- safepoints_.SetPcAfterGap(pc);
+ PushSafepointRegistersScope scope(this);
+ __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
+ RecordSafepointWithLazyDeopt(instr, RECORD_SAFEPOINT_WITH_REGISTERS, 0);
+ ASSERT(instr->HasEnvironment());
+ LEnvironment* env = instr->environment();
+ safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}
@@ -4109,10 +4256,15 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
+ virtual LInstruction* instr() { return instr_; }
private:
LStackCheck* instr_;
};
+ ASSERT(instr->HasEnvironment());
+ LEnvironment* env = instr->environment();
+ // There is no LLazyBailout instruction for stack-checks. We have to
+ // prepare for lazy deoptimization explicitly here.
if (instr->hydrogen()->is_function_entry()) {
// Perform stack overflow check.
Label done;
@@ -4120,7 +4272,10 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
__ j(above_equal, &done, Label::kNear);
StackCheckStub stub;
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ EnsureSpaceForLazyDeopt();
__ bind(&done);
+ RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
+ safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
} else {
ASSERT(instr->hydrogen()->is_backwards_branch());
// Perform stack overflow check if this goto needs it before jumping.
@@ -4128,8 +4283,13 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
new DeferredStackCheck(this, instr);
__ CompareRoot(rsp, Heap::kStackLimitRootIndex);
__ j(below, deferred_stack_check->entry());
+ EnsureSpaceForLazyDeopt();
__ bind(instr->done_label());
deferred_stack_check->SetExit(instr->done_label());
+ RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
+ // Don't record a deoptimization index for the safepoint here.
+ // This will be done explicitly when emitting the call and the safepoint in
+ // the deferred code.
}
}
@@ -4145,7 +4305,7 @@ void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
// If the environment were already registered, we would have no way of
// backpatching it with the spill slot operands.
ASSERT(!environment->HasBeenRegistered());
- RegisterEnvironmentForDeoptimization(environment);
+ RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
ASSERT(osr_pc_offset_ == -1);
osr_pc_offset_ = masm()->pc_offset();
}
diff --git a/deps/v8/src/x64/lithium-codegen-x64.h b/deps/v8/src/x64/lithium-codegen-x64.h
index 8cb4cece9..868f75e90 100644
--- a/deps/v8/src/x64/lithium-codegen-x64.h
+++ b/deps/v8/src/x64/lithium-codegen-x64.h
@@ -60,6 +60,7 @@ class LCodeGen BASE_EMBEDDED {
status_(UNUSED),
deferred_(8),
osr_pc_offset_(-1),
+ last_lazy_deopt_pc_(0),
resolver_(this),
expected_safepoint_kind_(Safepoint::kSimple) {
PopulateDeoptimizationLiteralsWithInlinedFunctions();
@@ -77,6 +78,7 @@ class LCodeGen BASE_EMBEDDED {
XMMRegister ToDoubleRegister(LOperand* op) const;
bool IsInteger32Constant(LConstantOperand* op) const;
int ToInteger32(LConstantOperand* op) const;
+ double ToDouble(LConstantOperand* op) const;
bool IsTaggedConstant(LConstantOperand* op) const;
Handle<Object> ToHandle(LConstantOperand* op) const;
Operand ToOperand(LOperand* op) const;
@@ -97,8 +99,8 @@ class LCodeGen BASE_EMBEDDED {
void DoDeferredStackCheck(LStackCheck* instr);
void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
- void DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
- Label* map_check);
+ void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
+ Label* map_check);
// Parallel move support.
void DoParallelMove(LParallelMove* move);
@@ -125,8 +127,8 @@ class LCodeGen BASE_EMBEDDED {
bool is_done() const { return status_ == DONE; }
bool is_aborted() const { return status_ == ABORTED; }
- int strict_mode_flag() const {
- return info()->is_strict_mode() ? kStrictMode : kNonStrictMode;
+ StrictModeFlag strict_mode_flag() const {
+ return info()->is_classic_mode() ? kNonStrictMode : kStrictMode;
}
LChunk* chunk() const { return chunk_; }
@@ -134,13 +136,13 @@ class LCodeGen BASE_EMBEDDED {
HGraph* graph() const { return chunk_->graph(); }
int GetNextEmittedBlock(int block);
- LInstruction* GetNextInstruction();
void EmitClassOfTest(Label* if_true,
Label* if_false,
Handle<String> class_name,
Register input,
- Register temporary);
+ Register temporary,
+ Register scratch);
int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
int GetParameterCount() const { return scope()->num_parameters(); }
@@ -189,9 +191,8 @@ class LCodeGen BASE_EMBEDDED {
int argc,
LInstruction* instr);
-
// Generate a direct call to a known function. Expects the function
- // to be in edi.
+ // to be in rdi.
void CallKnownFunction(Handle<JSFunction> function,
int arity,
LInstruction* instr,
@@ -199,10 +200,11 @@ class LCodeGen BASE_EMBEDDED {
void LoadHeapObject(Register result, Handle<HeapObject> object);
- void RegisterLazyDeoptimization(LInstruction* instr,
- SafepointMode safepoint_mode,
- int argc);
- void RegisterEnvironmentForDeoptimization(LEnvironment* environment);
+ void RecordSafepointWithLazyDeopt(LInstruction* instr,
+ SafepointMode safepoint_mode,
+ int argc);
+ void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
+ Safepoint::DeoptMode mode);
void DeoptimizeIf(Condition cc, LEnvironment* environment);
void AddToTranslation(Translation* translation,
@@ -229,6 +231,7 @@ class LCodeGen BASE_EMBEDDED {
void DoMathSqrt(LUnaryMathOperation* instr);
void DoMathPowHalf(LUnaryMathOperation* instr);
void DoMathLog(LUnaryMathOperation* instr);
+ void DoMathTan(LUnaryMathOperation* instr);
void DoMathCos(LUnaryMathOperation* instr);
void DoMathSin(LUnaryMathOperation* instr);
@@ -236,21 +239,17 @@ class LCodeGen BASE_EMBEDDED {
void RecordSafepoint(LPointerMap* pointers,
Safepoint::Kind kind,
int arguments,
- int deoptimization_index);
- void RecordSafepoint(LPointerMap* pointers, int deoptimization_index);
- void RecordSafepoint(int deoptimization_index);
+ Safepoint::DeoptMode mode);
+ void RecordSafepoint(LPointerMap* pointers, Safepoint::DeoptMode mode);
+ void RecordSafepoint(Safepoint::DeoptMode mode);
void RecordSafepointWithRegisters(LPointerMap* pointers,
int arguments,
- int deoptimization_index);
+ Safepoint::DeoptMode mode);
void RecordPosition(int position);
- int LastSafepointEnd() {
- return static_cast<int>(safepoints_.GetPcAfterGap());
- }
static Condition TokenToCondition(Token::Value op, bool is_unsigned);
void EmitGoto(int block);
void EmitBranch(int left_block, int right_block, Condition cc);
- void EmitCmpI(LOperand* left, LOperand* right);
void EmitNumberUntagD(Register input,
XMMRegister result,
bool deoptimize_on_undefined,
@@ -259,8 +258,10 @@ class LCodeGen BASE_EMBEDDED {
// Emits optimized code for typeof x == "y". Modifies input register.
// Returns the condition on which a final split to
// true and false label should be made, to optimize fallthrough.
- Condition EmitTypeofIs(Label* true_label, Label* false_label,
- Register input, Handle<String> type_name);
+ Condition EmitTypeofIs(Label* true_label,
+ Label* false_label,
+ Register input,
+ Handle<String> type_name);
// Emits optimized code for %_IsObject(x). Preserves input register.
// Returns the condition on which a final split to
@@ -269,6 +270,13 @@ class LCodeGen BASE_EMBEDDED {
Label* is_not_object,
Label* is_object);
+ // Emits optimized code for %_IsString(x). Preserves input register.
+ // Returns the condition on which a final split to
+ // true and false label should be made, to optimize fallthrough.
+ Condition EmitIsString(Register input,
+ Register temp1,
+ Label* is_not_string);
+
// Emits optimized code for %_IsConstructCall().
// Caller should branch on equal condition.
void EmitIsConstructCall(Register temp);
@@ -282,6 +290,13 @@ class LCodeGen BASE_EMBEDDED {
// register, or a stack slot operand.
void EmitPushTaggedOperand(LOperand* operand);
+ // Emits optimized code to deep-copy the contents of statically known
+ // object graphs (e.g. object literal boilerplate).
+ void EmitDeepCopy(Handle<JSObject> object,
+ Register result,
+ Register source,
+ int* offset);
+
struct JumpTableEntry {
explicit inline JumpTableEntry(Address entry)
: label(),
@@ -290,6 +305,8 @@ class LCodeGen BASE_EMBEDDED {
Address address;
};
+ void EnsureSpaceForLazyDeopt();
+
LChunk* const chunk_;
MacroAssembler* const masm_;
CompilationInfo* const info_;
@@ -306,6 +323,7 @@ class LCodeGen BASE_EMBEDDED {
TranslationBuffer translations_;
ZoneList<LDeferredCode*> deferred_;
int osr_pc_offset_;
+ int last_lazy_deopt_pc_;
// Builder that keeps track of safepoints in the code. The table
// itself is emitted at the end of the generated code.
@@ -345,16 +363,20 @@ class LCodeGen BASE_EMBEDDED {
class LDeferredCode: public ZoneObject {
public:
explicit LDeferredCode(LCodeGen* codegen)
- : codegen_(codegen), external_exit_(NULL) {
+ : codegen_(codegen),
+ external_exit_(NULL),
+ instruction_index_(codegen->current_instruction_) {
codegen->AddDeferredCode(this);
}
virtual ~LDeferredCode() { }
virtual void Generate() = 0;
+ virtual LInstruction* instr() = 0;
void SetExit(Label *exit) { external_exit_ = exit; }
Label* entry() { return &entry_; }
Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }
+ int instruction_index() const { return instruction_index_; }
protected:
LCodeGen* codegen() const { return codegen_; }
@@ -365,6 +387,7 @@ class LDeferredCode: public ZoneObject {
Label entry_;
Label exit_;
Label* external_exit_;
+ int instruction_index_;
};
} } // namespace v8::internal
diff --git a/deps/v8/src/x64/lithium-x64.cc b/deps/v8/src/x64/lithium-x64.cc
index 5fc56462b..b486faec6 100644
--- a/deps/v8/src/x64/lithium-x64.cc
+++ b/deps/v8/src/x64/lithium-x64.cc
@@ -214,10 +214,11 @@ void LCmpIDAndBranch::PrintDataTo(StringStream* stream) {
}
-void LIsNullAndBranch::PrintDataTo(StringStream* stream) {
+void LIsNilAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if ");
InputAt(0)->PrintTo(stream);
- stream->Add(is_strict() ? " === null" : " == null");
+ stream->Add(kind() == kStrictEquality ? " === " : " == ");
+ stream->Add(nil() == kNullValue ? "null" : "undefined");
stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
}
@@ -229,6 +230,13 @@ void LIsObjectAndBranch::PrintDataTo(StringStream* stream) {
}
+void LIsStringAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if is_string(");
+ InputAt(0)->PrintTo(stream);
+ stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
void LIsSmiAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if is_smi(");
InputAt(0)->PrintTo(stream);
@@ -243,6 +251,14 @@ void LIsUndetectableAndBranch::PrintDataTo(StringStream* stream) {
}
+void LStringCompareAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if string_compare(");
+ InputAt(0)->PrintTo(stream);
+ InputAt(1)->PrintTo(stream);
+ stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if has_instance_type(");
InputAt(0)->PrintTo(stream);
@@ -446,6 +462,12 @@ void LStoreKeyedGeneric::PrintDataTo(StringStream* stream) {
}
+void LTransitionElementsKind::PrintDataTo(StringStream* stream) {
+ object()->PrintTo(stream);
+ stream->Add(" %p -> %p", *original_map(), *transitioned_map());
+}
+
+
void LChunk::AddInstruction(LInstruction* instr, HBasicBlock* block) {
LInstructionGap* gap = new LInstructionGap(block);
int index = -1;
@@ -706,7 +728,9 @@ LInstruction* LChunkBuilder::DefineFixedDouble(
LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
HEnvironment* hydrogen_env = current_block_->last_environment();
- instr->set_environment(CreateEnvironment(hydrogen_env));
+ int argument_index_accumulator = 0;
+ instr->set_environment(CreateEnvironment(hydrogen_env,
+ &argument_index_accumulator));
return instr;
}
@@ -736,7 +760,7 @@ LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
instr->MarkAsCall();
instr = AssignPointerMap(instr);
- if (hinstr->HasSideEffects()) {
+ if (hinstr->HasObservableSideEffects()) {
ASSERT(hinstr->next()->IsSimulate());
HSimulate* sim = HSimulate::cast(hinstr->next());
instr = SetInstructionPendingDeoptimizationEnvironment(
@@ -748,7 +772,8 @@ LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
// Thus we still need to attach an environment to this call even if
// the call sequence cannot deoptimize eagerly.
bool needs_environment =
- (can_deoptimize == CAN_DEOPTIMIZE_EAGERLY) || !hinstr->HasSideEffects();
+ (can_deoptimize == CAN_DEOPTIMIZE_EAGERLY) ||
+ !hinstr->HasObservableSideEffects();
if (needs_environment && !instr->HasEnvironment()) {
instr = AssignEnvironment(instr);
}
@@ -806,28 +831,6 @@ LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
}
-LInstruction* LChunkBuilder::DoBit(Token::Value op,
- HBitwiseBinaryOperation* instr) {
- if (instr->representation().IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
-
- LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
- LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
- return DefineSameAsFirst(new LBitI(op, left, right));
- } else {
- ASSERT(instr->representation().IsTagged());
- ASSERT(instr->left()->representation().IsTagged());
- ASSERT(instr->right()->representation().IsTagged());
-
- LOperand* left = UseFixed(instr->left(), rdx);
- LOperand* right = UseFixed(instr->right(), rax);
- LArithmeticT* result = new LArithmeticT(op, left, right);
- return MarkAsCall(DefineFixed(result, rax), instr);
- }
-}
-
-
LInstruction* LChunkBuilder::DoShift(Token::Value op,
HBitwiseBinaryOperation* instr) {
if (instr->representation().IsTagged()) {
@@ -989,10 +992,13 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) {
}
-LEnvironment* LChunkBuilder::CreateEnvironment(HEnvironment* hydrogen_env) {
+LEnvironment* LChunkBuilder::CreateEnvironment(
+ HEnvironment* hydrogen_env,
+ int* argument_index_accumulator) {
if (hydrogen_env == NULL) return NULL;
- LEnvironment* outer = CreateEnvironment(hydrogen_env->outer());
+ LEnvironment* outer =
+ CreateEnvironment(hydrogen_env->outer(), argument_index_accumulator);
int ast_id = hydrogen_env->ast_id();
ASSERT(ast_id != AstNode::kNoNumber);
int value_count = hydrogen_env->length();
@@ -1002,7 +1008,6 @@ LEnvironment* LChunkBuilder::CreateEnvironment(HEnvironment* hydrogen_env) {
argument_count_,
value_count,
outer);
- int argument_index = 0;
for (int i = 0; i < value_count; ++i) {
if (hydrogen_env->is_special_index(i)) continue;
@@ -1011,7 +1016,7 @@ LEnvironment* LChunkBuilder::CreateEnvironment(HEnvironment* hydrogen_env) {
if (value->IsArgumentsObject()) {
op = NULL;
} else if (value->IsPushArgument()) {
- op = new LArgument(argument_index++);
+ op = new LArgument((*argument_index_accumulator)++);
} else {
op = UseAny(value);
}
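// A minimal sketch of why CreateEnvironment now threads
// *argument_index_accumulator through the recursion: outer (inlined)
// environments are numbered first, and every LArgument across all frames must
// get a distinct index, which the old per-call `int argument_index = 0` local
// could not guarantee.
#include <cassert>
#include <vector>
struct Env { const Env* outer; int pushed_args; };
void CollectIndices(const Env* e, int* acc, std::vector<int>* out) {
  if (e == nullptr) return;
  CollectIndices(e->outer, acc, out);  // outer frame numbered first
  for (int i = 0; i < e->pushed_args; ++i) out->push_back((*acc)++);
}
int main() {
  Env outer{nullptr, 2}, inner{&outer, 2};
  int acc = 0;
  std::vector<int> indices;
  CollectIndices(&inner, &acc, &indices);
  assert((indices == std::vector<int>{0, 1, 2, 3}));  // globally unique
}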
@@ -1201,8 +1206,9 @@ LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
+ LOperand* function = UseFixed(instr->function(), rdi);
argument_count_ -= instr->argument_count();
- LCallFunction* result = new LCallFunction();
+ LCallFunction* result = new LCallFunction(function);
return MarkAsCall(DefineFixed(result, rax), instr);
}
@@ -1228,8 +1234,24 @@ LInstruction* LChunkBuilder::DoShl(HShl* instr) {
}
-LInstruction* LChunkBuilder::DoBitAnd(HBitAnd* instr) {
- return DoBit(Token::BIT_AND, instr);
+LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
+ if (instr->representation().IsInteger32()) {
+ ASSERT(instr->left()->representation().IsInteger32());
+ ASSERT(instr->right()->representation().IsInteger32());
+
+ LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
+ LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
+ return DefineSameAsFirst(new LBitI(left, right));
+ } else {
+ ASSERT(instr->representation().IsTagged());
+ ASSERT(instr->left()->representation().IsTagged());
+ ASSERT(instr->right()->representation().IsTagged());
+
+ LOperand* left = UseFixed(instr->left(), rdx);
+ LOperand* right = UseFixed(instr->right(), rax);
+ LArithmeticT* result = new LArithmeticT(instr->op(), left, right);
+ return MarkAsCall(DefineFixed(result, rax), instr);
+ }
}
@@ -1242,16 +1264,6 @@ LInstruction* LChunkBuilder::DoBitNot(HBitNot* instr) {
}
-LInstruction* LChunkBuilder::DoBitOr(HBitOr* instr) {
- return DoBit(Token::BIT_OR, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoBitXor(HBitXor* instr) {
- return DoBit(Token::BIT_XOR, instr);
-}
-
-
LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::DIV, instr);
@@ -1391,12 +1403,10 @@ LInstruction* LChunkBuilder::DoPower(HPower* instr) {
LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
- Token::Value op = instr->token();
ASSERT(instr->left()->representation().IsTagged());
ASSERT(instr->right()->representation().IsTagged());
- bool reversed = (op == Token::GT || op == Token::LTE);
- LOperand* left = UseFixed(instr->left(), reversed ? rax : rdx);
- LOperand* right = UseFixed(instr->right(), reversed ? rdx : rax);
+ LOperand* left = UseFixed(instr->left(), rdx);
+ LOperand* right = UseFixed(instr->right(), rax);
LCmpT* result = new LCmpT(left, right);
return MarkAsCall(DefineFixed(result, rax), instr);
}
@@ -1408,15 +1418,22 @@ LInstruction* LChunkBuilder::DoCompareIDAndBranch(
if (r.IsInteger32()) {
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
- LOperand* left = UseRegisterAtStart(instr->left());
+ LOperand* left = UseRegisterOrConstantAtStart(instr->left());
LOperand* right = UseOrConstantAtStart(instr->right());
return new LCmpIDAndBranch(left, right);
} else {
ASSERT(r.IsDouble());
ASSERT(instr->left()->representation().IsDouble());
ASSERT(instr->right()->representation().IsDouble());
- LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseRegisterAtStart(instr->right());
+ LOperand* left;
+ LOperand* right;
+ if (instr->left()->IsConstant() && instr->right()->IsConstant()) {
+ left = UseRegisterOrConstantAtStart(instr->left());
+ right = UseRegisterOrConstantAtStart(instr->right());
+ } else {
+ left = UseRegisterAtStart(instr->left());
+ right = UseRegisterAtStart(instr->right());
+ }
return new LCmpIDAndBranch(left, right);
}
}
@@ -1436,10 +1453,10 @@ LInstruction* LChunkBuilder::DoCompareConstantEqAndBranch(
}
-LInstruction* LChunkBuilder::DoIsNullAndBranch(HIsNullAndBranch* instr) {
+LInstruction* LChunkBuilder::DoIsNilAndBranch(HIsNilAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
- LOperand* temp = instr->is_strict() ? NULL : TempRegister();
- return new LIsNullAndBranch(UseRegisterAtStart(instr->value()), temp);
+ LOperand* temp = instr->kind() == kStrictEquality ? NULL : TempRegister();
+ return new LIsNilAndBranch(UseRegisterAtStart(instr->value()), temp);
}
@@ -1449,6 +1466,13 @@ LInstruction* LChunkBuilder::DoIsObjectAndBranch(HIsObjectAndBranch* instr) {
}
+LInstruction* LChunkBuilder::DoIsStringAndBranch(HIsStringAndBranch* instr) {
+ ASSERT(instr->value()->representation().IsTagged());
+ LOperand* temp = TempRegister();
+ return new LIsStringAndBranch(UseRegisterAtStart(instr->value()), temp);
+}
+
+
LInstruction* LChunkBuilder::DoIsSmiAndBranch(HIsSmiAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
return new LIsSmiAndBranch(Use(instr->value()));
@@ -1463,6 +1487,19 @@ LInstruction* LChunkBuilder::DoIsUndetectableAndBranch(
}
+LInstruction* LChunkBuilder::DoStringCompareAndBranch(
+ HStringCompareAndBranch* instr) {
+
+ ASSERT(instr->left()->representation().IsTagged());
+ ASSERT(instr->right()->representation().IsTagged());
+ LOperand* left = UseFixed(instr->left(), rdx);
+ LOperand* right = UseFixed(instr->right(), rax);
+ LStringCompareAndBranch* result = new LStringCompareAndBranch(left, right);
+
+ return MarkAsCall(result, instr);
+}
+
+
LInstruction* LChunkBuilder::DoHasInstanceTypeAndBranch(
HHasInstanceTypeAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
@@ -1489,6 +1526,7 @@ LInstruction* LChunkBuilder::DoHasCachedArrayIndexAndBranch(
LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
HClassOfTestAndBranch* instr) {
return new LClassOfTestAndBranch(UseTempRegister(instr->value()),
+ TempRegister(),
TempRegister());
}
@@ -1716,7 +1754,7 @@ LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) {
LLoadGlobalCell* result = new LLoadGlobalCell;
- return instr->check_hole_value()
+ return instr->RequiresHoleCheck()
? AssignEnvironment(DefineAsRegister(result))
: DefineAsRegister(result);
}
@@ -1731,8 +1769,10 @@ LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) {
LStoreGlobalCell* result =
- new LStoreGlobalCell(UseRegister(instr->value()), TempRegister());
- return instr->check_hole_value() ? AssignEnvironment(result) : result;
+ new LStoreGlobalCell(UseTempRegister(instr->value()),
+ TempRegister(),
+ TempRegister());
+ return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
}
@@ -1948,6 +1988,27 @@ LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
}
+LInstruction* LChunkBuilder::DoTransitionElementsKind(
+ HTransitionElementsKind* instr) {
+ if (instr->original_map()->elements_kind() == FAST_SMI_ONLY_ELEMENTS &&
+ instr->transitioned_map()->elements_kind() == FAST_ELEMENTS) {
+ LOperand* object = UseRegister(instr->object());
+ LOperand* new_map_reg = TempRegister();
+ LOperand* temp_reg = TempRegister();
+ LTransitionElementsKind* result =
+ new LTransitionElementsKind(object, new_map_reg, temp_reg);
+ return DefineSameAsFirst(result);
+ } else {
+ LOperand* object = UseFixed(instr->object(), rax);
+ LOperand* fixed_object_reg = FixedTemp(rdx);
+ LOperand* new_map_reg = FixedTemp(rbx);
+ LTransitionElementsKind* result =
+ new LTransitionElementsKind(object, new_map_reg, fixed_object_reg);
+ return MarkAsCall(DefineFixed(result, rax), instr);
+ }
+}
+
+
LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
bool needs_write_barrier = instr->NeedsWriteBarrier();
@@ -2010,8 +2071,14 @@ LInstruction* LChunkBuilder::DoArrayLiteral(HArrayLiteral* instr) {
}
-LInstruction* LChunkBuilder::DoObjectLiteral(HObjectLiteral* instr) {
- return MarkAsCall(DefineFixed(new LObjectLiteral, rax), instr);
+LInstruction* LChunkBuilder::DoObjectLiteralFast(HObjectLiteralFast* instr) {
+ return MarkAsCall(DefineFixed(new LObjectLiteralFast, rax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoObjectLiteralGeneric(
+ HObjectLiteralGeneric* instr) {
+ return MarkAsCall(DefineFixed(new LObjectLiteralGeneric, rax), instr);
}
diff --git a/deps/v8/src/x64/lithium-x64.h b/deps/v8/src/x64/lithium-x64.h
index d169bf6df..c21223b3a 100644
--- a/deps/v8/src/x64/lithium-x64.h
+++ b/deps/v8/src/x64/lithium-x64.h
@@ -107,10 +107,12 @@ class LCodeGen;
V(Integer32ToDouble) \
V(InvokeFunction) \
V(IsConstructCallAndBranch) \
- V(IsNullAndBranch) \
+ V(IsNilAndBranch) \
V(IsObjectAndBranch) \
+ V(IsStringAndBranch) \
V(IsSmiAndBranch) \
V(IsUndetectableAndBranch) \
+ V(StringCompareAndBranch) \
V(JSArrayLength) \
V(Label) \
V(LazyBailout) \
@@ -132,7 +134,8 @@ class LCodeGen;
V(NumberTagD) \
V(NumberTagI) \
V(NumberUntagD) \
- V(ObjectLiteral) \
+ V(ObjectLiteralFast) \
+ V(ObjectLiteralGeneric) \
V(OsrEntry) \
V(OuterContext) \
V(Parameter) \
@@ -162,6 +165,7 @@ class LCodeGen;
V(ThisFunction) \
V(Throw) \
V(ToFastProperties) \
+ V(TransitionElementsKind) \
V(Typeof) \
V(TypeofIsAndBranch) \
V(UnaryMathOperation) \
@@ -609,17 +613,18 @@ class LCmpConstantEqAndBranch: public LControlInstruction<1, 0> {
};
-class LIsNullAndBranch: public LControlInstruction<1, 1> {
+class LIsNilAndBranch: public LControlInstruction<1, 1> {
public:
- LIsNullAndBranch(LOperand* value, LOperand* temp) {
+ LIsNilAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
temps_[0] = temp;
}
- DECLARE_CONCRETE_INSTRUCTION(IsNullAndBranch, "is-null-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(IsNullAndBranch)
+ DECLARE_CONCRETE_INSTRUCTION(IsNilAndBranch, "is-nil-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(IsNilAndBranch)
- bool is_strict() const { return hydrogen()->is_strict(); }
+ EqualityKind kind() const { return hydrogen()->kind(); }
+ NilValue nil() const { return hydrogen()->nil(); }
virtual void PrintDataTo(StringStream* stream);
};
@@ -638,6 +643,20 @@ class LIsObjectAndBranch: public LControlInstruction<1, 0> {
};
+class LIsStringAndBranch: public LControlInstruction<1, 1> {
+ public:
+ explicit LIsStringAndBranch(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch, "is-string-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(IsStringAndBranch)
+
+ virtual void PrintDataTo(StringStream* stream);
+};
+
+
class LIsSmiAndBranch: public LControlInstruction<1, 0> {
public:
explicit LIsSmiAndBranch(LOperand* value) {
@@ -666,6 +685,23 @@ class LIsUndetectableAndBranch: public LControlInstruction<1, 1> {
};
+class LStringCompareAndBranch: public LControlInstruction<2, 0> {
+ public:
+ explicit LStringCompareAndBranch(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(StringCompareAndBranch,
+ "string-compare-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(StringCompareAndBranch)
+
+ virtual void PrintDataTo(StringStream* stream);
+
+ Token::Value op() const { return hydrogen()->token(); }
+};
+
+
class LHasInstanceTypeAndBranch: public LControlInstruction<1, 0> {
public:
explicit LHasInstanceTypeAndBranch(LOperand* value) {
@@ -705,11 +741,12 @@ class LHasCachedArrayIndexAndBranch: public LControlInstruction<1, 0> {
};
-class LClassOfTestAndBranch: public LControlInstruction<1, 1> {
+class LClassOfTestAndBranch: public LControlInstruction<1, 2> {
public:
- LClassOfTestAndBranch(LOperand* value, LOperand* temp) {
+ LClassOfTestAndBranch(LOperand* value, LOperand* temp, LOperand* temp2) {
inputs_[0] = value;
temps_[0] = temp;
+ temps_[1] = temp2;
}
DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch,
@@ -790,18 +827,15 @@ class LBoundsCheck: public LTemplateInstruction<0, 2, 0> {
class LBitI: public LTemplateInstruction<1, 2, 0> {
public:
- LBitI(Token::Value op, LOperand* left, LOperand* right)
- : op_(op) {
+ LBitI(LOperand* left, LOperand* right) {
inputs_[0] = left;
inputs_[1] = right;
}
- Token::Value op() const { return op_; }
+ Token::Value op() const { return hydrogen()->op(); }
DECLARE_CONCRETE_INSTRUCTION(BitI, "bit-i")
-
- private:
- Token::Value op_;
+ DECLARE_HYDROGEN_ACCESSOR(Bitwise)
};
@@ -1197,11 +1231,12 @@ class LLoadGlobalGeneric: public LTemplateInstruction<1, 1, 0> {
};
-class LStoreGlobalCell: public LTemplateInstruction<0, 1, 1> {
+class LStoreGlobalCell: public LTemplateInstruction<0, 1, 2> {
public:
- explicit LStoreGlobalCell(LOperand* value, LOperand* temp) {
+ explicit LStoreGlobalCell(LOperand* value, LOperand* temp1, LOperand* temp2) {
inputs_[0] = value;
- temps_[0] = temp;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
}
DECLARE_CONCRETE_INSTRUCTION(StoreGlobalCell, "store-global-cell")
@@ -1223,7 +1258,7 @@ class LStoreGlobalGeneric: public LTemplateInstruction<0, 2, 0> {
LOperand* global_object() { return InputAt(0); }
Handle<Object> name() const { return hydrogen()->name(); }
LOperand* value() { return InputAt(1); }
- bool strict_mode() { return hydrogen()->strict_mode(); }
+ StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
};
@@ -1257,7 +1292,6 @@ class LStoreContextSlot: public LTemplateInstruction<0, 2, 1> {
LOperand* context() { return InputAt(0); }
LOperand* value() { return InputAt(1); }
int slot_index() { return hydrogen()->slot_index(); }
- int needs_write_barrier() { return hydrogen()->NeedsWriteBarrier(); }
virtual void PrintDataTo(StringStream* stream);
};
@@ -1274,7 +1308,9 @@ class LPushArgument: public LTemplateInstruction<0, 1, 0> {
class LThisFunction: public LTemplateInstruction<1, 0, 0> {
+ public:
DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function")
+ DECLARE_HYDROGEN_ACCESSOR(ThisFunction)
};
@@ -1372,14 +1408,17 @@ class LCallNamed: public LTemplateInstruction<1, 0, 0> {
};
-class LCallFunction: public LTemplateInstruction<1, 0, 0> {
+class LCallFunction: public LTemplateInstruction<1, 1, 0> {
public:
- LCallFunction() {}
+ explicit LCallFunction(LOperand* function) {
+ inputs_[0] = function;
+ }
DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function")
DECLARE_HYDROGEN_ACCESSOR(CallFunction)
- int arity() const { return hydrogen()->argument_count() - 2; }
+ LOperand* function() { return inputs_[0]; }
+ int arity() const { return hydrogen()->argument_count() - 1; }
};
@@ -1548,7 +1587,6 @@ class LStoreNamedField: public LTemplateInstruction<0, 2, 1> {
Handle<Object> name() const { return hydrogen()->name(); }
bool is_in_object() { return hydrogen()->is_in_object(); }
int offset() { return hydrogen()->offset(); }
- bool needs_write_barrier() { return hydrogen()->NeedsWriteBarrier(); }
Handle<Map> transition() const { return hydrogen()->transition(); }
};
@@ -1568,7 +1606,7 @@ class LStoreNamedGeneric: public LTemplateInstruction<0, 2, 0> {
LOperand* object() { return inputs_[0]; }
LOperand* value() { return inputs_[1]; }
Handle<Object> name() const { return hydrogen()->name(); }
- bool strict_mode() { return hydrogen()->strict_mode(); }
+ StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
};
@@ -1653,7 +1691,31 @@ class LStoreKeyedGeneric: public LTemplateInstruction<0, 3, 0> {
LOperand* object() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
- bool strict_mode() { return hydrogen()->strict_mode(); }
+ StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
+};
+
+
+class LTransitionElementsKind: public LTemplateInstruction<1, 1, 2> {
+ public:
+ LTransitionElementsKind(LOperand* object,
+ LOperand* new_map_temp,
+ LOperand* temp_reg) {
+ inputs_[0] = object;
+ temps_[0] = new_map_temp;
+ temps_[1] = temp_reg;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(TransitionElementsKind,
+ "transition-elements-kind")
+ DECLARE_HYDROGEN_ACCESSOR(TransitionElementsKind)
+
+ virtual void PrintDataTo(StringStream* stream);
+
+ LOperand* object() { return inputs_[0]; }
+ LOperand* new_map_reg() { return temps_[0]; }
+ LOperand* temp_reg() { return temps_[1]; }
+ Handle<Map> original_map() { return hydrogen()->original_map(); }
+ Handle<Map> transitioned_map() { return hydrogen()->transitioned_map(); }
};
@@ -1828,10 +1890,17 @@ class LArrayLiteral: public LTemplateInstruction<1, 0, 0> {
};
-class LObjectLiteral: public LTemplateInstruction<1, 0, 0> {
+class LObjectLiteralFast: public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(ObjectLiteralFast, "object-literal-fast")
+ DECLARE_HYDROGEN_ACCESSOR(ObjectLiteralFast)
+};
+
+
+class LObjectLiteralGeneric: public LTemplateInstruction<1, 0, 0> {
public:
- DECLARE_CONCRETE_INSTRUCTION(ObjectLiteral, "object-literal")
- DECLARE_HYDROGEN_ACCESSOR(ObjectLiteral)
+ DECLARE_CONCRETE_INSTRUCTION(ObjectLiteralGeneric, "object-literal-generic")
+ DECLARE_HYDROGEN_ACCESSOR(ObjectLiteralGeneric)
};
@@ -2146,12 +2215,12 @@ class LChunkBuilder BASE_EMBEDDED {
LInstruction* instr, int ast_id);
void ClearInstructionPendingDeoptimizationEnvironment();
- LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env);
+ LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env,
+ int* argument_index_accumulator);
void VisitInstruction(HInstruction* current);
void DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block);
- LInstruction* DoBit(Token::Value op, HBitwiseBinaryOperation* instr);
LInstruction* DoShift(Token::Value op, HBitwiseBinaryOperation* instr);
LInstruction* DoArithmeticD(Token::Value op,
HArithmeticBinaryOperation* instr);
diff --git a/deps/v8/src/x64/macro-assembler-x64.cc b/deps/v8/src/x64/macro-assembler-x64.cc
index 9cfc9b658..caca628f1 100644
--- a/deps/v8/src/x64/macro-assembler-x64.cc
+++ b/deps/v8/src/x64/macro-assembler-x64.cc
@@ -44,6 +44,7 @@ MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
: Assembler(arg_isolate, buffer, size),
generating_stub_(false),
allow_stub_calls_(true),
+ has_frame_(false),
root_array_available_(true) {
if (isolate() != NULL) {
code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
@@ -54,7 +55,7 @@ MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
static intptr_t RootRegisterDelta(ExternalReference other, Isolate* isolate) {
Address roots_register_value = kRootRegisterBias +
- reinterpret_cast<Address>(isolate->heap()->roots_address());
+ reinterpret_cast<Address>(isolate->heap()->roots_array_start());
intptr_t delta = other.address() - roots_register_value;
return delta;
}
@@ -196,28 +197,47 @@ void MacroAssembler::CompareRoot(const Operand& with,
}
-void MacroAssembler::RecordWriteHelper(Register object,
- Register addr,
- Register scratch) {
- if (emit_debug_code()) {
- // Check that the object is not in new space.
- Label not_in_new_space;
- InNewSpace(object, scratch, not_equal, &not_in_new_space, Label::kNear);
- Abort("new-space object passed to RecordWriteHelper");
- bind(&not_in_new_space);
+void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
+ Register addr,
+ Register scratch,
+ SaveFPRegsMode save_fp,
+ RememberedSetFinalAction and_then) {
+ if (FLAG_debug_code) {
+ Label ok;
+ JumpIfNotInNewSpace(object, scratch, &ok, Label::kNear);
+ int3();
+ bind(&ok);
+ }
+ // Load store buffer top.
+ LoadRoot(scratch, Heap::kStoreBufferTopRootIndex);
+ // Store pointer to buffer.
+ movq(Operand(scratch, 0), addr);
+ // Increment buffer top.
+ addq(scratch, Immediate(kPointerSize));
+ // Write back new top of buffer.
+ StoreRoot(scratch, Heap::kStoreBufferTopRootIndex);
+ // Call stub on end of buffer.
+ Label done;
+ // Check for end of buffer.
+ testq(scratch, Immediate(StoreBuffer::kStoreBufferOverflowBit));
+ if (and_then == kReturnAtEnd) {
+ Label buffer_overflowed;
+ j(not_equal, &buffer_overflowed, Label::kNear);
+ ret(0);
+ bind(&buffer_overflowed);
+ } else {
+ ASSERT(and_then == kFallThroughAtEnd);
+ j(equal, &done, Label::kNear);
+ }
+ StoreBufferOverflowStub store_buffer_overflow =
+ StoreBufferOverflowStub(save_fp);
+ CallStub(&store_buffer_overflow);
+ if (and_then == kReturnAtEnd) {
+ ret(0);
+ } else {
+ ASSERT(and_then == kFallThroughAtEnd);
+ bind(&done);
}
-
- // Compute the page start address from the heap object pointer, and reuse
- // the 'object' register for it.
- and_(object, Immediate(~Page::kPageAlignmentMask));
-
- // Compute number of region covering addr. See Page::GetRegionNumberForAddress
- // method for more details.
- shrl(addr, Immediate(Page::kRegionSizeLog2));
- andl(addr, Immediate(Page::kPageAlignmentMask >> Page::kRegionSizeLog2));
-
- // Set dirty mark for region.
- bts(Operand(object, Page::kDirtyFlagOffset), addr);
}
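
The new RememberedSetHelper above replaces per-region dirty marks with a store buffer: each recorded slot address is appended to the buffer, and a stub flushes it on overflow. As a rough model of the emitted sequence (a minimal sketch; StoreBufferAppend, overflow_bit and overflow_stub are illustrative stand-ins, not V8 API):

#include <cstdint>

// Append the slot address to the store buffer; the buffer is aligned so
// overflow is detectable from a bit in the new top pointer.
void StoreBufferAppend(uintptr_t** store_buffer_top, uintptr_t* slot,
                       uintptr_t overflow_bit, void (*overflow_stub)()) {
  uintptr_t* top = *store_buffer_top;
  *top = reinterpret_cast<uintptr_t>(slot);  // Store pointer to buffer.
  ++top;                                     // Increment buffer top.
  *store_buffer_top = top;                   // Write back new top.
  if (reinterpret_cast<uintptr_t>(top) & overflow_bit) {
    overflow_stub();                         // Flush on end of buffer.
  }
}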
@@ -225,7 +245,7 @@ void MacroAssembler::InNewSpace(Register object,
Register scratch,
Condition cc,
Label* branch,
- Label::Distance near_jump) {
+ Label::Distance distance) {
if (Serializer::enabled()) {
// Can't do arithmetic on external references if it might get serialized.
// The mask isn't really an address. We load it as an external reference in
@@ -240,7 +260,7 @@ void MacroAssembler::InNewSpace(Register object,
}
movq(kScratchRegister, ExternalReference::new_space_start(isolate()));
cmpq(scratch, kScratchRegister);
- j(cc, branch, near_jump);
+ j(cc, branch, distance);
} else {
ASSERT(is_int32(static_cast<int64_t>(HEAP->NewSpaceMask())));
intptr_t new_space_start =
@@ -252,127 +272,162 @@ void MacroAssembler::InNewSpace(Register object,
lea(scratch, Operand(object, kScratchRegister, times_1, 0));
}
and_(scratch, Immediate(static_cast<int32_t>(HEAP->NewSpaceMask())));
- j(cc, branch, near_jump);
+ j(cc, branch, distance);
}
}
-void MacroAssembler::RecordWrite(Register object,
- int offset,
- Register value,
- Register index) {
+void MacroAssembler::RecordWriteField(
+ Register object,
+ int offset,
+ Register value,
+ Register dst,
+ SaveFPRegsMode save_fp,
+ RememberedSetAction remembered_set_action,
+ SmiCheck smi_check) {
// The compiled code assumes that record write doesn't change the
// context register, so we check that none of the clobbered
// registers are rsi.
- ASSERT(!object.is(rsi) && !value.is(rsi) && !index.is(rsi));
+ ASSERT(!value.is(rsi) && !dst.is(rsi));
// First, check if a write barrier is even needed. The tests below
- // catch stores of smis and stores into the young generation.
+ // catch stores of Smis.
Label done;
- JumpIfSmi(value, &done);
- RecordWriteNonSmi(object, offset, value, index);
+ // Skip barrier if writing a smi.
+ if (smi_check == INLINE_SMI_CHECK) {
+ JumpIfSmi(value, &done);
+ }
+
+ // Although the object register is tagged, the offset is relative to the
+ // start of the object, so the offset must be a multiple of kPointerSize.
+ ASSERT(IsAligned(offset, kPointerSize));
+
+ lea(dst, FieldOperand(object, offset));
+ if (emit_debug_code()) {
+ Label ok;
+ testb(dst, Immediate((1 << kPointerSizeLog2) - 1));
+ j(zero, &ok, Label::kNear);
+ int3();
+ bind(&ok);
+ }
+
+ RecordWrite(
+ object, dst, value, save_fp, remembered_set_action, OMIT_SMI_CHECK);
+
bind(&done);
- // Clobber all input registers when running with the debug-code flag
- // turned on to provoke errors. This clobbering repeats the
- // clobbering done inside RecordWriteNonSmi but it's necessary to
- // avoid having the fast case for smis leave the registers
- // unchanged.
+ // Clobber clobbered input registers when running with the debug-code flag
+ // turned on to provoke errors.
if (emit_debug_code()) {
- movq(object, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
- movq(index, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
+ movq(dst, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
}
}
-void MacroAssembler::RecordWrite(Register object,
- Register address,
- Register value) {
- // The compiled code assumes that record write doesn't change the
- // context register, so we check that none of the clobbered
- // registers are rsi.
- ASSERT(!object.is(rsi) && !value.is(rsi) && !address.is(rsi));
-
+void MacroAssembler::RecordWriteArray(Register object,
+ Register value,
+ Register index,
+ SaveFPRegsMode save_fp,
+ RememberedSetAction remembered_set_action,
+ SmiCheck smi_check) {
// First, check if a write barrier is even needed. The tests below
- // catch stores of smis and stores into the young generation.
+ // catch stores of Smis.
Label done;
- JumpIfSmi(value, &done);
- InNewSpace(object, value, equal, &done);
+ // Skip barrier if writing a smi.
+ if (smi_check == INLINE_SMI_CHECK) {
+ JumpIfSmi(value, &done);
+ }
+
+ // Array access: calculate the destination address. Index is not a smi.
+ Register dst = index;
+ lea(dst, Operand(object, index, times_pointer_size,
+ FixedArray::kHeaderSize - kHeapObjectTag));
- RecordWriteHelper(object, address, value);
+ RecordWrite(
+ object, dst, value, save_fp, remembered_set_action, OMIT_SMI_CHECK);
bind(&done);
- // Clobber all input registers when running with the debug-code flag
+ // Clobber clobbered input registers when running with the debug-code flag
// turned on to provoke errors.
if (emit_debug_code()) {
- movq(object, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
- movq(address, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
+ movq(index, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
}
}
-void MacroAssembler::RecordWriteNonSmi(Register object,
- int offset,
- Register scratch,
- Register index) {
- Label done;
+void MacroAssembler::RecordWrite(Register object,
+ Register address,
+ Register value,
+ SaveFPRegsMode fp_mode,
+ RememberedSetAction remembered_set_action,
+ SmiCheck smi_check) {
+ // The compiled code assumes that record write doesn't change the
+ // context register, so we check that none of the clobbered
+ // registers are rsi.
+ ASSERT(!value.is(rsi) && !address.is(rsi));
+ ASSERT(!object.is(value));
+ ASSERT(!object.is(address));
+ ASSERT(!value.is(address));
if (emit_debug_code()) {
- Label okay;
- JumpIfNotSmi(object, &okay, Label::kNear);
- Abort("MacroAssembler::RecordWriteNonSmi cannot deal with smis");
- bind(&okay);
-
- if (offset == 0) {
- // index must be int32.
- Register tmp = index.is(rax) ? rbx : rax;
- push(tmp);
- movl(tmp, index);
- cmpq(tmp, index);
- Check(equal, "Index register for RecordWrite must be untagged int32.");
- pop(tmp);
- }
+ AbortIfSmi(object);
}
- // Test that the object address is not in the new space. We cannot
- // update page dirty marks for new space pages.
- InNewSpace(object, scratch, equal, &done);
+ if (remembered_set_action == OMIT_REMEMBERED_SET &&
+ !FLAG_incremental_marking) {
+ return;
+ }
- // The offset is relative to a tagged or untagged HeapObject pointer,
- // so either offset or offset + kHeapObjectTag must be a
- // multiple of kPointerSize.
- ASSERT(IsAligned(offset, kPointerSize) ||
- IsAligned(offset + kHeapObjectTag, kPointerSize));
+ if (FLAG_debug_code) {
+ Label ok;
+ cmpq(value, Operand(address, 0));
+ j(equal, &ok, Label::kNear);
+ int3();
+ bind(&ok);
+ }
- Register dst = index;
- if (offset != 0) {
- lea(dst, Operand(object, offset));
- } else {
- // array access: calculate the destination address in the same manner as
- // KeyedStoreIC::GenerateGeneric.
- lea(dst, FieldOperand(object,
- index,
- times_pointer_size,
- FixedArray::kHeaderSize));
+ // First, check if a write barrier is even needed. The tests below
+ // catch stores of smis and stores into the young generation.
+ Label done;
+
+ if (smi_check == INLINE_SMI_CHECK) {
+ // Skip barrier if writing a smi.
+ JumpIfSmi(value, &done);
}
- RecordWriteHelper(object, dst, scratch);
+
+ CheckPageFlag(value,
+ value, // Used as scratch.
+ MemoryChunk::kPointersToHereAreInterestingMask,
+ zero,
+ &done,
+ Label::kNear);
+
+ CheckPageFlag(object,
+ value, // Used as scratch.
+ MemoryChunk::kPointersFromHereAreInterestingMask,
+ zero,
+ &done,
+ Label::kNear);
+
+ RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode);
+ CallStub(&stub);
bind(&done);
- // Clobber all input registers when running with the debug-code flag
+ // Clobber clobbered registers when running with the debug-code flag
// turned on to provoke errors.
if (emit_debug_code()) {
- movq(object, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
- movq(scratch, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
- movq(index, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
+ movq(address, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
+ movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
}
}
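
The rewritten RecordWrite filters stores before calling the write-barrier stub: smi values and pages whose flags mark them uninteresting skip the barrier entirely. A minimal C++ sketch of that decision, with a hypothetical PageFlags struct standing in for MemoryChunk's flag bits:

// Hypothetical page descriptor; the real bits are
// MemoryChunk::kPointersToHereAreInterestingMask and
// MemoryChunk::kPointersFromHereAreInterestingMask.
struct PageFlags {
  bool pointers_to_here_interesting;
  bool pointers_from_here_interesting;
};

bool NeedsWriteBarrier(bool value_is_smi,
                       const PageFlags& value_page,
                       const PageFlags& object_page) {
  if (value_is_smi) return false;                                 // Smis are not pointers.
  if (!value_page.pointers_to_here_interesting) return false;     // Target not tracked.
  if (!object_page.pointers_from_here_interesting) return false;  // Source not tracked.
  return true;  // Fall through to RecordWriteStub.
}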
+
void MacroAssembler::Assert(Condition cc, const char* msg) {
if (emit_debug_code()) Check(cc, msg);
}
@@ -400,7 +455,7 @@ void MacroAssembler::Check(Condition cc, const char* msg) {
Label L;
j(cc, &L, Label::kNear);
Abort(msg);
- // will not return here
+ // Control will not return here.
bind(&L);
}
@@ -448,9 +503,6 @@ void MacroAssembler::Abort(const char* msg) {
RecordComment(msg);
}
#endif
- // Disable stub call restrictions to always allow calls to abort.
- AllowStubCallsScope allow_scope(this, true);
-
push(rax);
movq(kScratchRegister, p0, RelocInfo::NONE);
push(kScratchRegister);
@@ -458,52 +510,44 @@ void MacroAssembler::Abort(const char* msg) {
reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(p1 - p0))),
RelocInfo::NONE);
push(kScratchRegister);
- CallRuntime(Runtime::kAbort, 2);
- // will not return here
+
+ if (!has_frame_) {
+ // We don't actually want to generate a pile of code for this, so just
+ // claim there is a stack frame, without generating one.
+ FrameScope scope(this, StackFrame::NONE);
+ CallRuntime(Runtime::kAbort, 2);
+ } else {
+ CallRuntime(Runtime::kAbort, 2);
+ }
+ // Control will not return here.
int3();
}
void MacroAssembler::CallStub(CodeStub* stub, unsigned ast_id) {
- ASSERT(allow_stub_calls()); // calls are not allowed in some stubs
+ ASSERT(AllowThisStubCall(stub)); // Calls are not allowed in some stubs.
Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id);
}
-MaybeObject* MacroAssembler::TryCallStub(CodeStub* stub) {
- ASSERT(allow_stub_calls()); // Calls are not allowed in some stubs.
- MaybeObject* result = stub->TryGetCode();
- if (!result->IsFailure()) {
- call(Handle<Code>(Code::cast(result->ToObjectUnchecked())),
- RelocInfo::CODE_TARGET);
- }
- return result;
-}
-
-
void MacroAssembler::TailCallStub(CodeStub* stub) {
- ASSERT(allow_stub_calls()); // Calls are not allowed in some stubs.
+ ASSERT(allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe());
Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
}
-MaybeObject* MacroAssembler::TryTailCallStub(CodeStub* stub) {
- ASSERT(allow_stub_calls()); // Calls are not allowed in some stubs.
- MaybeObject* result = stub->TryGetCode();
- if (!result->IsFailure()) {
- jmp(Handle<Code>(Code::cast(result->ToObjectUnchecked())),
- RelocInfo::CODE_TARGET);
- }
- return result;
-}
-
-
void MacroAssembler::StubReturn(int argc) {
ASSERT(argc >= 1 && generating_stub());
ret((argc - 1) * kPointerSize);
}
+bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
+ if (!has_frame_ && stub->SometimesSetsUpAFrame()) return false;
+ return allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe();
+}
+
+
void MacroAssembler::IllegalOperation(int num_arguments) {
if (num_arguments > 0) {
addq(rsp, Immediate(num_arguments * kPointerSize));
@@ -540,18 +584,11 @@ void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
const Runtime::Function* function = Runtime::FunctionForId(id);
Set(rax, function->nargs);
LoadAddress(rbx, ExternalReference(function, isolate()));
- CEntryStub ces(1);
- ces.SaveDoubles();
+ CEntryStub ces(1, kSaveFPRegs);
CallStub(&ces);
}
-MaybeObject* MacroAssembler::TryCallRuntime(Runtime::FunctionId id,
- int num_arguments) {
- return TryCallRuntime(Runtime::FunctionForId(id), num_arguments);
-}
-
-
void MacroAssembler::CallRuntime(const Runtime::Function* f,
int num_arguments) {
// If the expected number of arguments of the runtime function is
@@ -573,26 +610,6 @@ void MacroAssembler::CallRuntime(const Runtime::Function* f,
}
-MaybeObject* MacroAssembler::TryCallRuntime(const Runtime::Function* f,
- int num_arguments) {
- if (f->nargs >= 0 && f->nargs != num_arguments) {
- IllegalOperation(num_arguments);
- // Since we did not call the stub, there was no allocation failure.
- // Return some non-failure object.
- return HEAP->undefined_value();
- }
-
- // TODO(1236192): Most runtime routines don't need the number of
- // arguments passed in because it is constant. At some point we
- // should remove this need and make the runtime routine entry code
- // smarter.
- Set(rax, num_arguments);
- LoadAddress(rbx, ExternalReference(f, isolate()));
- CEntryStub ces(f->result_size);
- return TryCallStub(&ces);
-}
-
-
void MacroAssembler::CallExternalReference(const ExternalReference& ext,
int num_arguments) {
Set(rax, num_arguments);
@@ -622,24 +639,6 @@ void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
}
-MaybeObject* MacroAssembler::TryTailCallExternalReference(
- const ExternalReference& ext, int num_arguments, int result_size) {
- // ----------- S t a t e -------------
- // -- rsp[0] : return address
- // -- rsp[8] : argument num_arguments - 1
- // ...
- // -- rsp[8 * num_arguments] : argument 0 (receiver)
- // -----------------------------------
-
- // TODO(1236192): Most runtime routines don't need the number of
- // arguments passed in because it is constant. At some point we
- // should remove this need and make the runtime routine entry code
- // smarter.
- Set(rax, num_arguments);
- return TryJumpToExternalReference(ext, result_size);
-}
-
-
void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
int num_arguments,
int result_size) {
@@ -649,15 +648,6 @@ void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
}
-MaybeObject* MacroAssembler::TryTailCallRuntime(Runtime::FunctionId fid,
- int num_arguments,
- int result_size) {
- return TryTailCallExternalReference(ExternalReference(fid, isolate()),
- num_arguments,
- result_size);
-}
-
-
static int Offset(ExternalReference ref0, ExternalReference ref1) {
int64_t offset = (ref0.address() - ref1.address());
// Check that fits into int.
@@ -680,8 +670,8 @@ void MacroAssembler::PrepareCallApiFunction(int arg_stack_space) {
}
-MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn(
- ApiFunction* function, int stack_space) {
+void MacroAssembler::CallApiFunctionAndReturn(Address function_address,
+ int stack_space) {
Label empty_result;
Label prologue;
Label promote_scheduled_exception;
@@ -711,8 +701,7 @@ MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn(
movq(prev_limit_reg, Operand(base_reg, kLimitOffset));
addl(Operand(base_reg, kLevelOffset), Immediate(1));
// Call the api function!
- movq(rax,
- reinterpret_cast<int64_t>(function->address()),
+ movq(rax, reinterpret_cast<int64_t>(function_address),
RelocInfo::RUNTIME_ENTRY);
call(rax);
@@ -744,11 +733,7 @@ MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn(
ret(stack_space * kPointerSize);
bind(&promote_scheduled_exception);
- MaybeObject* result = TryTailCallRuntime(Runtime::kPromoteScheduledException,
- 0, 1);
- if (result->IsFailure()) {
- return result;
- }
+ TailCallRuntime(Runtime::kPromoteScheduledException, 0, 1);
bind(&empty_result);
// It was zero; the result is undefined.
@@ -769,8 +754,6 @@ MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn(
call(rax);
movq(rax, prev_limit_reg);
jmp(&leave_exit_frame);
-
- return result;
}
@@ -783,20 +766,11 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& ext,
}
-MaybeObject* MacroAssembler::TryJumpToExternalReference(
- const ExternalReference& ext, int result_size) {
- // Set the entry point and jump to the C entry runtime stub.
- LoadAddress(rbx, ext);
- CEntryStub ces(result_size);
- return TryTailCallStub(&ces);
-}
-
-
void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
InvokeFlag flag,
const CallWrapper& call_wrapper) {
- // Calls are not allowed in some stubs.
- ASSERT(flag == JUMP_FUNCTION || allow_stub_calls());
+ // You can't call a builtin without a valid frame.
+ ASSERT(flag == JUMP_FUNCTION || has_frame());
// Rely on the assertion to check that the number of provided
// arguments match the expected number of arguments. Fake a
@@ -825,6 +799,57 @@ void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
}
+static const Register saved_regs[] =
+ { rax, rcx, rdx, rbx, rbp, rsi, rdi, r8, r9, r10, r11 };
+static const int kNumberOfSavedRegs = sizeof(saved_regs) / sizeof(Register);
+
+
+void MacroAssembler::PushCallerSaved(SaveFPRegsMode fp_mode,
+ Register exclusion1,
+ Register exclusion2,
+ Register exclusion3) {
+ // We don't allow a GC during a store buffer overflow so there is no need to
+ // store the registers in any particular way, but we do have to store and
+ // restore them.
+ for (int i = 0; i < kNumberOfSavedRegs; i++) {
+ Register reg = saved_regs[i];
+ if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) {
+ push(reg);
+ }
+ }
+ // r12 to r15 are callee-saved on all platforms.
+ if (fp_mode == kSaveFPRegs) {
+ CpuFeatures::Scope scope(SSE2);
+ subq(rsp, Immediate(kDoubleSize * XMMRegister::kNumRegisters));
+ for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
+ XMMRegister reg = XMMRegister::from_code(i);
+ movsd(Operand(rsp, i * kDoubleSize), reg);
+ }
+ }
+}
+
+
+void MacroAssembler::PopCallerSaved(SaveFPRegsMode fp_mode,
+ Register exclusion1,
+ Register exclusion2,
+ Register exclusion3) {
+ if (fp_mode == kSaveFPRegs) {
+ CpuFeatures::Scope scope(SSE2);
+ for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
+ XMMRegister reg = XMMRegister::from_code(i);
+ movsd(reg, Operand(rsp, i * kDoubleSize));
+ }
+ addq(rsp, Immediate(kDoubleSize * XMMRegister::kNumRegisters));
+ }
+ for (int i = kNumberOfSavedRegs - 1; i >= 0; i--) {
+ Register reg = saved_regs[i];
+ if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) {
+ pop(reg);
+ }
+ }
+}
+
+
void MacroAssembler::Set(Register dst, int64_t x) {
if (x == 0) {
xorl(dst, dst);
@@ -2236,6 +2261,13 @@ void MacroAssembler::Test(const Operand& src, Smi* source) {
}
+void MacroAssembler::TestBit(const Operand& src, int bits) {
+ int byte_offset = bits / kBitsPerByte;
+ int bit_in_byte = bits & (kBitsPerByte - 1);
+ testb(Operand(src, byte_offset), Immediate(1 << bit_in_byte));
+}
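
TestBit addresses one flag inside a multi-byte field a byte at a time, which keeps the immediate small. A scalar equivalent, assuming 8 bits per byte:

#include <cstdint>

// Equivalent computation for TestBit(src, bits): for bits == 12 this probes
// bit 4 (12 % 8) of byte 1 (12 / 8).
bool TestBitModel(const uint8_t* field, int bits) {
  int byte_offset = bits / 8;
  int bit_in_byte = bits & 7;
  return ((field[byte_offset] >> bit_in_byte) & 1) != 0;
}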
+
+
void MacroAssembler::Jump(ExternalReference ext) {
LoadAddress(kScratchRegister, ext);
jmp(kScratchRegister);
@@ -2385,86 +2417,105 @@ Operand MacroAssembler::SafepointRegisterSlot(Register reg) {
void MacroAssembler::PushTryHandler(CodeLocation try_location,
- HandlerType type) {
+ HandlerType type,
+ int handler_index) {
// Adjust this code if not the case.
STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kContextOffset == 1 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kStateOffset == 3 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kPCOffset == 4 * kPointerSize);
-
- // The pc (return address) is already on TOS. This code pushes state,
- // frame pointer, context, and current handler.
+ STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
+ STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
+
+ // We will build up the handler from the bottom by pushing on the stack.
+ // First compute the state and push the frame pointer and context.
+ unsigned state = StackHandler::OffsetField::encode(handler_index);
if (try_location == IN_JAVASCRIPT) {
- if (type == TRY_CATCH_HANDLER) {
- push(Immediate(StackHandler::TRY_CATCH));
- } else {
- push(Immediate(StackHandler::TRY_FINALLY));
- }
push(rbp);
push(rsi);
+ state |= (type == TRY_CATCH_HANDLER)
+ ? StackHandler::KindField::encode(StackHandler::TRY_CATCH)
+ : StackHandler::KindField::encode(StackHandler::TRY_FINALLY);
} else {
ASSERT(try_location == IN_JS_ENTRY);
- // The frame pointer does not point to a JS frame so we save NULL
- // for rbp. We expect the code throwing an exception to check rbp
- // before dereferencing it to restore the context.
- push(Immediate(StackHandler::ENTRY));
+ // The frame pointer does not point to a JS frame so we save NULL for
+ // rbp. We expect the code throwing an exception to check rbp before
+ // dereferencing it to restore the context.
push(Immediate(0)); // NULL frame pointer.
Push(Smi::FromInt(0)); // No context.
+ state |= StackHandler::KindField::encode(StackHandler::ENTRY);
}
- // Save the current handler.
- Operand handler_operand =
- ExternalOperand(ExternalReference(Isolate::kHandlerAddress, isolate()));
- push(handler_operand);
- // Link this handler.
- movq(handler_operand, rsp);
+
+ // Push the state and the code object.
+ push(Immediate(state));
+ Push(CodeObject());
+
+ // Link the current handler as the next handler.
+ ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
+ push(ExternalOperand(handler_address));
+ // Set this new handler as the current one.
+ movq(ExternalOperand(handler_address), rsp);
}
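
PushTryHandler now folds the handler kind and the handler-table index into a single state word instead of pushing the kind separately. A sketch of the encoding, with an assumed two-bit kind field (the real widths live in StackHandler::KindField and StackHandler::OffsetField):

// Assumed field layout: low bits hold the kind, remaining bits the index.
const unsigned kKindWidth = 2;  // illustrative width

unsigned EncodeHandlerState(unsigned handler_index, unsigned kind) {
  return (handler_index << kKindWidth) | kind;
}

unsigned HandlerIndex(unsigned state) { return state >> kKindWidth; }
unsigned HandlerKind(unsigned state) { return state & ((1u << kKindWidth) - 1); }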
void MacroAssembler::PopTryHandler() {
- ASSERT_EQ(0, StackHandlerConstants::kNextOffset);
- // Unlink this handler.
- Operand handler_operand =
- ExternalOperand(ExternalReference(Isolate::kHandlerAddress, isolate()));
- pop(handler_operand);
- // Remove the remaining fields.
+ STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
+ ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
+ pop(ExternalOperand(handler_address));
addq(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize));
}
+void MacroAssembler::JumpToHandlerEntry() {
+ // Compute the handler entry address and jump to it. The handler table is
+ // a fixed array of (smi-tagged) code offsets.
+ // rax = exception, rdi = code object, rdx = state.
+ movq(rbx, FieldOperand(rdi, Code::kHandlerTableOffset));
+ shr(rdx, Immediate(StackHandler::kKindWidth));
+ movq(rdx, FieldOperand(rbx, rdx, times_8, FixedArray::kHeaderSize));
+ SmiToInteger64(rdx, rdx);
+ lea(rdi, FieldOperand(rdi, rdx, times_1, Code::kHeaderSize));
+ jmp(rdi);
+}
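
JumpToHandlerEntry turns the state word back into a code address by indexing the code object's handler table. In rough scalar terms (a sketch, not the real accessors; on x64 a smi keeps its 32-bit value in the upper half of the word):

#include <cstdint>

// handler_table is a fixed array of smi-tagged code offsets; code_start is
// the first instruction of the code object.
uintptr_t ResolveHandlerEntry(const intptr_t* handler_table,
                              uintptr_t code_start,
                              unsigned state,
                              unsigned kind_width) {
  intptr_t offset = handler_table[state >> kind_width] >> 32;  // untag the smi
  return code_start + offset;  // the address the dispatch jumps to
}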
+
+
void MacroAssembler::Throw(Register value) {
// Adjust this code if not the case.
STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kContextOffset == 1 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kStateOffset == 3 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kPCOffset == 4 * kPointerSize);
- // Keep thrown value in rax.
+ STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
+ STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
+
+ // The exception is expected in rax.
if (!value.is(rax)) {
movq(rax, value);
}
-
+ // Drop the stack pointer to the top of the top handler.
ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
- Operand handler_operand = ExternalOperand(handler_address);
- movq(rsp, handler_operand);
- // get next in chain
- pop(handler_operand);
+ movq(rsp, ExternalOperand(handler_address));
+ // Restore the next handler.
+ pop(ExternalOperand(handler_address));
+
+ // Remove the code object and state, compute the handler address in rdi.
+ pop(rdi); // Code object.
+ pop(rdx); // Offset and state.
+
+ // Restore the context and frame pointer.
pop(rsi); // Context.
pop(rbp); // Frame pointer.
- pop(rdx); // State.
// If the handler is a JS frame, restore the context to the frame.
- // (rdx == ENTRY) == (rbp == 0) == (rsi == 0), so we could test any
- // of them.
+ // (kind == ENTRY) == (rbp == 0) == (rsi == 0), so we could test either
+ // rbp or rsi.
Label skip;
- cmpq(rdx, Immediate(StackHandler::ENTRY));
- j(equal, &skip, Label::kNear);
+ testq(rsi, rsi);
+ j(zero, &skip, Label::kNear);
movq(Operand(rbp, StandardFrameConstants::kContextOffset), rsi);
bind(&skip);
- ret(0);
+ JumpToHandlerEntry();
}
@@ -2472,40 +2523,17 @@ void MacroAssembler::ThrowUncatchable(UncatchableExceptionType type,
Register value) {
// Adjust this code if not the case.
STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kContextOffset == 1 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kStateOffset == 3 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kPCOffset == 4 * kPointerSize);
- // Keep thrown value in rax.
- if (!value.is(rax)) {
- movq(rax, value);
- }
- // Fetch top stack handler.
- ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
- Load(rsp, handler_address);
-
- // Unwind the handlers until the ENTRY handler is found.
- Label loop, done;
- bind(&loop);
- // Load the type of the current stack handler.
- const int kStateOffset = StackHandlerConstants::kStateOffset;
- cmpq(Operand(rsp, kStateOffset), Immediate(StackHandler::ENTRY));
- j(equal, &done, Label::kNear);
- // Fetch the next handler in the list.
- const int kNextOffset = StackHandlerConstants::kNextOffset;
- movq(rsp, Operand(rsp, kNextOffset));
- jmp(&loop);
- bind(&done);
-
- // Set the top handler address to next handler past the current ENTRY handler.
- Operand handler_operand = ExternalOperand(handler_address);
- pop(handler_operand);
+ STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
+ STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
+ // The exception is expected in rax.
if (type == OUT_OF_MEMORY) {
// Set external caught exception to false.
- ExternalReference external_caught(
- Isolate::kExternalCaughtExceptionAddress, isolate());
+ ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress,
+ isolate());
Set(rax, static_cast<int64_t>(false));
Store(external_caught, rax);
@@ -2514,16 +2542,38 @@ void MacroAssembler::ThrowUncatchable(UncatchableExceptionType type,
isolate());
movq(rax, Failure::OutOfMemoryException(), RelocInfo::NONE);
Store(pending_exception, rax);
+ } else if (!value.is(rax)) {
+ movq(rax, value);
}
- // Discard the context saved in the handler and clear the context pointer.
- pop(rdx);
- Set(rsi, 0);
+ // Drop the stack pointer to the top of the top stack handler.
+ ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
+ Load(rsp, handler_address);
- pop(rbp); // Restore frame pointer.
- pop(rdx); // Discard state.
+ // Unwind the handlers until the top ENTRY handler is found.
+ Label fetch_next, check_kind;
+ jmp(&check_kind, Label::kNear);
+ bind(&fetch_next);
+ movq(rsp, Operand(rsp, StackHandlerConstants::kNextOffset));
- ret(0);
+ bind(&check_kind);
+ STATIC_ASSERT(StackHandler::ENTRY == 0);
+ testl(Operand(rsp, StackHandlerConstants::kStateOffset),
+ Immediate(StackHandler::KindField::kMask));
+ j(not_zero, &fetch_next);
+
+ // Set the top handler address to next handler past the top ENTRY handler.
+ pop(ExternalOperand(handler_address));
+
+ // Remove the code object and state, compute the handler address in rdi.
+ pop(rdi); // Code object.
+ pop(rdx); // Offset and state.
+
+ // Clear the context pointer and frame pointer (0 was saved in the handler).
+ pop(rsi);
+ pop(rbp);
+
+ JumpToHandlerEntry();
}
@@ -2567,13 +2617,91 @@ void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
void MacroAssembler::CheckFastElements(Register map,
Label* fail,
Label::Distance distance) {
- STATIC_ASSERT(FAST_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_ELEMENTS == 1);
cmpb(FieldOperand(map, Map::kBitField2Offset),
Immediate(Map::kMaximumBitField2FastElementValue));
j(above, fail, distance);
}
+void MacroAssembler::CheckFastObjectElements(Register map,
+ Label* fail,
+ Label::Distance distance) {
+ STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_ELEMENTS == 1);
+ cmpb(FieldOperand(map, Map::kBitField2Offset),
+ Immediate(Map::kMaximumBitField2FastSmiOnlyElementValue));
+ j(below_equal, fail, distance);
+ cmpb(FieldOperand(map, Map::kBitField2Offset),
+ Immediate(Map::kMaximumBitField2FastElementValue));
+ j(above, fail, distance);
+}
+
+
+void MacroAssembler::CheckFastSmiOnlyElements(Register map,
+ Label* fail,
+ Label::Distance distance) {
+ STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
+ cmpb(FieldOperand(map, Map::kBitField2Offset),
+ Immediate(Map::kMaximumBitField2FastSmiOnlyElementValue));
+ j(above, fail, distance);
+}
+
+
+void MacroAssembler::StoreNumberToDoubleElements(
+ Register maybe_number,
+ Register elements,
+ Register index,
+ XMMRegister xmm_scratch,
+ Label* fail) {
+ Label smi_value, is_nan, maybe_nan, not_nan, have_double_value, done;
+
+ JumpIfSmi(maybe_number, &smi_value, Label::kNear);
+
+ CheckMap(maybe_number,
+ isolate()->factory()->heap_number_map(),
+ fail,
+ DONT_DO_SMI_CHECK);
+
+ // Double value, canonicalize NaN.
+ uint32_t offset = HeapNumber::kValueOffset + sizeof(kHoleNanLower32);
+ cmpl(FieldOperand(maybe_number, offset),
+ Immediate(kNaNOrInfinityLowerBoundUpper32));
+ j(greater_equal, &maybe_nan, Label::kNear);
+
+ bind(&not_nan);
+ movsd(xmm_scratch, FieldOperand(maybe_number, HeapNumber::kValueOffset));
+ bind(&have_double_value);
+ movsd(FieldOperand(elements, index, times_8, FixedDoubleArray::kHeaderSize),
+ xmm_scratch);
+ jmp(&done);
+
+ bind(&maybe_nan);
+ // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
+ // it's an Infinity, and the non-NaN code path applies.
+ j(greater, &is_nan, Label::kNear);
+ cmpl(FieldOperand(maybe_number, HeapNumber::kValueOffset), Immediate(0));
+ j(zero, &not_nan);
+ bind(&is_nan);
+ // Convert all NaNs to the same canonical NaN value when they are stored in
+ // the double array.
+ Set(kScratchRegister, BitCast<uint64_t>(
+ FixedDoubleArray::canonical_not_the_hole_nan_as_double()));
+ movq(xmm_scratch, kScratchRegister);
+ jmp(&have_double_value, Label::kNear);
+
+ bind(&smi_value);
+ // Value is a smi. Convert it to a double and store.
+ // Preserve original value.
+ SmiToInteger32(kScratchRegister, maybe_number);
+ cvtlsi2sd(xmm_scratch, kScratchRegister);
+ movsd(FieldOperand(elements, index, times_8, FixedDoubleArray::kHeaderSize),
+ xmm_scratch);
+ bind(&done);
+}
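
StoreNumberToDoubleElements canonicalizes every NaN before storing it, so no stored value can alias the hole sentinel that FixedDoubleArray uses internally. A scalar sketch of the same policy (the canonical payload here is only an assumption):

#include <cmath>
#include <limits>

// Collapse all NaNs to one canonical bit pattern; pass other doubles through.
double CanonicalizeForDoubleArray(double value) {
  if (std::isnan(value)) {
    return std::numeric_limits<double>::quiet_NaN();  // assumed canonical NaN
  }
  return value;
}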
+
+
void MacroAssembler::CheckMap(Register obj,
Handle<Map> map,
Label* fail,
@@ -2707,7 +2835,8 @@ Condition MacroAssembler::IsObjectStringType(Register heap_object,
void MacroAssembler::TryGetFunctionPrototype(Register function,
Register result,
- Label* miss) {
+ Label* miss,
+ bool miss_on_bound_function) {
// Check that the receiver isn't a smi.
testl(function, Immediate(kSmiTagMask));
j(zero, miss);
@@ -2716,6 +2845,17 @@ void MacroAssembler::TryGetFunctionPrototype(Register function,
CmpObjectType(function, JS_FUNCTION_TYPE, result);
j(not_equal, miss);
+ if (miss_on_bound_function) {
+ movq(kScratchRegister,
+ FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
+ // It's not smi-tagged (stored in the top half of a smi-tagged 8-byte
+ // field).
+ TestBit(FieldOperand(kScratchRegister,
+ SharedFunctionInfo::kCompilerHintsOffset),
+ SharedFunctionInfo::kBoundFunction);
+ j(not_zero, miss);
+ }
+
// Make sure that the function has an instance prototype.
Label non_instance;
testb(FieldOperand(result, Map::kBitFieldOffset),
@@ -2787,10 +2927,10 @@ void MacroAssembler::DecrementCounter(StatsCounter* counter, int value) {
#ifdef ENABLE_DEBUGGER_SUPPORT
void MacroAssembler::DebugBreak() {
- ASSERT(allow_stub_calls());
Set(rax, 0); // No arguments.
LoadAddress(rbx, ExternalReference(Runtime::kDebugBreak, isolate()));
CEntryStub ces(1);
+ ASSERT(AllowThisStubCall(&ces));
Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
}
#endif // ENABLE_DEBUGGER_SUPPORT
@@ -2816,6 +2956,9 @@ void MacroAssembler::InvokeCode(Register code,
InvokeFlag flag,
const CallWrapper& call_wrapper,
CallKind call_kind) {
+ // You can't call a function without a valid frame.
+ ASSERT(flag == JUMP_FUNCTION || has_frame());
+
Label done;
InvokePrologue(expected,
actual,
@@ -2847,6 +2990,9 @@ void MacroAssembler::InvokeCode(Handle<Code> code,
InvokeFlag flag,
const CallWrapper& call_wrapper,
CallKind call_kind) {
+ // You can't call a function without a valid frame.
+ ASSERT(flag == JUMP_FUNCTION || has_frame());
+
Label done;
Register dummy = rax;
InvokePrologue(expected,
@@ -2877,6 +3023,9 @@ void MacroAssembler::InvokeFunction(Register function,
InvokeFlag flag,
const CallWrapper& call_wrapper,
CallKind call_kind) {
+ // You can't call a function without a valid frame.
+ ASSERT(flag == JUMP_FUNCTION || has_frame());
+
ASSERT(function.is(rdi));
movq(rdx, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
movq(rsi, FieldOperand(function, JSFunction::kContextOffset));
@@ -2891,34 +3040,24 @@ void MacroAssembler::InvokeFunction(Register function,
}
-void MacroAssembler::InvokeFunction(JSFunction* function,
+void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
const ParameterCount& actual,
InvokeFlag flag,
const CallWrapper& call_wrapper,
CallKind call_kind) {
- ASSERT(function->is_compiled());
+ // You can't call a function without a valid frame.
+ ASSERT(flag == JUMP_FUNCTION || has_frame());
+
// Get the function and setup the context.
- Move(rdi, Handle<JSFunction>(function));
+ Move(rdi, function);
movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
- if (V8::UseCrankshaft()) {
- // Since Crankshaft can recompile a function, we need to load
- // the Code object every time we call the function.
- movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
- ParameterCount expected(function->shared()->formal_parameter_count());
- InvokeCode(rdx, expected, actual, flag, call_wrapper, call_kind);
- } else {
- // Invoke the cached code.
- Handle<Code> code(function->code());
- ParameterCount expected(function->shared()->formal_parameter_count());
- InvokeCode(code,
- expected,
- actual,
- RelocInfo::CODE_TARGET,
- flag,
- call_wrapper,
- call_kind);
- }
+ // We call indirectly through the code field in the function to
+ // allow recompilation to take effect without changing any of the
+ // call sites.
+ movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
+ ParameterCount expected(function->shared()->formal_parameter_count());
+ InvokeCode(rdx, expected, actual, flag, call_wrapper, call_kind);
}
@@ -3759,6 +3898,20 @@ void MacroAssembler::CopyBytes(Register destination,
}
+void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
+ Register end_offset,
+ Register filler) {
+ Label loop, entry;
+ jmp(&entry);
+ bind(&loop);
+ movq(Operand(start_offset, 0), filler);
+ addq(start_offset, Immediate(kPointerSize));
+ bind(&entry);
+ cmpq(start_offset, end_offset);
+ j(less, &loop);
+}
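
InitializeFieldsWithFiller uses the usual jump-into-the-loop idiom; in plain C++ it is just a half-open fill over pointer-sized slots:

#include <cstdint>

// Write `filler` into every slot in [start, end); matches the emitted loop.
void FillFields(uintptr_t* start, uintptr_t* end, uintptr_t filler) {
  for (uintptr_t* p = start; p < end; ++p) {
    *p = filler;
  }
}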
+
+
void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
if (context_chain_length > 0) {
// Move up the chain of contexts to the context containing the slot.
@@ -3858,6 +4011,7 @@ void MacroAssembler::CallCFunction(ExternalReference function,
void MacroAssembler::CallCFunction(Register function, int num_arguments) {
+ ASSERT(has_frame());
// Check stack alignment.
if (emit_debug_code()) {
CheckStackAlignment();
@@ -3872,6 +4026,17 @@ void MacroAssembler::CallCFunction(Register function, int num_arguments) {
}
+bool AreAliased(Register r1, Register r2, Register r3, Register r4) {
+ if (r1.is(r2)) return true;
+ if (r1.is(r3)) return true;
+ if (r1.is(r4)) return true;
+ if (r2.is(r3)) return true;
+ if (r2.is(r4)) return true;
+ if (r3.is(r4)) return true;
+ return false;
+}
+
+
CodePatcher::CodePatcher(byte* address, int size)
: address_(address),
size_(size),
@@ -3892,6 +4057,195 @@ CodePatcher::~CodePatcher() {
ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}
+
+void MacroAssembler::CheckPageFlag(
+ Register object,
+ Register scratch,
+ int mask,
+ Condition cc,
+ Label* condition_met,
+ Label::Distance condition_met_distance) {
+ ASSERT(cc == zero || cc == not_zero);
+ if (scratch.is(object)) {
+ and_(scratch, Immediate(~Page::kPageAlignmentMask));
+ } else {
+ movq(scratch, Immediate(~Page::kPageAlignmentMask));
+ and_(scratch, object);
+ }
+ if (mask < (1 << kBitsPerByte)) {
+ testb(Operand(scratch, MemoryChunk::kFlagsOffset),
+ Immediate(static_cast<uint8_t>(mask)));
+ } else {
+ testl(Operand(scratch, MemoryChunk::kFlagsOffset), Immediate(mask));
+ }
+ j(cc, condition_met, condition_met_distance);
+}
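
CheckPageFlag relies on pages being power-of-two aligned, so masking any interior pointer yields the MemoryChunk header that holds the flag word. A sketch with an assumed 1 MB page size (the real constant is Page::kPageAlignmentMask):

#include <cstdint>

const uintptr_t kAssumedPageAlignmentMask = (uintptr_t(1) << 20) - 1;

// Recover the chunk header from any address inside the page.
uintptr_t ChunkFromAddress(uintptr_t address) {
  return address & ~kAssumedPageAlignmentMask;
}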
+
+
+void MacroAssembler::JumpIfBlack(Register object,
+ Register bitmap_scratch,
+ Register mask_scratch,
+ Label* on_black,
+ Label::Distance on_black_distance) {
+ ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, rcx));
+ GetMarkBits(object, bitmap_scratch, mask_scratch);
+
+ ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
+ // The mask_scratch register contains a 1 at the position of the first bit
+ // and a 0 at all other positions, including the position of the second bit.
+ movq(rcx, mask_scratch);
+ // Make rcx into a mask that covers both marking bits using the operation
+ // rcx = mask | (mask << 1).
+ lea(rcx, Operand(mask_scratch, mask_scratch, times_2, 0));
+ // Note that we are using a 4-byte aligned 8-byte load.
+ and_(rcx, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
+ cmpq(mask_scratch, rcx);
+ j(equal, on_black, on_black_distance);
+}
+
+
+// Detect some, but not all, common pointer-free objects. This is used by the
+// incremental write barrier which doesn't care about oddballs (they are always
+// marked black immediately so this code is not hit).
+void MacroAssembler::JumpIfDataObject(
+ Register value,
+ Register scratch,
+ Label* not_data_object,
+ Label::Distance not_data_object_distance) {
+ Label is_data_object;
+ movq(scratch, FieldOperand(value, HeapObject::kMapOffset));
+ CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
+ j(equal, &is_data_object, Label::kNear);
+ ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
+ ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
+ // If it's a string and it's not a cons string then it's an object containing
+ // no GC pointers.
+ testb(FieldOperand(scratch, Map::kInstanceTypeOffset),
+ Immediate(kIsIndirectStringMask | kIsNotStringMask));
+ j(not_zero, not_data_object, not_data_object_distance);
+ bind(&is_data_object);
+}
+
+
+void MacroAssembler::GetMarkBits(Register addr_reg,
+ Register bitmap_reg,
+ Register mask_reg) {
+ ASSERT(!AreAliased(addr_reg, bitmap_reg, mask_reg, rcx));
+ movq(bitmap_reg, addr_reg);
+ // Sign-extended 32-bit immediate.
+ and_(bitmap_reg, Immediate(~Page::kPageAlignmentMask));
+ movq(rcx, addr_reg);
+ int shift =
+ Bitmap::kBitsPerCellLog2 + kPointerSizeLog2 - Bitmap::kBytesPerCellLog2;
+ shrl(rcx, Immediate(shift));
+ and_(rcx,
+ Immediate((Page::kPageAlignmentMask >> shift) &
+ ~(Bitmap::kBytesPerCell - 1)));
+
+ addq(bitmap_reg, rcx);
+ movq(rcx, addr_reg);
+ shrl(rcx, Immediate(kPointerSizeLog2));
+ and_(rcx, Immediate((1 << Bitmap::kBitsPerCellLog2) - 1));
+ movl(mask_reg, Immediate(1));
+ shl_cl(mask_reg);
+}
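
GetMarkBits maps an address to a (bitmap cell, bit mask) pair: the marking bitmap holds one bit per pointer-sized word of the page. A scalar model under assumed x64 constants (8-byte words, 32-bit cells, illustrative page mask):

#include <cstdint>

const int kPointerSizeLog2 = 3;   // 8-byte words
const int kBitsPerCellLog2 = 5;   // 32 bits per bitmap cell
const int kBytesPerCellLog2 = 2;  // 4 bytes per bitmap cell
const uintptr_t kAssumedPageMask = (uintptr_t(1) << 20) - 1;

void GetMarkBitsModel(uintptr_t addr, uintptr_t* cell, uint32_t* mask) {
  uintptr_t page = addr & ~kAssumedPageMask;
  int shift = kBitsPerCellLog2 + kPointerSizeLog2 - kBytesPerCellLog2;
  // Byte offset of the cell covering `addr`, rounded down to a cell boundary.
  uintptr_t cell_offset = (addr >> shift) & (kAssumedPageMask >> shift) &
                          ~((uintptr_t(1) << kBytesPerCellLog2) - 1);
  *cell = page + cell_offset;
  // Bit position of the word within its cell.
  *mask = uint32_t(1) << ((addr >> kPointerSizeLog2) &
                          ((1 << kBitsPerCellLog2) - 1));
}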
+
+
+void MacroAssembler::EnsureNotWhite(
+ Register value,
+ Register bitmap_scratch,
+ Register mask_scratch,
+ Label* value_is_white_and_not_data,
+ Label::Distance distance) {
+ ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, rcx));
+ GetMarkBits(value, bitmap_scratch, mask_scratch);
+
+ // If the value is black or grey we don't need to do anything.
+ ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
+ ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
+ ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
+ ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
+
+ Label done;
+
+ // Since both black and grey have a 1 in the first position and white does
+ // not have a 1 there we only need to check one bit.
+ testq(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
+ j(not_zero, &done, Label::kNear);
+
+ if (FLAG_debug_code) {
+ // Check for impossible bit pattern.
+ Label ok;
+ push(mask_scratch);
+ // Shift left by one (addq doubles the mask). May overflow, making the
+ // check conservative.
+ addq(mask_scratch, mask_scratch);
+ testq(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
+ j(zero, &ok, Label::kNear);
+ int3();
+ bind(&ok);
+ pop(mask_scratch);
+ }
+
+ // Value is white. We check whether it is data that doesn't need scanning.
+ // Currently only checks for HeapNumber and non-cons strings.
+ Register map = rcx; // Holds map while checking type.
+ Register length = rcx; // Holds length of object after checking type.
+ Label not_heap_number;
+ Label is_data_object;
+
+ // Check for heap-number
+ movq(map, FieldOperand(value, HeapObject::kMapOffset));
+ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
+ j(not_equal, &not_heap_number, Label::kNear);
+ movq(length, Immediate(HeapNumber::kSize));
+ jmp(&is_data_object, Label::kNear);
+
+ bind(&not_heap_number);
+ // Check for strings.
+ ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
+ ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
+ // If it's a string and it's not a cons string then it's an object containing
+ // no GC pointers.
+ Register instance_type = rcx;
+ movzxbl(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
+ testb(instance_type, Immediate(kIsIndirectStringMask | kIsNotStringMask));
+ j(not_zero, value_is_white_and_not_data);
+ // It's a non-indirect (non-cons and non-slice) string.
+ // If it's external, the length is just ExternalString::kSize.
+ // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
+ Label not_external;
+ // External strings are the only ones with the kExternalStringTag bit
+ // set.
+ ASSERT_EQ(0, kSeqStringTag & kExternalStringTag);
+ ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
+ testb(instance_type, Immediate(kExternalStringTag));
+ j(zero, &not_external, Label::kNear);
+ movq(length, Immediate(ExternalString::kSize));
+ jmp(&is_data_object, Label::kNear);
+
+ bind(&not_external);
+ // Sequential string, either ASCII or UC16.
+ ASSERT(kAsciiStringTag == 0x04);
+ and_(length, Immediate(kStringEncodingMask));
+ xor_(length, Immediate(kStringEncodingMask));
+ addq(length, Immediate(0x04));
+ // Value now either 4 (if ASCII) or 8 (if UC16), i.e. char-size shifted by 2.
+ imul(length, FieldOperand(value, String::kLengthOffset));
+ shr(length, Immediate(2 + kSmiTagSize + kSmiShiftSize));
+ addq(length, Immediate(SeqString::kHeaderSize + kObjectAlignmentMask));
+ and_(length, Immediate(~kObjectAlignmentMask));
+
+ bind(&is_data_object);
+ // Value is a data object, and it is white. Mark it black. Since we know
+ // that the object is white we can make it black by flipping one bit.
+ or_(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
+
+ and_(bitmap_scratch, Immediate(~Page::kPageAlignmentMask));
+ addl(Operand(bitmap_scratch, MemoryChunk::kLiveBytesOffset), length);
+
+ bind(&done);
+}
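
The and/xor/imul/shr sequence above computes a sequential string's instance size without branching on the encoding. A scalar sketch, assuming 8-byte object alignment on x64 and passing SeqString::kHeaderSize in explicitly:

    // Editorial equivalent; kStringEncodingMask == kAsciiStringTag == 0x04
    // as asserted above.
    int SeqStringSizeSketch(int header_size, int instance_type, int char_count) {
      // Yields 4 for ASCII, 8 for two-byte: the char size shifted left by 2.
      int scaled_char_size = ((instance_type & 0x04) ^ 0x04) + 0x04;
      int data_bytes = (scaled_char_size * char_count) >> 2;
      // Round up to the allocation alignment (mask of 7 assumed for x64).
      return (header_size + data_bytes + 7) & ~7;
    }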
+
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_X64
diff --git a/deps/v8/src/x64/macro-assembler-x64.h b/deps/v8/src/x64/macro-assembler-x64.h
index e7eb104c0..cf03e59e5 100644
--- a/deps/v8/src/x64/macro-assembler-x64.h
+++ b/deps/v8/src/x64/macro-assembler-x64.h
@@ -29,6 +29,7 @@
#define V8_X64_MACRO_ASSEMBLER_X64_H_
#include "assembler.h"
+#include "frames.h"
#include "v8globals.h"
namespace v8 {
@@ -49,18 +50,23 @@ enum AllocationFlags {
// Default scratch register used by MacroAssembler (and other code that needs
// a spare register). The register isn't callee save, and not used by the
// function calling convention.
-static const Register kScratchRegister = { 10 }; // r10.
-static const Register kSmiConstantRegister = { 12 }; // r12 (callee save).
-static const Register kRootRegister = { 13 }; // r13 (callee save).
+const Register kScratchRegister = { 10 }; // r10.
+const Register kSmiConstantRegister = { 12 }; // r12 (callee save).
+const Register kRootRegister = { 13 }; // r13 (callee save).
// Value of smi in kSmiConstantRegister.
-static const int kSmiConstantRegisterValue = 1;
+const int kSmiConstantRegisterValue = 1;
// Actual value of root register is offset from the root array's start
// to take advantage of negative 8-bit displacement values.
-static const int kRootRegisterBias = 128;
+const int kRootRegisterBias = 128;
// Convenience for platform-independent signatures.
typedef Operand MemOperand;
+enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
+enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
+
+bool AreAliased(Register r1, Register r2, Register r3, Register r4);
+
// Forward declaration.
class JumpTarget;
@@ -72,6 +78,7 @@ struct SmiIndex {
ScaleFactor scale;
};
+
// MacroAssembler implements a collection of frequently used macros.
class MacroAssembler: public Assembler {
public:
@@ -134,56 +141,145 @@ class MacroAssembler: public Assembler {
void CompareRoot(const Operand& with, Heap::RootListIndex index);
void PushRoot(Heap::RootListIndex index);
- // ---------------------------------------------------------------------------
- // GC Support
-
- // For page containing |object| mark region covering |addr| dirty.
- // RecordWriteHelper only works if the object is not in new
- // space.
- void RecordWriteHelper(Register object,
- Register addr,
- Register scratch);
-
- // Check if object is in new space. The condition cc can be equal or
- // not_equal. If it is equal a jump will be done if the object is on new
- // space. The register scratch can be object itself, but it will be clobbered.
- void InNewSpace(Register object,
- Register scratch,
- Condition cc,
- Label* branch,
- Label::Distance near_jump = Label::kFar);
+ // These functions do not arrange the registers in any particular order so
+ // they are not useful for calls that can cause a GC. The caller can
+ // exclude up to 3 registers that do not need to be saved and restored.
+ void PushCallerSaved(SaveFPRegsMode fp_mode,
+ Register exclusion1 = no_reg,
+ Register exclusion2 = no_reg,
+ Register exclusion3 = no_reg);
+ void PopCallerSaved(SaveFPRegsMode fp_mode,
+ Register exclusion1 = no_reg,
+ Register exclusion2 = no_reg,
+ Register exclusion3 = no_reg);
+
+// ---------------------------------------------------------------------------
+// GC Support
+
+
+ enum RememberedSetFinalAction {
+ kReturnAtEnd,
+ kFallThroughAtEnd
+ };
- // For page containing |object| mark region covering [object+offset]
- // dirty. |object| is the object being stored into, |value| is the
- // object being stored. If |offset| is zero, then the |scratch|
- // register contains the array index into the elements array
- // represented as an untagged 32-bit integer. All registers are
- // clobbered by the operation. RecordWrite filters out smis so it
- // does not update the write barrier if the value is a smi.
- void RecordWrite(Register object,
- int offset,
- Register value,
- Register scratch);
-
- // For page containing |object| mark region covering [address]
+ // Record in the remembered set the fact that we have a pointer to new space
+ // at the address pointed to by the addr register. Only works if addr is not
+ // in new space.
+ void RememberedSetHelper(Register object, // Used for debug code.
+ Register addr,
+ Register scratch,
+ SaveFPRegsMode save_fp,
+ RememberedSetFinalAction and_then);
+
+ void CheckPageFlag(Register object,
+ Register scratch,
+ int mask,
+ Condition cc,
+ Label* condition_met,
+ Label::Distance condition_met_distance = Label::kFar);
+
+ // Check if object is in new space. Jumps if the object is not in new space.
+ // The register scratch can be object itself, but scratch will be clobbered.
+ void JumpIfNotInNewSpace(Register object,
+ Register scratch,
+ Label* branch,
+ Label::Distance distance = Label::kFar) {
+ InNewSpace(object, scratch, not_equal, branch, distance);
+ }
+
+ // Check if object is in new space. Jumps if the object is in new space.
+ // The register scratch can be object itself, but it will be clobbered.
+ void JumpIfInNewSpace(Register object,
+ Register scratch,
+ Label* branch,
+ Label::Distance distance = Label::kFar) {
+ InNewSpace(object, scratch, equal, branch, distance);
+ }
+
+ // Check if an object has the black incremental marking color. Also uses rcx!
+ void JumpIfBlack(Register object,
+ Register scratch0,
+ Register scratch1,
+ Label* on_black,
+ Label::Distance on_black_distance = Label::kFar);
+
+ // Detects conservatively whether an object is data-only, i.e. it does not
+ // need to be scanned by the garbage collector.
+ void JumpIfDataObject(Register value,
+ Register scratch,
+ Label* not_data_object,
+ Label::Distance not_data_object_distance);
+
+ // Checks the color of an object. If the object is already grey or black
+ // then we just fall through, since it is already live. If it is white and
+ // we can determine that it doesn't need to be scanned, then we just mark it
+ // black and fall through. For the rest we jump to the label so the
+ // incremental marker can fix its assumptions.
+ void EnsureNotWhite(Register object,
+ Register scratch1,
+ Register scratch2,
+ Label* object_is_white_and_not_data,
+ Label::Distance distance);
+
+ // Notify the garbage collector that we wrote a pointer into an object.
+ // |object| is the object being stored into, |value| is the object being
+ // stored. value and scratch registers are clobbered by the operation.
+ // The offset is the offset from the start of the object, not the offset from
+ // the tagged HeapObject pointer. For use with FieldOperand(reg, off).
+ void RecordWriteField(
+ Register object,
+ int offset,
+ Register value,
+ Register scratch,
+ SaveFPRegsMode save_fp,
+ RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
+ SmiCheck smi_check = INLINE_SMI_CHECK);
+
+ // As above, but the offset has the tag presubtracted. For use with
+ // Operand(reg, off).
+ void RecordWriteContextSlot(
+ Register context,
+ int offset,
+ Register value,
+ Register scratch,
+ SaveFPRegsMode save_fp,
+ RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
+ SmiCheck smi_check = INLINE_SMI_CHECK) {
+ RecordWriteField(context,
+ offset + kHeapObjectTag,
+ value,
+ scratch,
+ save_fp,
+ remembered_set_action,
+ smi_check);
+ }
+
+ // Notify the garbage collector that we wrote a pointer into a fixed array.
+ // |array| is the array being stored into, |value| is the
+ // object being stored. |index| is the array index represented as a non-smi.
+ // All registers are clobbered by the operation. RecordWriteArray
+ // filters out smis, so it does not update the write barrier if the
+ // value is a smi.
+ void RecordWriteArray(
+ Register array,
+ Register value,
+ Register index,
+ SaveFPRegsMode save_fp,
+ RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
+ SmiCheck smi_check = INLINE_SMI_CHECK);
+
+ // For page containing |object| mark region covering |address|
// dirty. |object| is the object being stored into, |value| is the
- // object being stored. All registers are clobbered by the
+ // object being stored. The address and value registers are clobbered by the
// operation. RecordWrite filters out smis so it does not update
// the write barrier if the value is a smi.
- void RecordWrite(Register object,
- Register address,
- Register value);
-
- // For page containing |object| mark region covering [object+offset] dirty.
- // The value is known to not be a smi.
- // object is the object being stored into, value is the object being stored.
- // If offset is zero, then the scratch register contains the array index into
- // the elements array represented as an untagged 32-bit integer.
- // All registers are clobbered by the operation.
- void RecordWriteNonSmi(Register object,
- int offset,
- Register value,
- Register scratch);
+ void RecordWrite(
+ Register object,
+ Register address,
+ Register value,
+ SaveFPRegsMode save_fp,
+ RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
+ SmiCheck smi_check = INLINE_SMI_CHECK);
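
Taken together, the SmiCheck and RememberedSetAction parameters control which guards the barrier emits. A hypothetical outline of that logic (helper names are placeholders, not V8 APIs; the real barrier is emitted as assembly):

    bool IsSmiSketch(void* v);
    bool InNewSpaceSketch(void* p);
    void RememberSlotSketch(void** slot);
    void MarkingBarrierSketch(void* object, void* value);

    void RecordWriteSketch(void* object, void** slot, void* value,
                           bool inline_smi_check, bool emit_remembered_set) {
      if (inline_smi_check && IsSmiSketch(value)) return;  // smis need no barrier
      if (emit_remembered_set &&
          InNewSpaceSketch(value) && !InNewSpaceSketch(object)) {
        RememberSlotSketch(slot);  // record the old-to-new pointer
      }
      MarkingBarrierSketch(object, value);  // keep incremental-marking invariants
    }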
#ifdef ENABLE_DEBUGGER_SUPPORT
// ---------------------------------------------------------------------------
@@ -192,15 +288,6 @@ class MacroAssembler: public Assembler {
void DebugBreak();
#endif
- // ---------------------------------------------------------------------------
- // Activation frames
-
- void EnterInternalFrame() { EnterFrame(StackFrame::INTERNAL); }
- void LeaveInternalFrame() { LeaveFrame(StackFrame::INTERNAL); }
-
- void EnterConstructFrame() { EnterFrame(StackFrame::CONSTRUCT); }
- void LeaveConstructFrame() { LeaveFrame(StackFrame::CONSTRUCT); }
-
// Enter specific kind of exit frame; either in normal or
// debug mode. Expects the number of arguments in register rax and
// sets up the number of arguments in register rdi and the pointer
@@ -232,9 +319,9 @@ class MacroAssembler: public Assembler {
void LoadFromSafepointRegisterSlot(Register dst, Register src);
void InitializeRootRegister() {
- ExternalReference roots_address =
- ExternalReference::roots_address(isolate());
- movq(kRootRegister, roots_address);
+ ExternalReference roots_array_start =
+ ExternalReference::roots_array_start(isolate());
+ movq(kRootRegister, roots_array_start);
addq(kRootRegister, Immediate(kRootRegisterBias));
}
@@ -270,7 +357,7 @@ class MacroAssembler: public Assembler {
const CallWrapper& call_wrapper,
CallKind call_kind);
- void InvokeFunction(JSFunction* function,
+ void InvokeFunction(Handle<JSFunction> function,
const ParameterCount& actual,
InvokeFlag flag,
const CallWrapper& call_wrapper,
@@ -639,6 +726,7 @@ class MacroAssembler: public Assembler {
void Push(Smi* smi);
void Test(const Operand& dst, Smi* source);
+
// ---------------------------------------------------------------------------
// String macros.
@@ -684,6 +772,9 @@ class MacroAssembler: public Assembler {
// Move if the registers are not identical.
void Move(Register target, Register source);
+ // Bit-field support.
+ void TestBit(const Operand& dst, int bit_index);
+
// Handle support
void Move(Register dst, Handle<Object> source);
void Move(const Operand& dst, Handle<Object> source);
@@ -760,6 +851,28 @@ class MacroAssembler: public Assembler {
Label* fail,
Label::Distance distance = Label::kFar);
+ // Check if a map for a JSObject indicates that the object can have both smi
+ // and HeapObject elements. Jump to the specified label if it does not.
+ void CheckFastObjectElements(Register map,
+ Label* fail,
+ Label::Distance distance = Label::kFar);
+
+ // Check if a map for a JSObject indicates that the object has fast smi only
+ // elements. Jump to the specified label if it does not.
+ void CheckFastSmiOnlyElements(Register map,
+ Label* fail,
+ Label::Distance distance = Label::kFar);
+
+ // Check to see if maybe_number can be stored as a double in
+ // FastDoubleElements. If it can, store it at the index specified by index in
+ // the FastDoubleElements array elements, otherwise jump to fail. Note that
+ // index must not be smi-tagged.
+ void StoreNumberToDoubleElements(Register maybe_number,
+ Register elements,
+ Register index,
+ XMMRegister xmm_scratch,
+ Label* fail);
+
// Check if the map of an object is equal to a specified map and
// branch to label if not. Skip the smi check if not required
// (object is known to be a heap object)
@@ -820,9 +933,10 @@ class MacroAssembler: public Assembler {
// ---------------------------------------------------------------------------
// Exception handling
- // Push a new try handler and link into try handler chain. The return
- // address must be pushed before calling this helper.
- void PushTryHandler(CodeLocation try_location, HandlerType type);
+ // Push a new try handler and link it into try handler chain.
+ void PushTryHandler(CodeLocation try_location,
+ HandlerType type,
+ int handler_index);
// Unlink the stack handler on top of the stack from the try handler chain.
void PopTryHandler();
@@ -965,7 +1079,8 @@ class MacroAssembler: public Assembler {
// clobbered.
void TryGetFunctionPrototype(Register function,
Register result,
- Label* miss);
+ Label* miss,
+ bool miss_on_bound_function = false);
// Generates code for reporting that an illegal operation has
// occurred.
@@ -993,19 +1108,9 @@ class MacroAssembler: public Assembler {
// Call a code stub.
void CallStub(CodeStub* stub, unsigned ast_id = kNoASTId);
- // Call a code stub and return the code object called. Try to generate
- // the code if necessary. Do not perform a GC but instead return a retry
- // after GC failure.
- MUST_USE_RESULT MaybeObject* TryCallStub(CodeStub* stub);
-
// Tail call a code stub (jump).
void TailCallStub(CodeStub* stub);
- // Tail call a code stub (jump) and return the code object called. Try to
- // generate the code if necessary. Do not perform a GC but instead return
- // a retry after GC failure.
- MUST_USE_RESULT MaybeObject* TryTailCallStub(CodeStub* stub);
-
// Return from a code stub after popping its arguments.
void StubReturn(int argc);
@@ -1015,19 +1120,9 @@ class MacroAssembler: public Assembler {
// Call a runtime function and save the value of XMM registers.
void CallRuntimeSaveDoubles(Runtime::FunctionId id);
- // Call a runtime function, returning the CodeStub object called.
- // Try to generate the stub code if necessary. Do not perform a GC
- // but instead return a retry after GC failure.
- MUST_USE_RESULT MaybeObject* TryCallRuntime(const Runtime::Function* f,
- int num_arguments);
-
// Convenience function: Same as above, but takes the fid instead.
void CallRuntime(Runtime::FunctionId id, int num_arguments);
- // Convenience function: Same as above, but takes the fid instead.
- MUST_USE_RESULT MaybeObject* TryCallRuntime(Runtime::FunctionId id,
- int num_arguments);
-
// Convenience function: call an external reference.
void CallExternalReference(const ExternalReference& ext,
int num_arguments);
@@ -1039,38 +1134,26 @@ class MacroAssembler: public Assembler {
int num_arguments,
int result_size);
- MUST_USE_RESULT MaybeObject* TryTailCallExternalReference(
- const ExternalReference& ext, int num_arguments, int result_size);
-
// Convenience function: tail call a runtime routine (jump).
void TailCallRuntime(Runtime::FunctionId fid,
int num_arguments,
int result_size);
- MUST_USE_RESULT MaybeObject* TryTailCallRuntime(Runtime::FunctionId fid,
- int num_arguments,
- int result_size);
-
// Jump to a runtime routine.
void JumpToExternalReference(const ExternalReference& ext, int result_size);
- // Jump to a runtime routine.
- MaybeObject* TryJumpToExternalReference(const ExternalReference& ext,
- int result_size);
-
- // Prepares stack to put arguments (aligns and so on).
- // WIN64 calling convention requires to put the pointer to the return value
- // slot into rcx (rcx must be preserverd until TryCallApiFunctionAndReturn).
- // Saves context (rsi). Clobbers rax. Allocates arg_stack_space * kPointerSize
+ // Prepares stack to put arguments (aligns and so on). WIN64 calling
+ // convention requires putting the pointer to the return value slot into
+ // rcx (rcx must be preserved until CallApiFunctionAndReturn). Saves
+ // context (rsi). Clobbers rax. Allocates arg_stack_space * kPointerSize
// inside the exit frame (not GCed) accessible via StackSpaceOperand.
void PrepareCallApiFunction(int arg_stack_space);
- // Calls an API function. Allocates HandleScope, extracts
- // returned value from handle and propagates exceptions.
- // Clobbers r14, r15, rbx and caller-save registers. Restores context.
- // On return removes stack_space * kPointerSize (GCed).
- MUST_USE_RESULT MaybeObject* TryCallApiFunctionAndReturn(
- ApiFunction* function, int stack_space);
+ // Calls an API function. Allocates HandleScope, extracts returned value
+ // from handle and propagates exceptions. Clobbers r14, r15, rbx and
+ // caller-save registers. Restores context. On return removes
+ // stack_space * kPointerSize (GCed).
+ void CallApiFunctionAndReturn(Address function_address, int stack_space);
// Before calling a C-function from generated code, align arguments on stack.
// After aligning the frame, arguments must be stored in esp[0], esp[4],
@@ -1119,6 +1202,13 @@ class MacroAssembler: public Assembler {
int min_length = 0,
Register scratch = kScratchRegister);
+ // Initialize fields with filler values. Fields starting at |start_offset|
+ // up to (but not including) |end_offset| are overwritten with the value
+ // in |filler|. At the end of the loop, |start_offset| takes the value of
+ // |end_offset|.
+ void InitializeFieldsWithFiller(Register start_offset,
+ Register end_offset,
+ Register filler);
+
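A one-loop sketch of what this helper emits (pointer types illustrative):

    // On exit 'start' has advanced to 'end', mirroring the register behaviour.
    void InitializeFieldsWithFillerSketch(void** start, void** end, void* filler) {
      while (start < end) *start++ = filler;
    }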
// ---------------------------------------------------------------------------
// StatsCounter support
@@ -1151,11 +1241,18 @@ class MacroAssembler: public Assembler {
bool generating_stub() { return generating_stub_; }
void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
bool allow_stub_calls() { return allow_stub_calls_; }
+ void set_has_frame(bool value) { has_frame_ = value; }
+ bool has_frame() { return has_frame_; }
+ inline bool AllowThisStubCall(CodeStub* stub);
static int SafepointRegisterStackIndex(Register reg) {
return SafepointRegisterStackIndex(reg.code());
}
+ // Activation support.
+ void EnterFrame(StackFrame::Type type);
+ void LeaveFrame(StackFrame::Type type);
+
private:
// Order general registers are pushed by Pushad.
// rax, rcx, rdx, rbx, rsi, rdi, r8, r9, r11, r14, r15.
@@ -1165,6 +1262,7 @@ class MacroAssembler: public Assembler {
bool generating_stub_;
bool allow_stub_calls_;
+ bool has_frame_;
bool root_array_available_;
// Returns a register holding the smi value. The register MUST NOT be
@@ -1188,10 +1286,6 @@ class MacroAssembler: public Assembler {
const CallWrapper& call_wrapper = NullCallWrapper(),
CallKind call_kind = CALL_AS_METHOD);
- // Activation support.
- void EnterFrame(StackFrame::Type type);
- void LeaveFrame(StackFrame::Type type);
-
void EnterExitFramePrologue(bool save_rax);
// Allocates arg_stack_space * kPointerSize memory (not GCed) on the stack
@@ -1218,6 +1312,24 @@ class MacroAssembler: public Assembler {
Register scratch,
bool gc_allowed);
+ // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
+ void InNewSpace(Register object,
+ Register scratch,
+ Condition cc,
+ Label* branch,
+ Label::Distance distance = Label::kFar);
+
+ // Helper for finding the mark bits for an address. Afterwards, the
+ // bitmap register points at the word with the mark bits and the mask
+ // register holds the position of the first bit. Uses rcx as scratch and
+ // leaves addr_reg unchanged.
+ inline void GetMarkBits(Register addr_reg,
+ Register bitmap_reg,
+ Register mask_reg);
+
+ // Helper for throwing exceptions. Compute a handler address and jump to
+ // it. See the implementation for register usage.
+ void JumpToHandlerEntry();
// Compute memory operands for safepoint stack slots.
Operand SafepointRegisterSlot(Register reg);
@@ -1255,32 +1367,32 @@ class CodePatcher {
// Static helper functions.
// Generate an Operand for loading a field from an object.
-static inline Operand FieldOperand(Register object, int offset) {
+inline Operand FieldOperand(Register object, int offset) {
return Operand(object, offset - kHeapObjectTag);
}
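
A hedged usage note: with kHeapObjectTag == 1, the two operands below coincide, which is the whole point of FieldOperand: call sites may use untagged field offsets against tagged pointers.

    //   FieldOperand(rbx, HeapObject::kMapOffset)
    //   Operand(rbx, HeapObject::kMapOffset - kHeapObjectTag)
    // Both address the map word of the heap object held (tagged) in rbx.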
// Generate an Operand for loading an indexed field from an object.
-static inline Operand FieldOperand(Register object,
- Register index,
- ScaleFactor scale,
- int offset) {
+inline Operand FieldOperand(Register object,
+ Register index,
+ ScaleFactor scale,
+ int offset) {
return Operand(object, index, scale, offset - kHeapObjectTag);
}
-static inline Operand ContextOperand(Register context, int index) {
+inline Operand ContextOperand(Register context, int index) {
return Operand(context, Context::SlotOffset(index));
}
-static inline Operand GlobalObjectOperand() {
+inline Operand GlobalObjectOperand() {
return ContextOperand(rsi, Context::GLOBAL_INDEX);
}
// Provides access to exit frame stack space (not GCed).
-static inline Operand StackSpaceOperand(int index) {
+inline Operand StackSpaceOperand(int index) {
#ifdef _WIN64
const int kShaddowSpace = 4;
return Operand(rsp, (index + kShaddowSpace) * kPointerSize);
diff --git a/deps/v8/src/x64/regexp-macro-assembler-x64.cc b/deps/v8/src/x64/regexp-macro-assembler-x64.cc
index a782bd705..1e0cd6a38 100644
--- a/deps/v8/src/x64/regexp-macro-assembler-x64.cc
+++ b/deps/v8/src/x64/regexp-macro-assembler-x64.cc
@@ -193,7 +193,7 @@ void RegExpMacroAssemblerX64::CheckCharacterGT(uc16 limit, Label* on_greater) {
void RegExpMacroAssemblerX64::CheckAtStart(Label* on_at_start) {
Label not_at_start;
// Did we start the match at the start of the string at all?
- __ cmpb(Operand(rbp, kStartIndex), Immediate(0));
+ __ cmpl(Operand(rbp, kStartIndex), Immediate(0));
BranchOrBacktrack(not_equal, &not_at_start);
// If we did, are we still at the start of the input?
__ lea(rax, Operand(rsi, rdi, times_1, 0));
@@ -205,7 +205,7 @@ void RegExpMacroAssemblerX64::CheckAtStart(Label* on_at_start) {
void RegExpMacroAssemblerX64::CheckNotAtStart(Label* on_not_at_start) {
// Did we start the match at the start of the string at all?
- __ cmpb(Operand(rbp, kStartIndex), Immediate(0));
+ __ cmpl(Operand(rbp, kStartIndex), Immediate(0));
BranchOrBacktrack(not_equal, on_not_at_start);
// If we did, are we still at the start of the input?
__ lea(rax, Operand(rsi, rdi, times_1, 0));
@@ -431,9 +431,14 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
// Isolate.
__ LoadAddress(rcx, ExternalReference::isolate_address());
#endif
- ExternalReference compare =
- ExternalReference::re_case_insensitive_compare_uc16(masm_.isolate());
- __ CallCFunction(compare, num_arguments);
+
+ { // NOLINT: Can't find a way to open this scope without confusing the
+ // linter.
+ AllowExternalCallThatCantCauseGC scope(&masm_);
+ ExternalReference compare =
+ ExternalReference::re_case_insensitive_compare_uc16(masm_.isolate());
+ __ CallCFunction(compare, num_arguments);
+ }
// Restore original values before reacting on result value.
__ Move(code_object_pointer(), masm_.CodeObject());
@@ -706,7 +711,12 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
// registers we need.
// Entry code:
__ bind(&entry_label_);
- // Start new stack frame.
+
+ // Tell the system that we have a stack frame. Because the type is
+ // MANUAL, no code is generated.
+ FrameScope scope(&masm_, StackFrame::MANUAL);
+
+ // Actually emit code to start a new stack frame.
__ push(rbp);
__ movq(rbp, rsp);
// Save parameters and callee-save registers. Order here should correspond
@@ -1238,6 +1248,11 @@ int RegExpMacroAssemblerX64::CheckStackGuardState(Address* return_address,
frame_entry<const String*>(re_frame, kInputString) = *subject;
frame_entry<const byte*>(re_frame, kInputStart) = new_address;
frame_entry<const byte*>(re_frame, kInputEnd) = new_address + byte_length;
+ } else if (frame_entry<const String*>(re_frame, kInputString) != *subject) {
+ // Subject string might have been a ConsString that underwent
+ // short-circuiting during GC. That will not change start_address but
+ // will change the pointer inside the subject handle.
+ frame_entry<const String*>(re_frame, kInputString) = *subject;
}
return 0;
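
An illustration of the scenario the new else-if covers (sketch, using the names from this function):

    // A ConsString whose second half is empty may be "short-circuited" to
    // its first half by the collector. The flat content (start_address)
    // does not move, so the branch above is not taken, yet *subject now
    // yields a different String*:
    //
    //   before GC: *subject == cons,          frame kInputString == cons
    //   after GC:  *subject == cons->first(), frame kInputString == cons (stale)
    //
    // hence the refresh: frame_entry<...>(re_frame, kInputString) = *subject;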
diff --git a/deps/v8/src/x64/stub-cache-x64.cc b/deps/v8/src/x64/stub-cache-x64.cc
index 76d255579..5a81c8974 100644
--- a/deps/v8/src/x64/stub-cache-x64.cc
+++ b/deps/v8/src/x64/stub-cache-x64.cc
@@ -82,13 +82,12 @@ static void ProbeTable(Isolate* isolate,
// must always call a backup property check that is complete.
// This function is safe to call if the receiver has fast properties.
// Name must be a symbol and receiver must be a heap object.
-MUST_USE_RESULT static MaybeObject* GenerateDictionaryNegativeLookup(
- MacroAssembler* masm,
- Label* miss_label,
- Register receiver,
- String* name,
- Register r0,
- Register r1) {
+static void GenerateDictionaryNegativeLookup(MacroAssembler* masm,
+ Label* miss_label,
+ Register receiver,
+ Handle<String> name,
+ Register r0,
+ Register r1) {
ASSERT(name->IsSymbol());
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->negative_lookups(), 1);
@@ -118,19 +117,14 @@ MUST_USE_RESULT static MaybeObject* GenerateDictionaryNegativeLookup(
__ j(not_equal, miss_label);
Label done;
- MaybeObject* result = StringDictionaryLookupStub::GenerateNegativeLookup(
- masm,
- miss_label,
- &done,
- properties,
- name,
- r1);
- if (result->IsFailure()) return result;
-
+ StringDictionaryLookupStub::GenerateNegativeLookup(masm,
+ miss_label,
+ &done,
+ properties,
+ name,
+ r1);
__ bind(&done);
__ DecrementCounter(counters->negative_lookups_miss(), 1);
-
- return result;
}
@@ -211,7 +205,10 @@ void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
- MacroAssembler* masm, int index, Register prototype, Label* miss) {
+ MacroAssembler* masm,
+ int index,
+ Register prototype,
+ Label* miss) {
Isolate* isolate = masm->isolate();
// Check we're still in the same context.
__ Move(prototype, isolate->global());
@@ -219,8 +216,8 @@ void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
prototype);
__ j(not_equal, miss);
// Get the global function with the given index.
- JSFunction* function =
- JSFunction::cast(isolate->global_context()->get(index));
+ Handle<JSFunction> function(
+ JSFunction::cast(isolate->global_context()->get(index)));
// Load its initial map. The global functions all have initial maps.
__ Move(prototype, Handle<Map>(function->initial_map()));
// Load the prototype from the initial map.
@@ -312,8 +309,10 @@ void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm,
// are loaded directly otherwise the property is loaded from the properties
// fixed array.
void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
- Register dst, Register src,
- JSObject* holder, int index) {
+ Register dst,
+ Register src,
+ Handle<JSObject> holder,
+ int index) {
// Adjust for the number of properties stored in the holder.
index -= holder->map()->inobject_properties();
if (index < 0) {
@@ -333,11 +332,11 @@ static void PushInterceptorArguments(MacroAssembler* masm,
Register receiver,
Register holder,
Register name,
- JSObject* holder_obj) {
+ Handle<JSObject> holder_obj) {
__ push(name);
- InterceptorInfo* interceptor = holder_obj->GetNamedInterceptor();
- ASSERT(!masm->isolate()->heap()->InNewSpace(interceptor));
- __ Move(kScratchRegister, Handle<Object>(interceptor));
+ Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor());
+ ASSERT(!masm->isolate()->heap()->InNewSpace(*interceptor));
+ __ Move(kScratchRegister, interceptor);
__ push(kScratchRegister);
__ push(receiver);
__ push(holder);
@@ -345,11 +344,12 @@ static void PushInterceptorArguments(MacroAssembler* masm,
}
-static void CompileCallLoadPropertyWithInterceptor(MacroAssembler* masm,
- Register receiver,
- Register holder,
- Register name,
- JSObject* holder_obj) {
+static void CompileCallLoadPropertyWithInterceptor(
+ MacroAssembler* masm,
+ Register receiver,
+ Register holder,
+ Register name,
+ Handle<JSObject> holder_obj) {
PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
ExternalReference ref =
@@ -403,9 +403,9 @@ static void FreeSpaceForFastApiCall(MacroAssembler* masm, Register scratch) {
// Generates call to API function.
-static MaybeObject* GenerateFastApiCall(MacroAssembler* masm,
- const CallOptimization& optimization,
- int argc) {
+static void GenerateFastApiCall(MacroAssembler* masm,
+ const CallOptimization& optimization,
+ int argc) {
// ----------- S t a t e -------------
// -- rsp[0] : return address
// -- rsp[8] : object passing the type check
@@ -420,29 +420,25 @@ static MaybeObject* GenerateFastApiCall(MacroAssembler* masm,
// -- rsp[(argc + 4) * 8] : receiver
// -----------------------------------
// Get the function and setup the context.
- JSFunction* function = optimization.constant_function();
- __ Move(rdi, Handle<JSFunction>(function));
+ Handle<JSFunction> function = optimization.constant_function();
+ __ Move(rdi, function);
__ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
// Pass the additional arguments.
__ movq(Operand(rsp, 2 * kPointerSize), rdi);
- Object* call_data = optimization.api_call_info()->data();
- Handle<CallHandlerInfo> api_call_info_handle(optimization.api_call_info());
- if (masm->isolate()->heap()->InNewSpace(call_data)) {
- __ Move(rcx, api_call_info_handle);
+ Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
+ Handle<Object> call_data(api_call_info->data());
+ if (masm->isolate()->heap()->InNewSpace(*call_data)) {
+ __ Move(rcx, api_call_info);
__ movq(rbx, FieldOperand(rcx, CallHandlerInfo::kDataOffset));
__ movq(Operand(rsp, 3 * kPointerSize), rbx);
} else {
- __ Move(Operand(rsp, 3 * kPointerSize), Handle<Object>(call_data));
+ __ Move(Operand(rsp, 3 * kPointerSize), call_data);
}
// Prepare arguments.
__ lea(rbx, Operand(rsp, 3 * kPointerSize));
- Object* callback = optimization.api_call_info()->callback();
- Address api_function_address = v8::ToCData<Address>(callback);
- ApiFunction fun(api_function_address);
-
#ifdef _WIN64
// Win64 uses first register--rcx--for returned value.
Register arguments_arg = rdx;
@@ -465,12 +461,11 @@ static MaybeObject* GenerateFastApiCall(MacroAssembler* masm,
// v8::InvocationCallback's argument.
__ lea(arguments_arg, StackSpaceOperand(0));
- // Emitting a stub call may try to allocate (if the code is not
- // already generated). Do not allow the assembler to perform a
- // garbage collection but instead return the allocation failure
- // object.
- return masm->TryCallApiFunctionAndReturn(&fun,
- argc + kFastApiCallArguments + 1);
+
+ // Function address is a foreign pointer outside V8's heap.
+ Address function_address = v8::ToCData<Address>(api_call_info->callback());
+ __ CallApiFunctionAndReturn(function_address,
+ argc + kFastApiCallArguments + 1);
}
@@ -485,16 +480,16 @@ class CallInterceptorCompiler BASE_EMBEDDED {
name_(name),
extra_ic_state_(extra_ic_state) {}
- MaybeObject* Compile(MacroAssembler* masm,
- JSObject* object,
- JSObject* holder,
- String* name,
- LookupResult* lookup,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* miss) {
+ void Compile(MacroAssembler* masm,
+ Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<String> name,
+ LookupResult* lookup,
+ Register receiver,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* miss) {
ASSERT(holder->HasNamedInterceptor());
ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
@@ -502,45 +497,27 @@ class CallInterceptorCompiler BASE_EMBEDDED {
__ JumpIfSmi(receiver, miss);
CallOptimization optimization(lookup);
-
if (optimization.is_constant_call()) {
- return CompileCacheable(masm,
- object,
- receiver,
- scratch1,
- scratch2,
- scratch3,
- holder,
- lookup,
- name,
- optimization,
- miss);
+ CompileCacheable(masm, object, receiver, scratch1, scratch2, scratch3,
+ holder, lookup, name, optimization, miss);
} else {
- CompileRegular(masm,
- object,
- receiver,
- scratch1,
- scratch2,
- scratch3,
- name,
- holder,
- miss);
- return masm->isolate()->heap()->undefined_value(); // Success.
+ CompileRegular(masm, object, receiver, scratch1, scratch2, scratch3,
+ name, holder, miss);
}
}
private:
- MaybeObject* CompileCacheable(MacroAssembler* masm,
- JSObject* object,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- JSObject* interceptor_holder,
- LookupResult* lookup,
- String* name,
- const CallOptimization& optimization,
- Label* miss_label) {
+ void CompileCacheable(MacroAssembler* masm,
+ Handle<JSObject> object,
+ Register receiver,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Handle<JSObject> interceptor_holder,
+ LookupResult* lookup,
+ Handle<String> name,
+ const CallOptimization& optimization,
+ Label* miss_label) {
ASSERT(optimization.is_constant_call());
ASSERT(!lookup->holder()->IsGlobalObject());
@@ -549,16 +526,14 @@ class CallInterceptorCompiler BASE_EMBEDDED {
bool can_do_fast_api_call = false;
if (optimization.is_simple_api_call() &&
!lookup->holder()->IsGlobalObject()) {
- depth1 =
- optimization.GetPrototypeDepthOfExpectedType(object,
- interceptor_holder);
+ depth1 = optimization.GetPrototypeDepthOfExpectedType(
+ object, interceptor_holder);
if (depth1 == kInvalidProtoDepth) {
- depth2 =
- optimization.GetPrototypeDepthOfExpectedType(interceptor_holder,
- lookup->holder());
+ depth2 = optimization.GetPrototypeDepthOfExpectedType(
+ interceptor_holder, Handle<JSObject>(lookup->holder()));
}
- can_do_fast_api_call = (depth1 != kInvalidProtoDepth) ||
- (depth2 != kInvalidProtoDepth);
+ can_do_fast_api_call =
+ depth1 != kInvalidProtoDepth || depth2 != kInvalidProtoDepth;
}
Counters* counters = masm->isolate()->counters();
@@ -574,9 +549,9 @@ class CallInterceptorCompiler BASE_EMBEDDED {
Label miss_cleanup;
Label* miss = can_do_fast_api_call ? &miss_cleanup : miss_label;
Register holder =
- stub_compiler_->CheckPrototypes(object, receiver,
- interceptor_holder, scratch1,
- scratch2, scratch3, name, depth1, miss);
+ stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
+ scratch1, scratch2, scratch3,
+ name, depth1, miss);
// Invoke an interceptor and if it provides a value,
// branch to |regular_invoke|.
@@ -589,10 +564,11 @@ class CallInterceptorCompiler BASE_EMBEDDED {
// Check that the maps from interceptor's holder to constant function's
// holder haven't changed and thus we can use cached constant function.
- if (interceptor_holder != lookup->holder()) {
+ if (*interceptor_holder != lookup->holder()) {
stub_compiler_->CheckPrototypes(interceptor_holder, receiver,
- lookup->holder(), scratch1,
- scratch2, scratch3, name, depth2, miss);
+ Handle<JSObject>(lookup->holder()),
+ scratch1, scratch2, scratch3,
+ name, depth2, miss);
} else {
// CheckPrototypes has a side effect of fetching a 'holder'
// for API (object which is instanceof for the signature). It's
@@ -603,10 +579,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
// Invoke function.
if (can_do_fast_api_call) {
- MaybeObject* result = GenerateFastApiCall(masm,
- optimization,
- arguments_.immediate());
- if (result->IsFailure()) return result;
+ GenerateFastApiCall(masm, optimization, arguments_.immediate());
} else {
CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
? CALL_AS_FUNCTION
@@ -627,33 +600,27 @@ class CallInterceptorCompiler BASE_EMBEDDED {
if (can_do_fast_api_call) {
FreeSpaceForFastApiCall(masm, scratch1);
}
-
- return masm->isolate()->heap()->undefined_value(); // Success.
}
void CompileRegular(MacroAssembler* masm,
- JSObject* object,
+ Handle<JSObject> object,
Register receiver,
Register scratch1,
Register scratch2,
Register scratch3,
- String* name,
- JSObject* interceptor_holder,
+ Handle<String> name,
+ Handle<JSObject> interceptor_holder,
Label* miss_label) {
Register holder =
stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
- scratch1, scratch2, scratch3, name,
- miss_label);
+ scratch1, scratch2, scratch3,
+ name, miss_label);
- __ EnterInternalFrame();
+ FrameScope scope(masm, StackFrame::INTERNAL);
// Save the name_ register across the call.
__ push(name_);
- PushInterceptorArguments(masm,
- receiver,
- holder,
- name_,
- interceptor_holder);
+ PushInterceptorArguments(masm, receiver, holder, name_, interceptor_holder);
__ CallExternalReference(
ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForCall),
@@ -662,27 +629,30 @@ class CallInterceptorCompiler BASE_EMBEDDED {
// Restore the name_ register.
__ pop(name_);
- __ LeaveInternalFrame();
+
+ // Leave the internal frame.
}
void LoadWithInterceptor(MacroAssembler* masm,
Register receiver,
Register holder,
- JSObject* holder_obj,
+ Handle<JSObject> holder_obj,
Label* interceptor_succeeded) {
- __ EnterInternalFrame();
- __ push(holder); // Save the holder.
- __ push(name_); // Save the name.
-
- CompileCallLoadPropertyWithInterceptor(masm,
- receiver,
- holder,
- name_,
- holder_obj);
-
- __ pop(name_); // Restore the name.
- __ pop(receiver); // Restore the holder.
- __ LeaveInternalFrame();
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ push(holder); // Save the holder.
+ __ push(name_); // Save the name.
+
+ CompileCallLoadPropertyWithInterceptor(masm,
+ receiver,
+ holder,
+ name_,
+ holder_obj);
+
+ __ pop(name_); // Restore the name.
+ __ pop(receiver); // Restore the holder.
+ // Leave the internal frame.
+ }
__ CompareRoot(rax, Heap::kNoInterceptorResultSentinelRootIndex);
__ j(not_equal, interceptor_succeeded);
@@ -697,32 +667,26 @@ class CallInterceptorCompiler BASE_EMBEDDED {
void StubCompiler::GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind) {
ASSERT(kind == Code::LOAD_IC || kind == Code::KEYED_LOAD_IC);
- Code* code = NULL;
- if (kind == Code::LOAD_IC) {
- code = masm->isolate()->builtins()->builtin(Builtins::kLoadIC_Miss);
- } else {
- code = masm->isolate()->builtins()->builtin(Builtins::kKeyedLoadIC_Miss);
- }
-
- Handle<Code> ic(code);
- __ Jump(ic, RelocInfo::CODE_TARGET);
+ Handle<Code> code = (kind == Code::LOAD_IC)
+ ? masm->isolate()->builtins()->LoadIC_Miss()
+ : masm->isolate()->builtins()->KeyedLoadIC_Miss();
+ __ Jump(code, RelocInfo::CODE_TARGET);
}
void StubCompiler::GenerateKeyedLoadMissForceGeneric(MacroAssembler* masm) {
- Code* code = masm->isolate()->builtins()->builtin(
- Builtins::kKeyedLoadIC_MissForceGeneric);
- Handle<Code> ic(code);
- __ Jump(ic, RelocInfo::CODE_TARGET);
+ Handle<Code> code =
+ masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
+ __ Jump(code, RelocInfo::CODE_TARGET);
}
// Both name_reg and receiver_reg are preserved on jumps to miss_label,
// but may be destroyed if store is successful.
void StubCompiler::GenerateStoreField(MacroAssembler* masm,
- JSObject* object,
+ Handle<JSObject> object,
int index,
- Map* transition,
+ Handle<Map> transition,
Register receiver_reg,
Register name_reg,
Register scratch,
@@ -745,12 +709,12 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
// Perform map transition for the receiver if necessary.
- if ((transition != NULL) && (object->map()->unused_property_fields() == 0)) {
+ if (!transition.is_null() && (object->map()->unused_property_fields() == 0)) {
// The properties must be extended before we can store the value.
// We jump to a runtime call that extends the properties array.
__ pop(scratch); // Return address.
__ push(receiver_reg);
- __ Push(Handle<Map>(transition));
+ __ Push(transition);
__ push(rax);
__ push(scratch);
__ TailCallExternalReference(
@@ -761,11 +725,10 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
return;
}
- if (transition != NULL) {
+ if (!transition.is_null()) {
// Update the map of the object; no write barrier updating is
// needed because the map is never in new space.
- __ Move(FieldOperand(receiver_reg, HeapObject::kMapOffset),
- Handle<Map>(transition));
+ __ Move(FieldOperand(receiver_reg, HeapObject::kMapOffset), transition);
}
// Adjust for the number of properties stored in the object. Even in the
@@ -781,7 +744,8 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
// Update the write barrier for the array address.
// Pass the value being stored in the now unused name_reg.
__ movq(name_reg, rax);
- __ RecordWrite(receiver_reg, offset, name_reg, scratch);
+ __ RecordWriteField(
+ receiver_reg, offset, name_reg, scratch, kDontSaveFPRegs);
} else {
// Write to the properties array.
int offset = index * kPointerSize + FixedArray::kHeaderSize;
@@ -792,7 +756,8 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
// Update the write barrier for the array address.
// Pass the value being stored in the now unused name_reg.
__ movq(name_reg, rax);
- __ RecordWrite(scratch, offset, name_reg, receiver_reg);
+ __ RecordWriteField(
+ scratch, offset, name_reg, receiver_reg, kDontSaveFPRegs);
}
// Return the value (register rax).
@@ -803,37 +768,53 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
// Generate code to check that a global property cell is empty. Create
// the property cell at compilation time if no cell exists for the
// property.
-MUST_USE_RESULT static MaybeObject* GenerateCheckPropertyCell(
- MacroAssembler* masm,
- GlobalObject* global,
- String* name,
- Register scratch,
- Label* miss) {
- Object* probe;
- { MaybeObject* maybe_probe = global->EnsurePropertyCell(name);
- if (!maybe_probe->ToObject(&probe)) return maybe_probe;
- }
- JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(probe);
+static void GenerateCheckPropertyCell(MacroAssembler* masm,
+ Handle<GlobalObject> global,
+ Handle<String> name,
+ Register scratch,
+ Label* miss) {
+ Handle<JSGlobalPropertyCell> cell =
+ GlobalObject::EnsurePropertyCell(global, name);
ASSERT(cell->value()->IsTheHole());
- __ Move(scratch, Handle<Object>(cell));
+ __ Move(scratch, cell);
__ Cmp(FieldOperand(scratch, JSGlobalPropertyCell::kValueOffset),
masm->isolate()->factory()->the_hole_value());
__ j(not_equal, miss);
- return cell;
}
+// Calls GenerateCheckPropertyCell for each global object in the prototype chain
+// from object to (but not including) holder.
+static void GenerateCheckPropertyCells(MacroAssembler* masm,
+ Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<String> name,
+ Register scratch,
+ Label* miss) {
+ Handle<JSObject> current = object;
+ while (!current.is_identical_to(holder)) {
+ if (current->IsGlobalObject()) {
+ GenerateCheckPropertyCell(masm,
+ Handle<GlobalObject>::cast(current),
+ name,
+ scratch,
+ miss);
+ }
+ current = Handle<JSObject>(JSObject::cast(current->GetPrototype()));
+ }
+}
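
A pseudo-C++ restatement of the invariant these checks enforce at call time (helpers hypothetical):

    //   for (cur = object; cur != holder; cur = PrototypeSketch(cur))
    //     if (IsGlobalObjectSketch(cur) &&
    //         PropertyCellValueSketch(cur, name) != TheHoleSketch())
    //       goto miss;  // the property appeared after the stub was compiled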
+
#undef __
#define __ ACCESS_MASM((masm()))
-Register StubCompiler::CheckPrototypes(JSObject* object,
+Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
Register object_reg,
- JSObject* holder,
+ Handle<JSObject> holder,
Register holder_reg,
Register scratch1,
Register scratch2,
- String* name,
+ Handle<String> name,
int save_at_depth,
Label* miss) {
// Make sure there's no overlap between holder and object registers.
@@ -853,80 +834,58 @@ Register StubCompiler::CheckPrototypes(JSObject* object,
// Check the maps in the prototype chain.
// Traverse the prototype chain from the object and do map checks.
- JSObject* current = object;
- while (current != holder) {
- depth++;
+ Handle<JSObject> current = object;
+ while (!current.is_identical_to(holder)) {
+ ++depth;
// Only global objects and objects that do not require access
// checks are allowed in stubs.
ASSERT(current->IsJSGlobalProxy() || !current->IsAccessCheckNeeded());
- JSObject* prototype = JSObject::cast(current->GetPrototype());
+ Handle<JSObject> prototype(JSObject::cast(current->GetPrototype()));
if (!current->HasFastProperties() &&
!current->IsJSGlobalObject() &&
!current->IsJSGlobalProxy()) {
if (!name->IsSymbol()) {
- MaybeObject* lookup_result = heap()->LookupSymbol(name);
- if (lookup_result->IsFailure()) {
- set_failure(Failure::cast(lookup_result));
- return reg;
- } else {
- name = String::cast(lookup_result->ToObjectUnchecked());
- }
+ name = factory()->LookupSymbol(name);
}
- ASSERT(current->property_dictionary()->FindEntry(name) ==
+ ASSERT(current->property_dictionary()->FindEntry(*name) ==
StringDictionary::kNotFound);
- MaybeObject* negative_lookup = GenerateDictionaryNegativeLookup(masm(),
- miss,
- reg,
- name,
- scratch1,
- scratch2);
- if (negative_lookup->IsFailure()) {
- set_failure(Failure::cast(negative_lookup));
- return reg;
- }
+ GenerateDictionaryNegativeLookup(masm(), miss, reg, name,
+ scratch1, scratch2);
__ movq(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
- reg = holder_reg; // from now the object is in holder_reg
+ reg = holder_reg; // From now on the object will be in holder_reg.
__ movq(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
- } else if (heap()->InNewSpace(prototype)) {
- // Get the map of the current object.
- __ movq(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
- __ Cmp(scratch1, Handle<Map>(current->map()));
- // Branch on the result of the map check.
- __ j(not_equal, miss);
- // Check access rights to the global object. This has to happen
- // after the map check so that we know that the object is
- // actually a global object.
- if (current->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(reg, scratch1, miss);
-
- // Restore scratch register to be the map of the object.
- // We load the prototype from the map in the scratch register.
+ } else {
+ bool in_new_space = heap()->InNewSpace(*prototype);
+ Handle<Map> current_map(current->map());
+ if (in_new_space) {
+ // Save the map in scratch1 for later.
__ movq(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
+ __ Cmp(scratch1, current_map);
+ } else {
+ __ Cmp(FieldOperand(reg, HeapObject::kMapOffset), current_map);
}
- // The prototype is in new space; we cannot store a reference
- // to it in the code. Load it from the map.
- reg = holder_reg; // from now the object is in holder_reg
- __ movq(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
-
- } else {
- // Check the map of the current object.
- __ Cmp(FieldOperand(reg, HeapObject::kMapOffset),
- Handle<Map>(current->map()));
// Branch on the result of the map check.
__ j(not_equal, miss);
- // Check access rights to the global object. This has to happen
- // after the map check so that we know that the object is
- // actually a global object.
+ // Check access rights to the global object. This has to happen after
+ // the map check so that we know that the object is actually a global
+ // object.
if (current->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(reg, scratch1, miss);
+ __ CheckAccessGlobalProxy(reg, scratch2, miss);
+ }
+ reg = holder_reg; // From now on the object will be in holder_reg.
+
+ if (in_new_space) {
+ // The prototype is in new space; we cannot store a reference to it
+ // in the code. Load it from the map.
+ __ movq(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
+ } else {
+ // The prototype is in old space; load it directly.
+ __ Move(reg, prototype);
}
- // The prototype is in old space; load it directly.
- reg = holder_reg; // from now the object is in holder_reg
- __ Move(reg, Handle<JSObject>(prototype));
}
if (save_at_depth == depth) {
@@ -936,62 +895,46 @@ Register StubCompiler::CheckPrototypes(JSObject* object,
// Go to the next object in the prototype chain.
current = prototype;
}
+ ASSERT(current.is_identical_to(holder));
+
+ // Log the check depth.
+ LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
// Check the holder map.
__ Cmp(FieldOperand(reg, HeapObject::kMapOffset), Handle<Map>(holder->map()));
__ j(not_equal, miss);
- // Log the check depth.
- LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
-
- // Perform security check for access to the global object and return
- // the holder register.
- ASSERT(current == holder);
+ // Perform security check for access to the global object.
ASSERT(current->IsJSGlobalProxy() || !current->IsAccessCheckNeeded());
if (current->IsJSGlobalProxy()) {
__ CheckAccessGlobalProxy(reg, scratch1, miss);
}
- // If we've skipped any global objects, it's not enough to verify
- // that their maps haven't changed. We also need to check that the
- // property cell for the property is still empty.
- current = object;
- while (current != holder) {
- if (current->IsGlobalObject()) {
- MaybeObject* cell = GenerateCheckPropertyCell(masm(),
- GlobalObject::cast(current),
- name,
- scratch1,
- miss);
- if (cell->IsFailure()) {
- set_failure(Failure::cast(cell));
- return reg;
- }
- }
- current = JSObject::cast(current->GetPrototype());
- }
+ // If we've skipped any global objects, it's not enough to verify that
+ // their maps haven't changed. We also need to check that the property
+ // cell for the property is still empty.
+ GenerateCheckPropertyCells(masm(), object, holder, name, scratch1, miss);
// Return the register containing the holder.
return reg;
}
-void StubCompiler::GenerateLoadField(JSObject* object,
- JSObject* holder,
+void StubCompiler::GenerateLoadField(Handle<JSObject> object,
+ Handle<JSObject> holder,
Register receiver,
Register scratch1,
Register scratch2,
Register scratch3,
int index,
- String* name,
+ Handle<String> name,
Label* miss) {
// Check that the receiver isn't a smi.
__ JumpIfSmi(receiver, miss);
// Check the prototype chain.
- Register reg =
- CheckPrototypes(object, receiver, holder,
- scratch1, scratch2, scratch3, name, miss);
+ Register reg = CheckPrototypes(
+ object, receiver, holder, scratch1, scratch2, scratch3, name, miss);
// Get the value from the properties.
GenerateFastPropertyLoad(masm(), rax, reg, holder, index);
@@ -999,25 +942,22 @@ void StubCompiler::GenerateLoadField(JSObject* object,
}
-MaybeObject* StubCompiler::GenerateLoadCallback(JSObject* object,
- JSObject* holder,
- Register receiver,
- Register name_reg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- AccessorInfo* callback,
- String* name,
- Label* miss) {
+void StubCompiler::GenerateLoadCallback(Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Register receiver,
+ Register name_reg,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Handle<AccessorInfo> callback,
+ Handle<String> name,
+ Label* miss) {
// Check that the receiver isn't a smi.
__ JumpIfSmi(receiver, miss);
// Check that the maps haven't changed.
- Register reg =
- CheckPrototypes(object, receiver, holder, scratch1,
- scratch2, scratch3, name, miss);
-
- Handle<AccessorInfo> callback_handle(callback);
+ Register reg = CheckPrototypes(object, receiver, holder, scratch1,
+ scratch2, scratch3, name, miss);
// Insert additional parameters into the stack frame above return address.
ASSERT(!scratch2.is(reg));
@@ -1025,11 +965,11 @@ MaybeObject* StubCompiler::GenerateLoadCallback(JSObject* object,
__ push(receiver); // receiver
__ push(reg); // holder
- if (heap()->InNewSpace(callback_handle->data())) {
- __ Move(scratch1, callback_handle);
+ if (heap()->InNewSpace(callback->data())) {
+ __ Move(scratch1, callback);
__ push(FieldOperand(scratch1, AccessorInfo::kDataOffset)); // data
} else {
- __ Push(Handle<Object>(callback_handle->data()));
+ __ Push(Handle<Object>(callback->data()));
}
__ push(name_reg); // name
// Save a pointer to where we pushed the arguments pointer.
@@ -1048,10 +988,6 @@ MaybeObject* StubCompiler::GenerateLoadCallback(JSObject* object,
__ movq(name_arg, rsp);
__ push(scratch2); // Restore return address.
- // Do call through the api.
- Address getter_address = v8::ToCData<Address>(callback->getter());
- ApiFunction fun(getter_address);
-
// 3-element array for v8::Arguments::values_ and handler for name.
const int kStackSpace = 4;
@@ -1068,45 +1004,42 @@ MaybeObject* StubCompiler::GenerateLoadCallback(JSObject* object,
// could be used to pass arguments.
__ lea(accessor_info_arg, StackSpaceOperand(0));
- // Emitting a stub call may try to allocate (if the code is not
- // already generated). Do not allow the assembler to perform a
- // garbage collection but instead return the allocation failure
- // object.
- return masm()->TryCallApiFunctionAndReturn(&fun, kStackSpace);
+ Address getter_address = v8::ToCData<Address>(callback->getter());
+ __ CallApiFunctionAndReturn(getter_address, kStackSpace);
}
-void StubCompiler::GenerateLoadConstant(JSObject* object,
- JSObject* holder,
+void StubCompiler::GenerateLoadConstant(Handle<JSObject> object,
+ Handle<JSObject> holder,
Register receiver,
Register scratch1,
Register scratch2,
Register scratch3,
- Object* value,
- String* name,
+ Handle<Object> value,
+ Handle<String> name,
Label* miss) {
// Check that the receiver isn't a smi.
__ JumpIfSmi(receiver, miss);
// Check that the maps haven't changed.
- CheckPrototypes(object, receiver, holder,
- scratch1, scratch2, scratch3, name, miss);
+ CheckPrototypes(
+ object, receiver, holder, scratch1, scratch2, scratch3, name, miss);
// Return the constant value.
- __ Move(rax, Handle<Object>(value));
+ __ Move(rax, value);
__ ret(0);
}
-void StubCompiler::GenerateLoadInterceptor(JSObject* object,
- JSObject* interceptor_holder,
+void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
+ Handle<JSObject> interceptor_holder,
LookupResult* lookup,
Register receiver,
Register name_reg,
Register scratch1,
Register scratch2,
Register scratch3,
- String* name,
+ Handle<String> name,
Label* miss) {
ASSERT(interceptor_holder->HasNamedInterceptor());
ASSERT(!interceptor_holder->GetNamedInterceptor()->getter()->IsUndefined());
@@ -1122,9 +1055,9 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object,
if (lookup->type() == FIELD) {
compile_followup_inline = true;
} else if (lookup->type() == CALLBACKS &&
- lookup->GetCallbackObject()->IsAccessorInfo() &&
- AccessorInfo::cast(lookup->GetCallbackObject())->getter() != NULL) {
- compile_followup_inline = true;
+ lookup->GetCallbackObject()->IsAccessorInfo()) {
+ compile_followup_inline =
+ AccessorInfo::cast(lookup->GetCallbackObject())->getter() != NULL;
}
}
@@ -1139,47 +1072,49 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object,
// Save necessary data before invoking an interceptor.
// Requires a frame to make GC aware of pushed pointers.
- __ EnterInternalFrame();
+ {
+ FrameScope frame_scope(masm(), StackFrame::INTERNAL);
- if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
- // CALLBACKS case needs a receiver to be passed into C++ callback.
- __ push(receiver);
- }
- __ push(holder_reg);
- __ push(name_reg);
-
- // Invoke an interceptor. Note: map checks from receiver to
- // interceptor's holder has been compiled before (see a caller
- // of this method.)
- CompileCallLoadPropertyWithInterceptor(masm(),
- receiver,
- holder_reg,
- name_reg,
- interceptor_holder);
-
- // Check if interceptor provided a value for property. If it's
- // the case, return immediately.
- Label interceptor_failed;
- __ CompareRoot(rax, Heap::kNoInterceptorResultSentinelRootIndex);
- __ j(equal, &interceptor_failed);
- __ LeaveInternalFrame();
- __ ret(0);
+ if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
+ // CALLBACKS case needs a receiver to be passed into C++ callback.
+ __ push(receiver);
+ }
+ __ push(holder_reg);
+ __ push(name_reg);
- __ bind(&interceptor_failed);
- __ pop(name_reg);
- __ pop(holder_reg);
- if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
- __ pop(receiver);
- }
+    // Invoke an interceptor. Note: map checks from receiver to
+    // interceptor's holder have been compiled before (see a caller
+    // of this method).
+ CompileCallLoadPropertyWithInterceptor(masm(),
+ receiver,
+ holder_reg,
+ name_reg,
+ interceptor_holder);
+
+    // Check if the interceptor provided a value for the property. If it
+    // did, return immediately.
+ Label interceptor_failed;
+ __ CompareRoot(rax, Heap::kNoInterceptorResultSentinelRootIndex);
+ __ j(equal, &interceptor_failed);
+ frame_scope.GenerateLeaveFrame();
+ __ ret(0);
- __ LeaveInternalFrame();
+ __ bind(&interceptor_failed);
+ __ pop(name_reg);
+ __ pop(holder_reg);
+ if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
+ __ pop(receiver);
+ }
+
+ // Leave the internal frame.
+ }
  // Check that the maps from the interceptor's holder to the lookup's
  // holder haven't changed, and load the lookup's holder into the
  // |holder| register.
- if (interceptor_holder != lookup->holder()) {
+ if (*interceptor_holder != lookup->holder()) {
holder_reg = CheckPrototypes(interceptor_holder,
holder_reg,
- lookup->holder(),
+ Handle<JSObject>(lookup->holder()),
scratch1,
scratch2,
scratch3,
@@ -1191,15 +1126,15 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object,
// We found FIELD property in prototype chain of interceptor's holder.
// Retrieve a field from field's holder.
GenerateFastPropertyLoad(masm(), rax, holder_reg,
- lookup->holder(), lookup->GetFieldIndex());
+ Handle<JSObject>(lookup->holder()),
+ lookup->GetFieldIndex());
__ ret(0);
} else {
// We found CALLBACKS property in prototype chain of interceptor's
// holder.
ASSERT(lookup->type() == CALLBACKS);
- ASSERT(lookup->GetCallbackObject()->IsAccessorInfo());
- AccessorInfo* callback = AccessorInfo::cast(lookup->GetCallbackObject());
- ASSERT(callback != NULL);
+ Handle<AccessorInfo> callback(
+ AccessorInfo::cast(lookup->GetCallbackObject()));
ASSERT(callback->getter() != NULL);
// Tail call to runtime.
@@ -1208,7 +1143,7 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object,
__ pop(scratch2); // return address
__ push(receiver);
__ push(holder_reg);
- __ Move(holder_reg, Handle<AccessorInfo>(callback));
+ __ Move(holder_reg, callback);
__ push(FieldOperand(holder_reg, AccessorInfo::kDataOffset));
__ push(holder_reg);
__ push(name_reg);
@@ -1237,17 +1172,17 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object,
}
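
The rewrite above trades the manual __ EnterInternalFrame() / __ LeaveInternalFrame() pairing for a block-scoped FrameScope, so every path out of the interceptor call is balanced by construction: GenerateLeaveFrame() emits the teardown for the early ret, and the closing brace emits it for the fall-through path. A minimal sketch of that RAII shape, assuming a MacroAssembler whose enter/leave methods stand in for the real emitters:

#include <cstdio>

struct MacroAssembler {
  void EnterInternalFrame() { std::printf("emit: enter internal frame\n"); }
  void LeaveInternalFrame() { std::printf("emit: leave internal frame\n"); }
};

// Assumed shape of the scope: entering in the constructor and leaving in
// the destructor means a new early exit can no longer leak an unbalanced
// frame the way a forgotten LeaveInternalFrame() could.
class FrameScope {
 public:
  explicit FrameScope(MacroAssembler* masm) : masm_(masm) {
    masm_->EnterInternalFrame();
  }
  // Early-exit paths (a ret while the scope is open) emit their own
  // teardown; the destructor emits the teardown for the fall-through path.
  void GenerateLeaveFrame() { masm_->LeaveInternalFrame(); }
  ~FrameScope() { masm_->LeaveInternalFrame(); }

 private:
  MacroAssembler* masm_;
};

int main() {
  MacroAssembler masm;
  {
    FrameScope scope(&masm);     // emits the enter sequence
    scope.GenerateLeaveFrame();  // teardown before the early ret
  }                              // teardown for the fall-through path
  return 0;
}
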
-void CallStubCompiler::GenerateNameCheck(String* name, Label* miss) {
+void CallStubCompiler::GenerateNameCheck(Handle<String> name, Label* miss) {
if (kind_ == Code::KEYED_CALL_IC) {
- __ Cmp(rcx, Handle<String>(name));
+ __ Cmp(rcx, name);
__ j(not_equal, miss);
}
}
-void CallStubCompiler::GenerateGlobalReceiverCheck(JSObject* object,
- JSObject* holder,
- String* name,
+void CallStubCompiler::GenerateGlobalReceiverCheck(Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<String> name,
Label* miss) {
ASSERT(holder->IsGlobalObject());
@@ -1260,7 +1195,7 @@ void CallStubCompiler::GenerateGlobalReceiverCheck(JSObject* object,
// If the object is the holder then we know that it's a global
// object which can only happen for contextual calls. In this case,
// the receiver cannot be a smi.
- if (object != holder) {
+ if (!object.is_identical_to(holder)) {
__ JumpIfSmi(rdx, miss);
}
@@ -1269,15 +1204,16 @@ void CallStubCompiler::GenerateGlobalReceiverCheck(JSObject* object,
}
-void CallStubCompiler::GenerateLoadFunctionFromCell(JSGlobalPropertyCell* cell,
- JSFunction* function,
- Label* miss) {
+void CallStubCompiler::GenerateLoadFunctionFromCell(
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<JSFunction> function,
+ Label* miss) {
// Get the value from the cell.
- __ Move(rdi, Handle<JSGlobalPropertyCell>(cell));
+ __ Move(rdi, cell);
__ movq(rdi, FieldOperand(rdi, JSGlobalPropertyCell::kValueOffset));
// Check that the cell contains the same function.
- if (heap()->InNewSpace(function)) {
+ if (heap()->InNewSpace(*function)) {
// We can't embed a pointer to a function in new space so we have
// to verify that the shared function info is unchanged. This has
// the nice side effect that multiple closures based on the same
@@ -1290,30 +1226,26 @@ void CallStubCompiler::GenerateLoadFunctionFromCell(JSGlobalPropertyCell* cell,
// Check the shared function info. Make sure it hasn't changed.
__ Move(rax, Handle<SharedFunctionInfo>(function->shared()));
__ cmpq(FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset), rax);
- __ j(not_equal, miss);
} else {
- __ Cmp(rdi, Handle<JSFunction>(function));
- __ j(not_equal, miss);
+ __ Cmp(rdi, function);
}
+ __ j(not_equal, miss);
}
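
Besides the handle conversion, this hunk hoists the duplicated __ j(not_equal, miss) below the if/else so both comparisons share one branch. The comment carries the interesting invariant: a pointer to a new-space function cannot be embedded in generated code, so the stub falls back to comparing the SharedFunctionInfo, which as a side effect lets sibling closures of the same literal share the stub. A hedged C++ sketch of that identity rule (the types are stand-ins, not the V8 heap model):

#include <cstdio>

struct SharedFunctionInfo {};
struct JSFunction {
  const SharedFunctionInfo* shared;
  bool in_new_space;
};

// Hypothetical predicate: "same function" degrades to "same shared info"
// whenever the expected function lives in new space and may be moved.
bool CellStillHoldsFunction(const JSFunction* cell_value,
                            const JSFunction* expected) {
  if (expected->in_new_space) {
    return cell_value->shared == expected->shared;
  }
  return cell_value == expected;  // old space: direct pointer compare
}

int main() {
  SharedFunctionInfo shared;
  JSFunction closure_a = {&shared, true};
  JSFunction closure_b = {&shared, true};
  std::printf("%d\n", CellStillHoldsFunction(&closure_a, &closure_b));  // 1
  return 0;
}
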
-MaybeObject* CallStubCompiler::GenerateMissBranch() {
- MaybeObject* maybe_obj =
+void CallStubCompiler::GenerateMissBranch() {
+ Handle<Code> code =
isolate()->stub_cache()->ComputeCallMiss(arguments().immediate(),
kind_,
- extra_ic_state_);
- Object* obj;
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- __ Jump(Handle<Code>(Code::cast(obj)), RelocInfo::CODE_TARGET);
- return obj;
+ extra_state_);
+ __ Jump(code, RelocInfo::CODE_TARGET);
}
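
GenerateMissBranch condenses from the MaybeObject* protocol, in which every caller unpacked a possible allocation failure and re-propagated it, to ComputeCallMiss handing back a Handle<Code> directly. Throughout the rest of this file the only out-of-band value left is a null handle, which the custom-call compilers return to mean "fall back to the regular compiler". A minimal sketch of that convention, with a hypothetical Handle standing in for v8::internal::Handle:

#include <cassert>
#include <cstdio>

template <typename T>
class Handle {  // hypothetical stand-in, not the real V8 class
 public:
  Handle() : location_(nullptr) {}
  explicit Handle(T* object) : location_(object) {}
  static Handle<T> null() { return Handle<T>(); }
  bool is_null() const { return location_ == nullptr; }
  T* operator*() const { assert(!is_null()); return location_; }

 private:
  T* location_;
};

struct Code { const char* name; };

// A custom-call compiler either produces a stub or signals "use the
// generic path" with a null handle; there is no failure object for the
// caller to unpack and thread outward.
Handle<Code> CompileCustomCall(bool applicable) {
  static Code stub = {"custom stub"};
  return applicable ? Handle<Code>(&stub) : Handle<Code>::null();
}

int main() {
  Handle<Code> code = CompileCustomCall(false);
  if (code.is_null()) std::printf("bail out to the regular compiler\n");
  return 0;
}
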
-MaybeObject* CallStubCompiler::CompileCallField(JSObject* object,
- JSObject* holder,
+Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object,
+ Handle<JSObject> holder,
int index,
- String* name) {
+ Handle<String> name) {
// ----------- S t a t e -------------
// rcx : function name
// rsp[0] : return address
@@ -1353,7 +1285,7 @@ MaybeObject* CallStubCompiler::CompileCallField(JSObject* object,
}
// Invoke the function.
- CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+ CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
? CALL_AS_FUNCTION
: CALL_AS_METHOD;
__ InvokeFunction(rdi, arguments(), JUMP_FUNCTION,
@@ -1361,19 +1293,19 @@ MaybeObject* CallStubCompiler::CompileCallField(JSObject* object,
// Handle call cache miss.
__ bind(&miss);
- MaybeObject* maybe_result = GenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
+ GenerateMissBranch();
// Return the generated code.
return GetCode(FIELD, name);
}
-MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
- JSObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
+Handle<Code> CallStubCompiler::CompileArrayPushCall(
+ Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<JSFunction> function,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- rcx : name
// -- rsp[0] : return address
@@ -1383,10 +1315,9 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
// -----------------------------------
// If object is not an array, bail out to regular call.
- if (!object->IsJSArray() || cell != NULL) return heap()->undefined_value();
+ if (!object->IsJSArray() || !cell.is_null()) return Handle<Code>::null();
Label miss;
-
GenerateNameCheck(name, &miss);
// Get the receiver from the stack.
@@ -1396,14 +1327,8 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
// Check that the receiver isn't a smi.
__ JumpIfSmi(rdx, &miss);
- CheckPrototypes(JSObject::cast(object),
- rdx,
- holder,
- rbx,
- rax,
- rdi,
- name,
- &miss);
+ CheckPrototypes(Handle<JSObject>::cast(object), rdx, holder, rbx, rax, rdi,
+ name, &miss);
if (argc == 0) {
// Noop, return the length.
@@ -1421,7 +1346,7 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
__ j(not_equal, &call_builtin);
if (argc == 1) { // Otherwise fall through to call builtin.
- Label exit, with_write_barrier, attempt_to_grow_elements;
+ Label attempt_to_grow_elements, with_write_barrier;
// Get the array's length into rax and calculate new length.
__ SmiToInteger32(rax, FieldOperand(rdx, JSArray::kLengthOffset));
@@ -1435,30 +1360,40 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
__ cmpl(rax, rcx);
__ j(greater, &attempt_to_grow_elements);
+ // Check if value is a smi.
+ __ movq(rcx, Operand(rsp, argc * kPointerSize));
+ __ JumpIfNotSmi(rcx, &with_write_barrier);
+
// Save new length.
__ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rax);
// Push the element.
- __ movq(rcx, Operand(rsp, argc * kPointerSize));
__ lea(rdx, FieldOperand(rbx,
rax, times_pointer_size,
FixedArray::kHeaderSize - argc * kPointerSize));
__ movq(Operand(rdx, 0), rcx);
- // Check if value is a smi.
__ Integer32ToSmi(rax, rax); // Return new length as smi.
-
- __ JumpIfNotSmi(rcx, &with_write_barrier);
-
- __ bind(&exit);
__ ret((argc + 1) * kPointerSize);
__ bind(&with_write_barrier);
- __ InNewSpace(rbx, rcx, equal, &exit);
+ __ movq(rdi, FieldOperand(rdx, HeapObject::kMapOffset));
+ __ CheckFastObjectElements(rdi, &call_builtin);
+
+ // Save new length.
+ __ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rax);
+
+ // Push the element.
+ __ lea(rdx, FieldOperand(rbx,
+ rax, times_pointer_size,
+ FixedArray::kHeaderSize - argc * kPointerSize));
+ __ movq(Operand(rdx, 0), rcx);
- __ RecordWriteHelper(rbx, rdx, rcx);
+ __ RecordWrite(rbx, rdx, rcx, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ __ Integer32ToSmi(rax, rax); // Return new length as smi.
__ ret((argc + 1) * kPointerSize);
__ bind(&attempt_to_grow_elements);
@@ -1466,6 +1401,15 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
__ jmp(&call_builtin);
}
+ __ movq(rdi, Operand(rsp, argc * kPointerSize));
+ // Growing elements that are SMI-only requires special handling in case
+ // the new element is non-Smi. For now, delegate to the builtin.
+ Label no_fast_elements_check;
+ __ JumpIfSmi(rdi, &no_fast_elements_check);
+ __ movq(rcx, FieldOperand(rdx, HeapObject::kMapOffset));
+ __ CheckFastObjectElements(rcx, &call_builtin, Label::kFar);
+ __ bind(&no_fast_elements_check);
+
ExternalReference new_space_allocation_top =
ExternalReference::new_space_allocation_top_address(isolate());
ExternalReference new_space_allocation_limit =
@@ -1489,16 +1433,22 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
// We fit and could grow elements.
__ Store(new_space_allocation_top, rcx);
- __ movq(rcx, Operand(rsp, argc * kPointerSize));
// Push the argument...
- __ movq(Operand(rdx, 0), rcx);
+ __ movq(Operand(rdx, 0), rdi);
// ... and fill the rest with holes.
__ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
for (int i = 1; i < kAllocationDelta; i++) {
__ movq(Operand(rdx, i * kPointerSize), kScratchRegister);
}
+ // We know the elements array is in new space so we don't need the
+ // remembered set, but we just pushed a value onto it so we may have to
+ // tell the incremental marker to rescan the object that we just grew. We
+ // don't need to worry about the holes because they are in old space and
+ // already marked black.
+ __ RecordWrite(rbx, rdx, rdi, kDontSaveFPRegs, OMIT_REMEMBERED_SET);
+
// Restore receiver to rdx as finish sequence assumes it's here.
__ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
@@ -1510,7 +1460,6 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
__ Integer32ToSmi(rax, rax);
__ movq(FieldOperand(rdx, JSArray::kLengthOffset), rax);
- // Elements are in new space, so write barrier is not required.
__ ret((argc + 1) * kPointerSize);
}
@@ -1522,19 +1471,19 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
}
__ bind(&miss);
- MaybeObject* maybe_result = GenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
+ GenerateMissBranch();
// Return the generated code.
return GetCode(function);
}
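
The reworked push fast path smi-checks the incoming value before committing the new length: storing a smi needs no write barrier, while storing a heap object first verifies the array still has fast object elements (a smi-only backing store would need a transition, which is delegated to the builtin) and then performs a full RecordWrite. A plain-C++ sketch of the barrier-elision rule the generated code follows (names are illustrative):

#include <cstdint>
#include <cstdio>

// V8 tags smis with a clear low bit, which is all this sketch relies on.
bool IsSmi(uintptr_t value) { return (value & 1) == 0; }

// Illustrative store helper, not V8 code: a smi is an immediate, so
// writing one can never create a pointer the collector must track.
void StoreElement(uintptr_t* slot, uintptr_t value) {
  *slot = value;
  if (IsSmi(value)) return;  // barrier skipped on the smi path
  std::printf("record write: slot %p now holds a heap object\n",
              static_cast<void*>(slot));
}

int main() {
  uintptr_t elements[1] = {0};
  StoreElement(&elements[0], uintptr_t{21} << 1);  // smi 21: no barrier
  StoreElement(&elements[0], 0x1001);  // tagged pointer: barrier runs
  return 0;
}
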
-MaybeObject* CallStubCompiler::CompileArrayPopCall(Object* object,
- JSObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
+Handle<Code> CallStubCompiler::CompileArrayPopCall(
+ Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<JSFunction> function,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- rcx : name
// -- rsp[0] : return address
@@ -1544,10 +1493,9 @@ MaybeObject* CallStubCompiler::CompileArrayPopCall(Object* object,
// -----------------------------------
// If object is not an array, bail out to regular call.
- if (!object->IsJSArray() || cell != NULL) return heap()->undefined_value();
+ if (!object->IsJSArray() || !cell.is_null()) return Handle<Code>::null();
Label miss, return_undefined, call_builtin;
-
GenerateNameCheck(name, &miss);
// Get the receiver from the stack.
@@ -1557,9 +1505,8 @@ MaybeObject* CallStubCompiler::CompileArrayPopCall(Object* object,
// Check that the receiver isn't a smi.
__ JumpIfSmi(rdx, &miss);
- CheckPrototypes(JSObject::cast(object), rdx,
- holder, rbx,
- rax, rdi, name, &miss);
+ CheckPrototypes(Handle<JSObject>::cast(object), rdx, holder, rbx, rax, rdi,
+ name, &miss);
// Get the elements array of the object.
__ movq(rbx, FieldOperand(rdx, JSArray::kElementsOffset));
@@ -1605,20 +1552,19 @@ MaybeObject* CallStubCompiler::CompileArrayPopCall(Object* object,
1);
__ bind(&miss);
- MaybeObject* maybe_result = GenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
+ GenerateMissBranch();
// Return the generated code.
return GetCode(function);
}
-MaybeObject* CallStubCompiler::CompileStringCharCodeAtCall(
- Object* object,
- JSObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
+Handle<Code> CallStubCompiler::CompileStringCharCodeAtCall(
+ Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<JSFunction> function,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- rcx : function name
// -- rsp[0] : return address
@@ -1628,7 +1574,7 @@ MaybeObject* CallStubCompiler::CompileStringCharCodeAtCall(
// -----------------------------------
// If object is not a string, bail out to regular call.
- if (!object->IsString() || cell != NULL) return heap()->undefined_value();
+ if (!object->IsString() || !cell.is_null()) return Handle<Code>::null();
const int argc = arguments().immediate();
@@ -1636,13 +1582,11 @@ MaybeObject* CallStubCompiler::CompileStringCharCodeAtCall(
Label name_miss;
Label index_out_of_range;
Label* index_out_of_range_label = &index_out_of_range;
-
if (kind_ == Code::CALL_IC &&
- (CallICBase::StringStubState::decode(extra_ic_state_) ==
+ (CallICBase::StringStubState::decode(extra_state_) ==
DEFAULT_STRING_STUB)) {
index_out_of_range_label = &miss;
}
-
GenerateNameCheck(name, &name_miss);
// Check that the maps starting from the prototype haven't changed.
@@ -1650,13 +1594,12 @@ MaybeObject* CallStubCompiler::CompileStringCharCodeAtCall(
Context::STRING_FUNCTION_INDEX,
rax,
&miss);
- ASSERT(object != holder);
- CheckPrototypes(JSObject::cast(object->GetPrototype()), rax, holder,
- rbx, rdx, rdi, name, &miss);
+ ASSERT(!object.is_identical_to(holder));
+ CheckPrototypes(Handle<JSObject>(JSObject::cast(object->GetPrototype())),
+ rax, holder, rbx, rdx, rdi, name, &miss);
Register receiver = rbx;
Register index = rdi;
- Register scratch = rdx;
Register result = rax;
__ movq(receiver, Operand(rsp, (argc + 1) * kPointerSize));
if (argc > 0) {
@@ -1665,19 +1608,18 @@ MaybeObject* CallStubCompiler::CompileStringCharCodeAtCall(
__ LoadRoot(index, Heap::kUndefinedValueRootIndex);
}
- StringCharCodeAtGenerator char_code_at_generator(receiver,
- index,
- scratch,
- result,
- &miss, // When not a string.
- &miss, // When not a number.
- index_out_of_range_label,
- STRING_INDEX_IS_NUMBER);
- char_code_at_generator.GenerateFast(masm());
+ StringCharCodeAtGenerator generator(receiver,
+ index,
+ result,
+ &miss, // When not a string.
+ &miss, // When not a number.
+ index_out_of_range_label,
+ STRING_INDEX_IS_NUMBER);
+ generator.GenerateFast(masm());
__ ret((argc + 1) * kPointerSize);
StubRuntimeCallHelper call_helper;
- char_code_at_generator.GenerateSlow(masm(), call_helper);
+ generator.GenerateSlow(masm(), call_helper);
if (index_out_of_range.is_linked()) {
__ bind(&index_out_of_range);
@@ -1687,22 +1629,21 @@ MaybeObject* CallStubCompiler::CompileStringCharCodeAtCall(
__ bind(&miss);
// Restore function name in rcx.
- __ Move(rcx, Handle<String>(name));
+ __ Move(rcx, name);
__ bind(&name_miss);
- MaybeObject* maybe_result = GenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
+ GenerateMissBranch();
// Return the generated code.
return GetCode(function);
}
-MaybeObject* CallStubCompiler::CompileStringCharAtCall(
- Object* object,
- JSObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
+Handle<Code> CallStubCompiler::CompileStringCharAtCall(
+ Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<JSFunction> function,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- rcx : function name
// -- rsp[0] : return address
@@ -1712,21 +1653,18 @@ MaybeObject* CallStubCompiler::CompileStringCharAtCall(
// -----------------------------------
// If object is not a string, bail out to regular call.
- if (!object->IsString() || cell != NULL) return heap()->undefined_value();
+ if (!object->IsString() || !cell.is_null()) return Handle<Code>::null();
const int argc = arguments().immediate();
-
Label miss;
Label name_miss;
Label index_out_of_range;
Label* index_out_of_range_label = &index_out_of_range;
-
if (kind_ == Code::CALL_IC &&
- (CallICBase::StringStubState::decode(extra_ic_state_) ==
+ (CallICBase::StringStubState::decode(extra_state_) ==
DEFAULT_STRING_STUB)) {
index_out_of_range_label = &miss;
}
-
GenerateNameCheck(name, &name_miss);
// Check that the maps starting from the prototype haven't changed.
@@ -1734,14 +1672,13 @@ MaybeObject* CallStubCompiler::CompileStringCharAtCall(
Context::STRING_FUNCTION_INDEX,
rax,
&miss);
- ASSERT(object != holder);
- CheckPrototypes(JSObject::cast(object->GetPrototype()), rax, holder,
- rbx, rdx, rdi, name, &miss);
+ ASSERT(!object.is_identical_to(holder));
+ CheckPrototypes(Handle<JSObject>(JSObject::cast(object->GetPrototype())),
+ rax, holder, rbx, rdx, rdi, name, &miss);
Register receiver = rax;
Register index = rdi;
- Register scratch1 = rbx;
- Register scratch2 = rdx;
+ Register scratch = rdx;
Register result = rax;
__ movq(receiver, Operand(rsp, (argc + 1) * kPointerSize));
if (argc > 0) {
@@ -1750,45 +1687,42 @@ MaybeObject* CallStubCompiler::CompileStringCharAtCall(
__ LoadRoot(index, Heap::kUndefinedValueRootIndex);
}
- StringCharAtGenerator char_at_generator(receiver,
- index,
- scratch1,
- scratch2,
- result,
- &miss, // When not a string.
- &miss, // When not a number.
- index_out_of_range_label,
- STRING_INDEX_IS_NUMBER);
- char_at_generator.GenerateFast(masm());
+ StringCharAtGenerator generator(receiver,
+ index,
+ scratch,
+ result,
+ &miss, // When not a string.
+ &miss, // When not a number.
+ index_out_of_range_label,
+ STRING_INDEX_IS_NUMBER);
+ generator.GenerateFast(masm());
__ ret((argc + 1) * kPointerSize);
StubRuntimeCallHelper call_helper;
- char_at_generator.GenerateSlow(masm(), call_helper);
+ generator.GenerateSlow(masm(), call_helper);
if (index_out_of_range.is_linked()) {
__ bind(&index_out_of_range);
__ LoadRoot(rax, Heap::kEmptyStringRootIndex);
__ ret((argc + 1) * kPointerSize);
}
-
__ bind(&miss);
// Restore function name in rcx.
- __ Move(rcx, Handle<String>(name));
+ __ Move(rcx, name);
__ bind(&name_miss);
- MaybeObject* maybe_result = GenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
+ GenerateMissBranch();
// Return the generated code.
return GetCode(function);
}
-MaybeObject* CallStubCompiler::CompileStringFromCharCodeCall(
- Object* object,
- JSObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
+Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
+ Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<JSFunction> function,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- rcx : function name
// -- rsp[0] : return address
@@ -1797,25 +1731,23 @@ MaybeObject* CallStubCompiler::CompileStringFromCharCodeCall(
// -- rsp[(argc + 1) * 8] : receiver
// -----------------------------------
- const int argc = arguments().immediate();
-
// If the object is not a JSObject or we got an unexpected number of
// arguments, bail out to the regular call.
- if (!object->IsJSObject() || argc != 1) return heap()->undefined_value();
+ const int argc = arguments().immediate();
+ if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
Label miss;
GenerateNameCheck(name, &miss);
- if (cell == NULL) {
+ if (cell.is_null()) {
__ movq(rdx, Operand(rsp, 2 * kPointerSize));
-
__ JumpIfSmi(rdx, &miss);
-
- CheckPrototypes(JSObject::cast(object), rdx, holder, rbx, rax, rdi, name,
- &miss);
+ CheckPrototypes(Handle<JSObject>::cast(object), rdx, holder, rbx, rax, rdi,
+ name, &miss);
} else {
- ASSERT(cell->value() == function);
- GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss);
+ ASSERT(cell->value() == *function);
+ GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
+ &miss);
GenerateLoadFunctionFromCell(cell, function, &miss);
}
@@ -1830,17 +1762,17 @@ MaybeObject* CallStubCompiler::CompileStringFromCharCodeCall(
// Convert the smi code to uint16.
__ SmiAndConstant(code, code, Smi::FromInt(0xffff));
- StringCharFromCodeGenerator char_from_code_generator(code, rax);
- char_from_code_generator.GenerateFast(masm());
+ StringCharFromCodeGenerator generator(code, rax);
+ generator.GenerateFast(masm());
__ ret(2 * kPointerSize);
StubRuntimeCallHelper call_helper;
- char_from_code_generator.GenerateSlow(masm(), call_helper);
+ generator.GenerateSlow(masm(), call_helper);
// Tail call the full function. We do not have to patch the receiver
// because the function makes no use of it.
__ bind(&slow);
- CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+ CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
? CALL_AS_FUNCTION
: CALL_AS_METHOD;
__ InvokeFunction(function, arguments(), JUMP_FUNCTION,
@@ -1848,29 +1780,30 @@ MaybeObject* CallStubCompiler::CompileStringFromCharCodeCall(
__ bind(&miss);
// rcx: function name.
- MaybeObject* maybe_result = GenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
+ GenerateMissBranch();
// Return the generated code.
- return (cell == NULL) ? GetCode(function) : GetCode(NORMAL, name);
+ return cell.is_null() ? GetCode(function) : GetCode(NORMAL, name);
}
-MaybeObject* CallStubCompiler::CompileMathFloorCall(Object* object,
- JSObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
+Handle<Code> CallStubCompiler::CompileMathFloorCall(
+ Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<JSFunction> function,
+ Handle<String> name) {
// TODO(872): implement this.
- return heap()->undefined_value();
+ return Handle<Code>::null();
}
-MaybeObject* CallStubCompiler::CompileMathAbsCall(Object* object,
- JSObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
+Handle<Code> CallStubCompiler::CompileMathAbsCall(
+ Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<JSFunction> function,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- rcx : function name
// -- rsp[0] : return address
@@ -1879,28 +1812,25 @@ MaybeObject* CallStubCompiler::CompileMathAbsCall(Object* object,
// -- rsp[(argc + 1) * 8] : receiver
// -----------------------------------
- const int argc = arguments().immediate();
-
// If the object is not a JSObject or we got an unexpected number of
// arguments, bail out to the regular call.
- if (!object->IsJSObject() || argc != 1) return heap()->undefined_value();
+ const int argc = arguments().immediate();
+ if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
Label miss;
GenerateNameCheck(name, &miss);
- if (cell == NULL) {
+ if (cell.is_null()) {
__ movq(rdx, Operand(rsp, 2 * kPointerSize));
-
__ JumpIfSmi(rdx, &miss);
-
- CheckPrototypes(JSObject::cast(object), rdx, holder, rbx, rax, rdi, name,
- &miss);
+ CheckPrototypes(Handle<JSObject>::cast(object), rdx, holder, rbx, rax, rdi,
+ name, &miss);
} else {
- ASSERT(cell->value() == function);
- GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss);
+ ASSERT(cell->value() == *function);
+ GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
+ &miss);
GenerateLoadFunctionFromCell(cell, function, &miss);
}
-
// Load the (only) argument into rax.
__ movq(rax, Operand(rsp, 1 * kPointerSize));
@@ -1957,7 +1887,7 @@ MaybeObject* CallStubCompiler::CompileMathAbsCall(Object* object,
// Tail call the full function. We do not have to patch the receiver
// because the function makes no use of it.
__ bind(&slow);
- CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+ CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
? CALL_AS_FUNCTION
: CALL_AS_METHOD;
__ InvokeFunction(function, arguments(), JUMP_FUNCTION,
@@ -1965,33 +1895,31 @@ MaybeObject* CallStubCompiler::CompileMathAbsCall(Object* object,
__ bind(&miss);
// rcx: function name.
- MaybeObject* maybe_result = GenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
+ GenerateMissBranch();
// Return the generated code.
- return (cell == NULL) ? GetCode(function) : GetCode(NORMAL, name);
+ return cell.is_null() ? GetCode(function) : GetCode(NORMAL, name);
}
-MaybeObject* CallStubCompiler::CompileFastApiCall(
+Handle<Code> CallStubCompiler::CompileFastApiCall(
const CallOptimization& optimization,
- Object* object,
- JSObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
+ Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<JSFunction> function,
+ Handle<String> name) {
ASSERT(optimization.is_simple_api_call());
// Bail out if object is a global object as we don't want to
// repatch it to global receiver.
- if (object->IsGlobalObject()) return heap()->undefined_value();
- if (cell != NULL) return heap()->undefined_value();
- if (!object->IsJSObject()) return heap()->undefined_value();
+ if (object->IsGlobalObject()) return Handle<Code>::null();
+ if (!cell.is_null()) return Handle<Code>::null();
+ if (!object->IsJSObject()) return Handle<Code>::null();
int depth = optimization.GetPrototypeDepthOfExpectedType(
- JSObject::cast(object), holder);
- if (depth == kInvalidProtoDepth) return heap()->undefined_value();
+ Handle<JSObject>::cast(object), holder);
+ if (depth == kInvalidProtoDepth) return Handle<Code>::null();
Label miss, miss_before_stack_reserved;
-
GenerateNameCheck(name, &miss_before_stack_reserved);
// Get the receiver from the stack.
@@ -2010,32 +1938,30 @@ MaybeObject* CallStubCompiler::CompileFastApiCall(
__ subq(rsp, Immediate(kFastApiCallArguments * kPointerSize));
// Check that the maps haven't changed and find a Holder as a side effect.
- CheckPrototypes(JSObject::cast(object), rdx, holder,
- rbx, rax, rdi, name, depth, &miss);
+ CheckPrototypes(Handle<JSObject>::cast(object), rdx, holder, rbx, rax, rdi,
+ name, depth, &miss);
// Move the return address on top of the stack.
__ movq(rax, Operand(rsp, 3 * kPointerSize));
__ movq(Operand(rsp, 0 * kPointerSize), rax);
- MaybeObject* result = GenerateFastApiCall(masm(), optimization, argc);
- if (result->IsFailure()) return result;
+ GenerateFastApiCall(masm(), optimization, argc);
__ bind(&miss);
__ addq(rsp, Immediate(kFastApiCallArguments * kPointerSize));
__ bind(&miss_before_stack_reserved);
- MaybeObject* maybe_result = GenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
+ GenerateMissBranch();
// Return the generated code.
return GetCode(function);
}
-MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
- JSObject* holder,
- JSFunction* function,
- String* name,
+Handle<Code> CallStubCompiler::CompileCallConstant(Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<JSFunction> function,
+ Handle<String> name,
CheckType check) {
// ----------- S t a t e -------------
// rcx : function name
@@ -2048,16 +1974,14 @@ MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
// -----------------------------------
if (HasCustomCallGenerator(function)) {
- MaybeObject* maybe_result = CompileCustomCall(
- object, holder, NULL, function, name);
- Object* result;
- if (!maybe_result->ToObject(&result)) return maybe_result;
- // undefined means bail out to regular compiler.
- if (!result->IsUndefined()) return result;
+ Handle<Code> code = CompileCustomCall(object, holder,
+ Handle<JSGlobalPropertyCell>::null(),
+ function, name);
+ // A null handle means bail out to the regular compiler code below.
+ if (!code.is_null()) return code;
}
Label miss;
-
GenerateNameCheck(name, &miss);
// Get the receiver from the stack.
@@ -2074,14 +1998,13 @@ MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
ASSERT(!object->IsGlobalObject() || check == RECEIVER_MAP_CHECK);
Counters* counters = isolate()->counters();
- SharedFunctionInfo* function_info = function->shared();
switch (check) {
case RECEIVER_MAP_CHECK:
__ IncrementCounter(counters->call_const(), 1);
// Check that the maps haven't changed.
- CheckPrototypes(JSObject::cast(object), rdx, holder,
- rbx, rax, rdi, name, &miss);
+ CheckPrototypes(Handle<JSObject>::cast(object), rdx, holder, rbx, rax,
+ rdi, name, &miss);
// Patch the receiver on the stack with the global proxy if
// necessary.
@@ -2092,28 +2015,25 @@ MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
break;
case STRING_CHECK:
- if (!function->IsBuiltin() && !function_info->strict_mode()) {
- // Calling non-strict non-builtins with a value as the receiver
- // requires boxing.
- __ jmp(&miss);
- } else {
+ if (function->IsBuiltin() || !function->shared()->is_classic_mode()) {
// Check that the object is a two-byte string or a symbol.
__ CmpObjectType(rdx, FIRST_NONSTRING_TYPE, rax);
__ j(above_equal, &miss);
// Check that the maps starting from the prototype haven't changed.
GenerateDirectLoadGlobalFunctionPrototype(
masm(), Context::STRING_FUNCTION_INDEX, rax, &miss);
- CheckPrototypes(JSObject::cast(object->GetPrototype()), rax, holder,
- rbx, rdx, rdi, name, &miss);
- }
- break;
-
- case NUMBER_CHECK: {
- if (!function->IsBuiltin() && !function_info->strict_mode()) {
+ CheckPrototypes(
+ Handle<JSObject>(JSObject::cast(object->GetPrototype())),
+ rax, holder, rbx, rdx, rdi, name, &miss);
+ } else {
// Calling non-strict non-builtins with a value as the receiver
// requires boxing.
__ jmp(&miss);
- } else {
+ }
+ break;
+
+ case NUMBER_CHECK:
+ if (function->IsBuiltin() || !function->shared()->is_classic_mode()) {
Label fast;
// Check that the object is a smi or a heap number.
__ JumpIfSmi(rdx, &fast);
@@ -2123,18 +2043,18 @@ MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
// Check that the maps starting from the prototype haven't changed.
GenerateDirectLoadGlobalFunctionPrototype(
masm(), Context::NUMBER_FUNCTION_INDEX, rax, &miss);
- CheckPrototypes(JSObject::cast(object->GetPrototype()), rax, holder,
- rbx, rdx, rdi, name, &miss);
- }
- break;
- }
-
- case BOOLEAN_CHECK: {
- if (!function->IsBuiltin() && !function_info->strict_mode()) {
+ CheckPrototypes(
+ Handle<JSObject>(JSObject::cast(object->GetPrototype())),
+ rax, holder, rbx, rdx, rdi, name, &miss);
+ } else {
// Calling non-strict non-builtins with a value as the receiver
// requires boxing.
__ jmp(&miss);
- } else {
+ }
+ break;
+
+ case BOOLEAN_CHECK:
+ if (function->IsBuiltin() || !function->shared()->is_classic_mode()) {
Label fast;
// Check that the object is a boolean.
__ CompareRoot(rdx, Heap::kTrueValueRootIndex);
@@ -2145,17 +2065,18 @@ MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
// Check that the maps starting from the prototype haven't changed.
GenerateDirectLoadGlobalFunctionPrototype(
masm(), Context::BOOLEAN_FUNCTION_INDEX, rax, &miss);
- CheckPrototypes(JSObject::cast(object->GetPrototype()), rax, holder,
- rbx, rdx, rdi, name, &miss);
+ CheckPrototypes(
+ Handle<JSObject>(JSObject::cast(object->GetPrototype())),
+ rax, holder, rbx, rdx, rdi, name, &miss);
+ } else {
+ // Calling non-strict non-builtins with a value as the receiver
+ // requires boxing.
+ __ jmp(&miss);
}
break;
- }
-
- default:
- UNREACHABLE();
}
- CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+ CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
? CALL_AS_FUNCTION
: CALL_AS_METHOD;
__ InvokeFunction(function, arguments(), JUMP_FUNCTION,
@@ -2163,17 +2084,16 @@ MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
// Handle call cache miss.
__ bind(&miss);
- MaybeObject* maybe_result = GenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
+ GenerateMissBranch();
// Return the generated code.
return GetCode(function);
}
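
The receiver checks above are rephrased from the negative "!IsBuiltin() && !strict_mode()" into the positive "IsBuiltin() || !is_classic_mode()", tracking the renamed mode bit while keeping the semantics: only builtins and non-classic (strict or extended mode) functions may be invoked with an unboxed primitive receiver, and every other case jumps to the miss label because the receiver would need boxing. A small sketch of that predicate, with assumed field names standing in for the SharedFunctionInfo flags:

#include <cstdio>

struct Function {
  bool is_builtin;
  bool is_classic_mode;  // neither strict nor extended mode
};

// Mirrors the branch shape in the STRING/NUMBER/BOOLEAN checks: true means
// the fast path may run; false means bail to the miss label.
bool CanCallWithUnboxedReceiver(const Function& f) {
  return f.is_builtin || !f.is_classic_mode;
}

int main() {
  Function classic_fn = {false, true};
  Function strict_fn = {false, false};
  std::printf("classic: %d, strict: %d\n",
              CanCallWithUnboxedReceiver(classic_fn),  // 0: must miss
              CanCallWithUnboxedReceiver(strict_fn));  // 1: fast path ok
  return 0;
}
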
-MaybeObject* CallStubCompiler::CompileCallInterceptor(JSObject* object,
- JSObject* holder,
- String* name) {
+Handle<Code> CallStubCompiler::CompileCallInterceptor(Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<String> name) {
// ----------- S t a t e -------------
// rcx : function name
// rsp[0] : return address
@@ -2184,30 +2104,20 @@ MaybeObject* CallStubCompiler::CompileCallInterceptor(JSObject* object,
// rsp[(argc + 1) * 8] : argument 0 = receiver
// -----------------------------------
Label miss;
-
GenerateNameCheck(name, &miss);
// Get the number of arguments.
const int argc = arguments().immediate();
- LookupResult lookup;
+ LookupResult lookup(isolate());
LookupPostInterceptor(holder, name, &lookup);
// Get the receiver from the stack.
__ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
- CallInterceptorCompiler compiler(this, arguments(), rcx, extra_ic_state_);
- MaybeObject* result = compiler.Compile(masm(),
- object,
- holder,
- name,
- &lookup,
- rdx,
- rbx,
- rdi,
- rax,
- &miss);
- if (result->IsFailure()) return result;
+ CallInterceptorCompiler compiler(this, arguments(), rcx, extra_state_);
+ compiler.Compile(masm(), object, holder, name, &lookup, rdx, rbx, rdi, rax,
+ &miss);
// Restore receiver.
__ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
@@ -2226,7 +2136,7 @@ MaybeObject* CallStubCompiler::CompileCallInterceptor(JSObject* object,
// Invoke the function.
__ movq(rdi, rax);
- CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+ CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
? CALL_AS_FUNCTION
: CALL_AS_METHOD;
__ InvokeFunction(rdi, arguments(), JUMP_FUNCTION,
@@ -2234,19 +2144,19 @@ MaybeObject* CallStubCompiler::CompileCallInterceptor(JSObject* object,
// Handle load cache miss.
__ bind(&miss);
- MaybeObject* maybe_result = GenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
+ GenerateMissBranch();
// Return the generated code.
return GetCode(INTERCEPTOR, name);
}
-MaybeObject* CallStubCompiler::CompileCallGlobal(JSObject* object,
- GlobalObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
+Handle<Code> CallStubCompiler::CompileCallGlobal(
+ Handle<JSObject> object,
+ Handle<GlobalObject> holder,
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<JSFunction> function,
+ Handle<String> name) {
// ----------- S t a t e -------------
// rcx : function name
// rsp[0] : return address
@@ -2258,23 +2168,17 @@ MaybeObject* CallStubCompiler::CompileCallGlobal(JSObject* object,
// -----------------------------------
if (HasCustomCallGenerator(function)) {
- MaybeObject* maybe_result = CompileCustomCall(
- object, holder, cell, function, name);
- Object* result;
- if (!maybe_result->ToObject(&result)) return maybe_result;
- // undefined means bail out to regular compiler.
- if (!result->IsUndefined()) return result;
+ Handle<Code> code = CompileCustomCall(object, holder, cell, function, name);
+ // A null handle means bail out to the regular compiler code below.
+ if (!code.is_null()) return code;
}
Label miss;
-
GenerateNameCheck(name, &miss);
// Get the number of arguments.
const int argc = arguments().immediate();
-
GenerateGlobalReceiverCheck(object, holder, name, &miss);
-
GenerateLoadFunctionFromCell(cell, function, &miss);
// Patch the receiver on the stack with the global proxy.
@@ -2289,39 +2193,31 @@ MaybeObject* CallStubCompiler::CompileCallGlobal(JSObject* object,
// Jump to the cached code (tail call).
Counters* counters = isolate()->counters();
__ IncrementCounter(counters->call_global_inline(), 1);
- ASSERT(function->is_compiled());
ParameterCount expected(function->shared()->formal_parameter_count());
- CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+ CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
? CALL_AS_FUNCTION
: CALL_AS_METHOD;
- if (V8::UseCrankshaft()) {
- // TODO(kasperl): For now, we always call indirectly through the
- // code field in the function to allow recompilation to take effect
- // without changing any of the call sites.
- __ movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
- __ InvokeCode(rdx, expected, arguments(), JUMP_FUNCTION,
- NullCallWrapper(), call_kind);
- } else {
- Handle<Code> code(function->code());
- __ InvokeCode(code, expected, arguments(),
- RelocInfo::CODE_TARGET, JUMP_FUNCTION,
- NullCallWrapper(), call_kind);
- }
+ // We call indirectly through the code field in the function to
+ // allow recompilation to take effect without changing any of the
+ // call sites.
+ __ movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
+ __ InvokeCode(rdx, expected, arguments(), JUMP_FUNCTION,
+ NullCallWrapper(), call_kind);
+
// Handle call cache miss.
__ bind(&miss);
__ IncrementCounter(counters->call_global_inline_miss(), 1);
- MaybeObject* maybe_result = GenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
+ GenerateMissBranch();
// Return the generated code.
return GetCode(NORMAL, name);
}
-MaybeObject* StoreStubCompiler::CompileStoreField(JSObject* object,
+Handle<Code> StoreStubCompiler::CompileStoreField(Handle<JSObject> object,
int index,
- Map* transition,
- String* name) {
+ Handle<Map> transition,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- rax : value
// -- rcx : name
@@ -2331,12 +2227,7 @@ MaybeObject* StoreStubCompiler::CompileStoreField(JSObject* object,
Label miss;
// Generate store field code. Preserves receiver and name on jump to miss.
- GenerateStoreField(masm(),
- object,
- index,
- transition,
- rdx, rcx, rbx,
- &miss);
+ GenerateStoreField(masm(), object, index, transition, rdx, rcx, rbx, &miss);
// Handle store cache miss.
__ bind(&miss);
@@ -2344,13 +2235,14 @@ MaybeObject* StoreStubCompiler::CompileStoreField(JSObject* object,
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
- return GetCode(transition == NULL ? FIELD : MAP_TRANSITION, name);
+ return GetCode(transition.is_null() ? FIELD : MAP_TRANSITION, name);
}
-MaybeObject* StoreStubCompiler::CompileStoreCallback(JSObject* object,
- AccessorInfo* callback,
- String* name) {
+Handle<Code> StoreStubCompiler::CompileStoreCallback(
+ Handle<JSObject> object,
+ Handle<AccessorInfo> callback,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- rax : value
// -- rcx : name
@@ -2378,7 +2270,7 @@ MaybeObject* StoreStubCompiler::CompileStoreCallback(JSObject* object,
__ pop(rbx); // remove the return address
__ push(rdx); // receiver
- __ Push(Handle<AccessorInfo>(callback)); // callback info
+ __ Push(callback); // callback info
__ push(rcx); // name
__ push(rax); // value
__ push(rbx); // restore return address
@@ -2398,8 +2290,9 @@ MaybeObject* StoreStubCompiler::CompileStoreCallback(JSObject* object,
}
-MaybeObject* StoreStubCompiler::CompileStoreInterceptor(JSObject* receiver,
- String* name) {
+Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
+ Handle<JSObject> receiver,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- rax : value
// -- rcx : name
@@ -2447,9 +2340,10 @@ MaybeObject* StoreStubCompiler::CompileStoreInterceptor(JSObject* receiver,
}
-MaybeObject* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object,
- JSGlobalPropertyCell* cell,
- String* name) {
+Handle<Code> StoreStubCompiler::CompileStoreGlobal(
+ Handle<GlobalObject> object,
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- rax : value
// -- rcx : name
@@ -2463,19 +2357,36 @@ MaybeObject* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object,
Handle<Map>(object->map()));
__ j(not_equal, &miss);
+ // Compute the cell operand to use.
+ __ Move(rbx, cell);
+ Operand cell_operand = FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset);
+
// Check that the value in the cell is not the hole. If it is, this
// cell could have been deleted and reintroducing the global needs
// to update the property details in the property dictionary of the
// global object. We bail out to the runtime system to do that.
- __ Move(rbx, Handle<JSGlobalPropertyCell>(cell));
- __ CompareRoot(FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset),
- Heap::kTheHoleValueRootIndex);
+ __ CompareRoot(cell_operand, Heap::kTheHoleValueRootIndex);
__ j(equal, &miss);
// Store the value in the cell.
- __ movq(FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset), rax);
+ __ movq(cell_operand, rax);
+ Label done;
+ __ JumpIfSmi(rax, &done);
+
+ __ movq(rcx, rax);
+ __ lea(rdx, cell_operand);
+ // Cells are always in the remembered set.
+ __ RecordWrite(rbx, // Object.
+ rdx, // Address.
+ rcx, // Value.
+ kDontSaveFPRegs,
+ OMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+
// Return the value (register rax).
+ __ bind(&done);
+
Counters* counters = isolate()->counters();
__ IncrementCounter(counters->named_store_global_inline(), 1);
__ ret(0);
@@ -2491,10 +2402,10 @@ MaybeObject* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object,
}
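
CompileStoreGlobal now pairs the cell store with an explicit barrier: property cells live in old space and, as the comment notes, are always in the remembered set, so once the smi fast path has branched to done, RecordWrite can omit both the remembered-set update and the smi check and only has to keep incremental marking informed. Sketching the two halves of the barrier with the same flag names (the bodies are assumptions, not V8 internals):

#include <cstdio>

enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };

bool IsSmi(long value) { return (value & 1) == 0; }

void RecordWrite(long* slot, long value,
                 RememberedSetAction remembered_set_action,
                 SmiCheck smi_check) {
  if (smi_check == INLINE_SMI_CHECK && IsSmi(value)) return;
  if (remembered_set_action == EMIT_REMEMBERED_SET) {
    std::printf("remembered set: record slot %p\n",
                static_cast<void*>(slot));
  }
  // The marking half always runs so a black holder never ends up
  // pointing at an unmarked (white) value during incremental marking.
  std::printf("marking barrier: re-grey value 0x%lx\n", value);
}

int main() {
  long cell = 0;
  // The stub has already branched around this call for smi values, and
  // cells are always remembered, so both checks are safely omitted.
  RecordWrite(&cell, 0x1001, OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
  return 0;
}
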
-MaybeObject* KeyedStoreStubCompiler::CompileStoreField(JSObject* object,
+Handle<Code> KeyedStoreStubCompiler::CompileStoreField(Handle<JSObject> object,
int index,
- Map* transition,
- String* name) {
+ Handle<Map> transition,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- rax : value
// -- rcx : key
@@ -2507,16 +2418,11 @@ MaybeObject* KeyedStoreStubCompiler::CompileStoreField(JSObject* object,
__ IncrementCounter(counters->keyed_store_field(), 1);
// Check that the name has not changed.
- __ Cmp(rcx, Handle<String>(name));
+ __ Cmp(rcx, name);
__ j(not_equal, &miss);
// Generate store field code. Preserves receiver and name on jump to miss.
- GenerateStoreField(masm(),
- object,
- index,
- transition,
- rdx, rcx, rbx,
- &miss);
+ GenerateStoreField(masm(), object, index, transition, rdx, rcx, rbx, &miss);
// Handle store cache miss.
__ bind(&miss);
@@ -2525,39 +2431,38 @@ MaybeObject* KeyedStoreStubCompiler::CompileStoreField(JSObject* object,
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
- return GetCode(transition == NULL ? FIELD : MAP_TRANSITION, name);
+ return GetCode(transition.is_null() ? FIELD : MAP_TRANSITION, name);
}
-MaybeObject* KeyedStoreStubCompiler::CompileStoreElement(Map* receiver_map) {
+Handle<Code> KeyedStoreStubCompiler::CompileStoreElement(
+ Handle<Map> receiver_map) {
// ----------- S t a t e -------------
// -- rax : value
// -- rcx : key
// -- rdx : receiver
// -- rsp[0] : return address
// -----------------------------------
- Code* stub;
+
ElementsKind elements_kind = receiver_map->elements_kind();
bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
- MaybeObject* maybe_stub =
- KeyedStoreElementStub(is_js_array, elements_kind).TryGetCode();
- if (!maybe_stub->To(&stub)) return maybe_stub;
- __ DispatchMap(rdx,
- Handle<Map>(receiver_map),
- Handle<Code>(stub),
- DO_SMI_CHECK);
+ Handle<Code> stub =
+ KeyedStoreElementStub(is_js_array, elements_kind).GetCode();
+
+ __ DispatchMap(rdx, receiver_map, stub, DO_SMI_CHECK);
Handle<Code> ic = isolate()->builtins()->KeyedStoreIC_Miss();
__ jmp(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
- return GetCode(NORMAL, NULL);
+ return GetCode(NORMAL, factory()->empty_string());
}
-MaybeObject* KeyedStoreStubCompiler::CompileStoreMegamorphic(
- MapList* receiver_maps,
- CodeList* handler_ics) {
+Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic(
+ MapHandleList* receiver_maps,
+ CodeHandleList* handler_stubs,
+ MapHandleList* transitioned_maps) {
// ----------- S t a t e -------------
// -- rax : value
// -- rcx : key
@@ -2565,18 +2470,22 @@ MaybeObject* KeyedStoreStubCompiler::CompileStoreMegamorphic(
// -- rsp[0] : return address
// -----------------------------------
Label miss;
- __ JumpIfSmi(rdx, &miss);
+ __ JumpIfSmi(rdx, &miss, Label::kNear);
- Register map_reg = rbx;
- __ movq(map_reg, FieldOperand(rdx, HeapObject::kMapOffset));
+ __ movq(rdi, FieldOperand(rdx, HeapObject::kMapOffset));
int receiver_count = receiver_maps->length();
- for (int current = 0; current < receiver_count; ++current) {
+ for (int i = 0; i < receiver_count; ++i) {
// Check map and tail call if there's a match
- Handle<Map> map(receiver_maps->at(current));
- __ Cmp(map_reg, map);
- __ j(equal,
- Handle<Code>(handler_ics->at(current)),
- RelocInfo::CODE_TARGET);
+ __ Cmp(rdi, receiver_maps->at(i));
+ if (transitioned_maps->at(i).is_null()) {
+ __ j(equal, handler_stubs->at(i), RelocInfo::CODE_TARGET);
+ } else {
+ Label next_map;
+ __ j(not_equal, &next_map, Label::kNear);
+ __ movq(rbx, transitioned_maps->at(i), RelocInfo::EMBEDDED_OBJECT);
+ __ jmp(handler_stubs->at(i), RelocInfo::CODE_TARGET);
+ __ bind(&next_map);
+ }
}
__ bind(&miss);
@@ -2584,13 +2493,13 @@ MaybeObject* KeyedStoreStubCompiler::CompileStoreMegamorphic(
__ jmp(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
- return GetCode(NORMAL, NULL, MEGAMORPHIC);
+ return GetCode(NORMAL, factory()->empty_string(), MEGAMORPHIC);
}
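
The polymorphic store stub gains transition support: each receiver map either tail-calls its handler directly or, when the store must first migrate the receiver to a new map (say, a smi-only to fast-elements transition), loads the target map into rbx before jumping, and a receiver matching no map falls through to the generic miss. The same dispatch, rendered as ordinary C++ over hypothetical types:

#include <cstdio>
#include <vector>

struct Map {};

// Assumed handler signature: a transitioning store receives the map it
// should install on the receiver before writing the element.
using Handler = void (*)(const Map* transitioned_map);

void PlainStore(const Map*) { std::printf("store with current map\n"); }
void TransitioningStore(const Map* target) {
  std::printf("install map %p, then store\n",
              static_cast<const void*>(target));
}

struct Entry {
  const Map* receiver_map;
  Handler handler;
  const Map* transitioned_map;  // null when the map does not change
};

// C++ rendering of the emitted compare-and-jump chain.
void Dispatch(const Map* receiver_map, const std::vector<Entry>& entries) {
  for (const Entry& e : entries) {
    if (receiver_map != e.receiver_map) continue;  // Cmp / j(not_equal)
    e.handler(e.transitioned_map);                 // tail call in the stub
    return;
  }
  std::printf("miss: jump to KeyedStoreIC_Miss\n");
}

int main() {
  Map smi_only, fast, other;
  std::vector<Entry> entries = {
      {&smi_only, TransitioningStore, &fast},  // smi-only -> fast elements
      {&fast, PlainStore, nullptr},
  };
  Dispatch(&smi_only, entries);
  Dispatch(&other, entries);
  return 0;
}
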
-MaybeObject* LoadStubCompiler::CompileLoadNonexistent(String* name,
- JSObject* object,
- JSObject* last) {
+Handle<Code> LoadStubCompiler::CompileLoadNonexistent(Handle<String> name,
+ Handle<JSObject> object,
+ Handle<JSObject> last) {
// ----------- S t a t e -------------
// -- rax : receiver
// -- rcx : name
@@ -2609,15 +2518,8 @@ MaybeObject* LoadStubCompiler::CompileLoadNonexistent(String* name,
// If the last object in the prototype chain is a global object,
// check that the global property cell is empty.
if (last->IsGlobalObject()) {
- MaybeObject* cell = GenerateCheckPropertyCell(masm(),
- GlobalObject::cast(last),
- name,
- rdx,
- &miss);
- if (cell->IsFailure()) {
- miss.Unuse();
- return cell;
- }
+ GenerateCheckPropertyCell(
+ masm(), Handle<GlobalObject>::cast(last), name, rdx, &miss);
}
// Return undefined if maps of the full prototype chain are still the
@@ -2629,14 +2531,14 @@ MaybeObject* LoadStubCompiler::CompileLoadNonexistent(String* name,
GenerateLoadMiss(masm(), Code::LOAD_IC);
// Return the generated code.
- return GetCode(NONEXISTENT, heap()->empty_string());
+ return GetCode(NONEXISTENT, factory()->empty_string());
}
-MaybeObject* LoadStubCompiler::CompileLoadField(JSObject* object,
- JSObject* holder,
+Handle<Code> LoadStubCompiler::CompileLoadField(Handle<JSObject> object,
+ Handle<JSObject> holder,
int index,
- String* name) {
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- rax : receiver
// -- rcx : name
@@ -2653,24 +2555,19 @@ MaybeObject* LoadStubCompiler::CompileLoadField(JSObject* object,
}
-MaybeObject* LoadStubCompiler::CompileLoadCallback(String* name,
- JSObject* object,
- JSObject* holder,
- AccessorInfo* callback) {
+Handle<Code> LoadStubCompiler::CompileLoadCallback(
+ Handle<String> name,
+ Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<AccessorInfo> callback) {
// ----------- S t a t e -------------
// -- rax : receiver
// -- rcx : name
// -- rsp[0] : return address
// -----------------------------------
Label miss;
-
- MaybeObject* result = GenerateLoadCallback(object, holder, rax, rcx, rdx, rbx,
- rdi, callback, name, &miss);
- if (result->IsFailure()) {
- miss.Unuse();
- return result;
- }
-
+ GenerateLoadCallback(object, holder, rax, rcx, rdx, rbx, rdi, callback,
+ name, &miss);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::LOAD_IC);
@@ -2679,10 +2576,10 @@ MaybeObject* LoadStubCompiler::CompileLoadCallback(String* name,
}
-MaybeObject* LoadStubCompiler::CompileLoadConstant(JSObject* object,
- JSObject* holder,
- Object* value,
- String* name) {
+Handle<Code> LoadStubCompiler::CompileLoadConstant(Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<Object> value,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- rax : receiver
// -- rcx : name
@@ -2699,32 +2596,22 @@ MaybeObject* LoadStubCompiler::CompileLoadConstant(JSObject* object,
}
-MaybeObject* LoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
- JSObject* holder,
- String* name) {
+Handle<Code> LoadStubCompiler::CompileLoadInterceptor(Handle<JSObject> receiver,
+ Handle<JSObject> holder,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- rax : receiver
// -- rcx : name
// -- rsp[0] : return address
// -----------------------------------
Label miss;
-
- LookupResult lookup;
+ LookupResult lookup(isolate());
LookupPostInterceptor(holder, name, &lookup);
// TODO(368): Compile in the whole chain: all the interceptors in
// prototypes and ultimate answer.
- GenerateLoadInterceptor(receiver,
- holder,
- &lookup,
- rax,
- rcx,
- rdx,
- rbx,
- rdi,
- name,
- &miss);
-
+ GenerateLoadInterceptor(receiver, holder, &lookup, rax, rcx, rdx, rbx, rdi,
+ name, &miss);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::LOAD_IC);
@@ -2733,11 +2620,12 @@ MaybeObject* LoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
}
-MaybeObject* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
- GlobalObject* holder,
- JSGlobalPropertyCell* cell,
- String* name,
- bool is_dont_delete) {
+Handle<Code> LoadStubCompiler::CompileLoadGlobal(
+ Handle<JSObject> object,
+ Handle<GlobalObject> holder,
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<String> name,
+ bool is_dont_delete) {
// ----------- S t a t e -------------
// -- rax : receiver
// -- rcx : name
@@ -2748,7 +2636,7 @@ MaybeObject* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
// If the object is the holder then we know that it's a global
// object which can only happen for contextual loads. In this case,
// the receiver cannot be a smi.
- if (object != holder) {
+ if (!object.is_identical_to(holder)) {
__ JumpIfSmi(rax, &miss);
}
@@ -2756,7 +2644,7 @@ MaybeObject* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
CheckPrototypes(object, rax, holder, rbx, rdx, rdi, name, &miss);
// Get the value from the cell.
- __ Move(rbx, Handle<JSGlobalPropertyCell>(cell));
+ __ Move(rbx, cell);
__ movq(rbx, FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset));
// Check for deleted property if property can actually be deleted.
@@ -2782,9 +2670,9 @@ MaybeObject* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
}
-MaybeObject* KeyedLoadStubCompiler::CompileLoadField(String* name,
- JSObject* receiver,
- JSObject* holder,
+Handle<Code> KeyedLoadStubCompiler::CompileLoadField(Handle<String> name,
+ Handle<JSObject> receiver,
+ Handle<JSObject> holder,
int index) {
// ----------- S t a t e -------------
// -- rax : key
@@ -2797,7 +2685,7 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadField(String* name,
__ IncrementCounter(counters->keyed_load_field(), 1);
// Check that the name has not changed.
- __ Cmp(rax, Handle<String>(name));
+ __ Cmp(rax, name);
__ j(not_equal, &miss);
GenerateLoadField(receiver, holder, rdx, rbx, rcx, rdi, index, name, &miss);
@@ -2811,34 +2699,27 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadField(String* name,
}
-MaybeObject* KeyedLoadStubCompiler::CompileLoadCallback(
- String* name,
- JSObject* receiver,
- JSObject* holder,
- AccessorInfo* callback) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadCallback(
+ Handle<String> name,
+ Handle<JSObject> receiver,
+ Handle<JSObject> holder,
+ Handle<AccessorInfo> callback) {
// ----------- S t a t e -------------
// -- rax : key
// -- rdx : receiver
// -- rsp[0] : return address
// -----------------------------------
Label miss;
-
Counters* counters = isolate()->counters();
__ IncrementCounter(counters->keyed_load_callback(), 1);
// Check that the name has not changed.
- __ Cmp(rax, Handle<String>(name));
+ __ Cmp(rax, name);
__ j(not_equal, &miss);
- MaybeObject* result = GenerateLoadCallback(receiver, holder, rdx, rax, rbx,
- rcx, rdi, callback, name, &miss);
- if (result->IsFailure()) {
- miss.Unuse();
- return result;
- }
-
+ GenerateLoadCallback(receiver, holder, rdx, rax, rbx, rcx, rdi, callback,
+ name, &miss);
__ bind(&miss);
-
__ DecrementCounter(counters->keyed_load_callback(), 1);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
@@ -2847,10 +2728,11 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadCallback(
}
-MaybeObject* KeyedLoadStubCompiler::CompileLoadConstant(String* name,
- JSObject* receiver,
- JSObject* holder,
- Object* value) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadConstant(
+ Handle<String> name,
+ Handle<JSObject> receiver,
+ Handle<JSObject> holder,
+ Handle<Object> value) {
// ----------- S t a t e -------------
// -- rax : key
// -- rdx : receiver
@@ -2862,7 +2744,7 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadConstant(String* name,
__ IncrementCounter(counters->keyed_load_constant_function(), 1);
// Check that the name has not changed.
- __ Cmp(rax, Handle<String>(name));
+ __ Cmp(rax, name);
__ j(not_equal, &miss);
GenerateLoadConstant(receiver, holder, rdx, rbx, rcx, rdi,
@@ -2876,35 +2758,27 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadConstant(String* name,
}
-MaybeObject* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
- JSObject* holder,
- String* name) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadInterceptor(
+ Handle<JSObject> receiver,
+ Handle<JSObject> holder,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- rax : key
// -- rdx : receiver
// -- rsp[0] : return address
// -----------------------------------
Label miss;
-
Counters* counters = isolate()->counters();
__ IncrementCounter(counters->keyed_load_interceptor(), 1);
// Check that the name has not changed.
- __ Cmp(rax, Handle<String>(name));
+ __ Cmp(rax, name);
__ j(not_equal, &miss);
- LookupResult lookup;
+ LookupResult lookup(isolate());
LookupPostInterceptor(holder, name, &lookup);
- GenerateLoadInterceptor(receiver,
- holder,
- &lookup,
- rdx,
- rax,
- rcx,
- rbx,
- rdi,
- name,
- &miss);
+ GenerateLoadInterceptor(receiver, holder, &lookup, rdx, rax, rcx, rbx, rdi,
+ name, &miss);
__ bind(&miss);
__ DecrementCounter(counters->keyed_load_interceptor(), 1);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
@@ -2914,7 +2788,8 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
}
-MaybeObject* KeyedLoadStubCompiler::CompileLoadArrayLength(String* name) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadArrayLength(
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- rax : key
// -- rdx : receiver
@@ -2926,7 +2801,7 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadArrayLength(String* name) {
__ IncrementCounter(counters->keyed_load_array_length(), 1);
// Check that the name has not changed.
- __ Cmp(rax, Handle<String>(name));
+ __ Cmp(rax, name);
__ j(not_equal, &miss);
GenerateLoadArrayLength(masm(), rdx, rcx, &miss);
@@ -2939,7 +2814,8 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadArrayLength(String* name) {
}
-MaybeObject* KeyedLoadStubCompiler::CompileLoadStringLength(String* name) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadStringLength(
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- rax : key
// -- rdx : receiver
@@ -2951,7 +2827,7 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadStringLength(String* name) {
__ IncrementCounter(counters->keyed_load_string_length(), 1);
// Check that the name has not changed.
- __ Cmp(rax, Handle<String>(name));
+ __ Cmp(rax, name);
__ j(not_equal, &miss);
GenerateLoadStringLength(masm(), rdx, rcx, rbx, &miss, true);
@@ -2964,7 +2840,8 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadStringLength(String* name) {
}
-MaybeObject* KeyedLoadStubCompiler::CompileLoadFunctionPrototype(String* name) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadFunctionPrototype(
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- rax : key
// -- rdx : receiver
@@ -2976,7 +2853,7 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadFunctionPrototype(String* name) {
__ IncrementCounter(counters->keyed_load_function_prototype(), 1);
// Check that the name has not changed.
- __ Cmp(rax, Handle<String>(name));
+ __ Cmp(rax, name);
__ j(not_equal, &miss);
GenerateLoadFunctionPrototype(masm(), rdx, rcx, rbx, &miss);
@@ -2989,32 +2866,29 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadFunctionPrototype(String* name) {
}
-MaybeObject* KeyedLoadStubCompiler::CompileLoadElement(Map* receiver_map) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadElement(
+ Handle<Map> receiver_map) {
// ----------- S t a t e -------------
// -- rax : key
// -- rdx : receiver
// -- rsp[0] : return address
// -----------------------------------
- Code* stub;
ElementsKind elements_kind = receiver_map->elements_kind();
- MaybeObject* maybe_stub = KeyedLoadElementStub(elements_kind).TryGetCode();
- if (!maybe_stub->To(&stub)) return maybe_stub;
- __ DispatchMap(rdx,
- Handle<Map>(receiver_map),
- Handle<Code>(stub),
- DO_SMI_CHECK);
+ Handle<Code> stub = KeyedLoadElementStub(elements_kind).GetCode();
+
+ __ DispatchMap(rdx, receiver_map, stub, DO_SMI_CHECK);
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Miss();
__ jmp(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
- return GetCode(NORMAL, NULL);
+ return GetCode(NORMAL, factory()->empty_string());
}
-MaybeObject* KeyedLoadStubCompiler::CompileLoadMegamorphic(
- MapList* receiver_maps,
- CodeList* handler_ics) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadPolymorphic(
+ MapHandleList* receiver_maps,
+ CodeHandleList* handler_ics) {
// ----------- S t a t e -------------
// -- rax : key
// -- rdx : receiver
@@ -3028,24 +2902,22 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadMegamorphic(
int receiver_count = receiver_maps->length();
for (int current = 0; current < receiver_count; ++current) {
// Check the map and tail call if there's a match.
- Handle<Map> map(receiver_maps->at(current));
- __ Cmp(map_reg, map);
- __ j(equal,
- Handle<Code>(handler_ics->at(current)),
- RelocInfo::CODE_TARGET);
+ __ Cmp(map_reg, receiver_maps->at(current));
+ __ j(equal, handler_ics->at(current), RelocInfo::CODE_TARGET);
}
__ bind(&miss);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
// Return the generated code.
- return GetCode(NORMAL, NULL, MEGAMORPHIC);
+ return GetCode(NORMAL, factory()->empty_string(), MEGAMORPHIC);
}
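
The compiled stub is a linear scan: load the receiver's map, compare it against each entry in receiver_maps, and tail-call the matching handler, falling through to the generic miss when nothing matches. A self-contained C++ analogue of that dispatch, with hypothetical Map/Handler stand-ins:

    #include <cstddef>
    #include <vector>

    struct Map {};              // hypothetical stand-in for v8::internal::Map
    typedef void (*Handler)();  // stand-in for a handler code object

    // Linear map dispatch, mirroring the stub's compare-and-tail-call loop.
    void Dispatch(const Map* receiver_map,
                  const std::vector<const Map*>& maps,
                  const std::vector<Handler>& handlers,
                  Handler miss) {
      for (size_t i = 0; i < maps.size(); ++i) {
        if (maps[i] == receiver_map) return handlers[i]();  // "tail call"
      }
      miss();  // no map matched: fall through to the generic miss stub
    }
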
// Specialized stub for constructing objects from functions which only have
// simple assignments of the form this.x = ...; in their body.
-MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) {
+Handle<Code> ConstructStubCompiler::CompileConstructStub(
+ Handle<JSFunction> function) {
// ----------- S t a t e -------------
// -- rax : argc
// -- rdi : constructor
@@ -3088,12 +2960,8 @@ MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) {
// rbx: initial map
__ movzxbq(rcx, FieldOperand(rbx, Map::kInstanceSizeOffset));
__ shl(rcx, Immediate(kPointerSizeLog2));
- __ AllocateInNewSpace(rcx,
- rdx,
- rcx,
- no_reg,
- &generic_stub_call,
- NO_ALLOCATION_FLAGS);
+ __ AllocateInNewSpace(rcx, rdx, rcx, no_reg,
+ &generic_stub_call, NO_ALLOCATION_FLAGS);
// Allocated the JSObject, now initialize the fields and add the heap tag.
// rbx: initial map
@@ -3118,7 +2986,7 @@ MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) {
// r9: first in-object property of the JSObject
// Fill the initialized properties with a constant value or a passed argument
// depending on the this.x = ...; assignment in the function.
- SharedFunctionInfo* shared = function->shared();
+ Handle<SharedFunctionInfo> shared(function->shared());
for (int i = 0; i < shared->this_property_assignments_count(); i++) {
if (shared->IsThisPropertyAssignmentArgument(i)) {
// Check if the argument assigned to the property is actually passed.
@@ -3166,10 +3034,8 @@ MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) {
// Jump to the generic stub in case the specialized code cannot handle the
// construction.
__ bind(&generic_stub_call);
- Code* code =
- isolate()->builtins()->builtin(Builtins::kJSConstructStubGeneric);
- Handle<Code> generic_construct_stub(code);
- __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
+ Handle<Code> code = isolate()->builtins()->JSConstructStubGeneric();
+ __ Jump(code, RelocInfo::CODE_TARGET);
// Return the generated code.
return GetCode();
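
Above, the stub loads the instance size out of the initial map (movzxbq) and shifts it left by kPointerSizeLog2, because a map records its instance size in pointer-sized words, not bytes. A small sketch of that conversion, assuming the x64 value kPointerSizeLog2 == 3:

    // Map::kInstanceSizeOffset holds the size in pointer-size words; the
    // shl by kPointerSizeLog2 turns it into a byte count for allocation.
    const int kPointerSizeLog2 = 3;  // x64: 8-byte pointers

    int InstanceSizeInBytes(int size_in_words) {
      return size_in_words << kPointerSizeLog2;
    }
    // e.g. a 5-word instance -> 40 bytes requested from new space.
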
@@ -3436,6 +3302,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
__ movsd(Operand(rbx, rdi, times_8, 0), xmm0);
break;
case FAST_ELEMENTS:
+ case FAST_SMI_ONLY_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
@@ -3503,6 +3370,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
+ case FAST_SMI_ONLY_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
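
These two hunks thread the new FAST_SMI_ONLY_ELEMENTS kind through switches that enumerate every ElementsKind; in the external-array store path the fast kinds are unreachable, so the new case simply joins the existing arm. Keeping the switches exhaustive is what lets the compiler flag any dispatch site the new kind missed. A trimmed, hypothetical analogue of the pattern:

    // Hypothetical miniature of the exhaustive-switch pattern: adding a
    // new enumerator forces every switch over the enum to name it, so an
    // un-updated dispatch site shows up as a -Wswitch warning.
    enum MiniElementsKind { kFastSmiOnly, kFast, kFastDouble, kDictionary };

    bool IsFastKind(MiniElementsKind kind) {
      switch (kind) {
        case kFastSmiOnly:
        case kFast:
        case kFastDouble:
          return true;
        case kDictionary:
          return false;
      }
      return false;  // unreachable while the switch stays exhaustive
    }
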
@@ -3634,15 +3502,17 @@ void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(
}
-void KeyedStoreStubCompiler::GenerateStoreFastElement(MacroAssembler* masm,
- bool is_js_array) {
+void KeyedStoreStubCompiler::GenerateStoreFastElement(
+ MacroAssembler* masm,
+ bool is_js_array,
+ ElementsKind elements_kind) {
// ----------- S t a t e -------------
// -- rax : value
// -- rcx : key
// -- rdx : receiver
// -- rsp[0] : return address
// -----------------------------------
- Label miss_force_generic;
+ Label miss_force_generic, transition_elements_kind;
// This stub is meant to be tail-jumped to; the receiver must already
// have been verified by the caller not to be a smi.
@@ -3665,13 +3535,22 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(MacroAssembler* masm,
__ j(above_equal, &miss_force_generic);
}
- // Do the store and update the write barrier. Make sure to preserve
- // the value in register eax.
- __ movq(rdx, rax);
- __ SmiToInteger32(rcx, rcx);
- __ movq(FieldOperand(rdi, rcx, times_pointer_size, FixedArray::kHeaderSize),
- rax);
- __ RecordWrite(rdi, 0, rdx, rcx);
+ if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
+ __ JumpIfNotSmi(rax, &transition_elements_kind);
+ __ SmiToInteger32(rcx, rcx);
+ __ movq(FieldOperand(rdi, rcx, times_pointer_size, FixedArray::kHeaderSize),
+ rax);
+ } else {
+ // Do the store and update the write barrier.
+ ASSERT(elements_kind == FAST_ELEMENTS);
+ __ SmiToInteger32(rcx, rcx);
+ __ lea(rcx,
+ FieldOperand(rdi, rcx, times_pointer_size, FixedArray::kHeaderSize));
+ __ movq(Operand(rcx, 0), rax);
+ // Make sure to preserve the value in register rax.
+ __ movq(rdx, rax);
+ __ RecordWrite(rdi, rcx, rdx, kDontSaveFPRegs);
+ }
// Done.
__ ret(0);
@@ -3681,6 +3560,10 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(MacroAssembler* masm,
Handle<Code> ic_force_generic =
masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
__ jmp(ic_force_generic, RelocInfo::CODE_TARGET);
+
+ __ bind(&transition_elements_kind);
+ Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss();
+ __ jmp(ic_miss, RelocInfo::CODE_TARGET);
}
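
The split above is the payoff of smi-only arrays: a smi is an immediate, not a heap pointer, so storing one never needs a GC write barrier, while the FAST_ELEMENTS path may store an arbitrary object and must call RecordWrite. A minimal sketch of the smi-only branch, assuming V8's low-bit smi tag convention (tag bit 0 clear for smis):

    #include <cstdint>

    const intptr_t kSmiTagMask = 1;  // assumption: V8-style low-bit tagging
    inline bool IsSmi(intptr_t word) { return (word & kSmiTagMask) == 0; }

    // Mirrors the compiled smi-only path: reject non-smi values (the stub
    // jumps to transition_elements_kind); store smis with a plain write.
    bool StoreSmiOnlyElement(intptr_t* slot, intptr_t value) {
      if (!IsSmi(value)) return false;  // caller transitions the array
      *slot = value;                    // no write barrier needed for a smi
      return true;
    }
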
@@ -3693,8 +3576,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
// -- rdx : receiver
// -- rsp[0] : return address
// -----------------------------------
- Label miss_force_generic, smi_value, is_nan, maybe_nan;
- Label have_double_value, not_nan;
+ Label miss_force_generic, transition_elements_kind;
// This stub is meant to be tail-jumped to; the receiver must already
// have been verified by the caller not to be a smi.
@@ -3715,50 +3597,9 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
__ j(above_equal, &miss_force_generic);
// Handle smi values specially
- __ JumpIfSmi(rax, &smi_value, Label::kNear);
-
- __ CheckMap(rax,
- masm->isolate()->factory()->heap_number_map(),
- &miss_force_generic,
- DONT_DO_SMI_CHECK);
-
- // Double value, canonicalize NaN.
- uint32_t offset = HeapNumber::kValueOffset + sizeof(kHoleNanLower32);
- __ cmpl(FieldOperand(rax, offset),
- Immediate(kNaNOrInfinityLowerBoundUpper32));
- __ j(greater_equal, &maybe_nan, Label::kNear);
-
- __ bind(&not_nan);
- __ movsd(xmm0, FieldOperand(rax, HeapNumber::kValueOffset));
- __ bind(&have_double_value);
- __ SmiToInteger32(rcx, rcx);
- __ movsd(FieldOperand(rdi, rcx, times_8, FixedDoubleArray::kHeaderSize),
- xmm0);
- __ ret(0);
-
- __ bind(&maybe_nan);
- // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
- // it's an Infinity, and the non-NaN code path applies.
- __ j(greater, &is_nan, Label::kNear);
- __ cmpl(FieldOperand(rax, HeapNumber::kValueOffset), Immediate(0));
- __ j(zero, &not_nan);
- __ bind(&is_nan);
- // Convert all NaNs to the same canonical NaN value when they are stored in
- // the double array.
- __ Set(kScratchRegister, BitCast<uint64_t>(
- FixedDoubleArray::canonical_not_the_hole_nan_as_double()));
- __ movq(xmm0, kScratchRegister);
- __ jmp(&have_double_value, Label::kNear);
-
- __ bind(&smi_value);
- // Value is a smi. convert to a double and store.
- // Preserve original value.
- __ SmiToInteger32(rdx, rax);
- __ push(rdx);
- __ fild_s(Operand(rsp, 0));
- __ pop(rdx);
__ SmiToInteger32(rcx, rcx);
- __ fstp_d(FieldOperand(rdi, rcx, times_8, FixedDoubleArray::kHeaderSize));
+ __ StoreNumberToDoubleElements(rax, rdi, rcx, xmm0,
+ &transition_elements_kind);
__ ret(0);
// Handle store cache miss, replacing the ic with the generic stub.
@@ -3766,6 +3607,12 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
Handle<Code> ic_force_generic =
masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
__ jmp(ic_force_generic, RelocInfo::CODE_TARGET);
+
+ __ bind(&transition_elements_kind);
+ // Restore smi-tagging of rcx.
+ __ Integer32ToSmi(rcx, rcx);
+ Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss();
+ __ jmp(ic_miss, RelocInfo::CODE_TARGET);
}
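
The forty-odd removed lines canonicalized NaNs inline before storing into a FixedDoubleArray; that logic now lives in the StoreNumberToDoubleElements macro-assembler helper, and a value that is neither a smi nor a heap number now triggers an elements-kind transition instead of a generic miss. Canonicalization matters because the array reserves a distinct NaN bit pattern as its hole sentinel, so every stored NaN must collapse to one canonical pattern. A self-contained sketch:

    #include <cmath>
    #include <limits>

    // Collapse every NaN bit pattern to one canonical quiet NaN, keeping
    // a separate NaN pattern free to encode "the hole" in a double array.
    double CanonicalizeNaN(double value) {
      return std::isnan(value) ? std::numeric_limits<double>::quiet_NaN()
                               : value;
    }
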
diff --git a/deps/v8/src/zone-inl.h b/deps/v8/src/zone-inl.h
index 4870105f3..7e506e7ca 100644
--- a/deps/v8/src/zone-inl.h
+++ b/deps/v8/src/zone-inl.h
@@ -53,6 +53,14 @@ inline void* Zone::New(int size) {
// Round up the requested size to fit the alignment.
size = RoundUp(size, kAlignment);
+ // If the allocation size is divisible by 8 then we return an 8-byte aligned
+ // address.
+ if (kPointerSize == 4 && kAlignment == 4) {
+ position_ += ((~size) & 4) & (reinterpret_cast<intptr_t>(position_) & 4);
+ } else {
+ ASSERT(kAlignment >= kPointerSize);
+ }
+
// Check if the requested size is available without expanding.
Address result = position_;
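
The new branch buys 8-byte alignment with two masks. size has already been rounded up to a multiple of kAlignment (4 here), so (~size) & 4 is 4 exactly when size is a multiple of 8, and position_ & 4 is 4 exactly when the cursor is 4-but-not-8 aligned; ANDing them bumps the cursor by 4 precisely when both hold, the only case where an 8-divisible allocation would come back misaligned. A self-contained check of the trick:

    #include <cassert>
    #include <cstdint>

    // Returns the (possibly bumped) allocation cursor for a size that has
    // already been rounded up to a multiple of 4.
    uintptr_t AlignFor(uintptr_t position, uintptr_t size) {
      // (~size) & 4 == 4  iff size % 8 == 0;
      // position & 4 == 4 iff position % 8 == 4.
      return position + ((~size & 4) & (position & 4));
    }

    int main() {
      assert(AlignFor(100, 16) == 104);  // 100 % 8 == 4, size % 8 == 0: bump
      assert(AlignFor(104, 16) == 104);  // already 8-aligned: no bump
      assert(AlignFor(100, 12) == 100);  // size % 8 != 0: no bump
      return 0;
    }
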
diff --git a/deps/v8/src/zone.h b/deps/v8/src/zone.h
index f60ac0d3e..2ca3c4d36 100644
--- a/deps/v8/src/zone.h
+++ b/deps/v8/src/zone.h
@@ -86,7 +86,9 @@ class Zone {
friend class Isolate;
friend class ZoneScope;
- // All pointers returned from New() have this alignment.
+ // All pointers returned from New() have this alignment. In addition, if the
+ // object being allocated has a size that is divisible by 8 then its alignment
+ // will be 8.
static const int kAlignment = kPointerSize;
// Never allocate segments smaller than this size in bytes.