author     Timothy J Fontaine <tjfontaine@gmail.com>  2013-10-22 15:14:25 -0700
committer  Timothy J Fontaine <tjfontaine@gmail.com>  2013-10-23 09:17:31 -0700
commit     a53c763c16eeabb0901a05dbcf38a72fa96d2f26 (patch)
tree       309bf250e1521cedf0e945d7a7629db511e64498 /deps/v8/src
parent     54910044b33a6405c72ad085915a55c575c027fc (diff)
download   node-a53c763c16eeabb0901a05dbcf38a72fa96d2f26.tar.gz
v8: upgrade 3.21.18.3
Diffstat (limited to 'deps/v8/src')
-rw-r--r-- deps/v8/src/accessors.cc | 175
-rw-r--r-- deps/v8/src/accessors.h | 98
-rw-r--r-- deps/v8/src/api.cc | 951
-rw-r--r-- deps/v8/src/api.h | 12
-rw-r--r-- deps/v8/src/apinatives.js | 65
-rw-r--r-- deps/v8/src/arguments.cc | 138
-rw-r--r-- deps/v8/src/arguments.h | 118
-rw-r--r-- deps/v8/src/arm/assembler-arm-inl.h | 5
-rw-r--r-- deps/v8/src/arm/assembler-arm.cc | 162
-rw-r--r-- deps/v8/src/arm/assembler-arm.h | 9
-rw-r--r-- deps/v8/src/arm/builtins-arm.cc | 182
-rw-r--r-- deps/v8/src/arm/code-stubs-arm.cc | 529
-rw-r--r-- deps/v8/src/arm/code-stubs-arm.h | 38
-rw-r--r-- deps/v8/src/arm/codegen-arm.h | 6
-rw-r--r-- deps/v8/src/arm/constants-arm.h | 2
-rw-r--r-- deps/v8/src/arm/cpu-arm.cc | 9
-rw-r--r-- deps/v8/src/arm/debug-arm.cc | 6
-rw-r--r-- deps/v8/src/arm/deoptimizer-arm.cc | 203
-rw-r--r-- deps/v8/src/arm/disasm-arm.cc | 3
-rw-r--r-- deps/v8/src/arm/full-codegen-arm.cc | 38
-rw-r--r-- deps/v8/src/arm/ic-arm.cc | 8
-rw-r--r-- deps/v8/src/arm/lithium-arm.cc | 152
-rw-r--r-- deps/v8/src/arm/lithium-arm.h | 507
-rw-r--r-- deps/v8/src/arm/lithium-codegen-arm.cc | 333
-rw-r--r-- deps/v8/src/arm/lithium-codegen-arm.h | 12
-rw-r--r-- deps/v8/src/arm/lithium-gap-resolver-arm.h | 2
-rw-r--r-- deps/v8/src/arm/macro-assembler-arm.cc | 242
-rw-r--r-- deps/v8/src/arm/macro-assembler-arm.h | 67
-rw-r--r-- deps/v8/src/arm/regexp-macro-assembler-arm.cc | 119
-rw-r--r-- deps/v8/src/arm/regexp-macro-assembler-arm.h | 11
-rw-r--r-- deps/v8/src/arm/simulator-arm.cc | 95
-rw-r--r-- deps/v8/src/arm/stub-cache-arm.cc | 678
-rw-r--r-- deps/v8/src/assembler.cc | 11
-rw-r--r-- deps/v8/src/assembler.h | 34
-rw-r--r-- deps/v8/src/ast.cc | 38
-rw-r--r-- deps/v8/src/ast.h | 518
-rw-r--r-- deps/v8/src/bootstrapper.cc | 163
-rw-r--r-- deps/v8/src/bootstrapper.h | 4
-rw-r--r-- deps/v8/src/builtins.cc | 51
-rw-r--r-- deps/v8/src/builtins.h | 28
-rw-r--r-- deps/v8/src/checks.cc | 28
-rw-r--r-- deps/v8/src/checks.h | 17
-rw-r--r-- deps/v8/src/circular-queue-inl.h | 62
-rw-r--r-- deps/v8/src/circular-queue.cc | 125
-rw-r--r-- deps/v8/src/circular-queue.h | 72
-rw-r--r-- deps/v8/src/code-stubs-hydrogen.cc | 309
-rw-r--r-- deps/v8/src/code-stubs.cc | 34
-rw-r--r-- deps/v8/src/code-stubs.h | 136
-rw-r--r-- deps/v8/src/codegen.cc | 17
-rw-r--r-- deps/v8/src/compiler.cc | 186
-rw-r--r-- deps/v8/src/compiler.h | 54
-rw-r--r-- deps/v8/src/contexts.cc | 43
-rw-r--r-- deps/v8/src/contexts.h | 22
-rw-r--r-- deps/v8/src/counters.cc | 10
-rw-r--r-- deps/v8/src/counters.h | 14
-rw-r--r-- deps/v8/src/cpu-profiler-inl.h | 21
-rw-r--r-- deps/v8/src/cpu-profiler.cc | 137
-rw-r--r-- deps/v8/src/cpu-profiler.h | 37
-rw-r--r-- deps/v8/src/cpu.cc | 466
-rw-r--r-- deps/v8/src/cpu.h | 91
-rw-r--r-- deps/v8/src/d8-debug.cc | 22
-rw-r--r-- deps/v8/src/d8-debug.h | 7
-rw-r--r-- deps/v8/src/d8.cc | 41
-rw-r--r-- deps/v8/src/d8.h | 14
-rw-r--r-- deps/v8/src/debug-agent.cc | 111
-rw-r--r-- deps/v8/src/debug-agent.h | 36
-rw-r--r-- deps/v8/src/debug-debugger.js | 9
-rw-r--r-- deps/v8/src/debug.cc | 179
-rw-r--r-- deps/v8/src/debug.h | 38
-rw-r--r-- deps/v8/src/deoptimizer.cc | 710
-rw-r--r-- deps/v8/src/deoptimizer.h | 128
-rw-r--r-- deps/v8/src/disassembler.cc | 6
-rw-r--r-- deps/v8/src/effects.h | 2
-rw-r--r-- deps/v8/src/elements.cc | 12
-rw-r--r-- deps/v8/src/execution.cc | 119
-rw-r--r-- deps/v8/src/execution.h | 51
-rw-r--r-- deps/v8/src/extensions/externalize-string-extension.cc | 6
-rw-r--r-- deps/v8/src/extensions/gc-extension.cc | 5
-rw-r--r-- deps/v8/src/extensions/i18n/break-iterator.cc | 333
-rw-r--r-- deps/v8/src/extensions/i18n/break-iterator.h | 85
-rw-r--r-- deps/v8/src/extensions/i18n/break-iterator.js | 197
-rw-r--r-- deps/v8/src/extensions/i18n/collator.js | 209
-rw-r--r-- deps/v8/src/extensions/i18n/date-format.js | 474
-rw-r--r-- deps/v8/src/extensions/i18n/footer.js | 40
-rw-r--r-- deps/v8/src/extensions/i18n/globals.js | 168
-rw-r--r-- deps/v8/src/extensions/i18n/header.js | 41
-rw-r--r-- deps/v8/src/extensions/i18n/i18n-extension.cc | 77
-rw-r--r-- deps/v8/src/extensions/i18n/i18n-extension.h | 51
-rw-r--r-- deps/v8/src/extensions/i18n/i18n-utils.cc | 177
-rw-r--r-- deps/v8/src/extensions/i18n/i18n-utils.h | 91
-rw-r--r-- deps/v8/src/extensions/i18n/i18n-utils.js | 536
-rw-r--r-- deps/v8/src/extensions/i18n/locale.js | 190
-rw-r--r-- deps/v8/src/extensions/i18n/number-format.js | 289
-rw-r--r-- deps/v8/src/extensions/i18n/overrides.js | 220
-rw-r--r-- deps/v8/src/extensions/statistics-extension.cc | 2
-rw-r--r-- deps/v8/src/factory.cc | 93
-rw-r--r-- deps/v8/src/factory.h | 21
-rw-r--r-- deps/v8/src/flag-definitions.h | 90
-rw-r--r-- deps/v8/src/flags.cc | 11
-rw-r--r-- deps/v8/src/flags.h | 3
-rw-r--r-- deps/v8/src/frames.cc | 7
-rw-r--r-- deps/v8/src/full-codegen.cc | 22
-rw-r--r-- deps/v8/src/full-codegen.h | 8
-rw-r--r-- deps/v8/src/gdb-jit.cc | 8
-rw-r--r-- deps/v8/src/global-handles.cc | 74
-rw-r--r-- deps/v8/src/global-handles.h | 20
-rw-r--r-- deps/v8/src/globals.h | 135
-rw-r--r-- deps/v8/src/handles-inl.h | 11
-rw-r--r-- deps/v8/src/handles.cc | 34
-rw-r--r-- deps/v8/src/handles.h | 5
-rw-r--r-- deps/v8/src/heap-inl.h | 58
-rw-r--r-- deps/v8/src/heap-snapshot-generator.cc | 51
-rw-r--r-- deps/v8/src/heap-snapshot-generator.h | 9
-rw-r--r-- deps/v8/src/heap.cc | 165
-rw-r--r-- deps/v8/src/heap.h | 30
-rw-r--r-- deps/v8/src/hydrogen-alias-analysis.h | 105
-rw-r--r-- deps/v8/src/hydrogen-bce.cc | 55
-rw-r--r-- deps/v8/src/hydrogen-bce.h | 2
-rw-r--r-- deps/v8/src/hydrogen-bch.cc | 40
-rw-r--r-- deps/v8/src/hydrogen-canonicalize.cc | 4
-rw-r--r-- deps/v8/src/hydrogen-escape-analysis.cc | 117
-rw-r--r-- deps/v8/src/hydrogen-escape-analysis.h | 13
-rw-r--r-- deps/v8/src/hydrogen-infer-representation.cc | 34
-rw-r--r-- deps/v8/src/hydrogen-instructions.cc | 101
-rw-r--r-- deps/v8/src/hydrogen-instructions.h | 1486
-rw-r--r-- deps/v8/src/hydrogen-osr.cc | 19
-rw-r--r-- deps/v8/src/hydrogen-osr.h | 9
-rw-r--r-- deps/v8/src/hydrogen-representation-changes.cc | 70
-rw-r--r-- deps/v8/src/hydrogen.cc | 1403
-rw-r--r-- deps/v8/src/hydrogen.h | 277
-rw-r--r-- deps/v8/src/i18n.cc | 144
-rw-r--r-- deps/v8/src/i18n.h | 31
-rw-r--r-- deps/v8/src/i18n.js | 2116
-rw-r--r-- deps/v8/src/ia32/assembler-ia32-inl.h | 10
-rw-r--r-- deps/v8/src/ia32/assembler-ia32.cc | 118
-rw-r--r-- deps/v8/src/ia32/assembler-ia32.h | 8
-rw-r--r-- deps/v8/src/ia32/builtins-ia32.cc | 166
-rw-r--r-- deps/v8/src/ia32/code-stubs-ia32.cc | 468
-rw-r--r-- deps/v8/src/ia32/code-stubs-ia32.h | 4
-rw-r--r-- deps/v8/src/ia32/codegen-ia32.cc | 57
-rw-r--r-- deps/v8/src/ia32/codegen-ia32.h | 2
-rw-r--r-- deps/v8/src/ia32/cpu-ia32.cc | 14
-rw-r--r-- deps/v8/src/ia32/debug-ia32.cc | 6
-rw-r--r-- deps/v8/src/ia32/deoptimizer-ia32.cc | 257
-rw-r--r-- deps/v8/src/ia32/disasm-ia32.cc | 16
-rw-r--r-- deps/v8/src/ia32/full-codegen-ia32.cc | 33
-rw-r--r-- deps/v8/src/ia32/ic-ia32.cc | 6
-rw-r--r-- deps/v8/src/ia32/lithium-codegen-ia32.cc | 948
-rw-r--r-- deps/v8/src/ia32/lithium-codegen-ia32.h | 102
-rw-r--r-- deps/v8/src/ia32/lithium-gap-resolver-ia32.h | 2
-rw-r--r-- deps/v8/src/ia32/lithium-ia32.cc | 144
-rw-r--r-- deps/v8/src/ia32/lithium-ia32.h | 544
-rw-r--r-- deps/v8/src/ia32/macro-assembler-ia32.cc | 301
-rw-r--r-- deps/v8/src/ia32/macro-assembler-ia32.h | 20
-rw-r--r-- deps/v8/src/ia32/regexp-macro-assembler-ia32.cc | 5
-rw-r--r-- deps/v8/src/ia32/stub-cache-ia32.cc | 684
-rw-r--r-- deps/v8/src/ic-inl.h | 1
-rw-r--r-- deps/v8/src/ic.cc | 209
-rw-r--r-- deps/v8/src/ic.h | 75
-rw-r--r-- deps/v8/src/isolate-inl.h | 18
-rw-r--r-- deps/v8/src/isolate.cc | 126
-rw-r--r-- deps/v8/src/isolate.h | 113
-rw-r--r-- deps/v8/src/json-stringifier.h | 19
-rw-r--r-- deps/v8/src/jsregexp.cc | 128
-rw-r--r-- deps/v8/src/jsregexp.h | 47
-rw-r--r-- deps/v8/src/lazy-instance.h | 19
-rw-r--r-- deps/v8/src/lithium-allocator.cc | 2
-rw-r--r-- deps/v8/src/lithium.cc | 10
-rw-r--r-- deps/v8/src/lithium.h | 30
-rw-r--r-- deps/v8/src/liveedit.cc | 130
-rw-r--r-- deps/v8/src/log-utils.cc | 12
-rw-r--r-- deps/v8/src/log-utils.h | 4
-rw-r--r-- deps/v8/src/log.cc | 129
-rw-r--r-- deps/v8/src/log.h | 24
-rw-r--r-- deps/v8/src/macros.py | 8
-rw-r--r-- deps/v8/src/mark-compact-inl.h | 2
-rw-r--r-- deps/v8/src/mark-compact.cc | 86
-rw-r--r-- deps/v8/src/mark-compact.h | 2
-rw-r--r-- deps/v8/src/marking-thread.cc | 20
-rw-r--r-- deps/v8/src/marking-thread.h | 13
-rw-r--r-- deps/v8/src/mips/assembler-mips-inl.h | 5
-rw-r--r-- deps/v8/src/mips/assembler-mips.cc | 11
-rw-r--r-- deps/v8/src/mips/builtins-mips.cc | 178
-rw-r--r-- deps/v8/src/mips/code-stubs-mips.cc | 954
-rw-r--r-- deps/v8/src/mips/code-stubs-mips.h | 134
-rw-r--r-- deps/v8/src/mips/codegen-mips.h | 6
-rw-r--r-- deps/v8/src/mips/cpu-mips.cc | 8
-rw-r--r-- deps/v8/src/mips/debug-mips.cc | 4
-rw-r--r-- deps/v8/src/mips/deoptimizer-mips.cc | 202
-rw-r--r-- deps/v8/src/mips/disasm-mips.cc | 3
-rw-r--r-- deps/v8/src/mips/full-codegen-mips.cc | 40
-rw-r--r-- deps/v8/src/mips/ic-mips.cc | 9
-rw-r--r-- deps/v8/src/mips/lithium-codegen-mips.cc | 300
-rw-r--r-- deps/v8/src/mips/lithium-codegen-mips.h | 12
-rw-r--r-- deps/v8/src/mips/lithium-gap-resolver-mips.h | 2
-rw-r--r-- deps/v8/src/mips/lithium-mips.cc | 109
-rw-r--r-- deps/v8/src/mips/lithium-mips.h | 493
-rw-r--r-- deps/v8/src/mips/macro-assembler-mips.cc | 433
-rw-r--r-- deps/v8/src/mips/macro-assembler-mips.h | 110
-rw-r--r-- deps/v8/src/mips/regexp-macro-assembler-mips.cc | 3
-rw-r--r-- deps/v8/src/mips/simulator-mips.cc | 140
-rw-r--r-- deps/v8/src/mips/stub-cache-mips.cc | 803
-rw-r--r-- deps/v8/src/mksnapshot.cc | 18
-rw-r--r-- deps/v8/src/natives.h | 3
-rw-r--r-- deps/v8/src/object-observe.js | 440
-rw-r--r-- deps/v8/src/objects-debug.cc | 46
-rw-r--r-- deps/v8/src/objects-inl.h | 233
-rw-r--r-- deps/v8/src/objects-printer.cc | 4
-rw-r--r-- deps/v8/src/objects-visiting-inl.h | 5
-rw-r--r-- deps/v8/src/objects.cc | 1833
-rw-r--r-- deps/v8/src/objects.h | 464
-rw-r--r-- deps/v8/src/optimizing-compiler-thread.cc | 151
-rw-r--r-- deps/v8/src/optimizing-compiler-thread.h | 62
-rw-r--r-- deps/v8/src/parser.cc | 73
-rw-r--r-- deps/v8/src/parser.h | 6
-rw-r--r-- deps/v8/src/platform-cygwin.cc | 126
-rw-r--r-- deps/v8/src/platform-freebsd.cc | 111
-rw-r--r-- deps/v8/src/platform-linux.cc | 304
-rw-r--r-- deps/v8/src/platform-macos.cc | 96
-rw-r--r-- deps/v8/src/platform-nullos.cc | 573
-rw-r--r-- deps/v8/src/platform-openbsd.cc | 117
-rw-r--r-- deps/v8/src/platform-posix.cc | 273
-rw-r--r-- deps/v8/src/platform-solaris.cc | 131
-rw-r--r-- deps/v8/src/platform-win32.cc | 495
-rw-r--r-- deps/v8/src/platform.h | 251
-rw-r--r-- deps/v8/src/platform/condition-variable.cc | 345
-rw-r--r-- deps/v8/src/platform/condition-variable.h | 140
-rw-r--r-- deps/v8/src/platform/elapsed-timer.h | 120
-rw-r--r-- deps/v8/src/platform/mutex.cc | 214
-rw-r--r-- deps/v8/src/platform/mutex.h | 238
-rw-r--r-- deps/v8/src/platform/semaphore.cc | 214
-rw-r--r-- deps/v8/src/platform/semaphore.h | 126
-rw-r--r-- deps/v8/src/platform/socket.cc | 224
-rw-r--r-- deps/v8/src/platform/socket.h | 101
-rw-r--r-- deps/v8/src/platform/time.cc | 613
-rw-r--r-- deps/v8/src/platform/time.h | 413
-rw-r--r-- deps/v8/src/preparser.h | 5
-rw-r--r-- deps/v8/src/prettyprinter.cc | 10
-rw-r--r-- deps/v8/src/prettyprinter.h | 6
-rw-r--r-- deps/v8/src/profile-generator-inl.h | 8
-rw-r--r-- deps/v8/src/profile-generator.cc | 99
-rw-r--r-- deps/v8/src/profile-generator.h | 37
-rw-r--r-- deps/v8/src/property-details.h | 2
-rw-r--r-- deps/v8/src/property.h | 5
-rw-r--r-- deps/v8/src/regexp-macro-assembler.cc | 3
-rw-r--r-- deps/v8/src/regexp-stack.cc | 1
-rw-r--r-- deps/v8/src/rewriter.cc | 4
-rw-r--r-- deps/v8/src/runtime-profiler.cc | 41
-rw-r--r-- deps/v8/src/runtime.cc | 833
-rw-r--r-- deps/v8/src/runtime.h | 27
-rw-r--r-- deps/v8/src/sampler.cc | 321
-rw-r--r-- deps/v8/src/sampler.h | 19
-rw-r--r-- deps/v8/src/scopeinfo.cc | 8
-rw-r--r-- deps/v8/src/scopes.cc | 34
-rw-r--r-- deps/v8/src/serialize.cc | 61
-rw-r--r-- deps/v8/src/serialize.h | 28
-rw-r--r-- deps/v8/src/snapshot-common.cc | 4
-rw-r--r-- deps/v8/src/snapshot.h | 2
-rw-r--r-- deps/v8/src/spaces-inl.h | 8
-rw-r--r-- deps/v8/src/spaces.cc | 76
-rw-r--r-- deps/v8/src/spaces.h | 46
-rw-r--r-- deps/v8/src/store-buffer-inl.h | 2
-rw-r--r-- deps/v8/src/store-buffer.cc | 14
-rw-r--r-- deps/v8/src/string-stream.cc | 112
-rw-r--r-- deps/v8/src/string-stream.h | 10
-rw-r--r-- deps/v8/src/stub-cache.cc | 233
-rw-r--r-- deps/v8/src/stub-cache.h | 74
-rw-r--r-- deps/v8/src/sweeper-thread.cc | 20
-rw-r--r-- deps/v8/src/sweeper-thread.h | 13
-rw-r--r-- deps/v8/src/transitions.cc | 20
-rw-r--r-- deps/v8/src/transitions.h | 3
-rw-r--r-- deps/v8/src/type-info.cc | 9
-rw-r--r-- deps/v8/src/types.h | 2
-rw-r--r-- deps/v8/src/typing.cc | 2
-rw-r--r-- deps/v8/src/unique.h | 266
-rw-r--r-- deps/v8/src/utils/random-number-generator.cc | 136
-rw-r--r-- deps/v8/src/utils/random-number-generator.h | 106
-rw-r--r-- deps/v8/src/v8-counters.cc | 14
-rw-r--r-- deps/v8/src/v8.cc | 105
-rw-r--r-- deps/v8/src/v8.h | 26
-rw-r--r-- deps/v8/src/v8dll-main.cc | 6
-rw-r--r-- deps/v8/src/v8globals.h | 35
-rw-r--r-- deps/v8/src/v8threads.cc | 19
-rw-r--r-- deps/v8/src/v8threads.h | 6
-rw-r--r-- deps/v8/src/version.cc | 6
-rw-r--r-- deps/v8/src/win32-headers.h | 9
-rw-r--r-- deps/v8/src/win32-math.cc | 2
-rw-r--r-- deps/v8/src/x64/assembler-x64-inl.h | 5
-rw-r--r-- deps/v8/src/x64/assembler-x64.cc | 123
-rw-r--r-- deps/v8/src/x64/assembler-x64.h | 4
-rw-r--r-- deps/v8/src/x64/builtins-x64.cc | 185
-rw-r--r-- deps/v8/src/x64/code-stubs-x64.cc | 597
-rw-r--r-- deps/v8/src/x64/code-stubs-x64.h | 4
-rw-r--r-- deps/v8/src/x64/codegen-x64.cc | 22
-rw-r--r-- deps/v8/src/x64/codegen-x64.h | 73
-rw-r--r-- deps/v8/src/x64/cpu-x64.cc | 12
-rw-r--r-- deps/v8/src/x64/debug-x64.cc | 19
-rw-r--r-- deps/v8/src/x64/deoptimizer-x64.cc | 206
-rw-r--r-- deps/v8/src/x64/disasm-x64.cc | 74
-rw-r--r-- deps/v8/src/x64/full-codegen-x64.cc | 47
-rw-r--r-- deps/v8/src/x64/ic-x64.cc | 37
-rw-r--r-- deps/v8/src/x64/lithium-codegen-x64.cc | 368
-rw-r--r-- deps/v8/src/x64/lithium-codegen-x64.h | 14
-rw-r--r-- deps/v8/src/x64/lithium-gap-resolver-x64.h | 2
-rw-r--r-- deps/v8/src/x64/lithium-x64.cc | 85
-rw-r--r-- deps/v8/src/x64/lithium-x64.h | 480
-rw-r--r-- deps/v8/src/x64/macro-assembler-x64.cc | 229
-rw-r--r-- deps/v8/src/x64/macro-assembler-x64.h | 35
-rw-r--r-- deps/v8/src/x64/regexp-macro-assembler-x64.cc | 7
-rw-r--r-- deps/v8/src/x64/stub-cache-x64.cc | 867
310 files changed, 20727 insertions, 22630 deletions
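
Note on the pattern below: the bulk of this upgrade is a mechanical API migration in which internal accessor callbacks that used to recover the current isolate through thread-local storage (i::Isolate::Current()) now receive it as an explicit first parameter. A minimal before/after sketch of that pattern, using a hypothetical callback name (GetFoo is illustrative, not taken from this diff):

    // Before (3.20-era shape): the isolate came from a hidden TLS lookup.
    static MaybeObject* GetFoo(Object* object, void*) {
      Isolate* isolate = Isolate::Current();  // per-call TLS access
      return isolate->heap()->undefined_value();
    }

    // After (3.21.18.3 shape): the caller threads the isolate through.
    static MaybeObject* GetFoo(Isolate* isolate, Object* object, void*) {
      return isolate->heap()->undefined_value();  // no TLS access needed
    }

This avoids a thread-local lookup on every call and makes the callback's isolate dependency explicit, which the accessors.cc and accessors.h hunks below apply uniformly.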
diff --git a/deps/v8/src/accessors.cc b/deps/v8/src/accessors.cc
index a43eb78b8..669c02baf 100644
--- a/deps/v8/src/accessors.cc
+++ b/deps/v8/src/accessors.cc
@@ -51,19 +51,27 @@ static C* FindInstanceOf(Isolate* isolate, Object* obj) {
// Entry point that never should be called.
-MaybeObject* Accessors::IllegalSetter(JSObject*, Object*, void*) {
+MaybeObject* Accessors::IllegalSetter(Isolate* isolate,
+ JSObject*,
+ Object*,
+ void*) {
UNREACHABLE();
return NULL;
}
-Object* Accessors::IllegalGetAccessor(Object* object, void*) {
+Object* Accessors::IllegalGetAccessor(Isolate* isolate,
+ Object* object,
+ void*) {
UNREACHABLE();
return object;
}
-MaybeObject* Accessors::ReadOnlySetAccessor(JSObject*, Object* value, void*) {
+MaybeObject* Accessors::ReadOnlySetAccessor(Isolate* isolate,
+ JSObject*,
+ Object* value,
+ void*) {
// According to ECMA-262, section 8.6.2.2, page 28, setting
// read-only properties must be silently ignored.
return value;
@@ -75,38 +83,41 @@ MaybeObject* Accessors::ReadOnlySetAccessor(JSObject*, Object* value, void*) {
//
-MaybeObject* Accessors::ArrayGetLength(Object* object, void*) {
+MaybeObject* Accessors::ArrayGetLength(Isolate* isolate,
+ Object* object,
+ void*) {
// Traverse the prototype chain until we reach an array.
- JSArray* holder = FindInstanceOf<JSArray>(Isolate::Current(), object);
+ JSArray* holder = FindInstanceOf<JSArray>(isolate, object);
return holder == NULL ? Smi::FromInt(0) : holder->length();
}
// The helper function will 'flatten' Number objects.
-Object* Accessors::FlattenNumber(Object* value) {
+Object* Accessors::FlattenNumber(Isolate* isolate, Object* value) {
if (value->IsNumber() || !value->IsJSValue()) return value;
JSValue* wrapper = JSValue::cast(value);
- ASSERT(Isolate::Current()->context()->native_context()->number_function()->
+ ASSERT(wrapper->GetIsolate()->context()->native_context()->number_function()->
has_initial_map());
- Map* number_map = Isolate::Current()->context()->native_context()->
+ Map* number_map = isolate->context()->native_context()->
number_function()->initial_map();
if (wrapper->map() == number_map) return wrapper->value();
return value;
}
-MaybeObject* Accessors::ArraySetLength(JSObject* object, Object* value, void*) {
- Isolate* isolate = object->GetIsolate();
-
+MaybeObject* Accessors::ArraySetLength(Isolate* isolate,
+ JSObject* object,
+ Object* value,
+ void*) {
// This means one of the object's prototypes is a JSArray and the
// object does not have a 'length' property. Calling SetProperty
// causes an infinite loop.
if (!object->IsJSArray()) {
- return object->SetLocalPropertyIgnoreAttributes(
+ return object->SetLocalPropertyIgnoreAttributesTrampoline(
isolate->heap()->length_string(), value, NONE);
}
- value = FlattenNumber(value);
+ value = FlattenNumber(isolate, value);
// Need to call methods that may trigger GC.
HandleScope scope(isolate);
@@ -116,9 +127,11 @@ MaybeObject* Accessors::ArraySetLength(JSObject* object, Object* value, void*) {
Handle<Object> value_handle(value, isolate);
bool has_exception;
- Handle<Object> uint32_v = Execution::ToUint32(value_handle, &has_exception);
+ Handle<Object> uint32_v =
+ Execution::ToUint32(isolate, value_handle, &has_exception);
if (has_exception) return Failure::Exception();
- Handle<Object> number_v = Execution::ToNumber(value_handle, &has_exception);
+ Handle<Object> number_v =
+ Execution::ToNumber(isolate, value_handle, &has_exception);
if (has_exception) return Failure::Exception();
if (uint32_v->Number() == number_v->Number()) {
@@ -142,7 +155,9 @@ const AccessorDescriptor Accessors::ArrayLength = {
//
-MaybeObject* Accessors::StringGetLength(Object* object, void*) {
+MaybeObject* Accessors::StringGetLength(Isolate* isolate,
+ Object* object,
+ void*) {
Object* value = object;
if (object->IsJSValue()) value = JSValue::cast(object)->value();
if (value->IsString()) return Smi::FromInt(String::cast(value)->length());
@@ -164,7 +179,9 @@ const AccessorDescriptor Accessors::StringLength = {
//
-MaybeObject* Accessors::ScriptGetSource(Object* object, void*) {
+MaybeObject* Accessors::ScriptGetSource(Isolate* isolate,
+ Object* object,
+ void*) {
Object* script = JSValue::cast(object)->value();
return Script::cast(script)->source();
}
@@ -182,7 +199,9 @@ const AccessorDescriptor Accessors::ScriptSource = {
//
-MaybeObject* Accessors::ScriptGetName(Object* object, void*) {
+MaybeObject* Accessors::ScriptGetName(Isolate* isolate,
+ Object* object,
+ void*) {
Object* script = JSValue::cast(object)->value();
return Script::cast(script)->name();
}
@@ -200,7 +219,7 @@ const AccessorDescriptor Accessors::ScriptName = {
//
-MaybeObject* Accessors::ScriptGetId(Object* object, void*) {
+MaybeObject* Accessors::ScriptGetId(Isolate* isolate, Object* object, void*) {
Object* script = JSValue::cast(object)->value();
return Script::cast(script)->id();
}
@@ -218,7 +237,9 @@ const AccessorDescriptor Accessors::ScriptId = {
//
-MaybeObject* Accessors::ScriptGetLineOffset(Object* object, void*) {
+MaybeObject* Accessors::ScriptGetLineOffset(Isolate* isolate,
+ Object* object,
+ void*) {
Object* script = JSValue::cast(object)->value();
return Script::cast(script)->line_offset();
}
@@ -236,7 +257,9 @@ const AccessorDescriptor Accessors::ScriptLineOffset = {
//
-MaybeObject* Accessors::ScriptGetColumnOffset(Object* object, void*) {
+MaybeObject* Accessors::ScriptGetColumnOffset(Isolate* isolate,
+ Object* object,
+ void*) {
Object* script = JSValue::cast(object)->value();
return Script::cast(script)->column_offset();
}
@@ -254,7 +277,9 @@ const AccessorDescriptor Accessors::ScriptColumnOffset = {
//
-MaybeObject* Accessors::ScriptGetData(Object* object, void*) {
+MaybeObject* Accessors::ScriptGetData(Isolate* isolate,
+ Object* object,
+ void*) {
Object* script = JSValue::cast(object)->value();
return Script::cast(script)->data();
}
@@ -272,7 +297,9 @@ const AccessorDescriptor Accessors::ScriptData = {
//
-MaybeObject* Accessors::ScriptGetType(Object* object, void*) {
+MaybeObject* Accessors::ScriptGetType(Isolate* isolate,
+ Object* object,
+ void*) {
Object* script = JSValue::cast(object)->value();
return Script::cast(script)->type();
}
@@ -290,7 +317,9 @@ const AccessorDescriptor Accessors::ScriptType = {
//
-MaybeObject* Accessors::ScriptGetCompilationType(Object* object, void*) {
+MaybeObject* Accessors::ScriptGetCompilationType(Isolate* isolate,
+ Object* object,
+ void*) {
Object* script = JSValue::cast(object)->value();
return Smi::FromInt(Script::cast(script)->compilation_type());
}
@@ -308,9 +337,10 @@ const AccessorDescriptor Accessors::ScriptCompilationType = {
//
-MaybeObject* Accessors::ScriptGetLineEnds(Object* object, void*) {
+MaybeObject* Accessors::ScriptGetLineEnds(Isolate* isolate,
+ Object* object,
+ void*) {
JSValue* wrapper = JSValue::cast(object);
- Isolate* isolate = wrapper->GetIsolate();
HandleScope scope(isolate);
Handle<Script> script(Script::cast(wrapper->value()), isolate);
InitScriptLineEnds(script);
@@ -337,7 +367,9 @@ const AccessorDescriptor Accessors::ScriptLineEnds = {
//
-MaybeObject* Accessors::ScriptGetContextData(Object* object, void*) {
+MaybeObject* Accessors::ScriptGetContextData(Isolate* isolate,
+ Object* object,
+ void*) {
Object* script = JSValue::cast(object)->value();
return Script::cast(script)->context_data();
}
@@ -355,7 +387,9 @@ const AccessorDescriptor Accessors::ScriptContextData = {
//
-MaybeObject* Accessors::ScriptGetEvalFromScript(Object* object, void*) {
+MaybeObject* Accessors::ScriptGetEvalFromScript(Isolate* isolate,
+ Object* object,
+ void*) {
Object* script = JSValue::cast(object)->value();
if (!Script::cast(script)->eval_from_shared()->IsUndefined()) {
Handle<SharedFunctionInfo> eval_from_shared(
@@ -366,7 +400,7 @@ MaybeObject* Accessors::ScriptGetEvalFromScript(Object* object, void*) {
return *GetScriptWrapper(eval_from_script);
}
}
- return HEAP->undefined_value();
+ return isolate->heap()->undefined_value();
}
@@ -382,9 +416,11 @@ const AccessorDescriptor Accessors::ScriptEvalFromScript = {
//
-MaybeObject* Accessors::ScriptGetEvalFromScriptPosition(Object* object, void*) {
+MaybeObject* Accessors::ScriptGetEvalFromScriptPosition(Isolate* isolate,
+ Object* object,
+ void*) {
Script* raw_script = Script::cast(JSValue::cast(object)->value());
- HandleScope scope(raw_script->GetIsolate());
+ HandleScope scope(isolate);
Handle<Script> script(raw_script);
// If this is not a script compiled through eval there is no eval position.
@@ -413,7 +449,9 @@ const AccessorDescriptor Accessors::ScriptEvalFromScriptPosition = {
//
-MaybeObject* Accessors::ScriptGetEvalFromFunctionName(Object* object, void*) {
+MaybeObject* Accessors::ScriptGetEvalFromFunctionName(Isolate* isolate,
+ Object* object,
+ void*) {
Object* script = JSValue::cast(object)->value();
Handle<SharedFunctionInfo> shared(SharedFunctionInfo::cast(
Script::cast(script)->eval_from_shared()));
@@ -440,15 +478,30 @@ const AccessorDescriptor Accessors::ScriptEvalFromFunctionName = {
//
-Handle<Object> Accessors::FunctionGetPrototype(Handle<Object> object) {
- Isolate* isolate = Isolate::Current();
- CALL_HEAP_FUNCTION(
- isolate, Accessors::FunctionGetPrototype(*object, 0), Object);
+Handle<Object> Accessors::FunctionGetPrototype(Handle<JSFunction> function) {
+ CALL_HEAP_FUNCTION(function->GetIsolate(),
+ Accessors::FunctionGetPrototype(function->GetIsolate(),
+ *function,
+ NULL),
+ Object);
}
-MaybeObject* Accessors::FunctionGetPrototype(Object* object, void*) {
- Isolate* isolate = Isolate::Current();
+Handle<Object> Accessors::FunctionSetPrototype(Handle<JSFunction> function,
+ Handle<Object> prototype) {
+ ASSERT(function->should_have_prototype());
+ CALL_HEAP_FUNCTION(function->GetIsolate(),
+ Accessors::FunctionSetPrototype(function->GetIsolate(),
+ *function,
+ *prototype,
+ NULL),
+ Object);
+}
+
+
+MaybeObject* Accessors::FunctionGetPrototype(Isolate* isolate,
+ Object* object,
+ void*) {
JSFunction* function_raw = FindInstanceOf<JSFunction>(isolate, object);
if (function_raw == NULL) return isolate->heap()->undefined_value();
while (!function_raw->should_have_prototype()) {
@@ -469,18 +522,17 @@ MaybeObject* Accessors::FunctionGetPrototype(Object* object, void*) {
}
-MaybeObject* Accessors::FunctionSetPrototype(JSObject* object,
+MaybeObject* Accessors::FunctionSetPrototype(Isolate* isolate,
+ JSObject* object,
Object* value_raw,
void*) {
- Isolate* isolate = object->GetIsolate();
Heap* heap = isolate->heap();
JSFunction* function_raw = FindInstanceOf<JSFunction>(isolate, object);
if (function_raw == NULL) return heap->undefined_value();
if (!function_raw->should_have_prototype()) {
// Since we hit this accessor, object will have no prototype property.
- return object->SetLocalPropertyIgnoreAttributes(heap->prototype_string(),
- value_raw,
- NONE);
+ return object->SetLocalPropertyIgnoreAttributesTrampoline(
+ heap->prototype_string(), value_raw, NONE);
}
HandleScope scope(isolate);
@@ -523,8 +575,9 @@ const AccessorDescriptor Accessors::FunctionPrototype = {
//
-MaybeObject* Accessors::FunctionGetLength(Object* object, void*) {
- Isolate* isolate = Isolate::Current();
+MaybeObject* Accessors::FunctionGetLength(Isolate* isolate,
+ Object* object,
+ void*) {
JSFunction* function = FindInstanceOf<JSFunction>(isolate, object);
if (function == NULL) return Smi::FromInt(0);
// Check if already compiled.
@@ -554,8 +607,9 @@ const AccessorDescriptor Accessors::FunctionLength = {
//
-MaybeObject* Accessors::FunctionGetName(Object* object, void*) {
- Isolate* isolate = Isolate::Current();
+MaybeObject* Accessors::FunctionGetName(Isolate* isolate,
+ Object* object,
+ void*) {
JSFunction* holder = FindInstanceOf<JSFunction>(isolate, object);
return holder == NULL
? isolate->heap()->undefined_value()
@@ -575,10 +629,12 @@ const AccessorDescriptor Accessors::FunctionName = {
//
-Handle<Object> Accessors::FunctionGetArguments(Handle<Object> object) {
- Isolate* isolate = Isolate::Current();
- CALL_HEAP_FUNCTION(
- isolate, Accessors::FunctionGetArguments(*object, 0), Object);
+Handle<Object> Accessors::FunctionGetArguments(Handle<JSFunction> function) {
+ CALL_HEAP_FUNCTION(function->GetIsolate(),
+ Accessors::FunctionGetArguments(function->GetIsolate(),
+ *function,
+ NULL),
+ Object);
}
@@ -609,8 +665,9 @@ static MaybeObject* ConstructArgumentsObjectForInlinedFunction(
}
-MaybeObject* Accessors::FunctionGetArguments(Object* object, void*) {
- Isolate* isolate = Isolate::Current();
+MaybeObject* Accessors::FunctionGetArguments(Isolate* isolate,
+ Object* object,
+ void*) {
HandleScope scope(isolate);
JSFunction* holder = FindInstanceOf<JSFunction>(isolate, object);
if (holder == NULL) return isolate->heap()->undefined_value();
@@ -732,8 +789,9 @@ class FrameFunctionIterator {
};
-MaybeObject* Accessors::FunctionGetCaller(Object* object, void*) {
- Isolate* isolate = Isolate::Current();
+MaybeObject* Accessors::FunctionGetCaller(Isolate* isolate,
+ Object* object,
+ void*) {
HandleScope scope(isolate);
DisallowHeapAllocation no_allocation;
JSFunction* holder = FindInstanceOf<JSFunction>(isolate, object);
@@ -839,15 +897,16 @@ Handle<AccessorInfo> Accessors::MakeModuleExport(
Handle<String> name,
int index,
PropertyAttributes attributes) {
- Factory* factory = name->GetIsolate()->factory();
+ Isolate* isolate = name->GetIsolate();
+ Factory* factory = isolate->factory();
Handle<ExecutableAccessorInfo> info = factory->NewExecutableAccessorInfo();
info->set_property_attributes(attributes);
info->set_all_can_read(true);
info->set_all_can_write(true);
info->set_name(*name);
info->set_data(Smi::FromInt(index));
- Handle<Object> getter = v8::FromCData(&ModuleGetExport);
- Handle<Object> setter = v8::FromCData(&ModuleSetExport);
+ Handle<Object> getter = v8::FromCData(isolate, &ModuleGetExport);
+ Handle<Object> setter = v8::FromCData(isolate, &ModuleSetExport);
info->set_getter(*getter);
if (!(attributes & ReadOnly)) info->set_setter(*setter);
return info;
diff --git a/deps/v8/src/accessors.h b/deps/v8/src/accessors.h
index ae56a3d44..d9a2130f6 100644
--- a/deps/v8/src/accessors.h
+++ b/deps/v8/src/accessors.h
@@ -77,12 +77,10 @@ class Accessors : public AllStatic {
};
// Accessor functions called directly from the runtime system.
- static Handle<Object> FunctionGetPrototype(Handle<Object> object);
- static Handle<Object> FunctionGetArguments(Handle<Object> object);
-
- MUST_USE_RESULT static MaybeObject* FunctionSetPrototype(JSObject* object,
- Object* value,
- void*);
+ static Handle<Object> FunctionSetPrototype(Handle<JSFunction> object,
+ Handle<Object> value);
+ static Handle<Object> FunctionGetPrototype(Handle<JSFunction> object);
+ static Handle<Object> FunctionGetArguments(Handle<JSFunction> object);
// Accessor infos.
static Handle<AccessorInfo> MakeModuleExport(
@@ -90,34 +88,70 @@ class Accessors : public AllStatic {
private:
// Accessor functions only used through the descriptor.
- static MaybeObject* FunctionGetPrototype(Object* object, void*);
- static MaybeObject* FunctionGetLength(Object* object, void*);
- static MaybeObject* FunctionGetName(Object* object, void*);
- static MaybeObject* FunctionGetArguments(Object* object, void*);
- static MaybeObject* FunctionGetCaller(Object* object, void*);
- MUST_USE_RESULT static MaybeObject* ArraySetLength(JSObject* object,
- Object* value, void*);
- static MaybeObject* ArrayGetLength(Object* object, void*);
- static MaybeObject* StringGetLength(Object* object, void*);
- static MaybeObject* ScriptGetName(Object* object, void*);
- static MaybeObject* ScriptGetId(Object* object, void*);
- static MaybeObject* ScriptGetSource(Object* object, void*);
- static MaybeObject* ScriptGetLineOffset(Object* object, void*);
- static MaybeObject* ScriptGetColumnOffset(Object* object, void*);
- static MaybeObject* ScriptGetData(Object* object, void*);
- static MaybeObject* ScriptGetType(Object* object, void*);
- static MaybeObject* ScriptGetCompilationType(Object* object, void*);
- static MaybeObject* ScriptGetLineEnds(Object* object, void*);
- static MaybeObject* ScriptGetContextData(Object* object, void*);
- static MaybeObject* ScriptGetEvalFromScript(Object* object, void*);
- static MaybeObject* ScriptGetEvalFromScriptPosition(Object* object, void*);
- static MaybeObject* ScriptGetEvalFromFunctionName(Object* object, void*);
+ static MaybeObject* FunctionSetPrototype(Isolate* isolate,
+ JSObject* object,
+ Object*,
+ void*);
+ static MaybeObject* FunctionGetPrototype(Isolate* isolate,
+ Object* object,
+ void*);
+ static MaybeObject* FunctionGetLength(Isolate* isolate,
+ Object* object,
+ void*);
+ static MaybeObject* FunctionGetName(Isolate* isolate, Object* object, void*);
+ static MaybeObject* FunctionGetArguments(Isolate* isolate,
+ Object* object,
+ void*);
+ static MaybeObject* FunctionGetCaller(Isolate* isolate,
+ Object* object,
+ void*);
+ static MaybeObject* ArraySetLength(Isolate* isolate,
+ JSObject* object,
+ Object*,
+ void*);
+ static MaybeObject* ArrayGetLength(Isolate* isolate, Object* object, void*);
+ static MaybeObject* StringGetLength(Isolate* isolate, Object* object, void*);
+ static MaybeObject* ScriptGetName(Isolate* isolate, Object* object, void*);
+ static MaybeObject* ScriptGetId(Isolate* isolate, Object* object, void*);
+ static MaybeObject* ScriptGetSource(Isolate* isolate, Object* object, void*);
+ static MaybeObject* ScriptGetLineOffset(Isolate* isolate,
+ Object* object,
+ void*);
+ static MaybeObject* ScriptGetColumnOffset(Isolate* isolate,
+ Object* object,
+ void*);
+ static MaybeObject* ScriptGetData(Isolate* isolate, Object* object, void*);
+ static MaybeObject* ScriptGetType(Isolate* isolate, Object* object, void*);
+ static MaybeObject* ScriptGetCompilationType(Isolate* isolate,
+ Object* object,
+ void*);
+ static MaybeObject* ScriptGetLineEnds(Isolate* isolate,
+ Object* object,
+ void*);
+ static MaybeObject* ScriptGetContextData(Isolate* isolate,
+ Object* object,
+ void*);
+ static MaybeObject* ScriptGetEvalFromScript(Isolate* isolate,
+ Object* object,
+ void*);
+ static MaybeObject* ScriptGetEvalFromScriptPosition(Isolate* isolate,
+ Object* object,
+ void*);
+ static MaybeObject* ScriptGetEvalFromFunctionName(Isolate* isolate,
+ Object* object,
+ void*);
// Helper functions.
- static Object* FlattenNumber(Object* value);
- static MaybeObject* IllegalSetter(JSObject*, Object*, void*);
- static Object* IllegalGetAccessor(Object* object, void*);
- static MaybeObject* ReadOnlySetAccessor(JSObject*, Object* value, void*);
+ static Object* FlattenNumber(Isolate* isolate, Object* value);
+ static MaybeObject* IllegalSetter(Isolate* isolate,
+ JSObject*,
+ Object*,
+ void*);
+ static Object* IllegalGetAccessor(Isolate* isolate, Object* object, void*);
+ static MaybeObject* ReadOnlySetAccessor(Isolate* isolate,
+ JSObject*,
+ Object* value,
+ void*);
};
} } // namespace v8::internal
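
The accessors.h hunk above also narrows the runtime-facing wrappers from Handle<Object> to Handle<JSFunction>, so the receiver's type is established at the call site rather than probed inside the accessor. A hedged sketch of a call site under the new signatures (hypothetical caller code, assuming a receiver already known to be a function):

    // Old shape: any Handle<Object> was accepted; the accessor searched
    // the prototype chain for a JSFunction internally.
    // Handle<Object> proto = Accessors::FunctionGetPrototype(receiver);

    // New shape: the caller casts (and thereby asserts the type) first.
    Handle<JSFunction> function = Handle<JSFunction>::cast(receiver);
    Handle<Object> proto = Accessors::FunctionGetPrototype(function);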
diff --git a/deps/v8/src/api.cc b/deps/v8/src/api.cc
index eb2ffcff1..71a8f4a6c 100644
--- a/deps/v8/src/api.cc
+++ b/deps/v8/src/api.cc
@@ -53,6 +53,7 @@
#endif
#include "parser.h"
#include "platform.h"
+#include "platform/time.h"
#include "profile-generator-inl.h"
#include "property-details.h"
#include "property.h"
@@ -61,6 +62,7 @@
#include "scanner-character-streams.h"
#include "snapshot.h"
#include "unicode-inl.h"
+#include "utils/random-number-generator.h"
#include "v8threads.h"
#include "version.h"
#include "vm-state-inl.h"
@@ -220,25 +222,27 @@ void i::V8::FatalProcessOutOfMemory(const char* location, bool take_snapshot) {
// HeapIterator here without doing a special GC.
isolate->heap()->RecordStats(&heap_stats, false);
}
- i::V8::SetFatalError();
+ isolate->SignalFatalError();
FatalErrorCallback callback = GetFatalErrorHandler();
const char* message = "Allocation failed - process out of memory";
callback(location, message);
// If the callback returns, we stop execution.
- UNREACHABLE();
+ FATAL("API fatal error handler returned after process out of memory");
}
bool Utils::ReportApiFailure(const char* location, const char* message) {
FatalErrorCallback callback = GetFatalErrorHandler();
callback(location, message);
- i::V8::SetFatalError();
+ i::Isolate* isolate = i::Isolate::Current();
+ isolate->SignalFatalError();
return false;
}
bool V8::IsDead() {
- return i::V8::IsDead();
+ i::Isolate* isolate = i::Isolate::Current();
+ return isolate->IsDead();
}
@@ -277,7 +281,7 @@ static bool ReportEmptyHandle(const char* location) {
*/
static inline bool IsDeadCheck(i::Isolate* isolate, const char* location) {
return !isolate->IsInitialized()
- && i::V8::IsDead() ? ReportV8Dead(location) : false;
+ && isolate->IsDead() ? ReportV8Dead(location) : false;
}
@@ -399,9 +403,6 @@ enum CompressedStartupDataItems {
kSnapshotContext,
kLibraries,
kExperimentalLibraries,
-#if defined(V8_I18N_SUPPORT)
- kI18NExtension,
-#endif
kCompressedStartupDataCount
};
@@ -442,17 +443,6 @@ void V8::GetCompressedStartupData(StartupData* compressed_data) {
exp_libraries_source.length();
compressed_data[kExperimentalLibraries].raw_size =
i::ExperimentalNatives::GetRawScriptsSize();
-
-#if defined(V8_I18N_SUPPORT)
- i::Vector<const i::byte> i18n_extension_source =
- i::I18NNatives::GetScriptsSource();
- compressed_data[kI18NExtension].data =
- reinterpret_cast<const char*>(i18n_extension_source.start());
- compressed_data[kI18NExtension].compressed_size =
- i18n_extension_source.length();
- compressed_data[kI18NExtension].raw_size =
- i::I18NNatives::GetRawScriptsSize();
-#endif
#endif
}
@@ -482,15 +472,6 @@ void V8::SetDecompressedStartupData(StartupData* decompressed_data) {
decompressed_data[kExperimentalLibraries].data,
decompressed_data[kExperimentalLibraries].raw_size);
i::ExperimentalNatives::SetRawScriptsSource(exp_libraries_source);
-
-#if defined(V8_I18N_SUPPORT)
- ASSERT_EQ(i::I18NNatives::GetRawScriptsSize(),
- decompressed_data[kI18NExtension].raw_size);
- i::Vector<const char> i18n_extension_source(
- decompressed_data[kI18NExtension].data,
- decompressed_data[kI18NExtension].raw_size);
- i::I18NNatives::SetRawScriptsSource(i18n_extension_source);
-#endif
#endif
}
@@ -621,7 +602,8 @@ ResourceConstraints::ResourceConstraints()
: max_young_space_size_(0),
max_old_space_size_(0),
max_executable_size_(0),
- stack_limit_(NULL) { }
+ stack_limit_(NULL),
+ is_memory_constrained_() { }
bool SetResourceConstraints(ResourceConstraints* constraints) {
@@ -642,6 +624,10 @@ bool SetResourceConstraints(ResourceConstraints* constraints) {
uintptr_t limit = reinterpret_cast<uintptr_t>(constraints->stack_limit());
isolate->stack_guard()->SetStackLimit(limit);
}
+ if (constraints->is_memory_constrained().has_value) {
+ isolate->set_is_memory_constrained(
+ constraints->is_memory_constrained().value);
+ }
return true;
}
@@ -657,11 +643,22 @@ i::Object** V8::GlobalizeReference(i::Isolate* isolate, i::Object** obj) {
}
+i::Object** V8::CopyPersistent(i::Object** obj) {
+ i::Handle<i::Object> result = i::GlobalHandles::CopyGlobal(obj);
+#ifdef DEBUG
+ (*obj)->Verify();
+#endif // DEBUG
+ return result.location();
+}
+
+
void V8::MakeWeak(i::Object** object,
void* parameters,
+ WeakCallback weak_callback,
RevivableCallback weak_reference_callback) {
i::GlobalHandles::MakeWeak(object,
parameters,
+ weak_callback,
weak_reference_callback);
}
@@ -676,24 +673,22 @@ void V8::DisposeGlobal(i::Object** obj) {
}
-int V8::Eternalize(i::Isolate* isolate, i::Object** handle) {
- return isolate->eternal_handles()->Create(isolate, *handle);
+void V8::Eternalize(Isolate* v8_isolate, Value* value, int* index) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
+ i::Object* object = *Utils::OpenHandle(value);
+ isolate->eternal_handles()->Create(isolate, object, index);
}
-i::Object** V8::GetEternal(i::Isolate* isolate, int index) {
- return isolate->eternal_handles()->Get(index).location();
+Local<Value> V8::GetEternal(Isolate* v8_isolate, int index) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
+ return Utils::ToLocal(isolate->eternal_handles()->Get(index));
}
// --- H a n d l e s ---
-HandleScope::HandleScope() {
- Initialize(reinterpret_cast<Isolate*>(i::Isolate::Current()));
-}
-
-
HandleScope::HandleScope(Isolate* isolate) {
Initialize(isolate);
}
@@ -955,21 +950,62 @@ static void InitializeTemplate(i::Handle<i::TemplateInfo> that, int type) {
}
-void Template::Set(v8::Handle<String> name, v8::Handle<Data> value,
+static void TemplateSet(i::Isolate* isolate,
+ v8::Template* templ,
+ int length,
+ v8::Handle<v8::Data>* data) {
+ i::Handle<i::Object> list(Utils::OpenHandle(templ)->property_list(), isolate);
+ if (list->IsUndefined()) {
+ list = NeanderArray().value();
+ Utils::OpenHandle(templ)->set_property_list(*list);
+ }
+ NeanderArray array(list);
+ array.add(Utils::OpenHandle(*v8::Integer::New(length)));
+ for (int i = 0; i < length; i++) {
+ i::Handle<i::Object> value = data[i].IsEmpty() ?
+ i::Handle<i::Object>(isolate->factory()->undefined_value()) :
+ Utils::OpenHandle(*data[i]);
+ array.add(value);
+ }
+}
+
+
+void Template::Set(v8::Handle<String> name,
+ v8::Handle<Data> value,
v8::PropertyAttribute attribute) {
i::Isolate* isolate = i::Isolate::Current();
if (IsDeadCheck(isolate, "v8::Template::Set()")) return;
ENTER_V8(isolate);
i::HandleScope scope(isolate);
- i::Handle<i::Object> list(Utils::OpenHandle(this)->property_list(), isolate);
- if (list->IsUndefined()) {
- list = NeanderArray().value();
- Utils::OpenHandle(this)->set_property_list(*list);
- }
- NeanderArray array(list);
- array.add(Utils::OpenHandle(*name));
- array.add(Utils::OpenHandle(*value));
- array.add(Utils::OpenHandle(*v8::Integer::New(attribute)));
+ const int kSize = 3;
+ v8::Handle<v8::Data> data[kSize] = {
+ name,
+ value,
+ v8::Integer::New(attribute)};
+ TemplateSet(isolate, this, kSize, data);
+}
+
+
+void Template::SetAccessorProperty(
+ v8::Local<v8::String> name,
+ v8::Local<FunctionTemplate> getter,
+ v8::Local<FunctionTemplate> setter,
+ v8::PropertyAttribute attribute,
+ v8::AccessControl access_control) {
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ if (IsDeadCheck(isolate, "v8::Template::SetAccessor()")) return;
+ ENTER_V8(isolate);
+ ASSERT(!name.IsEmpty());
+ ASSERT(!getter.IsEmpty() || !setter.IsEmpty());
+ i::HandleScope scope(isolate);
+ const int kSize = 5;
+ v8::Handle<v8::Data> data[kSize] = {
+ name,
+ getter,
+ setter,
+ v8::Integer::New(attribute),
+ v8::Integer::New(access_control)};
+ TemplateSet(isolate, this, kSize, data);
}
@@ -1005,69 +1041,48 @@ void FunctionTemplate::Inherit(v8::Handle<FunctionTemplate> value) {
}
-// TODO(dcarney): Remove this abstraction when old callbacks are removed.
-class CallHandlerHelper {
- public:
- static inline void Set(Local<FunctionTemplate> function_template,
- InvocationCallback callback,
- v8::Handle<Value> data) {
- function_template->SetCallHandlerInternal(callback, data);
- }
- static inline void Set(Local<FunctionTemplate> function_template,
- FunctionCallback callback,
- v8::Handle<Value> data) {
- function_template->SetCallHandler(callback, data);
- }
-};
-
-
-template<typename Callback>
static Local<FunctionTemplate> FunctionTemplateNew(
- Callback callback,
+ i::Isolate* isolate,
+ FunctionCallback callback,
v8::Handle<Value> data,
v8::Handle<Signature> signature,
- int length) {
- i::Isolate* isolate = i::Isolate::Current();
- EnsureInitializedForIsolate(isolate, "v8::FunctionTemplate::New()");
- LOG_API(isolate, "FunctionTemplate::New");
- ENTER_V8(isolate);
+ int length,
+ bool do_not_cache) {
i::Handle<i::Struct> struct_obj =
isolate->factory()->NewStruct(i::FUNCTION_TEMPLATE_INFO_TYPE);
i::Handle<i::FunctionTemplateInfo> obj =
i::Handle<i::FunctionTemplateInfo>::cast(struct_obj);
InitializeFunctionTemplate(obj);
- int next_serial_number = isolate->next_serial_number();
- isolate->set_next_serial_number(next_serial_number + 1);
+ obj->set_do_not_cache(do_not_cache);
+ int next_serial_number = 0;
+ if (!do_not_cache) {
+ next_serial_number = isolate->next_serial_number() + 1;
+ isolate->set_next_serial_number(next_serial_number);
+ }
obj->set_serial_number(i::Smi::FromInt(next_serial_number));
if (callback != 0) {
if (data.IsEmpty()) data = v8::Undefined();
- CallHandlerHelper::Set(Utils::ToLocal(obj), callback, data);
+ Utils::ToLocal(obj)->SetCallHandler(callback, data);
}
obj->set_length(length);
obj->set_undetectable(false);
obj->set_needs_access_check(false);
-
if (!signature.IsEmpty())
obj->set_signature(*Utils::OpenHandle(*signature));
return Utils::ToLocal(obj);
}
-
-Local<FunctionTemplate> FunctionTemplate::New(
- InvocationCallback callback,
- v8::Handle<Value> data,
- v8::Handle<Signature> signature,
- int length) {
- return FunctionTemplateNew(callback, data, signature, length);
-}
-
-
Local<FunctionTemplate> FunctionTemplate::New(
FunctionCallback callback,
v8::Handle<Value> data,
v8::Handle<Signature> signature,
int length) {
- return FunctionTemplateNew(callback, data, signature, length);
+ i::Isolate* isolate = i::Isolate::Current();
+ EnsureInitializedForIsolate(isolate, "v8::FunctionTemplate::New()");
+ LOG_API(isolate, "FunctionTemplate::New");
+ ENTER_V8(isolate);
+ return FunctionTemplateNew(
+ isolate, callback, data, signature, length, false);
}
@@ -1255,16 +1270,14 @@ int TypeSwitch::match(v8::Handle<Value> value) {
#define SET_FIELD_WRAPPED(obj, setter, cdata) do { \
- i::Handle<i::Object> foreign = FromCData(cdata); \
+ i::Handle<i::Object> foreign = FromCData(obj->GetIsolate(), cdata); \
(obj)->setter(*foreign); \
} while (false)
-template<typename Callback>
-static void FunctionTemplateSetCallHandler(FunctionTemplate* function_template,
- Callback callback_in,
- v8::Handle<Value> data) {
- i::Isolate* isolate = Utils::OpenHandle(function_template)->GetIsolate();
+void FunctionTemplate::SetCallHandler(FunctionCallback callback,
+ v8::Handle<Value> data) {
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
if (IsDeadCheck(isolate, "v8::FunctionTemplate::SetCallHandler()")) return;
ENTER_V8(isolate);
i::HandleScope scope(isolate);
@@ -1272,28 +1285,12 @@ static void FunctionTemplateSetCallHandler(FunctionTemplate* function_template,
isolate->factory()->NewStruct(i::CALL_HANDLER_INFO_TYPE);
i::Handle<i::CallHandlerInfo> obj =
i::Handle<i::CallHandlerInfo>::cast(struct_obj);
- FunctionCallback callback =
- i::CallbackTable::Register(isolate, callback_in);
SET_FIELD_WRAPPED(obj, set_callback, callback);
if (data.IsEmpty()) data = v8::Undefined();
obj->set_data(*Utils::OpenHandle(*data));
- Utils::OpenHandle(function_template)->set_call_code(*obj);
-}
-
-void FunctionTemplate::SetCallHandler(InvocationCallback callback,
- v8::Handle<Value> data) {
- FunctionTemplateSetCallHandler(this, callback, data);
+ Utils::OpenHandle(this)->set_call_code(*obj);
}
-void FunctionTemplate::SetCallHandlerInternal(InvocationCallback callback,
- v8::Handle<Value> data) {
- FunctionTemplateSetCallHandler(this, callback, data);
-}
-
-void FunctionTemplate::SetCallHandler(FunctionCallback callback,
- v8::Handle<Value> data) {
- FunctionTemplateSetCallHandler(this, callback, data);
-}
static i::Handle<i::AccessorInfo> SetAccessorInfoProperties(
i::Handle<i::AccessorInfo> obj,
@@ -1316,8 +1313,8 @@ static i::Handle<i::AccessorInfo> SetAccessorInfoProperties(
template<typename Getter, typename Setter>
static i::Handle<i::AccessorInfo> MakeAccessorInfo(
v8::Handle<String> name,
- Getter getter_in,
- Setter setter_in,
+ Getter getter,
+ Setter setter,
v8::Handle<Value> data,
v8::AccessControl settings,
v8::PropertyAttribute attributes,
@@ -1325,11 +1322,7 @@ static i::Handle<i::AccessorInfo> MakeAccessorInfo(
i::Isolate* isolate = Utils::OpenHandle(*name)->GetIsolate();
i::Handle<i::ExecutableAccessorInfo> obj =
isolate->factory()->NewExecutableAccessorInfo();
- AccessorGetterCallback getter =
- i::CallbackTable::Register(isolate, getter_in);
SET_FIELD_WRAPPED(obj, set_getter, getter);
- AccessorSetterCallback setter =
- i::CallbackTable::Register(isolate, setter_in);
SET_FIELD_WRAPPED(obj, set_setter, setter);
if (data.IsEmpty()) data = v8::Undefined();
obj->set_data(*Utils::OpenHandle(*data));
@@ -1407,124 +1400,14 @@ void FunctionTemplate::ReadOnlyPrototype() {
Utils::OpenHandle(this)->set_read_only_prototype(true);
}
-template<
- typename Getter,
- typename Setter,
- typename Query,
- typename Deleter,
- typename Enumerator>
-static void SetNamedInstancePropertyHandler(
- i::Handle<i::FunctionTemplateInfo> function_template,
- Getter getter_in,
- Setter setter_in,
- Query query_in,
- Deleter remover_in,
- Enumerator enumerator_in,
- Handle<Value> data) {
- i::Isolate* isolate = function_template->GetIsolate();
- if (IsDeadCheck(isolate,
- "v8::FunctionTemplate::SetNamedInstancePropertyHandler()")) {
- return;
- }
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
- i::Handle<i::Struct> struct_obj =
- isolate->factory()->NewStruct(i::INTERCEPTOR_INFO_TYPE);
- i::Handle<i::InterceptorInfo> obj =
- i::Handle<i::InterceptorInfo>::cast(struct_obj);
-
- NamedPropertyGetterCallback getter =
- i::CallbackTable::Register(isolate, getter_in);
- if (getter != 0) SET_FIELD_WRAPPED(obj, set_getter, getter);
- NamedPropertySetterCallback setter =
- i::CallbackTable::Register(isolate, setter_in);
- if (setter != 0) SET_FIELD_WRAPPED(obj, set_setter, setter);
- NamedPropertyQueryCallback query =
- i::CallbackTable::Register(isolate, query_in);
- if (query != 0) SET_FIELD_WRAPPED(obj, set_query, query);
- NamedPropertyDeleterCallback remover =
- i::CallbackTable::Register(isolate, remover_in);
- if (remover != 0) SET_FIELD_WRAPPED(obj, set_deleter, remover);
- NamedPropertyEnumeratorCallback enumerator =
- i::CallbackTable::Register(isolate, enumerator_in);
- if (enumerator != 0) SET_FIELD_WRAPPED(obj, set_enumerator, enumerator);
-
- if (data.IsEmpty()) data = v8::Undefined();
- obj->set_data(*Utils::OpenHandle(*data));
- function_template->set_named_property_handler(*obj);
-}
-
-
-template<
- typename Getter,
- typename Setter,
- typename Query,
- typename Deleter,
- typename Enumerator>
-static void SetIndexedInstancePropertyHandler(
- i::Handle<i::FunctionTemplateInfo> function_template,
- Getter getter_in,
- Setter setter_in,
- Query query_in,
- Deleter remover_in,
- Enumerator enumerator_in,
- Handle<Value> data) {
- i::Isolate* isolate = function_template->GetIsolate();
- if (IsDeadCheck(isolate,
- "v8::FunctionTemplate::SetIndexedInstancePropertyHandler()")) {
- return;
- }
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
- i::Handle<i::Struct> struct_obj =
- isolate->factory()->NewStruct(i::INTERCEPTOR_INFO_TYPE);
- i::Handle<i::InterceptorInfo> obj =
- i::Handle<i::InterceptorInfo>::cast(struct_obj);
-
- IndexedPropertyGetterCallback getter =
- i::CallbackTable::Register(isolate, getter_in);
- if (getter != 0) SET_FIELD_WRAPPED(obj, set_getter, getter);
- IndexedPropertySetterCallback setter =
- i::CallbackTable::Register(isolate, setter_in);
- if (setter != 0) SET_FIELD_WRAPPED(obj, set_setter, setter);
- IndexedPropertyQueryCallback query =
- i::CallbackTable::Register(isolate, query_in);
- if (query != 0) SET_FIELD_WRAPPED(obj, set_query, query);
- IndexedPropertyDeleterCallback remover =
- i::CallbackTable::Register(isolate, remover_in);
- if (remover != 0) SET_FIELD_WRAPPED(obj, set_deleter, remover);
- IndexedPropertyEnumeratorCallback enumerator =
- i::CallbackTable::Register(isolate, enumerator_in);
- if (enumerator != 0) SET_FIELD_WRAPPED(obj, set_enumerator, enumerator);
-
- if (data.IsEmpty()) data = v8::Undefined();
- obj->set_data(*Utils::OpenHandle(*data));
- function_template->set_indexed_property_handler(*obj);
-}
-
-template<typename Callback>
-static void SetInstanceCallAsFunctionHandler(
- i::Handle<i::FunctionTemplateInfo> function_template,
- Callback callback_in,
- Handle<Value> data) {
- i::Isolate* isolate = function_template->GetIsolate();
- if (IsDeadCheck(isolate,
- "v8::FunctionTemplate::SetInstanceCallAsFunctionHandler()")) {
+void FunctionTemplate::RemovePrototype() {
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ if (IsDeadCheck(isolate, "v8::FunctionTemplate::RemovePrototype()")) {
return;
}
ENTER_V8(isolate);
- i::HandleScope scope(isolate);
- i::Handle<i::Struct> struct_obj =
- isolate->factory()->NewStruct(i::CALL_HANDLER_INFO_TYPE);
- i::Handle<i::CallHandlerInfo> obj =
- i::Handle<i::CallHandlerInfo>::cast(struct_obj);
- FunctionCallback callback =
- i::CallbackTable::Register(isolate, callback_in);
- SET_FIELD_WRAPPED(obj, set_callback, callback);
- if (data.IsEmpty()) data = v8::Undefined();
- obj->set_data(*Utils::OpenHandle(*data));
- function_template->set_instance_call_handler(*obj);
+ Utils::OpenHandle(this)->set_remove_prototype(true);
}
@@ -1559,63 +1442,91 @@ Local<ObjectTemplate> ObjectTemplate::New(
// Ensure that the object template has a constructor. If no
// constructor is available we create one.
-static void EnsureConstructor(ObjectTemplate* object_template) {
- if (Utils::OpenHandle(object_template)->constructor()->IsUndefined()) {
- Local<FunctionTemplate> templ = FunctionTemplate::New();
- i::Handle<i::FunctionTemplateInfo> constructor = Utils::OpenHandle(*templ);
- constructor->set_instance_template(*Utils::OpenHandle(object_template));
- Utils::OpenHandle(object_template)->set_constructor(*constructor);
+static i::Handle<i::FunctionTemplateInfo> EnsureConstructor(
+ ObjectTemplate* object_template) {
+ i::Object* obj = Utils::OpenHandle(object_template)->constructor();
+ if (!obj ->IsUndefined()) {
+ i::FunctionTemplateInfo* info = i::FunctionTemplateInfo::cast(obj);
+ return i::Handle<i::FunctionTemplateInfo>(info, info->GetIsolate());
}
+ Local<FunctionTemplate> templ = FunctionTemplate::New();
+ i::Handle<i::FunctionTemplateInfo> constructor = Utils::OpenHandle(*templ);
+ constructor->set_instance_template(*Utils::OpenHandle(object_template));
+ Utils::OpenHandle(object_template)->set_constructor(*constructor);
+ return constructor;
}
-static inline void AddPropertyToFunctionTemplate(
- i::Handle<i::FunctionTemplateInfo> cons,
+static inline void AddPropertyToTemplate(
+ i::Handle<i::TemplateInfo> info,
i::Handle<i::AccessorInfo> obj) {
- i::Handle<i::Object> list(cons->property_accessors(), cons->GetIsolate());
+ i::Handle<i::Object> list(info->property_accessors(), info->GetIsolate());
if (list->IsUndefined()) {
list = NeanderArray().value();
- cons->set_property_accessors(*list);
+ info->set_property_accessors(*list);
}
NeanderArray array(list);
array.add(obj);
}
-template<typename Setter, typename Getter, typename Data>
-static bool ObjectTemplateSetAccessor(
- ObjectTemplate* object_template,
- v8::Handle<String> name,
+static inline i::Handle<i::TemplateInfo> GetTemplateInfo(
+ Template* template_obj) {
+ return Utils::OpenHandle(template_obj);
+}
+
+
+// TODO(dcarney): remove this with ObjectTemplate::SetAccessor
+static inline i::Handle<i::TemplateInfo> GetTemplateInfo(
+ ObjectTemplate* object_template) {
+ EnsureConstructor(object_template);
+ return Utils::OpenHandle(object_template);
+}
+
+
+template<typename Setter, typename Getter, typename Data, typename Template>
+static bool TemplateSetAccessor(
+ Template* template_obj,
+ v8::Local<String> name,
Getter getter,
Setter setter,
Data data,
AccessControl settings,
PropertyAttribute attribute,
- v8::Handle<AccessorSignature> signature) {
- i::Isolate* isolate = Utils::OpenHandle(object_template)->GetIsolate();
+ v8::Local<AccessorSignature> signature) {
+ i::Isolate* isolate = Utils::OpenHandle(template_obj)->GetIsolate();
if (IsDeadCheck(isolate, "v8::ObjectTemplate::SetAccessor()")) return false;
ENTER_V8(isolate);
i::HandleScope scope(isolate);
- EnsureConstructor(object_template);
- i::FunctionTemplateInfo* constructor = i::FunctionTemplateInfo::cast(
- Utils::OpenHandle(object_template)->constructor());
- i::Handle<i::FunctionTemplateInfo> cons(constructor);
i::Handle<i::AccessorInfo> obj = MakeAccessorInfo(
name, getter, setter, data, settings, attribute, signature);
if (obj.is_null()) return false;
- AddPropertyToFunctionTemplate(cons, obj);
+ i::Handle<i::TemplateInfo> info = GetTemplateInfo(template_obj);
+ AddPropertyToTemplate(info, obj);
return true;
}
-void ObjectTemplate::SetAccessor(v8::Handle<String> name,
- AccessorGetter getter,
- AccessorSetter setter,
- v8::Handle<Value> data,
- AccessControl settings,
- PropertyAttribute attribute,
- v8::Handle<AccessorSignature> signature) {
- ObjectTemplateSetAccessor(
+bool Template::SetDeclaredAccessor(
+ Local<String> name,
+ Local<DeclaredAccessorDescriptor> descriptor,
+ PropertyAttribute attribute,
+ Local<AccessorSignature> signature,
+ AccessControl settings) {
+ void* null = NULL;
+ return TemplateSetAccessor(
+ this, name, descriptor, null, null, settings, attribute, signature);
+}
+
+
+void Template::SetNativeDataProperty(v8::Local<String> name,
+ AccessorGetterCallback getter,
+ AccessorSetterCallback setter,
+ v8::Handle<Value> data,
+ PropertyAttribute attribute,
+ v8::Local<AccessorSignature> signature,
+ AccessControl settings) {
+ TemplateSetAccessor(
this, name, getter, setter, data, settings, attribute, signature);
}
@@ -1627,77 +1538,42 @@ void ObjectTemplate::SetAccessor(v8::Handle<String> name,
AccessControl settings,
PropertyAttribute attribute,
v8::Handle<AccessorSignature> signature) {
- ObjectTemplateSetAccessor(
+ TemplateSetAccessor(
this, name, getter, setter, data, settings, attribute, signature);
}
-bool ObjectTemplate::SetAccessor(Handle<String> name,
- Handle<DeclaredAccessorDescriptor> descriptor,
- AccessControl settings,
- PropertyAttribute attribute,
- Handle<AccessorSignature> signature) {
- void* null = NULL;
- return ObjectTemplateSetAccessor(
- this, name, descriptor, null, null, settings, attribute, signature);
-}
-
-
-template<
- typename Getter,
- typename Setter,
- typename Query,
- typename Deleter,
- typename Enumerator>
-static void ObjectTemplateSetNamedPropertyHandler(
- ObjectTemplate* object_template,
- Getter getter,
- Setter setter,
- Query query,
- Deleter remover,
- Enumerator enumerator,
+void ObjectTemplate::SetNamedPropertyHandler(
+ NamedPropertyGetterCallback getter,
+ NamedPropertySetterCallback setter,
+ NamedPropertyQueryCallback query,
+ NamedPropertyDeleterCallback remover,
+ NamedPropertyEnumeratorCallback enumerator,
Handle<Value> data) {
- i::Isolate* isolate = Utils::OpenHandle(object_template)->GetIsolate();
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
if (IsDeadCheck(isolate, "v8::ObjectTemplate::SetNamedPropertyHandler()")) {
return;
}
ENTER_V8(isolate);
i::HandleScope scope(isolate);
- EnsureConstructor(object_template);
+ EnsureConstructor(this);
i::FunctionTemplateInfo* constructor = i::FunctionTemplateInfo::cast(
- Utils::OpenHandle(object_template)->constructor());
+ Utils::OpenHandle(this)->constructor());
i::Handle<i::FunctionTemplateInfo> cons(constructor);
- SetNamedInstancePropertyHandler(cons,
- getter,
- setter,
- query,
- remover,
- enumerator,
- data);
-}
-
-
-void ObjectTemplate::SetNamedPropertyHandler(
- NamedPropertyGetter getter,
- NamedPropertySetter setter,
- NamedPropertyQuery query,
- NamedPropertyDeleter remover,
- NamedPropertyEnumerator enumerator,
- Handle<Value> data) {
- ObjectTemplateSetNamedPropertyHandler(
- this, getter, setter, query, remover, enumerator, data);
-}
+ i::Handle<i::Struct> struct_obj =
+ isolate->factory()->NewStruct(i::INTERCEPTOR_INFO_TYPE);
+ i::Handle<i::InterceptorInfo> obj =
+ i::Handle<i::InterceptorInfo>::cast(struct_obj);
+ if (getter != 0) SET_FIELD_WRAPPED(obj, set_getter, getter);
+ if (setter != 0) SET_FIELD_WRAPPED(obj, set_setter, setter);
+ if (query != 0) SET_FIELD_WRAPPED(obj, set_query, query);
+ if (remover != 0) SET_FIELD_WRAPPED(obj, set_deleter, remover);
+ if (enumerator != 0) SET_FIELD_WRAPPED(obj, set_enumerator, enumerator);
-void ObjectTemplate::SetNamedPropertyHandler(
- NamedPropertyGetterCallback getter,
- NamedPropertySetterCallback setter,
- NamedPropertyQueryCallback query,
- NamedPropertyDeleterCallback remover,
- NamedPropertyEnumeratorCallback enumerator,
- Handle<Value> data) {
- ObjectTemplateSetNamedPropertyHandler(
- this, getter, setter, query, remover, enumerator, data);
+ if (data.IsEmpty()) data = v8::Undefined();
+ obj->set_data(*Utils::OpenHandle(*data));
+ cons->set_named_property_handler(*obj);
}
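
The interceptor registration above now accepts only the void-returning callback types. A minimal sketch of a conforming named getter (names illustrative, scope setup elided):

void TraceNamedGetter(v8::Local<v8::String> property,
                      const v8::PropertyCallbackInfo<v8::Value>& info) {
  // Leaving the return value untouched lets the normal lookup proceed;
  // setting it intercepts the property access.
  info.GetReturnValue().Set(v8::String::New("intercepted"));
}

void InstallInterceptor() {  // hypothetical setup helper
  v8::Local<v8::ObjectTemplate> templ = v8::ObjectTemplate::New();
  templ->SetNamedPropertyHandler(TraceNamedGetter);
}
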
@@ -1746,93 +1622,61 @@ void ObjectTemplate::SetAccessCheckCallbacks(
}
-template<
- typename Getter,
- typename Setter,
- typename Query,
- typename Deleter,
- typename Enumerator>
-void ObjectTemplateSetIndexedPropertyHandler(
- ObjectTemplate* object_template,
- Getter getter,
- Setter setter,
- Query query,
- Deleter remover,
- Enumerator enumerator,
+void ObjectTemplate::SetIndexedPropertyHandler(
+ IndexedPropertyGetterCallback getter,
+ IndexedPropertySetterCallback setter,
+ IndexedPropertyQueryCallback query,
+ IndexedPropertyDeleterCallback remover,
+ IndexedPropertyEnumeratorCallback enumerator,
Handle<Value> data) {
- i::Isolate* isolate = Utils::OpenHandle(object_template)->GetIsolate();
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
if (IsDeadCheck(isolate, "v8::ObjectTemplate::SetIndexedPropertyHandler()")) {
return;
}
ENTER_V8(isolate);
i::HandleScope scope(isolate);
- EnsureConstructor(object_template);
+ EnsureConstructor(this);
i::FunctionTemplateInfo* constructor = i::FunctionTemplateInfo::cast(
- Utils::OpenHandle(object_template)->constructor());
+ Utils::OpenHandle(this)->constructor());
i::Handle<i::FunctionTemplateInfo> cons(constructor);
- SetIndexedInstancePropertyHandler(cons,
- getter,
- setter,
- query,
- remover,
- enumerator,
- data);
-}
-
-
-void ObjectTemplate::SetIndexedPropertyHandler(
- IndexedPropertyGetter getter,
- IndexedPropertySetter setter,
- IndexedPropertyQuery query,
- IndexedPropertyDeleter remover,
- IndexedPropertyEnumerator enumerator,
- Handle<Value> data) {
- ObjectTemplateSetIndexedPropertyHandler(
- this, getter, setter, query, remover, enumerator, data);
-}
+ i::Handle<i::Struct> struct_obj =
+ isolate->factory()->NewStruct(i::INTERCEPTOR_INFO_TYPE);
+ i::Handle<i::InterceptorInfo> obj =
+ i::Handle<i::InterceptorInfo>::cast(struct_obj);
+ if (getter != 0) SET_FIELD_WRAPPED(obj, set_getter, getter);
+ if (setter != 0) SET_FIELD_WRAPPED(obj, set_setter, setter);
+ if (query != 0) SET_FIELD_WRAPPED(obj, set_query, query);
+ if (remover != 0) SET_FIELD_WRAPPED(obj, set_deleter, remover);
+ if (enumerator != 0) SET_FIELD_WRAPPED(obj, set_enumerator, enumerator);
-void ObjectTemplate::SetIndexedPropertyHandler(
- IndexedPropertyGetterCallback getter,
- IndexedPropertySetterCallback setter,
- IndexedPropertyQueryCallback query,
- IndexedPropertyDeleterCallback remover,
- IndexedPropertyEnumeratorCallback enumerator,
- Handle<Value> data) {
- ObjectTemplateSetIndexedPropertyHandler(
- this, getter, setter, query, remover, enumerator, data);
+ if (data.IsEmpty()) data = v8::Undefined();
+ obj->set_data(*Utils::OpenHandle(*data));
+ cons->set_indexed_property_handler(*obj);
}
-template<typename Callback>
-static void ObjectTemplateSetCallAsFunctionHandler(
- ObjectTemplate* object_template,
- Callback callback,
- Handle<Value> data) {
- i::Isolate* isolate = Utils::OpenHandle(object_template)->GetIsolate();
+void ObjectTemplate::SetCallAsFunctionHandler(FunctionCallback callback,
+ Handle<Value> data) {
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
if (IsDeadCheck(isolate,
"v8::ObjectTemplate::SetCallAsFunctionHandler()")) {
return;
}
ENTER_V8(isolate);
i::HandleScope scope(isolate);
- EnsureConstructor(object_template);
+ EnsureConstructor(this);
i::FunctionTemplateInfo* constructor = i::FunctionTemplateInfo::cast(
- Utils::OpenHandle(object_template)->constructor());
+ Utils::OpenHandle(this)->constructor());
i::Handle<i::FunctionTemplateInfo> cons(constructor);
- SetInstanceCallAsFunctionHandler(cons, callback, data);
-}
-
-
-void ObjectTemplate::SetCallAsFunctionHandler(InvocationCallback callback,
- Handle<Value> data) {
- return ObjectTemplateSetCallAsFunctionHandler(this, callback, data);
-}
-
-
-void ObjectTemplate::SetCallAsFunctionHandler(FunctionCallback callback,
- Handle<Value> data) {
- return ObjectTemplateSetCallAsFunctionHandler(this, callback, data);
+ i::Handle<i::Struct> struct_obj =
+ isolate->factory()->NewStruct(i::CALL_HANDLER_INFO_TYPE);
+ i::Handle<i::CallHandlerInfo> obj =
+ i::Handle<i::CallHandlerInfo>::cast(struct_obj);
+ SET_FIELD_WRAPPED(obj, set_callback, callback);
+ if (data.IsEmpty()) data = v8::Undefined();
+ obj->set_data(*Utils::OpenHandle(*data));
+ cons->set_instance_call_handler(*obj);
}
@@ -1872,19 +1716,20 @@ void ObjectTemplate::SetInternalFieldCount(int value) {
ScriptData* ScriptData::PreCompile(const char* input, int length) {
i::Utf8ToUtf16CharacterStream stream(
reinterpret_cast<const unsigned char*>(input), length);
- return i::PreParserApi::PreParse(&stream);
+ return i::PreParserApi::PreParse(i::Isolate::Current(), &stream);
}
ScriptData* ScriptData::PreCompile(v8::Handle<String> source) {
i::Handle<i::String> str = Utils::OpenHandle(*source);
+ i::Isolate* isolate = str->GetIsolate();
if (str->IsExternalTwoByteString()) {
i::ExternalTwoByteStringUtf16CharacterStream stream(
i::Handle<i::ExternalTwoByteString>::cast(str), 0, str->length());
- return i::PreParserApi::PreParse(&stream);
+ return i::PreParserApi::PreParse(isolate, &stream);
} else {
i::GenericStringUtf16CharacterStream stream(str, 0, str->length());
- return i::PreParserApi::PreParse(&stream);
+ return i::PreParserApi::PreParse(isolate, &stream);
}
}
@@ -2035,8 +1880,8 @@ Local<Value> Script::Run() {
EXCEPTION_PREAMBLE(isolate);
i::Handle<i::Object> receiver(
isolate->context()->global_proxy(), isolate);
- i::Handle<i::Object> result =
- i::Execution::Call(fun, receiver, 0, NULL, &has_pending_exception);
+ i::Handle<i::Object> result = i::Execution::Call(
+ isolate, fun, receiver, 0, NULL, &has_pending_exception);
EXCEPTION_BAILOUT_CHECK_DO_CALLBACK(isolate, Local<Value>());
raw_result = *result;
}
@@ -2335,8 +2180,8 @@ static i::Handle<i::Object> CallV8HeapFunction(const char* name,
isolate->js_builtins_object()->GetPropertyNoExceptionThrown(*fmt_str);
i::Handle<i::JSFunction> fun =
i::Handle<i::JSFunction>(i::JSFunction::cast(object_fun));
- i::Handle<i::Object> value =
- i::Execution::Call(fun, recv, argc, argv, has_pending_exception);
+ i::Handle<i::Object> value = i::Execution::Call(
+ isolate, fun, recv, argc, argv, has_pending_exception);
return value;
}
@@ -2478,7 +2323,7 @@ Local<StackFrame> StackTrace::GetFrame(uint32_t index) const {
ENTER_V8(isolate);
HandleScope scope(reinterpret_cast<Isolate*>(isolate));
i::Handle<i::JSArray> self = Utils::OpenHandle(this);
- i::Object* raw_object = self->GetElementNoExceptionThrown(index);
+ i::Object* raw_object = self->GetElementNoExceptionThrown(isolate, index);
i::Handle<i::JSObject> obj(i::JSObject::cast(raw_object));
return scope.Close(Utils::StackFrameToLocal(obj));
}
@@ -2547,6 +2392,22 @@ int StackFrame::GetColumn() const {
}
+int StackFrame::GetScriptId() const {
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ if (IsDeadCheck(isolate, "v8::StackFrame::GetScriptId()")) {
+ return Message::kNoScriptIdInfo;
+ }
+ ENTER_V8(isolate);
+ i::HandleScope scope(isolate);
+ i::Handle<i::JSObject> self = Utils::OpenHandle(this);
+ i::Handle<i::Object> scriptId = GetProperty(self, "scriptId");
+ if (!scriptId->IsSmi()) {
+ return Message::kNoScriptIdInfo;
+ }
+ return i::Smi::cast(*scriptId)->value();
+}
+
+
Local<String> StackFrame::GetScriptName() const {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
if (IsDeadCheck(isolate, "v8::StackFrame::GetScriptName()")) {
@@ -2913,7 +2774,7 @@ Local<String> Value::ToString() const {
LOG_API(isolate, "ToString");
ENTER_V8(isolate);
EXCEPTION_PREAMBLE(isolate);
- str = i::Execution::ToString(obj, &has_pending_exception);
+ str = i::Execution::ToString(isolate, obj, &has_pending_exception);
EXCEPTION_BAILOUT_CHECK(isolate, Local<String>());
}
return ToApiHandle<String>(str);
@@ -2933,7 +2794,7 @@ Local<String> Value::ToDetailString() const {
LOG_API(isolate, "ToDetailString");
ENTER_V8(isolate);
EXCEPTION_PREAMBLE(isolate);
- str = i::Execution::ToDetailString(obj, &has_pending_exception);
+ str = i::Execution::ToDetailString(isolate, obj, &has_pending_exception);
EXCEPTION_BAILOUT_CHECK(isolate, Local<String>());
}
return ToApiHandle<String>(str);
@@ -2953,7 +2814,7 @@ Local<v8::Object> Value::ToObject() const {
LOG_API(isolate, "ToObject");
ENTER_V8(isolate);
EXCEPTION_PREAMBLE(isolate);
- val = i::Execution::ToObject(obj, &has_pending_exception);
+ val = i::Execution::ToObject(isolate, obj, &has_pending_exception);
EXCEPTION_BAILOUT_CHECK(isolate, Local<v8::Object>());
}
return ToApiHandle<Object>(val);
@@ -2991,7 +2852,7 @@ Local<Number> Value::ToNumber() const {
LOG_API(isolate, "ToNumber");
ENTER_V8(isolate);
EXCEPTION_PREAMBLE(isolate);
- num = i::Execution::ToNumber(obj, &has_pending_exception);
+ num = i::Execution::ToNumber(isolate, obj, &has_pending_exception);
EXCEPTION_BAILOUT_CHECK(isolate, Local<Number>());
}
return ToApiHandle<Number>(num);
@@ -3009,7 +2870,7 @@ Local<Integer> Value::ToInteger() const {
LOG_API(isolate, "ToInteger");
ENTER_V8(isolate);
EXCEPTION_PREAMBLE(isolate);
- num = i::Execution::ToInteger(obj, &has_pending_exception);
+ num = i::Execution::ToInteger(isolate, obj, &has_pending_exception);
EXCEPTION_BAILOUT_CHECK(isolate, Local<Integer>());
}
return ToApiHandle<Integer>(num);
@@ -3018,7 +2879,7 @@ Local<Integer> Value::ToInteger() const {
void i::Internals::CheckInitializedImpl(v8::Isolate* external_isolate) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(external_isolate);
- ApiCheck(isolate != NULL && isolate->IsInitialized() && !i::V8::IsDead(),
+ ApiCheck(isolate != NULL && isolate->IsInitialized() && !isolate->IsDead(),
"v8::internal::Internals::CheckInitialized()",
"Isolate is not initialized or V8 has died");
}
@@ -3104,12 +2965,6 @@ void v8::ArrayBuffer::CheckCast(Value* that) {
}
-void v8::ArrayBuffer::Allocator::Free(void* data) {
- API_Fatal("v8::ArrayBuffer::Allocator::Free",
- "Override Allocator::Free(void*, size_t)");
-}
-
-
void v8::ArrayBufferView::CheckCast(Value* that) {
i::Handle<i::Object> obj = Utils::OpenHandle(that);
ApiCheck(obj->IsJSArrayBufferView(),
@@ -3229,7 +3084,7 @@ double Value::NumberValue() const {
LOG_API(isolate, "NumberValue");
ENTER_V8(isolate);
EXCEPTION_PREAMBLE(isolate);
- num = i::Execution::ToNumber(obj, &has_pending_exception);
+ num = i::Execution::ToNumber(isolate, obj, &has_pending_exception);
EXCEPTION_BAILOUT_CHECK(isolate, i::OS::nan_value());
}
return num->Number();
@@ -3247,7 +3102,7 @@ int64_t Value::IntegerValue() const {
LOG_API(isolate, "IntegerValue");
ENTER_V8(isolate);
EXCEPTION_PREAMBLE(isolate);
- num = i::Execution::ToInteger(obj, &has_pending_exception);
+ num = i::Execution::ToInteger(isolate, obj, &has_pending_exception);
EXCEPTION_BAILOUT_CHECK(isolate, 0);
}
if (num->IsSmi()) {
@@ -3269,7 +3124,7 @@ Local<Int32> Value::ToInt32() const {
LOG_API(isolate, "ToInt32");
ENTER_V8(isolate);
EXCEPTION_PREAMBLE(isolate);
- num = i::Execution::ToInt32(obj, &has_pending_exception);
+ num = i::Execution::ToInt32(isolate, obj, &has_pending_exception);
EXCEPTION_BAILOUT_CHECK(isolate, Local<Int32>());
}
return ToApiHandle<Int32>(num);
@@ -3287,7 +3142,7 @@ Local<Uint32> Value::ToUint32() const {
LOG_API(isolate, "ToUInt32");
ENTER_V8(isolate);
EXCEPTION_PREAMBLE(isolate);
- num = i::Execution::ToUint32(obj, &has_pending_exception);
+ num = i::Execution::ToUint32(isolate, obj, &has_pending_exception);
EXCEPTION_BAILOUT_CHECK(isolate, Local<Uint32>());
}
return ToApiHandle<Uint32>(num);
@@ -3306,7 +3161,7 @@ Local<Uint32> Value::ToArrayIndex() const {
ENTER_V8(isolate);
EXCEPTION_PREAMBLE(isolate);
i::Handle<i::Object> string_obj =
- i::Execution::ToString(obj, &has_pending_exception);
+ i::Execution::ToString(isolate, obj, &has_pending_exception);
EXCEPTION_BAILOUT_CHECK(isolate, Local<Uint32>());
i::Handle<i::String> str = i::Handle<i::String>::cast(string_obj);
uint32_t index;
@@ -3334,7 +3189,7 @@ int32_t Value::Int32Value() const {
ENTER_V8(isolate);
EXCEPTION_PREAMBLE(isolate);
i::Handle<i::Object> num =
- i::Execution::ToInt32(obj, &has_pending_exception);
+ i::Execution::ToInt32(isolate, obj, &has_pending_exception);
EXCEPTION_BAILOUT_CHECK(isolate, 0);
if (num->IsSmi()) {
return i::Smi::cast(*num)->value();
@@ -3415,7 +3270,7 @@ uint32_t Value::Uint32Value() const {
ENTER_V8(isolate);
EXCEPTION_PREAMBLE(isolate);
i::Handle<i::Object> num =
- i::Execution::ToUint32(obj, &has_pending_exception);
+ i::Execution::ToUint32(isolate, obj, &has_pending_exception);
EXCEPTION_BAILOUT_CHECK(isolate, 0);
if (num->IsSmi()) {
return i::Smi::cast(*num)->value();
@@ -3535,7 +3390,7 @@ Local<Value> v8::Object::Get(uint32_t index) {
ENTER_V8(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> result = i::Object::GetElement(self, index);
+ i::Handle<i::Object> result = i::Object::GetElement(isolate, self, index);
has_pending_exception = result.is_null();
EXCEPTION_BAILOUT_CHECK(isolate, Local<Value>());
return Utils::ToLocal(result);
@@ -3552,7 +3407,7 @@ PropertyAttribute v8::Object::GetPropertyAttributes(v8::Handle<Value> key) {
i::Handle<i::Object> key_obj = Utils::OpenHandle(*key);
if (!key_obj->IsName()) {
EXCEPTION_PREAMBLE(isolate);
- key_obj = i::Execution::ToString(key_obj, &has_pending_exception);
+ key_obj = i::Execution::ToString(isolate, key_obj, &has_pending_exception);
EXCEPTION_BAILOUT_CHECK(isolate, static_cast<PropertyAttribute>(NONE));
}
i::Handle<i::Name> key_name = i::Handle<i::Name>::cast(key_obj);
@@ -3762,7 +3617,7 @@ bool v8::Object::Delete(uint32_t index) {
ENTER_V8(isolate);
HandleScope scope(reinterpret_cast<Isolate*>(isolate));
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- return i::JSObject::DeleteElement(self, index)->IsTrue();
+ return i::JSReceiver::DeleteElement(self, index)->IsTrue();
}
@@ -3791,7 +3646,8 @@ static inline bool ObjectSetAccessor(Object* obj,
name, getter, setter, data, settings, attributes, signature);
if (info.is_null()) return false;
bool fast = Utils::OpenHandle(obj)->HasFastProperties();
- i::Handle<i::Object> result = i::SetAccessor(Utils::OpenHandle(obj), info);
+ i::Handle<i::Object> result =
+ i::JSObject::SetAccessor(Utils::OpenHandle(obj), info);
if (result.is_null() || result->IsUndefined()) return false;
if (fast) i::JSObject::TransformToFastProperties(Utils::OpenHandle(obj), 0);
return true;
@@ -3799,17 +3655,6 @@ static inline bool ObjectSetAccessor(Object* obj,
bool Object::SetAccessor(Handle<String> name,
- AccessorGetter getter,
- AccessorSetter setter,
- v8::Handle<Value> data,
- AccessControl settings,
- PropertyAttribute attributes) {
- return ObjectSetAccessor(
- this, name, getter, setter, data, settings, attributes);
-}
-
-
-bool Object::SetAccessor(Handle<String> name,
AccessorGetterCallback getter,
AccessorSetterCallback setter,
v8::Handle<Value> data,
@@ -3820,10 +3665,10 @@ bool Object::SetAccessor(Handle<String> name,
}
-bool Object::SetAccessor(Handle<String> name,
- Handle<DeclaredAccessorDescriptor> descriptor,
- AccessControl settings,
- PropertyAttribute attributes) {
+bool Object::SetDeclaredAccessor(Local<String> name,
+ Local<DeclaredAccessorDescriptor> descriptor,
+ PropertyAttribute attributes,
+ AccessControl settings) {
void* null = NULL;
return ObjectSetAccessor(
this, name, descriptor, null, null, settings, attributes);
@@ -4053,7 +3898,7 @@ bool v8::Object::DeleteHiddenValue(v8::Handle<v8::String> key) {
i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
i::Handle<i::String> key_string =
isolate->factory()->InternalizeString(key_obj);
- self->DeleteHiddenProperty(*key_string);
+ i::JSObject::DeleteHiddenProperty(self, key_string);
return true;
}
@@ -4264,7 +4109,7 @@ bool v8::Object::IsCallable() {
i::HandleScope scope(isolate);
i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
if (obj->IsJSFunction()) return true;
- return i::Execution::GetFunctionDelegate(obj)->IsJSFunction();
+ return i::Execution::GetFunctionDelegate(isolate, obj)->IsJSFunction();
}
@@ -4288,15 +4133,15 @@ Local<v8::Value> Object::CallAsFunction(v8::Handle<v8::Object> recv,
fun = i::Handle<i::JSFunction>::cast(obj);
} else {
EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> delegate =
- i::Execution::TryGetFunctionDelegate(obj, &has_pending_exception);
+ i::Handle<i::Object> delegate = i::Execution::TryGetFunctionDelegate(
+ isolate, obj, &has_pending_exception);
EXCEPTION_BAILOUT_CHECK(isolate, Local<Value>());
fun = i::Handle<i::JSFunction>::cast(delegate);
recv_obj = obj;
}
EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> returned =
- i::Execution::Call(fun, recv_obj, argc, args, &has_pending_exception);
+ i::Handle<i::Object> returned = i::Execution::Call(
+ isolate, fun, recv_obj, argc, args, &has_pending_exception);
EXCEPTION_BAILOUT_CHECK_DO_CALLBACK(isolate, Local<Value>());
return Utils::ToLocal(scope.CloseAndEscape(returned));
}
@@ -4325,14 +4170,14 @@ Local<v8::Value> Object::CallAsConstructor(int argc,
i::Handle<i::JSObject>::cast(returned)));
}
EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> delegate =
- i::Execution::TryGetConstructorDelegate(obj, &has_pending_exception);
+ i::Handle<i::Object> delegate = i::Execution::TryGetConstructorDelegate(
+ isolate, obj, &has_pending_exception);
EXCEPTION_BAILOUT_CHECK(isolate, Local<v8::Object>());
if (!delegate->IsUndefined()) {
i::Handle<i::JSFunction> fun = i::Handle<i::JSFunction>::cast(delegate);
EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> returned =
- i::Execution::Call(fun, obj, argc, args, &has_pending_exception);
+ i::Handle<i::Object> returned = i::Execution::Call(
+ isolate, fun, obj, argc, args, &has_pending_exception);
EXCEPTION_BAILOUT_CHECK_DO_CALLBACK(isolate, Local<v8::Object>());
ASSERT(!delegate->IsUndefined());
return Utils::ToLocal(scope.CloseAndEscape(returned));
@@ -4341,6 +4186,19 @@ Local<v8::Value> Object::CallAsConstructor(int argc,
}
+Local<Function> Function::New(Isolate* v8_isolate,
+ FunctionCallback callback,
+ Local<Value> data,
+ int length) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
+ LOG_API(isolate, "Function::New");
+ ENTER_V8(isolate);
+ return FunctionTemplateNew(
+ isolate, callback, data, Local<Signature>(), length, true)->
+ GetFunction();
+}
+
+
Local<v8::Object> Function::NewInstance() const {
return NewInstance(0, NULL);
}
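
The Function::New added in the hunk above is a thin convenience: it creates an anonymous FunctionTemplate marked do-not-cache (the trailing true passed to FunctionTemplateNew, matching the kDoNotCacheBit handling in apinatives.js further down) and returns its function. A usage sketch, with an illustrative callback body and assuming a current context is entered:

void Echo(const v8::FunctionCallbackInfo<v8::Value>& info) {
  info.GetReturnValue().Set(info[0]);  // return the first argument
}

v8::Local<v8::Function> MakeEcho(v8::Isolate* isolate) {
  return v8::Function::New(isolate, Echo);
}
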
@@ -4383,8 +4241,8 @@ Local<v8::Value> Function::Call(v8::Handle<v8::Object> recv, int argc,
STATIC_ASSERT(sizeof(v8::Handle<v8::Value>) == sizeof(i::Object**));
i::Handle<i::Object>* args = reinterpret_cast<i::Handle<i::Object>*>(argv);
EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> returned =
- i::Execution::Call(fun, recv_obj, argc, args, &has_pending_exception);
+ i::Handle<i::Object> returned = i::Execution::Call(
+ isolate, fun, recv_obj, argc, args, &has_pending_exception);
EXCEPTION_BAILOUT_CHECK_DO_CALLBACK(isolate, Local<Object>());
raw_result = *returned;
}
@@ -5322,8 +5180,8 @@ bool v8::V8::Initialize() {
}
-void v8::V8::SetEntropySource(EntropySource source) {
- i::V8::SetEntropySource(source);
+void v8::V8::SetEntropySource(EntropySource entropy_source) {
+ i::RandomNumberGenerator::SetEntropySource(entropy_source);
}
@@ -5468,7 +5326,8 @@ bool v8::V8::IdleNotification(int hint) {
// continue to call IdleNotification.
i::Isolate* isolate = i::Isolate::Current();
if (isolate == NULL || !isolate->IsInitialized()) return true;
- return i::V8::IdleNotification(hint);
+ if (!i::FLAG_use_idle_notification) return true;
+ return isolate->heap()->IdleNotification(hint);
}
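
The contract stated in the comment above is unchanged: embedders call IdleNotification repeatedly until it returns true. A sketch of that loop (the hint value is illustrative):

// Hypothetical embedder idle loop: keep notifying until V8 reports that
// no further cleanup is useful.
while (!v8::V8::IdleNotification(100 /* hint */)) {
  // still idle; let V8 do another slice of incremental work
}
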
@@ -5496,18 +5355,6 @@ const char* v8::V8::GetVersion() {
}
-static i::Handle<i::FunctionTemplateInfo>
- EnsureConstructor(i::Handle<i::ObjectTemplateInfo> templ) {
- if (templ->constructor()->IsUndefined()) {
- Local<FunctionTemplate> constructor = FunctionTemplate::New();
- Utils::OpenHandle(*constructor)->set_instance_template(*templ);
- templ->set_constructor(*Utils::OpenHandle(*constructor));
- }
- return i::Handle<i::FunctionTemplateInfo>(
- i::FunctionTemplateInfo::cast(templ->constructor()));
-}
-
-
static i::Handle<i::Context> CreateEnvironment(
i::Isolate* isolate,
v8::ExtensionConfiguration* extensions,
@@ -5524,13 +5371,11 @@ static i::Handle<i::Context> CreateEnvironment(
if (!global_template.IsEmpty()) {
// Make sure that the global_template has a constructor.
- global_constructor =
- EnsureConstructor(Utils::OpenHandle(*global_template));
+ global_constructor = EnsureConstructor(*global_template);
// Create a fresh template for the global proxy object.
proxy_template = ObjectTemplate::New();
- proxy_constructor =
- EnsureConstructor(Utils::OpenHandle(*proxy_template));
+ proxy_constructor = EnsureConstructor(*proxy_template);
// Set the global template to be the prototype template of
// global proxy template.
@@ -5573,26 +5418,6 @@ static i::Handle<i::Context> CreateEnvironment(
return env;
}
-#ifdef V8_USE_UNSAFE_HANDLES
-Persistent<Context> v8::Context::New(
- v8::ExtensionConfiguration* extensions,
- v8::Handle<ObjectTemplate> global_template,
- v8::Handle<Value> global_object) {
- i::Isolate::EnsureDefaultIsolate();
- i::Isolate* isolate = i::Isolate::Current();
- Isolate* external_isolate = reinterpret_cast<Isolate*>(isolate);
- EnsureInitializedForIsolate(isolate, "v8::Context::New()");
- LOG_API(isolate, "Context::New");
- ON_BAILOUT(isolate, "v8::Context::New()", return Persistent<Context>());
- i::HandleScope scope(isolate);
- i::Handle<i::Context> env =
- CreateEnvironment(isolate, extensions, global_template, global_object);
- if (env.is_null()) return Persistent<Context>();
- return Persistent<Context>::New(external_isolate, Utils::ToLocal(env));
-}
-#endif
-
-
Local<Context> v8::Context::New(
v8::Isolate* external_isolate,
v8::ExtensionConfiguration* extensions,
@@ -6235,7 +6060,7 @@ Local<v8::Value> v8::Date::New(double time) {
ENTER_V8(isolate);
EXCEPTION_PREAMBLE(isolate);
i::Handle<i::Object> obj =
- i::Execution::NewDate(time, &has_pending_exception);
+ i::Execution::NewDate(isolate, time, &has_pending_exception);
EXCEPTION_BAILOUT_CHECK(isolate, Local<v8::Value>());
return Utils::ToLocal(obj);
}
@@ -6937,47 +6762,6 @@ void V8::RemoveCallCompletedCallback(CallCompletedCallback callback) {
}
-void V8::PauseProfiler() {
- i::Isolate* isolate = i::Isolate::Current();
- isolate->logger()->PauseProfiler();
-}
-
-
-void V8::ResumeProfiler() {
- i::Isolate* isolate = i::Isolate::Current();
- isolate->logger()->ResumeProfiler();
-}
-
-
-bool V8::IsProfilerPaused() {
- i::Isolate* isolate = i::Isolate::Current();
- return isolate->logger()->IsProfilerPaused();
-}
-
-
-int V8::GetCurrentThreadId() {
- i::Isolate* isolate = i::Isolate::Current();
- EnsureInitializedForIsolate(isolate, "V8::GetCurrentThreadId()");
- return isolate->thread_id().ToInteger();
-}
-
-
-void V8::TerminateExecution(int thread_id) {
- i::Isolate* isolate = i::Isolate::Current();
- if (!isolate->IsInitialized()) return;
- API_ENTRY_CHECK(isolate, "V8::TerminateExecution()");
- // If the thread_id identifies the current thread just terminate
- // execution right away. Otherwise, ask the thread manager to
- // terminate the thread with the given id if any.
- i::ThreadId internal_tid = i::ThreadId::FromInteger(thread_id);
- if (isolate->thread_id().Equals(internal_tid)) {
- isolate->stack_guard()->TerminateExecution();
- } else {
- isolate->thread_manager()->TerminateExecution(internal_tid);
- }
-}
-
-
void V8::TerminateExecution(Isolate* isolate) {
// If no isolate is supplied, use the default isolate.
if (isolate != NULL) {
@@ -7211,37 +6995,6 @@ Local<Value> Exception::Error(v8::Handle<v8::String> raw_message) {
#ifdef ENABLE_DEBUGGER_SUPPORT
-static void EventCallbackWrapper(const v8::Debug::EventDetails& event_details) {
- i::Isolate* isolate = i::Isolate::Current();
- if (isolate->debug_event_callback() != NULL) {
- isolate->debug_event_callback()(event_details.GetEvent(),
- event_details.GetExecutionState(),
- event_details.GetEventData(),
- event_details.GetCallbackData());
- }
-}
-
-
-bool Debug::SetDebugEventListener(EventCallback that, Handle<Value> data) {
- i::Isolate* isolate = i::Isolate::Current();
- EnsureInitializedForIsolate(isolate, "v8::Debug::SetDebugEventListener()");
- ON_BAILOUT(isolate, "v8::Debug::SetDebugEventListener()", return false);
- ENTER_V8(isolate);
-
- isolate->set_debug_event_callback(that);
-
- i::HandleScope scope(isolate);
- i::Handle<i::Object> foreign = isolate->factory()->undefined_value();
- if (that != NULL) {
- foreign =
- isolate->factory()->NewForeign(FUNCTION_ADDR(EventCallbackWrapper));
- }
- isolate->debugger()->SetEventListener(foreign,
- Utils::OpenHandle(*data, true));
- return true;
-}
-
-
bool Debug::SetDebugEventListener2(EventCallback2 that, Handle<Value> data) {
i::Isolate* isolate = i::Isolate::Current();
EnsureInitializedForIsolate(isolate, "v8::Debug::SetDebugEventListener2()");
@@ -7301,35 +7054,6 @@ void Debug::DebugBreakForCommand(ClientData* data, Isolate* isolate) {
}
-static void MessageHandlerWrapper(const v8::Debug::Message& message) {
- i::Isolate* isolate = i::Isolate::Current();
- if (isolate->message_handler()) {
- v8::String::Value json(message.GetJSON());
- (isolate->message_handler())(*json, json.length(), message.GetClientData());
- }
-}
-
-
-void Debug::SetMessageHandler(v8::Debug::MessageHandler handler,
- bool message_handler_thread) {
- i::Isolate* isolate = i::Isolate::Current();
- EnsureInitializedForIsolate(isolate, "v8::Debug::SetMessageHandler");
- ENTER_V8(isolate);
-
-  // Message handler thread not supported any more. Parameter temporarily
-  // left in the API for client compatibility reasons.
- CHECK(!message_handler_thread);
-
- // TODO(sgjesse) support the old message handler API through a simple wrapper.
- isolate->set_message_handler(handler);
- if (handler != NULL) {
- isolate->debugger()->SetMessageHandler(MessageHandlerWrapper);
- } else {
- isolate->debugger()->SetMessageHandler(NULL);
- }
-}
-
-
void Debug::SetMessageHandler2(v8::Debug::MessageHandler2 handler) {
i::Isolate* isolate = i::Isolate::Current();
EnsureInitializedForIsolate(isolate, "v8::Debug::SetMessageHandler");
@@ -7358,7 +7082,8 @@ void Debug::SetHostDispatchHandler(HostDispatchHandler handler,
i::Isolate* isolate = i::Isolate::Current();
EnsureInitializedForIsolate(isolate, "v8::Debug::SetHostDispatchHandler");
ENTER_V8(isolate);
- isolate->debugger()->SetHostDispatchHandler(handler, period);
+ isolate->debugger()->SetHostDispatchHandler(
+ handler, i::TimeDelta::FromMilliseconds(period));
}
@@ -7432,7 +7157,7 @@ void Debug::DisableAgent() {
void Debug::ProcessDebugMessages() {
- i::Execution::ProcessDebugMessages(true);
+ i::Execution::ProcessDebugMessages(i::Isolate::Current(), true);
}
@@ -7497,24 +7222,9 @@ int CpuProfileNode::GetLineNumber() const {
}
-double CpuProfileNode::GetTotalTime() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::CpuProfileNode::GetTotalTime");
- return reinterpret_cast<const i::ProfileNode*>(this)->GetTotalMillis();
-}
-
-
-double CpuProfileNode::GetSelfTime() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::CpuProfileNode::GetSelfTime");
- return reinterpret_cast<const i::ProfileNode*>(this)->GetSelfMillis();
-}
-
-
-double CpuProfileNode::GetTotalSamplesCount() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::CpuProfileNode::GetTotalSamplesCount");
- return reinterpret_cast<const i::ProfileNode*>(this)->total_ticks();
+const char* CpuProfileNode::GetBailoutReason() const {
+ const i::ProfileNode* node = reinterpret_cast<const i::ProfileNode*>(this);
+ return node->entry()->bailout_reason();
}
@@ -7593,13 +7303,13 @@ const CpuProfileNode* CpuProfile::GetSample(int index) const {
int64_t CpuProfile::GetStartTime() const {
const i::CpuProfile* profile = reinterpret_cast<const i::CpuProfile*>(this);
- return profile->start_time_us();
+ return (profile->start_time() - i::Time::UnixEpoch()).InMicroseconds();
}
int64_t CpuProfile::GetEndTime() const {
const i::CpuProfile* profile = reinterpret_cast<const i::CpuProfile*>(this);
- return profile->end_time_us();
+ return (profile->end_time() - i::Time::UnixEpoch()).InMicroseconds();
}
@@ -7613,6 +7323,13 @@ int CpuProfiler::GetProfileCount() {
}
+void CpuProfiler::SetSamplingInterval(int us) {
+ ASSERT(us >= 0);
+ return reinterpret_cast<i::CpuProfiler*>(this)->set_sampling_interval(
+ i::TimeDelta::FromMicroseconds(us));
+}
+
+
const CpuProfile* CpuProfiler::GetCpuProfile(int index) {
return reinterpret_cast<const CpuProfile*>(
reinterpret_cast<i::CpuProfiler*>(this)->GetProfile(index));
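
A usage sketch for the new sampling-interval setter. The title and interval are illustrative, and that the interval should be configured before profiling starts is an assumption here, not something this hunk enforces:

void ProfileStartup(v8::Isolate* isolate) {  // hypothetical helper
  v8::CpuProfiler* profiler = isolate->GetCpuProfiler();
  profiler->SetSamplingInterval(500);  // microseconds between samples
  profiler->StartCpuProfiling(v8::String::New("startup"));
}
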
@@ -8128,20 +7845,6 @@ void DeferredHandles::Iterate(ObjectVisitor* v) {
}
-v8::Handle<v8::Value> InvokeAccessorGetter(
- v8::Local<v8::String> property,
- const v8::AccessorInfo& info,
- v8::AccessorGetter getter) {
- Isolate* isolate = reinterpret_cast<Isolate*>(info.GetIsolate());
- Address getter_address = reinterpret_cast<Address>(reinterpret_cast<intptr_t>(
- getter));
- // Leaving JavaScript.
- VMState<EXTERNAL> state(isolate);
- ExternalCallbackScope call_scope(isolate, getter_address);
- return getter(property, info);
-}
-
-
void InvokeAccessorGetterCallback(
v8::Local<v8::String> property,
const v8::PropertyCallbackInfo<v8::Value>& info,
@@ -8152,19 +7855,7 @@ void InvokeAccessorGetterCallback(
getter));
VMState<EXTERNAL> state(isolate);
ExternalCallbackScope call_scope(isolate, getter_address);
- return getter(property, info);
-}
-
-
-v8::Handle<v8::Value> InvokeInvocationCallback(
- const v8::Arguments& args,
- v8::InvocationCallback callback) {
- Isolate* isolate = reinterpret_cast<Isolate*>(args.GetIsolate());
- Address callback_address =
- reinterpret_cast<Address>(reinterpret_cast<intptr_t>(callback));
- VMState<EXTERNAL> state(isolate);
- ExternalCallbackScope call_scope(isolate, callback_address);
- return callback(args);
+ getter(property, info);
}
@@ -8175,7 +7866,7 @@ void InvokeFunctionCallback(const v8::FunctionCallbackInfo<v8::Value>& info,
reinterpret_cast<Address>(reinterpret_cast<intptr_t>(callback));
VMState<EXTERNAL> state(isolate);
ExternalCallbackScope call_scope(isolate, callback_address);
- return callback(info);
+ callback(info);
}
diff --git a/deps/v8/src/api.h b/deps/v8/src/api.h
index 0f33bc815..51bc4942b 100644
--- a/deps/v8/src/api.h
+++ b/deps/v8/src/api.h
@@ -125,8 +125,8 @@ template <typename T> inline T ToCData(v8::internal::Object* obj) {
template <typename T>
-inline v8::internal::Handle<v8::internal::Object> FromCData(T obj) {
- v8::internal::Isolate* isolate = v8::internal::Isolate::Current();
+inline v8::internal::Handle<v8::internal::Object> FromCData(
+ v8::internal::Isolate* isolate, T obj) {
STATIC_ASSERT(sizeof(T) == sizeof(v8::internal::Address));
return isolate->factory()->NewForeign(
reinterpret_cast<v8::internal::Address>(reinterpret_cast<intptr_t>(obj)));
@@ -690,19 +690,11 @@ void HandleScopeImplementer::DeleteExtensions(internal::Object** prev_limit) {
// Interceptor functions called from generated inline caches to notify
// CPU profiler that external callbacks are invoked.
-v8::Handle<v8::Value> InvokeAccessorGetter(
- v8::Local<v8::String> property,
- const v8::AccessorInfo& info,
- v8::AccessorGetter getter);
-
-
void InvokeAccessorGetterCallback(
v8::Local<v8::String> property,
const v8::PropertyCallbackInfo<v8::Value>& info,
v8::AccessorGetterCallback getter);
-v8::Handle<v8::Value> InvokeInvocationCallback(const v8::Arguments& args,
- v8::InvocationCallback callback);
void InvokeFunctionCallback(const v8::FunctionCallbackInfo<v8::Value>& info,
v8::FunctionCallback callback);
diff --git a/deps/v8/src/apinatives.js b/deps/v8/src/apinatives.js
index ccbedd6d3..5fb36c09e 100644
--- a/deps/v8/src/apinatives.js
+++ b/deps/v8/src/apinatives.js
@@ -74,25 +74,31 @@ function InstantiateFunction(data, name) {
cache[serialNumber] = null;
var fun = %CreateApiFunction(data);
if (name) %FunctionSetName(fun, name);
- cache[serialNumber] = fun;
- var prototype = %GetTemplateField(data, kApiPrototypeTemplateOffset);
var flags = %GetTemplateField(data, kApiFlagOffset);
- // Note: Do not directly use an object template as a condition, our
- // internal ToBoolean doesn't handle that!
- fun.prototype = typeof prototype === 'undefined' ?
- {} : Instantiate(prototype);
- if (flags & (1 << kReadOnlyPrototypeBit)) {
- %FunctionSetReadOnlyPrototype(fun);
- }
- %SetProperty(fun.prototype, "constructor", fun, DONT_ENUM);
- var parent = %GetTemplateField(data, kApiParentTemplateOffset);
- // Note: Do not directly use a function template as a condition, our
- // internal ToBoolean doesn't handle that!
- if (!(typeof parent === 'undefined')) {
- var parent_fun = Instantiate(parent);
- %SetPrototype(fun.prototype, parent_fun.prototype);
+ var doNotCache = flags & (1 << kDoNotCacheBit);
+ if (!doNotCache) cache[serialNumber] = fun;
+ if (flags & (1 << kRemovePrototypeBit)) {
+ %FunctionRemovePrototype(fun);
+ } else {
+ var prototype = %GetTemplateField(data, kApiPrototypeTemplateOffset);
+ // Note: Do not directly use an object template as a condition, our
+ // internal ToBoolean doesn't handle that!
+ fun.prototype = typeof prototype === 'undefined' ?
+ {} : Instantiate(prototype);
+ if (flags & (1 << kReadOnlyPrototypeBit)) {
+ %FunctionSetReadOnlyPrototype(fun);
+ }
+ %SetProperty(fun.prototype, "constructor", fun, DONT_ENUM);
+ var parent = %GetTemplateField(data, kApiParentTemplateOffset);
+ // Note: Do not directly use a function template as a condition, our
+ // internal ToBoolean doesn't handle that!
+ if (!(typeof parent === 'undefined')) {
+ var parent_fun = Instantiate(parent);
+ %SetPrototype(fun.prototype, parent_fun.prototype);
+ }
}
ConfigureTemplateInstance(fun, data);
+ if (doNotCache) return fun;
} catch (e) {
cache[serialNumber] = kUninitialized;
throw e;
@@ -104,19 +110,32 @@ function InstantiateFunction(data, name) {
function ConfigureTemplateInstance(obj, data) {
var properties = %GetTemplateField(data, kApiPropertyListOffset);
- if (properties) {
- // Disable access checks while instantiating the object.
- var requires_access_checks = %DisableAccessChecks(obj);
- try {
- for (var i = 0; i < properties[0]; i += 3) {
+ if (!properties) return;
+ // Disable access checks while instantiating the object.
+ var requires_access_checks = %DisableAccessChecks(obj);
+ try {
+ for (var i = 1; i < properties[0];) {
+ var length = properties[i];
+ if (length == 3) {
var name = properties[i + 1];
var prop_data = properties[i + 2];
var attributes = properties[i + 3];
var value = Instantiate(prop_data, name);
%SetProperty(obj, name, value, attributes);
+ } else if (length == 5) {
+ var name = properties[i + 1];
+ var getter = properties[i + 2];
+ var setter = properties[i + 3];
+ var attribute = properties[i + 4];
+ var access_control = properties[i + 5];
+ %SetAccessorProperty(
+ obj, name, getter, setter, attribute, access_control);
+ } else {
+ throw "Bad properties array";
}
- } finally {
- if (requires_access_checks) %EnableAccessChecks(obj);
+ i += length + 1;
}
+ } finally {
+ if (requires_access_checks) %EnableAccessChecks(obj);
}
}
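
The rewritten loop decodes a length-prefixed property array: slot 0 bounds the used region, and each entry is [length, fields...], with length 3 for a plain data property and length 5 for an accessor pair. A toy C++ model of the same walk, with plain ints standing in for V8's handles:

#include <cstdio>
#include <vector>

int main() {
  // [bound, 3, name, data, attrs,  5, name, getter, setter, attr, access]
  std::vector<int> props = {11, 3, 100, 200, 0, 5, 101, 300, 301, 0, 0};
  for (int i = 1; i < props[0]; ) {
    int length = props[i];
    if (length == 3) {
      std::printf("data property, name slot %d\n", props[i + 1]);
    } else if (length == 5) {
      std::printf("accessor property, name slot %d\n", props[i + 1]);
    }
    i += length + 1;  // skip to the next entry header
  }
  return 0;
}
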
diff --git a/deps/v8/src/arguments.cc b/deps/v8/src/arguments.cc
index 11d9279e8..287805717 100644
--- a/deps/v8/src/arguments.cc
+++ b/deps/v8/src/arguments.cc
@@ -34,49 +34,6 @@ namespace v8 {
namespace internal {
-static bool Match(void* a, void* b) {
- return a == b;
-}
-
-
-static uint32_t Hash(void* function) {
- uintptr_t as_int = reinterpret_cast<uintptr_t>(function);
- if (sizeof(function) == 4) return static_cast<uint32_t>(as_int);
- uint64_t as_64 = static_cast<uint64_t>(as_int);
- return
- static_cast<uint32_t>(as_64 >> 32) ^
- static_cast<uint32_t>(as_64);
-}
-
-
-CallbackTable::CallbackTable(): map_(Match, 64) {}
-
-
-bool CallbackTable::Contains(void* function) {
- ASSERT(function != NULL);
- return map_.Lookup(function, Hash(function), false) != NULL;
-}
-
-
-void CallbackTable::InsertCallback(Isolate* isolate,
- void* function,
- bool returns_void) {
- if (function == NULL) return;
- // Don't store for performance.
- if (kStoreVoidFunctions != returns_void) return;
- CallbackTable* table = isolate->callback_table();
- if (table == NULL) {
- table = new CallbackTable();
- isolate->set_callback_table(table);
- }
- typedef HashMap::Entry Entry;
- Entry* entry = table->map_.Lookup(function, Hash(function), true);
- ASSERT(entry != NULL);
- ASSERT(entry->value == NULL || entry->value == function);
- entry->value = function;
-}
-
-
template<typename T>
template<typename V>
v8::Handle<V> CustomArguments<T>::GetReturnValue(Isolate* isolate) {
@@ -88,110 +45,67 @@ v8::Handle<V> CustomArguments<T>::GetReturnValue(Isolate* isolate) {
}
-v8::Handle<v8::Value> FunctionCallbackArguments::Call(InvocationCallback f) {
+v8::Handle<v8::Value> FunctionCallbackArguments::Call(FunctionCallback f) {
Isolate* isolate = this->isolate();
- void* f_as_void = CallbackTable::FunctionToVoidPtr(f);
- bool new_style = CallbackTable::ReturnsVoid(isolate, f_as_void);
VMState<EXTERNAL> state(isolate);
ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));
- if (new_style) {
- FunctionCallback c = reinterpret_cast<FunctionCallback>(f);
- FunctionCallbackInfo<v8::Value> info(end(),
- argv_,
- argc_,
- is_construct_call_);
- c(info);
- } else {
- v8::Arguments args(end(),
- argv_,
- argc_,
- is_construct_call_);
- v8::Handle<v8::Value> return_value = f(args);
- if (!return_value.IsEmpty()) return return_value;
- }
+ FunctionCallbackInfo<v8::Value> info(end(),
+ argv_,
+ argc_,
+ is_construct_call_);
+ f(info);
return GetReturnValue<v8::Value>(isolate);
}
-#define WRITE_CALL_0(OldFunction, NewFunction, ReturnValue) \
-v8::Handle<ReturnValue> PropertyCallbackArguments::Call(OldFunction f) { \
+#define WRITE_CALL_0(Function, ReturnValue) \
+v8::Handle<ReturnValue> PropertyCallbackArguments::Call(Function f) { \
Isolate* isolate = this->isolate(); \
- void* f_as_void = CallbackTable::FunctionToVoidPtr(f); \
- bool new_style = CallbackTable::ReturnsVoid(isolate, f_as_void); \
VMState<EXTERNAL> state(isolate); \
ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f)); \
- if (new_style) { \
- NewFunction c = reinterpret_cast<NewFunction>(f); \
- PropertyCallbackInfo<ReturnValue> info(end()); \
- c(info); \
- } else { \
- v8::AccessorInfo info(end()); \
- v8::Handle<ReturnValue> return_value = f(info); \
- if (!return_value.IsEmpty()) return return_value; \
- } \
+ PropertyCallbackInfo<ReturnValue> info(end()); \
+ f(info); \
return GetReturnValue<ReturnValue>(isolate); \
}
-#define WRITE_CALL_1(OldFunction, NewFunction, ReturnValue, Arg1) \
-v8::Handle<ReturnValue> PropertyCallbackArguments::Call(OldFunction f, \
+
+#define WRITE_CALL_1(Function, ReturnValue, Arg1) \
+v8::Handle<ReturnValue> PropertyCallbackArguments::Call(Function f, \
Arg1 arg1) { \
Isolate* isolate = this->isolate(); \
- void* f_as_void = CallbackTable::FunctionToVoidPtr(f); \
- bool new_style = CallbackTable::ReturnsVoid(isolate, f_as_void); \
VMState<EXTERNAL> state(isolate); \
ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f)); \
- if (new_style) { \
- NewFunction c = reinterpret_cast<NewFunction>(f); \
- PropertyCallbackInfo<ReturnValue> info(end()); \
- c(arg1, info); \
- } else { \
- v8::AccessorInfo info(end()); \
- v8::Handle<ReturnValue> return_value = f(arg1, info); \
- if (!return_value.IsEmpty()) return return_value; \
- } \
+ PropertyCallbackInfo<ReturnValue> info(end()); \
+ f(arg1, info); \
return GetReturnValue<ReturnValue>(isolate); \
}
-#define WRITE_CALL_2(OldFunction, NewFunction, ReturnValue, Arg1, Arg2) \
-v8::Handle<ReturnValue> PropertyCallbackArguments::Call(OldFunction f, \
+
+#define WRITE_CALL_2(Function, ReturnValue, Arg1, Arg2) \
+v8::Handle<ReturnValue> PropertyCallbackArguments::Call(Function f, \
Arg1 arg1, \
Arg2 arg2) { \
Isolate* isolate = this->isolate(); \
- void* f_as_void = CallbackTable::FunctionToVoidPtr(f); \
- bool new_style = CallbackTable::ReturnsVoid(isolate, f_as_void); \
VMState<EXTERNAL> state(isolate); \
ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f)); \
- if (new_style) { \
- NewFunction c = reinterpret_cast<NewFunction>(f); \
- PropertyCallbackInfo<ReturnValue> info(end()); \
- c(arg1, arg2, info); \
- } else { \
- v8::AccessorInfo info(end()); \
- v8::Handle<ReturnValue> return_value = f(arg1, arg2, info); \
- if (!return_value.IsEmpty()) return return_value; \
- } \
+ PropertyCallbackInfo<ReturnValue> info(end()); \
+ f(arg1, arg2, info); \
return GetReturnValue<ReturnValue>(isolate); \
}
-#define WRITE_CALL_2_VOID(OldFunction, NewFunction, ReturnValue, Arg1, Arg2) \
-void PropertyCallbackArguments::Call(OldFunction f, \
+
+#define WRITE_CALL_2_VOID(Function, ReturnValue, Arg1, Arg2) \
+void PropertyCallbackArguments::Call(Function f, \
Arg1 arg1, \
Arg2 arg2) { \
Isolate* isolate = this->isolate(); \
- void* f_as_void = CallbackTable::FunctionToVoidPtr(f); \
- bool new_style = CallbackTable::ReturnsVoid(isolate, f_as_void); \
VMState<EXTERNAL> state(isolate); \
ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f)); \
- if (new_style) { \
- NewFunction c = reinterpret_cast<NewFunction>(f); \
- PropertyCallbackInfo<ReturnValue> info(end()); \
- c(arg1, arg2, info); \
- } else { \
- v8::AccessorInfo info(end()); \
- f(arg1, arg2, info); \
- } \
+ PropertyCallbackInfo<ReturnValue> info(end()); \
+ f(arg1, arg2, info); \
}
+
FOR_EACH_CALLBACK_TABLE_MAPPING_0(WRITE_CALL_0)
FOR_EACH_CALLBACK_TABLE_MAPPING_1(WRITE_CALL_1)
FOR_EACH_CALLBACK_TABLE_MAPPING_2(WRITE_CALL_2)
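
With the CallbackTable dispatch gone, only the new-style signatures survive: callbacks return void and publish their result through the info object rather than returning a handle. Schematically (body illustrative):

// Old style (removed):
//   v8::Handle<v8::Value> Cb(const v8::Arguments& args) {
//     return v8::Integer::New(7);
//   }
// New style (the only one left):
void Cb(const v8::FunctionCallbackInfo<v8::Value>& info) {
  info.GetReturnValue().Set(v8::Integer::New(7));
}
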
diff --git a/deps/v8/src/arguments.h b/deps/v8/src/arguments.h
index f9dca110c..c1db98b53 100644
--- a/deps/v8/src/arguments.h
+++ b/deps/v8/src/arguments.h
@@ -83,116 +83,49 @@ class Arguments BASE_EMBEDDED {
};
-// mappings from old property callbacks to new ones
-// F(old name, new name, return value, parameters...)
-//
+// For each type of callback, we have a list of arguments.
+// They are used to generate the Call() functions below.
// These aren't included in the list as they have duplicate signatures
-// F(NamedPropertyEnumerator, NamedPropertyEnumeratorCallback, ...)
-// F(NamedPropertyGetter, NamedPropertyGetterCallback, ...)
+// F(NamedPropertyEnumeratorCallback, ...)
+// F(NamedPropertyGetterCallback, ...)
#define FOR_EACH_CALLBACK_TABLE_MAPPING_0(F) \
- F(IndexedPropertyEnumerator, IndexedPropertyEnumeratorCallback, v8::Array) \
+ F(IndexedPropertyEnumeratorCallback, v8::Array) \
#define FOR_EACH_CALLBACK_TABLE_MAPPING_1(F) \
- F(AccessorGetter, AccessorGetterCallback, v8::Value, v8::Local<v8::String>) \
- F(NamedPropertyQuery, \
- NamedPropertyQueryCallback, \
+ F(AccessorGetterCallback, v8::Value, v8::Local<v8::String>) \
+ F(NamedPropertyQueryCallback, \
v8::Integer, \
v8::Local<v8::String>) \
- F(NamedPropertyDeleter, \
- NamedPropertyDeleterCallback, \
+ F(NamedPropertyDeleterCallback, \
v8::Boolean, \
v8::Local<v8::String>) \
- F(IndexedPropertyGetter, \
- IndexedPropertyGetterCallback, \
+ F(IndexedPropertyGetterCallback, \
v8::Value, \
uint32_t) \
- F(IndexedPropertyQuery, \
- IndexedPropertyQueryCallback, \
+ F(IndexedPropertyQueryCallback, \
v8::Integer, \
uint32_t) \
- F(IndexedPropertyDeleter, \
- IndexedPropertyDeleterCallback, \
+ F(IndexedPropertyDeleterCallback, \
v8::Boolean, \
uint32_t) \
#define FOR_EACH_CALLBACK_TABLE_MAPPING_2(F) \
- F(NamedPropertySetter, \
- NamedPropertySetterCallback, \
+ F(NamedPropertySetterCallback, \
v8::Value, \
v8::Local<v8::String>, \
v8::Local<v8::Value>) \
- F(IndexedPropertySetter, \
- IndexedPropertySetterCallback, \
+ F(IndexedPropertySetterCallback, \
v8::Value, \
uint32_t, \
v8::Local<v8::Value>) \
#define FOR_EACH_CALLBACK_TABLE_MAPPING_2_VOID_RETURN(F) \
- F(AccessorSetter, \
- AccessorSetterCallback, \
+ F(AccessorSetterCallback, \
void, \
v8::Local<v8::String>, \
v8::Local<v8::Value>) \
-// All property callbacks as well as invocation callbacks
-#define FOR_EACH_CALLBACK_TABLE_MAPPING(F) \
- F(InvocationCallback, FunctionCallback) \
- F(AccessorGetter, AccessorGetterCallback) \
- F(AccessorSetter, AccessorSetterCallback) \
- F(NamedPropertySetter, NamedPropertySetterCallback) \
- F(NamedPropertyQuery, NamedPropertyQueryCallback) \
- F(NamedPropertyDeleter, NamedPropertyDeleterCallback) \
- F(IndexedPropertyGetter, IndexedPropertyGetterCallback) \
- F(IndexedPropertySetter, IndexedPropertySetterCallback) \
- F(IndexedPropertyQuery, IndexedPropertyQueryCallback) \
- F(IndexedPropertyDeleter, IndexedPropertyDeleterCallback) \
- F(IndexedPropertyEnumerator, IndexedPropertyEnumeratorCallback) \
-
-
-// TODO(dcarney): Remove this class when old callbacks are gone.
-class CallbackTable {
- public:
- static const bool kStoreVoidFunctions = false;
- static inline bool ReturnsVoid(Isolate* isolate, void* function) {
- CallbackTable* table = isolate->callback_table();
- bool contains =
- table != NULL &&
- table->map_.occupancy() != 0 &&
- table->Contains(function);
- return contains == kStoreVoidFunctions;
- }
-
- STATIC_ASSERT(sizeof(intptr_t) == sizeof(AccessorGetterCallback));
-
- template<typename F>
- static inline void* FunctionToVoidPtr(F function) {
- return reinterpret_cast<void*>(reinterpret_cast<intptr_t>(function));
- }
-
-#define WRITE_REGISTER(OldFunction, NewFunction) \
- static NewFunction Register(Isolate* isolate, OldFunction f) { \
- InsertCallback(isolate, FunctionToVoidPtr(f), false); \
- return reinterpret_cast<NewFunction>(f); \
- } \
- \
- static NewFunction Register(Isolate* isolate, NewFunction f) { \
- InsertCallback(isolate, FunctionToVoidPtr(f), true); \
- return f; \
- }
- FOR_EACH_CALLBACK_TABLE_MAPPING(WRITE_REGISTER)
-#undef WRITE_REGISTER
-
- private:
- CallbackTable();
- bool Contains(void* function);
- static void InsertCallback(Isolate* isolate,
- void* function,
- bool returns_void);
- HashMap map_;
- DISALLOW_COPY_AND_ASSIGN(CallbackTable);
-};
-
// Custom arguments replicate a small segment of stack that can be
// accessed through an Arguments object the same way the actual stack
@@ -218,7 +151,6 @@ class CustomArguments : public CustomArgumentsBase<T::kArgsLength> {
typedef CustomArgumentsBase<T::kArgsLength> Super;
~CustomArguments() {
- // TODO(dcarney): create a new zap value for this.
this->end()[kReturnValueOffset] =
reinterpret_cast<Object*>(kHandleZapValue);
}
@@ -243,6 +175,10 @@ class PropertyCallbackArguments
static const int kArgsLength = T::kArgsLength;
static const int kThisIndex = T::kThisIndex;
static const int kHolderIndex = T::kHolderIndex;
+ static const int kDataIndex = T::kDataIndex;
+ static const int kReturnValueDefaultValueIndex =
+ T::kReturnValueDefaultValueIndex;
+ static const int kIsolateIndex = T::kIsolateIndex;
PropertyCallbackArguments(Isolate* isolate,
Object* data,
@@ -271,17 +207,17 @@ class PropertyCallbackArguments
* and used if it's been set to anything inside the callback.
* New style callbacks always use the return value.
*/
-#define WRITE_CALL_0(OldFunction, NewFunction, ReturnValue) \
- v8::Handle<ReturnValue> Call(OldFunction f); \
+#define WRITE_CALL_0(Function, ReturnValue) \
+ v8::Handle<ReturnValue> Call(Function f); \
-#define WRITE_CALL_1(OldFunction, NewFunction, ReturnValue, Arg1) \
- v8::Handle<ReturnValue> Call(OldFunction f, Arg1 arg1); \
+#define WRITE_CALL_1(Function, ReturnValue, Arg1) \
+ v8::Handle<ReturnValue> Call(Function f, Arg1 arg1); \
-#define WRITE_CALL_2(OldFunction, NewFunction, ReturnValue, Arg1, Arg2) \
- v8::Handle<ReturnValue> Call(OldFunction f, Arg1 arg1, Arg2 arg2); \
+#define WRITE_CALL_2(Function, ReturnValue, Arg1, Arg2) \
+ v8::Handle<ReturnValue> Call(Function f, Arg1 arg1, Arg2 arg2); \
-#define WRITE_CALL_2_VOID(OldFunction, NewFunction, ReturnValue, Arg1, Arg2) \
- void Call(OldFunction f, Arg1 arg1, Arg2 arg2); \
+#define WRITE_CALL_2_VOID(Function, ReturnValue, Arg1, Arg2) \
+ void Call(Function f, Arg1 arg1, Arg2 arg2); \
FOR_EACH_CALLBACK_TABLE_MAPPING_0(WRITE_CALL_0)
FOR_EACH_CALLBACK_TABLE_MAPPING_1(WRITE_CALL_1)
@@ -336,7 +272,7 @@ class FunctionCallbackArguments
* and used if it's been set to anything inside the callback.
* New style callbacks always use the return value.
*/
- v8::Handle<v8::Value> Call(InvocationCallback f);
+ v8::Handle<v8::Value> Call(FunctionCallback f);
private:
internal::Object** argv_;
diff --git a/deps/v8/src/arm/assembler-arm-inl.h b/deps/v8/src/arm/assembler-arm-inl.h
index bfe9bc833..a1d1e1b56 100644
--- a/deps/v8/src/arm/assembler-arm-inl.h
+++ b/deps/v8/src/arm/assembler-arm-inl.h
@@ -279,7 +279,7 @@ bool RelocInfo::IsPatchedDebugBreakSlotSequence() {
}
-void RelocInfo::Visit(ObjectVisitor* visitor) {
+void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
visitor->VisitEmbeddedPointer(this);
@@ -292,12 +292,11 @@ void RelocInfo::Visit(ObjectVisitor* visitor) {
} else if (RelocInfo::IsCodeAgeSequence(mode)) {
visitor->VisitCodeAgeSequence(this);
#ifdef ENABLE_DEBUGGER_SUPPORT
- // TODO(isolates): Get a cached isolate below.
} else if (((RelocInfo::IsJSReturn(mode) &&
IsPatchedReturnSequence()) ||
(RelocInfo::IsDebugBreakSlot(mode) &&
IsPatchedDebugBreakSlotSequence())) &&
- Isolate::Current()->debug()->has_break_points()) {
+ isolate->debug()->has_break_points()) {
visitor->VisitDebugTarget(this);
#endif
} else if (RelocInfo::IsRuntimeEntry(mode)) {
diff --git a/deps/v8/src/arm/assembler-arm.cc b/deps/v8/src/arm/assembler-arm.cc
index a9db5a599..bd8b0613e 100644
--- a/deps/v8/src/arm/assembler-arm.cc
+++ b/deps/v8/src/arm/assembler-arm.cc
@@ -39,6 +39,7 @@
#if V8_TARGET_ARCH_ARM
#include "arm/assembler-arm-inl.h"
+#include "macro-assembler.h"
#include "serialize.h"
namespace v8 {
@@ -152,7 +153,8 @@ void CpuFeatures::Probe() {
#else // __arm__
// Probe for additional features not already known to be available.
- if (!IsSupported(VFP3) && FLAG_enable_vfp3 && OS::ArmCpuHasFeature(VFP3)) {
+ CPU cpu;
+ if (!IsSupported(VFP3) && FLAG_enable_vfp3 && cpu.has_vfp3()) {
// This implementation also sets the VFP flags if runtime
// detection of VFP returns true. VFPv3 implies ARMv7, see ARM DDI
// 0406B, page A1-6.
@@ -161,38 +163,40 @@ void CpuFeatures::Probe() {
static_cast<uint64_t>(1) << ARMv7;
}
- if (!IsSupported(NEON) && FLAG_enable_neon && OS::ArmCpuHasFeature(NEON)) {
+ if (!IsSupported(NEON) && FLAG_enable_neon && cpu.has_neon()) {
found_by_runtime_probing_only_ |= 1u << NEON;
}
- if (!IsSupported(ARMv7) && FLAG_enable_armv7 && OS::ArmCpuHasFeature(ARMv7)) {
+ if (!IsSupported(ARMv7) && FLAG_enable_armv7 && cpu.architecture() >= 7) {
found_by_runtime_probing_only_ |= static_cast<uint64_t>(1) << ARMv7;
}
- if (!IsSupported(SUDIV) && FLAG_enable_sudiv && OS::ArmCpuHasFeature(SUDIV)) {
+ if (!IsSupported(SUDIV) && FLAG_enable_sudiv && cpu.has_idiva()) {
found_by_runtime_probing_only_ |= static_cast<uint64_t>(1) << SUDIV;
}
if (!IsSupported(UNALIGNED_ACCESSES) && FLAG_enable_unaligned_accesses
- && OS::ArmCpuHasFeature(ARMv7)) {
+ && cpu.architecture() >= 7) {
found_by_runtime_probing_only_ |=
static_cast<uint64_t>(1) << UNALIGNED_ACCESSES;
}
- CpuImplementer implementer = OS::GetCpuImplementer();
- if (implementer == QUALCOMM_IMPLEMENTER &&
- FLAG_enable_movw_movt && OS::ArmCpuHasFeature(ARMv7)) {
+ // Use movw/movt for QUALCOMM ARMv7 cores.
+ if (cpu.implementer() == CPU::QUALCOMM &&
+ cpu.architecture() >= 7 &&
+ FLAG_enable_movw_movt) {
found_by_runtime_probing_only_ |=
static_cast<uint64_t>(1) << MOVW_MOVT_IMMEDIATE_LOADS;
}
- CpuPart part = OS::GetCpuPart(implementer);
- if ((part == CORTEX_A9) || (part == CORTEX_A5)) {
+ // ARM Cortex-A9 and Cortex-A5 have 32 byte cachelines.
+ if (cpu.implementer() == CPU::ARM &&
+ (cpu.part() == CPU::ARM_CORTEX_A5 ||
+ cpu.part() == CPU::ARM_CORTEX_A9)) {
cache_line_size_ = 32;
}
- if (!IsSupported(VFP32DREGS) && FLAG_enable_32dregs
- && OS::ArmCpuHasFeature(VFP32DREGS)) {
+ if (!IsSupported(VFP32DREGS) && FLAG_enable_32dregs && cpu.has_vfp3_d32()) {
found_by_runtime_probing_only_ |= static_cast<uint64_t>(1) << VFP32DREGS;
}
@@ -321,15 +325,12 @@ void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
// See assembler-arm-inl.h for inlined constructors
Operand::Operand(Handle<Object> handle) {
-#ifdef DEBUG
- Isolate* isolate = Isolate::Current();
-#endif
AllowDeferredHandleDereference using_raw_address;
rm_ = no_reg;
// Verify all Objects referred by code are NOT in new space.
Object* obj = *handle;
- ASSERT(!isolate->heap()->InNewSpace(obj));
if (obj->IsHeapObject()) {
+ ASSERT(!HeapObject::cast(obj)->GetHeap()->InNewSpace(obj));
imm32_ = reinterpret_cast<intptr_t>(handle.location());
rmode_ = RelocInfo::EMBEDDED_OBJECT;
} else {
@@ -775,9 +776,9 @@ int Assembler::GetCmpImmediateRawImmediate(Instr instr) {
int Assembler::target_at(int pos) {
Instr instr = instr_at(pos);
- if ((instr & ~kImm24Mask) == 0) {
- // Emitted label constant, not part of a branch.
- return instr - (Code::kHeaderSize - kHeapObjectTag);
+ if (is_uint24(instr)) {
+ // Emitted link to a label, not part of a branch.
+ return instr;
}
ASSERT((instr & 7*B25) == 5*B25); // b, bl, or blx imm24
int imm26 = ((instr & kImm24Mask) << 8) >> 6;
@@ -792,11 +793,72 @@ int Assembler::target_at(int pos) {
void Assembler::target_at_put(int pos, int target_pos) {
Instr instr = instr_at(pos);
- if ((instr & ~kImm24Mask) == 0) {
+ if (is_uint24(instr)) {
ASSERT(target_pos == pos || target_pos >= 0);
- // Emitted label constant, not part of a branch.
- // Make label relative to Code* of generated Code object.
- instr_at_put(pos, target_pos + (Code::kHeaderSize - kHeapObjectTag));
+ // Emitted link to a label, not part of a branch.
+ // Load the position of the label relative to the generated code object
+ // pointer in a register.
+
+ // Here are the instructions we need to emit:
+ // For ARMv7: target24 => target16_1:target16_0
+ // movw dst, #target16_0
+ // movt dst, #target16_1
+ // For ARMv6: target24 => target8_2:target8_1:target8_0
+ // mov dst, #target8_0
+ // orr dst, dst, #target8_1 << 8
+ // orr dst, dst, #target8_2 << 16
+
+ // We extract the destination register from the emitted nop instruction.
+ Register dst = Register::from_code(
+ Instruction::RmValue(instr_at(pos + kInstrSize)));
+ ASSERT(IsNop(instr_at(pos + kInstrSize), dst.code()));
+ uint32_t target24 = target_pos + (Code::kHeaderSize - kHeapObjectTag);
+ ASSERT(is_uint24(target24));
+ if (is_uint8(target24)) {
+ // If the target fits in a byte then only patch with a mov
+ // instruction.
+ CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos),
+ 1,
+ CodePatcher::DONT_FLUSH);
+ patcher.masm()->mov(dst, Operand(target24));
+ } else {
+ uint16_t target16_0 = target24 & kImm16Mask;
+ uint16_t target16_1 = target24 >> 16;
+ if (CpuFeatures::IsSupported(ARMv7)) {
+ // Patch with movw/movt.
+ if (target16_1 == 0) {
+ CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos),
+ 1,
+ CodePatcher::DONT_FLUSH);
+ patcher.masm()->movw(dst, target16_0);
+ } else {
+ CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos),
+ 2,
+ CodePatcher::DONT_FLUSH);
+ patcher.masm()->movw(dst, target16_0);
+ patcher.masm()->movt(dst, target16_1);
+ }
+ } else {
+ // Patch with a sequence of mov/orr/orr instructions.
+ uint8_t target8_0 = target16_0 & kImm8Mask;
+ uint8_t target8_1 = target16_0 >> 8;
+ uint8_t target8_2 = target16_1 & kImm8Mask;
+ if (target8_2 == 0) {
+ CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos),
+ 2,
+ CodePatcher::DONT_FLUSH);
+ patcher.masm()->mov(dst, Operand(target8_0));
+ patcher.masm()->orr(dst, dst, Operand(target8_1 << 8));
+ } else {
+ CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos),
+ 3,
+ CodePatcher::DONT_FLUSH);
+ patcher.masm()->mov(dst, Operand(target8_0));
+ patcher.masm()->orr(dst, dst, Operand(target8_1 << 8));
+ patcher.masm()->orr(dst, dst, Operand(target8_2 << 16));
+ }
+ }
+ }
return;
}
int imm26 = target_pos - (pos + kPcLoadDelta);
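Aside: the ARMv6 fallback above rebuilds the 24-bit link value byte by byte; a
tiny self-contained check of the identity it relies on (hypothetical value,
standard C++):

    #include <cassert>
    #include <cstdint>
    int main() {
      uint32_t target24  = 0x123456;                 // any 24-bit link value
      uint32_t target8_0 = target24 & 0xff;          // -> mov dst, #0x56
      uint32_t target8_1 = (target24 >> 8) & 0xff;   // -> orr dst, dst, #0x34 << 8
      uint32_t target8_2 = (target24 >> 16) & 0xff;  // -> orr dst, dst, #0x12 << 16
      assert(target24 == (target8_0 | (target8_1 << 8) | (target8_2 << 16)));
      return 0;
    }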
@@ -1229,21 +1291,6 @@ int Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
}
-void Assembler::label_at_put(Label* L, int at_offset) {
- int target_pos;
- ASSERT(!L->is_bound());
- if (L->is_linked()) {
- // Point to previous instruction that uses the link.
- target_pos = L->pos();
- } else {
- // First entry of the link chain points to itself.
- target_pos = at_offset;
- }
- L->link_to(at_offset);
- instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag));
-}
-
-
// Branch instructions.
void Assembler::b(int branch_offset, Condition cond) {
ASSERT((branch_offset & 3) == 0);
@@ -1386,6 +1433,45 @@ void Assembler::mov(Register dst, const Operand& src, SBit s, Condition cond) {
}
+void Assembler::mov_label_offset(Register dst, Label* label) {
+ if (label->is_bound()) {
+ mov(dst, Operand(label->pos() + (Code::kHeaderSize - kHeapObjectTag)));
+ } else {
+ // Emit the link to the label in the code stream followed by extra nop
+ // instructions.
+ // If the label is not linked, then start a new link chain by linking it to
+ // itself, emitting pc_offset().
+ int link = label->is_linked() ? label->pos() : pc_offset();
+ label->link_to(pc_offset());
+
+ // When the label is bound, these instructions will be patched with a
+ // sequence of movw/movt or mov/orr/orr instructions. They will load the
+ // destination register with the position of the label from the beginning
+ // of the code.
+ //
+ // The link will be extracted from the first instruction and the destination
+ // register from the second.
+ // For ARMv7:
+ // link
+ // mov dst, dst
+ // For ARMv6:
+ // link
+ // mov dst, dst
+ // mov dst, dst
+ //
+ // When the label gets bound: target_at extracts the link and target_at_put
+ // patches the instructions.
+ ASSERT(is_uint24(link));
+ BlockConstPoolScope block_const_pool(this);
+ emit(link);
+ nop(dst.code());
+ if (!CpuFeatures::IsSupported(ARMv7)) {
+ nop(dst.code());
+ }
+ }
+}
+
+
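Aside: how the emitted word forms a link chain, sketched with made-up offsets
(the stored value is the raw pc_offset, which is what the is_uint24 checks in
target_at/target_at_put above rely on):

    // First use at pc_offset 100: label unlinked, the slot points to itself.
    //   code[100] = 100
    // Second use at pc_offset 140: the slot records the previous link.
    //   code[140] = 100
    // bind() walks 140 -> 100 -> (self); target_at_put patches each slot
    // with the movw/movt or mov/orr/orr sequence shown earlier.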
void Assembler::movw(Register reg, uint32_t immediate, Condition cond) {
ASSERT(immediate < 0x10000);
// May use movw if supported, but on unsupported platforms will try to use
diff --git a/deps/v8/src/arm/assembler-arm.h b/deps/v8/src/arm/assembler-arm.h
index f647848de..866b1c902 100644
--- a/deps/v8/src/arm/assembler-arm.h
+++ b/deps/v8/src/arm/assembler-arm.h
@@ -748,10 +748,6 @@ class Assembler : public AssemblerBase {
// Manages the jump elimination optimization if the second parameter is true.
int branch_offset(Label* L, bool jump_elimination_allowed);
- // Puts a labels target address at the given position.
- // The high 8 bits are set to zero.
- void label_at_put(Label* L, int at_offset);
-
// Return the address in the constant pool of the code target address used by
// the branch/call instruction at pc, or the object in a mov.
INLINE(static Address target_pointer_address_at(Address pc));
@@ -903,6 +899,10 @@ class Assembler : public AssemblerBase {
mov(dst, Operand(src), s, cond);
}
+ // Load the position of the label relative to the generated code object
+ // pointer in a register.
+ void mov_label_offset(Register dst, Label* label);
+
// ARMv7 instructions for loading a 32 bit immediate in two instructions.
// This may actually emit a different mov instruction, but on an ARMv7 it
// is guaranteed to only emit one instruction.
@@ -1561,7 +1561,6 @@ class Assembler : public AssemblerBase {
void RecordRelocInfo(double data);
void RecordRelocInfoConstantPoolEntryHelper(const RelocInfo& rinfo);
- friend class RegExpMacroAssemblerARM;
friend class RelocInfo;
friend class CodePatcher;
friend class BlockConstPoolScope;
diff --git a/deps/v8/src/arm/builtins-arm.cc b/deps/v8/src/arm/builtins-arm.cc
index 5f3a999f5..f60e1f867 100644
--- a/deps/v8/src/arm/builtins-arm.cc
+++ b/deps/v8/src/arm/builtins-arm.cc
@@ -291,68 +291,55 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
}
+static void CallRuntimePassFunction(MacroAssembler* masm,
+ Runtime::FunctionId function_id) {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ // Push a copy of the function onto the stack.
+ __ push(r1);
+ // Push call kind information.
+ __ push(r5);
+ // Function is also the parameter to the runtime call.
+ __ push(r1);
+
+ __ CallRuntime(function_id, 1);
+ // Restore call kind information.
+ __ pop(r5);
+ // Restore receiver.
+ __ pop(r1);
+}
+
+
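Aside: the stack discipline of the new helper, spelled out (a sketch of what
the pushes and pops balance to):

    // push r1 (function)   --+ preserved across the runtime call
    // push r5 (call kind)  --+
    // push r1 (argument)   ----> consumed by CallRuntime(function_id, 1)
    //   ... runtime call runs; its result is left in r0 ...
    // pop r5 (call kind restored)
    // pop r1 (function restored)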
static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
__ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
__ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kCodeOffset));
__ add(r2, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ mov(pc, r2);
+ __ Jump(r2);
}
void Builtins::Generate_InRecompileQueue(MacroAssembler* masm) {
+ // Checking whether the queued function is ready for install is optional,
+ // since we come across interrupts and stack checks elsewhere. However,
+ // not checking may delay installing ready functions, and always checking
+ // would be quite expensive. A good compromise is to first check against
+ // stack limit as a cue for an interrupt signal.
+ Label ok;
+ __ LoadRoot(ip, Heap::kStackLimitRootIndex);
+ __ cmp(sp, Operand(ip));
+ __ b(hs, &ok);
+
+ CallRuntimePassFunction(masm, Runtime::kTryInstallRecompiledCode);
+ // Tail call to returned code.
+ __ add(r0, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(r0);
+
+ __ bind(&ok);
GenerateTailCallToSharedCode(masm);
}
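Aside: in C-like terms the new prologue is roughly the following (pseudocode;
the assumption, implied by the tail call above, is that the runtime call
returns the Code object to enter):

    // if (sp >= stack_limit) {
    //   tail_call(function->shared()->code());     // nothing pending; skip check
    // } else {
    //   Code* code = TryInstallRecompiledCode(function);
    //   tail_call(code->entry());                  // enter the returned code
    // }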
-void Builtins::Generate_InstallRecompiledCode(MacroAssembler* masm) {
- // Enter an internal frame.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Preserve the function.
- __ push(r1);
- // Push call kind information.
- __ push(r5);
-
- // Push the function on the stack as the argument to the runtime function.
- __ push(r1);
- __ CallRuntime(Runtime::kInstallRecompiledCode, 1);
- // Calculate the entry point.
- __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
-
- // Restore call kind information.
- __ pop(r5);
- // Restore saved function.
- __ pop(r1);
-
- // Tear down internal frame.
- }
-
- // Do a tail-call of the compiled function.
- __ Jump(r2);
-}
-
-
-void Builtins::Generate_ParallelRecompile(MacroAssembler* masm) {
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Push a copy of the function onto the stack.
- __ push(r1);
- // Push call kind information.
- __ push(r5);
-
- __ push(r1); // Function is also the parameter to the runtime call.
- __ CallRuntime(Runtime::kParallelRecompile, 1);
-
- // Restore call kind information.
- __ pop(r5);
- // Restore receiver.
- __ pop(r1);
-
- // Tear down internal frame.
- }
-
+void Builtins::Generate_ConcurrentRecompile(MacroAssembler* masm) {
+ CallRuntimePassFunction(masm, Runtime::kConcurrentRecompile);
GenerateTailCallToSharedCode(masm);
}
@@ -795,59 +782,17 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
void Builtins::Generate_LazyCompile(MacroAssembler* masm) {
- // Enter an internal frame.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Preserve the function.
- __ push(r1);
- // Push call kind information.
- __ push(r5);
-
- // Push the function on the stack as the argument to the runtime function.
- __ push(r1);
- __ CallRuntime(Runtime::kLazyCompile, 1);
- // Calculate the entry point.
- __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
-
- // Restore call kind information.
- __ pop(r5);
- // Restore saved function.
- __ pop(r1);
-
- // Tear down internal frame.
- }
-
+ CallRuntimePassFunction(masm, Runtime::kLazyCompile);
// Do a tail-call of the compiled function.
+ __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Jump(r2);
}
void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
- // Enter an internal frame.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Preserve the function.
- __ push(r1);
- // Push call kind information.
- __ push(r5);
-
- // Push the function on the stack as the argument to the runtime function.
- __ push(r1);
- __ CallRuntime(Runtime::kLazyRecompile, 1);
- // Calculate the entry point.
- __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
-
- // Restore call kind information.
- __ pop(r5);
- // Restore saved function.
- __ pop(r1);
-
- // Tear down internal frame.
- }
-
+ CallRuntimePassFunction(masm, Runtime::kLazyRecompile);
// Do a tail-call of the compiled function.
+ __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Jump(r2);
}
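Aside: both lazy builtins now share this epilogue; the entry-point arithmetic
is the pattern used throughout the patch:

    // entry = code_object + Code::kHeaderSize - kHeapObjectTag
    //       = (pointer with the heap-object tag bit removed) + (Code header),
    // i.e. the address of the first generated instruction of the Code object.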
@@ -966,31 +911,48 @@ void Builtins::Generate_NotifyOSR(MacroAssembler* masm) {
void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
- // Lookup the function in the JavaScript frame and push it as an
- // argument to the on-stack replacement function.
+  // Look up the function in the JavaScript frame.
__ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
{
FrameScope scope(masm, StackFrame::INTERNAL);
+    // Look up the caller's pc and compute its offset into the unoptimized code.
+ __ ldr(r1, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
+ __ ldr(r2, FieldMemOperand(r0, JSFunction::kSharedFunctionInfoOffset));
+ __ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kCodeOffset));
+ __ sub(r1, r1, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ sub(r1, r1, r2);
+ __ SmiTag(r1);
+
+ // Pass both function and pc offset as arguments.
__ push(r0);
- __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
+ __ push(r1);
+ __ CallRuntime(Runtime::kCompileForOnStackReplacement, 2);
}
- // If the result was -1 it means that we couldn't optimize the
- // function. Just return and continue in the unoptimized version.
+ // If the code object is null, just return to the unoptimized code.
Label skip;
- __ cmp(r0, Operand(Smi::FromInt(-1)));
+ __ cmp(r0, Operand(Smi::FromInt(0)));
__ b(ne, &skip);
__ Ret();
__ bind(&skip);
- // Untag the AST id and push it on the stack.
- __ SmiUntag(r0);
- __ push(r0);
-
- // Generate the code for doing the frame-to-frame translation using
- // the deoptimizer infrastructure.
- Deoptimizer::EntryGenerator generator(masm, Deoptimizer::OSR);
- generator.Generate();
+
+ // Load deoptimization data from the code object.
+ // <deopt_data> = <code>[#deoptimization_data_offset]
+ __ ldr(r1, MemOperand(r0, Code::kDeoptimizationDataOffset - kHeapObjectTag));
+
+ // Load the OSR entrypoint offset from the deoptimization data.
+ // <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
+ __ ldr(r1, MemOperand(r1, FixedArray::OffsetOfElementAt(
+ DeoptimizationInputData::kOsrPcOffsetIndex) - kHeapObjectTag));
+
+ // Compute the target address = code_obj + header_size + osr_offset
+ // <entry_addr> = <code_obj> + #header_size + <osr_offset>
+ __ add(r0, r0, Operand::SmiUntag(r1));
+ __ add(lr, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
+
+ // And "return" to the OSR entry point of the function.
+ __ Ret();
}
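Aside: chaining the three loads above, the OSR entry address works out to (a
sketch with the Smi untagging elided):

    // deopt_data = code[kDeoptimizationDataOffset]
    // osr_offset = deopt_data[kOsrPcOffsetIndex]    // stored as a Smi
    // entry      = code + Code::kHeaderSize - kHeapObjectTag + osr_offset
    // lr = entry; Ret();   // "return" straight into the optimized code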
diff --git a/deps/v8/src/arm/code-stubs-arm.cc b/deps/v8/src/arm/code-stubs-arm.cc
index 5858eac62..cd1809fb2 100644
--- a/deps/v8/src/arm/code-stubs-arm.cc
+++ b/deps/v8/src/arm/code-stubs-arm.cc
@@ -38,6 +38,17 @@ namespace v8 {
namespace internal {
+void FastNewClosureStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { r2 };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kNewClosureFromStubFailure)->entry;
+}
+
+
void ToNumberStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
@@ -309,134 +320,6 @@ void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
}
-void FastNewClosureStub::Generate(MacroAssembler* masm) {
- // Create a new closure from the given function info in new
- // space. Set the context to the current context in cp.
- Counters* counters = masm->isolate()->counters();
-
- Label gc;
-
- // Pop the function info from the stack.
- __ pop(r3);
-
- // Attempt to allocate new JSFunction in new space.
- __ Allocate(JSFunction::kSize, r0, r1, r2, &gc, TAG_OBJECT);
-
- __ IncrementCounter(counters->fast_new_closure_total(), 1, r6, r7);
-
- int map_index = Context::FunctionMapIndex(language_mode_, is_generator_);
-
- // Compute the function map in the current native context and set that
- // as the map of the allocated object.
- __ ldr(r2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ ldr(r2, FieldMemOperand(r2, GlobalObject::kNativeContextOffset));
- __ ldr(r5, MemOperand(r2, Context::SlotOffset(map_index)));
- __ str(r5, FieldMemOperand(r0, HeapObject::kMapOffset));
-
- // Initialize the rest of the function. We don't have to update the
- // write barrier because the allocated object is in new space.
- __ LoadRoot(r1, Heap::kEmptyFixedArrayRootIndex);
- __ LoadRoot(r5, Heap::kTheHoleValueRootIndex);
- __ str(r1, FieldMemOperand(r0, JSObject::kPropertiesOffset));
- __ str(r1, FieldMemOperand(r0, JSObject::kElementsOffset));
- __ str(r5, FieldMemOperand(r0, JSFunction::kPrototypeOrInitialMapOffset));
- __ str(r3, FieldMemOperand(r0, JSFunction::kSharedFunctionInfoOffset));
- __ str(cp, FieldMemOperand(r0, JSFunction::kContextOffset));
- __ str(r1, FieldMemOperand(r0, JSFunction::kLiteralsOffset));
-
- // Initialize the code pointer in the function to be the one
- // found in the shared function info object.
- // But first check if there is an optimized version for our context.
- Label check_optimized;
- Label install_unoptimized;
- if (FLAG_cache_optimized_code) {
- __ ldr(r1,
- FieldMemOperand(r3, SharedFunctionInfo::kOptimizedCodeMapOffset));
- __ tst(r1, r1);
- __ b(ne, &check_optimized);
- }
- __ bind(&install_unoptimized);
- __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
- __ str(r4, FieldMemOperand(r0, JSFunction::kNextFunctionLinkOffset));
- __ ldr(r3, FieldMemOperand(r3, SharedFunctionInfo::kCodeOffset));
- __ add(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ str(r3, FieldMemOperand(r0, JSFunction::kCodeEntryOffset));
-
- // Return result. The argument function info has been popped already.
- __ Ret();
-
- __ bind(&check_optimized);
-
- __ IncrementCounter(counters->fast_new_closure_try_optimized(), 1, r6, r7);
-
- // r2 holds native context, r1 points to fixed array of 3-element entries
- // (native context, optimized code, literals).
- // The optimized code map must never be empty, so check the first elements.
- Label install_optimized;
- // Speculatively move code object into r4.
- __ ldr(r4, FieldMemOperand(r1, SharedFunctionInfo::kFirstCodeSlot));
- __ ldr(r5, FieldMemOperand(r1, SharedFunctionInfo::kFirstContextSlot));
- __ cmp(r2, r5);
- __ b(eq, &install_optimized);
-
- // Iterate through the rest of map backwards. r4 holds an index as a Smi.
- Label loop;
- __ ldr(r4, FieldMemOperand(r1, FixedArray::kLengthOffset));
- __ bind(&loop);
- // Do not double check first entry.
- __ cmp(r4, Operand(Smi::FromInt(SharedFunctionInfo::kSecondEntryIndex)));
- __ b(eq, &install_unoptimized);
- __ sub(r4, r4, Operand(Smi::FromInt(SharedFunctionInfo::kEntryLength)));
- __ add(r5, r1, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ add(r5, r5, Operand::PointerOffsetFromSmiKey(r4));
- __ ldr(r5, MemOperand(r5));
- __ cmp(r2, r5);
- __ b(ne, &loop);
- // Hit: fetch the optimized code.
- __ add(r5, r1, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ add(r5, r5, Operand::PointerOffsetFromSmiKey(r4));
- __ add(r5, r5, Operand(kPointerSize));
- __ ldr(r4, MemOperand(r5));
-
- __ bind(&install_optimized);
- __ IncrementCounter(counters->fast_new_closure_install_optimized(),
- 1, r6, r7);
-
- // TODO(fschneider): Idea: store proper code pointers in the map and either
- // unmangle them on marking or do nothing as the whole map is discarded on
- // major GC anyway.
- __ add(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ str(r4, FieldMemOperand(r0, JSFunction::kCodeEntryOffset));
-
- // Now link a function into a list of optimized functions.
- __ ldr(r4, ContextOperand(r2, Context::OPTIMIZED_FUNCTIONS_LIST));
-
- __ str(r4, FieldMemOperand(r0, JSFunction::kNextFunctionLinkOffset));
- // No need for write barrier as JSFunction (eax) is in the new space.
-
- __ str(r0, ContextOperand(r2, Context::OPTIMIZED_FUNCTIONS_LIST));
- // Store JSFunction (eax) into edx before issuing write barrier as
- // it clobbers all the registers passed.
- __ mov(r4, r0);
- __ RecordWriteContextSlot(
- r2,
- Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST),
- r4,
- r1,
- kLRHasNotBeenSaved,
- kDontSaveFPRegs);
-
- // Return result. The argument function info has been popped already.
- __ Ret();
-
- // Create a new closure through the slower runtime call.
- __ bind(&gc);
- __ LoadRoot(r4, Heap::kFalseValueRootIndex);
- __ Push(cp, r3, r4);
- __ TailCallRuntime(Runtime::kNewClosure, 3, 1);
-}
-
-
void FastNewContextStub::Generate(MacroAssembler* masm) {
// Try to allocate the context in new space.
Label gc;
@@ -634,7 +517,112 @@ void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
}
-bool WriteInt32ToHeapNumberStub::IsPregenerated() {
+void DoubleToIStub::Generate(MacroAssembler* masm) {
+ Label out_of_range, only_low, negate, done;
+ Register input_reg = source();
+ Register result_reg = destination();
+
+ int double_offset = offset();
+ // Account for saved regs if input is sp.
+ if (input_reg.is(sp)) double_offset += 2 * kPointerSize;
+
+ // Immediate values for this stub fit in instructions, so it's safe to use ip.
+ Register scratch = ip;
+ Register scratch_low =
+ GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch);
+ Register scratch_high =
+ GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch, scratch_low);
+ LowDwVfpRegister double_scratch = kScratchDoubleReg;
+
+ __ Push(scratch_high, scratch_low);
+
+ if (!skip_fastpath()) {
+ // Load double input.
+ __ vldr(double_scratch, MemOperand(input_reg, double_offset));
+ __ vmov(scratch_low, scratch_high, double_scratch);
+
+ // Do fast-path convert from double to int.
+ __ vcvt_s32_f64(double_scratch.low(), double_scratch);
+ __ vmov(result_reg, double_scratch.low());
+
+ // If result is not saturated (0x7fffffff or 0x80000000), we are done.
+ __ sub(scratch, result_reg, Operand(1));
+ __ cmp(scratch, Operand(0x7ffffffe));
+ __ b(lt, &done);
+ } else {
+    // We've already done MacroAssembler::TryFastTruncatedDoubleToILoad, so we
+    // know the exponent is greater than 31; the saturating vcvt_s32_f64 can
+    // be skipped.
+ if (double_offset == 0) {
+ __ ldm(ia, input_reg, scratch_low.bit() | scratch_high.bit());
+ } else {
+ __ ldr(scratch_low, MemOperand(input_reg, double_offset));
+ __ ldr(scratch_high, MemOperand(input_reg, double_offset + kIntSize));
+ }
+ }
+
+ __ Ubfx(scratch, scratch_high,
+ HeapNumber::kExponentShift, HeapNumber::kExponentBits);
+ // Load scratch with exponent - 1. This is faster than loading
+ // with exponent because Bias + 1 = 1024 which is an *ARM* immediate value.
+ STATIC_ASSERT(HeapNumber::kExponentBias + 1 == 1024);
+ __ sub(scratch, scratch, Operand(HeapNumber::kExponentBias + 1));
+  // If the exponent is greater than or equal to 84, the 32 least significant
+  // bits of the integer value are all zero (the 52 mantissa bits are scaled
+  // by 2^(exponent - 52), and exponent - 52 >= 32), so the result is 0.
+ // Compare exponent with 84 (compare exponent - 1 with 83).
+ __ cmp(scratch, Operand(83));
+ __ b(ge, &out_of_range);
+
+ // If we reach this code, 31 <= exponent <= 83.
+ // So, we don't have to handle cases where 0 <= exponent <= 20 for
+ // which we would need to shift right the high part of the mantissa.
+ // Scratch contains exponent - 1.
+ // Load scratch with 52 - exponent (load with 51 - (exponent - 1)).
+ __ rsb(scratch, scratch, Operand(51), SetCC);
+ __ b(ls, &only_low);
+ // 21 <= exponent <= 51, shift scratch_low and scratch_high
+ // to generate the result.
+ __ mov(scratch_low, Operand(scratch_low, LSR, scratch));
+ // Scratch contains: 52 - exponent.
+  // We need: exponent - 20.
+ // So we use: 32 - scratch = 32 - 52 + exponent = exponent - 20.
+ __ rsb(scratch, scratch, Operand(32));
+ __ Ubfx(result_reg, scratch_high,
+ 0, HeapNumber::kMantissaBitsInTopWord);
+ // Set the implicit 1 before the mantissa part in scratch_high.
+ __ orr(result_reg, result_reg,
+ Operand(1 << HeapNumber::kMantissaBitsInTopWord));
+ __ orr(result_reg, scratch_low, Operand(result_reg, LSL, scratch));
+ __ b(&negate);
+
+ __ bind(&out_of_range);
+ __ mov(result_reg, Operand::Zero());
+ __ b(&done);
+
+ __ bind(&only_low);
+ // 52 <= exponent <= 83, shift only scratch_low.
+ // On entry, scratch contains: 52 - exponent.
+ __ rsb(scratch, scratch, Operand::Zero());
+ __ mov(result_reg, Operand(scratch_low, LSL, scratch));
+
+ __ bind(&negate);
+  // If the input was positive, scratch_high ASR 31 equals 0 and
+  // scratch_high LSR 31 equals 0.
+  // New result = (result eor 0) + 0 = result.
+  // If the input was negative, we have to negate the result.
+  // scratch_high ASR 31 equals 0xffffffff and scratch_high LSR 31 equals 1.
+  // New result = (result eor 0xffffffff) + 1 = 0 - result.
+ __ eor(result_reg, result_reg, Operand(scratch_high, ASR, 31));
+ __ add(result_reg, result_reg, Operand(scratch_high, LSR, 31));
+
+ __ bind(&done);
+
+ __ Pop(scratch_high, scratch_low);
+ __ Ret();
+}
+
+
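Aside: the final eor/add pair is branch-free two's-complement negation; a tiny
self-contained check of the identity (hypothetical values, standard C++):

    #include <cassert>
    #include <cstdint>
    int main() {
      int32_t result    = 0x00123456;
      int32_t sign_word = -1;                          // scratch_high < 0
      int32_t mask      = sign_word >> 31;             // ASR 31 -> 0xffffffff
      uint32_t carry    = uint32_t(sign_word) >> 31;   // LSR 31 -> 1
      assert(((result ^ mask) + int32_t(carry)) == -result);  // (x ^ ~0) + 1 == -x
      return 0;
    }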
+bool WriteInt32ToHeapNumberStub::IsPregenerated(Isolate* isolate) {
// These variants are compiled ahead of time. See next method.
if (the_int_.is(r1) && the_heap_number_.is(r0) && scratch_.is(r2)) {
return true;
@@ -1591,7 +1579,6 @@ void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm,
Register right = r0;
Register scratch1 = r6;
Register scratch2 = r7;
- Register scratch3 = r4;
ASSERT(smi_operands || (not_numbers != NULL));
if (smi_operands) {
@@ -1689,12 +1676,8 @@ void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm,
__ SmiUntag(r2, right);
} else {
// Convert operands to 32-bit integers. Right in r2 and left in r3.
- __ ConvertNumberToInt32(
- left, r3, heap_number_map,
- scratch1, scratch2, scratch3, d0, d1, not_numbers);
- __ ConvertNumberToInt32(
- right, r2, heap_number_map,
- scratch1, scratch2, scratch3, d0, d1, not_numbers);
+ __ TruncateNumberToI(left, r3, heap_number_map, scratch1, not_numbers);
+ __ TruncateNumberToI(right, r2, heap_number_map, scratch1, not_numbers);
}
Label result_not_a_smi;
@@ -2508,16 +2491,6 @@ Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
}
-void StackCheckStub::Generate(MacroAssembler* masm) {
- __ TailCallRuntime(Runtime::kStackGuard, 0, 1);
-}
-
-
-void InterruptStub::Generate(MacroAssembler* masm) {
- __ TailCallRuntime(Runtime::kInterrupt, 0, 1);
-}
-
-
void MathPowStub::Generate(MacroAssembler* masm) {
const Register base = r1;
const Register exponent = r2;
@@ -2721,8 +2694,8 @@ bool CEntryStub::NeedsImmovableCode() {
}
-bool CEntryStub::IsPregenerated() {
- return (!save_doubles_ || ISOLATE->fp_stubs_generated()) &&
+bool CEntryStub::IsPregenerated(Isolate* isolate) {
+ return (!save_doubles_ || isolate->fp_stubs_generated()) &&
result_size_ == 1;
}
@@ -5817,7 +5790,6 @@ void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
__ b(lt, &done);
// Check the number to string cache.
- Label not_cached;
__ bind(&not_string);
// Puts the cached result into scratch1.
NumberToStringStub::GenerateLookupNumberStringCache(masm,
@@ -5826,26 +5798,9 @@ void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
scratch2,
scratch3,
scratch4,
- &not_cached);
+ slow);
__ mov(arg, scratch1);
__ str(arg, MemOperand(sp, stack_offset));
- __ jmp(&done);
-
- // Check if the argument is a safe string wrapper.
- __ bind(&not_cached);
- __ JumpIfSmi(arg, slow);
- __ CompareObjectType(
- arg, scratch1, scratch2, JS_VALUE_TYPE); // map -> scratch1.
- __ b(ne, slow);
- __ ldrb(scratch2, FieldMemOperand(scratch1, Map::kBitField2Offset));
- __ and_(scratch2,
- scratch2, Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
- __ cmp(scratch2,
- Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
- __ b(ne, slow);
- __ ldr(arg, FieldMemOperand(arg, JSValue::kValueOffset));
- __ str(arg, MemOperand(sp, stack_offset));
-
__ bind(&done);
}
@@ -6170,6 +6125,11 @@ void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
void DirectCEntryStub::Generate(MacroAssembler* masm) {
+ // Place the return address on the stack, making the call
+ // GC safe. The RegExp backend also relies on this.
+ __ str(lr, MemOperand(sp, 0));
+ __ blx(ip); // Call the C++ function.
+ __ VFPEnsureFPSCRState(r2);
__ ldr(pc, MemOperand(sp, 0));
}
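Aside: the reason for bouncing through the stack slot (a sketch of the shape):

    // str lr, [sp]   ; return address in the exit frame, visible to the GC
    // blx ip         ; call C++; a GC here may move this very code object
    // ldr pc, [sp]   ; return via the slot, which the GC keeps up to date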
@@ -6178,21 +6138,9 @@ void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
Register target) {
intptr_t code =
reinterpret_cast<intptr_t>(GetCode(masm->isolate()).location());
+ __ Move(ip, target);
__ mov(lr, Operand(code, RelocInfo::CODE_TARGET));
-
- // Prevent literal pool emission during calculation of return address.
- Assembler::BlockConstPoolScope block_const_pool(masm);
-
- // Push return address (accessible to GC through exit frame pc).
- // Note that using pc with str is deprecated.
- Label start;
- __ bind(&start);
- __ add(ip, pc, Operand(Assembler::kInstrSize));
- __ str(ip, MemOperand(sp, 0));
- __ Jump(target); // Call the C++ function.
- ASSERT_EQ(Assembler::kInstrSize + Assembler::kPcLoadDelta,
- masm->SizeOfCodeGeneratedSince(&start));
- __ VFPEnsureFPSCRState(r2);
+ __ blx(lr); // Call the stub.
}
@@ -6458,8 +6406,6 @@ static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
// Also used in StoreIC::GenerateNormal via GenerateDictionaryStore.
// Also used in KeyedStoreIC::GenerateGeneric.
{ REG(r3), REG(r4), REG(r5), EMIT_REMEMBERED_SET },
- // Used in CompileStoreGlobal.
- { REG(r4), REG(r1), REG(r2), OMIT_REMEMBERED_SET },
// Used in StoreStubCompiler::CompileStoreField via GenerateStoreField.
{ REG(r1), REG(r2), REG(r3), EMIT_REMEMBERED_SET },
{ REG(r3), REG(r2), REG(r1), EMIT_REMEMBERED_SET },
@@ -6491,7 +6437,7 @@ static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
#undef REG
-bool RecordWriteStub::IsPregenerated() {
+bool RecordWriteStub::IsPregenerated(Isolate* isolate) {
for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
!entry->object.is(no_reg);
entry++) {
@@ -6870,6 +6816,9 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
#else
// Under the simulator we need to indirect the entry hook through a
// trampoline function at a known address.
+  // It additionally takes an isolate as a third parameter.
+ __ mov(r2, Operand(ExternalReference::isolate_address(masm->isolate())));
+
ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline));
__ mov(ip, Operand(ExternalReference(&dispatcher,
ExternalReference::BUILTIN_CALL,
@@ -6888,90 +6837,128 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
template<class T>
-static void CreateArrayDispatch(MacroAssembler* masm) {
- int last_index = GetSequenceIndexFromFastElementsKind(
- TERMINAL_FAST_ELEMENTS_KIND);
- for (int i = 0; i <= last_index; ++i) {
- Label next;
- ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
- __ cmp(r3, Operand(kind));
- __ b(ne, &next);
- T stub(kind);
+static void CreateArrayDispatch(MacroAssembler* masm,
+ AllocationSiteOverrideMode mode) {
+ if (mode == DISABLE_ALLOCATION_SITES) {
+ T stub(GetInitialFastElementsKind(),
+ CONTEXT_CHECK_REQUIRED,
+ mode);
__ TailCallStub(&stub);
- __ bind(&next);
- }
+ } else if (mode == DONT_OVERRIDE) {
+ int last_index = GetSequenceIndexFromFastElementsKind(
+ TERMINAL_FAST_ELEMENTS_KIND);
+ for (int i = 0; i <= last_index; ++i) {
+ Label next;
+ ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
+ __ cmp(r3, Operand(kind));
+ __ b(ne, &next);
+ T stub(kind);
+ __ TailCallStub(&stub);
+ __ bind(&next);
+ }
- // If we reached this point there is a problem.
- __ Abort(kUnexpectedElementsKindInArrayConstructor);
+ // If we reached this point there is a problem.
+ __ Abort(kUnexpectedElementsKindInArrayConstructor);
+ } else {
+ UNREACHABLE();
+ }
}
-static void CreateArrayDispatchOneArgument(MacroAssembler* masm) {
- // r2 - type info cell
- // r3 - kind
+static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
+ AllocationSiteOverrideMode mode) {
+ // r2 - type info cell (if mode != DISABLE_ALLOCATION_SITES)
+ // r3 - kind (if mode != DISABLE_ALLOCATION_SITES)
// r0 - number of arguments
// r1 - constructor?
// sp[0] - last argument
- ASSERT(FAST_SMI_ELEMENTS == 0);
- ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
- ASSERT(FAST_ELEMENTS == 2);
- ASSERT(FAST_HOLEY_ELEMENTS == 3);
- ASSERT(FAST_DOUBLE_ELEMENTS == 4);
- ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
-
- // is the low bit set? If so, we are holey and that is good.
- __ tst(r3, Operand(1));
Label normal_sequence;
- __ b(ne, &normal_sequence);
+ if (mode == DONT_OVERRIDE) {
+ ASSERT(FAST_SMI_ELEMENTS == 0);
+ ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+ ASSERT(FAST_ELEMENTS == 2);
+ ASSERT(FAST_HOLEY_ELEMENTS == 3);
+ ASSERT(FAST_DOUBLE_ELEMENTS == 4);
+ ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
+
+    // Is the low bit set? If so, we are holey and that is good.
+ __ tst(r3, Operand(1));
+ __ b(ne, &normal_sequence);
+ }
// look at the first argument
__ ldr(r5, MemOperand(sp, 0));
__ cmp(r5, Operand::Zero());
__ b(eq, &normal_sequence);
- // We are going to create a holey array, but our kind is non-holey.
- // Fix kind and retry (only if we have an allocation site in the cell).
- __ add(r3, r3, Operand(1));
- __ CompareRoot(r2, Heap::kUndefinedValueRootIndex);
- __ b(eq, &normal_sequence);
- __ ldr(r5, FieldMemOperand(r2, Cell::kValueOffset));
- __ ldr(r5, FieldMemOperand(r5, 0));
- __ CompareRoot(r5, Heap::kAllocationSiteMapRootIndex);
- __ b(ne, &normal_sequence);
+ if (mode == DISABLE_ALLOCATION_SITES) {
+ ElementsKind initial = GetInitialFastElementsKind();
+ ElementsKind holey_initial = GetHoleyElementsKind(initial);
- // Save the resulting elements kind in type info
- __ SmiTag(r3);
- __ ldr(r5, FieldMemOperand(r2, Cell::kValueOffset));
- __ str(r3, FieldMemOperand(r5, AllocationSite::kTransitionInfoOffset));
- __ SmiUntag(r3);
+ ArraySingleArgumentConstructorStub stub_holey(holey_initial,
+ CONTEXT_CHECK_REQUIRED,
+ DISABLE_ALLOCATION_SITES);
+ __ TailCallStub(&stub_holey);
- __ bind(&normal_sequence);
- int last_index = GetSequenceIndexFromFastElementsKind(
- TERMINAL_FAST_ELEMENTS_KIND);
- for (int i = 0; i <= last_index; ++i) {
- Label next;
- ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
- __ cmp(r3, Operand(kind));
- __ b(ne, &next);
- ArraySingleArgumentConstructorStub stub(kind);
+ __ bind(&normal_sequence);
+ ArraySingleArgumentConstructorStub stub(initial,
+ CONTEXT_CHECK_REQUIRED,
+ DISABLE_ALLOCATION_SITES);
__ TailCallStub(&stub);
- __ bind(&next);
- }
+ } else if (mode == DONT_OVERRIDE) {
+ // We are going to create a holey array, but our kind is non-holey.
+ // Fix kind and retry (only if we have an allocation site in the cell).
+ __ add(r3, r3, Operand(1));
+ __ ldr(r5, FieldMemOperand(r2, Cell::kValueOffset));
+
+ if (FLAG_debug_code) {
+ __ ldr(r5, FieldMemOperand(r5, 0));
+ __ CompareRoot(r5, Heap::kAllocationSiteMapRootIndex);
+ __ Assert(eq, kExpectedAllocationSiteInCell);
+ __ ldr(r5, FieldMemOperand(r2, Cell::kValueOffset));
+ }
- // If we reached this point there is a problem.
- __ Abort(kUnexpectedElementsKindInArrayConstructor);
+ // Save the resulting elements kind in type info
+ __ SmiTag(r3);
+ __ ldr(r5, FieldMemOperand(r2, Cell::kValueOffset));
+ __ str(r3, FieldMemOperand(r5, AllocationSite::kTransitionInfoOffset));
+ __ SmiUntag(r3);
+
+ __ bind(&normal_sequence);
+ int last_index = GetSequenceIndexFromFastElementsKind(
+ TERMINAL_FAST_ELEMENTS_KIND);
+ for (int i = 0; i <= last_index; ++i) {
+ Label next;
+ ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
+ __ cmp(r3, Operand(kind));
+ __ b(ne, &next);
+ ArraySingleArgumentConstructorStub stub(kind);
+ __ TailCallStub(&stub);
+ __ bind(&next);
+ }
+
+ // If we reached this point there is a problem.
+ __ Abort(kUnexpectedElementsKindInArrayConstructor);
+ } else {
+ UNREACHABLE();
+ }
}
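Aside: both the "low bit set" test and the add-one fix-up follow from the kind
numbering asserted above:

    // FAST_SMI_ELEMENTS    = 0   FAST_HOLEY_SMI_ELEMENTS    = 1
    // FAST_ELEMENTS        = 2   FAST_HOLEY_ELEMENTS        = 3
    // FAST_DOUBLE_ELEMENTS = 4   FAST_HOLEY_DOUBLE_ELEMENTS = 5
    // Each holey kind is its packed kind + 1, so (kind & 1) != 0 tests for
    // holeyness and add(r3, r3, Operand(1)) transitions packed -> holey.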
template<class T>
static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
+ ElementsKind initial_kind = GetInitialFastElementsKind();
+ ElementsKind initial_holey_kind = GetHoleyElementsKind(initial_kind);
+
int to_index = GetSequenceIndexFromFastElementsKind(
TERMINAL_FAST_ELEMENTS_KIND);
for (int i = 0; i <= to_index; ++i) {
ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
T stub(kind);
stub.GetCode(isolate)->set_is_pregenerated(true);
- if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
+ if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE ||
+ (!FLAG_track_allocation_sites &&
+ (kind == initial_kind || kind == initial_holey_kind))) {
T stub1(kind, CONTEXT_CHECK_REQUIRED, DISABLE_ALLOCATION_SITES);
stub1.GetCode(isolate)->set_is_pregenerated(true);
}
@@ -7004,6 +6991,34 @@ void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
}
+void ArrayConstructorStub::GenerateDispatchToArrayStub(
+ MacroAssembler* masm,
+ AllocationSiteOverrideMode mode) {
+ if (argument_count_ == ANY) {
+ Label not_zero_case, not_one_case;
+ __ tst(r0, r0);
+ __ b(ne, &not_zero_case);
+ CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
+
+ __ bind(&not_zero_case);
+ __ cmp(r0, Operand(1));
+ __ b(gt, &not_one_case);
+ CreateArrayDispatchOneArgument(masm, mode);
+
+ __ bind(&not_one_case);
+ CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
+ } else if (argument_count_ == NONE) {
+ CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
+ } else if (argument_count_ == ONE) {
+ CreateArrayDispatchOneArgument(masm, mode);
+ } else if (argument_count_ == MORE_THAN_ONE) {
+ CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
+ } else {
+ UNREACHABLE();
+ }
+}
+
+
void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : argc (only if argument_count_ == ANY)
@@ -7035,50 +7050,24 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
__ bind(&okay_here);
}
- Label no_info, switch_ready;
+ Label no_info;
// Get the elements kind and case on that.
__ CompareRoot(r2, Heap::kUndefinedValueRootIndex);
__ b(eq, &no_info);
__ ldr(r3, FieldMemOperand(r2, Cell::kValueOffset));
- // The type cell may have undefined in its value.
- __ CompareRoot(r3, Heap::kUndefinedValueRootIndex);
- __ b(eq, &no_info);
-
- // The type cell has either an AllocationSite or a JSFunction
+ // If the type cell is undefined, or contains anything other than an
+ // AllocationSite, call an array constructor that doesn't use AllocationSites.
__ ldr(r4, FieldMemOperand(r3, 0));
__ CompareRoot(r4, Heap::kAllocationSiteMapRootIndex);
__ b(ne, &no_info);
__ ldr(r3, FieldMemOperand(r3, AllocationSite::kTransitionInfoOffset));
__ SmiUntag(r3);
- __ jmp(&switch_ready);
- __ bind(&no_info);
- __ mov(r3, Operand(GetInitialFastElementsKind()));
- __ bind(&switch_ready);
+ GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);
- if (argument_count_ == ANY) {
- Label not_zero_case, not_one_case;
- __ tst(r0, r0);
- __ b(ne, &not_zero_case);
- CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm);
-
- __ bind(&not_zero_case);
- __ cmp(r0, Operand(1));
- __ b(gt, &not_one_case);
- CreateArrayDispatchOneArgument(masm);
-
- __ bind(&not_one_case);
- CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm);
- } else if (argument_count_ == NONE) {
- CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm);
- } else if (argument_count_ == ONE) {
- CreateArrayDispatchOneArgument(masm);
- } else if (argument_count_ == MORE_THAN_ONE) {
- CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm);
- } else {
- UNREACHABLE();
- }
+ __ bind(&no_info);
+ GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);
}
diff --git a/deps/v8/src/arm/code-stubs-arm.h b/deps/v8/src/arm/code-stubs-arm.h
index 6eab8d128..d05e9a1d8 100644
--- a/deps/v8/src/arm/code-stubs-arm.h
+++ b/deps/v8/src/arm/code-stubs-arm.h
@@ -68,7 +68,7 @@ class StoreBufferOverflowStub: public PlatformCodeStub {
void Generate(MacroAssembler* masm);
- virtual bool IsPregenerated() { return true; }
+ virtual bool IsPregenerated(Isolate* isolate) V8_OVERRIDE { return true; }
static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
virtual bool SometimesSetsUpAFrame() { return false; }
@@ -232,7 +232,7 @@ class WriteInt32ToHeapNumberStub : public PlatformCodeStub {
the_heap_number_(the_heap_number),
scratch_(scratch) { }
- bool IsPregenerated();
+ virtual bool IsPregenerated(Isolate* isolate) V8_OVERRIDE;
static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
private:
@@ -305,7 +305,7 @@ class RecordWriteStub: public PlatformCodeStub {
INCREMENTAL_COMPACTION
};
- virtual bool IsPregenerated();
+ virtual bool IsPregenerated(Isolate* isolate) V8_OVERRIDE;
static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
virtual bool SometimesSetsUpAFrame() { return false; }
@@ -376,7 +376,7 @@ class RecordWriteStub: public PlatformCodeStub {
address_(address),
scratch0_(scratch0) {
ASSERT(!AreAliased(scratch0, object, address, no_reg));
- scratch1_ = GetRegThatIsNotOneOf(object_, address_, scratch0_);
+ scratch1_ = GetRegisterThatIsNotOneOf(object_, address_, scratch0_);
}
void Save(MacroAssembler* masm) {
@@ -419,19 +419,6 @@ class RecordWriteStub: public PlatformCodeStub {
Register scratch0_;
Register scratch1_;
- Register GetRegThatIsNotOneOf(Register r1,
- Register r2,
- Register r3) {
- for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
- Register candidate = Register::FromAllocationIndex(i);
- if (candidate.is(r1)) continue;
- if (candidate.is(r2)) continue;
- if (candidate.is(r3)) continue;
- return candidate;
- }
- UNREACHABLE();
- return no_reg;
- }
friend class RecordWriteStub;
};
@@ -478,23 +465,6 @@ class RecordWriteStub: public PlatformCodeStub {
};
-// Enter C code from generated RegExp code in a way that allows
-// the C code to fix the return address in case of a GC.
-// Currently only needed on ARM.
-class RegExpCEntryStub: public PlatformCodeStub {
- public:
- RegExpCEntryStub() {}
- virtual ~RegExpCEntryStub() {}
- void Generate(MacroAssembler* masm);
-
- private:
- Major MajorKey() { return RegExpCEntry; }
- int MinorKey() { return 0; }
-
- bool NeedsImmovableCode() { return true; }
-};
-
-
// Trampoline stub to call into native code. To call safely into native code
// in the presence of compacting GC (which can move code objects) we need to
// keep the code which called into native pinned in the memory. Currently the
diff --git a/deps/v8/src/arm/codegen-arm.h b/deps/v8/src/arm/codegen-arm.h
index c020ab601..54530d872 100644
--- a/deps/v8/src/arm/codegen-arm.h
+++ b/deps/v8/src/arm/codegen-arm.h
@@ -44,8 +44,8 @@ enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
class CodeGenerator: public AstVisitor {
public:
- CodeGenerator() {
- InitializeAstVisitor();
+ explicit CodeGenerator(Isolate* isolate) {
+ InitializeAstVisitor(isolate);
}
static bool MakeCode(CompilationInfo* info);
@@ -61,7 +61,7 @@ class CodeGenerator: public AstVisitor {
// Print the code after compiling it.
static void PrintCode(Handle<Code> code, CompilationInfo* info);
- static bool ShouldGenerateLog(Expression* type);
+ static bool ShouldGenerateLog(Isolate* isolate, Expression* type);
static void SetFunctionInfo(Handle<JSFunction> fun,
FunctionLiteral* lit,
diff --git a/deps/v8/src/arm/constants-arm.h b/deps/v8/src/arm/constants-arm.h
index 9bfccf822..703613932 100644
--- a/deps/v8/src/arm/constants-arm.h
+++ b/deps/v8/src/arm/constants-arm.h
@@ -220,6 +220,8 @@ enum {
kCoprocessorMask = 15 << 8,
kOpCodeMask = 15 << 21, // In data-processing instructions.
kImm24Mask = (1 << 24) - 1,
+ kImm16Mask = (1 << 16) - 1,
+ kImm8Mask = (1 << 8) - 1,
kOff12Mask = (1 << 12) - 1,
kOff8Mask = (1 << 8) - 1
};
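Aside: the new masks spelled out next to the existing one (values only):

    // kImm24Mask = 0x00ffffff   (existing; 24-bit branch immediates)
    // kImm16Mask = 0x0000ffff   (new; splits target24 for movw/movt)
    // kImm8Mask  = 0x000000ff   (new; splits target24 for mov/orr/orr)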
diff --git a/deps/v8/src/arm/cpu-arm.cc b/deps/v8/src/arm/cpu-arm.cc
index 8766a24bb..cf531e129 100644
--- a/deps/v8/src/arm/cpu-arm.cc
+++ b/deps/v8/src/arm/cpu-arm.cc
@@ -106,15 +106,6 @@ void CPU::FlushICache(void* start, size_t size) {
#endif
}
-
-void CPU::DebugBreak() {
-#if !defined (__arm__)
- UNIMPLEMENTED(); // when building ARM emulator target
-#else
- asm volatile("bkpt 0");
-#endif
-}
-
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_ARM
diff --git a/deps/v8/src/arm/debug-arm.cc b/deps/v8/src/arm/debug-arm.cc
index 108435f0a..efd11069b 100644
--- a/deps/v8/src/arm/debug-arm.cc
+++ b/deps/v8/src/arm/debug-arm.cc
@@ -55,7 +55,8 @@ void BreakLocationIterator::SetDebugBreakAtReturn() {
CodePatcher patcher(rinfo()->pc(), Assembler::kJSReturnSequenceInstructions);
patcher.masm()->ldr(v8::internal::ip, MemOperand(v8::internal::pc, 0));
patcher.masm()->blx(v8::internal::ip);
- patcher.Emit(Isolate::Current()->debug()->debug_break_return()->entry());
+ patcher.Emit(
+ debug_info_->GetIsolate()->debug()->debug_break_return()->entry());
patcher.masm()->bkpt(0);
}
@@ -95,7 +96,8 @@ void BreakLocationIterator::SetDebugBreakAtSlot() {
CodePatcher patcher(rinfo()->pc(), Assembler::kDebugBreakSlotInstructions);
patcher.masm()->ldr(v8::internal::ip, MemOperand(v8::internal::pc, 0));
patcher.masm()->blx(v8::internal::ip);
- patcher.Emit(Isolate::Current()->debug()->debug_break_slot()->entry());
+ patcher.Emit(
+ debug_info_->GetIsolate()->debug()->debug_break_slot()->entry());
}
diff --git a/deps/v8/src/arm/deoptimizer-arm.cc b/deps/v8/src/arm/deoptimizer-arm.cc
index 5b42116ad..3c57b6439 100644
--- a/deps/v8/src/arm/deoptimizer-arm.cc
+++ b/deps/v8/src/arm/deoptimizer-arm.cc
@@ -101,12 +101,7 @@ static const int32_t kBranchBeforeInterrupt = 0x5a000004;
void Deoptimizer::PatchInterruptCodeAt(Code* unoptimized_code,
Address pc_after,
- Code* interrupt_code,
Code* replacement_code) {
- ASSERT(!InterruptCodeIsPatched(unoptimized_code,
- pc_after,
- interrupt_code,
- replacement_code));
static const int kInstrSize = Assembler::kInstrSize;
// Turn the jump into nops.
CodePatcher patcher(pc_after - 3 * kInstrSize, 1);
@@ -125,12 +120,7 @@ void Deoptimizer::PatchInterruptCodeAt(Code* unoptimized_code,
void Deoptimizer::RevertInterruptCodeAt(Code* unoptimized_code,
Address pc_after,
- Code* interrupt_code,
- Code* replacement_code) {
- ASSERT(InterruptCodeIsPatched(unoptimized_code,
- pc_after,
- interrupt_code,
- replacement_code));
+ Code* interrupt_code) {
static const int kInstrSize = Assembler::kInstrSize;
// Restore the original jump.
CodePatcher patcher(pc_after - 3 * kInstrSize, 1);
@@ -150,10 +140,10 @@ void Deoptimizer::RevertInterruptCodeAt(Code* unoptimized_code,
#ifdef DEBUG
-bool Deoptimizer::InterruptCodeIsPatched(Code* unoptimized_code,
- Address pc_after,
- Code* interrupt_code,
- Code* replacement_code) {
+Deoptimizer::InterruptPatchState Deoptimizer::GetInterruptPatchState(
+ Isolate* isolate,
+ Code* unoptimized_code,
+ Address pc_after) {
static const int kInstrSize = Assembler::kInstrSize;
ASSERT(Memory::int32_at(pc_after - kInstrSize) == kBlxIp);
@@ -164,185 +154,27 @@ bool Deoptimizer::InterruptCodeIsPatched(Code* unoptimized_code,
if (Assembler::IsNop(Assembler::instr_at(pc_after - 3 * kInstrSize))) {
ASSERT(Assembler::IsLdrPcImmediateOffset(
Assembler::instr_at(pc_after - 2 * kInstrSize)));
- ASSERT(reinterpret_cast<uint32_t>(replacement_code->entry()) ==
+ Code* osr_builtin =
+ isolate->builtins()->builtin(Builtins::kOnStackReplacement);
+ ASSERT(reinterpret_cast<uint32_t>(osr_builtin->entry()) ==
Memory::uint32_at(interrupt_address_pointer));
- return true;
+ return PATCHED_FOR_OSR;
} else {
+ // Get the interrupt stub code object to match against from cache.
+ Code* interrupt_builtin =
+ isolate->builtins()->builtin(Builtins::kInterruptCheck);
ASSERT(Assembler::IsLdrPcImmediateOffset(
Assembler::instr_at(pc_after - 2 * kInstrSize)));
ASSERT_EQ(kBranchBeforeInterrupt,
Memory::int32_at(pc_after - 3 * kInstrSize));
- ASSERT(reinterpret_cast<uint32_t>(interrupt_code->entry()) ==
+ ASSERT(reinterpret_cast<uint32_t>(interrupt_builtin->entry()) ==
Memory::uint32_at(interrupt_address_pointer));
- return false;
+ return NOT_PATCHED;
}
}
#endif // DEBUG
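Aside: the two shapes the back-edge patch site can take, reconstructed from
the asserts above:

    // Unpatched (interrupt check armed):
    //   b <cond> <past the check>   ; kBranchBeforeInterrupt (0x5a000004)
    //   ldr ip, [pc, #offset]       ; pool slot -> InterruptCheck builtin
    //   blx ip
    // Patched for OSR: the branch becomes a nop and the pool slot is
    // rewritten to hold the OnStackReplacement builtin entry instead.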
-static int LookupBailoutId(DeoptimizationInputData* data, BailoutId ast_id) {
- ByteArray* translations = data->TranslationByteArray();
- int length = data->DeoptCount();
- for (int i = 0; i < length; i++) {
- if (data->AstId(i) == ast_id) {
- TranslationIterator it(translations, data->TranslationIndex(i)->value());
- int value = it.Next();
- ASSERT(Translation::BEGIN == static_cast<Translation::Opcode>(value));
- // Read the number of frames.
- value = it.Next();
- if (value == 1) return i;
- }
- }
- UNREACHABLE();
- return -1;
-}
-
-
-void Deoptimizer::DoComputeOsrOutputFrame() {
- DeoptimizationInputData* data = DeoptimizationInputData::cast(
- compiled_code_->deoptimization_data());
- unsigned ast_id = data->OsrAstId()->value();
-
- int bailout_id = LookupBailoutId(data, BailoutId(ast_id));
- unsigned translation_index = data->TranslationIndex(bailout_id)->value();
- ByteArray* translations = data->TranslationByteArray();
-
- TranslationIterator iterator(translations, translation_index);
- Translation::Opcode opcode =
- static_cast<Translation::Opcode>(iterator.Next());
- ASSERT(Translation::BEGIN == opcode);
- USE(opcode);
- int count = iterator.Next();
- iterator.Skip(1); // Drop JS frame count.
- ASSERT(count == 1);
- USE(count);
-
- opcode = static_cast<Translation::Opcode>(iterator.Next());
- USE(opcode);
- ASSERT(Translation::JS_FRAME == opcode);
- unsigned node_id = iterator.Next();
- USE(node_id);
- ASSERT(node_id == ast_id);
- int closure_id = iterator.Next();
- USE(closure_id);
- ASSERT_EQ(Translation::kSelfLiteralId, closure_id);
- unsigned height = iterator.Next();
- unsigned height_in_bytes = height * kPointerSize;
- USE(height_in_bytes);
-
- unsigned fixed_size = ComputeFixedSize(function_);
- unsigned input_frame_size = input_->GetFrameSize();
- ASSERT(fixed_size + height_in_bytes == input_frame_size);
-
- unsigned stack_slot_size = compiled_code_->stack_slots() * kPointerSize;
- unsigned outgoing_height = data->ArgumentsStackHeight(bailout_id)->value();
- unsigned outgoing_size = outgoing_height * kPointerSize;
- unsigned output_frame_size = fixed_size + stack_slot_size + outgoing_size;
- ASSERT(outgoing_size == 0); // OSR does not happen in the middle of a call.
-
- if (FLAG_trace_osr) {
- PrintF("[on-stack replacement: begin 0x%08" V8PRIxPTR " ",
- reinterpret_cast<intptr_t>(function_));
- PrintFunctionName();
- PrintF(" => node=%u, frame=%d->%d]\n",
- ast_id,
- input_frame_size,
- output_frame_size);
- }
-
- // There's only one output frame in the OSR case.
- output_count_ = 1;
- output_ = new FrameDescription*[1];
- output_[0] = new(output_frame_size) FrameDescription(
- output_frame_size, function_);
- output_[0]->SetFrameType(StackFrame::JAVA_SCRIPT);
-
- // Clear the incoming parameters in the optimized frame to avoid
- // confusing the garbage collector.
- unsigned output_offset = output_frame_size - kPointerSize;
- int parameter_count = function_->shared()->formal_parameter_count() + 1;
- for (int i = 0; i < parameter_count; ++i) {
- output_[0]->SetFrameSlot(output_offset, 0);
- output_offset -= kPointerSize;
- }
-
- // Translate the incoming parameters. This may overwrite some of the
- // incoming argument slots we've just cleared.
- int input_offset = input_frame_size - kPointerSize;
- bool ok = true;
- int limit = input_offset - (parameter_count * kPointerSize);
- while (ok && input_offset > limit) {
- ok = DoOsrTranslateCommand(&iterator, &input_offset);
- }
-
- // There are no translation commands for the caller's pc and fp, the
- // context, and the function. Set them up explicitly.
- for (int i = StandardFrameConstants::kCallerPCOffset;
- ok && i >= StandardFrameConstants::kMarkerOffset;
- i -= kPointerSize) {
- uint32_t input_value = input_->GetFrameSlot(input_offset);
- if (FLAG_trace_osr) {
- const char* name = "UNKNOWN";
- switch (i) {
- case StandardFrameConstants::kCallerPCOffset:
- name = "caller's pc";
- break;
- case StandardFrameConstants::kCallerFPOffset:
- name = "fp";
- break;
- case StandardFrameConstants::kContextOffset:
- name = "context";
- break;
- case StandardFrameConstants::kMarkerOffset:
- name = "function";
- break;
- }
- PrintF(" [sp + %d] <- 0x%08x ; [sp + %d] (fixed part - %s)\n",
- output_offset,
- input_value,
- input_offset,
- name);
- }
-
- output_[0]->SetFrameSlot(output_offset, input_->GetFrameSlot(input_offset));
- input_offset -= kPointerSize;
- output_offset -= kPointerSize;
- }
-
- // Translate the rest of the frame.
- while (ok && input_offset >= 0) {
- ok = DoOsrTranslateCommand(&iterator, &input_offset);
- }
-
- // If translation of any command failed, continue using the input frame.
- if (!ok) {
- delete output_[0];
- output_[0] = input_;
- output_[0]->SetPc(reinterpret_cast<uint32_t>(from_));
- } else {
- // Set up the frame pointer and the context pointer.
- output_[0]->SetRegister(fp.code(), input_->GetRegister(fp.code()));
- output_[0]->SetRegister(cp.code(), input_->GetRegister(cp.code()));
-
- unsigned pc_offset = data->OsrPcOffset()->value();
- uint32_t pc = reinterpret_cast<uint32_t>(
- compiled_code_->entry() + pc_offset);
- output_[0]->SetPc(pc);
- }
- Code* continuation = isolate_->builtins()->builtin(Builtins::kNotifyOSR);
- output_[0]->SetContinuation(
- reinterpret_cast<uint32_t>(continuation->entry()));
-
- if (FLAG_trace_osr) {
- PrintF("[on-stack replacement translation %s: 0x%08" V8PRIxPTR " ",
- ok ? "finished" : "aborted",
- reinterpret_cast<intptr_t>(function_));
- PrintFunctionName();
- PrintF(" => pc=0x%0x]\n", output_[0]->GetPc());
- }
-}
-
-
void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
// Set the register values. The values are not important as there are no
// callee saved registers in JavaScript frames, so all registers are
@@ -555,11 +387,8 @@ void Deoptimizer::EntryGenerator::Generate() {
}
// Push state, pc, and continuation from the last output frame.
- if (type() != OSR) {
- __ ldr(r6, MemOperand(r2, FrameDescription::state_offset()));
- __ push(r6);
- }
-
+ __ ldr(r6, MemOperand(r2, FrameDescription::state_offset()));
+ __ push(r6);
__ ldr(r6, MemOperand(r2, FrameDescription::pc_offset()));
__ push(r6);
__ ldr(r6, MemOperand(r2, FrameDescription::continuation_offset()));
diff --git a/deps/v8/src/arm/disasm-arm.cc b/deps/v8/src/arm/disasm-arm.cc
index ecdf638a1..acffaa3f2 100644
--- a/deps/v8/src/arm/disasm-arm.cc
+++ b/deps/v8/src/arm/disasm-arm.cc
@@ -50,9 +50,6 @@
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
-#ifndef WIN32
-#include <stdint.h>
-#endif
#include "v8.h"
diff --git a/deps/v8/src/arm/full-codegen-arm.cc b/deps/v8/src/arm/full-codegen-arm.cc
index b73006a17..b6fb70b5d 100644
--- a/deps/v8/src/arm/full-codegen-arm.cc
+++ b/deps/v8/src/arm/full-codegen-arm.cc
@@ -296,8 +296,7 @@ void FullCodeGenerator::Generate() {
__ cmp(sp, Operand(ip));
__ b(hs, &ok);
PredictableCodeSizeScope predictable(masm_, 2 * Assembler::kInstrSize);
- StackCheckStub stub;
- __ CallStub(&stub);
+ __ Call(isolate()->builtins()->StackCheck(), RelocInfo::CODE_TARGET);
__ bind(&ok);
}
@@ -366,8 +365,7 @@ void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
}
EmitProfilingCounterDecrement(weight);
__ b(pl, &ok);
- InterruptStub stub;
- __ CallStub(&stub);
+ __ Call(isolate()->builtins()->InterruptCheck(), RelocInfo::CODE_TARGET);
// Record a mapping of this PC offset to the OSR id. This is used to find
// the AST id from the unoptimized code in order to use it as a key into
@@ -416,8 +414,8 @@ void FullCodeGenerator::EmitReturnSequence() {
__ push(r2);
__ CallRuntime(Runtime::kOptimizeFunctionOnNextCall, 1);
} else {
- InterruptStub stub;
- __ CallStub(&stub);
+ __ Call(isolate()->builtins()->InterruptCheck(),
+ RelocInfo::CODE_TARGET);
}
__ pop(r0);
EmitProfilingCounterReset();
@@ -1330,8 +1328,7 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
scope()->is_function_scope() &&
info->num_literals() == 0) {
FastNewClosureStub stub(info->language_mode(), info->is_generator());
- __ mov(r0, Operand(info));
- __ push(r0);
+ __ mov(r2, Operand(info));
__ CallStub(&stub);
} else {
__ mov(r0, Operand(info));
@@ -3010,7 +3007,7 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
VisitForAccumulatorValue(args->at(0));
- Label materialize_true, materialize_false;
+ Label materialize_true, materialize_false, skip_lookup;
Label* if_true = NULL;
Label* if_false = NULL;
Label* fall_through = NULL;
@@ -3022,7 +3019,7 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
__ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
__ ldrb(ip, FieldMemOperand(r1, Map::kBitField2Offset));
__ tst(ip, Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
- __ b(ne, if_true);
+ __ b(ne, &skip_lookup);
// Check for fast case object. Generate false result for slow case object.
__ ldr(r2, FieldMemOperand(r0, JSObject::kPropertiesOffset));
@@ -3068,6 +3065,14 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
__ b(ne, &loop);
__ bind(&done);
+
+ // Set the bit in the map to indicate that there is no local valueOf field.
+ __ ldrb(r2, FieldMemOperand(r1, Map::kBitField2Offset));
+ __ orr(r2, r2, Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
+ __ strb(r2, FieldMemOperand(r1, Map::kBitField2Offset));
+
+ __ bind(&skip_lookup);
+
// If a valueOf property is not found on the object check that its
// prototype is the un-modified String prototype. If not result is false.
__ ldr(r2, FieldMemOperand(r1, Map::kPrototypeOffset));
@@ -3077,16 +3082,9 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
__ ldr(r3, FieldMemOperand(r3, GlobalObject::kNativeContextOffset));
__ ldr(r3, ContextOperand(r3, Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX));
__ cmp(r2, r3);
- __ b(ne, if_false);
-
- // Set the bit in the map to indicate that it has been checked safe for
- // default valueOf and set true result.
- __ ldrb(r2, FieldMemOperand(r1, Map::kBitField2Offset));
- __ orr(r2, r2, Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
- __ strb(r2, FieldMemOperand(r1, Map::kBitField2Offset));
- __ jmp(if_true);
-
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(eq, if_true, if_false, fall_through);
+
context()->Plug(if_true, if_false);
}
@@ -3320,7 +3318,7 @@ void FullCodeGenerator::EmitLog(CallRuntime* expr) {
// 2 (array): Arguments to the format string.
ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(args->length(), 3);
- if (CodeGenerator::ShouldGenerateLog(args->at(0))) {
+ if (CodeGenerator::ShouldGenerateLog(isolate(), args->at(0))) {
VisitForStackValue(args->at(1));
VisitForStackValue(args->at(2));
__ CallRuntime(Runtime::kLog, 2);
diff --git a/deps/v8/src/arm/ic-arm.cc b/deps/v8/src/arm/ic-arm.cc
index 511a3c74f..f15d4b11f 100644
--- a/deps/v8/src/arm/ic-arm.cc
+++ b/deps/v8/src/arm/ic-arm.cc
@@ -354,7 +354,7 @@ void CallICBase::GenerateMonomorphicCacheProbe(MacroAssembler* masm,
extra_state,
Code::NORMAL,
argc);
- Isolate::Current()->stub_cache()->GenerateProbe(
+ masm->isolate()->stub_cache()->GenerateProbe(
masm, flags, r1, r2, r3, r4, r5, r6);
// If the stub cache probing failed, the receiver might be a value.
@@ -393,7 +393,7 @@ void CallICBase::GenerateMonomorphicCacheProbe(MacroAssembler* masm,
// Probe the stub cache for the value object.
__ bind(&probe);
- Isolate::Current()->stub_cache()->GenerateProbe(
+ masm->isolate()->stub_cache()->GenerateProbe(
masm, flags, r1, r2, r3, r4, r5, r6);
__ bind(&miss);
@@ -658,7 +658,7 @@ void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
Code::Flags flags = Code::ComputeFlags(
Code::STUB, MONOMORPHIC, Code::kNoExtraICState,
Code::NORMAL, Code::LOAD_IC);
- Isolate::Current()->stub_cache()->GenerateProbe(
+ masm->isolate()->stub_cache()->GenerateProbe(
masm, flags, r0, r2, r3, r4, r5, r6);
// Cache miss: Jump to runtime.
@@ -1490,7 +1490,7 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
Code::STUB, MONOMORPHIC, strict_mode,
Code::NORMAL, Code::STORE_IC);
- Isolate::Current()->stub_cache()->GenerateProbe(
+ masm->isolate()->stub_cache()->GenerateProbe(
masm, flags, r1, r2, r3, r4, r5, r6);
// Cache miss: Jump to runtime.
diff --git a/deps/v8/src/arm/lithium-arm.cc b/deps/v8/src/arm/lithium-arm.cc
index fe299abfe..59a8818ac 100644
--- a/deps/v8/src/arm/lithium-arm.cc
+++ b/deps/v8/src/arm/lithium-arm.cc
@@ -30,6 +30,7 @@
#include "lithium-allocator-inl.h"
#include "arm/lithium-arm.h"
#include "arm/lithium-codegen-arm.h"
+#include "hydrogen-osr.h"
namespace v8 {
namespace internal {
@@ -260,6 +261,14 @@ void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
}
+void LStoreCodeEntry::PrintDataTo(StringStream* stream) {
+ stream->Add(" = ");
+ function()->PrintTo(stream);
+ stream->Add(".code_entry = ");
+ code_object()->PrintTo(stream);
+}
+
+
void LInnerAllocatedObject::PrintDataTo(StringStream* stream) {
stream->Add(" = ");
base_object()->PrintTo(stream);
@@ -425,6 +434,15 @@ LPlatformChunk* LChunkBuilder::Build() {
chunk_ = new(zone()) LPlatformChunk(info(), graph());
LPhase phase("L_Building chunk", chunk_);
status_ = BUILDING;
+
+ // If compiling for OSR, reserve space for the unoptimized frame,
+ // which will be subsumed into this frame.
+ if (graph()->has_osr()) {
+ for (int i = graph()->osr()->UnoptimizedFrameSlots(); i > 0; i--) {
+ chunk_->GetNextSpillIndex(false);
+ }
+ }
+
const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
for (int i = 0; i < blocks->length(); i++) {
HBasicBlock* next = NULL;
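
Worked example of the reservation above, with hypothetical numbers: if
UnoptimizedFrameSlots() reports 7, the loop burns spill indices 0 through 6,
so the first spill slot handed out to optimized code is 7. Everything below
that boundary stays reserved for the unoptimized frame that the OSR-compiled
frame will subsume in place.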
@@ -718,12 +736,7 @@ LInstruction* LChunkBuilder::DoShift(Token::Value op,
// Left shifts can deoptimize if we shift by > 0 and the result cannot be
// truncated to smi.
if (instr->representation().IsSmi() && constant_value > 0) {
- for (HUseIterator it(instr->uses()); !it.Done(); it.Advance()) {
- if (!it.value()->CheckFlag(HValue::kTruncatingToSmi)) {
- does_deopt = true;
- break;
- }
- }
+ does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToSmi);
}
} else {
right = UseRegisterAtStart(right_value);
@@ -735,12 +748,7 @@ LInstruction* LChunkBuilder::DoShift(Token::Value op,
if (FLAG_opt_safe_uint32_operations) {
does_deopt = !instr->CheckFlag(HInstruction::kUint32);
} else {
- for (HUseIterator it(instr->uses()); !it.Done(); it.Advance()) {
- if (!it.value()->CheckFlag(HValue::kTruncatingToInt32)) {
- does_deopt = true;
- break;
- }
- }
+ does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToInt32);
}
}
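
Both hunks replace the same hand-written loop with a helper. Reconstructed
from the deleted lines, CheckUsesForFlag is simply the all-uses test, so
!CheckUsesForFlag(f) preserves the old does_deopt semantics exactly:

    // Sketch reconstructed from the removed loops (the real helper lives
    // with the hydrogen instructions): true iff every use carries the flag.
    bool HValue::CheckUsesForFlag(Flag f) {
      for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
        if (!it.value()->CheckFlag(f)) return false;
      }
      return true;
    }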
@@ -1089,6 +1097,14 @@ LInstruction* LChunkBuilder::DoPushArgument(HPushArgument* instr) {
}
+LInstruction* LChunkBuilder::DoStoreCodeEntry(
+ HStoreCodeEntry* store_code_entry) {
+ LOperand* function = UseRegister(store_code_entry->function());
+ LOperand* code_object = UseTempRegister(store_code_entry->code_object());
+ return new(zone()) LStoreCodeEntry(function, code_object);
+}
+
+
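
LStoreCodeEntry is new in this release and backs the OSR machinery above. The
code object lands in a temp because the generated code presumably rewrites it
into a raw entry address before the store; a plausible ARM lowering, assuming
the standard V8 field offsets:

    // Hypothetical LCodeGen::DoStoreCodeEntry body (sketch, not the patch).
    __ add(code_object, code_object,
           Operand(Code::kHeaderSize - kHeapObjectTag));  // entry address
    __ str(code_object,
           FieldMemOperand(function, JSFunction::kCodeEntryOffset));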
LInstruction* LChunkBuilder::DoInnerAllocatedObject(
HInnerAllocatedObject* inner_object) {
LOperand* base_object = UseRegisterAtStart(inner_object->base_object());
@@ -1505,20 +1521,39 @@ LInstruction* LChunkBuilder::DoMul(HMul* instr) {
if (instr->representation().IsSmiOrInteger32()) {
ASSERT(instr->left()->representation().Equals(instr->representation()));
ASSERT(instr->right()->representation().Equals(instr->representation()));
- LOperand* left;
- LOperand* right = UseOrConstant(instr->BetterRightOperand());
- LOperand* temp = NULL;
- if (instr->CheckFlag(HValue::kBailoutOnMinusZero) &&
- (instr->CheckFlag(HValue::kCanOverflow) ||
- !right->IsConstantOperand())) {
- left = UseRegister(instr->BetterLeftOperand());
- temp = TempRegister();
+ HValue* left = instr->BetterLeftOperand();
+ HValue* right = instr->BetterRightOperand();
+ LOperand* left_op;
+ LOperand* right_op;
+ bool can_overflow = instr->CheckFlag(HValue::kCanOverflow);
+ bool bailout_on_minus_zero = instr->CheckFlag(HValue::kBailoutOnMinusZero);
+
+ if (right->IsConstant()) {
+ HConstant* constant = HConstant::cast(right);
+ int32_t constant_value = constant->Integer32Value();
+      // Constants -1, 0 and 1 remain optimizable even when the result
+      // can overflow; any other constant is optimizable only when no
+      // overflow check is needed.
+ if (!can_overflow || ((constant_value >= -1) && (constant_value <= 1))) {
+ left_op = UseRegisterAtStart(left);
+ right_op = UseConstant(right);
+ } else {
+ if (bailout_on_minus_zero) {
+ left_op = UseRegister(left);
+ } else {
+ left_op = UseRegisterAtStart(left);
+ }
+ right_op = UseRegister(right);
+ }
} else {
- left = UseRegisterAtStart(instr->BetterLeftOperand());
+ if (bailout_on_minus_zero) {
+ left_op = UseRegister(left);
+ } else {
+ left_op = UseRegisterAtStart(left);
+ }
+ right_op = UseRegister(right);
}
- LMulI* mul = new(zone()) LMulI(left, right, temp);
- if (instr->CheckFlag(HValue::kCanOverflow) ||
- instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ LMulI* mul = new(zone()) LMulI(left_op, right_op);
+ if (can_overflow || bailout_on_minus_zero) {
AssignEnvironment(mul);
}
return DefineAsRegister(mul);
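
To expand on the comment in the hunk: when an overflow check is required,
only the constants -1, 0 and 1 keep a constant right operand, because each
has a lowering whose overflow behavior is trivial; every other constant falls
back to a register operand and a full multiply. A sketch of the assumed
constant cases:

    // Assumed ARM lowerings for the constants that survive kCanOverflow:
    //   x * -1  ->  rsb result, x, #0, SetCC   (overflows only for kMinInt)
    //   x *  0  ->  mov result, #0             (never overflows)
    //   x *  1  ->  mov result, x              (never overflows)
    // Other constants are strength-reduced to shift/add sequences whose
    // overflow is awkward to detect, hence the can_overflow restriction.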
@@ -1689,9 +1724,13 @@ LInstruction* LChunkBuilder::DoPower(HPower* instr) {
LInstruction* LChunkBuilder::DoRandom(HRandom* instr) {
ASSERT(instr->representation().IsDouble());
ASSERT(instr->global_object()->representation().IsTagged());
- LOperand* global_object = UseFixed(instr->global_object(), r0);
- LRandom* result = new(zone()) LRandom(global_object);
- return MarkAsCall(DefineFixedDouble(result, d7), instr);
+ LOperand* global_object = UseTempRegister(instr->global_object());
+ LOperand* scratch = TempRegister();
+ LOperand* scratch2 = TempRegister();
+ LOperand* scratch3 = TempRegister();
+ LRandom* result = new(zone()) LRandom(
+ global_object, scratch, scratch2, scratch3);
+ return DefineFixedDouble(result, d7);
}
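
DoRandom stops being a MarkAsCall with fixed registers; the generator is
emitted inline, which is what the three scratch registers are for. At this
revision V8's Math.random used George Marsaglia's multiply-with-carry scheme,
so the inline code presumably performs the same state update as the runtime
version (constants quoted from the runtime generator; treat the exact shape
as an assumption):

    // Sketch of the MWC update the inline path has to reproduce.
    state0 = 18273 * (state0 & 0xFFFF) + (state0 >> 16);
    state1 = 36969 * (state1 & 0xFFFF) + (state1 >> 16);
    uint32_t random_bits = (state0 << 14) + (state1 & 0x3FFFF);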
@@ -1912,19 +1951,17 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
ASSERT(to.IsInteger32());
LOperand* value = NULL;
LInstruction* res = NULL;
- if (instr->value()->type().IsSmi()) {
- value = UseRegisterAtStart(instr->value());
+ HValue* val = instr->value();
+ if (val->type().IsSmi() || val->representation().IsSmi()) {
+ value = UseRegisterAtStart(val);
res = DefineAsRegister(new(zone()) LSmiUntag(value, false));
} else {
- value = UseRegister(instr->value());
+ value = UseRegister(val);
LOperand* temp1 = TempRegister();
- LOperand* temp2 = instr->CanTruncateToInt32() ? TempRegister()
- : NULL;
- LOperand* temp3 = FixedTemp(d11);
+ LOperand* temp2 = FixedTemp(d11);
res = DefineSameAsFirst(new(zone()) LTaggedToI(value,
temp1,
- temp2,
- temp3));
+ temp2));
res = AssignEnvironment(res);
}
return res;
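
Two things change on the tagged-to-int32 path: the smi fast path now also
fires when the representation (not just the inferred type) is smi, and the
slow path needs one temp fewer because truncation runs through the fixed VFP
scratch d11. The heart of the non-smi path is presumably along these lines:

    // Sketch (assumption): untag a heap number through the d11 scratch.
    __ vldr(d11, FieldMemOperand(value, HeapNumber::kValueOffset));
    __ vcvt_s32_f64(d11.low(), d11);  // truncate toward zero
    __ vmov(result, d11.low());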
@@ -1944,14 +1981,12 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
return AssignPointerMap(result);
} else if (to.IsSmi()) {
LOperand* value = UseRegister(instr->value());
- return AssignEnvironment(DefineAsRegister(new(zone()) LDoubleToSmi(value,
- TempRegister(), TempRegister())));
+ return AssignEnvironment(
+ DefineAsRegister(new(zone()) LDoubleToSmi(value)));
} else {
ASSERT(to.IsInteger32());
LOperand* value = UseRegister(instr->value());
- LOperand* temp1 = TempRegister();
- LOperand* temp2 = instr->CanTruncateToInt32() ? TempRegister() : NULL;
- LDoubleToI* res = new(zone()) LDoubleToI(value, temp1, temp2);
+ LDoubleToI* res = new(zone()) LDoubleToI(value);
return AssignEnvironment(DefineAsRegister(res));
}
} else if (from.IsInteger32()) {
@@ -2018,9 +2053,9 @@ LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
}
-LInstruction* LChunkBuilder::DoCheckFunction(HCheckFunction* instr) {
+LInstruction* LChunkBuilder::DoCheckValue(HCheckValue* instr) {
LOperand* value = UseRegisterAtStart(instr->value());
- return AssignEnvironment(new(zone()) LCheckFunction(value));
+ return AssignEnvironment(new(zone()) LCheckValue(value));
}
@@ -2418,10 +2453,18 @@ LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
- int spill_index = chunk()->GetNextSpillIndex(false); // Not double-width.
- if (spill_index > LUnallocated::kMaxFixedSlotIndex) {
- Abort(kTooManySpillSlotsNeededForOSR);
- spill_index = 0;
+ // Use an index that corresponds to the location in the unoptimized frame,
+ // which the optimized frame will subsume.
+ int env_index = instr->index();
+ int spill_index = 0;
+ if (instr->environment()->is_parameter_index(env_index)) {
+ spill_index = chunk()->GetParameterStackSlot(env_index);
+ } else {
+ spill_index = env_index - instr->environment()->first_local_index();
+ if (spill_index > LUnallocated::kMaxFixedSlotIndex) {
+ Abort(kTooManySpillSlotsNeededForOSR);
+ spill_index = 0;
+ }
}
return DefineAsSpilled(new(zone()) LUnknownOSRValue, spill_index);
}
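
Worked example with hypothetical numbers: for a local at environment index 6
in a function whose first_local_index() is 4, the spill index comes out as
6 - 4 = 2, i.e. the optimized frame reuses exactly the slot the unoptimized
frame keeps that local in. Parameters take the other branch and keep their
calling-convention slots via GetParameterStackSlot(). That in-place
correspondence is what lets the optimized frame subsume the unoptimized one.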
@@ -2443,6 +2486,8 @@ LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) {
LInstruction* LChunkBuilder::DoCapturedObject(HCapturedObject* instr) {
+ instr->ReplayEnvironment(current_block_->last_environment());
+
// There are no real uses of a captured object.
return NULL;
}
@@ -2489,20 +2534,7 @@ LInstruction* LChunkBuilder::DoIsConstructCallAndBranch(
LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
- HEnvironment* env = current_block_->last_environment();
- ASSERT(env != NULL);
-
- env->set_ast_id(instr->ast_id());
-
- env->Drop(instr->pop_count());
- for (int i = instr->values()->length() - 1; i >= 0; --i) {
- HValue* value = instr->values()->at(i);
- if (instr->HasAssignedIndexAt(i)) {
- env->Bind(instr->GetAssignedIndexAt(i), value);
- } else {
- env->Push(value);
- }
- }
+ instr->ReplayEnvironment(current_block_->last_environment());
// If there is an instruction pending deoptimization environment create a
// lazy bailout instruction to capture the environment.
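
DoSimulate and DoCapturedObject now share the environment-replay logic.
Reconstructed from the block deleted here, HSimulate::ReplayEnvironment is
effectively the following (the HCapturedObject variant differs in detail):

    // Sketch pieced together from the removed lines above.
    void HSimulate::ReplayEnvironment(HEnvironment* env) {
      ASSERT(env != NULL);
      env->set_ast_id(ast_id());
      env->Drop(pop_count());
      for (int i = values()->length() - 1; i >= 0; --i) {
        HValue* value = values()->at(i);
        if (HasAssignedIndexAt(i)) {
          env->Bind(GetAssignedIndexAt(i), value);
        } else {
          env->Push(value);
        }
      }
    }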
diff --git a/deps/v8/src/arm/lithium-arm.h b/deps/v8/src/arm/lithium-arm.h
index d81dc0f57..98cacacae 100644
--- a/deps/v8/src/arm/lithium-arm.h
+++ b/deps/v8/src/arm/lithium-arm.h
@@ -62,12 +62,12 @@ class LCodeGen;
V(CallNewArray) \
V(CallRuntime) \
V(CallStub) \
- V(CheckFunction) \
V(CheckInstanceType) \
V(CheckNonSmi) \
V(CheckMaps) \
V(CheckMapValue) \
V(CheckSmi) \
+ V(CheckValue) \
V(ClampDToUint8) \
V(ClampIToUint8) \
V(ClampTToUint8) \
@@ -162,6 +162,7 @@ class LCodeGen;
V(SmiTag) \
V(SmiUntag) \
V(StackCheck) \
+ V(StoreCodeEntry) \
V(StoreContextSlot) \
V(StoreGlobalCell) \
V(StoreGlobalGeneric) \
@@ -189,13 +190,17 @@ class LCodeGen;
V(WrapReceiver)
-#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
- virtual Opcode opcode() const { return LInstruction::k##type; } \
- virtual void CompileToNative(LCodeGen* generator); \
- virtual const char* Mnemonic() const { return mnemonic; } \
- static L##type* cast(LInstruction* instr) { \
- ASSERT(instr->Is##type()); \
- return reinterpret_cast<L##type*>(instr); \
+#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
+ virtual Opcode opcode() const V8_FINAL V8_OVERRIDE { \
+ return LInstruction::k##type; \
+ } \
+ virtual void CompileToNative(LCodeGen* generator) V8_FINAL V8_OVERRIDE; \
+ virtual const char* Mnemonic() const V8_FINAL V8_OVERRIDE { \
+ return mnemonic; \
+ } \
+ static L##type* cast(LInstruction* instr) { \
+ ASSERT(instr->Is##type()); \
+ return reinterpret_cast<L##type*>(instr); \
}
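
The V8_FINAL / V8_OVERRIDE annotations that spread through the rest of this
header come from the new compiler-feature macros. Presumably, per
include/v8config.h, they expand to the C++11 keywords when the toolchain
supports them and to nothing otherwise, so the hierarchy gets
devirtualization and override checking on modern compilers without breaking
older ones:

    // Assumed shape of the definitions (the real ones are in v8config.h).
    #if V8_HAS_CXX11_OVERRIDE
    # define V8_OVERRIDE override
    #else
    # define V8_OVERRIDE /* not supported */
    #endif
    #if V8_HAS_CXX11_FINAL
    # define V8_FINAL final
    #else
    # define V8_FINAL /* not supported */
    #endif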
@@ -205,7 +210,7 @@ class LCodeGen;
}
-class LInstruction: public ZoneObject {
+class LInstruction : public ZoneObject {
public:
LInstruction()
: environment_(NULL),
@@ -214,7 +219,7 @@ class LInstruction: public ZoneObject {
set_position(RelocInfo::kNoPosition);
}
- virtual ~LInstruction() { }
+ virtual ~LInstruction() {}
virtual void CompileToNative(LCodeGen* generator) = 0;
virtual const char* Mnemonic() const = 0;
@@ -313,11 +318,13 @@ class LInstruction: public ZoneObject {
// I = number of input operands.
// T = number of temporary operands.
template<int R, int I, int T>
-class LTemplateInstruction: public LInstruction {
+class LTemplateInstruction : public LInstruction {
public:
// Allow 0 or 1 output operands.
STATIC_ASSERT(R == 0 || R == 1);
- virtual bool HasResult() const { return R != 0 && result() != NULL; }
+ virtual bool HasResult() const V8_FINAL V8_OVERRIDE {
+ return R != 0 && result() != NULL;
+ }
void set_result(LOperand* operand) { results_[0] = operand; }
LOperand* result() const { return results_[0]; }
@@ -327,15 +334,15 @@ class LTemplateInstruction: public LInstruction {
EmbeddedContainer<LOperand*, T> temps_;
private:
- virtual int InputCount() { return I; }
- virtual LOperand* InputAt(int i) { return inputs_[i]; }
+ virtual int InputCount() V8_FINAL V8_OVERRIDE { return I; }
+ virtual LOperand* InputAt(int i) V8_FINAL V8_OVERRIDE { return inputs_[i]; }
- virtual int TempCount() { return T; }
- virtual LOperand* TempAt(int i) { return temps_[i]; }
+ virtual int TempCount() V8_FINAL V8_OVERRIDE { return T; }
+ virtual LOperand* TempAt(int i) V8_FINAL V8_OVERRIDE { return temps_[i]; }
};
-class LGap: public LTemplateInstruction<0, 0, 0> {
+class LGap : public LTemplateInstruction<0, 0, 0> {
public:
explicit LGap(HBasicBlock* block)
: block_(block) {
@@ -346,8 +353,8 @@ class LGap: public LTemplateInstruction<0, 0, 0> {
}
// Can't use the DECLARE-macro here because of sub-classes.
- virtual bool IsGap() const { return true; }
- virtual void PrintDataTo(StringStream* stream);
+ virtual bool IsGap() const V8_OVERRIDE { return true; }
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
static LGap* cast(LInstruction* instr) {
ASSERT(instr->IsGap());
return reinterpret_cast<LGap*>(instr);
@@ -383,11 +390,11 @@ class LGap: public LTemplateInstruction<0, 0, 0> {
};
-class LInstructionGap: public LGap {
+class LInstructionGap V8_FINAL : public LGap {
public:
explicit LInstructionGap(HBasicBlock* block) : LGap(block) { }
- virtual bool HasInterestingComment(LCodeGen* gen) const {
+ virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
return !IsRedundant();
}
@@ -395,14 +402,14 @@ class LInstructionGap: public LGap {
};
-class LGoto: public LTemplateInstruction<0, 0, 0> {
+class LGoto V8_FINAL : public LTemplateInstruction<0, 0, 0> {
public:
explicit LGoto(int block_id) : block_id_(block_id) { }
- virtual bool HasInterestingComment(LCodeGen* gen) const;
+ virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
- virtual void PrintDataTo(StringStream* stream);
- virtual bool IsControl() const { return true; }
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual bool IsControl() const V8_OVERRIDE { return true; }
int block_id() const { return block_id_; }
@@ -411,7 +418,7 @@ class LGoto: public LTemplateInstruction<0, 0, 0> {
};
-class LLazyBailout: public LTemplateInstruction<0, 0, 0> {
+class LLazyBailout V8_FINAL : public LTemplateInstruction<0, 0, 0> {
public:
LLazyBailout() : gap_instructions_size_(0) { }
@@ -427,7 +434,7 @@ class LLazyBailout: public LTemplateInstruction<0, 0, 0> {
};
-class LDummyUse: public LTemplateInstruction<1, 1, 0> {
+class LDummyUse V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LDummyUse(LOperand* value) {
inputs_[0] = value;
@@ -436,22 +443,24 @@ class LDummyUse: public LTemplateInstruction<1, 1, 0> {
};
-class LDeoptimize: public LTemplateInstruction<0, 0, 0> {
+class LDeoptimize V8_FINAL : public LTemplateInstruction<0, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
DECLARE_HYDROGEN_ACCESSOR(Deoptimize)
};
-class LLabel: public LGap {
+class LLabel V8_FINAL : public LGap {
public:
explicit LLabel(HBasicBlock* block)
: LGap(block), replacement_(NULL) { }
- virtual bool HasInterestingComment(LCodeGen* gen) const { return false; }
+ virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+ return false;
+ }
DECLARE_CONCRETE_INSTRUCTION(Label, "label")
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
int block_id() const { return block()->block_id(); }
bool is_loop_header() const { return block()->IsLoopHeader(); }
@@ -467,14 +476,14 @@ class LLabel: public LGap {
};
-class LParameter: public LTemplateInstruction<1, 0, 0> {
+class LParameter V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
virtual bool HasInterestingComment(LCodeGen* gen) const { return false; }
DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter")
};
-class LCallStub: public LTemplateInstruction<1, 0, 0> {
+class LCallStub V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(CallStub, "call-stub")
DECLARE_HYDROGEN_ACCESSOR(CallStub)
@@ -485,19 +494,21 @@ class LCallStub: public LTemplateInstruction<1, 0, 0> {
};
-class LUnknownOSRValue: public LTemplateInstruction<1, 0, 0> {
+class LUnknownOSRValue V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
- virtual bool HasInterestingComment(LCodeGen* gen) const { return false; }
+ virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+ return false;
+ }
DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown-osr-value")
};
template<int I, int T>
-class LControlInstruction: public LTemplateInstruction<0, I, T> {
+class LControlInstruction : public LTemplateInstruction<0, I, T> {
public:
LControlInstruction() : false_label_(NULL), true_label_(NULL) { }
- virtual bool IsControl() const { return true; }
+ virtual bool IsControl() const V8_FINAL V8_OVERRIDE { return true; }
int SuccessorCount() { return hydrogen()->SuccessorCount(); }
HBasicBlock* SuccessorAt(int i) { return hydrogen()->SuccessorAt(i); }
@@ -536,7 +547,7 @@ class LControlInstruction: public LTemplateInstruction<0, I, T> {
};
-class LWrapReceiver: public LTemplateInstruction<1, 2, 0> {
+class LWrapReceiver V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LWrapReceiver(LOperand* receiver, LOperand* function) {
inputs_[0] = receiver;
@@ -550,7 +561,7 @@ class LWrapReceiver: public LTemplateInstruction<1, 2, 0> {
};
-class LApplyArguments: public LTemplateInstruction<1, 4, 0> {
+class LApplyArguments V8_FINAL : public LTemplateInstruction<1, 4, 0> {
public:
LApplyArguments(LOperand* function,
LOperand* receiver,
@@ -571,7 +582,7 @@ class LApplyArguments: public LTemplateInstruction<1, 4, 0> {
};
-class LAccessArgumentsAt: public LTemplateInstruction<1, 3, 0> {
+class LAccessArgumentsAt V8_FINAL : public LTemplateInstruction<1, 3, 0> {
public:
LAccessArgumentsAt(LOperand* arguments, LOperand* length, LOperand* index) {
inputs_[0] = arguments;
@@ -585,11 +596,11 @@ class LAccessArgumentsAt: public LTemplateInstruction<1, 3, 0> {
LOperand* length() { return inputs_[1]; }
LOperand* index() { return inputs_[2]; }
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LArgumentsLength: public LTemplateInstruction<1, 1, 0> {
+class LArgumentsLength V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LArgumentsLength(LOperand* elements) {
inputs_[0] = elements;
@@ -601,14 +612,14 @@ class LArgumentsLength: public LTemplateInstruction<1, 1, 0> {
};
-class LArgumentsElements: public LTemplateInstruction<1, 0, 0> {
+class LArgumentsElements V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments-elements")
DECLARE_HYDROGEN_ACCESSOR(ArgumentsElements)
};
-class LModI: public LTemplateInstruction<1, 2, 2> {
+class LModI V8_FINAL : public LTemplateInstruction<1, 2, 2> {
public:
LModI(LOperand* left,
LOperand* right,
@@ -630,7 +641,7 @@ class LModI: public LTemplateInstruction<1, 2, 2> {
};
-class LDivI: public LTemplateInstruction<1, 2, 1> {
+class LDivI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LDivI(LOperand* left, LOperand* right, LOperand* temp) {
inputs_[0] = left;
@@ -647,7 +658,7 @@ class LDivI: public LTemplateInstruction<1, 2, 1> {
};
-class LMathFloorOfDiv: public LTemplateInstruction<1, 2, 1> {
+class LMathFloorOfDiv V8_FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LMathFloorOfDiv(LOperand* left,
LOperand* right,
@@ -666,17 +677,15 @@ class LMathFloorOfDiv: public LTemplateInstruction<1, 2, 1> {
};
-class LMulI: public LTemplateInstruction<1, 2, 1> {
+class LMulI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- LMulI(LOperand* left, LOperand* right, LOperand* temp) {
+ LMulI(LOperand* left, LOperand* right) {
inputs_[0] = left;
inputs_[1] = right;
- temps_[0] = temp;
}
LOperand* left() { return inputs_[0]; }
LOperand* right() { return inputs_[1]; }
- LOperand* temp() { return temps_[0]; }
DECLARE_CONCRETE_INSTRUCTION(MulI, "mul-i")
DECLARE_HYDROGEN_ACCESSOR(Mul)
@@ -684,7 +693,7 @@ class LMulI: public LTemplateInstruction<1, 2, 1> {
// Instruction for computing multiplier * multiplicand + addend.
-class LMultiplyAddD: public LTemplateInstruction<1, 3, 0> {
+class LMultiplyAddD V8_FINAL : public LTemplateInstruction<1, 3, 0> {
public:
LMultiplyAddD(LOperand* addend, LOperand* multiplier,
LOperand* multiplicand) {
@@ -702,7 +711,7 @@ class LMultiplyAddD: public LTemplateInstruction<1, 3, 0> {
// Instruction for computing minuend - multiplier * multiplicand.
-class LMultiplySubD: public LTemplateInstruction<1, 3, 0> {
+class LMultiplySubD V8_FINAL : public LTemplateInstruction<1, 3, 0> {
public:
LMultiplySubD(LOperand* minuend, LOperand* multiplier,
LOperand* multiplicand) {
@@ -719,13 +728,13 @@ class LMultiplySubD: public LTemplateInstruction<1, 3, 0> {
};
-class LDebugBreak: public LTemplateInstruction<0, 0, 0> {
+class LDebugBreak V8_FINAL : public LTemplateInstruction<0, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(DebugBreak, "break")
};
-class LCompareNumericAndBranch: public LControlInstruction<2, 0> {
+class LCompareNumericAndBranch V8_FINAL : public LControlInstruction<2, 0> {
public:
LCompareNumericAndBranch(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -744,11 +753,11 @@ class LCompareNumericAndBranch: public LControlInstruction<2, 0> {
return hydrogen()->representation().IsDouble();
}
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LMathFloor: public LTemplateInstruction<1, 1, 0> {
+class LMathFloor V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LMathFloor(LOperand* value) {
inputs_[0] = value;
@@ -761,7 +770,7 @@ class LMathFloor: public LTemplateInstruction<1, 1, 0> {
};
-class LMathRound: public LTemplateInstruction<1, 1, 1> {
+class LMathRound V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LMathRound(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -776,7 +785,7 @@ class LMathRound: public LTemplateInstruction<1, 1, 1> {
};
-class LMathAbs: public LTemplateInstruction<1, 1, 0> {
+class LMathAbs V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LMathAbs(LOperand* value) {
inputs_[0] = value;
@@ -789,7 +798,7 @@ class LMathAbs: public LTemplateInstruction<1, 1, 0> {
};
-class LMathLog: public LTemplateInstruction<1, 1, 0> {
+class LMathLog V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LMathLog(LOperand* value) {
inputs_[0] = value;
@@ -801,7 +810,7 @@ class LMathLog: public LTemplateInstruction<1, 1, 0> {
};
-class LMathSin: public LTemplateInstruction<1, 1, 0> {
+class LMathSin V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LMathSin(LOperand* value) {
inputs_[0] = value;
@@ -813,7 +822,7 @@ class LMathSin: public LTemplateInstruction<1, 1, 0> {
};
-class LMathCos: public LTemplateInstruction<1, 1, 0> {
+class LMathCos V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LMathCos(LOperand* value) {
inputs_[0] = value;
@@ -825,7 +834,7 @@ class LMathCos: public LTemplateInstruction<1, 1, 0> {
};
-class LMathTan: public LTemplateInstruction<1, 1, 0> {
+class LMathTan V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LMathTan(LOperand* value) {
inputs_[0] = value;
@@ -837,7 +846,7 @@ class LMathTan: public LTemplateInstruction<1, 1, 0> {
};
-class LMathExp: public LTemplateInstruction<1, 1, 3> {
+class LMathExp V8_FINAL : public LTemplateInstruction<1, 1, 3> {
public:
LMathExp(LOperand* value,
LOperand* double_temp,
@@ -859,7 +868,7 @@ class LMathExp: public LTemplateInstruction<1, 1, 3> {
};
-class LMathSqrt: public LTemplateInstruction<1, 1, 0> {
+class LMathSqrt V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LMathSqrt(LOperand* value) {
inputs_[0] = value;
@@ -871,7 +880,7 @@ class LMathSqrt: public LTemplateInstruction<1, 1, 0> {
};
-class LMathPowHalf: public LTemplateInstruction<1, 1, 1> {
+class LMathPowHalf V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LMathPowHalf(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -885,7 +894,7 @@ class LMathPowHalf: public LTemplateInstruction<1, 1, 1> {
};
-class LCmpObjectEqAndBranch: public LControlInstruction<2, 0> {
+class LCmpObjectEqAndBranch V8_FINAL : public LControlInstruction<2, 0> {
public:
LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -900,7 +909,7 @@ class LCmpObjectEqAndBranch: public LControlInstruction<2, 0> {
};
-class LCmpHoleAndBranch: public LControlInstruction<1, 0> {
+class LCmpHoleAndBranch V8_FINAL : public LControlInstruction<1, 0> {
public:
explicit LCmpHoleAndBranch(LOperand* object) {
inputs_[0] = object;
@@ -913,7 +922,7 @@ class LCmpHoleAndBranch: public LControlInstruction<1, 0> {
};
-class LIsObjectAndBranch: public LControlInstruction<1, 1> {
+class LIsObjectAndBranch V8_FINAL : public LControlInstruction<1, 1> {
public:
LIsObjectAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -926,11 +935,11 @@ class LIsObjectAndBranch: public LControlInstruction<1, 1> {
DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsObjectAndBranch)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LIsNumberAndBranch: public LControlInstruction<1, 0> {
+class LIsNumberAndBranch V8_FINAL : public LControlInstruction<1, 0> {
public:
explicit LIsNumberAndBranch(LOperand* value) {
inputs_[0] = value;
@@ -943,7 +952,7 @@ class LIsNumberAndBranch: public LControlInstruction<1, 0> {
};
-class LIsStringAndBranch: public LControlInstruction<1, 1> {
+class LIsStringAndBranch V8_FINAL : public LControlInstruction<1, 1> {
public:
LIsStringAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -956,11 +965,11 @@ class LIsStringAndBranch: public LControlInstruction<1, 1> {
DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch, "is-string-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsStringAndBranch)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LIsSmiAndBranch: public LControlInstruction<1, 0> {
+class LIsSmiAndBranch V8_FINAL : public LControlInstruction<1, 0> {
public:
explicit LIsSmiAndBranch(LOperand* value) {
inputs_[0] = value;
@@ -971,11 +980,11 @@ class LIsSmiAndBranch: public LControlInstruction<1, 0> {
DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsSmiAndBranch)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LIsUndetectableAndBranch: public LControlInstruction<1, 1> {
+class LIsUndetectableAndBranch V8_FINAL : public LControlInstruction<1, 1> {
public:
explicit LIsUndetectableAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -989,11 +998,11 @@ class LIsUndetectableAndBranch: public LControlInstruction<1, 1> {
"is-undetectable-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsUndetectableAndBranch)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LStringCompareAndBranch: public LControlInstruction<2, 0> {
+class LStringCompareAndBranch V8_FINAL : public LControlInstruction<2, 0> {
public:
LStringCompareAndBranch(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -1009,11 +1018,11 @@ class LStringCompareAndBranch: public LControlInstruction<2, 0> {
Token::Value op() const { return hydrogen()->token(); }
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LHasInstanceTypeAndBranch: public LControlInstruction<1, 0> {
+class LHasInstanceTypeAndBranch V8_FINAL : public LControlInstruction<1, 0> {
public:
explicit LHasInstanceTypeAndBranch(LOperand* value) {
inputs_[0] = value;
@@ -1025,11 +1034,11 @@ class LHasInstanceTypeAndBranch: public LControlInstruction<1, 0> {
"has-instance-type-and-branch")
DECLARE_HYDROGEN_ACCESSOR(HasInstanceTypeAndBranch)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LGetCachedArrayIndex: public LTemplateInstruction<1, 1, 0> {
+class LGetCachedArrayIndex V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LGetCachedArrayIndex(LOperand* value) {
inputs_[0] = value;
@@ -1042,7 +1051,8 @@ class LGetCachedArrayIndex: public LTemplateInstruction<1, 1, 0> {
};
-class LHasCachedArrayIndexAndBranch: public LControlInstruction<1, 0> {
+class LHasCachedArrayIndexAndBranch V8_FINAL
+ : public LControlInstruction<1, 0> {
public:
explicit LHasCachedArrayIndexAndBranch(LOperand* value) {
inputs_[0] = value;
@@ -1054,11 +1064,11 @@ class LHasCachedArrayIndexAndBranch: public LControlInstruction<1, 0> {
"has-cached-array-index-and-branch")
DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndexAndBranch)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LClassOfTestAndBranch: public LControlInstruction<1, 1> {
+class LClassOfTestAndBranch V8_FINAL : public LControlInstruction<1, 1> {
public:
LClassOfTestAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -1072,11 +1082,11 @@ class LClassOfTestAndBranch: public LControlInstruction<1, 1> {
"class-of-test-and-branch")
DECLARE_HYDROGEN_ACCESSOR(ClassOfTestAndBranch)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LCmpT: public LTemplateInstruction<1, 2, 0> {
+class LCmpT V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LCmpT(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -1093,7 +1103,7 @@ class LCmpT: public LTemplateInstruction<1, 2, 0> {
};
-class LInstanceOf: public LTemplateInstruction<1, 2, 0> {
+class LInstanceOf V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LInstanceOf(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -1107,7 +1117,7 @@ class LInstanceOf: public LTemplateInstruction<1, 2, 0> {
};
-class LInstanceOfKnownGlobal: public LTemplateInstruction<1, 1, 1> {
+class LInstanceOfKnownGlobal V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LInstanceOfKnownGlobal(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -1125,7 +1135,8 @@ class LInstanceOfKnownGlobal: public LTemplateInstruction<1, 1, 1> {
LEnvironment* GetDeferredLazyDeoptimizationEnvironment() {
return lazy_deopt_env_;
}
- virtual void SetDeferredLazyDeoptimizationEnvironment(LEnvironment* env) {
+ virtual void SetDeferredLazyDeoptimizationEnvironment(
+ LEnvironment* env) V8_OVERRIDE {
lazy_deopt_env_ = env;
}
@@ -1134,7 +1145,7 @@ class LInstanceOfKnownGlobal: public LTemplateInstruction<1, 1, 1> {
};
-class LInstanceSize: public LTemplateInstruction<1, 1, 0> {
+class LInstanceSize V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LInstanceSize(LOperand* object) {
inputs_[0] = object;
@@ -1147,7 +1158,7 @@ class LInstanceSize: public LTemplateInstruction<1, 1, 0> {
};
-class LBoundsCheck: public LTemplateInstruction<0, 2, 0> {
+class LBoundsCheck V8_FINAL : public LTemplateInstruction<0, 2, 0> {
public:
LBoundsCheck(LOperand* index, LOperand* length) {
inputs_[0] = index;
@@ -1162,7 +1173,7 @@ class LBoundsCheck: public LTemplateInstruction<0, 2, 0> {
};
-class LBitI: public LTemplateInstruction<1, 2, 0> {
+class LBitI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LBitI(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -1179,7 +1190,7 @@ class LBitI: public LTemplateInstruction<1, 2, 0> {
};
-class LShiftI: public LTemplateInstruction<1, 2, 0> {
+class LShiftI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LShiftI(Token::Value op, LOperand* left, LOperand* right, bool can_deopt)
: op_(op), can_deopt_(can_deopt) {
@@ -1200,7 +1211,7 @@ class LShiftI: public LTemplateInstruction<1, 2, 0> {
};
-class LSubI: public LTemplateInstruction<1, 2, 0> {
+class LSubI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LSubI(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -1215,7 +1226,7 @@ class LSubI: public LTemplateInstruction<1, 2, 0> {
};
-class LRSubI: public LTemplateInstruction<1, 2, 0> {
+class LRSubI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LRSubI(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -1230,7 +1241,7 @@ class LRSubI: public LTemplateInstruction<1, 2, 0> {
};
-class LConstantI: public LTemplateInstruction<1, 0, 0> {
+class LConstantI V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ConstantI, "constant-i")
DECLARE_HYDROGEN_ACCESSOR(Constant)
@@ -1239,7 +1250,7 @@ class LConstantI: public LTemplateInstruction<1, 0, 0> {
};
-class LConstantS: public LTemplateInstruction<1, 0, 0> {
+class LConstantS V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ConstantS, "constant-s")
DECLARE_HYDROGEN_ACCESSOR(Constant)
@@ -1248,7 +1259,7 @@ class LConstantS: public LTemplateInstruction<1, 0, 0> {
};
-class LConstantD: public LTemplateInstruction<1, 0, 0> {
+class LConstantD V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ConstantD, "constant-d")
DECLARE_HYDROGEN_ACCESSOR(Constant)
@@ -1257,7 +1268,7 @@ class LConstantD: public LTemplateInstruction<1, 0, 0> {
};
-class LConstantE: public LTemplateInstruction<1, 0, 0> {
+class LConstantE V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ConstantE, "constant-e")
DECLARE_HYDROGEN_ACCESSOR(Constant)
@@ -1268,16 +1279,18 @@ class LConstantE: public LTemplateInstruction<1, 0, 0> {
};
-class LConstantT: public LTemplateInstruction<1, 0, 0> {
+class LConstantT V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ConstantT, "constant-t")
DECLARE_HYDROGEN_ACCESSOR(Constant)
- Handle<Object> value() const { return hydrogen()->handle(); }
+ Handle<Object> value(Isolate* isolate) const {
+ return hydrogen()->handle(isolate);
+ }
};
-class LBranch: public LControlInstruction<1, 0> {
+class LBranch V8_FINAL : public LControlInstruction<1, 0> {
public:
explicit LBranch(LOperand* value) {
inputs_[0] = value;
@@ -1288,11 +1301,11 @@ class LBranch: public LControlInstruction<1, 0> {
DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
DECLARE_HYDROGEN_ACCESSOR(Branch)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LCmpMapAndBranch: public LControlInstruction<1, 1> {
+class LCmpMapAndBranch V8_FINAL : public LControlInstruction<1, 1> {
public:
LCmpMapAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -1309,7 +1322,7 @@ class LCmpMapAndBranch: public LControlInstruction<1, 1> {
};
-class LMapEnumLength: public LTemplateInstruction<1, 1, 0> {
+class LMapEnumLength V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LMapEnumLength(LOperand* value) {
inputs_[0] = value;
@@ -1321,7 +1334,7 @@ class LMapEnumLength: public LTemplateInstruction<1, 1, 0> {
};
-class LElementsKind: public LTemplateInstruction<1, 1, 0> {
+class LElementsKind V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LElementsKind(LOperand* value) {
inputs_[0] = value;
@@ -1334,7 +1347,7 @@ class LElementsKind: public LTemplateInstruction<1, 1, 0> {
};
-class LValueOf: public LTemplateInstruction<1, 1, 1> {
+class LValueOf V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LValueOf(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -1349,7 +1362,7 @@ class LValueOf: public LTemplateInstruction<1, 1, 1> {
};
-class LDateField: public LTemplateInstruction<1, 1, 1> {
+class LDateField V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LDateField(LOperand* date, LOperand* temp, Smi* index) : index_(index) {
inputs_[0] = date;
@@ -1368,7 +1381,7 @@ class LDateField: public LTemplateInstruction<1, 1, 1> {
};
-class LSeqStringSetChar: public LTemplateInstruction<1, 3, 0> {
+class LSeqStringSetChar V8_FINAL : public LTemplateInstruction<1, 3, 0> {
public:
LSeqStringSetChar(String::Encoding encoding,
LOperand* string,
@@ -1392,7 +1405,7 @@ class LSeqStringSetChar: public LTemplateInstruction<1, 3, 0> {
};
-class LThrow: public LTemplateInstruction<0, 1, 0> {
+class LThrow V8_FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LThrow(LOperand* value) {
inputs_[0] = value;
@@ -1404,7 +1417,7 @@ class LThrow: public LTemplateInstruction<0, 1, 0> {
};
-class LAddI: public LTemplateInstruction<1, 2, 0> {
+class LAddI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LAddI(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -1419,7 +1432,7 @@ class LAddI: public LTemplateInstruction<1, 2, 0> {
};
-class LMathMinMax: public LTemplateInstruction<1, 2, 0> {
+class LMathMinMax V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LMathMinMax(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -1434,7 +1447,7 @@ class LMathMinMax: public LTemplateInstruction<1, 2, 0> {
};
-class LPower: public LTemplateInstruction<1, 2, 0> {
+class LPower V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LPower(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -1449,20 +1462,29 @@ class LPower: public LTemplateInstruction<1, 2, 0> {
};
-class LRandom: public LTemplateInstruction<1, 1, 0> {
+class LRandom V8_FINAL : public LTemplateInstruction<1, 1, 3> {
public:
- explicit LRandom(LOperand* global_object) {
+ LRandom(LOperand* global_object,
+ LOperand* scratch,
+ LOperand* scratch2,
+ LOperand* scratch3) {
inputs_[0] = global_object;
+ temps_[0] = scratch;
+ temps_[1] = scratch2;
+ temps_[2] = scratch3;
}
- LOperand* global_object() { return inputs_[0]; }
+ LOperand* global_object() const { return inputs_[0]; }
+ LOperand* scratch() const { return temps_[0]; }
+ LOperand* scratch2() const { return temps_[1]; }
+ LOperand* scratch3() const { return temps_[2]; }
DECLARE_CONCRETE_INSTRUCTION(Random, "random")
DECLARE_HYDROGEN_ACCESSOR(Random)
};
-class LArithmeticD: public LTemplateInstruction<1, 2, 0> {
+class LArithmeticD V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LArithmeticD(Token::Value op, LOperand* left, LOperand* right)
: op_(op) {
@@ -1474,16 +1496,18 @@ class LArithmeticD: public LTemplateInstruction<1, 2, 0> {
LOperand* left() { return inputs_[0]; }
LOperand* right() { return inputs_[1]; }
- virtual Opcode opcode() const { return LInstruction::kArithmeticD; }
- virtual void CompileToNative(LCodeGen* generator);
- virtual const char* Mnemonic() const;
+ virtual Opcode opcode() const V8_OVERRIDE {
+ return LInstruction::kArithmeticD;
+ }
+ virtual void CompileToNative(LCodeGen* generator) V8_OVERRIDE;
+ virtual const char* Mnemonic() const V8_OVERRIDE;
private:
Token::Value op_;
};
-class LArithmeticT: public LTemplateInstruction<1, 2, 0> {
+class LArithmeticT V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LArithmeticT(Token::Value op, LOperand* left, LOperand* right)
: op_(op) {
@@ -1495,16 +1519,18 @@ class LArithmeticT: public LTemplateInstruction<1, 2, 0> {
LOperand* right() { return inputs_[1]; }
Token::Value op() const { return op_; }
- virtual Opcode opcode() const { return LInstruction::kArithmeticT; }
- virtual void CompileToNative(LCodeGen* generator);
- virtual const char* Mnemonic() const;
+ virtual Opcode opcode() const V8_OVERRIDE {
+ return LInstruction::kArithmeticT;
+ }
+ virtual void CompileToNative(LCodeGen* generator) V8_OVERRIDE;
+ virtual const char* Mnemonic() const V8_OVERRIDE;
private:
Token::Value op_;
};
-class LReturn: public LTemplateInstruction<0, 2, 0> {
+class LReturn V8_FINAL : public LTemplateInstruction<0, 2, 0> {
public:
explicit LReturn(LOperand* value, LOperand* parameter_count) {
inputs_[0] = value;
@@ -1526,7 +1552,7 @@ class LReturn: public LTemplateInstruction<0, 2, 0> {
};
-class LLoadNamedField: public LTemplateInstruction<1, 1, 0> {
+class LLoadNamedField V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadNamedField(LOperand* object) {
inputs_[0] = object;
@@ -1539,7 +1565,7 @@ class LLoadNamedField: public LTemplateInstruction<1, 1, 0> {
};
-class LLoadNamedGeneric: public LTemplateInstruction<1, 1, 0> {
+class LLoadNamedGeneric V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadNamedGeneric(LOperand* object) {
inputs_[0] = object;
@@ -1554,7 +1580,7 @@ class LLoadNamedGeneric: public LTemplateInstruction<1, 1, 0> {
};
-class LLoadFunctionPrototype: public LTemplateInstruction<1, 1, 0> {
+class LLoadFunctionPrototype V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadFunctionPrototype(LOperand* function) {
inputs_[0] = function;
@@ -1567,7 +1593,8 @@ class LLoadFunctionPrototype: public LTemplateInstruction<1, 1, 0> {
};
-class LLoadExternalArrayPointer: public LTemplateInstruction<1, 1, 0> {
+class LLoadExternalArrayPointer V8_FINAL
+ : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadExternalArrayPointer(LOperand* object) {
inputs_[0] = object;
@@ -1580,7 +1607,7 @@ class LLoadExternalArrayPointer: public LTemplateInstruction<1, 1, 0> {
};
-class LLoadKeyed: public LTemplateInstruction<1, 2, 0> {
+class LLoadKeyed V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LLoadKeyed(LOperand* elements, LOperand* key) {
inputs_[0] = elements;
@@ -1599,12 +1626,12 @@ class LLoadKeyed: public LTemplateInstruction<1, 2, 0> {
DECLARE_CONCRETE_INSTRUCTION(LoadKeyed, "load-keyed")
DECLARE_HYDROGEN_ACCESSOR(LoadKeyed)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
uint32_t additional_index() const { return hydrogen()->index_offset(); }
};
-class LLoadKeyedGeneric: public LTemplateInstruction<1, 2, 0> {
+class LLoadKeyedGeneric V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LLoadKeyedGeneric(LOperand* object, LOperand* key) {
inputs_[0] = object;
@@ -1618,14 +1645,14 @@ class LLoadKeyedGeneric: public LTemplateInstruction<1, 2, 0> {
};
-class LLoadGlobalCell: public LTemplateInstruction<1, 0, 0> {
+class LLoadGlobalCell V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(LoadGlobalCell, "load-global-cell")
DECLARE_HYDROGEN_ACCESSOR(LoadGlobalCell)
};
-class LLoadGlobalGeneric: public LTemplateInstruction<1, 1, 0> {
+class LLoadGlobalGeneric V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadGlobalGeneric(LOperand* global_object) {
inputs_[0] = global_object;
@@ -1641,7 +1668,7 @@ class LLoadGlobalGeneric: public LTemplateInstruction<1, 1, 0> {
};
-class LStoreGlobalCell: public LTemplateInstruction<0, 1, 1> {
+class LStoreGlobalCell V8_FINAL : public LTemplateInstruction<0, 1, 1> {
public:
LStoreGlobalCell(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -1656,7 +1683,7 @@ class LStoreGlobalCell: public LTemplateInstruction<0, 1, 1> {
};
-class LStoreGlobalGeneric: public LTemplateInstruction<0, 2, 0> {
+class LStoreGlobalGeneric V8_FINAL : public LTemplateInstruction<0, 2, 0> {
public:
explicit LStoreGlobalGeneric(LOperand* global_object,
LOperand* value) {
@@ -1675,7 +1702,7 @@ class LStoreGlobalGeneric: public LTemplateInstruction<0, 2, 0> {
};
-class LLoadContextSlot: public LTemplateInstruction<1, 1, 0> {
+class LLoadContextSlot V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadContextSlot(LOperand* context) {
inputs_[0] = context;
@@ -1688,11 +1715,11 @@ class LLoadContextSlot: public LTemplateInstruction<1, 1, 0> {
int slot_index() { return hydrogen()->slot_index(); }
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LStoreContextSlot: public LTemplateInstruction<0, 2, 0> {
+class LStoreContextSlot V8_FINAL : public LTemplateInstruction<0, 2, 0> {
public:
LStoreContextSlot(LOperand* context, LOperand* value) {
inputs_[0] = context;
@@ -1707,11 +1734,11 @@ class LStoreContextSlot: public LTemplateInstruction<0, 2, 0> {
int slot_index() { return hydrogen()->slot_index(); }
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LPushArgument: public LTemplateInstruction<0, 1, 0> {
+class LPushArgument V8_FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LPushArgument(LOperand* value) {
inputs_[0] = value;
@@ -1723,7 +1750,7 @@ class LPushArgument: public LTemplateInstruction<0, 1, 0> {
};
-class LDrop: public LTemplateInstruction<0, 0, 0> {
+class LDrop V8_FINAL : public LTemplateInstruction<0, 0, 0> {
public:
explicit LDrop(int count) : count_(count) { }
@@ -1736,7 +1763,24 @@ class LDrop: public LTemplateInstruction<0, 0, 0> {
};
-class LInnerAllocatedObject: public LTemplateInstruction<1, 1, 0> {
+class LStoreCodeEntry V8_FINAL : public LTemplateInstruction<0, 1, 1> {
+ public:
+ LStoreCodeEntry(LOperand* function, LOperand* code_object) {
+ inputs_[0] = function;
+ temps_[0] = code_object;
+ }
+
+ LOperand* function() { return inputs_[0]; }
+ LOperand* code_object() { return temps_[0]; }
+
+  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreCodeEntry, "store-code-entry")
+ DECLARE_HYDROGEN_ACCESSOR(StoreCodeEntry)
+};
+
+
+class LInnerAllocatedObject V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LInnerAllocatedObject(LOperand* base_object) {
inputs_[0] = base_object;
@@ -1745,28 +1789,28 @@ class LInnerAllocatedObject: public LTemplateInstruction<1, 1, 0> {
LOperand* base_object() { return inputs_[0]; }
int offset() { return hydrogen()->offset(); }
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(InnerAllocatedObject, "sub-allocated-object")
DECLARE_HYDROGEN_ACCESSOR(InnerAllocatedObject)
};
-class LThisFunction: public LTemplateInstruction<1, 0, 0> {
+class LThisFunction V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function")
DECLARE_HYDROGEN_ACCESSOR(ThisFunction)
};
-class LContext: public LTemplateInstruction<1, 0, 0> {
+class LContext V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(Context, "context")
DECLARE_HYDROGEN_ACCESSOR(Context)
};
-class LOuterContext: public LTemplateInstruction<1, 1, 0> {
+class LOuterContext V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LOuterContext(LOperand* context) {
inputs_[0] = context;
@@ -1778,14 +1822,14 @@ class LOuterContext: public LTemplateInstruction<1, 1, 0> {
};
-class LDeclareGlobals: public LTemplateInstruction<0, 0, 0> {
+class LDeclareGlobals V8_FINAL : public LTemplateInstruction<0, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(DeclareGlobals, "declare-globals")
DECLARE_HYDROGEN_ACCESSOR(DeclareGlobals)
};
-class LGlobalObject: public LTemplateInstruction<1, 1, 0> {
+class LGlobalObject V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LGlobalObject(LOperand* context) {
inputs_[0] = context;
@@ -1797,7 +1841,7 @@ class LGlobalObject: public LTemplateInstruction<1, 1, 0> {
};
-class LGlobalReceiver: public LTemplateInstruction<1, 1, 0> {
+class LGlobalReceiver V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LGlobalReceiver(LOperand* global_object) {
inputs_[0] = global_object;
@@ -1809,19 +1853,19 @@ class LGlobalReceiver: public LTemplateInstruction<1, 1, 0> {
};
-class LCallConstantFunction: public LTemplateInstruction<1, 0, 0> {
+class LCallConstantFunction V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(CallConstantFunction, "call-constant-function")
DECLARE_HYDROGEN_ACCESSOR(CallConstantFunction)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
Handle<JSFunction> function() { return hydrogen()->function(); }
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LInvokeFunction: public LTemplateInstruction<1, 1, 0> {
+class LInvokeFunction V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LInvokeFunction(LOperand* function) {
inputs_[0] = function;
@@ -1832,13 +1876,13 @@ class LInvokeFunction: public LTemplateInstruction<1, 1, 0> {
DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallKeyed: public LTemplateInstruction<1, 1, 0> {
+class LCallKeyed V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LCallKeyed(LOperand* key) {
inputs_[0] = key;
@@ -1849,26 +1893,26 @@ class LCallKeyed: public LTemplateInstruction<1, 1, 0> {
DECLARE_CONCRETE_INSTRUCTION(CallKeyed, "call-keyed")
DECLARE_HYDROGEN_ACCESSOR(CallKeyed)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallNamed: public LTemplateInstruction<1, 0, 0> {
+class LCallNamed V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(CallNamed, "call-named")
DECLARE_HYDROGEN_ACCESSOR(CallNamed)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
Handle<String> name() const { return hydrogen()->name(); }
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallFunction: public LTemplateInstruction<1, 1, 0> {
+class LCallFunction V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LCallFunction(LOperand* function) {
inputs_[0] = function;
@@ -1883,30 +1927,30 @@ class LCallFunction: public LTemplateInstruction<1, 1, 0> {
};
-class LCallGlobal: public LTemplateInstruction<1, 0, 0> {
+class LCallGlobal V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(CallGlobal, "call-global")
DECLARE_HYDROGEN_ACCESSOR(CallGlobal)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
  Handle<String> name() const { return hydrogen()->name(); }
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallKnownGlobal: public LTemplateInstruction<1, 0, 0> {
+class LCallKnownGlobal V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(CallKnownGlobal, "call-known-global")
DECLARE_HYDROGEN_ACCESSOR(CallKnownGlobal)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallNew: public LTemplateInstruction<1, 1, 0> {
+class LCallNew V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LCallNew(LOperand* constructor) {
inputs_[0] = constructor;
@@ -1917,13 +1961,13 @@ class LCallNew: public LTemplateInstruction<1, 1, 0> {
DECLARE_CONCRETE_INSTRUCTION(CallNew, "call-new")
DECLARE_HYDROGEN_ACCESSOR(CallNew)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallNewArray: public LTemplateInstruction<1, 1, 0> {
+class LCallNewArray V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LCallNewArray(LOperand* constructor) {
inputs_[0] = constructor;
@@ -1934,13 +1978,13 @@ class LCallNewArray: public LTemplateInstruction<1, 1, 0> {
DECLARE_CONCRETE_INSTRUCTION(CallNewArray, "call-new-array")
DECLARE_HYDROGEN_ACCESSOR(CallNewArray)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallRuntime: public LTemplateInstruction<1, 0, 0> {
+class LCallRuntime V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
@@ -1950,7 +1994,7 @@ class LCallRuntime: public LTemplateInstruction<1, 0, 0> {
};
-class LInteger32ToDouble: public LTemplateInstruction<1, 1, 0> {
+class LInteger32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LInteger32ToDouble(LOperand* value) {
inputs_[0] = value;
@@ -1962,7 +2006,7 @@ class LInteger32ToDouble: public LTemplateInstruction<1, 1, 0> {
};
-class LInteger32ToSmi: public LTemplateInstruction<1, 1, 0> {
+class LInteger32ToSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LInteger32ToSmi(LOperand* value) {
inputs_[0] = value;
@@ -1975,7 +2019,7 @@ class LInteger32ToSmi: public LTemplateInstruction<1, 1, 0> {
};
-class LUint32ToDouble: public LTemplateInstruction<1, 1, 0> {
+class LUint32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LUint32ToDouble(LOperand* value) {
inputs_[0] = value;
@@ -1987,7 +2031,7 @@ class LUint32ToDouble: public LTemplateInstruction<1, 1, 0> {
};
-class LNumberTagI: public LTemplateInstruction<1, 1, 0> {
+class LNumberTagI V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LNumberTagI(LOperand* value) {
inputs_[0] = value;
@@ -1999,7 +2043,7 @@ class LNumberTagI: public LTemplateInstruction<1, 1, 0> {
};
-class LNumberTagU: public LTemplateInstruction<1, 1, 0> {
+class LNumberTagU V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LNumberTagU(LOperand* value) {
inputs_[0] = value;
@@ -2011,7 +2055,7 @@ class LNumberTagU: public LTemplateInstruction<1, 1, 0> {
};
-class LNumberTagD: public LTemplateInstruction<1, 1, 2> {
+class LNumberTagD V8_FINAL : public LTemplateInstruction<1, 1, 2> {
public:
LNumberTagD(LOperand* value, LOperand* temp, LOperand* temp2) {
inputs_[0] = value;
@@ -2028,17 +2072,13 @@ class LNumberTagD: public LTemplateInstruction<1, 1, 2> {
};
-class LDoubleToSmi: public LTemplateInstruction<1, 1, 2> {
+class LDoubleToSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
- LDoubleToSmi(LOperand* value, LOperand* temp, LOperand* temp2) {
+ explicit LDoubleToSmi(LOperand* value) {
inputs_[0] = value;
- temps_[0] = temp;
- temps_[1] = temp2;
}
LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
DECLARE_CONCRETE_INSTRUCTION(DoubleToSmi, "double-to-smi")
DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
@@ -2048,17 +2088,13 @@ class LDoubleToSmi: public LTemplateInstruction<1, 1, 2> {
// Sometimes truncating conversion from a double value to an int32.
-class LDoubleToI: public LTemplateInstruction<1, 1, 2> {
+class LDoubleToI V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
- LDoubleToI(LOperand* value, LOperand* temp, LOperand* temp2) {
+ explicit LDoubleToI(LOperand* value) {
inputs_[0] = value;
- temps_[0] = temp;
- temps_[1] = temp2;
}
LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
DECLARE_CONCRETE_INSTRUCTION(DoubleToI, "double-to-i")
DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
@@ -2068,22 +2104,19 @@ class LDoubleToI: public LTemplateInstruction<1, 1, 2> {
// Truncating conversion from a tagged value to an int32.
-class LTaggedToI: public LTemplateInstruction<1, 1, 3> {
+class LTaggedToI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
public:
LTaggedToI(LOperand* value,
LOperand* temp,
- LOperand* temp2,
- LOperand* temp3) {
+ LOperand* temp2) {
inputs_[0] = value;
temps_[0] = temp;
temps_[1] = temp2;
- temps_[2] = temp3;
}
LOperand* value() { return inputs_[0]; }
LOperand* temp() { return temps_[0]; }
LOperand* temp2() { return temps_[1]; }
- LOperand* temp3() { return temps_[2]; }
DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i")
DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
@@ -2092,7 +2125,7 @@ class LTaggedToI: public LTemplateInstruction<1, 1, 3> {
};
-class LSmiTag: public LTemplateInstruction<1, 1, 0> {
+class LSmiTag V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LSmiTag(LOperand* value) {
inputs_[0] = value;
@@ -2104,7 +2137,7 @@ class LSmiTag: public LTemplateInstruction<1, 1, 0> {
};
-class LNumberUntagD: public LTemplateInstruction<1, 1, 0> {
+class LNumberUntagD V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LNumberUntagD(LOperand* value) {
inputs_[0] = value;
@@ -2117,7 +2150,7 @@ class LNumberUntagD: public LTemplateInstruction<1, 1, 0> {
};
-class LSmiUntag: public LTemplateInstruction<1, 1, 0> {
+class LSmiUntag V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
LSmiUntag(LOperand* value, bool needs_check)
: needs_check_(needs_check) {
@@ -2134,7 +2167,7 @@ class LSmiUntag: public LTemplateInstruction<1, 1, 0> {
};
-class LStoreNamedField: public LTemplateInstruction<0, 2, 1> {
+class LStoreNamedField V8_FINAL : public LTemplateInstruction<0, 2, 1> {
public:
LStoreNamedField(LOperand* object, LOperand* value, LOperand* temp) {
inputs_[0] = object;
@@ -2149,7 +2182,7 @@ class LStoreNamedField: public LTemplateInstruction<0, 2, 1> {
DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
Handle<Map> transition() const { return hydrogen()->transition_map(); }
Representation representation() const {
@@ -2158,7 +2191,7 @@ class LStoreNamedField: public LTemplateInstruction<0, 2, 1> {
};
-class LStoreNamedGeneric: public LTemplateInstruction<0, 2, 0> {
+class LStoreNamedGeneric V8_FINAL : public LTemplateInstruction<0, 2, 0> {
public:
LStoreNamedGeneric(LOperand* object, LOperand* value) {
inputs_[0] = object;
@@ -2171,14 +2204,14 @@ class LStoreNamedGeneric: public LTemplateInstruction<0, 2, 0> {
DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
Handle<Object> name() const { return hydrogen()->name(); }
StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
};
-class LStoreKeyed: public LTemplateInstruction<0, 3, 0> {
+class LStoreKeyed V8_FINAL : public LTemplateInstruction<0, 3, 0> {
public:
LStoreKeyed(LOperand* object, LOperand* key, LOperand* value) {
inputs_[0] = object;
@@ -2197,7 +2230,7 @@ class LStoreKeyed: public LTemplateInstruction<0, 3, 0> {
DECLARE_CONCRETE_INSTRUCTION(StoreKeyed, "store-keyed")
DECLARE_HYDROGEN_ACCESSOR(StoreKeyed)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
bool NeedsCanonicalization() {
if (hydrogen()->value()->IsAdd() || hydrogen()->value()->IsSub() ||
hydrogen()->value()->IsMul() || hydrogen()->value()->IsDiv()) {
@@ -2209,7 +2242,7 @@ class LStoreKeyed: public LTemplateInstruction<0, 3, 0> {
};
-class LStoreKeyedGeneric: public LTemplateInstruction<0, 3, 0> {
+class LStoreKeyedGeneric V8_FINAL : public LTemplateInstruction<0, 3, 0> {
public:
LStoreKeyedGeneric(LOperand* obj, LOperand* key, LOperand* value) {
inputs_[0] = obj;
@@ -2224,13 +2257,13 @@ class LStoreKeyedGeneric: public LTemplateInstruction<0, 3, 0> {
DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
};
-class LTransitionElementsKind: public LTemplateInstruction<0, 1, 1> {
+class LTransitionElementsKind V8_FINAL : public LTemplateInstruction<0, 1, 1> {
public:
LTransitionElementsKind(LOperand* object,
LOperand* new_map_temp) {
@@ -2245,7 +2278,7 @@ class LTransitionElementsKind: public LTemplateInstruction<0, 1, 1> {
"transition-elements-kind")
DECLARE_HYDROGEN_ACCESSOR(TransitionElementsKind)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
Handle<Map> original_map() { return hydrogen()->original_map(); }
Handle<Map> transitioned_map() { return hydrogen()->transitioned_map(); }
@@ -2254,7 +2287,7 @@ class LTransitionElementsKind: public LTemplateInstruction<0, 1, 1> {
};
-class LTrapAllocationMemento : public LTemplateInstruction<0, 1, 1> {
+class LTrapAllocationMemento V8_FINAL : public LTemplateInstruction<0, 1, 1> {
public:
LTrapAllocationMemento(LOperand* object,
LOperand* temp) {
@@ -2270,7 +2303,7 @@ class LTrapAllocationMemento : public LTemplateInstruction<0, 1, 1> {
};
-class LStringAdd: public LTemplateInstruction<1, 2, 0> {
+class LStringAdd V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LStringAdd(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -2286,7 +2319,7 @@ class LStringAdd: public LTemplateInstruction<1, 2, 0> {
-class LStringCharCodeAt: public LTemplateInstruction<1, 2, 0> {
+class LStringCharCodeAt V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LStringCharCodeAt(LOperand* string, LOperand* index) {
inputs_[0] = string;
@@ -2301,7 +2334,7 @@ class LStringCharCodeAt: public LTemplateInstruction<1, 2, 0> {
};
-class LStringCharFromCode: public LTemplateInstruction<1, 1, 0> {
+class LStringCharFromCode V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LStringCharFromCode(LOperand* char_code) {
inputs_[0] = char_code;
@@ -2314,20 +2347,20 @@ class LStringCharFromCode: public LTemplateInstruction<1, 1, 0> {
};
-class LCheckFunction: public LTemplateInstruction<0, 1, 0> {
+class LCheckValue V8_FINAL : public LTemplateInstruction<0, 1, 0> {
public:
- explicit LCheckFunction(LOperand* value) {
+ explicit LCheckValue(LOperand* value) {
inputs_[0] = value;
}
LOperand* value() { return inputs_[0]; }
- DECLARE_CONCRETE_INSTRUCTION(CheckFunction, "check-function")
- DECLARE_HYDROGEN_ACCESSOR(CheckFunction)
+ DECLARE_CONCRETE_INSTRUCTION(CheckValue, "check-value")
+ DECLARE_HYDROGEN_ACCESSOR(CheckValue)
};
-class LCheckInstanceType: public LTemplateInstruction<0, 1, 0> {
+class LCheckInstanceType V8_FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LCheckInstanceType(LOperand* value) {
inputs_[0] = value;
@@ -2340,7 +2373,7 @@ class LCheckInstanceType: public LTemplateInstruction<0, 1, 0> {
};
-class LCheckMaps: public LTemplateInstruction<0, 1, 0> {
+class LCheckMaps V8_FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LCheckMaps(LOperand* value) {
inputs_[0] = value;
@@ -2353,7 +2386,7 @@ class LCheckMaps: public LTemplateInstruction<0, 1, 0> {
};
-class LCheckSmi: public LTemplateInstruction<1, 1, 0> {
+class LCheckSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LCheckSmi(LOperand* value) {
inputs_[0] = value;
@@ -2365,7 +2398,7 @@ class LCheckSmi: public LTemplateInstruction<1, 1, 0> {
};
-class LCheckNonSmi: public LTemplateInstruction<0, 1, 0> {
+class LCheckNonSmi V8_FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LCheckNonSmi(LOperand* value) {
inputs_[0] = value;
@@ -2378,7 +2411,7 @@ class LCheckNonSmi: public LTemplateInstruction<0, 1, 0> {
};
-class LClampDToUint8: public LTemplateInstruction<1, 1, 0> {
+class LClampDToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LClampDToUint8(LOperand* unclamped) {
inputs_[0] = unclamped;
@@ -2390,7 +2423,7 @@ class LClampDToUint8: public LTemplateInstruction<1, 1, 0> {
};
-class LClampIToUint8: public LTemplateInstruction<1, 1, 0> {
+class LClampIToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LClampIToUint8(LOperand* unclamped) {
inputs_[0] = unclamped;
@@ -2402,7 +2435,7 @@ class LClampIToUint8: public LTemplateInstruction<1, 1, 0> {
};
-class LClampTToUint8: public LTemplateInstruction<1, 1, 1> {
+class LClampTToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LClampTToUint8(LOperand* unclamped, LOperand* temp) {
inputs_[0] = unclamped;
@@ -2416,7 +2449,7 @@ class LClampTToUint8: public LTemplateInstruction<1, 1, 1> {
};
-class LAllocate: public LTemplateInstruction<1, 2, 2> {
+class LAllocate V8_FINAL : public LTemplateInstruction<1, 2, 2> {
public:
LAllocate(LOperand* size, LOperand* temp1, LOperand* temp2) {
inputs_[1] = size;
@@ -2433,21 +2466,21 @@ class LAllocate: public LTemplateInstruction<1, 2, 2> {
};
-class LRegExpLiteral: public LTemplateInstruction<1, 0, 0> {
+class LRegExpLiteral V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(RegExpLiteral, "regexp-literal")
DECLARE_HYDROGEN_ACCESSOR(RegExpLiteral)
};
-class LFunctionLiteral: public LTemplateInstruction<1, 0, 0> {
+class LFunctionLiteral V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(FunctionLiteral, "function-literal")
DECLARE_HYDROGEN_ACCESSOR(FunctionLiteral)
};
-class LToFastProperties: public LTemplateInstruction<1, 1, 0> {
+class LToFastProperties V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LToFastProperties(LOperand* value) {
inputs_[0] = value;
@@ -2460,7 +2493,7 @@ class LToFastProperties: public LTemplateInstruction<1, 1, 0> {
};
-class LTypeof: public LTemplateInstruction<1, 1, 0> {
+class LTypeof V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LTypeof(LOperand* value) {
inputs_[0] = value;
@@ -2472,7 +2505,7 @@ class LTypeof: public LTemplateInstruction<1, 1, 0> {
};
-class LTypeofIsAndBranch: public LControlInstruction<1, 0> {
+class LTypeofIsAndBranch V8_FINAL : public LControlInstruction<1, 0> {
public:
explicit LTypeofIsAndBranch(LOperand* value) {
inputs_[0] = value;
@@ -2485,11 +2518,11 @@ class LTypeofIsAndBranch: public LControlInstruction<1, 0> {
Handle<String> type_literal() { return hydrogen()->type_literal(); }
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LIsConstructCallAndBranch: public LControlInstruction<0, 1> {
+class LIsConstructCallAndBranch V8_FINAL : public LControlInstruction<0, 1> {
public:
explicit LIsConstructCallAndBranch(LOperand* temp) {
temps_[0] = temp;
@@ -2502,16 +2535,18 @@ class LIsConstructCallAndBranch: public LControlInstruction<0, 1> {
};
-class LOsrEntry: public LTemplateInstruction<0, 0, 0> {
+class LOsrEntry V8_FINAL : public LTemplateInstruction<0, 0, 0> {
public:
LOsrEntry() {}
- virtual bool HasInterestingComment(LCodeGen* gen) const { return false; }
+ virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+ return false;
+ }
DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry")
};
-class LStackCheck: public LTemplateInstruction<0, 0, 0> {
+class LStackCheck V8_FINAL : public LTemplateInstruction<0, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(StackCheck, "stack-check")
DECLARE_HYDROGEN_ACCESSOR(StackCheck)
@@ -2523,7 +2558,7 @@ class LStackCheck: public LTemplateInstruction<0, 0, 0> {
};
-class LForInPrepareMap: public LTemplateInstruction<1, 1, 0> {
+class LForInPrepareMap V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LForInPrepareMap(LOperand* object) {
inputs_[0] = object;
@@ -2535,7 +2570,7 @@ class LForInPrepareMap: public LTemplateInstruction<1, 1, 0> {
};
-class LForInCacheArray: public LTemplateInstruction<1, 1, 0> {
+class LForInCacheArray V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LForInCacheArray(LOperand* map) {
inputs_[0] = map;
@@ -2551,7 +2586,7 @@ class LForInCacheArray: public LTemplateInstruction<1, 1, 0> {
};
-class LCheckMapValue: public LTemplateInstruction<0, 2, 0> {
+class LCheckMapValue V8_FINAL : public LTemplateInstruction<0, 2, 0> {
public:
LCheckMapValue(LOperand* value, LOperand* map) {
inputs_[0] = value;
@@ -2565,7 +2600,7 @@ class LCheckMapValue: public LTemplateInstruction<0, 2, 0> {
};
-class LLoadFieldByIndex: public LTemplateInstruction<1, 2, 0> {
+class LLoadFieldByIndex V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LLoadFieldByIndex(LOperand* object, LOperand* index) {
inputs_[0] = object;
@@ -2580,7 +2615,7 @@ class LLoadFieldByIndex: public LTemplateInstruction<1, 2, 0> {
class LChunkBuilder;
-class LPlatformChunk: public LChunk {
+class LPlatformChunk V8_FINAL : public LChunk {
public:
LPlatformChunk(CompilationInfo* info, HGraph* graph)
: LChunk(info, graph) { }
@@ -2590,7 +2625,7 @@ class LPlatformChunk: public LChunk {
};
-class LChunkBuilder BASE_EMBEDDED {
+class LChunkBuilder V8_FINAL BASE_EMBEDDED {
public:
LChunkBuilder(CompilationInfo* info, HGraph* graph, LAllocator* allocator)
: chunk_(NULL),
diff --git a/deps/v8/src/arm/lithium-codegen-arm.cc b/deps/v8/src/arm/lithium-codegen-arm.cc
index 12fce439f..7f65023ed 100644
--- a/deps/v8/src/arm/lithium-codegen-arm.cc
+++ b/deps/v8/src/arm/lithium-codegen-arm.cc
@@ -31,12 +31,13 @@
#include "arm/lithium-gap-resolver-arm.h"
#include "code-stubs.h"
#include "stub-cache.h"
+#include "hydrogen-osr.h"
namespace v8 {
namespace internal {
-class SafepointGenerator : public CallWrapper {
+class SafepointGenerator V8_FINAL : public CallWrapper {
public:
SafepointGenerator(LCodeGen* codegen,
LPointerMap* pointers,
@@ -44,11 +45,11 @@ class SafepointGenerator : public CallWrapper {
: codegen_(codegen),
pointers_(pointers),
deopt_mode_(mode) { }
- virtual ~SafepointGenerator() { }
+ virtual ~SafepointGenerator() {}
- virtual void BeforeCall(int call_size) const { }
+ virtual void BeforeCall(int call_size) const V8_OVERRIDE {}
- virtual void AfterCall() const {
+ virtual void AfterCall() const V8_OVERRIDE {
codegen_->RecordSafepoint(pointers_, deopt_mode_);
}
@@ -253,6 +254,21 @@ bool LCodeGen::GeneratePrologue() {
}
+void LCodeGen::GenerateOsrPrologue() {
+ // Generate the OSR entry prologue at the first unknown OSR value, or if there
+ // are none, at the OSR entrypoint instruction.
+ if (osr_pc_offset_ >= 0) return;
+
+ osr_pc_offset_ = masm()->pc_offset();
+
+ // Adjust the frame size, subsuming the unoptimized frame into the
+ // optimized frame.
+ int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
+ ASSERT(slots >= 0);
+ __ sub(sp, sp, Operand(slots * kPointerSize));
+}
+
+
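The frame adjustment in GenerateOsrPrologue above is plain slot arithmetic; a minimal host-side sketch with made-up numbers (OsrStackAdjustment is an illustrative name, not part of V8):

#include <cassert>

// Bytes the OSR entry must grow the stack by: the unoptimized frame's
// slots are subsumed into the optimized frame, so only the difference
// is newly allocated.
int OsrStackAdjustment(int optimized_slots, int unoptimized_slots,
                       int pointer_size) {
  int slots = optimized_slots - unoptimized_slots;
  assert(slots >= 0);  // the optimized frame is never smaller
  return slots * pointer_size;
}

// e.g. OsrStackAdjustment(10, 4, 4) == 24, i.e. sub sp, sp, #24 on ARM.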
bool LCodeGen::GenerateBody() {
ASSERT(is_generating());
bool emit_instructions = true;
@@ -423,7 +439,7 @@ Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
} else if (op->IsConstantOperand()) {
LConstantOperand* const_op = LConstantOperand::cast(op);
HConstant* constant = chunk_->LookupConstant(const_op);
- Handle<Object> literal = constant->handle();
+ Handle<Object> literal = constant->handle(isolate());
Representation r = chunk_->LookupLiteralRepresentation(const_op);
if (r.IsInteger32()) {
ASSERT(literal->IsNumber());
@@ -431,7 +447,7 @@ Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
} else if (r.IsDouble()) {
Abort(kEmitLoadRegisterUnsupportedDoubleImmediate);
} else {
- ASSERT(r.IsTagged());
+ ASSERT(r.IsSmiOrTagged());
__ LoadObject(scratch, literal);
}
return scratch;
@@ -458,7 +474,7 @@ DwVfpRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
} else if (op->IsConstantOperand()) {
LConstantOperand* const_op = LConstantOperand::cast(op);
HConstant* constant = chunk_->LookupConstant(const_op);
- Handle<Object> literal = constant->handle();
+ Handle<Object> literal = constant->handle(isolate());
Representation r = chunk_->LookupLiteralRepresentation(const_op);
if (r.IsInteger32()) {
ASSERT(literal->IsNumber());
@@ -486,7 +502,7 @@ DwVfpRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
HConstant* constant = chunk_->LookupConstant(op);
ASSERT(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
- return constant->handle();
+ return constant->handle(isolate());
}
@@ -543,7 +559,7 @@ Operand LCodeGen::ToOperand(LOperand* op) {
Abort(kToOperandUnsupportedDoubleImmediate);
}
ASSERT(r.IsTagged());
- return Operand(constant->handle());
+ return Operand(constant->handle(isolate()));
} else if (op->IsRegister()) {
return Operand(ToRegister(op));
} else if (op->IsDoubleRegister()) {
@@ -690,7 +706,7 @@ void LCodeGen::AddToTranslation(LEnvironment* environment,
translation->StoreDoubleRegister(reg);
} else if (op->IsConstantOperand()) {
HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
- int src_index = DefineDeoptimizationLiteral(constant->handle());
+ int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
translation->StoreLiteral(src_index);
} else {
UNREACHABLE();
@@ -1098,8 +1114,7 @@ void LCodeGen::DoCallStub(LCallStub* instr) {
void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
- // Record the address of the first unknown OSR value as the place to enter.
- if (osr_pc_offset_ == -1) osr_pc_offset_ = masm()->pc_offset();
+ GenerateOsrPrologue();
}
@@ -1573,21 +1588,17 @@ void LCodeGen::DoMathFloorOfDiv(LMathFloorOfDiv* instr) {
void LCodeGen::DoMulI(LMulI* instr) {
- Register scratch = scratch0();
Register result = ToRegister(instr->result());
// Note that result may alias left.
Register left = ToRegister(instr->left());
LOperand* right_op = instr->right();
- bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
bool bailout_on_minus_zero =
instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
+ bool overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
- if (right_op->IsConstantOperand() && !can_overflow) {
- // Use optimized code for specific constants.
- int32_t constant = ToRepresentation(
- LConstantOperand::cast(right_op),
- instr->hydrogen()->right()->representation());
+ if (right_op->IsConstantOperand()) {
+ int32_t constant = ToInteger32(LConstantOperand::cast(right_op));
if (bailout_on_minus_zero && (constant < 0)) {
// The case of a zero constant will be handled separately.
@@ -1598,7 +1609,12 @@ void LCodeGen::DoMulI(LMulI* instr) {
switch (constant) {
case -1:
- __ rsb(result, left, Operand::Zero());
+ if (overflow) {
+ __ rsb(result, left, Operand::Zero(), SetCC);
+ DeoptimizeIf(vs, instr->environment());
+ } else {
+ __ rsb(result, left, Operand::Zero());
+ }
break;
case 0:
if (bailout_on_minus_zero) {
@@ -1619,23 +1635,21 @@ void LCodeGen::DoMulI(LMulI* instr) {
int32_t mask = constant >> 31;
uint32_t constant_abs = (constant + mask) ^ mask;
- if (IsPowerOf2(constant_abs) ||
- IsPowerOf2(constant_abs - 1) ||
- IsPowerOf2(constant_abs + 1)) {
- if (IsPowerOf2(constant_abs)) {
- int32_t shift = WhichPowerOf2(constant_abs);
- __ mov(result, Operand(left, LSL, shift));
- } else if (IsPowerOf2(constant_abs - 1)) {
- int32_t shift = WhichPowerOf2(constant_abs - 1);
- __ add(result, left, Operand(left, LSL, shift));
- } else if (IsPowerOf2(constant_abs + 1)) {
- int32_t shift = WhichPowerOf2(constant_abs + 1);
- __ rsb(result, left, Operand(left, LSL, shift));
- }
-
+ if (IsPowerOf2(constant_abs)) {
+ int32_t shift = WhichPowerOf2(constant_abs);
+ __ mov(result, Operand(left, LSL, shift));
+      // Correct the sign of the result if the constant is negative.
+ if (constant < 0) __ rsb(result, result, Operand::Zero());
+ } else if (IsPowerOf2(constant_abs - 1)) {
+ int32_t shift = WhichPowerOf2(constant_abs - 1);
+ __ add(result, left, Operand(left, LSL, shift));
+      // Correct the sign of the result if the constant is negative.
+ if (constant < 0) __ rsb(result, result, Operand::Zero());
+ } else if (IsPowerOf2(constant_abs + 1)) {
+ int32_t shift = WhichPowerOf2(constant_abs + 1);
+ __ rsb(result, left, Operand(left, LSL, shift));
// Correct the sign of the result if the constant is negative.
if (constant < 0) __ rsb(result, result, Operand::Zero());
-
} else {
// Generate standard code.
__ mov(ip, Operand(constant));
@@ -1644,12 +1658,11 @@ void LCodeGen::DoMulI(LMulI* instr) {
}
} else {
- Register right = EmitLoadRegister(right_op, scratch);
- if (bailout_on_minus_zero) {
- __ orr(ToRegister(instr->temp()), left, right);
- }
+ ASSERT(right_op->IsRegister());
+ Register right = ToRegister(right_op);
- if (can_overflow) {
+ if (overflow) {
+ Register scratch = scratch0();
// scratch:result = left * right.
if (instr->hydrogen()->representation().IsSmi()) {
__ SmiUntag(result, left);
@@ -1669,12 +1682,12 @@ void LCodeGen::DoMulI(LMulI* instr) {
}
if (bailout_on_minus_zero) {
- // Bail out if the result is supposed to be negative zero.
Label done;
+ __ teq(left, Operand(right));
+ __ b(pl, &done);
+ // Bail out if the result is minus zero.
__ cmp(result, Operand::Zero());
- __ b(ne, &done);
- __ cmp(ToRegister(instr->temp()), Operand::Zero());
- DeoptimizeIf(mi, instr->environment());
+ DeoptimizeIf(eq, instr->environment());
__ bind(&done);
}
}
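The constant cases above rely on x * 2^k == x << k and its +/-1 neighbours. A rough host-side sketch of the same selection logic follows; MulByConstant and the two helpers are stand-ins mirroring the V8 utilities, and 32-bit overflow, which the real code deoptimizes on, is ignored here:

#include <cstdint>

static bool IsPowerOf2(uint32_t v) { return v != 0 && (v & (v - 1)) == 0; }
static int WhichPowerOf2(uint32_t v) { int s = 0; while (v >>= 1) s++; return s; }

int32_t MulByConstant(int32_t x, int32_t c) {
  uint32_t mask = static_cast<uint32_t>(c >> 31);             // all ones when c < 0
  uint32_t abs_c = (static_cast<uint32_t>(c) + mask) ^ mask;  // branch-free |c|
  uint32_t ux = static_cast<uint32_t>(x);
  uint32_t r;
  if (IsPowerOf2(abs_c)) {
    r = ux << WhichPowerOf2(abs_c);             // mov result, left LSL #k
  } else if (IsPowerOf2(abs_c - 1)) {
    r = ux + (ux << WhichPowerOf2(abs_c - 1));  // add result, left, left LSL #k
  } else if (IsPowerOf2(abs_c + 1)) {
    r = (ux << WhichPowerOf2(abs_c + 1)) - ux;  // rsb result, left, left LSL #k
  } else {
    return x * c;                               // generic mul path
  }
  int32_t result = static_cast<int32_t>(r);
  // Correct the sign of the result if the constant is negative.
  return c < 0 ? -result : result;
}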
@@ -1871,7 +1884,7 @@ void LCodeGen::DoConstantE(LConstantE* instr) {
void LCodeGen::DoConstantT(LConstantT* instr) {
- Handle<Object> value = instr->value();
+ Handle<Object> value = instr->value(isolate());
AllowDeferredHandleDereference smi_check;
__ LoadObject(ToRegister(instr->result()), value);
}
@@ -2735,15 +2748,15 @@ void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
- class DeferredInstanceOfKnownGlobal: public LDeferredCode {
+ class DeferredInstanceOfKnownGlobal V8_FINAL : public LDeferredCode {
public:
DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
LInstanceOfKnownGlobal* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() {
+ virtual void Generate() V8_OVERRIDE {
codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
}
- virtual LInstruction* instr() { return instr_; }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
Label* map_check() { return &map_check_; }
private:
LInstanceOfKnownGlobal* instr_;
@@ -3722,14 +3735,14 @@ void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
void LCodeGen::DoMathAbs(LMathAbs* instr) {
// Class for deferred case.
- class DeferredMathAbsTaggedHeapNumber: public LDeferredCode {
+ class DeferredMathAbsTaggedHeapNumber V8_FINAL : public LDeferredCode {
public:
DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() {
+ virtual void Generate() V8_OVERRIDE {
codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
}
- virtual LInstruction* instr() { return instr_; }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
LMathAbs* instr_;
};
@@ -3878,80 +3891,64 @@ void LCodeGen::DoPower(LPower* instr) {
void LCodeGen::DoRandom(LRandom* instr) {
- class DeferredDoRandom: public LDeferredCode {
- public:
- DeferredDoRandom(LCodeGen* codegen, LRandom* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredRandom(instr_); }
- virtual LInstruction* instr() { return instr_; }
- private:
- LRandom* instr_;
- };
-
- DeferredDoRandom* deferred = new(zone()) DeferredDoRandom(this, instr);
-
- // Having marked this instruction as a call we can use any
- // registers.
- ASSERT(ToDoubleRegister(instr->result()).is(d7));
- ASSERT(ToRegister(instr->global_object()).is(r0));
-
+ // Assert that the register size is indeed the size of each seed.
static const int kSeedSize = sizeof(uint32_t);
STATIC_ASSERT(kPointerSize == kSeedSize);
- __ ldr(r0, FieldMemOperand(r0, GlobalObject::kNativeContextOffset));
+ // Load native context
+ Register global_object = ToRegister(instr->global_object());
+ Register native_context = global_object;
+ __ ldr(native_context, FieldMemOperand(
+ global_object, GlobalObject::kNativeContextOffset));
+
+ // Load state (FixedArray of the native context's random seeds)
static const int kRandomSeedOffset =
FixedArray::kHeaderSize + Context::RANDOM_SEED_INDEX * kPointerSize;
- __ ldr(r2, FieldMemOperand(r0, kRandomSeedOffset));
- // r2: FixedArray of the native context's random seeds
+ Register state = native_context;
+ __ ldr(state, FieldMemOperand(native_context, kRandomSeedOffset));
// Load state[0].
- __ ldr(r1, FieldMemOperand(r2, ByteArray::kHeaderSize));
- __ cmp(r1, Operand::Zero());
- __ b(eq, deferred->entry());
+ Register state0 = ToRegister(instr->scratch());
+ __ ldr(state0, FieldMemOperand(state, ByteArray::kHeaderSize));
// Load state[1].
- __ ldr(r0, FieldMemOperand(r2, ByteArray::kHeaderSize + kSeedSize));
- // r1: state[0].
- // r0: state[1].
+ Register state1 = ToRegister(instr->scratch2());
+ __ ldr(state1, FieldMemOperand(state, ByteArray::kHeaderSize + kSeedSize));
// state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16)
- __ and_(r3, r1, Operand(0xFFFF));
- __ mov(r4, Operand(18273));
- __ mul(r3, r3, r4);
- __ add(r1, r3, Operand(r1, LSR, 16));
+ Register scratch3 = ToRegister(instr->scratch3());
+ Register scratch4 = scratch0();
+ __ and_(scratch3, state0, Operand(0xFFFF));
+ __ mov(scratch4, Operand(18273));
+ __ mul(scratch3, scratch3, scratch4);
+ __ add(state0, scratch3, Operand(state0, LSR, 16));
// Save state[0].
- __ str(r1, FieldMemOperand(r2, ByteArray::kHeaderSize));
+ __ str(state0, FieldMemOperand(state, ByteArray::kHeaderSize));
// state[1] = 36969 * (state[1] & 0xFFFF) + (state[1] >> 16)
- __ and_(r3, r0, Operand(0xFFFF));
- __ mov(r4, Operand(36969));
- __ mul(r3, r3, r4);
- __ add(r0, r3, Operand(r0, LSR, 16));
+ __ and_(scratch3, state1, Operand(0xFFFF));
+ __ mov(scratch4, Operand(36969));
+ __ mul(scratch3, scratch3, scratch4);
+ __ add(state1, scratch3, Operand(state1, LSR, 16));
// Save state[1].
- __ str(r0, FieldMemOperand(r2, ByteArray::kHeaderSize + kSeedSize));
+ __ str(state1, FieldMemOperand(state, ByteArray::kHeaderSize + kSeedSize));
// Random bit pattern = (state[0] << 14) + (state[1] & 0x3FFFF)
- __ and_(r0, r0, Operand(0x3FFFF));
- __ add(r0, r0, Operand(r1, LSL, 14));
+ Register random = scratch4;
+ __ and_(random, state1, Operand(0x3FFFF));
+ __ add(random, random, Operand(state0, LSL, 14));
- __ bind(deferred->exit());
// 0x41300000 is the top half of 1.0 x 2^20 as a double.
// Create this constant using mov/orr to avoid PC relative load.
- __ mov(r1, Operand(0x41000000));
- __ orr(r1, r1, Operand(0x300000));
+ __ mov(scratch3, Operand(0x41000000));
+ __ orr(scratch3, scratch3, Operand(0x300000));
// Move 0x41300000xxxxxxxx (x = random bits) to VFP.
- __ vmov(d7, r0, r1);
+ DwVfpRegister result = ToDoubleRegister(instr->result());
+ __ vmov(result, random, scratch3);
// Move 0x4130000000000000 to VFP.
- __ mov(r0, Operand::Zero());
- __ vmov(d8, r0, r1);
- // Subtract and store the result in the heap number.
- __ vsub(d7, d7, d8);
-}
-
-
-void LCodeGen::DoDeferredRandom(LRandom* instr) {
- __ PrepareCallCFunction(1, scratch0());
- __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
- // Return value is in r0.
+ __ mov(scratch4, Operand::Zero());
+ DwVfpRegister scratch5 = double_scratch0();
+ __ vmov(scratch5, scratch4, scratch3);
+ __ vsub(result, result, scratch5);
}
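For reference, the seed update above is a pair of 16-bit multiply-with-carry steps. The same arithmetic in portable C++ (RandomFromSeeds is an illustrative name; the constants and bit layout come straight from the code above):

#include <cstdint>
#include <cstring>

double RandomFromSeeds(uint32_t state[2]) {
  // state[i] = multiplier * (state[i] & 0xFFFF) + (state[i] >> 16)
  state[0] = 18273u * (state[0] & 0xFFFF) + (state[0] >> 16);
  state[1] = 36969u * (state[1] & 0xFFFF) + (state[1] >> 16);
  uint32_t random = (state[0] << 14) + (state[1] & 0x3FFFF);
  // Splice the 32 random bits into the low mantissa of 1.0 x 2^20
  // (upper half 0x41300000), then subtract 1.0 x 2^20, leaving [0, 1).
  uint64_t bits = (static_cast<uint64_t>(0x41300000) << 32) | random;
  double d;
  std::memcpy(&d, &bits, sizeof d);
  return d - 1048576.0;  // 1048576.0 == 1.0 x 2^20
}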
@@ -4146,6 +4143,15 @@ void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
}
+void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
+ Register function = ToRegister(instr->function());
+ Register code_object = ToRegister(instr->code_object());
+ __ add(code_object, code_object, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ str(code_object,
+ FieldMemOperand(function, JSFunction::kCodeEntryOffset));
+}
+
+
void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
Register result = ToRegister(instr->result());
Register base = ToRegister(instr->base_object());
@@ -4520,12 +4526,14 @@ void LCodeGen::DoStringAdd(LStringAdd* instr) {
void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
- class DeferredStringCharCodeAt: public LDeferredCode {
+ class DeferredStringCharCodeAt V8_FINAL : public LDeferredCode {
public:
DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
- virtual LInstruction* instr() { return instr_; }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredStringCharCodeAt(instr_);
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
LStringCharCodeAt* instr_;
};
@@ -4573,12 +4581,14 @@ void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
- class DeferredStringCharFromCode: public LDeferredCode {
+ class DeferredStringCharFromCode V8_FINAL : public LDeferredCode {
public:
DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); }
- virtual LInstruction* instr() { return instr_; }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredStringCharFromCode(instr_);
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
LStringCharFromCode* instr_;
};
@@ -4661,16 +4671,16 @@ void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
- class DeferredNumberTagI: public LDeferredCode {
+ class DeferredNumberTagI V8_FINAL : public LDeferredCode {
public:
DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() {
+ virtual void Generate() V8_OVERRIDE {
codegen()->DoDeferredNumberTagI(instr_,
instr_->value(),
SIGNED_INT32);
}
- virtual LInstruction* instr() { return instr_; }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
LNumberTagI* instr_;
};
@@ -4686,16 +4696,16 @@ void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
- class DeferredNumberTagU: public LDeferredCode {
+ class DeferredNumberTagU V8_FINAL : public LDeferredCode {
public:
DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() {
+ virtual void Generate() V8_OVERRIDE {
codegen()->DoDeferredNumberTagI(instr_,
instr_->value(),
UNSIGNED_INT32);
}
- virtual LInstruction* instr() { return instr_; }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
LNumberTagU* instr_;
};
@@ -4768,12 +4778,14 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
- class DeferredNumberTagD: public LDeferredCode {
+ class DeferredNumberTagD V8_FINAL : public LDeferredCode {
public:
DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
- virtual LInstruction* instr() { return instr_; }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredNumberTagD(instr_);
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
LNumberTagD* instr_;
};
@@ -4902,7 +4914,7 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
Register scratch1 = scratch0();
Register scratch2 = ToRegister(instr->temp());
LowDwVfpRegister double_scratch = double_scratch0();
- DwVfpRegister double_scratch2 = ToDoubleRegister(instr->temp3());
+ DwVfpRegister double_scratch2 = ToDoubleRegister(instr->temp2());
ASSERT(!scratch1.is(input_reg) && !scratch1.is(scratch2));
ASSERT(!scratch2.is(input_reg) && !scratch2.is(scratch1));
@@ -4913,18 +4925,14 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
// The carry flag is set when we reach this deferred code as we just executed
// SmiUntag(heap_object, SetCC)
STATIC_ASSERT(kHeapObjectTag == 1);
- __ adc(input_reg, input_reg, Operand(input_reg));
+ __ adc(scratch2, input_reg, Operand(input_reg));
// Heap number map check.
- __ ldr(scratch1, FieldMemOperand(input_reg, HeapObject::kMapOffset));
+ __ ldr(scratch1, FieldMemOperand(scratch2, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
__ cmp(scratch1, Operand(ip));
if (instr->truncating()) {
- Register scratch3 = ToRegister(instr->temp2());
- ASSERT(!scratch3.is(input_reg) &&
- !scratch3.is(scratch1) &&
- !scratch3.is(scratch2));
// Performs a truncating conversion of a floating point number as used by
// the JS bitwise operations.
Label heap_number;
@@ -4932,23 +4940,18 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
// Check for undefined. Undefined is converted to zero for truncating
// conversions.
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ cmp(input_reg, Operand(ip));
+ __ cmp(scratch2, Operand(ip));
DeoptimizeIf(ne, instr->environment());
__ mov(input_reg, Operand::Zero());
__ b(&done);
__ bind(&heap_number);
- __ sub(scratch1, input_reg, Operand(kHeapObjectTag));
- __ vldr(double_scratch2, scratch1, HeapNumber::kValueOffset);
-
- __ ECMAToInt32(input_reg, double_scratch2,
- scratch1, scratch2, scratch3, double_scratch);
-
+ __ TruncateHeapNumberToI(input_reg, scratch2);
} else {
// Deoptimize if we don't have a heap number.
DeoptimizeIf(ne, instr->environment());
- __ sub(ip, input_reg, Operand(kHeapObjectTag));
+ __ sub(ip, scratch2, Operand(kHeapObjectTag));
__ vldr(double_scratch2, ip, HeapNumber::kValueOffset);
__ TryDoubleToInt32Exact(input_reg, double_scratch2, double_scratch);
DeoptimizeIf(ne, instr->environment());
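A standalone check of the adc() identity used at the top of this deferred path, runnable on any host (the pointer value is arbitrary; only its set tag bit matters):

#include <cassert>
#include <cstdint>

int main() {
  uint32_t tagged = 0x12345679;  // a heap pointer: low bit is kHeapObjectTag == 1
  // SmiUntag(..., SetCC) shifted right by one and left the tag bit in the
  // carry flag, so input_reg now holds tagged >> 1 and C == 1.
  uint32_t untagged = tagged >> 1;
  uint32_t carry = tagged & 1;
  // adc scratch2, input, input  ==  input + input + C: the original tagged
  // pointer is recovered without reloading it from memory.
  uint32_t recovered = untagged + untagged + carry;
  assert(recovered == tagged);
  return 0;
}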
@@ -4966,12 +4969,14 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
- class DeferredTaggedToI: public LDeferredCode {
+ class DeferredTaggedToI V8_FINAL : public LDeferredCode {
public:
DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
- virtual LInstruction* instr() { return instr_; }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredTaggedToI(instr_);
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
LTaggedToI* instr_;
};
@@ -5018,14 +5023,11 @@ void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
Register result_reg = ToRegister(instr->result());
Register scratch1 = scratch0();
- Register scratch2 = ToRegister(instr->temp());
DwVfpRegister double_input = ToDoubleRegister(instr->value());
LowDwVfpRegister double_scratch = double_scratch0();
if (instr->truncating()) {
- Register scratch3 = ToRegister(instr->temp2());
- __ ECMAToInt32(result_reg, double_input,
- scratch1, scratch2, scratch3, double_scratch);
+ __ TruncateDoubleToI(result_reg, double_input);
} else {
__ TryDoubleToInt32Exact(result_reg, double_input, double_scratch);
// Deoptimize if the input wasn't an int32 (inside a double).
@@ -5046,14 +5048,11 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
Register result_reg = ToRegister(instr->result());
Register scratch1 = scratch0();
- Register scratch2 = ToRegister(instr->temp());
DwVfpRegister double_input = ToDoubleRegister(instr->value());
LowDwVfpRegister double_scratch = double_scratch0();
if (instr->truncating()) {
- Register scratch3 = ToRegister(instr->temp2());
- __ ECMAToInt32(result_reg, double_input,
- scratch1, scratch2, scratch3, double_scratch);
+ __ TruncateDoubleToI(result_reg, double_input);
} else {
__ TryDoubleToInt32Exact(result_reg, double_input, double_scratch);
// Deoptimize if the input wasn't an int32 (inside a double).
@@ -5132,18 +5131,18 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
}
-void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
+void LCodeGen::DoCheckValue(LCheckValue* instr) {
Register reg = ToRegister(instr->value());
- Handle<JSFunction> target = instr->hydrogen()->target();
+ Handle<HeapObject> object = instr->hydrogen()->object();
AllowDeferredHandleDereference smi_check;
- if (isolate()->heap()->InNewSpace(*target)) {
+ if (isolate()->heap()->InNewSpace(*object)) {
Register reg = ToRegister(instr->value());
- Handle<Cell> cell = isolate()->factory()->NewCell(target);
+ Handle<Cell> cell = isolate()->factory()->NewCell(object);
__ mov(ip, Operand(Handle<Object>(cell)));
__ ldr(ip, FieldMemOperand(ip, Cell::kValueOffset));
__ cmp(reg, ip);
} else {
- __ cmp(reg, Operand(target));
+ __ cmp(reg, Operand(object));
}
DeoptimizeIf(ne, instr->environment());
}
@@ -5162,17 +5161,17 @@ void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
- class DeferredCheckMaps: public LDeferredCode {
+ class DeferredCheckMaps V8_FINAL : public LDeferredCode {
public:
DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
: LDeferredCode(codegen), instr_(instr), object_(object) {
SetExit(check_maps());
}
- virtual void Generate() {
+ virtual void Generate() V8_OVERRIDE {
codegen()->DoDeferredInstanceMigration(instr_, object_);
}
Label* check_maps() { return &check_maps_; }
- virtual LInstruction* instr() { return instr_; }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
LCheckMaps* instr_;
Label check_maps_;
@@ -5265,12 +5264,14 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
void LCodeGen::DoAllocate(LAllocate* instr) {
- class DeferredAllocate: public LDeferredCode {
+ class DeferredAllocate V8_FINAL : public LDeferredCode {
public:
DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredAllocate(instr_); }
- virtual LInstruction* instr() { return instr_; }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredAllocate(instr_);
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
LAllocate* instr_;
};
@@ -5422,8 +5423,7 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
if (!pretenure && instr->hydrogen()->has_no_literals()) {
FastNewClosureStub stub(instr->hydrogen()->language_mode(),
instr->hydrogen()->is_generator());
- __ mov(r1, Operand(instr->hydrogen()->shared_info()));
- __ push(r1);
+ __ mov(r2, Operand(instr->hydrogen()->shared_info()));
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
} else {
__ mov(r2, Operand(instr->hydrogen()->shared_info()));
@@ -5621,12 +5621,14 @@ void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
void LCodeGen::DoStackCheck(LStackCheck* instr) {
- class DeferredStackCheck: public LDeferredCode {
+ class DeferredStackCheck V8_FINAL : public LDeferredCode {
public:
DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
- virtual LInstruction* instr() { return instr_; }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredStackCheck(instr_);
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
LStackCheck* instr_;
};
@@ -5641,9 +5643,10 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
__ LoadRoot(ip, Heap::kStackLimitRootIndex);
__ cmp(sp, Operand(ip));
__ b(hs, &done);
- StackCheckStub stub;
PredictableCodeSizeScope predictable(masm_, 2 * Assembler::kInstrSize);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+ CallCode(isolate()->builtins()->StackCheck(),
+ RelocInfo::CODE_TARGET,
+ instr);
EnsureSpaceForLazyDeopt();
last_lazy_deopt_pc_ = masm()->pc_offset();
__ bind(&done);
@@ -5680,9 +5683,7 @@ void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
ASSERT(!environment->HasBeenRegistered());
RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
- // Normally we record the first unknown OSR value as the entrypoint to the OSR
- // code, but if there were none, record the entrypoint here.
- if (osr_pc_offset_ == -1) osr_pc_offset_ = masm()->pc_offset();
+ GenerateOsrPrologue();
}
diff --git a/deps/v8/src/arm/lithium-codegen-arm.h b/deps/v8/src/arm/lithium-codegen-arm.h
index d0bfcbbb9..4b6b5ca8e 100644
--- a/deps/v8/src/arm/lithium-codegen-arm.h
+++ b/deps/v8/src/arm/lithium-codegen-arm.h
@@ -43,7 +43,7 @@ namespace internal {
class LDeferredCode;
class SafepointGenerator;
-class LCodeGen BASE_EMBEDDED {
+class LCodeGen V8_FINAL BASE_EMBEDDED {
public:
LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
: zone_(info->zone()),
@@ -149,7 +149,6 @@ class LCodeGen BASE_EMBEDDED {
void DoDeferredTaggedToI(LTaggedToI* instr);
void DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr);
void DoDeferredStackCheck(LStackCheck* instr);
- void DoDeferredRandom(LRandom* instr);
void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
void DoDeferredAllocate(LAllocate* instr);
@@ -227,6 +226,9 @@ class LCodeGen BASE_EMBEDDED {
bool GenerateDeoptJumpTable();
bool GenerateSafepointTable();
+ // Generates the custom OSR entrypoint and sets the osr_pc_offset.
+ void GenerateOsrPrologue();
+
enum SafepointMode {
RECORD_SIMPLE_SAFEPOINT,
RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS
@@ -420,7 +422,7 @@ class LCodeGen BASE_EMBEDDED {
int old_position_;
- class PushSafepointRegistersScope BASE_EMBEDDED {
+ class PushSafepointRegistersScope V8_FINAL BASE_EMBEDDED {
public:
PushSafepointRegistersScope(LCodeGen* codegen,
Safepoint::Kind kind)
@@ -468,7 +470,7 @@ class LCodeGen BASE_EMBEDDED {
};
-class LDeferredCode: public ZoneObject {
+class LDeferredCode : public ZoneObject {
public:
explicit LDeferredCode(LCodeGen* codegen)
: codegen_(codegen),
@@ -477,7 +479,7 @@ class LDeferredCode: public ZoneObject {
codegen->AddDeferredCode(this);
}
- virtual ~LDeferredCode() { }
+ virtual ~LDeferredCode() {}
virtual void Generate() = 0;
virtual LInstruction* instr() = 0;
diff --git a/deps/v8/src/arm/lithium-gap-resolver-arm.h b/deps/v8/src/arm/lithium-gap-resolver-arm.h
index 9dd09c8d0..044c2864a 100644
--- a/deps/v8/src/arm/lithium-gap-resolver-arm.h
+++ b/deps/v8/src/arm/lithium-gap-resolver-arm.h
@@ -38,7 +38,7 @@ namespace internal {
class LCodeGen;
class LGapResolver;
-class LGapResolver BASE_EMBEDDED {
+class LGapResolver V8_FINAL BASE_EMBEDDED {
public:
explicit LGapResolver(LCodeGen* owner);
diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc
index 974b56959..7df785776 100644
--- a/deps/v8/src/arm/macro-assembler-arm.cc
+++ b/deps/v8/src/arm/macro-assembler-arm.cc
@@ -829,26 +829,6 @@ void MacroAssembler::VmovLow(DwVfpRegister dst, Register src) {
}
-void MacroAssembler::ConvertNumberToInt32(Register object,
- Register dst,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- DwVfpRegister double_scratch1,
- LowDwVfpRegister double_scratch2,
- Label* not_number) {
- Label done;
- UntagAndJumpIfSmi(dst, object, &done);
- JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);
- vldr(double_scratch1, FieldMemOperand(object, HeapNumber::kValueOffset));
- ECMAToInt32(dst, double_scratch1,
- scratch1, scratch2, scratch3, double_scratch2);
-
- bind(&done);
-}
-
-
void MacroAssembler::LoadNumber(Register object,
LowDwVfpRegister dst,
Register heap_number_map,
@@ -1702,15 +1682,9 @@ void MacroAssembler::Allocate(int object_size,
ASSERT((limit - top) == kPointerSize);
ASSERT(result.code() < ip.code());
- // Set up allocation top address and object size registers.
+ // Set up allocation top address register.
Register topaddr = scratch1;
- Register obj_size_reg = scratch2;
mov(topaddr, Operand(allocation_top));
- Operand obj_size_operand = Operand(object_size);
- if (!obj_size_operand.is_single_instruction(this)) {
- // We are about to steal IP, so we need to load this value first
- mov(obj_size_reg, obj_size_operand);
- }
// This code stores a temporary value in ip. This is OK, as the code below
// does not need ip for implicit literal generation.
@@ -1734,7 +1708,7 @@ void MacroAssembler::Allocate(int object_size,
// Align the next allocation. Storing the filler map without checking top is
// safe in new-space because the limit of the heap is aligned there.
ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
- ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
+ STATIC_ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
and_(scratch2, result, Operand(kDoubleAlignmentMask), SetCC);
Label aligned;
b(eq, &aligned);
@@ -1748,13 +1722,25 @@ void MacroAssembler::Allocate(int object_size,
}
// Calculate new top and bail out if new space is exhausted. Use result
- // to calculate the new top.
- if (obj_size_operand.is_single_instruction(this)) {
- // We can add the size as an immediate
- add(scratch2, result, obj_size_operand, SetCC);
- } else {
- // Doesn't fit in an immediate, we have to use the register
- add(scratch2, result, obj_size_reg, SetCC);
+ // to calculate the new top. We must preserve the ip register at this
+ // point, so we cannot just use add().
+ ASSERT(object_size > 0);
+ Register source = result;
+ Condition cond = al;
+ int shift = 0;
+ while (object_size != 0) {
+ if (((object_size >> shift) & 0x03) == 0) {
+ shift += 2;
+ } else {
+ int bits = object_size & (0xff << shift);
+ object_size -= bits;
+ shift += 8;
+ Operand bits_operand(bits);
+ ASSERT(bits_operand.is_single_instruction(this));
+ add(scratch2, source, bits_operand, SetCC, cond);
+ source = scratch2;
+ cond = cc;
+ }
}
b(cs, gc_required);
cmp(scratch2, Operand(ip));
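The while loop above peels object_size into addends that each encode as a single ARM data-processing immediate (8 significant bits at an even bit position), so ip never has to be clobbered by a scratch mov. The same chunking on the host (SplitIntoArmImmediates is an illustrative name):

#include <cassert>
#include <vector>

// Splits a positive size into addends that each fit an ARM immediate,
// so the adds in Allocate never need a scratch register.
std::vector<int> SplitIntoArmImmediates(int object_size) {
  assert(object_size > 0);
  std::vector<int> chunks;
  int shift = 0;
  while (object_size != 0) {
    if (((object_size >> shift) & 0x03) == 0) {
      shift += 2;                              // skip zero bit pairs
    } else {
      int bits = object_size & (0xff << shift);
      object_size -= bits;                     // peel one encodable chunk
      shift += 8;
      chunks.push_back(bits);                  // one add() per chunk
    }
  }
  return chunks;
}

// e.g. 0x12345 splits into 0x45, 0x2300 and 0x10000, three single adds.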
@@ -2299,7 +2285,6 @@ void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function,
ExternalReference thunk_ref,
Register thunk_last_arg,
int stack_space,
- bool returns_handle,
int return_value_offset) {
ExternalReference next_address =
ExternalReference::handle_scope_next_address(isolate());
@@ -2368,15 +2353,6 @@ void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function,
Label leave_exit_frame;
Label return_value_loaded;
- if (returns_handle) {
- Label load_return_value;
- cmp(r0, Operand::Zero());
- b(eq, &load_return_value);
- // derefernce returned value
- ldr(r0, MemOperand(r0));
- b(&return_value_loaded);
- bind(&load_return_value);
- }
// load value from ReturnValue
ldr(r0, MemOperand(fp, return_value_offset*kPointerSize));
bind(&return_value_loaded);
@@ -2532,84 +2508,76 @@ void MacroAssembler::TryInt32Floor(Register result,
bind(&exception);
}
-
-void MacroAssembler::ECMAToInt32(Register result,
- DwVfpRegister double_input,
- Register scratch,
- Register scratch_high,
- Register scratch_low,
- LowDwVfpRegister double_scratch) {
- ASSERT(!scratch_high.is(result));
- ASSERT(!scratch_low.is(result));
- ASSERT(!scratch_low.is(scratch_high));
- ASSERT(!scratch.is(result) &&
- !scratch.is(scratch_high) &&
- !scratch.is(scratch_low));
- ASSERT(!double_input.is(double_scratch));
-
- Label out_of_range, only_low, negate, done;
-
+void MacroAssembler::TryInlineTruncateDoubleToI(Register result,
+ DwVfpRegister double_input,
+ Label* done) {
+ LowDwVfpRegister double_scratch = kScratchDoubleReg;
vcvt_s32_f64(double_scratch.low(), double_input);
vmov(result, double_scratch.low());
// If result is not saturated (0x7fffffff or 0x80000000), we are done.
- sub(scratch, result, Operand(1));
- cmp(scratch, Operand(0x7ffffffe));
- b(lt, &done);
+ sub(ip, result, Operand(1));
+ cmp(ip, Operand(0x7ffffffe));
+ b(lt, done);
+}
- vmov(scratch_low, scratch_high, double_input);
- Ubfx(scratch, scratch_high,
- HeapNumber::kExponentShift, HeapNumber::kExponentBits);
- // Load scratch with exponent - 1. This is faster than loading
- // with exponent because Bias + 1 = 1024 which is an *ARM* immediate value.
- sub(scratch, scratch, Operand(HeapNumber::kExponentBias + 1));
- // If exponent is greater than or equal to 84, the 32 less significant
- // bits are 0s (2^84 = 1, 52 significant bits, 32 uncoded bits),
- // the result is 0.
- // Compare exponent with 84 (compare exponent - 1 with 83).
- cmp(scratch, Operand(83));
- b(ge, &out_of_range);
-
- // If we reach this code, 31 <= exponent <= 83.
- // So, we don't have to handle cases where 0 <= exponent <= 20 for
- // which we would need to shift right the high part of the mantissa.
- // Scratch contains exponent - 1.
- // Load scratch with 52 - exponent (load with 51 - (exponent - 1)).
- rsb(scratch, scratch, Operand(51), SetCC);
- b(ls, &only_low);
- // 21 <= exponent <= 51, shift scratch_low and scratch_high
- // to generate the result.
- mov(scratch_low, Operand(scratch_low, LSR, scratch));
- // Scratch contains: 52 - exponent.
- // We needs: exponent - 20.
- // So we use: 32 - scratch = 32 - 52 + exponent = exponent - 20.
- rsb(scratch, scratch, Operand(32));
- Ubfx(result, scratch_high,
- 0, HeapNumber::kMantissaBitsInTopWord);
- // Set the implicit 1 before the mantissa part in scratch_high.
- orr(result, result, Operand(1 << HeapNumber::kMantissaBitsInTopWord));
- orr(result, scratch_low, Operand(result, LSL, scratch));
- b(&negate);
-
- bind(&out_of_range);
- mov(result, Operand::Zero());
- b(&done);
- bind(&only_low);
- // 52 <= exponent <= 83, shift only scratch_low.
- // On entry, scratch contains: 52 - exponent.
- rsb(scratch, scratch, Operand::Zero());
- mov(result, Operand(scratch_low, LSL, scratch));
-
- bind(&negate);
- // If input was positive, scratch_high ASR 31 equals 0 and
- // scratch_high LSR 31 equals zero.
- // New result = (result eor 0) + 0 = result.
- // If the input was negative, we have to negate the result.
- // Input_high ASR 31 equals 0xffffffff and scratch_high LSR 31 equals 1.
- // New result = (result eor 0xffffffff) + 1 = 0 - result.
- eor(result, result, Operand(scratch_high, ASR, 31));
- add(result, result, Operand(scratch_high, LSR, 31));
+void MacroAssembler::TruncateDoubleToI(Register result,
+ DwVfpRegister double_input) {
+ Label done;
+
+ TryInlineTruncateDoubleToI(result, double_input, &done);
+
+  // If we fell through, the inline version didn't succeed, so call the stub instead.
+ push(lr);
+ sub(sp, sp, Operand(kDoubleSize)); // Put input on stack.
+ vstr(double_input, MemOperand(sp, 0));
+
+ DoubleToIStub stub(sp, result, 0, true, true);
+ CallStub(&stub);
+
+ add(sp, sp, Operand(kDoubleSize));
+ pop(lr);
+
+ bind(&done);
+}
+
+
+void MacroAssembler::TruncateHeapNumberToI(Register result,
+ Register object) {
+ Label done;
+ LowDwVfpRegister double_scratch = kScratchDoubleReg;
+ ASSERT(!result.is(object));
+
+ vldr(double_scratch,
+ MemOperand(object, HeapNumber::kValueOffset - kHeapObjectTag));
+ TryInlineTruncateDoubleToI(result, double_scratch, &done);
+
+  // If we fell through, the inline version didn't succeed, so call the stub instead.
+ push(lr);
+ DoubleToIStub stub(object,
+ result,
+ HeapNumber::kValueOffset - kHeapObjectTag,
+ true,
+ true);
+ CallStub(&stub);
+ pop(lr);
+
+ bind(&done);
+}
+
+
+void MacroAssembler::TruncateNumberToI(Register object,
+ Register result,
+ Register heap_number_map,
+ Register scratch1,
+ Label* not_number) {
+ Label done;
+ ASSERT(!result.is(object));
+
+ UntagAndJumpIfSmi(result, object, &done);
+ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);
+ TruncateHeapNumberToI(result, object);
bind(&done);
}
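All three truncation helpers above implement ECMA-262 9.5 ToInt32. A reference version of those semantics in portable C++ (a sketch of the spec, not of the stub's bit manipulation; the signed wrap at the end is the usual two's-complement behaviour):

#include <cmath>
#include <cstdint>

int32_t ToInt32(double d) {
  if (!std::isfinite(d)) return 0;        // NaN and +/-Infinity map to 0
  double t = std::trunc(d);               // drop the fractional part
  double m = std::fmod(t, 4294967296.0);  // reduce modulo 2^32
  if (m < 0) m += 4294967296.0;           // into [0, 2^32)
  uint32_t u = static_cast<uint32_t>(m);
  return static_cast<int32_t>(u);         // wrap into [-2^31, 2^31 - 1]
}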
@@ -2841,6 +2809,11 @@ void MacroAssembler::Abort(BailoutReason reason) {
RecordComment("Abort message: ");
RecordComment(msg);
}
+
+ if (FLAG_trap_on_abort) {
+ stop(msg);
+ return;
+ }
#endif
mov(r0, Operand(p0));
@@ -3824,6 +3797,30 @@ void MacroAssembler::TestJSArrayForAllocationMemento(
}
+Register GetRegisterThatIsNotOneOf(Register reg1,
+ Register reg2,
+ Register reg3,
+ Register reg4,
+ Register reg5,
+ Register reg6) {
+ RegList regs = 0;
+ if (reg1.is_valid()) regs |= reg1.bit();
+ if (reg2.is_valid()) regs |= reg2.bit();
+ if (reg3.is_valid()) regs |= reg3.bit();
+ if (reg4.is_valid()) regs |= reg4.bit();
+ if (reg5.is_valid()) regs |= reg5.bit();
+ if (reg6.is_valid()) regs |= reg6.bit();
+
+ for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
+ Register candidate = Register::FromAllocationIndex(i);
+ if (regs & candidate.bit()) continue;
+ return candidate;
+ }
+ UNREACHABLE();
+ return no_reg;
+}
+
+
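A host-side analogue of the free-register scan above, with a plain bitmask standing in for RegList and small integers for the allocatable registers (illustrative only):

#include <cassert>
#include <cstdint>

int FirstRegisterNotIn(uint32_t taken_bits, int num_allocatable = 8) {
  for (int i = 0; i < num_allocatable; i++) {
    if ((taken_bits & (1u << i)) == 0) return i;  // first free register wins
  }
  assert(false && "all allocatable registers were excluded");
  return -1;
}

// e.g. FirstRegisterNotIn((1u << 0) | (1u << 1)) == 2.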
#ifdef DEBUG
bool AreAliased(Register reg1,
Register reg2,
@@ -3848,10 +3845,13 @@ bool AreAliased(Register reg1,
#endif
-CodePatcher::CodePatcher(byte* address, int instructions)
+CodePatcher::CodePatcher(byte* address,
+ int instructions,
+ FlushICache flush_cache)
: address_(address),
size_(instructions * Assembler::kInstrSize),
- masm_(NULL, address, size_ + Assembler::kGap) {
+ masm_(NULL, address, size_ + Assembler::kGap),
+ flush_cache_(flush_cache) {
// Create a new macro assembler pointing to the address of the code to patch.
// The size is adjusted with kGap in order for the assembler to generate size
// bytes of instructions without failing with buffer size constraints.
@@ -3861,7 +3861,9 @@ CodePatcher::CodePatcher(byte* address, int instructions)
CodePatcher::~CodePatcher() {
// Indicate that code has changed.
- CPU::FlushICache(address_, size_);
+ if (flush_cache_ == FLUSH) {
+ CPU::FlushICache(address_, size_);
+ }
// Check that the code was patched as expected.
ASSERT(masm_.pc_ == address_ + size_);
diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h
index 8b9fa2b22..9abd5a0c3 100644
--- a/deps/v8/src/arm/macro-assembler-arm.h
+++ b/deps/v8/src/arm/macro-assembler-arm.h
@@ -62,6 +62,14 @@ enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved };
+Register GetRegisterThatIsNotOneOf(Register reg1,
+ Register reg2 = no_reg,
+ Register reg3 = no_reg,
+ Register reg4 = no_reg,
+ Register reg5 = no_reg,
+ Register reg6 = no_reg);
+
+
#ifdef DEBUG
bool AreAliased(Register reg1,
Register reg2,
@@ -491,19 +499,6 @@ class MacroAssembler: public Assembler {
void VmovLow(Register dst, DwVfpRegister src);
void VmovLow(DwVfpRegister dst, Register src);
- // Converts the smi or heap number in object to an int32 using the rules
- // for ToInt32 as described in ECMAScript 9.5.: the value is truncated
- // and brought into the range -2^31 .. +2^31 - 1.
- void ConvertNumberToInt32(Register object,
- Register dst,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- DwVfpRegister double_scratch1,
- LowDwVfpRegister double_scratch2,
- Label* not_int32);
-
// Loads the number from object into dst register.
// If |object| is neither smi nor heap number, |not_number| is jumped to
// with |object| still intact.
@@ -989,15 +984,34 @@ class MacroAssembler: public Assembler {
Label* exact);
// Performs a truncating conversion of a floating point number as used by
+ // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it
+  // succeeds; otherwise falls through if the result is saturated. On return,
+  // 'result' either holds the answer or is clobbered on fall through.
+ //
+ // Only public for the test code in test-code-stubs-arm.cc.
+ void TryInlineTruncateDoubleToI(Register result,
+ DwVfpRegister input,
+ Label* done);
+
+ // Performs a truncating conversion of a floating point number as used by
// the JS bitwise operations. See ECMA-262 9.5: ToInt32.
- // Double_scratch must be between d0 and d15.
- // Exits with 'result' holding the answer and all other registers clobbered.
- void ECMAToInt32(Register result,
- DwVfpRegister double_input,
- Register scratch,
- Register scratch_high,
- Register scratch_low,
- LowDwVfpRegister double_scratch);
+ // Exits with 'result' holding the answer.
+ void TruncateDoubleToI(Register result, DwVfpRegister double_input);
+
+ // Performs a truncating conversion of a heap number as used by
+  // the JS bitwise operations. See ECMA-262 9.5: ToInt32. 'result' and 'object'
+ // must be different registers. Exits with 'result' holding the answer.
+ void TruncateHeapNumberToI(Register result, Register object);
+
+ // Converts the smi or heap number in object to an int32 using the rules
+  // for ToInt32 as described in ECMA-262 9.5: the value is truncated
+  // and brought into the range -2^31 .. +2^31 - 1. 'result' and 'object' must be
+ // different registers.
+ void TruncateNumberToI(Register object,
+ Register result,
+ Register heap_number_map,
+ Register scratch1,
+ Label* not_int32);
// Check whether d16-d31 are available on the CPU. The result is given by the
// Z condition flag: Z==0 if d16-d31 available, Z==1 otherwise.
@@ -1097,7 +1111,6 @@ class MacroAssembler: public Assembler {
ExternalReference thunk_ref,
Register thunk_last_arg,
int stack_space,
- bool returns_handle,
int return_value_offset_from_fp);
// Jump to a runtime routine.
@@ -1416,7 +1429,14 @@ class MacroAssembler: public Assembler {
// an assertion to fail.
class CodePatcher {
public:
- CodePatcher(byte* address, int instructions);
+ enum FlushICache {
+ FLUSH,
+ DONT_FLUSH
+ };
+
+ CodePatcher(byte* address,
+ int instructions,
+ FlushICache flush_cache = FLUSH);
virtual ~CodePatcher();
// Macro assembler to emit code.
@@ -1436,6 +1456,7 @@ class CodePatcher {
byte* address_; // The address of the code being patched.
int size_; // Number of bytes of the expected patch size.
MacroAssembler masm_; // Macro assembler used to generate the code.
+ FlushICache flush_cache_; // Whether to flush the I cache after patching.
};
diff --git a/deps/v8/src/arm/regexp-macro-assembler-arm.cc b/deps/v8/src/arm/regexp-macro-assembler-arm.cc
index 189ea8d77..cbc34e10b 100644
--- a/deps/v8/src/arm/regexp-macro-assembler-arm.cc
+++ b/deps/v8/src/arm/regexp-macro-assembler-arm.cc
@@ -134,7 +134,6 @@ RegExpMacroAssemblerARM::RegExpMacroAssemblerARM(
exit_label_() {
ASSERT_EQ(0, registers_to_save % 2);
__ jmp(&entry_label_); // We'll write the entry code later.
- EmitBacktrackConstantPool();
__ bind(&start_label_); // And then continue from here.
}
@@ -872,7 +871,7 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
masm_->GetCode(&code_desc);
Handle<Code> code = isolate()->factory()->NewCode(
code_desc, Code::ComputeFlags(Code::REGEXP), masm_->CodeObject());
- PROFILE(Isolate::Current(), RegExpCodeCreateEvent(*code, *source));
+ PROFILE(masm_->isolate(), RegExpCodeCreateEvent(*code, *source));
return Handle<HeapObject>::cast(code);
}
@@ -938,37 +937,8 @@ void RegExpMacroAssemblerARM::PopRegister(int register_index) {
}
-static bool is_valid_memory_offset(int value) {
- if (value < 0) value = -value;
- return value < (1<<12);
-}
-
-
void RegExpMacroAssemblerARM::PushBacktrack(Label* label) {
- if (label->is_bound()) {
- int target = label->pos();
- __ mov(r0, Operand(target + Code::kHeaderSize - kHeapObjectTag));
- } else {
- int constant_offset = GetBacktrackConstantPoolEntry();
- masm_->label_at_put(label, constant_offset);
- // Reading pc-relative is based on the address 8 bytes ahead of
- // the current opcode.
- unsigned int offset_of_pc_register_read =
- masm_->pc_offset() + Assembler::kPcLoadDelta;
- int pc_offset_of_constant =
- constant_offset - offset_of_pc_register_read;
- ASSERT(pc_offset_of_constant < 0);
- if (is_valid_memory_offset(pc_offset_of_constant)) {
- Assembler::BlockConstPoolScope block_const_pool(masm_);
- __ ldr(r0, MemOperand(pc, pc_offset_of_constant));
- } else {
- // Not a 12-bit offset, so it needs to be loaded from the constant
- // pool.
- Assembler::BlockConstPoolScope block_const_pool(masm_);
- __ mov(r0, Operand(pc_offset_of_constant + Assembler::kInstrSize));
- __ ldr(r0, MemOperand(pc, r0));
- }
- }
+ __ mov_label_offset(r0, label);
Push(r0);
CheckStackLimit();
}
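mov_label_offset materializes the label's offset from the start of the code object, so the backtrack stack now holds code offsets instead of absolute addresses, and the hand-rolled constant pool removed below becomes unnecessary. A conceptual sketch of why offsets are the safer currency (illustrative, not V8 code):

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    struct RegExpCode {
      const uint8_t* base;              // May change if the GC moves the code.
      std::vector<int32_t> backtracks;  // Offsets survive a move; pointers don't.

      void PushBacktrack(int32_t label_offset) {
        backtracks.push_back(label_offset);
      }

      const uint8_t* PopBacktrack() {
        int32_t off = backtracks.back();
        backtracks.pop_back();
        return base + off;  // Rebased against the current code address.
      }
    };

    int main() {
      uint8_t code[64] = {0};
      RegExpCode re = {code, {}};
      re.PushBacktrack(12);
      std::printf("%p\n", static_cast<const void*>(re.PopBacktrack()));
    }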
@@ -1055,16 +1025,34 @@ void RegExpMacroAssemblerARM::WriteStackPointerToRegister(int reg) {
// Private methods:
void RegExpMacroAssemblerARM::CallCheckStackGuardState(Register scratch) {
- static const int num_arguments = 3;
- __ PrepareCallCFunction(num_arguments, scratch);
+ __ PrepareCallCFunction(3, scratch);
+
// RegExp code frame pointer.
__ mov(r2, frame_pointer());
// Code* of self.
__ mov(r1, Operand(masm_->CodeObject()));
- // r0 becomes return address pointer.
+
+ // We need to make room for the return address on the stack.
+ int stack_alignment = OS::ActivationFrameAlignment();
+ ASSERT(IsAligned(stack_alignment, kPointerSize));
+ __ sub(sp, sp, Operand(stack_alignment));
+
+ // r0 will point to the return address, placed by DirectCEntry.
+ __ mov(r0, sp);
+
ExternalReference stack_guard_check =
ExternalReference::re_check_stack_guard_state(isolate());
- CallCFunctionUsingStub(stack_guard_check, num_arguments);
+ __ mov(ip, Operand(stack_guard_check));
+ DirectCEntryStub stub;
+ stub.GenerateCall(masm_, ip);
+
+ // Drop the return address from the stack.
+ __ add(sp, sp, Operand(stack_alignment));
+
+ ASSERT(stack_alignment != 0);
+ // Restore sp: PrepareCallCFunction stored the original sp at [sp].
+ __ ldr(sp, MemOperand(sp, 0));
+
+ __ mov(code_pointer(), Operand(masm_->CodeObject()));
}
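The slot reserved for the return address is a full ActivationFrameAlignment wide rather than a single pointer, so sp keeps its alignment across the C call. The arithmetic, re-enacted with assumed example values (not V8 code):

    #include <cassert>
    #include <cstdint>

    int main() {
      const uintptr_t kPointerSize = 4;
      const uintptr_t stack_alignment = 8;  // e.g. ARM EABI frame alignment
      uintptr_t sp = 0x7fff0000;            // Simulated, already aligned.
      assert(sp % stack_alignment == 0);

      sp -= stack_alignment;                // Reserve the return-address slot.
      uintptr_t r0 = sp;                    // r0 points at the slot.
      assert(sp % stack_alignment == 0);    // Still aligned for the call.
      assert(r0 % kPointerSize == 0);

      sp += stack_alignment;                // Dropped again after the call.
      return 0;
    }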
@@ -1079,7 +1067,6 @@ int RegExpMacroAssemblerARM::CheckStackGuardState(Address* return_address,
Code* re_code,
Address re_frame) {
Isolate* isolate = frame_entry<Isolate*>(re_frame, kIsolate);
- ASSERT(isolate == Isolate::Current());
if (isolate->stack_guard()->IsStackOverflow()) {
isolate->StackOverflow();
return EXCEPTION;
@@ -1262,53 +1249,6 @@ void RegExpMacroAssemblerARM::CheckStackLimit() {
}
-void RegExpMacroAssemblerARM::EmitBacktrackConstantPool() {
- __ CheckConstPool(false, false);
- Assembler::BlockConstPoolScope block_const_pool(masm_);
- backtrack_constant_pool_offset_ = masm_->pc_offset();
- for (int i = 0; i < kBacktrackConstantPoolSize; i++) {
- __ emit(0);
- }
-
- backtrack_constant_pool_capacity_ = kBacktrackConstantPoolSize;
-}
-
-
-int RegExpMacroAssemblerARM::GetBacktrackConstantPoolEntry() {
- while (backtrack_constant_pool_capacity_ > 0) {
- int offset = backtrack_constant_pool_offset_;
- backtrack_constant_pool_offset_ += kPointerSize;
- backtrack_constant_pool_capacity_--;
- if (masm_->pc_offset() - offset < 2 * KB) {
- return offset;
- }
- }
- Label new_pool_skip;
- __ jmp(&new_pool_skip);
- EmitBacktrackConstantPool();
- __ bind(&new_pool_skip);
- int offset = backtrack_constant_pool_offset_;
- backtrack_constant_pool_offset_ += kPointerSize;
- backtrack_constant_pool_capacity_--;
- return offset;
-}
-
-
-void RegExpMacroAssemblerARM::CallCFunctionUsingStub(
- ExternalReference function,
- int num_arguments) {
- // Must pass all arguments in registers. The stub pushes on the stack.
- ASSERT(num_arguments <= 4);
- __ mov(code_pointer(), Operand(function));
- RegExpCEntryStub stub;
- __ CallStub(&stub);
- if (OS::ActivationFrameAlignment() != 0) {
- __ ldr(sp, MemOperand(sp, 0));
- }
- __ mov(code_pointer(), Operand(masm_->CodeObject()));
-}
-
-
bool RegExpMacroAssemblerARM::CanReadUnaligned() {
return CpuFeatures::IsSupported(UNALIGNED_ACCESSES) && !slow_safe();
}
@@ -1351,17 +1291,6 @@ void RegExpMacroAssemblerARM::LoadCurrentCharacterUnchecked(int cp_offset,
}
-void RegExpCEntryStub::Generate(MacroAssembler* masm_) {
- int stack_alignment = OS::ActivationFrameAlignment();
- if (stack_alignment < kPointerSize) stack_alignment = kPointerSize;
- // Stack is already aligned for call, so decrement by alignment
- // to make room for storing the link register.
- __ str(lr, MemOperand(sp, stack_alignment, NegPreIndex));
- __ mov(r0, sp);
- __ Call(r5);
- __ ldr(pc, MemOperand(sp, stack_alignment, PostIndex));
-}
-
#undef __
#endif // V8_INTERPRETED_REGEXP
diff --git a/deps/v8/src/arm/regexp-macro-assembler-arm.h b/deps/v8/src/arm/regexp-macro-assembler-arm.h
index 1825752eb..9f07489e1 100644
--- a/deps/v8/src/arm/regexp-macro-assembler-arm.h
+++ b/deps/v8/src/arm/regexp-macro-assembler-arm.h
@@ -160,9 +160,6 @@ class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler {
// Check whether we are exceeding the stack limit on the backtrack stack.
void CheckStackLimit();
- void EmitBacktrackConstantPool();
- int GetBacktrackConstantPoolEntry();
-
// Generate a call to CheckStackGuardState.
void CallCheckStackGuardState(Register scratch);
@@ -212,14 +209,6 @@ class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler {
// and increments it by a word size.
inline void Pop(Register target);
- // Calls a C function and cleans up the frame alignment done by
- // by FrameAlign. The called function *is* allowed to trigger a garbage
- // collection, but may not take more than four arguments (no arguments
- // passed on the stack), and the first argument will be a pointer to the
- // return address.
- inline void CallCFunctionUsingStub(ExternalReference function,
- int num_arguments);
-
Isolate* isolate() const { return masm_->isolate(); }
MacroAssembler* masm_;
diff --git a/deps/v8/src/arm/simulator-arm.cc b/deps/v8/src/arm/simulator-arm.cc
index c9e3616d9..def181863 100644
--- a/deps/v8/src/arm/simulator-arm.cc
+++ b/deps/v8/src/arm/simulator-arm.cc
@@ -1686,20 +1686,12 @@ typedef double (*SimulatorRuntimeFPIntCall)(double darg0, int32_t arg0);
// This signature supports direct calls into the API function native callback
// (refer to FunctionCallback in v8.h).
-typedef v8::Handle<v8::Value> (*SimulatorRuntimeDirectApiCall)(int32_t arg0);
-typedef void (*SimulatorRuntimeDirectApiCallNew)(int32_t arg0);
-typedef v8::Handle<v8::Value> (*SimulatorRuntimeProfilingApiCall)(
- int32_t arg0, int32_t arg1);
-typedef void (*SimulatorRuntimeProfilingApiCallNew)(int32_t arg0, int32_t arg1);
+typedef void (*SimulatorRuntimeDirectApiCall)(int32_t arg0);
+typedef void (*SimulatorRuntimeProfilingApiCall)(int32_t arg0, int32_t arg1);
// This signature supports direct call to accessor getter callback.
-typedef v8::Handle<v8::Value> (*SimulatorRuntimeDirectGetterCall)(int32_t arg0,
- int32_t arg1);
-typedef void (*SimulatorRuntimeDirectGetterCallNew)(int32_t arg0,
- int32_t arg1);
-typedef v8::Handle<v8::Value> (*SimulatorRuntimeProfilingGetterCall)(
- int32_t arg0, int32_t arg1, int32_t arg2);
-typedef void (*SimulatorRuntimeProfilingGetterCallNew)(
+typedef void (*SimulatorRuntimeDirectGetterCall)(int32_t arg0, int32_t arg1);
+typedef void (*SimulatorRuntimeProfilingGetterCall)(
int32_t arg0, int32_t arg1, int32_t arg2);
// Software interrupt instructions are used by the simulator to call into the
@@ -1839,9 +1831,7 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
break;
}
}
- } else if (
- redirection->type() == ExternalReference::DIRECT_API_CALL ||
- redirection->type() == ExternalReference::DIRECT_API_CALL_NEW) {
+ } else if (redirection->type() == ExternalReference::DIRECT_API_CALL) {
if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
PrintF("Call to host function at %p args %08x",
reinterpret_cast<void*>(external), arg0);
@@ -1851,22 +1841,11 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
PrintF("\n");
}
CHECK(stack_aligned);
- if (redirection->type() == ExternalReference::DIRECT_API_CALL) {
- SimulatorRuntimeDirectApiCall target =
- reinterpret_cast<SimulatorRuntimeDirectApiCall>(external);
- v8::Handle<v8::Value> result = target(arg0);
- if (::v8::internal::FLAG_trace_sim) {
- PrintF("Returned %p\n", reinterpret_cast<void *>(*result));
- }
- set_register(r0, reinterpret_cast<int32_t>(*result));
- } else {
- SimulatorRuntimeDirectApiCallNew target =
- reinterpret_cast<SimulatorRuntimeDirectApiCallNew>(external);
- target(arg0);
- }
+ SimulatorRuntimeDirectApiCall target =
+ reinterpret_cast<SimulatorRuntimeDirectApiCall>(external);
+ target(arg0);
} else if (
- redirection->type() == ExternalReference::PROFILING_API_CALL ||
- redirection->type() == ExternalReference::PROFILING_API_CALL_NEW) {
+ redirection->type() == ExternalReference::PROFILING_API_CALL) {
if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
PrintF("Call to host function at %p args %08x %08x",
reinterpret_cast<void*>(external), arg0, arg1);
@@ -1876,22 +1855,11 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
PrintF("\n");
}
CHECK(stack_aligned);
- if (redirection->type() == ExternalReference::PROFILING_API_CALL) {
- SimulatorRuntimeProfilingApiCall target =
- reinterpret_cast<SimulatorRuntimeProfilingApiCall>(external);
- v8::Handle<v8::Value> result = target(arg0, arg1);
- if (::v8::internal::FLAG_trace_sim) {
- PrintF("Returned %p\n", reinterpret_cast<void *>(*result));
- }
- set_register(r0, reinterpret_cast<int32_t>(*result));
- } else {
- SimulatorRuntimeProfilingApiCallNew target =
- reinterpret_cast<SimulatorRuntimeProfilingApiCallNew>(external);
- target(arg0, arg1);
- }
+ SimulatorRuntimeProfilingApiCall target =
+ reinterpret_cast<SimulatorRuntimeProfilingApiCall>(external);
+ target(arg0, arg1);
} else if (
- redirection->type() == ExternalReference::DIRECT_GETTER_CALL ||
- redirection->type() == ExternalReference::DIRECT_GETTER_CALL_NEW) {
+ redirection->type() == ExternalReference::DIRECT_GETTER_CALL) {
if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
PrintF("Call to host function at %p args %08x %08x",
reinterpret_cast<void*>(external), arg0, arg1);
@@ -1901,22 +1869,11 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
PrintF("\n");
}
CHECK(stack_aligned);
- if (redirection->type() == ExternalReference::DIRECT_GETTER_CALL) {
- SimulatorRuntimeDirectGetterCall target =
- reinterpret_cast<SimulatorRuntimeDirectGetterCall>(external);
- v8::Handle<v8::Value> result = target(arg0, arg1);
- if (::v8::internal::FLAG_trace_sim) {
- PrintF("Returned %p\n", reinterpret_cast<void *>(*result));
- }
- set_register(r0, reinterpret_cast<int32_t>(*result));
- } else {
- SimulatorRuntimeDirectGetterCallNew target =
- reinterpret_cast<SimulatorRuntimeDirectGetterCallNew>(external);
- target(arg0, arg1);
- }
+ SimulatorRuntimeDirectGetterCall target =
+ reinterpret_cast<SimulatorRuntimeDirectGetterCall>(external);
+ target(arg0, arg1);
} else if (
- redirection->type() == ExternalReference::PROFILING_GETTER_CALL ||
- redirection->type() == ExternalReference::PROFILING_GETTER_CALL_NEW) {
+ redirection->type() == ExternalReference::PROFILING_GETTER_CALL) {
if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
PrintF("Call to host function at %p args %08x %08x %08x",
reinterpret_cast<void*>(external), arg0, arg1, arg2);
@@ -1926,20 +1883,10 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
PrintF("\n");
}
CHECK(stack_aligned);
- if (redirection->type() == ExternalReference::PROFILING_GETTER_CALL) {
- SimulatorRuntimeProfilingGetterCall target =
- reinterpret_cast<SimulatorRuntimeProfilingGetterCall>(external);
- v8::Handle<v8::Value> result = target(arg0, arg1, arg2);
- if (::v8::internal::FLAG_trace_sim) {
- PrintF("Returned %p\n", reinterpret_cast<void *>(*result));
- }
- set_register(r0, reinterpret_cast<int32_t>(*result));
- } else {
- SimulatorRuntimeProfilingGetterCallNew target =
- reinterpret_cast<SimulatorRuntimeProfilingGetterCallNew>(
- external);
- target(arg0, arg1, arg2);
- }
+ SimulatorRuntimeProfilingGetterCall target =
+ reinterpret_cast<SimulatorRuntimeProfilingGetterCall>(
+ external);
+ target(arg0, arg1, arg2);
} else {
// builtin call.
ASSERT(redirection->type() == ExternalReference::BUILTIN_CALL);
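With the *_NEW variants folded away, each redirection type now corresponds to exactly one callback signature, and the simulator no longer needs the returns-handle special case. The recover-and-call pattern in standalone form (simplified sketch, not the simulator itself):

    #include <cstdint>
    #include <cstdio>

    typedef void (*SimulatorRuntimeDirectApiCall)(int32_t arg0);
    typedef void (*SimulatorRuntimeDirectGetterCall)(int32_t arg0, int32_t arg1);

    enum CallType { DIRECT_API_CALL, DIRECT_GETTER_CALL };

    static void MyApiCallback(int32_t arg0) { std::printf("api %d\n", arg0); }

    // The host function is stored as a raw address; the redirection's recorded
    // type tells us which typed pointer to recover.
    static void Dispatch(CallType type, uintptr_t external,
                         int32_t a0, int32_t a1) {
      switch (type) {
        case DIRECT_API_CALL:
          reinterpret_cast<SimulatorRuntimeDirectApiCall>(external)(a0);
          break;
        case DIRECT_GETTER_CALL:
          reinterpret_cast<SimulatorRuntimeDirectGetterCall>(external)(a0, a1);
          break;
      }
    }

    int main() {
      Dispatch(DIRECT_API_CALL, reinterpret_cast<uintptr_t>(&MyApiCallback), 42, 0);
    }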
diff --git a/deps/v8/src/arm/stub-cache-arm.cc b/deps/v8/src/arm/stub-cache-arm.cc
index 06bd66e92..085af3f2b 100644
--- a/deps/v8/src/arm/stub-cache-arm.cc
+++ b/deps/v8/src/arm/stub-cache-arm.cc
@@ -785,6 +785,11 @@ static void PushInterceptorArguments(MacroAssembler* masm,
Register holder,
Register name,
Handle<JSObject> holder_obj) {
+ STATIC_ASSERT(StubCache::kInterceptorArgsNameIndex == 0);
+ STATIC_ASSERT(StubCache::kInterceptorArgsInfoIndex == 1);
+ STATIC_ASSERT(StubCache::kInterceptorArgsThisIndex == 2);
+ STATIC_ASSERT(StubCache::kInterceptorArgsHolderIndex == 3);
+ STATIC_ASSERT(StubCache::kInterceptorArgsLength == 4);
__ push(name);
Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor());
ASSERT(!masm->isolate()->heap()->InNewSpace(*interceptor));
@@ -793,10 +798,6 @@ static void PushInterceptorArguments(MacroAssembler* masm,
__ push(scratch);
__ push(receiver);
__ push(holder);
- __ ldr(scratch, FieldMemOperand(scratch, InterceptorInfo::kDataOffset));
- __ push(scratch);
- __ mov(scratch, Operand(ExternalReference::isolate_address(masm->isolate())));
- __ push(scratch);
}
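The static asserts tie the push order to the kInterceptorArgs* constants, so reordering the pushes breaks the build instead of silently corrupting the frame. The same idea in miniature (standalone sketch with made-up names; V8 itself uses its STATIC_ASSERT macro rather than C++11 static_assert):

    struct InterceptorArgs {
      static const int kNameIndex = 0;
      static const int kInfoIndex = 1;
      static const int kThisIndex = 2;
      static const int kHolderIndex = 3;
      static const int kLength = 4;
    };

    // Fires at compile time if the stores below drift out of sync.
    static_assert(InterceptorArgs::kNameIndex == 0, "name is stored first");
    static_assert(InterceptorArgs::kLength == 4, "exactly four slots are filled");

    void PushInterceptorArgs(void* slots[], void* name, void* info,
                             void* receiver, void* holder) {
      slots[InterceptorArgs::kNameIndex] = name;
      slots[InterceptorArgs::kInfoIndex] = info;
      slots[InterceptorArgs::kThisIndex] = receiver;
      slots[InterceptorArgs::kHolderIndex] = holder;
    }

    int main() {
      void* slots[InterceptorArgs::kLength];
      int x = 0;
      PushInterceptorArgs(slots, &x, &x, &x, &x);
    }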
@@ -811,7 +812,7 @@ static void CompileCallLoadPropertyWithInterceptor(
ExternalReference ref =
ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorOnly),
masm->isolate());
- __ mov(r0, Operand(6));
+ __ mov(r0, Operand(StubCache::kInterceptorArgsLength));
__ mov(r1, Operand(ref));
CEntryStub stub(1);
@@ -903,23 +904,13 @@ static void GenerateFastApiDirectCall(MacroAssembler* masm,
const int kStackUnwindSpace = argc + kFastApiCallArguments + 1;
Address function_address = v8::ToCData<Address>(api_call_info->callback());
- bool returns_handle =
- !CallbackTable::ReturnsVoid(masm->isolate(), function_address);
ApiFunction fun(function_address);
- ExternalReference::Type type =
- returns_handle ?
- ExternalReference::DIRECT_API_CALL :
- ExternalReference::DIRECT_API_CALL_NEW;
+ ExternalReference::Type type = ExternalReference::DIRECT_API_CALL;
ExternalReference ref = ExternalReference(&fun,
type,
masm->isolate());
- Address thunk_address = returns_handle
- ? FUNCTION_ADDR(&InvokeInvocationCallback)
- : FUNCTION_ADDR(&InvokeFunctionCallback);
- ExternalReference::Type thunk_type =
- returns_handle ?
- ExternalReference::PROFILING_API_CALL :
- ExternalReference::PROFILING_API_CALL_NEW;
+ Address thunk_address = FUNCTION_ADDR(&InvokeFunctionCallback);
+ ExternalReference::Type thunk_type = ExternalReference::PROFILING_API_CALL;
ApiFunction thunk_fun(thunk_address);
ExternalReference thunk_ref = ExternalReference(&thunk_fun, thunk_type,
masm->isolate());
@@ -930,11 +921,39 @@ static void GenerateFastApiDirectCall(MacroAssembler* masm,
thunk_ref,
r1,
kStackUnwindSpace,
- returns_handle,
kFastApiCallArguments + 1);
}
+// Generate a call to an API function.
+static void GenerateFastApiCall(MacroAssembler* masm,
+ const CallOptimization& optimization,
+ Register receiver,
+ Register scratch,
+ int argc,
+ Register* values) {
+ ASSERT(optimization.is_simple_api_call());
+ ASSERT(!receiver.is(scratch));
+
+ const int stack_space = kFastApiCallArguments + argc + 1;
+ // Assign stack space for the call arguments.
+ __ sub(sp, sp, Operand(stack_space * kPointerSize));
+ // Write holder to stack frame.
+ __ str(receiver, MemOperand(sp, 0));
+ // Write receiver to stack frame.
+ int index = stack_space - 1;
+ __ str(receiver, MemOperand(sp, index * kPointerSize));
+ // Write the arguments to stack frame.
+ for (int i = 0; i < argc; i++) {
+ ASSERT(!receiver.is(values[i]));
+ ASSERT(!scratch.is(values[i]));
+ __ str(values[i], MemOperand(sp, --index * kPointerSize));
+ }
+
+ GenerateFastApiDirectCall(masm, optimization, argc);
+}
+
+
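The frame GenerateFastApiCall lays out is easiest to see with concrete numbers: the holder sits at sp, the receiver in the top slot, and the JS arguments fill the slots directly below the receiver. A sketch that just prints the slot assignments (kFastApiCallArguments = 6 is assumed here for illustration):

    #include <cstdio>

    int main() {
      const int kFastApiCallArguments = 6;  // Assumed value.
      const int kPointerSize = 4;
      int argc = 2;
      int stack_space = kFastApiCallArguments + argc + 1;
      std::printf("holder   -> sp + 0\n");
      int index = stack_space - 1;
      std::printf("receiver -> sp + %d\n", index * kPointerSize);
      for (int i = 0; i < argc; i++) {
        // Pre-decrement: arguments land just below the receiver slot.
        std::printf("arg %d    -> sp + %d\n", i, --index * kPointerSize);
      }
    }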
class CallInterceptorCompiler BASE_EMBEDDED {
public:
CallInterceptorCompiler(StubCompiler* stub_compiler,
@@ -1092,7 +1111,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
__ CallExternalReference(
ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForCall),
masm->isolate()),
- 6);
+ StubCache::kInterceptorArgsLength);
// Restore the name_ register.
__ pop(name_);
// Leave the internal frame.
@@ -1150,21 +1169,6 @@ static void GenerateCheckPropertyCells(MacroAssembler* masm,
}
-// Convert and store int passed in register ival to IEEE 754 single precision
-// floating point value at memory location (dst + 4 * wordoffset)
-// If VFP3 is available use it for conversion.
-static void StoreIntAsFloat(MacroAssembler* masm,
- Register dst,
- Register wordoffset,
- Register ival,
- Register scratch1) {
- __ vmov(s0, ival);
- __ add(scratch1, dst, Operand(wordoffset, LSL, 2));
- __ vcvt_f32_s32(s0, s0);
- __ vstr(s0, scratch1, 0);
-}
-
-
void StubCompiler::GenerateTailCall(MacroAssembler* masm, Handle<Code> code) {
__ Jump(code, RelocInfo::CODE_TARGET);
}
@@ -1318,7 +1322,7 @@ Register BaseLoadStubCompiler::CallbackHandlerFrontend(
Handle<JSObject> holder,
Handle<Name> name,
Label* success,
- Handle<ExecutableAccessorInfo> callback) {
+ Handle<Object> callback) {
Label miss;
Register reg = HandlerFrontendHeader(object, object_reg, holder, name, &miss);
@@ -1406,10 +1410,26 @@ void BaseLoadStubCompiler::GenerateLoadConstant(Handle<Object> value) {
void BaseLoadStubCompiler::GenerateLoadCallback(
+ const CallOptimization& call_optimization) {
+ GenerateFastApiCall(
+ masm(), call_optimization, receiver(), scratch3(), 0, NULL);
+}
+
+
+void BaseLoadStubCompiler::GenerateLoadCallback(
Register reg,
Handle<ExecutableAccessorInfo> callback) {
// Build AccessorInfo::args_ list on the stack and push property name below
// the exit frame to make GC aware of them and store pointers to them.
+ STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 0);
+ STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == -1);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == -2);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == -3);
+ STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == -4);
+ STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == -5);
+ ASSERT(!scratch2().is(reg));
+ ASSERT(!scratch3().is(reg));
+ ASSERT(!scratch4().is(reg));
__ push(receiver());
__ mov(scratch2(), sp); // scratch2 = AccessorInfo::args_
if (heap()->InNewSpace(callback->data())) {
@@ -1419,13 +1439,13 @@ void BaseLoadStubCompiler::GenerateLoadCallback(
} else {
__ Move(scratch3(), Handle<Object>(callback->data(), isolate()));
}
- __ Push(reg, scratch3());
+ __ push(scratch3());
__ LoadRoot(scratch3(), Heap::kUndefinedValueRootIndex);
__ mov(scratch4(), scratch3());
__ Push(scratch3(), scratch4());
__ mov(scratch4(),
Operand(ExternalReference::isolate_address(isolate())));
- __ Push(scratch4(), name());
+ __ Push(scratch4(), reg, name());
__ mov(r0, sp); // r0 = Handle<Name>
const int kApiStackSpace = 1;
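The push sequence has to land each value at the index the static asserts pin down, counting downward from the receiver slot that args_ points at. Re-enacted with a downward-growing array (standalone sketch; the labels are made up):

    #include <cassert>
    #include <cstring>

    int main() {
      const char* stack[8];
      int sp = 8;
      stack[--sp] = "receiver";             // kThisIndex == 0; args_ points here.
      const char** args = &stack[sp];
      stack[--sp] = "data";                 // kDataIndex == -1
      stack[--sp] = "return value";         // kReturnValueOffset == -2
      stack[--sp] = "return value default"; // kReturnValueDefaultValueIndex == -3
      stack[--sp] = "isolate";              // kIsolateIndex == -4
      stack[--sp] = "holder";               // kHolderIndex == -5
      stack[--sp] = "name";                 // Below the block; r0 = Handle<Name>.
      assert(std::strcmp(args[0], "receiver") == 0);
      assert(std::strcmp(args[-5], "holder") == 0);
      return 0;
    }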
@@ -1439,23 +1459,14 @@ void BaseLoadStubCompiler::GenerateLoadCallback(
const int kStackUnwindSpace = kFastApiCallArguments + 1;
Address getter_address = v8::ToCData<Address>(callback->getter());
- bool returns_handle =
- !CallbackTable::ReturnsVoid(isolate(), getter_address);
ApiFunction fun(getter_address);
- ExternalReference::Type type =
- returns_handle ?
- ExternalReference::DIRECT_GETTER_CALL :
- ExternalReference::DIRECT_GETTER_CALL_NEW;
+ ExternalReference::Type type = ExternalReference::DIRECT_GETTER_CALL;
ExternalReference ref = ExternalReference(&fun, type, isolate());
- Address thunk_address = returns_handle
- ? FUNCTION_ADDR(&InvokeAccessorGetter)
- : FUNCTION_ADDR(&InvokeAccessorGetterCallback);
+ Address thunk_address = FUNCTION_ADDR(&InvokeAccessorGetterCallback);
ExternalReference::Type thunk_type =
- returns_handle ?
- ExternalReference::PROFILING_GETTER_CALL :
- ExternalReference::PROFILING_GETTER_CALL_NEW;
+ ExternalReference::PROFILING_GETTER_CALL;
ApiFunction thunk_fun(thunk_address);
ExternalReference thunk_ref = ExternalReference(&thunk_fun, thunk_type,
isolate());
@@ -1464,8 +1475,7 @@ void BaseLoadStubCompiler::GenerateLoadCallback(
thunk_ref,
r2,
kStackUnwindSpace,
- returns_handle,
- 5);
+ 6);
}
@@ -1553,7 +1563,7 @@ void BaseLoadStubCompiler::GenerateLoadInterceptor(
ExternalReference ref =
ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForLoad),
isolate());
- __ TailCallExternalReference(ref, 6, 1);
+ __ TailCallExternalReference(ref, StubCache::kInterceptorArgsLength, 1);
}
}
@@ -2811,6 +2821,24 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback(
}
+Handle<Code> StoreStubCompiler::CompileStoreCallback(
+ Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<Name> name,
+ const CallOptimization& call_optimization) {
+ Label success;
+ HandlerFrontend(object, receiver(), holder, name, &success);
+ __ bind(&success);
+
+ Register values[] = { value() };
+ GenerateFastApiCall(
+ masm(), call_optimization, receiver(), scratch3(), 1, values);
+
+ // Return the generated code.
+ return GetCode(kind(), Code::CALLBACKS, name);
+}
+
+
#undef __
#define __ ACCESS_MASM(masm)
@@ -2894,47 +2922,6 @@ Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
}
-Handle<Code> StoreStubCompiler::CompileStoreGlobal(
- Handle<GlobalObject> object,
- Handle<PropertyCell> cell,
- Handle<Name> name) {
- Label miss;
-
- // Check that the map of the global has not changed.
- __ ldr(scratch1(), FieldMemOperand(receiver(), HeapObject::kMapOffset));
- __ cmp(scratch1(), Operand(Handle<Map>(object->map())));
- __ b(ne, &miss);
-
- // Check that the value in the cell is not the hole. If it is, this
- // cell could have been deleted and reintroducing the global needs
- // to update the property details in the property dictionary of the
- // global object. We bail out to the runtime system to do that.
- __ mov(scratch1(), Operand(cell));
- __ LoadRoot(scratch2(), Heap::kTheHoleValueRootIndex);
- __ ldr(scratch3(), FieldMemOperand(scratch1(), Cell::kValueOffset));
- __ cmp(scratch3(), scratch2());
- __ b(eq, &miss);
-
- // Store the value in the cell.
- __ str(value(), FieldMemOperand(scratch1(), Cell::kValueOffset));
- // Cells are always rescanned, so no write barrier here.
-
- Counters* counters = isolate()->counters();
- __ IncrementCounter(
- counters->named_store_global_inline(), 1, scratch1(), scratch2());
- __ Ret();
-
- // Handle store cache miss.
- __ bind(&miss);
- __ IncrementCounter(
- counters->named_store_global_inline_miss(), 1, scratch1(), scratch2());
- TailCallBuiltin(masm(), MissBuiltin(kind()));
-
- // Return the generated code.
- return GetICCode(kind(), Code::NORMAL, name);
-}
-
-
Handle<Code> LoadStubCompiler::CompileLoadNonexistent(
Handle<JSObject> object,
Handle<JSObject> last,
@@ -3190,509 +3177,6 @@ void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
}
-static void GenerateSmiKeyCheck(MacroAssembler* masm,
- Register key,
- Register scratch0,
- DwVfpRegister double_scratch0,
- LowDwVfpRegister double_scratch1,
- Label* fail) {
- Label key_ok;
- // Check for smi or a smi inside a heap number. We convert the heap
- // number and check if the conversion is exact and fits into the smi
- // range.
- __ JumpIfSmi(key, &key_ok);
- __ CheckMap(key,
- scratch0,
- Heap::kHeapNumberMapRootIndex,
- fail,
- DONT_DO_SMI_CHECK);
- __ sub(ip, key, Operand(kHeapObjectTag));
- __ vldr(double_scratch0, ip, HeapNumber::kValueOffset);
- __ TryDoubleToInt32Exact(scratch0, double_scratch0, double_scratch1);
- __ b(ne, fail);
- __ TrySmiTag(key, scratch0, fail);
- __ bind(&key_ok);
-}
-
-
-void KeyedStoreStubCompiler::GenerateStoreExternalArray(
- MacroAssembler* masm,
- ElementsKind elements_kind) {
- // ---------- S t a t e --------------
- // -- r0 : value
- // -- r1 : key
- // -- r2 : receiver
- // -- lr : return address
- // -----------------------------------
- Label slow, check_heap_number, miss_force_generic;
-
- // Register usage.
- Register value = r0;
- Register key = r1;
- Register receiver = r2;
- // r3 mostly holds the elements array or the destination external array.
-
- // This stub is meant to be tail-jumped to, the receiver must already
- // have been verified by the caller to not be a smi.
-
- // Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, key, r4, d1, d2, &miss_force_generic);
-
- __ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset));
-
- // Check that the index is in range
- __ ldr(ip, FieldMemOperand(r3, ExternalArray::kLengthOffset));
- __ cmp(key, ip);
- // Unsigned comparison catches both negative and too-large values.
- __ b(hs, &miss_force_generic);
-
- // Handle both smis and HeapNumbers in the fast path. Go to the
- // runtime for all other kinds of values.
- // r3: external array.
- if (elements_kind == EXTERNAL_PIXEL_ELEMENTS) {
- // Double to pixel conversion is only implemented in the runtime for now.
- __ UntagAndJumpIfNotSmi(r5, value, &slow);
- } else {
- __ UntagAndJumpIfNotSmi(r5, value, &check_heap_number);
- }
- __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset));
-
- // r3: base pointer of external storage.
- // r5: value (integer).
- switch (elements_kind) {
- case EXTERNAL_PIXEL_ELEMENTS:
- // Clamp the value to [0..255].
- __ Usat(r5, 8, Operand(r5));
- __ strb(r5, MemOperand(r3, key, LSR, 1));
- break;
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- __ strb(r5, MemOperand(r3, key, LSR, 1));
- break;
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- __ strh(r5, MemOperand(r3, key, LSL, 0));
- break;
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- __ str(r5, MemOperand(r3, key, LSL, 1));
- break;
- case EXTERNAL_FLOAT_ELEMENTS:
- // Perform int-to-float conversion and store to memory.
- __ SmiUntag(r4, key);
- StoreIntAsFloat(masm, r3, r4, r5, r7);
- break;
- case EXTERNAL_DOUBLE_ELEMENTS:
- __ vmov(s2, r5);
- __ vcvt_f64_s32(d0, s2);
- __ add(r3, r3, Operand(key, LSL, 2));
- // r3: effective address of the double element
- __ vstr(d0, r3, 0);
- break;
- case FAST_ELEMENTS:
- case FAST_SMI_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- UNREACHABLE();
- break;
- }
-
- // Entry registers are intact, r0 holds the value which is the return value.
- __ Ret();
-
- if (elements_kind != EXTERNAL_PIXEL_ELEMENTS) {
- // r3: external array.
- __ bind(&check_heap_number);
- __ CompareObjectType(value, r5, r6, HEAP_NUMBER_TYPE);
- __ b(ne, &slow);
-
- __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset));
-
- // r3: base pointer of external storage.
-
- // The WebGL specification leaves the behavior of storing NaN and
- // +/-Infinity into integer arrays basically undefined. For more
- // reproducible behavior, convert these to zero.
-
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
- // vldr requires offset to be a multiple of 4 so we can not
- // include -kHeapObjectTag into it.
- __ sub(r5, r0, Operand(kHeapObjectTag));
- __ vldr(d0, r5, HeapNumber::kValueOffset);
- __ add(r5, r3, Operand(key, LSL, 1));
- __ vcvt_f32_f64(s0, d0);
- __ vstr(s0, r5, 0);
- } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
- __ sub(r5, r0, Operand(kHeapObjectTag));
- __ vldr(d0, r5, HeapNumber::kValueOffset);
- __ add(r5, r3, Operand(key, LSL, 2));
- __ vstr(d0, r5, 0);
- } else {
- // Hoisted load. vldr requires offset to be a multiple of 4 so we can
- // not include -kHeapObjectTag into it.
- __ sub(r5, value, Operand(kHeapObjectTag));
- __ vldr(d0, r5, HeapNumber::kValueOffset);
- __ ECMAToInt32(r5, d0, r6, r7, r9, d1);
-
- switch (elements_kind) {
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- __ strb(r5, MemOperand(r3, key, LSR, 1));
- break;
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- __ strh(r5, MemOperand(r3, key, LSL, 0));
- break;
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- __ str(r5, MemOperand(r3, key, LSL, 1));
- break;
- case EXTERNAL_PIXEL_ELEMENTS:
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_SMI_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- UNREACHABLE();
- break;
- }
- }
-
- // Entry registers are intact, r0 holds the value which is the return
- // value.
- __ Ret();
- }
-
- // Slow case, key and receiver still in r0 and r1.
- __ bind(&slow);
- __ IncrementCounter(
- masm->isolate()->counters()->keyed_load_external_array_slow(),
- 1, r2, r3);
-
- // ---------- S t a t e --------------
- // -- lr : return address
- // -- r0 : key
- // -- r1 : receiver
- // -----------------------------------
- TailCallBuiltin(masm, Builtins::kKeyedStoreIC_Slow);
-
- // Miss case, call the runtime.
- __ bind(&miss_force_generic);
-
- // ---------- S t a t e --------------
- // -- lr : return address
- // -- r0 : key
- // -- r1 : receiver
- // -----------------------------------
- TailCallBuiltin(masm, Builtins::kKeyedStoreIC_MissForceGeneric);
-}
-
-
-void KeyedStoreStubCompiler::GenerateStoreFastElement(
- MacroAssembler* masm,
- bool is_js_array,
- ElementsKind elements_kind,
- KeyedAccessStoreMode store_mode) {
- // ----------- S t a t e -------------
- // -- r0 : value
- // -- r1 : key
- // -- r2 : receiver
- // -- lr : return address
- // -- r3 : scratch
- // -- r4 : scratch (elements)
- // -----------------------------------
- Label miss_force_generic, transition_elements_kind, grow, slow;
- Label finish_store, check_capacity;
-
- Register value_reg = r0;
- Register key_reg = r1;
- Register receiver_reg = r2;
- Register scratch = r4;
- Register elements_reg = r3;
- Register length_reg = r5;
- Register scratch2 = r6;
-
- // This stub is meant to be tail-jumped to, the receiver must already
- // have been verified by the caller to not be a smi.
-
- // Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, key_reg, r4, d1, d2, &miss_force_generic);
-
- if (IsFastSmiElementsKind(elements_kind)) {
- __ JumpIfNotSmi(value_reg, &transition_elements_kind);
- }
-
- // Check that the key is within bounds.
- __ ldr(elements_reg,
- FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
- if (is_js_array) {
- __ ldr(scratch, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
- } else {
- __ ldr(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
- }
- // Compare smis.
- __ cmp(key_reg, scratch);
- if (is_js_array && IsGrowStoreMode(store_mode)) {
- __ b(hs, &grow);
- } else {
- __ b(hs, &miss_force_generic);
- }
-
- // Make sure elements is a fast element array, not 'cow'.
- __ CheckMap(elements_reg,
- scratch,
- Heap::kFixedArrayMapRootIndex,
- &miss_force_generic,
- DONT_DO_SMI_CHECK);
-
- __ bind(&finish_store);
- if (IsFastSmiElementsKind(elements_kind)) {
- __ add(scratch,
- elements_reg,
- Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ add(scratch, scratch, Operand::PointerOffsetFromSmiKey(key_reg));
- __ str(value_reg, MemOperand(scratch));
- } else {
- ASSERT(IsFastObjectElementsKind(elements_kind));
- __ add(scratch,
- elements_reg,
- Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ add(scratch, scratch, Operand::PointerOffsetFromSmiKey(key_reg));
- __ str(value_reg, MemOperand(scratch));
- __ mov(receiver_reg, value_reg);
- __ RecordWrite(elements_reg, // Object.
- scratch, // Address.
- receiver_reg, // Value.
- kLRHasNotBeenSaved,
- kDontSaveFPRegs);
- }
- // value_reg (r0) is preserved.
- // Done.
- __ Ret();
-
- __ bind(&miss_force_generic);
- TailCallBuiltin(masm, Builtins::kKeyedStoreIC_MissForceGeneric);
-
- __ bind(&transition_elements_kind);
- TailCallBuiltin(masm, Builtins::kKeyedStoreIC_Miss);
-
- if (is_js_array && IsGrowStoreMode(store_mode)) {
- // Grow the array by a single element if possible.
- __ bind(&grow);
-
- // Make sure the array is only growing by a single element, anything else
- // must be handled by the runtime. Flags already set by previous compare.
- __ b(ne, &miss_force_generic);
-
- // Check for the empty array, and preallocate a small backing store if
- // possible.
- __ ldr(length_reg,
- FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
- __ ldr(elements_reg,
- FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
- __ CompareRoot(elements_reg, Heap::kEmptyFixedArrayRootIndex);
- __ b(ne, &check_capacity);
-
- int size = FixedArray::SizeFor(JSArray::kPreallocatedArrayElements);
- __ Allocate(size, elements_reg, scratch, scratch2, &slow, TAG_OBJECT);
-
- __ LoadRoot(scratch, Heap::kFixedArrayMapRootIndex);
- __ str(scratch, FieldMemOperand(elements_reg, JSObject::kMapOffset));
- __ mov(scratch, Operand(Smi::FromInt(JSArray::kPreallocatedArrayElements)));
- __ str(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
- __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
- for (int i = 1; i < JSArray::kPreallocatedArrayElements; ++i) {
- __ str(scratch, FieldMemOperand(elements_reg, FixedArray::SizeFor(i)));
- }
-
- // Store the element at index zero.
- __ str(value_reg, FieldMemOperand(elements_reg, FixedArray::SizeFor(0)));
-
- // Install the new backing store in the JSArray.
- __ str(elements_reg,
- FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
- __ RecordWriteField(receiver_reg, JSObject::kElementsOffset, elements_reg,
- scratch, kLRHasNotBeenSaved, kDontSaveFPRegs,
- EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-
- // Increment the length of the array.
- __ mov(length_reg, Operand(Smi::FromInt(1)));
- __ str(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
- __ Ret();
-
- __ bind(&check_capacity);
- // Check for cow elements, in general they are not handled by this stub
- __ CheckMap(elements_reg,
- scratch,
- Heap::kFixedCOWArrayMapRootIndex,
- &miss_force_generic,
- DONT_DO_SMI_CHECK);
-
- __ ldr(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
- __ cmp(length_reg, scratch);
- __ b(hs, &slow);
-
- // Grow the array and finish the store.
- __ add(length_reg, length_reg, Operand(Smi::FromInt(1)));
- __ str(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
- __ jmp(&finish_store);
-
- __ bind(&slow);
- TailCallBuiltin(masm, Builtins::kKeyedStoreIC_Slow);
- }
-}
-
-
-void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
- MacroAssembler* masm,
- bool is_js_array,
- KeyedAccessStoreMode store_mode) {
- // ----------- S t a t e -------------
- // -- r0 : value
- // -- r1 : key
- // -- r2 : receiver
- // -- lr : return address
- // -- r3 : scratch (elements backing store)
- // -- r4 : scratch
- // -- r5 : scratch
- // -----------------------------------
- Label miss_force_generic, transition_elements_kind, grow, slow;
- Label finish_store, check_capacity;
-
- Register value_reg = r0;
- Register key_reg = r1;
- Register receiver_reg = r2;
- Register elements_reg = r3;
- Register scratch1 = r4;
- Register scratch2 = r5;
- Register length_reg = r7;
-
- // This stub is meant to be tail-jumped to, the receiver must already
- // have been verified by the caller to not be a smi.
-
- // Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, key_reg, r4, d1, d2, &miss_force_generic);
-
- __ ldr(elements_reg,
- FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
-
- // Check that the key is within bounds.
- if (is_js_array) {
- __ ldr(scratch1, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
- } else {
- __ ldr(scratch1,
- FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
- }
- // Compare smis, unsigned compare catches both negative and out-of-bound
- // indexes.
- __ cmp(key_reg, scratch1);
- if (IsGrowStoreMode(store_mode)) {
- __ b(hs, &grow);
- } else {
- __ b(hs, &miss_force_generic);
- }
-
- __ bind(&finish_store);
- __ StoreNumberToDoubleElements(value_reg, key_reg, elements_reg,
- scratch1, d0, &transition_elements_kind);
- __ Ret();
-
- // Handle store cache miss, replacing the ic with the generic stub.
- __ bind(&miss_force_generic);
- TailCallBuiltin(masm, Builtins::kKeyedStoreIC_MissForceGeneric);
-
- __ bind(&transition_elements_kind);
- TailCallBuiltin(masm, Builtins::kKeyedStoreIC_Miss);
-
- if (is_js_array && IsGrowStoreMode(store_mode)) {
- // Grow the array by a single element if possible.
- __ bind(&grow);
-
- // Make sure the array is only growing by a single element, anything else
- // must be handled by the runtime. Flags already set by previous compare.
- __ b(ne, &miss_force_generic);
-
- // Transition on values that can't be stored in a FixedDoubleArray.
- Label value_is_smi;
- __ JumpIfSmi(value_reg, &value_is_smi);
- __ ldr(scratch1, FieldMemOperand(value_reg, HeapObject::kMapOffset));
- __ CompareRoot(scratch1, Heap::kHeapNumberMapRootIndex);
- __ b(ne, &transition_elements_kind);
- __ bind(&value_is_smi);
-
- // Check for the empty array, and preallocate a small backing store if
- // possible.
- __ ldr(length_reg,
- FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
- __ ldr(elements_reg,
- FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
- __ CompareRoot(elements_reg, Heap::kEmptyFixedArrayRootIndex);
- __ b(ne, &check_capacity);
-
- int size = FixedDoubleArray::SizeFor(JSArray::kPreallocatedArrayElements);
- __ Allocate(size, elements_reg, scratch1, scratch2, &slow, TAG_OBJECT);
-
- // Initialize the new FixedDoubleArray.
- __ LoadRoot(scratch1, Heap::kFixedDoubleArrayMapRootIndex);
- __ str(scratch1, FieldMemOperand(elements_reg, JSObject::kMapOffset));
- __ mov(scratch1,
- Operand(Smi::FromInt(JSArray::kPreallocatedArrayElements)));
- __ str(scratch1,
- FieldMemOperand(elements_reg, FixedDoubleArray::kLengthOffset));
-
- __ mov(scratch1, elements_reg);
- __ StoreNumberToDoubleElements(value_reg, key_reg, scratch1,
- scratch2, d0, &transition_elements_kind);
-
- __ mov(scratch1, Operand(kHoleNanLower32));
- __ mov(scratch2, Operand(kHoleNanUpper32));
- for (int i = 1; i < JSArray::kPreallocatedArrayElements; i++) {
- int offset = FixedDoubleArray::OffsetOfElementAt(i);
- __ str(scratch1, FieldMemOperand(elements_reg, offset));
- __ str(scratch2, FieldMemOperand(elements_reg, offset + kPointerSize));
- }
-
- // Install the new backing store in the JSArray.
- __ str(elements_reg,
- FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
- __ RecordWriteField(receiver_reg, JSObject::kElementsOffset, elements_reg,
- scratch1, kLRHasNotBeenSaved, kDontSaveFPRegs,
- EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-
- // Increment the length of the array.
- __ mov(length_reg, Operand(Smi::FromInt(1)));
- __ str(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
- __ ldr(elements_reg,
- FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
- __ Ret();
-
- __ bind(&check_capacity);
- // Make sure that the backing store can hold additional elements.
- __ ldr(scratch1,
- FieldMemOperand(elements_reg, FixedDoubleArray::kLengthOffset));
- __ cmp(length_reg, scratch1);
- __ b(hs, &slow);
-
- // Grow the array and finish the store.
- __ add(length_reg, length_reg, Operand(Smi::FromInt(1)));
- __ str(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
- __ jmp(&finish_store);
-
- __ bind(&slow);
- TailCallBuiltin(masm, Builtins::kKeyedStoreIC_Slow);
- }
-}
-
-
#undef __
} } // namespace v8::internal
diff --git a/deps/v8/src/assembler.cc b/deps/v8/src/assembler.cc
index ae8a0b58b..fbff62dd6 100644
--- a/deps/v8/src/assembler.cc
+++ b/deps/v8/src/assembler.cc
@@ -43,7 +43,7 @@
#include "deoptimizer.h"
#include "execution.h"
#include "ic.h"
-#include "isolate.h"
+#include "isolate-inl.h"
#include "jsregexp.h"
#include "lazy-instance.h"
#include "platform.h"
@@ -119,7 +119,7 @@ AssemblerBase::AssemblerBase(Isolate* isolate, void* buffer, int buffer_size)
emit_debug_code_(FLAG_debug_code),
predictable_code_size_(false) {
if (FLAG_mask_constants_with_cookie && isolate != NULL) {
- jit_cookie_ = V8::RandomPrivate(isolate);
+ jit_cookie_ = isolate->random_number_generator()->NextInt();
}
if (buffer == NULL) {
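The jit cookie masks immediates that untrusted code can influence, so the raw value never appears verbatim in executable memory. The masking idea in isolation (sketch with assumed values; not V8's emitters):

    #include <cstdint>
    #include <cstdio>

    int main() {
      uint32_t jit_cookie = 0x9e3779b9u;  // Would come from the isolate's RNG.
      uint32_t constant = 0xdeadbeefu;    // Attacker-influenced immediate.
      uint32_t masked = constant ^ jit_cookie;   // What is embedded in code.
      uint32_t recovered = masked ^ jit_cookie;  // What the code recomputes.
      std::printf("%08x\n", recovered);   // deadbeef
    }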
@@ -798,7 +798,7 @@ void RelocInfo::Print(Isolate* isolate, FILE* out) {
target_object()->ShortPrint(out);
PrintF(out, ")");
} else if (rmode_ == EXTERNAL_REFERENCE) {
- ExternalReferenceEncoder ref_encoder;
+ ExternalReferenceEncoder ref_encoder(isolate);
PrintF(out, " (%s) (%p)",
ref_encoder.NameOfAddress(*target_reference_address()),
*target_reference_address());
@@ -891,7 +891,7 @@ void ExternalReference::SetUp() {
double_constants.the_hole_nan = BitCast<double>(kHoleNanInt64);
double_constants.negative_infinity = -V8_INFINITY;
- math_exp_data_mutex = OS::CreateMutex();
+ math_exp_data_mutex = new Mutex();
}
@@ -899,7 +899,7 @@ void ExternalReference::InitializeMathExpData() {
// Early return?
if (math_exp_data_initialized) return;
- math_exp_data_mutex->Lock();
+ LockGuard<Mutex> lock_guard(math_exp_data_mutex);
if (!math_exp_data_initialized) {
// If this is changed, generated code must be adapted too.
const int kTableSizeBits = 11;
@@ -935,7 +935,6 @@ void ExternalReference::InitializeMathExpData() {
math_exp_data_initialized = true;
}
- math_exp_data_mutex->Unlock();
}
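InitializeMathExpData is the classic double-checked lazy initializer: an unsynchronized fast path, then a re-check under the lock, with LockGuard guaranteeing the unlock that an explicit Unlock call could miss on an early return. The shape of the pattern, standalone (std::mutex stands in for V8's Mutex/LockGuard; as in the original, the unlocked first read is benign only under the platform assumptions V8 makes):

    #include <mutex>

    static bool math_exp_data_initialized = false;
    static std::mutex* math_exp_data_mutex = new std::mutex();
    static double* math_exp_table = 0;

    void InitializeMathExpData() {
      if (math_exp_data_initialized) return;     // Fast path, no lock taken.
      std::lock_guard<std::mutex> guard(*math_exp_data_mutex);
      if (!math_exp_data_initialized) {          // Re-check under the lock.
        math_exp_table = new double[2048];       // Build the table once.
        math_exp_data_initialized = true;
      }
    }                                            // Guard unlocks on every path.

    int main() { InitializeMathExpData(); }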
diff --git a/deps/v8/src/assembler.h b/deps/v8/src/assembler.h
index d70d5aa92..6b399f208 100644
--- a/deps/v8/src/assembler.h
+++ b/deps/v8/src/assembler.h
@@ -196,7 +196,6 @@ class Label BASE_EMBEDDED {
}
friend class Assembler;
- friend class RegexpAssembler;
friend class Displacement;
friend class RegExpMacroAssemblerIrregexp;
};
@@ -425,7 +424,7 @@ class RelocInfo BASE_EMBEDDED {
INLINE(Object** call_object_address());
template<typename StaticVisitor> inline void Visit(Heap* heap);
- inline void Visit(ObjectVisitor* v);
+ inline void Visit(Isolate* isolate, ObjectVisitor* v);
// Patch the code with some other code.
void PatchCode(byte* instructions, int instruction_count);
@@ -644,38 +643,21 @@ class ExternalReference BASE_EMBEDDED {
BUILTIN_FP_INT_CALL,
// Direct call to API function callback.
- // Handle<Value> f(v8::Arguments&)
+ // void f(v8::FunctionCallbackInfo&)
DIRECT_API_CALL,
- // Call to invocation callback via InvokeInvocationCallback.
- // Handle<Value> f(v8::Arguments&, v8::InvocationCallback)
- PROFILING_API_CALL,
-
- // Direct call to API function callback.
- // void f(v8::Arguments&)
- DIRECT_API_CALL_NEW,
-
// Call to function callback via InvokeFunctionCallback.
- // void f(v8::Arguments&, v8::FunctionCallback)
- PROFILING_API_CALL_NEW,
+ // void f(v8::FunctionCallbackInfo&, v8::FunctionCallback)
+ PROFILING_API_CALL,
// Direct call to accessor getter callback.
- // Handle<value> f(Local<String> property, AccessorInfo& info)
+ // void f(Local<String> property, PropertyCallbackInfo& info)
DIRECT_GETTER_CALL,
- // Call to accessor getter callback via InvokeAccessorGetter.
- // Handle<value> f(Local<String> property, AccessorInfo& info,
- // AccessorGetter getter)
- PROFILING_GETTER_CALL,
-
- // Direct call to accessor getter callback.
- // void f(Local<String> property, AccessorInfo& info)
- DIRECT_GETTER_CALL_NEW,
-
// Call to accessor getter callback via InvokeAccessorGetterCallback.
- // void f(Local<String> property, AccessorInfo& info,
+ // void f(Local<String> property, PropertyCallbackInfo& info,
// AccessorGetterCallback callback)
- PROFILING_GETTER_CALL_NEW
+ PROFILING_GETTER_CALL
};
static void SetUp();
@@ -708,7 +690,7 @@ class ExternalReference BASE_EMBEDDED {
explicit ExternalReference(const SCTableReference& table_ref);
- // Isolate::Current() as an external reference.
+ // Isolate as an external reference.
static ExternalReference isolate_address(Isolate* isolate);
// One-of-a-kind references. These references are not part of a general
diff --git a/deps/v8/src/ast.cc b/deps/v8/src/ast.cc
index 324dc0e0c..823dedee0 100644
--- a/deps/v8/src/ast.cc
+++ b/deps/v8/src/ast.cc
@@ -599,7 +599,7 @@ bool Call::ComputeGlobalTarget(Handle<GlobalObject> global,
Handle<JSFunction> candidate(JSFunction::cast(cell_->value()));
// If the function is in new space we assume it's more likely to
// change and thus prefer the general IC code.
- if (!HEAP->InNewSpace(*candidate)) {
+ if (!lookup->isolate()->heap()->InNewSpace(*candidate)) {
target_ = candidate;
return true;
}
@@ -646,8 +646,15 @@ void Call::RecordTypeFeedback(TypeFeedbackOracle* oracle,
Literal* key = property->key()->AsLiteral();
ASSERT(key != NULL && key->value()->IsString());
Handle<String> name = Handle<String>::cast(key->value());
+ check_type_ = oracle->GetCallCheckType(this);
receiver_types_.Clear();
- oracle->CallReceiverTypes(this, name, call_kind, &receiver_types_);
+ if (check_type_ == RECEIVER_MAP_CHECK) {
+ oracle->CallReceiverTypes(this, name, call_kind, &receiver_types_);
+ is_monomorphic_ = is_monomorphic_ && receiver_types_.length() > 0;
+ } else {
+ holder_ = GetPrototypeForPrimitiveCheck(check_type_, oracle->isolate());
+ receiver_types_.Add(handle(holder_->map()), oracle->zone());
+ }
#ifdef DEBUG
if (FLAG_enable_slow_asserts) {
int length = receiver_types_.length();
@@ -657,17 +664,8 @@ void Call::RecordTypeFeedback(TypeFeedbackOracle* oracle,
}
}
#endif
- check_type_ = oracle->GetCallCheckType(this);
if (is_monomorphic_) {
- Handle<Map> map;
- if (receiver_types_.length() > 0) {
- ASSERT(check_type_ == RECEIVER_MAP_CHECK);
- map = receiver_types_.at(0);
- } else {
- ASSERT(check_type_ != RECEIVER_MAP_CHECK);
- holder_ = GetPrototypeForPrimitiveCheck(check_type_, oracle->isolate());
- map = Handle<Map>(holder_->map());
- }
+ Handle<Map> map = receiver_types_.first();
is_monomorphic_ = ComputeTarget(map, name);
}
}
@@ -860,12 +858,13 @@ bool RegExpCapture::IsAnchoredAtEnd() {
// in as many cases as possible, to make it more difficult for incorrect
// parses to look like correct ones, which is likely if the input and
// output formats are alike.
-class RegExpUnparser: public RegExpVisitor {
+class RegExpUnparser V8_FINAL : public RegExpVisitor {
public:
explicit RegExpUnparser(Zone* zone);
void VisitCharacterRange(CharacterRange that);
SmartArrayPointer<const char> ToString() { return stream_.ToCString(); }
-#define MAKE_CASE(Name) virtual void* Visit##Name(RegExp##Name*, void* data);
+#define MAKE_CASE(Name) virtual void* Visit##Name(RegExp##Name*, \
+ void* data) V8_OVERRIDE;
FOR_EACH_REG_EXP_TREE_TYPE(MAKE_CASE)
#undef MAKE_CASE
private:
@@ -963,12 +962,12 @@ void* RegExpUnparser::VisitAtom(RegExpAtom* that, void* data) {
void* RegExpUnparser::VisitText(RegExpText* that, void* data) {
if (that->elements()->length() == 1) {
- that->elements()->at(0).data.u_atom->Accept(this, data);
+ that->elements()->at(0).tree()->Accept(this, data);
} else {
stream()->Add("(!");
for (int i = 0; i < that->elements()->length(); i++) {
stream()->Add(" ");
- that->elements()->at(i).data.u_atom->Accept(this, data);
+ that->elements()->at(i).tree()->Accept(this, data);
}
stream()->Add(")");
}
@@ -1084,7 +1083,7 @@ CaseClause::CaseClause(Isolate* isolate,
#define DONT_OPTIMIZE_NODE(NodeType) \
void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
increase_node_count(); \
- add_flag(kDontOptimize); \
+ set_dont_optimize_reason(k##NodeType); \
add_flag(kDontInline); \
add_flag(kDontSelfOptimize); \
}
@@ -1096,7 +1095,7 @@ CaseClause::CaseClause(Isolate* isolate,
#define DONT_CACHE_NODE(NodeType) \
void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
increase_node_count(); \
- add_flag(kDontOptimize); \
+ set_dont_optimize_reason(k##NodeType); \
add_flag(kDontInline); \
add_flag(kDontSelfOptimize); \
add_flag(kDontCache); \
@@ -1182,7 +1181,6 @@ void AstConstructionVisitor::VisitCallRuntime(CallRuntime* node) {
Handle<String> Literal::ToString() {
if (value_->IsString()) return Handle<String>::cast(value_);
- Factory* factory = Isolate::Current()->factory();
ASSERT(value_->IsNumber());
char arr[100];
Vector<char> buffer(arr, ARRAY_SIZE(arr));
@@ -1194,7 +1192,7 @@ Handle<String> Literal::ToString() {
} else {
str = DoubleToCString(value_->Number(), buffer);
}
- return factory->NewStringFromAscii(CStrVector(str));
+ return isolate_->factory()->NewStringFromAscii(CStrVector(str));
}
diff --git a/deps/v8/src/ast.h b/deps/v8/src/ast.h
index a8b74213a..c63090687 100644
--- a/deps/v8/src/ast.h
+++ b/deps/v8/src/ast.h
@@ -123,10 +123,6 @@ namespace internal {
STATEMENT_NODE_LIST(V) \
EXPRESSION_NODE_LIST(V)
-#ifdef WIN32
-#undef Yield
-#endif
-
// Forward declarations
class AstConstructionVisitor;
template<class> class AstNodeFactory;
@@ -165,22 +161,23 @@ typedef ZoneList<Handle<String> > ZoneStringList;
typedef ZoneList<Handle<Object> > ZoneObjectList;
-#define DECLARE_NODE_TYPE(type) \
- virtual void Accept(AstVisitor* v); \
- virtual AstNode::NodeType node_type() const { return AstNode::k##type; } \
+#define DECLARE_NODE_TYPE(type) \
+ virtual void Accept(AstVisitor* v) V8_OVERRIDE; \
+ virtual AstNode::NodeType node_type() const V8_FINAL V8_OVERRIDE { \
+ return AstNode::k##type; \
+ } \
template<class> friend class AstNodeFactory;
enum AstPropertiesFlag {
kDontInline,
- kDontOptimize,
kDontSelfOptimize,
kDontSoftInline,
kDontCache
};
-class AstProperties BASE_EMBEDDED {
+class AstProperties V8_FINAL BASE_EMBEDDED {
public:
class Flags : public EnumSet<AstPropertiesFlag, int> {};
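V8_FINAL and V8_OVERRIDE expand to the C++11 final and override specifiers where the compiler supports them, and to nothing otherwise, so the annotations added throughout this header are purely compile-time checks. In plain C++11 terms:

    struct AstNode {
      virtual ~AstNode() {}
      virtual int node_type() const = 0;
    };

    struct Block final : AstNode {  // V8_FINAL: no further subclassing.
      virtual int node_type() const override { return 1; }  // V8_OVERRIDE
    };

    int main() {
      Block b;
      return b.node_type() == 1 ? 0 : 1;
    }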
@@ -209,9 +206,9 @@ class AstNode: public ZoneObject {
return zone->New(static_cast<int>(size));
}
- AstNode() { }
+ AstNode() {}
- virtual ~AstNode() { }
+ virtual ~AstNode() {}
virtual void Accept(AstVisitor* v) = 0;
virtual NodeType node_type() const = 0;
@@ -254,7 +251,7 @@ class AstNode: public ZoneObject {
};
-class Statement: public AstNode {
+class Statement : public AstNode {
public:
Statement() : statement_pos_(RelocInfo::kNoPosition) {}
@@ -269,7 +266,7 @@ class Statement: public AstNode {
};
-class SmallMapList {
+class SmallMapList V8_FINAL {
public:
SmallMapList() {}
SmallMapList(int capacity, Zone* zone) : list_(capacity, zone) {}
@@ -291,6 +288,14 @@ class SmallMapList {
Add(map, zone);
}
+ void FilterForPossibleTransitions(Map* root_map) {
+ for (int i = list_.length() - 1; i >= 0; i--) {
+ if (at(i)->FindRootMap() != root_map) {
+ list_.RemoveElement(list_.at(i));
+ }
+ }
+ }
+
void Add(Handle<Map> handle, Zone* zone) {
list_.Add(handle.location(), zone);
}
@@ -310,7 +315,7 @@ class SmallMapList {
};
-class Expression: public AstNode {
+class Expression : public AstNode {
public:
enum Context {
// Not assigned a context yet, or else will not be visited during
@@ -369,12 +374,6 @@ class Expression: public AstNode {
UNREACHABLE();
return NULL;
}
- Handle<Map> GetMonomorphicReceiverType() {
- ASSERT(IsMonomorphic());
- SmallMapList* types = GetReceiverTypes();
- ASSERT(types != NULL && types->length() == 1);
- return types->at(0);
- }
virtual KeyedAccessStoreMode GetStoreMode() {
UNREACHABLE();
return STANDARD_STORE;
@@ -403,7 +402,7 @@ class Expression: public AstNode {
};
-class BreakableStatement: public Statement {
+class BreakableStatement : public Statement {
public:
enum BreakableType {
TARGET_FOR_ANONYMOUS,
@@ -415,7 +414,9 @@ class BreakableStatement: public Statement {
ZoneStringList* labels() const { return labels_; }
// Type testing & conversion.
- virtual BreakableStatement* AsBreakableStatement() { return this; }
+ virtual BreakableStatement* AsBreakableStatement() V8_FINAL V8_OVERRIDE {
+ return this;
+ }
// Code generation
Label* break_target() { return &break_target_; }
@@ -448,7 +449,7 @@ class BreakableStatement: public Statement {
};
-class Block: public BreakableStatement {
+class Block V8_FINAL : public BreakableStatement {
public:
DECLARE_NODE_TYPE(Block)
@@ -459,7 +460,7 @@ class Block: public BreakableStatement {
ZoneList<Statement*>* statements() { return &statements_; }
bool is_initializer_block() const { return is_initializer_block_; }
- virtual bool IsJump() const {
+ virtual bool IsJump() const V8_OVERRIDE {
return !statements_.is_empty() && statements_.last()->IsJump()
&& labels() == NULL; // Good enough as an approximation...
}
@@ -486,7 +487,7 @@ class Block: public BreakableStatement {
};
-class Declaration: public AstNode {
+class Declaration : public AstNode {
public:
VariableProxy* proxy() const { return proxy_; }
VariableMode mode() const { return mode_; }
@@ -513,11 +514,11 @@ class Declaration: public AstNode {
};
-class VariableDeclaration: public Declaration {
+class VariableDeclaration V8_FINAL : public Declaration {
public:
DECLARE_NODE_TYPE(VariableDeclaration)
- virtual InitializationFlag initialization() const {
+ virtual InitializationFlag initialization() const V8_OVERRIDE {
return mode() == VAR ? kCreatedInitialized : kNeedsInitialization;
}
@@ -530,15 +531,15 @@ class VariableDeclaration: public Declaration {
};
-class FunctionDeclaration: public Declaration {
+class FunctionDeclaration V8_FINAL : public Declaration {
public:
DECLARE_NODE_TYPE(FunctionDeclaration)
FunctionLiteral* fun() const { return fun_; }
- virtual InitializationFlag initialization() const {
+ virtual InitializationFlag initialization() const V8_OVERRIDE {
return kCreatedInitialized;
}
- virtual bool IsInlineable() const;
+ virtual bool IsInlineable() const V8_OVERRIDE;
protected:
FunctionDeclaration(VariableProxy* proxy,
@@ -557,12 +558,12 @@ class FunctionDeclaration: public Declaration {
};
-class ModuleDeclaration: public Declaration {
+class ModuleDeclaration V8_FINAL : public Declaration {
public:
DECLARE_NODE_TYPE(ModuleDeclaration)
Module* module() const { return module_; }
- virtual InitializationFlag initialization() const {
+ virtual InitializationFlag initialization() const V8_OVERRIDE {
return kCreatedInitialized;
}
@@ -579,12 +580,12 @@ class ModuleDeclaration: public Declaration {
};
-class ImportDeclaration: public Declaration {
+class ImportDeclaration V8_FINAL : public Declaration {
public:
DECLARE_NODE_TYPE(ImportDeclaration)
Module* module() const { return module_; }
- virtual InitializationFlag initialization() const {
+ virtual InitializationFlag initialization() const V8_OVERRIDE {
return kCreatedInitialized;
}
@@ -601,11 +602,11 @@ class ImportDeclaration: public Declaration {
};
-class ExportDeclaration: public Declaration {
+class ExportDeclaration V8_FINAL : public Declaration {
public:
DECLARE_NODE_TYPE(ExportDeclaration)
- virtual InitializationFlag initialization() const {
+ virtual InitializationFlag initialization() const V8_OVERRIDE {
return kCreatedInitialized;
}
@@ -615,7 +616,7 @@ class ExportDeclaration: public Declaration {
};
-class Module: public AstNode {
+class Module : public AstNode {
public:
Interface* interface() const { return interface_; }
Block* body() const { return body_; }
@@ -634,7 +635,7 @@ class Module: public AstNode {
};
-class ModuleLiteral: public Module {
+class ModuleLiteral V8_FINAL : public Module {
public:
DECLARE_NODE_TYPE(ModuleLiteral)
@@ -643,7 +644,7 @@ class ModuleLiteral: public Module {
};
-class ModuleVariable: public Module {
+class ModuleVariable V8_FINAL : public Module {
public:
DECLARE_NODE_TYPE(ModuleVariable)
@@ -657,7 +658,7 @@ class ModuleVariable: public Module {
};
-class ModulePath: public Module {
+class ModulePath V8_FINAL : public Module {
public:
DECLARE_NODE_TYPE(ModulePath)
@@ -677,7 +678,7 @@ class ModulePath: public Module {
};
-class ModuleUrl: public Module {
+class ModuleUrl V8_FINAL : public Module {
public:
DECLARE_NODE_TYPE(ModuleUrl)
@@ -693,7 +694,7 @@ class ModuleUrl: public Module {
};
-class ModuleStatement: public Statement {
+class ModuleStatement V8_FINAL : public Statement {
public:
DECLARE_NODE_TYPE(ModuleStatement)
@@ -712,10 +713,12 @@ class ModuleStatement: public Statement {
};
-class IterationStatement: public BreakableStatement {
+class IterationStatement : public BreakableStatement {
public:
// Type testing & conversion.
- virtual IterationStatement* AsIterationStatement() { return this; }
+ virtual IterationStatement* AsIterationStatement() V8_FINAL V8_OVERRIDE {
+ return this;
+ }
Statement* body() const { return body_; }
@@ -745,7 +748,7 @@ class IterationStatement: public BreakableStatement {
};
-class DoWhileStatement: public IterationStatement {
+class DoWhileStatement V8_FINAL : public IterationStatement {
public:
DECLARE_NODE_TYPE(DoWhileStatement)
@@ -761,8 +764,8 @@ class DoWhileStatement: public IterationStatement {
int condition_position() { return condition_position_; }
void set_condition_position(int pos) { condition_position_ = pos; }
- virtual BailoutId ContinueId() const { return continue_id_; }
- virtual BailoutId StackCheckId() const { return back_edge_id_; }
+ virtual BailoutId ContinueId() const V8_OVERRIDE { return continue_id_; }
+ virtual BailoutId StackCheckId() const V8_OVERRIDE { return back_edge_id_; }
BailoutId BackEdgeId() const { return back_edge_id_; }
protected:
@@ -784,7 +787,7 @@ class DoWhileStatement: public IterationStatement {
};
-class WhileStatement: public IterationStatement {
+class WhileStatement V8_FINAL : public IterationStatement {
public:
DECLARE_NODE_TYPE(WhileStatement)
@@ -801,8 +804,8 @@ class WhileStatement: public IterationStatement {
may_have_function_literal_ = value;
}
- virtual BailoutId ContinueId() const { return EntryId(); }
- virtual BailoutId StackCheckId() const { return body_id_; }
+ virtual BailoutId ContinueId() const V8_OVERRIDE { return EntryId(); }
+ virtual BailoutId StackCheckId() const V8_OVERRIDE { return body_id_; }
BailoutId BodyId() const { return body_id_; }
protected:
@@ -823,7 +826,7 @@ class WhileStatement: public IterationStatement {
};
-class ForStatement: public IterationStatement {
+class ForStatement V8_FINAL : public IterationStatement {
public:
DECLARE_NODE_TYPE(ForStatement)
@@ -848,8 +851,8 @@ class ForStatement: public IterationStatement {
may_have_function_literal_ = value;
}
- virtual BailoutId ContinueId() const { return continue_id_; }
- virtual BailoutId StackCheckId() const { return body_id_; }
+ virtual BailoutId ContinueId() const V8_OVERRIDE { return continue_id_; }
+ virtual BailoutId StackCheckId() const V8_OVERRIDE { return body_id_; }
BailoutId BodyId() const { return body_id_; }
bool is_fast_smi_loop() { return loop_variable_ != NULL; }
@@ -882,7 +885,7 @@ class ForStatement: public IterationStatement {
};
-class ForEachStatement: public IterationStatement {
+class ForEachStatement : public IterationStatement {
public:
enum VisitMode {
ENUMERATE, // for (each in subject) body;
@@ -911,7 +914,7 @@ class ForEachStatement: public IterationStatement {
};
-class ForInStatement: public ForEachStatement {
+class ForInStatement V8_FINAL : public ForEachStatement {
public:
DECLARE_NODE_TYPE(ForInStatement)
@@ -926,8 +929,8 @@ class ForInStatement: public ForEachStatement {
BailoutId BodyId() const { return body_id_; }
BailoutId PrepareId() const { return prepare_id_; }
- virtual BailoutId ContinueId() const { return EntryId(); }
- virtual BailoutId StackCheckId() const { return body_id_; }
+ virtual BailoutId ContinueId() const V8_OVERRIDE { return EntryId(); }
+ virtual BailoutId StackCheckId() const V8_OVERRIDE { return body_id_; }
protected:
ForInStatement(Isolate* isolate, ZoneStringList* labels)
@@ -943,7 +946,7 @@ class ForInStatement: public ForEachStatement {
};
-class ForOfStatement: public ForEachStatement {
+class ForOfStatement V8_FINAL : public ForEachStatement {
public:
DECLARE_NODE_TYPE(ForOfStatement)
@@ -985,8 +988,8 @@ class ForOfStatement: public ForEachStatement {
return assign_each_;
}
- virtual BailoutId ContinueId() const { return EntryId(); }
- virtual BailoutId StackCheckId() const { return BackEdgeId(); }
+ virtual BailoutId ContinueId() const V8_OVERRIDE { return EntryId(); }
+ virtual BailoutId StackCheckId() const V8_OVERRIDE { return BackEdgeId(); }
BailoutId BackEdgeId() const { return back_edge_id_; }
@@ -1008,13 +1011,13 @@ class ForOfStatement: public ForEachStatement {
};
-class ExpressionStatement: public Statement {
+class ExpressionStatement V8_FINAL : public Statement {
public:
DECLARE_NODE_TYPE(ExpressionStatement)
void set_expression(Expression* e) { expression_ = e; }
Expression* expression() const { return expression_; }
- virtual bool IsJump() const { return expression_->IsThrow(); }
+ virtual bool IsJump() const V8_OVERRIDE { return expression_->IsThrow(); }
protected:
explicit ExpressionStatement(Expression* expression)
@@ -1025,16 +1028,16 @@ class ExpressionStatement: public Statement {
};
-class JumpStatement: public Statement {
+class JumpStatement : public Statement {
public:
- virtual bool IsJump() const { return true; }
+ virtual bool IsJump() const V8_FINAL V8_OVERRIDE { return true; }
protected:
JumpStatement() {}
};
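
The V8_OVERRIDE and V8_FINAL annotations threaded through these hunks come from include/v8config.h. Roughly (a simplified sketch; the real header also probes compiler versions and vendor extensions), they expand to the C++11 keywords when available and to nothing otherwise, so older toolchains still build while C++11 compilers diagnose signature mismatches:

    // Simplified sketch of the fallback pattern in include/v8config.h.
    #if defined(__cplusplus) && __cplusplus >= 201103L
    # define V8_OVERRIDE override  // error if the method overrides nothing
    # define V8_FINAL final        // error if anything tries to override it
    #else
    # define V8_OVERRIDE
    # define V8_FINAL
    #endif
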
-class ContinueStatement: public JumpStatement {
+class ContinueStatement V8_FINAL : public JumpStatement {
public:
DECLARE_NODE_TYPE(ContinueStatement)
@@ -1049,7 +1052,7 @@ class ContinueStatement: public JumpStatement {
};
-class BreakStatement: public JumpStatement {
+class BreakStatement V8_FINAL : public JumpStatement {
public:
DECLARE_NODE_TYPE(BreakStatement)
@@ -1064,7 +1067,7 @@ class BreakStatement: public JumpStatement {
};
-class ReturnStatement: public JumpStatement {
+class ReturnStatement V8_FINAL : public JumpStatement {
public:
DECLARE_NODE_TYPE(ReturnStatement)
@@ -1079,7 +1082,7 @@ class ReturnStatement: public JumpStatement {
};
-class WithStatement: public Statement {
+class WithStatement V8_FINAL : public Statement {
public:
DECLARE_NODE_TYPE(WithStatement)
@@ -1100,7 +1103,7 @@ class WithStatement: public Statement {
};
-class CaseClause: public ZoneObject {
+class CaseClause V8_FINAL : public ZoneObject {
public:
CaseClause(Isolate* isolate,
Expression* label,
@@ -1137,7 +1140,7 @@ class CaseClause: public ZoneObject {
};
-class SwitchStatement: public BreakableStatement {
+class SwitchStatement V8_FINAL : public BreakableStatement {
public:
DECLARE_NODE_TYPE(SwitchStatement)
@@ -1172,7 +1175,7 @@ class SwitchStatement: public BreakableStatement {
// the parser implicitly creates an empty statement. Use the
// HasThenStatement() and HasElseStatement() functions to check if a
// given if-statement has a then- or an else-part containing code.
-class IfStatement: public Statement {
+class IfStatement V8_FINAL : public Statement {
public:
DECLARE_NODE_TYPE(IfStatement)
@@ -1183,7 +1186,7 @@ class IfStatement: public Statement {
Statement* then_statement() const { return then_statement_; }
Statement* else_statement() const { return else_statement_; }
- virtual bool IsJump() const {
+ virtual bool IsJump() const V8_OVERRIDE {
return HasThenStatement() && then_statement()->IsJump()
&& HasElseStatement() && else_statement()->IsJump();
}
@@ -1217,7 +1220,7 @@ class IfStatement: public Statement {
// NOTE: TargetCollectors are represented as nodes to fit in the target
// stack in the compiler; this should probably be reworked.
-class TargetCollector: public AstNode {
+class TargetCollector V8_FINAL : public AstNode {
public:
explicit TargetCollector(Zone* zone) : targets_(0, zone) { }
@@ -1227,9 +1230,9 @@ class TargetCollector: public AstNode {
void AddTarget(Label* target, Zone* zone);
// Virtual behaviour. TargetCollectors are never part of the AST.
- virtual void Accept(AstVisitor* v) { UNREACHABLE(); }
- virtual NodeType node_type() const { return kInvalid; }
- virtual TargetCollector* AsTargetCollector() { return this; }
+ virtual void Accept(AstVisitor* v) V8_OVERRIDE { UNREACHABLE(); }
+ virtual NodeType node_type() const V8_OVERRIDE { return kInvalid; }
+ virtual TargetCollector* AsTargetCollector() V8_OVERRIDE { return this; }
ZoneList<Label*>* targets() { return &targets_; }
@@ -1238,7 +1241,7 @@ class TargetCollector: public AstNode {
};
-class TryStatement: public Statement {
+class TryStatement : public Statement {
public:
void set_escaping_targets(ZoneList<Label*>* targets) {
escaping_targets_ = targets;
@@ -1263,7 +1266,7 @@ class TryStatement: public Statement {
};
-class TryCatchStatement: public TryStatement {
+class TryCatchStatement V8_FINAL : public TryStatement {
public:
DECLARE_NODE_TYPE(TryCatchStatement)
@@ -1290,7 +1293,7 @@ class TryCatchStatement: public TryStatement {
};
-class TryFinallyStatement: public TryStatement {
+class TryFinallyStatement V8_FINAL : public TryStatement {
public:
DECLARE_NODE_TYPE(TryFinallyStatement)
@@ -1306,7 +1309,7 @@ class TryFinallyStatement: public TryStatement {
};
-class DebuggerStatement: public Statement {
+class DebuggerStatement V8_FINAL : public Statement {
public:
DECLARE_NODE_TYPE(DebuggerStatement)
@@ -1315,7 +1318,7 @@ class DebuggerStatement: public Statement {
};
-class EmptyStatement: public Statement {
+class EmptyStatement V8_FINAL : public Statement {
public:
DECLARE_NODE_TYPE(EmptyStatement)
@@ -1324,11 +1327,11 @@ class EmptyStatement: public Statement {
};
-class Literal: public Expression {
+class Literal V8_FINAL : public Expression {
public:
DECLARE_NODE_TYPE(Literal)
- virtual bool IsPropertyName() {
+ virtual bool IsPropertyName() V8_OVERRIDE {
if (value_->IsInternalizedString()) {
uint32_t ignored;
return !String::cast(*value_)->AsArrayIndex(&ignored);
@@ -1341,8 +1344,12 @@ class Literal: public Expression {
return Handle<String>::cast(value_);
}
- virtual bool ToBooleanIsTrue() { return value_->BooleanValue(); }
- virtual bool ToBooleanIsFalse() { return !value_->BooleanValue(); }
+ virtual bool ToBooleanIsTrue() V8_OVERRIDE {
+ return value_->BooleanValue();
+ }
+ virtual bool ToBooleanIsFalse() V8_OVERRIDE {
+ return !value_->BooleanValue();
+ }
// Identity testers.
bool IsNull() const {
@@ -1375,17 +1382,20 @@ class Literal: public Expression {
protected:
Literal(Isolate* isolate, Handle<Object> value)
: Expression(isolate),
- value_(value) { }
+ value_(value),
+ isolate_(isolate) { }
private:
Handle<String> ToString();
Handle<Object> value_;
+ // TODO(dcarney): remove. this is only needed for Match and Hash.
+ Isolate* isolate_;
};
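
The cached isolate_ on Literal (flagged by the TODO above) belongs to the same Isolate::Current() cleanup seen throughout this commit: Match and Hash need isolate access, and capturing it at construction avoids a thread-local lookup later. A standalone model of the pattern, with hypothetical names:

    #include <cstdint>

    // Hypothetical stand-in for v8::internal::Isolate.
    struct Isolate {
      uint32_t HashSeed() const { return 0x9e3779b9u; }
    };

    class LiteralModel {
     public:
      LiteralModel(Isolate* isolate, uint32_t raw)
          : isolate_(isolate), raw_(raw) {}
      // No TLS lookup here: the dependency was captured up front.
      uint32_t Hash() const { return raw_ ^ isolate_->HashSeed(); }
     private:
      Isolate* isolate_;
      uint32_t raw_;
    };
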
// Base class for literals that needs space in the corresponding JSFunction.
-class MaterializedLiteral: public Expression {
+class MaterializedLiteral : public Expression {
public:
virtual MaterializedLiteral* AsMaterializedLiteral() { return this; }
@@ -1417,7 +1427,7 @@ class MaterializedLiteral: public Expression {
// Property is used for passing information
// about an object literal's properties from the parser
// to the code generator.
-class ObjectLiteralProperty: public ZoneObject {
+class ObjectLiteralProperty V8_FINAL : public ZoneObject {
public:
enum Kind {
CONSTANT, // Property with constant value (compile time).
@@ -1460,7 +1470,7 @@ class ObjectLiteralProperty: public ZoneObject {
// An object literal has a boilerplate object that is used
// for minimizing the work when constructing it at runtime.
-class ObjectLiteral: public MaterializedLiteral {
+class ObjectLiteral V8_FINAL : public MaterializedLiteral {
public:
typedef ObjectLiteralProperty Property;
@@ -1518,7 +1528,7 @@ class ObjectLiteral: public MaterializedLiteral {
// Node for capturing a regexp literal.
-class RegExpLiteral: public MaterializedLiteral {
+class RegExpLiteral V8_FINAL : public MaterializedLiteral {
public:
DECLARE_NODE_TYPE(RegExpLiteral)
@@ -1541,7 +1551,7 @@ class RegExpLiteral: public MaterializedLiteral {
// An array literal has a literals object that is used
// for minimizing the work when constructing it at runtime.
-class ArrayLiteral: public MaterializedLiteral {
+class ArrayLiteral V8_FINAL : public MaterializedLiteral {
public:
DECLARE_NODE_TYPE(ArrayLiteral)
@@ -1572,11 +1582,11 @@ class ArrayLiteral: public MaterializedLiteral {
};
-class VariableProxy: public Expression {
+class VariableProxy V8_FINAL : public Expression {
public:
DECLARE_NODE_TYPE(VariableProxy)
- virtual bool IsValidLeftHandSide() {
+ virtual bool IsValidLeftHandSide() V8_OVERRIDE {
return var_ == NULL ? true : var_->IsValidLeftHandSide();
}
@@ -1624,15 +1634,15 @@ class VariableProxy: public Expression {
};
-class Property: public Expression {
+class Property V8_FINAL : public Expression {
public:
DECLARE_NODE_TYPE(Property)
- virtual bool IsValidLeftHandSide() { return true; }
+ virtual bool IsValidLeftHandSide() V8_OVERRIDE { return true; }
Expression* obj() const { return obj_; }
Expression* key() const { return key_; }
- virtual int position() const { return pos_; }
+ virtual int position() const V8_OVERRIDE { return pos_; }
BailoutId LoadId() const { return load_id_; }
@@ -1642,9 +1652,11 @@ class Property: public Expression {
// Type feedback information.
void RecordTypeFeedback(TypeFeedbackOracle* oracle, Zone* zone);
- virtual bool IsMonomorphic() { return is_monomorphic_; }
- virtual SmallMapList* GetReceiverTypes() { return &receiver_types_; }
- virtual KeyedAccessStoreMode GetStoreMode() {
+ virtual bool IsMonomorphic() V8_OVERRIDE { return is_monomorphic_; }
+ virtual SmallMapList* GetReceiverTypes() V8_OVERRIDE {
+ return &receiver_types_;
+ }
+ virtual KeyedAccessStoreMode GetStoreMode() V8_OVERRIDE {
return STANDARD_STORE;
}
bool IsUninitialized() { return is_uninitialized_; }
@@ -1681,19 +1693,21 @@ class Property: public Expression {
};
-class Call: public Expression {
+class Call V8_FINAL : public Expression {
public:
DECLARE_NODE_TYPE(Call)
Expression* expression() const { return expression_; }
ZoneList<Expression*>* arguments() const { return arguments_; }
- virtual int position() const { return pos_; }
+ virtual int position() const V8_FINAL { return pos_; }
// Type feedback information.
TypeFeedbackId CallFeedbackId() const { return reuse(id()); }
void RecordTypeFeedback(TypeFeedbackOracle* oracle, CallKind call_kind);
- virtual SmallMapList* GetReceiverTypes() { return &receiver_types_; }
- virtual bool IsMonomorphic() { return is_monomorphic_; }
+ virtual SmallMapList* GetReceiverTypes() V8_OVERRIDE {
+ return &receiver_types_;
+ }
+ virtual bool IsMonomorphic() V8_OVERRIDE { return is_monomorphic_; }
CheckType check_type() const { return check_type_; }
void set_string_check(Handle<JSObject> holder) {
@@ -1764,18 +1778,18 @@ class Call: public Expression {
};
-class CallNew: public Expression {
+class CallNew V8_FINAL : public Expression {
public:
DECLARE_NODE_TYPE(CallNew)
Expression* expression() const { return expression_; }
ZoneList<Expression*>* arguments() const { return arguments_; }
- virtual int position() const { return pos_; }
+ virtual int position() const V8_OVERRIDE { return pos_; }
// Type feedback information.
TypeFeedbackId CallNewFeedbackId() const { return reuse(id()); }
void RecordTypeFeedback(TypeFeedbackOracle* oracle);
- virtual bool IsMonomorphic() { return is_monomorphic_; }
+ virtual bool IsMonomorphic() V8_OVERRIDE { return is_monomorphic_; }
Handle<JSFunction> target() const { return target_; }
ElementsKind elements_kind() const { return elements_kind_; }
Handle<Cell> allocation_info_cell() const {
@@ -1815,7 +1829,7 @@ class CallNew: public Expression {
// language construct. Instead it is used to call a C or JS function
// with a set of arguments. This is used from the builtins that are
// implemented in JavaScript (see "v8natives.js").
-class CallRuntime: public Expression {
+class CallRuntime V8_FINAL : public Expression {
public:
DECLARE_NODE_TYPE(CallRuntime)
@@ -1843,18 +1857,19 @@ class CallRuntime: public Expression {
};
-class UnaryOperation: public Expression {
+class UnaryOperation V8_FINAL : public Expression {
public:
DECLARE_NODE_TYPE(UnaryOperation)
Token::Value op() const { return op_; }
Expression* expression() const { return expression_; }
- virtual int position() const { return pos_; }
+ virtual int position() const V8_OVERRIDE { return pos_; }
BailoutId MaterializeTrueId() { return materialize_true_id_; }
BailoutId MaterializeFalseId() { return materialize_false_id_; }
- virtual void RecordToBooleanTypeFeedback(TypeFeedbackOracle* oracle);
+ virtual void RecordToBooleanTypeFeedback(
+ TypeFeedbackOracle* oracle) V8_OVERRIDE;
protected:
UnaryOperation(Isolate* isolate,
@@ -1882,7 +1897,7 @@ class UnaryOperation: public Expression {
};
-class BinaryOperation: public Expression {
+class BinaryOperation V8_FINAL : public Expression {
public:
DECLARE_NODE_TYPE(BinaryOperation)
@@ -1891,7 +1906,7 @@ class BinaryOperation: public Expression {
Token::Value op() const { return op_; }
Expression* left() const { return left_; }
Expression* right() const { return right_; }
- virtual int position() const { return pos_; }
+ virtual int position() const V8_OVERRIDE { return pos_; }
BailoutId RightId() const { return right_id_; }
@@ -1899,7 +1914,8 @@ class BinaryOperation: public Expression {
Maybe<int> fixed_right_arg() const { return fixed_right_arg_; }
void set_fixed_right_arg(Maybe<int> arg) { fixed_right_arg_ = arg; }
- virtual void RecordToBooleanTypeFeedback(TypeFeedbackOracle* oracle);
+ virtual void RecordToBooleanTypeFeedback(
+ TypeFeedbackOracle* oracle) V8_OVERRIDE;
protected:
BinaryOperation(Isolate* isolate,
@@ -1932,7 +1948,7 @@ class BinaryOperation: public Expression {
};
-class CountOperation: public Expression {
+class CountOperation V8_FINAL : public Expression {
public:
DECLARE_NODE_TYPE(CountOperation)
@@ -1945,14 +1961,14 @@ class CountOperation: public Expression {
}
Expression* expression() const { return expression_; }
- virtual int position() const { return pos_; }
+ virtual int position() const V8_OVERRIDE { return pos_; }
- virtual void MarkAsStatement() { is_prefix_ = true; }
-
- void RecordTypeFeedback(TypeFeedbackOracle* oracle, Zone* znoe);
- virtual bool IsMonomorphic() { return is_monomorphic_; }
- virtual SmallMapList* GetReceiverTypes() { return &receiver_types_; }
- virtual KeyedAccessStoreMode GetStoreMode() {
+ void RecordTypeFeedback(TypeFeedbackOracle* oracle, Zone* zone);
+ virtual bool IsMonomorphic() V8_OVERRIDE { return is_monomorphic_; }
+ virtual SmallMapList* GetReceiverTypes() V8_OVERRIDE {
+ return &receiver_types_;
+ }
+ virtual KeyedAccessStoreMode GetStoreMode() V8_OVERRIDE {
return store_mode_;
}
TypeInfo type() const { return type_; }
@@ -1994,14 +2010,14 @@ class CountOperation: public Expression {
};
-class CompareOperation: public Expression {
+class CompareOperation V8_FINAL : public Expression {
public:
DECLARE_NODE_TYPE(CompareOperation)
Token::Value op() const { return op_; }
Expression* left() const { return left_; }
Expression* right() const { return right_; }
- virtual int position() const { return pos_; }
+ virtual int position() const V8_OVERRIDE { return pos_; }
// Type feedback information.
TypeFeedbackId CompareOperationFeedbackId() const { return reuse(id()); }
@@ -2023,7 +2039,8 @@ class CompareOperation: public Expression {
op_(op),
left_(left),
right_(right),
- pos_(pos) {
+ pos_(pos),
+ combined_type_(Type::Null(), isolate) {
ASSERT(Token::IsCompareOp(op));
}
@@ -2037,7 +2054,7 @@ class CompareOperation: public Expression {
};
-class Conditional: public Expression {
+class Conditional V8_FINAL : public Expression {
public:
DECLARE_NODE_TYPE(Conditional)
@@ -2078,7 +2095,7 @@ class Conditional: public Expression {
};
-class Assignment: public Expression {
+class Assignment V8_FINAL : public Expression {
public:
DECLARE_NODE_TYPE(Assignment)
@@ -2089,7 +2106,7 @@ class Assignment: public Expression {
Token::Value op() const { return op_; }
Expression* target() const { return target_; }
Expression* value() const { return value_; }
- virtual int position() const { return pos_; }
+ virtual int position() const V8_OVERRIDE { return pos_; }
BinaryOperation* binary_operation() const { return binary_operation_; }
// This check relies on the definition order of token in token.h.
@@ -2100,10 +2117,12 @@ class Assignment: public Expression {
// Type feedback information.
TypeFeedbackId AssignmentFeedbackId() { return reuse(id()); }
void RecordTypeFeedback(TypeFeedbackOracle* oracle, Zone* zone);
- virtual bool IsMonomorphic() { return is_monomorphic_; }
+ virtual bool IsMonomorphic() V8_OVERRIDE { return is_monomorphic_; }
bool IsUninitialized() { return is_uninitialized_; }
- virtual SmallMapList* GetReceiverTypes() { return &receiver_types_; }
- virtual KeyedAccessStoreMode GetStoreMode() {
+ virtual SmallMapList* GetReceiverTypes() V8_OVERRIDE {
+ return &receiver_types_;
+ }
+ virtual KeyedAccessStoreMode GetStoreMode() V8_OVERRIDE {
return store_mode_;
}
@@ -2139,7 +2158,7 @@ class Assignment: public Expression {
};
-class Yield: public Expression {
+class Yield V8_FINAL : public Expression {
public:
DECLARE_NODE_TYPE(Yield)
@@ -2153,7 +2172,7 @@ class Yield: public Expression {
Expression* generator_object() const { return generator_object_; }
Expression* expression() const { return expression_; }
Kind yield_kind() const { return yield_kind_; }
- virtual int position() const { return pos_; }
+ virtual int position() const V8_OVERRIDE { return pos_; }
// Delegating yield surrounds the "yield" in a "try/catch". This index
// locates the catch handler in the handler table, and is equivalent to
@@ -2189,12 +2208,12 @@ class Yield: public Expression {
};
-class Throw: public Expression {
+class Throw V8_FINAL : public Expression {
public:
DECLARE_NODE_TYPE(Throw)
Expression* exception() const { return exception_; }
- virtual int position() const { return pos_; }
+ virtual int position() const V8_OVERRIDE { return pos_; }
protected:
Throw(Isolate* isolate, Expression* exception, int pos)
@@ -2206,7 +2225,7 @@ class Throw: public Expression {
};
-class FunctionLiteral: public Expression {
+class FunctionLiteral V8_FINAL : public Expression {
public:
enum FunctionType {
ANONYMOUS_EXPRESSION,
@@ -2298,6 +2317,12 @@ class FunctionLiteral: public Expression {
ast_properties_ = *ast_properties;
}
+ bool dont_optimize() { return dont_optimize_reason_ != kNoReason; }
+ BailoutReason dont_optimize_reason() { return dont_optimize_reason_; }
+ void set_dont_optimize_reason(BailoutReason reason) {
+ dont_optimize_reason_ = reason;
+ }
+
protected:
FunctionLiteral(Isolate* isolate,
Handle<String> name,
@@ -2317,6 +2342,7 @@ class FunctionLiteral: public Expression {
scope_(scope),
body_(body),
inferred_name_(isolate->factory()->empty_string()),
+ dont_optimize_reason_(kNoReason),
materialized_literal_count_(materialized_literal_count),
expected_property_count_(expected_property_count),
handler_count_(handler_count),
@@ -2338,6 +2364,7 @@ class FunctionLiteral: public Expression {
ZoneList<Statement*>* body_;
Handle<String> inferred_name_;
AstProperties ast_properties_;
+ BailoutReason dont_optimize_reason_;
int materialized_literal_count_;
int expected_property_count_;
@@ -2356,7 +2383,7 @@ class FunctionLiteral: public Expression {
};
-class SharedFunctionInfoLiteral: public Expression {
+class SharedFunctionInfoLiteral V8_FINAL : public Expression {
public:
DECLARE_NODE_TYPE(SharedFunctionInfoLiteral)
@@ -2376,7 +2403,7 @@ class SharedFunctionInfoLiteral: public Expression {
};
-class ThisFunction: public Expression {
+class ThisFunction V8_FINAL : public Expression {
public:
DECLARE_NODE_TYPE(ThisFunction)
@@ -2401,10 +2428,10 @@ class RegExpVisitor BASE_EMBEDDED {
};
-class RegExpTree: public ZoneObject {
+class RegExpTree : public ZoneObject {
public:
static const int kInfinity = kMaxInt;
- virtual ~RegExpTree() { }
+ virtual ~RegExpTree() {}
virtual void* Accept(RegExpVisitor* visitor, void* data) = 0;
virtual RegExpNode* ToNode(RegExpCompiler* compiler,
RegExpNode* on_success) = 0;
@@ -2426,19 +2453,19 @@ class RegExpTree: public ZoneObject {
};
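
The Accept overrides below all implement classic double dispatch: the first virtual call selects the concrete node class, which then calls the matching Visit* method on the visitor. A standalone model of the mechanism, simplified to a single node type but keeping the void* data cookie of the real API:

    struct RegExpAtomModel;

    struct RegExpVisitorModel {
      virtual ~RegExpVisitorModel() {}
      virtual void* VisitAtom(RegExpAtomModel* atom, void* data) = 0;
    };

    struct RegExpTreeModel {
      virtual ~RegExpTreeModel() {}
      virtual void* Accept(RegExpVisitorModel* visitor, void* data) = 0;
    };

    struct RegExpAtomModel : RegExpTreeModel {
      virtual void* Accept(RegExpVisitorModel* visitor,
                           void* data) override {
        return visitor->VisitAtom(this, data);  // dispatch on node type
      }
    };
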
-class RegExpDisjunction: public RegExpTree {
+class RegExpDisjunction V8_FINAL : public RegExpTree {
public:
explicit RegExpDisjunction(ZoneList<RegExpTree*>* alternatives);
- virtual void* Accept(RegExpVisitor* visitor, void* data);
+ virtual void* Accept(RegExpVisitor* visitor, void* data) V8_OVERRIDE;
virtual RegExpNode* ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success);
- virtual RegExpDisjunction* AsDisjunction();
- virtual Interval CaptureRegisters();
- virtual bool IsDisjunction();
- virtual bool IsAnchoredAtStart();
- virtual bool IsAnchoredAtEnd();
- virtual int min_match() { return min_match_; }
- virtual int max_match() { return max_match_; }
+ RegExpNode* on_success) V8_OVERRIDE;
+ virtual RegExpDisjunction* AsDisjunction() V8_OVERRIDE;
+ virtual Interval CaptureRegisters() V8_OVERRIDE;
+ virtual bool IsDisjunction() V8_OVERRIDE;
+ virtual bool IsAnchoredAtStart() V8_OVERRIDE;
+ virtual bool IsAnchoredAtEnd() V8_OVERRIDE;
+ virtual int min_match() V8_OVERRIDE { return min_match_; }
+ virtual int max_match() V8_OVERRIDE { return max_match_; }
ZoneList<RegExpTree*>* alternatives() { return alternatives_; }
private:
ZoneList<RegExpTree*>* alternatives_;
@@ -2447,19 +2474,19 @@ class RegExpDisjunction: public RegExpTree {
};
-class RegExpAlternative: public RegExpTree {
+class RegExpAlternative V8_FINAL : public RegExpTree {
public:
explicit RegExpAlternative(ZoneList<RegExpTree*>* nodes);
- virtual void* Accept(RegExpVisitor* visitor, void* data);
+ virtual void* Accept(RegExpVisitor* visitor, void* data) V8_OVERRIDE;
virtual RegExpNode* ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success);
- virtual RegExpAlternative* AsAlternative();
- virtual Interval CaptureRegisters();
- virtual bool IsAlternative();
- virtual bool IsAnchoredAtStart();
- virtual bool IsAnchoredAtEnd();
- virtual int min_match() { return min_match_; }
- virtual int max_match() { return max_match_; }
+ RegExpNode* on_success) V8_OVERRIDE;
+ virtual RegExpAlternative* AsAlternative() V8_OVERRIDE;
+ virtual Interval CaptureRegisters() V8_OVERRIDE;
+ virtual bool IsAlternative() V8_OVERRIDE;
+ virtual bool IsAnchoredAtStart() V8_OVERRIDE;
+ virtual bool IsAnchoredAtEnd() V8_OVERRIDE;
+ virtual int min_match() V8_OVERRIDE { return min_match_; }
+ virtual int max_match() V8_OVERRIDE { return max_match_; }
ZoneList<RegExpTree*>* nodes() { return nodes_; }
private:
ZoneList<RegExpTree*>* nodes_;
@@ -2468,7 +2495,7 @@ class RegExpAlternative: public RegExpTree {
};
-class RegExpAssertion: public RegExpTree {
+class RegExpAssertion V8_FINAL : public RegExpTree {
public:
enum AssertionType {
START_OF_LINE,
@@ -2479,22 +2506,22 @@ class RegExpAssertion: public RegExpTree {
NON_BOUNDARY
};
explicit RegExpAssertion(AssertionType type) : assertion_type_(type) { }
- virtual void* Accept(RegExpVisitor* visitor, void* data);
+ virtual void* Accept(RegExpVisitor* visitor, void* data) V8_OVERRIDE;
virtual RegExpNode* ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success);
- virtual RegExpAssertion* AsAssertion();
- virtual bool IsAssertion();
- virtual bool IsAnchoredAtStart();
- virtual bool IsAnchoredAtEnd();
- virtual int min_match() { return 0; }
- virtual int max_match() { return 0; }
+ RegExpNode* on_success) V8_OVERRIDE;
+ virtual RegExpAssertion* AsAssertion() V8_OVERRIDE;
+ virtual bool IsAssertion() V8_OVERRIDE;
+ virtual bool IsAnchoredAtStart() V8_OVERRIDE;
+ virtual bool IsAnchoredAtEnd() V8_OVERRIDE;
+ virtual int min_match() V8_OVERRIDE { return 0; }
+ virtual int max_match() V8_OVERRIDE { return 0; }
AssertionType assertion_type() { return assertion_type_; }
private:
AssertionType assertion_type_;
};
-class CharacterSet BASE_EMBEDDED {
+class CharacterSet V8_FINAL BASE_EMBEDDED {
public:
explicit CharacterSet(uc16 standard_set_type)
: ranges_(NULL),
@@ -2517,7 +2544,7 @@ class CharacterSet BASE_EMBEDDED {
};
-class RegExpCharacterClass: public RegExpTree {
+class RegExpCharacterClass V8_FINAL : public RegExpTree {
public:
RegExpCharacterClass(ZoneList<CharacterRange>* ranges, bool is_negated)
: set_(ranges),
@@ -2525,15 +2552,15 @@ class RegExpCharacterClass: public RegExpTree {
explicit RegExpCharacterClass(uc16 type)
: set_(type),
is_negated_(false) { }
- virtual void* Accept(RegExpVisitor* visitor, void* data);
+ virtual void* Accept(RegExpVisitor* visitor, void* data) V8_OVERRIDE;
virtual RegExpNode* ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success);
- virtual RegExpCharacterClass* AsCharacterClass();
- virtual bool IsCharacterClass();
- virtual bool IsTextElement() { return true; }
- virtual int min_match() { return 1; }
- virtual int max_match() { return 1; }
- virtual void AppendToText(RegExpText* text, Zone* zone);
+ RegExpNode* on_success) V8_OVERRIDE;
+ virtual RegExpCharacterClass* AsCharacterClass() V8_OVERRIDE;
+ virtual bool IsCharacterClass() V8_OVERRIDE;
+ virtual bool IsTextElement() V8_OVERRIDE { return true; }
+ virtual int min_match() V8_OVERRIDE { return 1; }
+ virtual int max_match() V8_OVERRIDE { return 1; }
+ virtual void AppendToText(RegExpText* text, Zone* zone) V8_OVERRIDE;
CharacterSet character_set() { return set_; }
// TODO(lrn): Remove need for complex version if is_standard that
// recognizes a mangled standard set and just do { return set_.is_special(); }
@@ -2559,18 +2586,18 @@ class RegExpCharacterClass: public RegExpTree {
};
-class RegExpAtom: public RegExpTree {
+class RegExpAtom V8_FINAL : public RegExpTree {
public:
explicit RegExpAtom(Vector<const uc16> data) : data_(data) { }
- virtual void* Accept(RegExpVisitor* visitor, void* data);
+ virtual void* Accept(RegExpVisitor* visitor, void* data) V8_OVERRIDE;
virtual RegExpNode* ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success);
- virtual RegExpAtom* AsAtom();
- virtual bool IsAtom();
- virtual bool IsTextElement() { return true; }
- virtual int min_match() { return data_.length(); }
- virtual int max_match() { return data_.length(); }
- virtual void AppendToText(RegExpText* text, Zone* zone);
+ RegExpNode* on_success) V8_OVERRIDE;
+ virtual RegExpAtom* AsAtom() V8_OVERRIDE;
+ virtual bool IsAtom() V8_OVERRIDE;
+ virtual bool IsTextElement() V8_OVERRIDE { return true; }
+ virtual int min_match() V8_OVERRIDE { return data_.length(); }
+ virtual int max_match() V8_OVERRIDE { return data_.length(); }
+ virtual void AppendToText(RegExpText* text, Zone* zone) V8_OVERRIDE;
Vector<const uc16> data() { return data_; }
int length() { return data_.length(); }
private:
@@ -2578,18 +2605,18 @@ class RegExpAtom: public RegExpTree {
};
-class RegExpText: public RegExpTree {
+class RegExpText V8_FINAL : public RegExpTree {
public:
explicit RegExpText(Zone* zone) : elements_(2, zone), length_(0) {}
- virtual void* Accept(RegExpVisitor* visitor, void* data);
+ virtual void* Accept(RegExpVisitor* visitor, void* data) V8_OVERRIDE;
virtual RegExpNode* ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success);
- virtual RegExpText* AsText();
- virtual bool IsText();
- virtual bool IsTextElement() { return true; }
- virtual int min_match() { return length_; }
- virtual int max_match() { return length_; }
- virtual void AppendToText(RegExpText* text, Zone* zone);
+ RegExpNode* on_success) V8_OVERRIDE;
+ virtual RegExpText* AsText() V8_OVERRIDE;
+ virtual bool IsText() V8_OVERRIDE;
+ virtual bool IsTextElement() V8_OVERRIDE { return true; }
+ virtual int min_match() V8_OVERRIDE { return length_; }
+ virtual int max_match() V8_OVERRIDE { return length_; }
+ virtual void AppendToText(RegExpText* text, Zone* zone) V8_OVERRIDE;
void AddElement(TextElement elm, Zone* zone) {
elements_.Add(elm, zone);
length_ += elm.length();
@@ -2601,7 +2628,7 @@ class RegExpText: public RegExpTree {
};
-class RegExpQuantifier: public RegExpTree {
+class RegExpQuantifier V8_FINAL : public RegExpTree {
public:
enum QuantifierType { GREEDY, NON_GREEDY, POSSESSIVE };
RegExpQuantifier(int min, int max, QuantifierType type, RegExpTree* body)
@@ -2616,9 +2643,9 @@ class RegExpQuantifier: public RegExpTree {
max_match_ = max * body->max_match();
}
}
- virtual void* Accept(RegExpVisitor* visitor, void* data);
+ virtual void* Accept(RegExpVisitor* visitor, void* data) V8_OVERRIDE;
virtual RegExpNode* ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success);
+ RegExpNode* on_success) V8_OVERRIDE;
static RegExpNode* ToNode(int min,
int max,
bool is_greedy,
@@ -2626,11 +2653,11 @@ class RegExpQuantifier: public RegExpTree {
RegExpCompiler* compiler,
RegExpNode* on_success,
bool not_at_start = false);
- virtual RegExpQuantifier* AsQuantifier();
- virtual Interval CaptureRegisters();
- virtual bool IsQuantifier();
- virtual int min_match() { return min_match_; }
- virtual int max_match() { return max_match_; }
+ virtual RegExpQuantifier* AsQuantifier() V8_OVERRIDE;
+ virtual Interval CaptureRegisters() V8_OVERRIDE;
+ virtual bool IsQuantifier() V8_OVERRIDE;
+ virtual int min_match() V8_OVERRIDE { return min_match_; }
+ virtual int max_match() V8_OVERRIDE { return max_match_; }
int min() { return min_; }
int max() { return max_; }
bool is_possessive() { return quantifier_type_ == POSSESSIVE; }
@@ -2648,24 +2675,24 @@ class RegExpQuantifier: public RegExpTree {
};
-class RegExpCapture: public RegExpTree {
+class RegExpCapture V8_FINAL : public RegExpTree {
public:
explicit RegExpCapture(RegExpTree* body, int index)
: body_(body), index_(index) { }
- virtual void* Accept(RegExpVisitor* visitor, void* data);
+ virtual void* Accept(RegExpVisitor* visitor, void* data) V8_OVERRIDE;
virtual RegExpNode* ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success);
+ RegExpNode* on_success) V8_OVERRIDE;
static RegExpNode* ToNode(RegExpTree* body,
int index,
RegExpCompiler* compiler,
RegExpNode* on_success);
- virtual RegExpCapture* AsCapture();
- virtual bool IsAnchoredAtStart();
- virtual bool IsAnchoredAtEnd();
- virtual Interval CaptureRegisters();
- virtual bool IsCapture();
- virtual int min_match() { return body_->min_match(); }
- virtual int max_match() { return body_->max_match(); }
+ virtual RegExpCapture* AsCapture() V8_OVERRIDE;
+ virtual bool IsAnchoredAtStart() V8_OVERRIDE;
+ virtual bool IsAnchoredAtEnd() V8_OVERRIDE;
+ virtual Interval CaptureRegisters() V8_OVERRIDE;
+ virtual bool IsCapture() V8_OVERRIDE;
+ virtual int min_match() V8_OVERRIDE { return body_->min_match(); }
+ virtual int max_match() V8_OVERRIDE { return body_->max_match(); }
RegExpTree* body() { return body_; }
int index() { return index_; }
static int StartRegister(int index) { return index * 2; }
@@ -2677,7 +2704,7 @@ class RegExpCapture: public RegExpTree {
};
-class RegExpLookahead: public RegExpTree {
+class RegExpLookahead V8_FINAL : public RegExpTree {
public:
RegExpLookahead(RegExpTree* body,
bool is_positive,
@@ -2688,15 +2715,15 @@ class RegExpLookahead: public RegExpTree {
capture_count_(capture_count),
capture_from_(capture_from) { }
- virtual void* Accept(RegExpVisitor* visitor, void* data);
+ virtual void* Accept(RegExpVisitor* visitor, void* data) V8_OVERRIDE;
virtual RegExpNode* ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success);
- virtual RegExpLookahead* AsLookahead();
- virtual Interval CaptureRegisters();
- virtual bool IsLookahead();
- virtual bool IsAnchoredAtStart();
- virtual int min_match() { return 0; }
- virtual int max_match() { return 0; }
+ RegExpNode* on_success) V8_OVERRIDE;
+ virtual RegExpLookahead* AsLookahead() V8_OVERRIDE;
+ virtual Interval CaptureRegisters() V8_OVERRIDE;
+ virtual bool IsLookahead() V8_OVERRIDE;
+ virtual bool IsAnchoredAtStart() V8_OVERRIDE;
+ virtual int min_match() V8_OVERRIDE { return 0; }
+ virtual int max_match() V8_OVERRIDE { return 0; }
RegExpTree* body() { return body_; }
bool is_positive() { return is_positive_; }
int capture_count() { return capture_count_; }
@@ -2710,17 +2737,17 @@ class RegExpLookahead: public RegExpTree {
};
-class RegExpBackReference: public RegExpTree {
+class RegExpBackReference V8_FINAL : public RegExpTree {
public:
explicit RegExpBackReference(RegExpCapture* capture)
: capture_(capture) { }
- virtual void* Accept(RegExpVisitor* visitor, void* data);
+ virtual void* Accept(RegExpVisitor* visitor, void* data) V8_OVERRIDE;
virtual RegExpNode* ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success);
- virtual RegExpBackReference* AsBackReference();
- virtual bool IsBackReference();
- virtual int min_match() { return 0; }
- virtual int max_match() { return capture_->max_match(); }
+ RegExpNode* on_success) V8_OVERRIDE;
+ virtual RegExpBackReference* AsBackReference() V8_OVERRIDE;
+ virtual bool IsBackReference() V8_OVERRIDE;
+ virtual int min_match() V8_OVERRIDE { return 0; }
+ virtual int max_match() V8_OVERRIDE { return capture_->max_match(); }
int index() { return capture_->index(); }
RegExpCapture* capture() { return capture_; }
private:
@@ -2728,16 +2755,16 @@ class RegExpBackReference: public RegExpTree {
};
-class RegExpEmpty: public RegExpTree {
+class RegExpEmpty V8_FINAL : public RegExpTree {
public:
RegExpEmpty() { }
- virtual void* Accept(RegExpVisitor* visitor, void* data);
+ virtual void* Accept(RegExpVisitor* visitor, void* data) V8_OVERRIDE;
virtual RegExpNode* ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success);
- virtual RegExpEmpty* AsEmpty();
- virtual bool IsEmpty();
- virtual int min_match() { return 0; }
- virtual int max_match() { return 0; }
+ RegExpNode* on_success) V8_OVERRIDE;
+ virtual RegExpEmpty* AsEmpty() V8_OVERRIDE;
+ virtual bool IsEmpty() V8_OVERRIDE;
+ virtual int min_match() V8_OVERRIDE { return 0; }
+ virtual int max_match() V8_OVERRIDE { return 0; }
static RegExpEmpty* GetInstance() {
static RegExpEmpty* instance = ::new RegExpEmpty();
return instance;
@@ -2761,7 +2788,7 @@ inline ModuleVariable::ModuleVariable(VariableProxy* proxy)
class AstVisitor BASE_EMBEDDED {
public:
AstVisitor() {}
- virtual ~AstVisitor() { }
+ virtual ~AstVisitor() {}
// Stack overflow check and dynamic dispatch.
virtual void Visit(AstNode* node) = 0;
@@ -2781,7 +2808,7 @@ class AstVisitor BASE_EMBEDDED {
#define DEFINE_AST_VISITOR_SUBCLASS_MEMBERS() \
public: \
- virtual void Visit(AstNode* node) { \
+ virtual void Visit(AstNode* node) V8_FINAL V8_OVERRIDE { \
if (!CheckStackOverflow()) node->Accept(this); \
} \
\
@@ -2797,8 +2824,8 @@ public: \
} \
\
private: \
- void InitializeAstVisitor() { \
- isolate_ = Isolate::Current(); \
+ void InitializeAstVisitor(Isolate* isolate) { \
+ isolate_ = isolate; \
stack_overflow_ = false; \
} \
Isolate* isolate() { return isolate_; } \
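
InitializeAstVisitor now takes the isolate explicitly instead of reaching for Isolate::Current(), so each visitor subclass is expected to thread an Isolate* through its constructor. A hypothetical subclass showing the new wiring (the Visit* overrides the macro requires are elided):

    class NodeCounter : public AstVisitor {  // hypothetical visitor
     public:
      explicit NodeCounter(Isolate* isolate) : count_(0) {
        InitializeAstVisitor(isolate);  // was: InitializeAstVisitor()
      }
      int count() const { return count_; }
      // ... Visit* overrides for each AST node type go here ...
     private:
      int count_;
      DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
    };
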
@@ -2812,9 +2839,10 @@ private: \
class AstConstructionVisitor BASE_EMBEDDED {
public:
- AstConstructionVisitor() { }
+ AstConstructionVisitor() : dont_optimize_reason_(kNoReason) { }
AstProperties* ast_properties() { return &properties_; }
+ BailoutReason dont_optimize_reason() { return dont_optimize_reason_; }
private:
template<class> friend class AstNodeFactory;
@@ -2827,8 +2855,12 @@ class AstConstructionVisitor BASE_EMBEDDED {
void increase_node_count() { properties_.add_node_count(1); }
void add_flag(AstPropertiesFlag flag) { properties_.flags()->Add(flag); }
+ void set_dont_optimize_reason(BailoutReason reason) {
+ dont_optimize_reason_ = reason;
+ }
AstProperties properties_;
+ BailoutReason dont_optimize_reason_;
};
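
Together with the dont_optimize_reason field added to FunctionLiteral above, this gives the pipeline a specific BailoutReason rather than a bare don't-optimize flag. An illustrative producer, modeled on the AstConstructionVisitor hooks in ast.cc (kDebuggerStatement is assumed to be one of the BailoutReason values):

    void AstConstructionVisitor::VisitDebuggerStatement(
        DebuggerStatement* node) {
      increase_node_count();
      set_dont_optimize_reason(kDebuggerStatement);
    }

    // Consumer sketch (assumed API, shown for shape only):
    //   if (literal->dont_optimize()) {
    //     info->AbortOptimization(literal->dont_optimize_reason());
    //   }
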
@@ -2847,7 +2879,7 @@ class AstNullVisitor BASE_EMBEDDED {
// AstNode factory
template<class Visitor>
-class AstNodeFactory BASE_EMBEDDED {
+class AstNodeFactory V8_FINAL BASE_EMBEDDED {
public:
AstNodeFactory(Isolate* isolate, Zone* zone)
: isolate_(isolate),
diff --git a/deps/v8/src/bootstrapper.cc b/deps/v8/src/bootstrapper.cc
index 2a385aa48..0756aefb0 100644
--- a/deps/v8/src/bootstrapper.cc
+++ b/deps/v8/src/bootstrapper.cc
@@ -45,10 +45,6 @@
#include "extensions/statistics-extension.h"
#include "code-stubs.h"
-#if defined(V8_I18N_SUPPORT)
-#include "extensions/i18n/i18n-extension.h"
-#endif
-
namespace v8 {
namespace internal {
@@ -98,7 +94,7 @@ Handle<String> Bootstrapper::NativesSourceLookup(int index) {
void Bootstrapper::Initialize(bool create_heap_objects) {
- extensions_cache_.Initialize(create_heap_objects);
+ extensions_cache_.Initialize(isolate_, create_heap_objects);
}
@@ -106,9 +102,6 @@ void Bootstrapper::InitializeOncePerProcess() {
GCExtension::Register();
ExternalizeStringExtension::Register();
StatisticsExtension::Register();
-#if defined(V8_I18N_SUPPORT)
- v8_i18n::Extension::Register();
-#endif
}
@@ -147,7 +140,7 @@ void Bootstrapper::TearDown() {
delete_these_arrays_on_tear_down_ = NULL;
}
- extensions_cache_.Initialize(false); // Yes, symmetrical
+ extensions_cache_.Initialize(isolate_, false); // Yes, symmetrical
}
@@ -491,7 +484,7 @@ Handle<JSFunction> Genesis::CreateEmptyFunction(Isolate* isolate) {
// prototype, otherwise the missing initial_array_prototype will cause
// assertions during startup.
native_context()->set_initial_array_prototype(*prototype);
- SetPrototype(object_fun, prototype);
+ Accessors::FunctionSetPrototype(object_fun, prototype);
}
// Allocate the empty function as the prototype for function ECMAScript
@@ -1064,6 +1057,54 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
native_context()->set_json_object(*json_object);
}
+ { // -- A r r a y B u f f e r
+ Handle<JSFunction> array_buffer_fun =
+ InstallFunction(
+ global, "ArrayBuffer", JS_ARRAY_BUFFER_TYPE,
+ JSArrayBuffer::kSizeWithInternalFields,
+ isolate->initial_object_prototype(),
+ Builtins::kIllegal, true, true);
+ native_context()->set_array_buffer_fun(*array_buffer_fun);
+ }
+
+ { // -- T y p e d A r r a y s
+ Handle<JSFunction> int8_fun = InstallTypedArray("Int8Array",
+ EXTERNAL_BYTE_ELEMENTS);
+ native_context()->set_int8_array_fun(*int8_fun);
+ Handle<JSFunction> uint8_fun = InstallTypedArray("Uint8Array",
+ EXTERNAL_UNSIGNED_BYTE_ELEMENTS);
+ native_context()->set_uint8_array_fun(*uint8_fun);
+ Handle<JSFunction> int16_fun = InstallTypedArray("Int16Array",
+ EXTERNAL_SHORT_ELEMENTS);
+ native_context()->set_int16_array_fun(*int16_fun);
+ Handle<JSFunction> uint16_fun = InstallTypedArray("Uint16Array",
+ EXTERNAL_UNSIGNED_SHORT_ELEMENTS);
+ native_context()->set_uint16_array_fun(*uint16_fun);
+ Handle<JSFunction> int32_fun = InstallTypedArray("Int32Array",
+ EXTERNAL_INT_ELEMENTS);
+ native_context()->set_int32_array_fun(*int32_fun);
+ Handle<JSFunction> uint32_fun = InstallTypedArray("Uint32Array",
+ EXTERNAL_UNSIGNED_INT_ELEMENTS);
+ native_context()->set_uint32_array_fun(*uint32_fun);
+ Handle<JSFunction> float_fun = InstallTypedArray("Float32Array",
+ EXTERNAL_FLOAT_ELEMENTS);
+ native_context()->set_float_array_fun(*float_fun);
+ Handle<JSFunction> double_fun = InstallTypedArray("Float64Array",
+ EXTERNAL_DOUBLE_ELEMENTS);
+ native_context()->set_double_array_fun(*double_fun);
+ Handle<JSFunction> uint8c_fun = InstallTypedArray("Uint8ClampedArray",
+ EXTERNAL_PIXEL_ELEMENTS);
+ native_context()->set_uint8c_array_fun(*uint8c_fun);
+
+ Handle<JSFunction> data_view_fun =
+ InstallFunction(
+ global, "DataView", JS_DATA_VIEW_TYPE,
+ JSDataView::kSizeWithInternalFields,
+ isolate->initial_object_prototype(),
+ Builtins::kIllegal, true, true);
+ native_context()->set_data_view_fun(*data_view_fun);
+ }
+
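
InstallTypedArray is a Genesis helper defined outside these hunks; reconstructed from the call sites above, its shape is approximately the following (parameter names are guesses):

    // Reconstruction from call sites, not the actual declaration.
    Handle<JSFunction> Genesis::InstallTypedArray(
        const char* name,             // e.g. "Int8Array"
        ElementsKind elements_kind);  // e.g. EXTERNAL_BYTE_ELEMENTS
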
{ // --- arguments_boilerplate_
// Make sure we can recognize argument objects at runtime.
// This is done by introducing an anonymous function with
@@ -1095,12 +1136,12 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
JSObject::SetLocalPropertyIgnoreAttributes(
result, factory->length_string(),
factory->undefined_value(), DONT_ENUM,
- Object::FORCE_TAGGED, JSReceiver::FORCE_FIELD));
+ Object::FORCE_TAGGED, FORCE_FIELD));
CHECK_NOT_EMPTY_HANDLE(isolate,
JSObject::SetLocalPropertyIgnoreAttributes(
result, factory->callee_string(),
factory->undefined_value(), DONT_ENUM,
- Object::FORCE_TAGGED, JSReceiver::FORCE_FIELD));
+ Object::FORCE_TAGGED, FORCE_FIELD));
#ifdef DEBUG
LookupResult lookup(isolate);
@@ -1268,13 +1309,9 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
Handle<FixedArray> embedder_data = factory->NewFixedArray(2);
native_context()->set_embedder_data(*embedder_data);
- {
- // Initialize the random seed slot.
- Handle<ByteArray> zeroed_byte_array(
- factory->NewByteArray(kRandomStateSize));
- native_context()->set_random_seed(*zeroed_byte_array);
- memset(zeroed_byte_array->GetDataStartAddress(), 0, kRandomStateSize);
- }
+ // Allocate the random seed slot.
+ Handle<ByteArray> random_seed = factory->NewByteArray(kRandomStateSize);
+ native_context()->set_random_seed(*random_seed);
}
@@ -1331,56 +1368,6 @@ void Genesis::InitializeExperimentalGlobal() {
}
}
- if (FLAG_harmony_array_buffer) {
- // -- A r r a y B u f f e r
- Handle<JSFunction> array_buffer_fun =
- InstallFunction(
- global, "ArrayBuffer", JS_ARRAY_BUFFER_TYPE,
- JSArrayBuffer::kSizeWithInternalFields,
- isolate()->initial_object_prototype(),
- Builtins::kIllegal, true, true);
- native_context()->set_array_buffer_fun(*array_buffer_fun);
- }
-
- if (FLAG_harmony_typed_arrays) {
- // -- T y p e d A r r a y s
- Handle<JSFunction> int8_fun = InstallTypedArray("Int8Array",
- EXTERNAL_BYTE_ELEMENTS);
- native_context()->set_int8_array_fun(*int8_fun);
- Handle<JSFunction> uint8_fun = InstallTypedArray("Uint8Array",
- EXTERNAL_UNSIGNED_BYTE_ELEMENTS);
- native_context()->set_uint8_array_fun(*uint8_fun);
- Handle<JSFunction> int16_fun = InstallTypedArray("Int16Array",
- EXTERNAL_SHORT_ELEMENTS);
- native_context()->set_int16_array_fun(*int16_fun);
- Handle<JSFunction> uint16_fun = InstallTypedArray("Uint16Array",
- EXTERNAL_UNSIGNED_SHORT_ELEMENTS);
- native_context()->set_uint16_array_fun(*uint16_fun);
- Handle<JSFunction> int32_fun = InstallTypedArray("Int32Array",
- EXTERNAL_INT_ELEMENTS);
- native_context()->set_int32_array_fun(*int32_fun);
- Handle<JSFunction> uint32_fun = InstallTypedArray("Uint32Array",
- EXTERNAL_UNSIGNED_INT_ELEMENTS);
- native_context()->set_uint32_array_fun(*uint32_fun);
- Handle<JSFunction> float_fun = InstallTypedArray("Float32Array",
- EXTERNAL_FLOAT_ELEMENTS);
- native_context()->set_float_array_fun(*float_fun);
- Handle<JSFunction> double_fun = InstallTypedArray("Float64Array",
- EXTERNAL_DOUBLE_ELEMENTS);
- native_context()->set_double_array_fun(*double_fun);
- Handle<JSFunction> uint8c_fun = InstallTypedArray("Uint8ClampedArray",
- EXTERNAL_PIXEL_ELEMENTS);
- native_context()->set_uint8c_array_fun(*uint8c_fun);
-
- Handle<JSFunction> data_view_fun =
- InstallFunction(
- global, "DataView", JS_DATA_VIEW_TYPE,
- JSDataView::kSizeWithInternalFields,
- isolate()->initial_object_prototype(),
- Builtins::kIllegal, true, true);
- native_context()->set_data_view_fun(*data_view_fun);
- }
-
if (FLAG_harmony_generators) {
// Create generator meta-objects and install them on the builtins object.
Handle<JSObject> builtins(native_context()->builtins());
@@ -1554,7 +1541,7 @@ bool Genesis::CompileScriptCached(Isolate* isolate,
: top_context->global_object(),
isolate);
bool has_pending_exception;
- Execution::Call(fun, receiver, 0, NULL, &has_pending_exception);
+ Execution::Call(isolate, fun, receiver, 0, NULL, &has_pending_exception);
if (has_pending_exception) return false;
return true;
}
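
Execution::Call gains a leading Isolate* parameter in this upgrade; the shape implied by the call sites in this diff (parameter names are guesses) is:

    // Reconstructed from call sites in bootstrapper.cc and builtins.cc.
    Handle<Object> result = Execution::Call(
        isolate, fun, receiver, 0 /* argc */, NULL /* argv */,
        &has_pending_exception);
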
@@ -1632,7 +1619,7 @@ Handle<JSFunction> Genesis::InstallInternalArray(
true, true);
Handle<JSObject> prototype =
factory()->NewJSObject(isolate()->object_function(), TENURED);
- SetPrototype(array_function, prototype);
+ Accessors::FunctionSetPrototype(array_function, prototype);
InternalArrayConstructorStub internal_array_constructor_stub(isolate());
Handle<Code> code = internal_array_constructor_stub.GetCode(isolate());
@@ -1730,7 +1717,7 @@ bool Genesis::InstallNatives() {
Builtins::kIllegal, false, false);
Handle<JSObject> prototype =
factory()->NewJSObject(isolate()->object_function(), TENURED);
- SetPrototype(script_fun, prototype);
+ Accessors::FunctionSetPrototype(script_fun, prototype);
native_context()->set_script_function(*script_fun);
Handle<Map> script_map = Handle<Map>(script_fun->initial_map());
@@ -1886,7 +1873,7 @@ bool Genesis::InstallNatives() {
Builtins::kIllegal, false, false);
Handle<JSObject> prototype =
factory()->NewJSObject(isolate()->object_function(), TENURED);
- SetPrototype(opaque_reference_fun, prototype);
+ Accessors::FunctionSetPrototype(opaque_reference_fun, prototype);
native_context()->set_opaque_reference_function(*opaque_reference_fun);
}
@@ -2060,16 +2047,6 @@ bool Genesis::InstallExperimentalNatives() {
"native object-observe.js") == 0) {
if (!CompileExperimentalBuiltin(isolate(), i)) return false;
}
- if (FLAG_harmony_array_buffer &&
- strcmp(ExperimentalNatives::GetScriptName(i).start(),
- "native arraybuffer.js") == 0) {
- if (!CompileExperimentalBuiltin(isolate(), i)) return false;
- }
- if (FLAG_harmony_typed_arrays &&
- strcmp(ExperimentalNatives::GetScriptName(i).start(),
- "native typedarray.js") == 0) {
- if (!CompileExperimentalBuiltin(isolate(), i)) return false;
- }
if (FLAG_harmony_generators &&
strcmp(ExperimentalNatives::GetScriptName(i).start(),
"native generator.js") == 0) {
@@ -2307,12 +2284,6 @@ bool Genesis::InstallExtensions(Handle<Context> native_context,
InstallExtension(isolate, "v8/statistics", &extension_states);
}
-#if defined(V8_I18N_SUPPORT)
- if (FLAG_enable_i18n) {
- InstallExtension(isolate, "v8/i18n", &extension_states);
- }
-#endif
-
if (extensions == NULL) return true;
// Install required extensions
int count = v8::ImplementationUtilities::GetNameCount(extensions);
@@ -2600,8 +2571,8 @@ Genesis::Genesis(Isolate* isolate,
: isolate_(isolate),
active_(isolate->bootstrapper()) {
result_ = Handle<Context>::null();
- // If V8 isn't running and cannot be initialized, just return.
- if (!V8::IsRunning() && !V8::Initialize(NULL)) return;
+ // If V8 cannot be initialized, just return.
+ if (!V8::Initialize(NULL)) return;
// Before creating the roots we must save the context and restore it
// on all function exits.
@@ -2616,7 +2587,7 @@ Genesis::Genesis(Isolate* isolate,
// We can only de-serialize a context if the isolate was initialized from
// a snapshot. Otherwise we have to build the context from scratch.
if (isolate->initialized_from_snapshot()) {
- native_context_ = Snapshot::NewContextFromSnapshot();
+ native_context_ = Snapshot::NewContextFromSnapshot(isolate);
} else {
native_context_ = Handle<Context>();
}
@@ -2659,6 +2630,14 @@ Genesis::Genesis(Isolate* isolate,
InitializeExperimentalGlobal();
if (!InstallExperimentalNatives()) return;
+ // Initially seed the per-context random number generator
+ // using the per-isolate random number generator.
+ uint32_t* state = reinterpret_cast<uint32_t*>(
+ native_context()->random_seed()->GetDataStartAddress());
+ do {
+ isolate->random_number_generator()->NextBytes(state, kRandomStateSize);
+ } while (state[0] == 0 || state[1] == 0);
+
result_ = native_context();
}
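
The loop above rejects any seed in which either 32-bit half of the state is zero, presumably because the multiply-with-carry style generator behind Math.random degenerates to a fixed point on a zero half. A standalone model of the reseeding contract, where rand() stands in for the per-isolate RandomNumberGenerator and the state is assumed to be two 32-bit words:

    #include <cstdint>
    #include <cstdlib>

    // Draw fresh state until neither half is zero.
    static void ReseedContextRandomState(uint32_t state[2]) {
      do {
        state[0] = static_cast<uint32_t>(rand());
        state[1] = static_cast<uint32_t>(rand());
      } while (state[0] == 0 || state[1] == 0);
    }
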
diff --git a/deps/v8/src/bootstrapper.h b/deps/v8/src/bootstrapper.h
index 309780039..bac9f4037 100644
--- a/deps/v8/src/bootstrapper.h
+++ b/deps/v8/src/bootstrapper.h
@@ -44,8 +44,8 @@ class SourceCodeCache BASE_EMBEDDED {
public:
explicit SourceCodeCache(Script::Type type): type_(type), cache_(NULL) { }
- void Initialize(bool create_heap_objects) {
- cache_ = create_heap_objects ? HEAP->empty_fixed_array() : NULL;
+ void Initialize(Isolate* isolate, bool create_heap_objects) {
+ cache_ = create_heap_objects ? isolate->heap()->empty_fixed_array() : NULL;
}
void Iterate(ObjectVisitor* v) {
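
This is the recurring refactor of the commit in miniature: the HEAP macro, which expanded to Isolate::Current()->heap(), is replaced by an isolate the caller passes in. Call sites change accordingly; a sketch, assuming the extensions cache as used in bootstrapper.cc:

    SourceCodeCache extensions_cache(Script::TYPE_EXTENSION);
    // Before: extensions_cache.Initialize(create_heap_objects);
    extensions_cache.Initialize(isolate, create_heap_objects);
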
diff --git a/deps/v8/src/builtins.cc b/deps/v8/src/builtins.cc
index 4a5cd03b6..9290852dc 100644
--- a/deps/v8/src/builtins.cc
+++ b/deps/v8/src/builtins.cc
@@ -132,7 +132,6 @@ BUILTIN_LIST_C(DEF_ARG_TYPE)
MUST_USE_RESULT static MaybeObject* Builtin_##name( \
int args_length, Object** args_object, Isolate* isolate) { \
name##ArgumentsType args(args_length, args_object); \
- ASSERT(isolate == Isolate::Current()); \
args.Verify(); \
return Builtin_Impl_##name(args, isolate); \
} \
@@ -304,11 +303,11 @@ static FixedArrayBase* LeftTrimFixedArray(Heap* heap,
} else {
entry_size = kDoubleSize;
}
- ASSERT(elms->map() != HEAP->fixed_cow_array_map());
+ ASSERT(elms->map() != heap->fixed_cow_array_map());
// For now this trick is only applied to fixed arrays in new and paged space.
// In large object space the object's start must coincide with chunk
// and thus the trick is just not applicable.
- ASSERT(!HEAP->lo_space()->Contains(elms));
+ ASSERT(!heap->lo_space()->Contains(elms));
STATIC_ASSERT(FixedArrayBase::kMapOffset == 0);
STATIC_ASSERT(FixedArrayBase::kLengthOffset == kPointerSize);
@@ -448,7 +447,8 @@ MUST_USE_RESULT static MaybeObject* CallJsBuiltin(
argv[i] = args.at<Object>(i + 1);
}
bool pending_exception;
- Handle<Object> result = Execution::Call(function,
+ Handle<Object> result = Execution::Call(isolate,
+ function,
args.receiver(),
argc,
argv.start(),
@@ -594,7 +594,7 @@ BUILTIN(ArrayPop) {
if (accessor->HasElement(array, array, new_length, elms_obj)) {
maybe_result = accessor->Get(array, array, new_length, elms_obj);
} else {
- maybe_result = array->GetPrototype()->GetElement(len - 1);
+ maybe_result = array->GetPrototype()->GetElement(isolate, len - 1);
}
if (maybe_result->IsFailure()) return maybe_result;
MaybeObject* maybe_failure =
@@ -1253,8 +1253,8 @@ MUST_USE_RESULT static MaybeObject* HandleApiCallHelper(
if (!raw_call_data->IsUndefined()) {
CallHandlerInfo* call_data = CallHandlerInfo::cast(raw_call_data);
Object* callback_obj = call_data->callback();
- v8::InvocationCallback callback =
- v8::ToCData<v8::InvocationCallback>(callback_obj);
+ v8::FunctionCallback callback =
+ v8::ToCData<v8::FunctionCallback>(callback_obj);
Object* data_obj = call_data->data();
Object* result;
@@ -1322,8 +1322,8 @@ MUST_USE_RESULT static MaybeObject* HandleApiCallAsFunctionOrConstructor(
ASSERT(!handler->IsUndefined());
CallHandlerInfo* call_data = CallHandlerInfo::cast(handler);
Object* callback_obj = call_data->callback();
- v8::InvocationCallback callback =
- v8::ToCData<v8::InvocationCallback>(callback_obj);
+ v8::FunctionCallback callback =
+ v8::ToCData<v8::FunctionCallback>(callback_obj);
// Get the data for the call and perform the callback.
Object* result;
@@ -1461,6 +1461,16 @@ static void Generate_StoreIC_Initialize_Strict(MacroAssembler* masm) {
}
+static void Generate_StoreIC_PreMonomorphic(MacroAssembler* masm) {
+ StoreIC::GeneratePreMonomorphic(masm);
+}
+
+
+static void Generate_StoreIC_PreMonomorphic_Strict(MacroAssembler* masm) {
+ StoreIC::GeneratePreMonomorphic(masm);
+}
+
+
static void Generate_StoreIC_Miss(MacroAssembler* masm) {
StoreIC::GenerateMiss(masm);
}
@@ -1546,6 +1556,16 @@ static void Generate_KeyedStoreIC_Initialize_Strict(MacroAssembler* masm) {
}
+static void Generate_KeyedStoreIC_PreMonomorphic(MacroAssembler* masm) {
+ KeyedStoreIC::GeneratePreMonomorphic(masm);
+}
+
+
+static void Generate_KeyedStoreIC_PreMonomorphic_Strict(MacroAssembler* masm) {
+ KeyedStoreIC::GeneratePreMonomorphic(masm);
+}
+
+
static void Generate_KeyedStoreIC_NonStrictArguments(MacroAssembler* masm) {
KeyedStoreIC::GenerateNonStrictArguments(masm);
}
@@ -1717,9 +1737,8 @@ void Builtins::InitBuiltinFunctionTable() {
}
-void Builtins::SetUp(bool create_heap_objects) {
+void Builtins::SetUp(Isolate* isolate, bool create_heap_objects) {
ASSERT(!initialized_);
- Isolate* isolate = Isolate::Current();
Heap* heap = isolate->heap();
// Create a scope for the handles in the builtins.
@@ -1813,6 +1832,16 @@ const char* Builtins::Lookup(byte* pc) {
}
+void Builtins::Generate_InterruptCheck(MacroAssembler* masm) {
+ masm->TailCallRuntime(Runtime::kInterrupt, 0, 1);
+}
+
+
+void Builtins::Generate_StackCheck(MacroAssembler* masm) {
+ masm->TailCallRuntime(Runtime::kStackGuard, 0, 1);
+}
+
+
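
Both new builtins are one-instruction trampolines: TailCallRuntime transfers control to the named runtime entry with zero JavaScript arguments and a single tagged result, so generated code can jump to a fixed code object for interrupt and stack-guard servicing. The shared shape, factored out for illustration only:

    // Illustrative refactoring, not code from the tree.
    static void GenerateRuntimeTrampoline(MacroAssembler* masm,
                                          Runtime::FunctionId id) {
      masm->TailCallRuntime(id, 0, 1);  // 0 args, 1 tagged result
    }
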
#define DEFINE_BUILTIN_ACCESSOR_C(name, ignore) \
Handle<Code> Builtins::name() { \
Code** code_address = \
diff --git a/deps/v8/src/builtins.h b/deps/v8/src/builtins.h
index bb36c0251..c712f1ee0 100644
--- a/deps/v8/src/builtins.h
+++ b/deps/v8/src/builtins.h
@@ -87,8 +87,6 @@ enum BuiltinExtraArguments {
Code::kNoExtraICState) \
V(InRecompileQueue, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
- V(InstallRecompiledCode, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
V(JSConstructStubCountdown, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(JSConstructStubGeneric, BUILTIN, UNINITIALIZED, \
@@ -103,7 +101,7 @@ enum BuiltinExtraArguments {
Code::kNoExtraICState) \
V(LazyRecompile, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
- V(ParallelRecompile, BUILTIN, UNINITIALIZED, \
+ V(ConcurrentRecompile, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(NotifyDeoptimized, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
@@ -122,7 +120,7 @@ enum BuiltinExtraArguments {
Code::kNoExtraICState) \
V(KeyedLoadIC_MissForceGeneric, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
- V(KeyedLoadIC_Slow, BUILTIN, UNINITIALIZED, \
+ V(KeyedLoadIC_Slow, STUB, MONOMORPHIC, \
Code::kNoExtraICState) \
V(StoreIC_Miss, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
@@ -144,7 +142,7 @@ enum BuiltinExtraArguments {
Code::kNoExtraICState) \
V(LoadIC_Getter_ForDeopt, LOAD_IC, MONOMORPHIC, \
Code::kNoExtraICState) \
- V(LoadIC_Slow, LOAD_IC, GENERIC, \
+ V(LoadIC_Slow, STUB, MONOMORPHIC, \
Code::kNoExtraICState) \
\
V(KeyedLoadIC_Initialize, KEYED_LOAD_IC, UNINITIALIZED, \
@@ -162,6 +160,8 @@ enum BuiltinExtraArguments {
\
V(StoreIC_Initialize, STORE_IC, UNINITIALIZED, \
Code::kNoExtraICState) \
+ V(StoreIC_PreMonomorphic, STORE_IC, PREMONOMORPHIC, \
+ Code::kNoExtraICState) \
V(StoreIC_Normal, STORE_IC, MONOMORPHIC, \
Code::kNoExtraICState) \
V(StoreIC_Megamorphic, STORE_IC, MEGAMORPHIC, \
@@ -174,6 +174,8 @@ enum BuiltinExtraArguments {
Code::kNoExtraICState) \
V(StoreIC_Initialize_Strict, STORE_IC, UNINITIALIZED, \
kStrictMode) \
+ V(StoreIC_PreMonomorphic_Strict, STORE_IC, PREMONOMORPHIC, \
+ kStrictMode) \
V(StoreIC_Normal_Strict, STORE_IC, MONOMORPHIC, \
kStrictMode) \
V(StoreIC_Megamorphic_Strict, STORE_IC, MEGAMORPHIC, \
@@ -185,11 +187,15 @@ enum BuiltinExtraArguments {
\
V(KeyedStoreIC_Initialize, KEYED_STORE_IC, UNINITIALIZED, \
Code::kNoExtraICState) \
+ V(KeyedStoreIC_PreMonomorphic, KEYED_STORE_IC, PREMONOMORPHIC, \
+ Code::kNoExtraICState) \
V(KeyedStoreIC_Generic, KEYED_STORE_IC, GENERIC, \
Code::kNoExtraICState) \
\
V(KeyedStoreIC_Initialize_Strict, KEYED_STORE_IC, UNINITIALIZED, \
kStrictMode) \
+ V(KeyedStoreIC_PreMonomorphic_Strict, KEYED_STORE_IC, PREMONOMORPHIC, \
+ kStrictMode) \
V(KeyedStoreIC_Generic_Strict, KEYED_STORE_IC, GENERIC, \
kStrictMode) \
V(KeyedStoreIC_NonStrictArguments, KEYED_STORE_IC, MONOMORPHIC, \
@@ -211,6 +217,10 @@ enum BuiltinExtraArguments {
\
V(OnStackReplacement, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
+ V(InterruptCheck, BUILTIN, UNINITIALIZED, \
+ Code::kNoExtraICState) \
+ V(StackCheck, BUILTIN, UNINITIALIZED, \
+ Code::kNoExtraICState) \
CODE_AGE_LIST_WITH_ARG(DECLARE_CODE_AGE_BUILTIN, V)
#ifdef ENABLE_DEBUGGER_SUPPORT
@@ -288,7 +298,7 @@ class Builtins {
// Generate all builtin code objects. Should be called once during
// isolate initialization.
- void SetUp(bool create_heap_objects);
+ void SetUp(Isolate* isolate, bool create_heap_objects);
void TearDown();
// Garbage collection support.
@@ -370,8 +380,7 @@ class Builtins {
CFunctionId id,
BuiltinExtraArguments extra_args);
static void Generate_InRecompileQueue(MacroAssembler* masm);
- static void Generate_InstallRecompiledCode(MacroAssembler* masm);
- static void Generate_ParallelRecompile(MacroAssembler* masm);
+ static void Generate_ConcurrentRecompile(MacroAssembler* masm);
static void Generate_JSConstructStubCountdown(MacroAssembler* masm);
static void Generate_JSConstructStubGeneric(MacroAssembler* masm);
static void Generate_JSConstructStubApi(MacroAssembler* masm);
@@ -395,6 +404,9 @@ class Builtins {
static void Generate_StringConstructCode(MacroAssembler* masm);
static void Generate_OnStackReplacement(MacroAssembler* masm);
+ static void Generate_InterruptCheck(MacroAssembler* masm);
+ static void Generate_StackCheck(MacroAssembler* masm);
+
#define DECLARE_CODE_AGE_BUILTIN_GENERATOR(C) \
static void Generate_Make##C##CodeYoungAgainEvenMarking( \
MacroAssembler* masm); \
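
The V(name, kind, state, extra_state) tables edited above are X-macro lists: V8 expands the same list several times with different definitions of V to generate the builtin enum, the code accessors, and the Generate_* declarations, so adding one line such as StackCheck to the table wires it up everywhere. A standalone sketch of the idiom, with hypothetical names:

#define BUILTIN_LIST(V) \
  V(InterruptCheck)     \
  V(StackCheck)

// Expansion 1: one enum value per builtin.
enum BuiltinId {
#define DEFINE_ID(name) k##name,
  BUILTIN_LIST(DEFINE_ID)
#undef DEFINE_ID
  kBuiltinCount
};

// Expansion 2: one generator declaration per builtin.
#define DECLARE_GENERATOR(name) void Generate_##name();
BUILTIN_LIST(DECLARE_GENERATOR)
#undef DECLARE_GENERATOR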
diff --git a/deps/v8/src/checks.cc b/deps/v8/src/checks.cc
index 82086824d..7108d1889 100644
--- a/deps/v8/src/checks.cc
+++ b/deps/v8/src/checks.cc
@@ -31,33 +31,19 @@
#include "platform.h"
-// TODO(isolates): is it necessary to lift this?
-static int fatal_error_handler_nesting_depth = 0;
-
// Contains protection against recursive calls (faults while handling faults).
extern "C" void V8_Fatal(const char* file, int line, const char* format, ...) {
i::AllowHandleDereference allow_deref;
i::AllowDeferredHandleDereference allow_deferred_deref;
fflush(stdout);
fflush(stderr);
- fatal_error_handler_nesting_depth++;
- // First time we try to print an error message
- if (fatal_error_handler_nesting_depth < 2) {
- i::OS::PrintError("\n\n#\n# Fatal error in %s, line %d\n# ", file, line);
- va_list arguments;
- va_start(arguments, format);
- i::OS::VPrintError(format, arguments);
- va_end(arguments);
- i::OS::PrintError("\n#\n");
- i::OS::DumpBacktrace();
- }
- // First two times we may try to print a stack dump.
- if (fatal_error_handler_nesting_depth < 3) {
- if (i::FLAG_stack_trace_on_abort) {
- // Call this one twice on double fault
- i::Isolate::Current()->PrintStack(stderr);
- }
- }
+ i::OS::PrintError("\n\n#\n# Fatal error in %s, line %d\n# ", file, line);
+ va_list arguments;
+ va_start(arguments, format);
+ i::OS::VPrintError(format, arguments);
+ va_end(arguments);
+ i::OS::PrintError("\n#\n");
+ i::OS::DumpBacktrace();
i::OS::Abort();
}
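
With the nesting-depth counter gone, V8_Fatal above reduces to the classic variadic error-reporter shape: flush both streams, format the message to stderr through a va_list, dump a backtrace, abort. A minimal standalone version of the same shape (no backtrace, hypothetical name):

#include <cstdarg>
#include <cstdio>
#include <cstdlib>

static void fatal(const char* file, int line, const char* format, ...) {
  fflush(stdout);
  fflush(stderr);
  fprintf(stderr, "\n\n#\n# Fatal error in %s, line %d\n# ", file, line);
  va_list arguments;
  va_start(arguments, format);
  vfprintf(stderr, format, arguments);  // forward the printf-style payload
  va_end(arguments);
  fprintf(stderr, "\n#\n");
  abort();
}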
diff --git a/deps/v8/src/checks.h b/deps/v8/src/checks.h
index b309e2c42..f5c5f232b 100644
--- a/deps/v8/src/checks.h
+++ b/deps/v8/src/checks.h
@@ -31,6 +31,7 @@
#include <string.h>
#include "../include/v8stdint.h"
+
extern "C" void V8_Fatal(const char* file, int line, const char* format, ...);
// The FATAL, UNREACHABLE and UNIMPLEMENTED macros are useful during
@@ -196,6 +197,20 @@ inline void CheckEqualsHelper(const char* file,
inline void CheckNonEqualsHelper(const char* file,
+ int line,
+ const char* expected_source,
+ int64_t expected,
+ const char* value_source,
+ int64_t value) {
+ if (expected == value) {
+ V8_Fatal(file, line,
+             "CHECK_NE(%s, %s) failed\n#   Value: %lld",
+             expected_source, value_source, static_cast<long long>(expected));
+ }
+}
+
+
+inline void CheckNonEqualsHelper(const char* file,
int line,
const char* expected_source,
double expected,
@@ -232,7 +247,7 @@ inline void CheckNonEqualsHelper(const char* file,
// Use C++11 static_assert if possible, which gives error
// messages that are easier to understand on first sight.
-#if __cplusplus >= 201103L
+#if V8_HAS_CXX11_STATIC_ASSERT
#define STATIC_CHECK(test) static_assert(test, #test)
#else
// This is inspired by the static assertion facility in boost. This
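
The STATIC_CHECK change above switches the guard from a raw __cplusplus comparison to a feature-test macro, since compilers ship static_assert at different __cplusplus values. The fallback branch that follows in checks.h is the usual template trick; a standalone sketch of the idiom (one common variant, not necessarily V8's exact one):

#if defined(__cplusplus) && __cplusplus >= 201103L
#define MY_STATIC_CHECK(test) static_assert(test, #test)
#else
// Pre-C++11: StaticCheck<false> is only declared, never defined, so
// instantiating it fails at compile time.
template <bool> struct StaticCheck;
template <> struct StaticCheck<true> {};
#define CONCAT_(a, b) a##b
#define CONCAT(a, b) CONCAT_(a, b)
#define MY_STATIC_CHECK(test) \
  typedef StaticCheck<(test)> CONCAT(static_check_, __LINE__)
#endif

MY_STATIC_CHECK(sizeof(int) >= 2);  // fine; a false test would not compile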
diff --git a/deps/v8/src/circular-queue-inl.h b/deps/v8/src/circular-queue-inl.h
index b48070ab5..dfb703157 100644
--- a/deps/v8/src/circular-queue-inl.h
+++ b/deps/v8/src/circular-queue-inl.h
@@ -33,30 +33,60 @@
namespace v8 {
namespace internal {
+template<typename T, unsigned L>
+SamplingCircularQueue<T, L>::SamplingCircularQueue()
+ : enqueue_pos_(buffer_),
+ dequeue_pos_(buffer_) {
+}
+
+
+template<typename T, unsigned L>
+SamplingCircularQueue<T, L>::~SamplingCircularQueue() {
+}
+
+
+template<typename T, unsigned L>
+T* SamplingCircularQueue<T, L>::Peek() {
+ MemoryBarrier();
+ if (Acquire_Load(&dequeue_pos_->marker) == kFull) {
+ return &dequeue_pos_->record;
+ }
+ return NULL;
+}
+
+
+template<typename T, unsigned L>
+void SamplingCircularQueue<T, L>::Remove() {
+ Release_Store(&dequeue_pos_->marker, kEmpty);
+ dequeue_pos_ = Next(dequeue_pos_);
+}
-void* SamplingCircularQueue::Enqueue() {
- if (producer_pos_->enqueue_pos == producer_pos_->next_chunk_pos) {
- if (producer_pos_->enqueue_pos == buffer_ + buffer_size_) {
- producer_pos_->next_chunk_pos = buffer_;
- producer_pos_->enqueue_pos = buffer_;
- }
- Acquire_Store(producer_pos_->next_chunk_pos, kEnqueueStarted);
- // Skip marker.
- producer_pos_->enqueue_pos += 1;
- producer_pos_->next_chunk_pos += chunk_size_;
+
+template<typename T, unsigned L>
+T* SamplingCircularQueue<T, L>::StartEnqueue() {
+ MemoryBarrier();
+ if (Acquire_Load(&enqueue_pos_->marker) == kEmpty) {
+ return &enqueue_pos_->record;
}
- void* result = producer_pos_->enqueue_pos;
- producer_pos_->enqueue_pos += record_size_;
- return result;
+ return NULL;
}
-void SamplingCircularQueue::WrapPositionIfNeeded(
- SamplingCircularQueue::Cell** pos) {
- if (*pos == buffer_ + buffer_size_) *pos = buffer_;
+template<typename T, unsigned L>
+void SamplingCircularQueue<T, L>::FinishEnqueue() {
+ Release_Store(&enqueue_pos_->marker, kFull);
+ enqueue_pos_ = Next(enqueue_pos_);
}
+template<typename T, unsigned L>
+typename SamplingCircularQueue<T, L>::Entry* SamplingCircularQueue<T, L>::Next(
+ Entry* entry) {
+ Entry* next = entry + 1;
+ if (next == &buffer_[L]) return buffer_;
+ return next;
+}
+
} } // namespace v8::internal
#endif // V8_CIRCULAR_QUEUE_INL_H_
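
The rewritten queue above is a fixed-size single-producer/single-consumer ring: every entry carries an empty/full marker, the producer publishes a record with a release store, and the consumer observes it with an acquire load, so neither side ever locks. A standalone sketch of the same protocol using std::atomic in place of V8's Acquire_Load/Release_Store:

#include <atomic>
#include <cstddef>

template <typename T, std::size_t Length>
class SpscQueue {
 public:
  // Producer side: claim the next slot, or get nullptr if it is still full.
  T* StartEnqueue() {
    if (slot_[head_].full.load(std::memory_order_acquire)) return nullptr;
    return &slot_[head_].record;
  }
  void FinishEnqueue() {
    slot_[head_].full.store(true, std::memory_order_release);  // publish
    head_ = (head_ + 1) % Length;
  }

  // Consumer side: peek at the head record, or get nullptr if empty.
  T* Peek() {
    if (!slot_[tail_].full.load(std::memory_order_acquire)) return nullptr;
    return &slot_[tail_].record;
  }
  void Remove() {
    slot_[tail_].full.store(false, std::memory_order_release);
    tail_ = (tail_ + 1) % Length;
  }

 private:
  struct Slot {
    T record;
    std::atomic<bool> full{false};
  };
  Slot slot_[Length];
  std::size_t head_ = 0;  // touched only by the producer thread
  std::size_t tail_ = 0;  // touched only by the consumer thread
};

The producer calls StartEnqueue(), fills the returned record, then FinishEnqueue(); the consumer calls Peek(), reads the record, then Remove(). Correctness relies on exactly one thread per side, since head_ and tail_ are deliberately unsynchronized.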
diff --git a/deps/v8/src/circular-queue.cc b/deps/v8/src/circular-queue.cc
deleted file mode 100644
index 0aea34359..000000000
--- a/deps/v8/src/circular-queue.cc
+++ /dev/null
@@ -1,125 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "circular-queue-inl.h"
-
-namespace v8 {
-namespace internal {
-
-
-SamplingCircularQueue::SamplingCircularQueue(size_t record_size_in_bytes,
- size_t desired_chunk_size_in_bytes,
- unsigned buffer_size_in_chunks)
- : record_size_(record_size_in_bytes / sizeof(Cell)),
- chunk_size_in_bytes_(desired_chunk_size_in_bytes / record_size_in_bytes *
- record_size_in_bytes + sizeof(Cell)),
- chunk_size_(chunk_size_in_bytes_ / sizeof(Cell)),
- buffer_size_(chunk_size_ * buffer_size_in_chunks),
- buffer_(NewArray<Cell>(buffer_size_)) {
- ASSERT(record_size_ * sizeof(Cell) == record_size_in_bytes);
- ASSERT(chunk_size_ * sizeof(Cell) == chunk_size_in_bytes_);
- ASSERT(buffer_size_in_chunks > 2);
- // Mark all chunks as clear.
- for (size_t i = 0; i < buffer_size_; i += chunk_size_) {
- buffer_[i] = kClear;
- }
-
- // Layout producer and consumer position pointers each on their own
- // cache lines to avoid cache lines thrashing due to simultaneous
- // updates of positions by different processor cores.
- const int positions_size =
- RoundUp(1, kProcessorCacheLineSize) +
- RoundUp(static_cast<int>(sizeof(ProducerPosition)),
- kProcessorCacheLineSize) +
- RoundUp(static_cast<int>(sizeof(ConsumerPosition)),
- kProcessorCacheLineSize);
- positions_ = NewArray<byte>(positions_size);
-
- producer_pos_ = reinterpret_cast<ProducerPosition*>(
- RoundUp(positions_, kProcessorCacheLineSize));
- producer_pos_->next_chunk_pos = buffer_;
- producer_pos_->enqueue_pos = buffer_;
-
- consumer_pos_ = reinterpret_cast<ConsumerPosition*>(
- reinterpret_cast<byte*>(producer_pos_) + kProcessorCacheLineSize);
- ASSERT(reinterpret_cast<byte*>(consumer_pos_ + 1) <=
- positions_ + positions_size);
- consumer_pos_->dequeue_chunk_pos = buffer_;
- // The distance ensures that producer and consumer never step on
- // each other's chunks and helps eviction of produced data from
- // the CPU cache (having that chunk size is bigger than the cache.)
- const size_t producer_consumer_distance = (2 * chunk_size_);
- consumer_pos_->dequeue_chunk_poll_pos = buffer_ + producer_consumer_distance;
- consumer_pos_->dequeue_pos = NULL;
-}
-
-
-SamplingCircularQueue::~SamplingCircularQueue() {
- DeleteArray(positions_);
- DeleteArray(buffer_);
-}
-
-
-void* SamplingCircularQueue::StartDequeue() {
- if (consumer_pos_->dequeue_pos != NULL) {
- return consumer_pos_->dequeue_pos;
- } else {
- if (Acquire_Load(consumer_pos_->dequeue_chunk_poll_pos) != kClear) {
- // Skip marker.
- consumer_pos_->dequeue_pos = consumer_pos_->dequeue_chunk_pos + 1;
- consumer_pos_->dequeue_end_pos =
- consumer_pos_->dequeue_chunk_pos + chunk_size_;
- return consumer_pos_->dequeue_pos;
- } else {
- return NULL;
- }
- }
-}
-
-
-void SamplingCircularQueue::FinishDequeue() {
- consumer_pos_->dequeue_pos += record_size_;
- if (consumer_pos_->dequeue_pos < consumer_pos_->dequeue_end_pos) return;
- // Move to next chunk.
- consumer_pos_->dequeue_pos = NULL;
- *consumer_pos_->dequeue_chunk_pos = kClear;
- consumer_pos_->dequeue_chunk_pos += chunk_size_;
- WrapPositionIfNeeded(&consumer_pos_->dequeue_chunk_pos);
- consumer_pos_->dequeue_chunk_poll_pos += chunk_size_;
- WrapPositionIfNeeded(&consumer_pos_->dequeue_chunk_poll_pos);
-}
-
-
-void SamplingCircularQueue::FlushResidualRecords() {
- // Eliminate producer / consumer distance.
- consumer_pos_->dequeue_chunk_poll_pos = consumer_pos_->dequeue_chunk_pos;
-}
-
-
-} } // namespace v8::internal
diff --git a/deps/v8/src/circular-queue.h b/deps/v8/src/circular-queue.h
index 4ad4f4b55..94bc89e7d 100644
--- a/deps/v8/src/circular-queue.h
+++ b/deps/v8/src/circular-queue.h
@@ -28,6 +28,8 @@
#ifndef V8_CIRCULAR_QUEUE_H_
#define V8_CIRCULAR_QUEUE_H_
+#include "v8globals.h"
+
namespace v8 {
namespace internal {
@@ -35,67 +37,49 @@ namespace internal {
// Lock-free cache-friendly sampling circular queue for large
// records. Intended for fast transfer of large records between a
// single producer and a single consumer. If the queue is full,
-// previous unread records are overwritten. The queue is designed with
+// StartEnqueue will return NULL. The queue is designed with
// the goal of avoiding cache-line thrashing by preventing
// simultaneous reads and writes to adjacent memory locations.
-//
-// IMPORTANT: as a producer never checks for chunks cleanness, it is
-// possible that it can catch up and overwrite a chunk that a consumer
-// is currently reading, resulting in a corrupt record being read.
+template<typename T, unsigned Length>
class SamplingCircularQueue {
public:
// Executed on the application thread.
- SamplingCircularQueue(size_t record_size_in_bytes,
- size_t desired_chunk_size_in_bytes,
- unsigned buffer_size_in_chunks);
+ SamplingCircularQueue();
~SamplingCircularQueue();
- // Enqueue returns a pointer to a memory location for storing the next
- // record.
- INLINE(void* Enqueue());
+ // StartEnqueue returns a pointer to a memory location for storing the next
+ // record or NULL if all entries are full at the moment.
+ T* StartEnqueue();
+  // Notifies the queue that the producer has completed writing data into the
+  // memory returned by StartEnqueue, so it can be passed to the consumer.
+ void FinishEnqueue();
// Executed on the consumer (analyzer) thread.
- // StartDequeue returns a pointer to a memory location for retrieving
- // the next record. After the record had been read by a consumer,
- // FinishDequeue must be called. Until that moment, subsequent calls
- // to StartDequeue will return the same pointer.
- void* StartDequeue();
- void FinishDequeue();
- // Due to a presence of slipping between the producer and the consumer,
- // the queue must be notified whether producing has been finished in order
- // to process remaining records from the buffer.
- void FlushResidualRecords();
-
- typedef AtomicWord Cell;
+ // Retrieves, but does not remove, the head of this queue, returning NULL
+  // if this queue is empty. After the record has been read by a consumer,
+ // Remove must be called.
+ T* Peek();
+ void Remove();
private:
- // Reserved values for the chunk marker (first Cell in each chunk).
+ // Reserved values for the entry marker.
enum {
- kClear, // Marks clean (processed) chunks.
- kEnqueueStarted // Marks chunks where enqueue started.
+ kEmpty, // Marks clean (processed) entries.
+ kFull // Marks entries already filled by the producer but not yet
+ // completely processed by the consumer.
};
- struct ProducerPosition {
- Cell* next_chunk_pos;
- Cell* enqueue_pos;
- };
- struct ConsumerPosition {
- Cell* dequeue_chunk_pos;
- Cell* dequeue_chunk_poll_pos;
- Cell* dequeue_pos;
- Cell* dequeue_end_pos;
+ struct V8_ALIGNED(PROCESSOR_CACHE_LINE_SIZE) Entry {
+ Entry() : marker(kEmpty) {}
+ T record;
+ Atomic32 marker;
};
- INLINE(void WrapPositionIfNeeded(Cell** pos));
+ Entry* Next(Entry* entry);
- const size_t record_size_;
- const size_t chunk_size_in_bytes_;
- const size_t chunk_size_;
- const size_t buffer_size_;
- Cell* buffer_;
- byte* positions_;
- ProducerPosition* producer_pos_;
- ConsumerPosition* consumer_pos_;
+ Entry buffer_[Length];
+ V8_ALIGNED(PROCESSOR_CACHE_LINE_SIZE) Entry* enqueue_pos_;
+ V8_ALIGNED(PROCESSOR_CACHE_LINE_SIZE) Entry* dequeue_pos_;
DISALLOW_COPY_AND_ASSIGN(SamplingCircularQueue);
};
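
The V8_ALIGNED(PROCESSOR_CACHE_LINE_SIZE) annotations above keep each Entry and each position pointer on its own cache line, so the producer's writes never invalidate the line the consumer is spinning on (false sharing). The C++11 spelling of the same idea, assuming a 64-byte line:

#include <atomic>
#include <cstddef>

const std::size_t kCacheLineSize = 64;  // assumption; platform-dependent

struct alignas(kCacheLineSize) PaddedCursor {
  std::atomic<std::size_t> pos{0};
};

struct Ring {
  PaddedCursor enqueue_pos;  // written by the producer core only
  PaddedCursor dequeue_pos;  // written by the consumer core only
};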
diff --git a/deps/v8/src/code-stubs-hydrogen.cc b/deps/v8/src/code-stubs-hydrogen.cc
index 4f6db35dd..23d4269c8 100644
--- a/deps/v8/src/code-stubs-hydrogen.cc
+++ b/deps/v8/src/code-stubs-hydrogen.cc
@@ -112,6 +112,13 @@ class CodeStubGraphBuilderBase : public HGraphBuilder {
HValue* BuildInternalArrayConstructor(ElementsKind kind,
ArgumentClass argument_class);
+ void BuildInstallOptimizedCode(HValue* js_function, HValue* native_context,
+ HValue* code_object);
+ void BuildInstallCode(HValue* js_function, HValue* shared_info);
+ void BuildInstallFromOptimizedCodeMap(HValue* js_function,
+ HValue* shared_info,
+ HValue* native_context);
+
private:
HValue* BuildArraySingleArgumentConstructor(JSArrayBuilder* builder);
HValue* BuildArrayNArgumentsConstructor(JSArrayBuilder* builder,
@@ -210,8 +217,8 @@ bool CodeStubGraphBuilderBase::BuildGraph() {
template <class Stub>
class CodeStubGraphBuilder: public CodeStubGraphBuilderBase {
public:
- explicit CodeStubGraphBuilder(Stub* stub)
- : CodeStubGraphBuilderBase(Isolate::Current(), stub) {}
+ explicit CodeStubGraphBuilder(Isolate* isolate, Stub* stub)
+ : CodeStubGraphBuilderBase(isolate, stub) {}
protected:
virtual HValue* BuildCodeStub() {
@@ -278,8 +285,7 @@ Handle<Code> HydrogenCodeStub::GenerateLightweightMissCode(Isolate* isolate) {
template <class Stub>
-static Handle<Code> DoGenerateCode(Stub* stub) {
- Isolate* isolate = Isolate::Current();
+static Handle<Code> DoGenerateCode(Isolate* isolate, Stub* stub) {
CodeStub::Major major_key =
static_cast<HydrogenCodeStub*>(stub)->MajorKey();
CodeStubInterfaceDescriptor* descriptor =
@@ -295,7 +301,7 @@ static Handle<Code> DoGenerateCode(Stub* stub) {
ASSERT(descriptor->stack_parameter_count_ == NULL);
return stub->GenerateLightweightMissCode(isolate);
}
- CodeStubGraphBuilder<Stub> builder(stub);
+ CodeStubGraphBuilder<Stub> builder(isolate, stub);
LChunk* chunk = OptimizeGraph(builder.CreateGraph());
return chunk->Codegen();
}
@@ -327,8 +333,8 @@ HValue* CodeStubGraphBuilder<ToNumberStub>::BuildCodeStub() {
}
-Handle<Code> ToNumberStub::GenerateCode() {
- return DoGenerateCode(this);
+Handle<Code> ToNumberStub::GenerateCode(Isolate* isolate) {
+ return DoGenerateCode(isolate, this);
}
@@ -352,7 +358,7 @@ HValue* CodeStubGraphBuilder<FastCloneShallowArrayStub>::BuildCodeStub() {
HObjectAccess access = HObjectAccess::ForAllocationSiteTransitionInfo();
HInstruction* boilerplate = Add<HLoadNamedField>(allocation_site, access);
if (mode == FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS) {
- HValue* elements = AddLoadElements(boilerplate, NULL);
+ HValue* elements = AddLoadElements(boilerplate);
IfBuilder if_fixed_cow(this);
if_fixed_cow.If<HCompareMap>(elements, factory->fixed_cow_array_map());
@@ -394,8 +400,8 @@ HValue* CodeStubGraphBuilder<FastCloneShallowArrayStub>::BuildCodeStub() {
}
-Handle<Code> FastCloneShallowArrayStub::GenerateCode() {
- return DoGenerateCode(this);
+Handle<Code> FastCloneShallowArrayStub::GenerateCode(Isolate* isolate) {
+ return DoGenerateCode(isolate, this);
}
@@ -441,8 +447,8 @@ HValue* CodeStubGraphBuilder<FastCloneShallowObjectStub>::BuildCodeStub() {
}
-Handle<Code> FastCloneShallowObjectStub::GenerateCode() {
- return DoGenerateCode(this);
+Handle<Code> FastCloneShallowObjectStub::GenerateCode(Isolate* isolate) {
+ return DoGenerateCode(isolate, this);
}
@@ -487,23 +493,23 @@ HValue* CodeStubGraphBuilder<CreateAllocationSiteStub>::BuildCodeStub() {
}
-Handle<Code> CreateAllocationSiteStub::GenerateCode() {
- return DoGenerateCode(this);
+Handle<Code> CreateAllocationSiteStub::GenerateCode(Isolate* isolate) {
+ return DoGenerateCode(isolate, this);
}
template <>
HValue* CodeStubGraphBuilder<KeyedLoadFastElementStub>::BuildCodeStub() {
HInstruction* load = BuildUncheckedMonomorphicElementAccess(
- GetParameter(0), GetParameter(1), NULL, NULL,
+ GetParameter(0), GetParameter(1), NULL,
casted_stub()->is_js_array(), casted_stub()->elements_kind(),
false, NEVER_RETURN_HOLE, STANDARD_STORE);
return load;
}
-Handle<Code> KeyedLoadFastElementStub::GenerateCode() {
- return DoGenerateCode(this);
+Handle<Code> KeyedLoadFastElementStub::GenerateCode(Isolate* isolate) {
+ return DoGenerateCode(isolate, this);
}
@@ -513,12 +519,12 @@ HValue* CodeStubGraphBuilder<LoadFieldStub>::BuildCodeStub() {
HObjectAccess access = casted_stub()->is_inobject() ?
HObjectAccess::ForJSObjectOffset(casted_stub()->offset(), rep) :
HObjectAccess::ForBackingStoreOffset(casted_stub()->offset(), rep);
- return AddInstruction(BuildLoadNamedField(GetParameter(0), access, NULL));
+ return AddInstruction(BuildLoadNamedField(GetParameter(0), access));
}
-Handle<Code> LoadFieldStub::GenerateCode() {
- return DoGenerateCode(this);
+Handle<Code> LoadFieldStub::GenerateCode(Isolate* isolate) {
+ return DoGenerateCode(isolate, this);
}
@@ -528,19 +534,19 @@ HValue* CodeStubGraphBuilder<KeyedLoadFieldStub>::BuildCodeStub() {
HObjectAccess access = casted_stub()->is_inobject() ?
HObjectAccess::ForJSObjectOffset(casted_stub()->offset(), rep) :
HObjectAccess::ForBackingStoreOffset(casted_stub()->offset(), rep);
- return AddInstruction(BuildLoadNamedField(GetParameter(0), access, NULL));
+ return AddInstruction(BuildLoadNamedField(GetParameter(0), access));
}
-Handle<Code> KeyedLoadFieldStub::GenerateCode() {
- return DoGenerateCode(this);
+Handle<Code> KeyedLoadFieldStub::GenerateCode(Isolate* isolate) {
+ return DoGenerateCode(isolate, this);
}
template <>
HValue* CodeStubGraphBuilder<KeyedStoreFastElementStub>::BuildCodeStub() {
BuildUncheckedMonomorphicElementAccess(
- GetParameter(0), GetParameter(1), GetParameter(2), NULL,
+ GetParameter(0), GetParameter(1), GetParameter(2),
casted_stub()->is_js_array(), casted_stub()->elements_kind(),
true, NEVER_RETURN_HOLE, casted_stub()->store_mode());
@@ -548,8 +554,8 @@ HValue* CodeStubGraphBuilder<KeyedStoreFastElementStub>::BuildCodeStub() {
}
-Handle<Code> KeyedStoreFastElementStub::GenerateCode() {
- return DoGenerateCode(this);
+Handle<Code> KeyedStoreFastElementStub::GenerateCode(Isolate* isolate) {
+ return DoGenerateCode(isolate, this);
}
@@ -567,8 +573,8 @@ HValue* CodeStubGraphBuilder<TransitionElementsKindStub>::BuildCodeStub() {
}
-Handle<Code> TransitionElementsKindStub::GenerateCode() {
- return DoGenerateCode(this);
+Handle<Code> TransitionElementsKindStub::GenerateCode(Isolate* isolate) {
+ return DoGenerateCode(isolate, this);
}
HValue* CodeStubGraphBuilderBase::BuildArrayConstructor(
@@ -702,8 +708,8 @@ HValue* CodeStubGraphBuilder<ArrayNoArgumentConstructorStub>::BuildCodeStub() {
}
-Handle<Code> ArrayNoArgumentConstructorStub::GenerateCode() {
- return DoGenerateCode(this);
+Handle<Code> ArrayNoArgumentConstructorStub::GenerateCode(Isolate* isolate) {
+ return DoGenerateCode(isolate, this);
}
@@ -717,8 +723,9 @@ HValue* CodeStubGraphBuilder<ArraySingleArgumentConstructorStub>::
}
-Handle<Code> ArraySingleArgumentConstructorStub::GenerateCode() {
- return DoGenerateCode(this);
+Handle<Code> ArraySingleArgumentConstructorStub::GenerateCode(
+ Isolate* isolate) {
+ return DoGenerateCode(isolate, this);
}
@@ -731,8 +738,8 @@ HValue* CodeStubGraphBuilder<ArrayNArgumentsConstructorStub>::BuildCodeStub() {
}
-Handle<Code> ArrayNArgumentsConstructorStub::GenerateCode() {
- return DoGenerateCode(this);
+Handle<Code> ArrayNArgumentsConstructorStub::GenerateCode(Isolate* isolate) {
+ return DoGenerateCode(isolate, this);
}
@@ -744,8 +751,9 @@ HValue* CodeStubGraphBuilder<InternalArrayNoArgumentConstructorStub>::
}
-Handle<Code> InternalArrayNoArgumentConstructorStub::GenerateCode() {
- return DoGenerateCode(this);
+Handle<Code> InternalArrayNoArgumentConstructorStub::GenerateCode(
+ Isolate* isolate) {
+ return DoGenerateCode(isolate, this);
}
@@ -757,8 +765,9 @@ HValue* CodeStubGraphBuilder<InternalArraySingleArgumentConstructorStub>::
}
-Handle<Code> InternalArraySingleArgumentConstructorStub::GenerateCode() {
- return DoGenerateCode(this);
+Handle<Code> InternalArraySingleArgumentConstructorStub::GenerateCode(
+ Isolate* isolate) {
+ return DoGenerateCode(isolate, this);
}
@@ -770,8 +779,9 @@ HValue* CodeStubGraphBuilder<InternalArrayNArgumentsConstructorStub>::
}
-Handle<Code> InternalArrayNArgumentsConstructorStub::GenerateCode() {
- return DoGenerateCode(this);
+Handle<Code> InternalArrayNArgumentsConstructorStub::GenerateCode(
+ Isolate* isolate) {
+ return DoGenerateCode(isolate, this);
}
@@ -796,8 +806,8 @@ HValue* CodeStubGraphBuilder<CompareNilICStub>::BuildCodeInitializedStub() {
}
-Handle<Code> CompareNilICStub::GenerateCode() {
- return DoGenerateCode(this);
+Handle<Code> CompareNilICStub::GenerateCode(Isolate* isolate) {
+ return DoGenerateCode(isolate, this);
}
@@ -815,8 +825,8 @@ HValue* CodeStubGraphBuilder<ToBooleanStub>::BuildCodeInitializedStub() {
}
-Handle<Code> ToBooleanStub::GenerateCode() {
- return DoGenerateCode(this);
+Handle<Code> ToBooleanStub::GenerateCode(Isolate* isolate) {
+ return DoGenerateCode(isolate, this);
}
@@ -864,8 +874,8 @@ HValue* CodeStubGraphBuilder<StoreGlobalStub>::BuildCodeInitializedStub() {
}
-Handle<Code> StoreGlobalStub::GenerateCode() {
- return DoGenerateCode(this);
+Handle<Code> StoreGlobalStub::GenerateCode(Isolate* isolate) {
+ return DoGenerateCode(isolate, this);
}
@@ -878,8 +888,7 @@ HValue* CodeStubGraphBuilder<ElementsTransitionAndStoreStub>::BuildCodeStub() {
if (FLAG_trace_elements_transitions) {
// Tracing elements transitions is the job of the runtime.
- Add<HDeoptimize>("Deopt due to --trace-elements-transitions",
- Deoptimizer::EAGER);
+ Add<HDeoptimize>("Tracing elements transitions", Deoptimizer::EAGER);
} else {
info()->MarkAsSavesCallerDoubles();
@@ -888,19 +897,209 @@ HValue* CodeStubGraphBuilder<ElementsTransitionAndStoreStub>::BuildCodeStub() {
casted_stub()->to_kind(),
casted_stub()->is_jsarray());
- BuildUncheckedMonomorphicElementAccess(object, key, value, NULL,
- casted_stub()->is_jsarray(),
- casted_stub()->to_kind(),
- true, ALLOW_RETURN_HOLE,
- casted_stub()->store_mode());
+ BuildUncheckedMonomorphicElementAccess(object, key, value,
+ casted_stub()->is_jsarray(),
+ casted_stub()->to_kind(),
+ true, ALLOW_RETURN_HOLE,
+ casted_stub()->store_mode());
}
return value;
}
-Handle<Code> ElementsTransitionAndStoreStub::GenerateCode() {
- return DoGenerateCode(this);
+Handle<Code> ElementsTransitionAndStoreStub::GenerateCode(Isolate* isolate) {
+ return DoGenerateCode(isolate, this);
+}
+
+
+void CodeStubGraphBuilderBase::BuildInstallOptimizedCode(
+ HValue* js_function,
+ HValue* native_context,
+ HValue* code_object) {
+ Counters* counters = isolate()->counters();
+ AddIncrementCounter(counters->fast_new_closure_install_optimized(),
+ context());
+
+ // TODO(fschneider): Idea: store proper code pointers in the optimized code
+ // map and either unmangle them on marking or do nothing as the whole map is
+ // discarded on major GC anyway.
+ Add<HStoreCodeEntry>(js_function, code_object);
+
+ // Now link a function into a list of optimized functions.
+ HValue* optimized_functions_list = Add<HLoadNamedField>(native_context,
+ HObjectAccess::ForContextSlot(Context::OPTIMIZED_FUNCTIONS_LIST));
+ Add<HStoreNamedField>(js_function,
+ HObjectAccess::ForNextFunctionLinkPointer(),
+ optimized_functions_list);
+
+ // This store is the only one that should have a write barrier.
+ Add<HStoreNamedField>(native_context,
+ HObjectAccess::ForContextSlot(Context::OPTIMIZED_FUNCTIONS_LIST),
+ js_function);
+}
+
+
+void CodeStubGraphBuilderBase::BuildInstallCode(HValue* js_function,
+ HValue* shared_info) {
+ Add<HStoreNamedField>(js_function,
+ HObjectAccess::ForNextFunctionLinkPointer(),
+ graph()->GetConstantUndefined());
+ HValue* code_object = Add<HLoadNamedField>(shared_info,
+ HObjectAccess::ForCodeOffset());
+ Add<HStoreCodeEntry>(js_function, code_object);
+}
+
+
+void CodeStubGraphBuilderBase::BuildInstallFromOptimizedCodeMap(
+ HValue* js_function,
+ HValue* shared_info,
+ HValue* native_context) {
+ Counters* counters = isolate()->counters();
+ IfBuilder is_optimized(this);
+ HInstruction* optimized_map = Add<HLoadNamedField>(shared_info,
+ HObjectAccess::ForOptimizedCodeMap());
+ HValue* null_constant = Add<HConstant>(0);
+ is_optimized.If<HCompareObjectEqAndBranch>(optimized_map, null_constant);
+ is_optimized.Then();
+ {
+ BuildInstallCode(js_function, shared_info);
+ }
+ is_optimized.Else();
+ {
+ AddIncrementCounter(counters->fast_new_closure_try_optimized(), context());
+    // optimized_map points to a fixed array of 3-element entries
+    // (native context, optimized code, literals).
+    // The map must never be empty, so check the first entry.
+ Label install_optimized;
+ HValue* first_context_slot = Add<HLoadNamedField>(optimized_map,
+ HObjectAccess::ForFirstContextSlot());
+ IfBuilder already_in(this);
+ already_in.If<HCompareObjectEqAndBranch>(native_context,
+ first_context_slot);
+ already_in.Then();
+ {
+ HValue* code_object = Add<HLoadNamedField>(optimized_map,
+ HObjectAccess::ForFirstCodeSlot());
+ BuildInstallOptimizedCode(js_function, native_context, code_object);
+ }
+ already_in.Else();
+ {
+ HValue* shared_function_entry_length =
+ Add<HConstant>(SharedFunctionInfo::kEntryLength);
+ LoopBuilder loop_builder(this,
+ context(),
+ LoopBuilder::kPostDecrement,
+ shared_function_entry_length);
+ HValue* array_length = Add<HLoadNamedField>(optimized_map,
+ HObjectAccess::ForFixedArrayLength());
+ HValue* key = loop_builder.BeginBody(array_length,
+ graph()->GetConstant0(),
+ Token::GT);
+ {
+        // Iterate through the rest of the map backwards.
+        // Do not double-check the first entry.
+ HValue* second_entry_index =
+ Add<HConstant>(SharedFunctionInfo::kSecondEntryIndex);
+ IfBuilder restore_check(this);
+ restore_check.If<HCompareNumericAndBranch>(key, second_entry_index,
+ Token::EQ);
+ restore_check.Then();
+ {
+ // Store the unoptimized code
+ BuildInstallCode(js_function, shared_info);
+ loop_builder.Break();
+ }
+ restore_check.Else();
+ {
+ HValue* keyed_minus = AddInstruction(HSub::New(zone(), context(), key,
+ shared_function_entry_length));
+ HInstruction* keyed_lookup = Add<HLoadKeyed>(optimized_map,
+ keyed_minus, static_cast<HValue*>(NULL), FAST_ELEMENTS);
+ IfBuilder done_check(this);
+ done_check.If<HCompareObjectEqAndBranch>(native_context,
+ keyed_lookup);
+ done_check.Then();
+ {
+ // Hit: fetch the optimized code.
+ HValue* keyed_plus = AddInstruction(HAdd::New(zone(), context(),
+ keyed_minus, graph()->GetConstant1()));
+ HValue* code_object = Add<HLoadKeyed>(optimized_map,
+ keyed_plus, static_cast<HValue*>(NULL), FAST_ELEMENTS);
+ BuildInstallOptimizedCode(js_function, native_context, code_object);
+
+ // Fall out of the loop
+ loop_builder.Break();
+ }
+ done_check.Else();
+ done_check.End();
+ }
+ restore_check.End();
+ }
+ loop_builder.EndBody();
+ }
+ already_in.End();
+ }
+ is_optimized.End();
+}
+
+
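
BuildInstallFromOptimizedCodeMap above emits this logic as a Hydrogen graph, but what it encodes is a simple lookup: walk the shared function's optimized-code map backwards in three-field strides, and if an entry's native context matches, install its optimized code; otherwise fall back to the unoptimized code from the shared info. The same search in plain C++, with hypothetical types, for illustration only:

#include <cstddef>
#include <vector>

struct Context;
struct Code;

struct CodeMapEntry {  // one 3-element stride of the fixed array
  Context* native_context;
  Code* optimized_code;
  void* literals;
};

Code* LookupOptimizedCode(const std::vector<CodeMapEntry>& map,
                          Context* native_context) {
  for (std::size_t i = map.size(); i-- > 0;) {  // newest entries last
    if (map[i].native_context == native_context) {
      return map[i].optimized_code;  // hit: install this code
    }
  }
  return nullptr;  // miss: caller installs the unoptimized code
}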
+template<>
+HValue* CodeStubGraphBuilder<FastNewClosureStub>::BuildCodeStub() {
+ Counters* counters = isolate()->counters();
+ Factory* factory = isolate()->factory();
+ HInstruction* empty_fixed_array =
+ Add<HConstant>(factory->empty_fixed_array());
+ HValue* shared_info = GetParameter(0);
+
+ // Create a new closure from the given function info in new space
+ HValue* size = Add<HConstant>(JSFunction::kSize);
+ HInstruction* js_function = Add<HAllocate>(size, HType::JSObject(),
+ NOT_TENURED, JS_FUNCTION_TYPE);
+ AddIncrementCounter(counters->fast_new_closure_total(), context());
+
+ int map_index = Context::FunctionMapIndex(casted_stub()->language_mode(),
+ casted_stub()->is_generator());
+
+ // Compute the function map in the current native context and set that
+ // as the map of the allocated object.
+ HInstruction* native_context = BuildGetNativeContext();
+ HInstruction* map_slot_value = Add<HLoadNamedField>(native_context,
+ HObjectAccess::ForContextSlot(map_index));
+ Add<HStoreNamedField>(js_function, HObjectAccess::ForMap(), map_slot_value);
+
+ // Initialize the rest of the function.
+ Add<HStoreNamedField>(js_function, HObjectAccess::ForPropertiesPointer(),
+ empty_fixed_array);
+ Add<HStoreNamedField>(js_function, HObjectAccess::ForElementsPointer(),
+ empty_fixed_array);
+ Add<HStoreNamedField>(js_function, HObjectAccess::ForLiteralsPointer(),
+ empty_fixed_array);
+ Add<HStoreNamedField>(js_function, HObjectAccess::ForPrototypeOrInitialMap(),
+ graph()->GetConstantHole());
+ Add<HStoreNamedField>(js_function,
+ HObjectAccess::ForSharedFunctionInfoPointer(),
+ shared_info);
+ Add<HStoreNamedField>(js_function, HObjectAccess::ForFunctionContextPointer(),
+ shared_info);
+ Add<HStoreNamedField>(js_function, HObjectAccess::ForFunctionContextPointer(),
+ context());
+
+ // Initialize the code pointer in the function to be the one
+ // found in the shared function info object.
+ // But first check if there is an optimized version for our context.
+ if (FLAG_cache_optimized_code) {
+ BuildInstallFromOptimizedCodeMap(js_function, shared_info, native_context);
+ } else {
+ BuildInstallCode(js_function, shared_info);
+ }
+
+ return js_function;
+}
+
+
+Handle<Code> FastNewClosureStub::GenerateCode(Isolate* isolate) {
+ return DoGenerateCode(isolate, this);
}
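
Stripped of the Hydrogen plumbing, the stores in the FastNewClosureStub graph above initialize a fixed set of JSFunction fields. A plain-C++ paraphrase of the layout being filled in (hypothetical types, not V8's real object model):

struct Map;
struct FixedArray;
struct SharedFunctionInfo;
struct Context;
struct Code;

struct JSFunctionFields {
  Map* map;                        // function map for the language mode
  FixedArray* properties;          // empty_fixed_array
  FixedArray* elements;            // empty_fixed_array
  FixedArray* literals;            // empty_fixed_array
  void* prototype_or_initial_map;  // the hole, until a prototype is needed
  SharedFunctionInfo* shared;      // parameter 0 of the stub
  Context* context;                // the allocating context
  Code* code;                      // from the shared info, or the code map
};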
diff --git a/deps/v8/src/code-stubs.cc b/deps/v8/src/code-stubs.cc
index f656bf7d9..ace4af42a 100644
--- a/deps/v8/src/code-stubs.cc
+++ b/deps/v8/src/code-stubs.cc
@@ -46,7 +46,7 @@ CodeStubInterfaceDescriptor::CodeStubInterfaceDescriptor()
function_mode_(NOT_JS_FUNCTION_STUB_MODE),
register_params_(NULL),
deoptimization_handler_(NULL),
- miss_handler_(IC_Utility(IC::kUnreachable), Isolate::Current()),
+ miss_handler_(),
has_miss_handler_(false) { }
@@ -93,8 +93,7 @@ Handle<Code> CodeStub::GetCodeCopyFromTemplate(Isolate* isolate) {
}
-Handle<Code> PlatformCodeStub::GenerateCode() {
- Isolate* isolate = Isolate::Current();
+Handle<Code> PlatformCodeStub::GenerateCode(Isolate* isolate) {
Factory* factory = isolate->factory();
// Generate the new code.
@@ -137,14 +136,14 @@ Handle<Code> CodeStub::GetCode(Isolate* isolate) {
if (UseSpecialCache()
? FindCodeInSpecialCache(&code, isolate)
: FindCodeInCache(&code, isolate)) {
- ASSERT(IsPregenerated() == code->is_pregenerated());
+ ASSERT(IsPregenerated(isolate) == code->is_pregenerated());
return Handle<Code>(code);
}
{
HandleScope scope(isolate);
- Handle<Code> new_object = GenerateCode();
+ Handle<Code> new_object = GenerateCode(isolate);
new_object->set_major_key(MajorKey());
FinishCode(new_object);
RecordCodeGeneration(*new_object, isolate);
@@ -596,19 +595,9 @@ void KeyedStoreElementStub::Generate(MacroAssembler* masm) {
case FAST_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
case FAST_SMI_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS: {
- KeyedStoreStubCompiler::GenerateStoreFastElement(masm,
- is_js_array_,
- elements_kind_,
- store_mode_);
- }
- break;
+ case FAST_HOLEY_SMI_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
- KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(masm,
- is_js_array_,
- store_mode_);
- break;
case EXTERNAL_BYTE_ELEMENTS:
case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
case EXTERNAL_SHORT_ELEMENTS:
@@ -618,7 +607,7 @@ void KeyedStoreElementStub::Generate(MacroAssembler* masm) {
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
case EXTERNAL_PIXEL_ELEMENTS:
- KeyedStoreStubCompiler::GenerateStoreExternalArray(masm, elements_kind_);
+ UNREACHABLE();
break;
case DICTIONARY_ELEMENTS:
KeyedStoreStubCompiler::GenerateStoreDictionaryElement(masm);
@@ -742,8 +731,9 @@ void StubFailureTrampolineStub::GenerateAheadOfTime(Isolate* isolate) {
void ProfileEntryHookStub::EntryHookTrampoline(intptr_t function,
- intptr_t stack_pointer) {
- FunctionEntryHook entry_hook = Isolate::Current()->function_entry_hook();
+ intptr_t stack_pointer,
+ Isolate* isolate) {
+ FunctionEntryHook entry_hook = isolate->function_entry_hook();
ASSERT(entry_hook != NULL);
entry_hook(function, stack_pointer);
}
@@ -769,6 +759,12 @@ void ArrayConstructorStubBase::InstallDescriptors(Isolate* isolate) {
}
+void FastNewClosureStub::InstallDescriptors(Isolate* isolate) {
+ FastNewClosureStub stub(STRICT_MODE, false);
+ InstallDescriptor(isolate, &stub);
+}
+
+
ArrayConstructorStub::ArrayConstructorStub(Isolate* isolate)
: argument_count_(ANY) {
ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
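
EntryHookTrampoline above now receives the Isolate explicitly instead of looking it up. Conceptually the mechanism is an embedder callback fired on entry to generated code; a standalone sketch with a simplified signature (the real v8.h typedef differs):

#include <cstdint>

typedef void (*EntryHook)(uintptr_t function, uintptr_t stack_pointer);

struct IsolateState {
  EntryHook entry_hook = nullptr;  // registered by the embedder, if any
};

// Called from generated code on entry to every compiled function.
void EntryHookTrampoline(uintptr_t function, uintptr_t stack_pointer,
                         IsolateState* isolate) {
  if (isolate->entry_hook != nullptr) {
    isolate->entry_hook(function, stack_pointer);  // e.g. for profilers
  }
}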
diff --git a/deps/v8/src/code-stubs.h b/deps/v8/src/code-stubs.h
index c58acd6b1..946eb7696 100644
--- a/deps/v8/src/code-stubs.h
+++ b/deps/v8/src/code-stubs.h
@@ -102,7 +102,6 @@ namespace internal {
V(GetProperty) \
V(SetProperty) \
V(InvokeBuiltin) \
- V(RegExpCEntry) \
V(DirectCEntry)
#else
#define CODE_STUB_LIST_ARM(V)
@@ -159,14 +158,14 @@ class CodeStub BASE_EMBEDDED {
virtual ~CodeStub() {}
bool CompilingCallsToThisStubIsGCSafe(Isolate* isolate) {
- bool is_pregenerated = IsPregenerated();
+ bool is_pregenerated = IsPregenerated(isolate);
Code* code = NULL;
CHECK(!is_pregenerated || FindCodeInCache(&code, isolate));
return is_pregenerated;
}
// See comment above, where Instanceof is defined.
- virtual bool IsPregenerated() { return false; }
+ virtual bool IsPregenerated(Isolate* isolate) { return false; }
static void GenerateStubsAheadOfTime(Isolate* isolate);
static void GenerateFPStubs(Isolate* isolate);
@@ -205,7 +204,7 @@ class CodeStub BASE_EMBEDDED {
static bool CanUseFPRegisters();
// Generates the assembler code for the stub.
- virtual Handle<Code> GenerateCode() = 0;
+ virtual Handle<Code> GenerateCode(Isolate* isolate) = 0;
// Returns whether the code generated for this stub needs to be allocated as
@@ -263,7 +262,7 @@ class CodeStub BASE_EMBEDDED {
class PlatformCodeStub : public CodeStub {
public:
// Retrieve the code for the stub. Generate the code if needed.
- virtual Handle<Code> GenerateCode();
+ virtual Handle<Code> GenerateCode(Isolate* isolate);
virtual Code::Kind GetCodeKind() const { return Code::STUB; }
@@ -353,7 +352,7 @@ class HydrogenCodeStub : public CodeStub {
CodeStubInterfaceDescriptor* descriptor) = 0;
// Retrieve the code for the stub. Generate the code if needed.
- virtual Handle<Code> GenerateCode() = 0;
+ virtual Handle<Code> GenerateCode(Isolate* isolate) = 0;
virtual int NotMissMinorKey() = 0;
@@ -449,35 +448,11 @@ class NopRuntimeCallHelper : public RuntimeCallHelper {
};
-class StackCheckStub : public PlatformCodeStub {
- public:
- StackCheckStub() { }
-
- void Generate(MacroAssembler* masm);
-
- private:
- Major MajorKey() { return StackCheck; }
- int MinorKey() { return 0; }
-};
-
-
-class InterruptStub : public PlatformCodeStub {
- public:
- InterruptStub() { }
-
- void Generate(MacroAssembler* masm);
-
- private:
- Major MajorKey() { return Interrupt; }
- int MinorKey() { return 0; }
-};
-
-
class ToNumberStub: public HydrogenCodeStub {
public:
ToNumberStub() { }
- virtual Handle<Code> GenerateCode();
+ virtual Handle<Code> GenerateCode(Isolate* isolate);
virtual void InitializeInterfaceDescriptor(
Isolate* isolate,
@@ -489,20 +464,29 @@ class ToNumberStub: public HydrogenCodeStub {
};
-class FastNewClosureStub : public PlatformCodeStub {
+class FastNewClosureStub : public HydrogenCodeStub {
public:
explicit FastNewClosureStub(LanguageMode language_mode, bool is_generator)
: language_mode_(language_mode),
is_generator_(is_generator) { }
- void Generate(MacroAssembler* masm);
+ virtual Handle<Code> GenerateCode(Isolate* isolate);
+
+ virtual void InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor);
+
+ static void InstallDescriptors(Isolate* isolate);
+
+ LanguageMode language_mode() const { return language_mode_; }
+ bool is_generator() const { return is_generator_; }
private:
class StrictModeBits: public BitField<bool, 0, 1> {};
class IsGeneratorBits: public BitField<bool, 1, 1> {};
Major MajorKey() { return FastNewClosure; }
- int MinorKey() {
+ int NotMissMinorKey() {
return StrictModeBits::encode(language_mode_ != CLASSIC_MODE) |
IsGeneratorBits::encode(is_generator_);
}
@@ -554,7 +538,7 @@ class StoreGlobalStub : public HydrogenCodeStub {
IsConstantBits::encode(is_constant);
}
- virtual Handle<Code> GenerateCode();
+ virtual Handle<Code> GenerateCode(Isolate* isolate);
virtual void InitializeInterfaceDescriptor(
Isolate* isolate,
@@ -636,7 +620,7 @@ class FastCloneShallowArrayStub : public HydrogenCodeStub {
return LAST_ELEMENTS_KIND;
}
- virtual Handle<Code> GenerateCode();
+ virtual Handle<Code> GenerateCode(Isolate* isolate);
virtual void InitializeInterfaceDescriptor(
Isolate* isolate,
@@ -676,7 +660,7 @@ class FastCloneShallowObjectStub : public HydrogenCodeStub {
int length() const { return length_; }
- virtual Handle<Code> GenerateCode();
+ virtual Handle<Code> GenerateCode(Isolate* isolate);
virtual void InitializeInterfaceDescriptor(
Isolate* isolate,
@@ -696,9 +680,9 @@ class CreateAllocationSiteStub : public HydrogenCodeStub {
public:
explicit CreateAllocationSiteStub() { }
- virtual Handle<Code> GenerateCode();
+ virtual Handle<Code> GenerateCode(Isolate* isolate);
- virtual bool IsPregenerated() { return true; }
+ virtual bool IsPregenerated(Isolate* isolate) V8_OVERRIDE { return true; }
static void GenerateAheadOfTime(Isolate* isolate);
@@ -752,6 +736,13 @@ class InstanceofStub: public PlatformCodeStub {
};
+enum AllocationSiteOverrideMode {
+ DONT_OVERRIDE,
+ DISABLE_ALLOCATION_SITES,
+ LAST_ALLOCATION_SITE_OVERRIDE_MODE = DISABLE_ALLOCATION_SITES
+};
+
+
class ArrayConstructorStub: public PlatformCodeStub {
public:
enum ArgumentCountKey { ANY, NONE, ONE, MORE_THAN_ONE };
@@ -761,6 +752,9 @@ class ArrayConstructorStub: public PlatformCodeStub {
void Generate(MacroAssembler* masm);
private:
+ void GenerateDispatchToArrayStub(MacroAssembler* masm,
+ AllocationSiteOverrideMode mode);
+
virtual CodeStub::Major MajorKey() { return ArrayConstructor; }
virtual int MinorKey() { return argument_count_; }
@@ -913,7 +907,7 @@ class LoadFieldStub: public HandlerStub {
Initialize(Code::LOAD_IC, inobject, index, representation);
}
- virtual Handle<Code> GenerateCode();
+ virtual Handle<Code> GenerateCode(Isolate* isolate);
virtual void InitializeInterfaceDescriptor(
Isolate* isolate,
@@ -982,7 +976,7 @@ class KeyedLoadFieldStub: public LoadFieldStub {
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor);
- virtual Handle<Code> GenerateCode();
+ virtual Handle<Code> GenerateCode(Isolate* isolate);
private:
virtual CodeStub::Major MajorKey() { return KeyedLoadField; }
@@ -1253,7 +1247,7 @@ class CompareNilICStub : public HydrogenCodeStub {
virtual Code::Kind GetCodeKind() const { return Code::COMPARE_NIL_IC; }
- Handle<Code> GenerateCode();
+ virtual Handle<Code> GenerateCode(Isolate* isolate);
virtual Code::ExtraICState GetExtraICState() {
return NilValueField::encode(nil_value_) |
@@ -1321,7 +1315,7 @@ class CEntryStub : public PlatformCodeStub {
// time, so it's OK to call it from other stubs that can't cope with GC during
// their code generation. On machines that always have gp registers (x64) we
// can generate both variants ahead of time.
- virtual bool IsPregenerated();
+ virtual bool IsPregenerated(Isolate* isolate) V8_OVERRIDE;
static void GenerateAheadOfTime(Isolate* isolate);
private:
@@ -1333,6 +1327,7 @@ class CEntryStub : public PlatformCodeStub {
bool always_allocate_scope);
// Number of pointers/values returned.
+ Isolate* isolate_;
const int result_size_;
SaveFPRegsMode save_doubles_;
@@ -1704,11 +1699,13 @@ class DoubleToIStub : public PlatformCodeStub {
DoubleToIStub(Register source,
Register destination,
int offset,
- bool is_truncating) : bit_field_(0) {
+ bool is_truncating,
+ bool skip_fastpath = false) : bit_field_(0) {
bit_field_ = SourceRegisterBits::encode(source.code_) |
DestinationRegisterBits::encode(destination.code_) |
OffsetBits::encode(offset) |
- IsTruncatingBits::encode(is_truncating);
+ IsTruncatingBits::encode(is_truncating) |
+ SkipFastPathBits::encode(skip_fastpath);
}
Register source() {
@@ -1725,12 +1722,18 @@ class DoubleToIStub : public PlatformCodeStub {
return IsTruncatingBits::decode(bit_field_);
}
+ bool skip_fastpath() {
+ return SkipFastPathBits::decode(bit_field_);
+ }
+
int offset() {
return OffsetBits::decode(bit_field_);
}
void Generate(MacroAssembler* masm);
+ virtual bool SometimesSetsUpAFrame() { return false; }
+
private:
static const int kBitsPerRegisterNumber = 6;
STATIC_ASSERT((1L << kBitsPerRegisterNumber) >= Register::kNumRegisters);
@@ -1743,6 +1746,8 @@ class DoubleToIStub : public PlatformCodeStub {
public BitField<bool, 2 * kBitsPerRegisterNumber, 1> {}; // NOLINT
class OffsetBits:
public BitField<int, 2 * kBitsPerRegisterNumber + 1, 3> {}; // NOLINT
+ class SkipFastPathBits:
+ public BitField<int, 2 * kBitsPerRegisterNumber + 4, 1> {}; // NOLINT
Major MajorKey() { return DoubleToI; }
int MinorKey() { return bit_field_; }
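
DoubleToIStub packs both register codes, the offset, the truncation flag, and the new skip-fastpath flag into a single integer through BitField helpers, and MinorKey() returns that word so each distinct configuration gets its own cached stub. A standalone sketch of the helper and this stub's layout (V8's real BitField is similar in spirit):

#include <cstdint>

template <class T, int kShift, int kSize>
struct BitField {
  // Caller must keep |value| within kSize bits; V8's version asserts this.
  static uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << kShift;
  }
  static T decode(uint32_t field) {
    uint32_t mask = (1u << kSize) - 1u;
    return static_cast<T>((field >> kShift) & mask);
  }
};

// Layout mirroring the stub: two 6-bit register numbers, then the flags.
typedef BitField<int, 0, 6>   SourceRegisterBits;
typedef BitField<int, 6, 6>   DestinationRegisterBits;
typedef BitField<bool, 12, 1> IsTruncatingBits;
typedef BitField<int, 13, 3>  OffsetBits;
typedef BitField<bool, 16, 1> SkipFastPathBits;

uint32_t MakeKey(int src, int dst, bool truncating, int offset, bool skip) {
  return SourceRegisterBits::encode(src) |
         DestinationRegisterBits::encode(dst) |
         IsTruncatingBits::encode(truncating) |
         OffsetBits::encode(offset) |
         SkipFastPathBits::encode(skip);
}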
@@ -1768,7 +1773,7 @@ class KeyedLoadFastElementStub : public HydrogenCodeStub {
return ElementsKindBits::decode(bit_field_);
}
- virtual Handle<Code> GenerateCode();
+ virtual Handle<Code> GenerateCode(Isolate* isolate);
virtual void InitializeInterfaceDescriptor(
Isolate* isolate,
@@ -1808,7 +1813,7 @@ class KeyedStoreFastElementStub : public HydrogenCodeStub {
return StoreModeBits::decode(bit_field_);
}
- virtual Handle<Code> GenerateCode();
+ virtual Handle<Code> GenerateCode(Isolate* isolate);
virtual void InitializeInterfaceDescriptor(
Isolate* isolate,
@@ -1843,7 +1848,7 @@ class TransitionElementsKindStub : public HydrogenCodeStub {
return ToKindBits::decode(bit_field_);
}
- virtual Handle<Code> GenerateCode();
+ virtual Handle<Code> GenerateCode(Isolate* isolate);
virtual void InitializeInterfaceDescriptor(
Isolate* isolate,
@@ -1868,13 +1873,6 @@ enum ContextCheckMode {
};
-enum AllocationSiteOverrideMode {
- DONT_OVERRIDE,
- DISABLE_ALLOCATION_SITES,
- LAST_ALLOCATION_SITE_OVERRIDE_MODE = DISABLE_ALLOCATION_SITES
-};
-
-
class ArrayConstructorStubBase : public HydrogenCodeStub {
public:
ArrayConstructorStubBase(ElementsKind kind, ContextCheckMode context_mode,
@@ -1882,7 +1880,8 @@ class ArrayConstructorStubBase : public HydrogenCodeStub {
// It only makes sense to override local allocation site behavior
// if there is a difference between the global allocation site policy
// for an ElementsKind and the desired usage of the stub.
- ASSERT(override_mode != DISABLE_ALLOCATION_SITES ||
+ ASSERT(!(FLAG_track_allocation_sites &&
+ override_mode == DISABLE_ALLOCATION_SITES) ||
AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE);
bit_field_ = ElementsKindBits::encode(kind) |
AllocationSiteOverrideModeBits::encode(override_mode) |
@@ -1901,7 +1900,7 @@ class ArrayConstructorStubBase : public HydrogenCodeStub {
return ContextCheckModeBits::decode(bit_field_);
}
- virtual bool IsPregenerated() {
+ virtual bool IsPregenerated(Isolate* isolate) V8_OVERRIDE {
// We only pre-generate stubs that verify correct context
return context_mode() == CONTEXT_CHECK_REQUIRED;
}
@@ -1939,7 +1938,7 @@ class ArrayNoArgumentConstructorStub : public ArrayConstructorStubBase {
: ArrayConstructorStubBase(kind, context_mode, override_mode) {
}
- virtual Handle<Code> GenerateCode();
+ virtual Handle<Code> GenerateCode(Isolate* isolate);
virtual void InitializeInterfaceDescriptor(
Isolate* isolate,
@@ -1961,7 +1960,7 @@ class ArraySingleArgumentConstructorStub : public ArrayConstructorStubBase {
: ArrayConstructorStubBase(kind, context_mode, override_mode) {
}
- virtual Handle<Code> GenerateCode();
+ virtual Handle<Code> GenerateCode(Isolate* isolate);
virtual void InitializeInterfaceDescriptor(
Isolate* isolate,
@@ -1983,7 +1982,7 @@ class ArrayNArgumentsConstructorStub : public ArrayConstructorStubBase {
: ArrayConstructorStubBase(kind, context_mode, override_mode) {
}
- virtual Handle<Code> GenerateCode();
+ virtual Handle<Code> GenerateCode(Isolate* isolate);
virtual void InitializeInterfaceDescriptor(
Isolate* isolate,
@@ -2002,7 +2001,7 @@ class InternalArrayConstructorStubBase : public HydrogenCodeStub {
kind_ = kind;
}
- virtual bool IsPregenerated() { return true; }
+ virtual bool IsPregenerated(Isolate* isolate) V8_OVERRIDE { return true; }
static void GenerateStubsAheadOfTime(Isolate* isolate);
static void InstallDescriptors(Isolate* isolate);
@@ -2026,7 +2025,7 @@ class InternalArrayNoArgumentConstructorStub : public
explicit InternalArrayNoArgumentConstructorStub(ElementsKind kind)
: InternalArrayConstructorStubBase(kind) { }
- virtual Handle<Code> GenerateCode();
+ virtual Handle<Code> GenerateCode(Isolate* isolate);
virtual void InitializeInterfaceDescriptor(
Isolate* isolate,
@@ -2045,7 +2044,7 @@ class InternalArraySingleArgumentConstructorStub : public
explicit InternalArraySingleArgumentConstructorStub(ElementsKind kind)
: InternalArrayConstructorStubBase(kind) { }
- virtual Handle<Code> GenerateCode();
+ virtual Handle<Code> GenerateCode(Isolate* isolate);
virtual void InitializeInterfaceDescriptor(
Isolate* isolate,
@@ -2064,7 +2063,7 @@ class InternalArrayNArgumentsConstructorStub : public
explicit InternalArrayNArgumentsConstructorStub(ElementsKind kind)
: InternalArrayConstructorStubBase(kind) { }
- virtual Handle<Code> GenerateCode();
+ virtual Handle<Code> GenerateCode(Isolate* isolate);
virtual void InitializeInterfaceDescriptor(
Isolate* isolate,
@@ -2153,7 +2152,7 @@ class ToBooleanStub: public HydrogenCodeStub {
bool UpdateStatus(Handle<Object> object);
Types GetTypes() { return types_; }
- virtual Handle<Code> GenerateCode();
+ virtual Handle<Code> GenerateCode(Isolate* isolate);
virtual void InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor);
@@ -2213,7 +2212,7 @@ class ElementsTransitionAndStoreStub : public HydrogenCodeStub {
bool is_jsarray() const { return is_jsarray_; }
KeyedAccessStoreMode store_mode() const { return store_mode_; }
- Handle<Code> GenerateCode();
+ virtual Handle<Code> GenerateCode(Isolate* isolate);
void InitializeInterfaceDescriptor(
Isolate* isolate,
@@ -2266,7 +2265,7 @@ class StubFailureTrampolineStub : public PlatformCodeStub {
explicit StubFailureTrampolineStub(StubFunctionMode function_mode)
: fp_registers_(CanUseFPRegisters()), function_mode_(function_mode) {}
- virtual bool IsPregenerated() { return true; }
+ virtual bool IsPregenerated(Isolate* isolate) V8_OVERRIDE { return true; }
static void GenerateAheadOfTime(Isolate* isolate);
@@ -2301,7 +2300,8 @@ class ProfileEntryHookStub : public PlatformCodeStub {
private:
static void EntryHookTrampoline(intptr_t function,
- intptr_t stack_pointer);
+ intptr_t stack_pointer,
+ Isolate* isolate);
Major MajorKey() { return ProfileEntryHook; }
int MinorKey() { return 0; }
diff --git a/deps/v8/src/codegen.cc b/deps/v8/src/codegen.cc
index 2031b321a..d33c7f06b 100644
--- a/deps/v8/src/codegen.cc
+++ b/deps/v8/src/codegen.cc
@@ -89,12 +89,12 @@ void CodeGenerator::MakeCodePrologue(CompilationInfo* info, const char* kind) {
#ifdef DEBUG
if (!info->IsStub() && print_source) {
PrintF("--- Source from AST ---\n%s\n",
- PrettyPrinter().PrintProgram(info->function()));
+ PrettyPrinter(info->isolate()).PrintProgram(info->function()));
}
if (!info->IsStub() && print_ast) {
PrintF("--- AST ---\n%s\n",
- AstPrinter().PrintProgram(info->function()));
+ AstPrinter(info->isolate()).PrintProgram(info->function()));
}
#endif // DEBUG
}
@@ -114,11 +114,9 @@ Handle<Code> CodeGenerator::MakeCodeEpilogue(MacroAssembler* masm,
Handle<Code> code =
isolate->factory()->NewCode(desc, flags, masm->CodeObject(),
false, is_crankshafted);
- if (!code.is_null()) {
- isolate->counters()->total_compiled_code_size()->Increment(
- code->instruction_size());
- code->set_prologue_offset(info->prologue_offset());
- }
+ isolate->counters()->total_compiled_code_size()->Increment(
+ code->instruction_size());
+ code->set_prologue_offset(info->prologue_offset());
return code;
}
@@ -126,7 +124,7 @@ Handle<Code> CodeGenerator::MakeCodeEpilogue(MacroAssembler* masm,
void CodeGenerator::PrintCode(Handle<Code> code, CompilationInfo* info) {
#ifdef ENABLE_DISASSEMBLER
AllowDeferredHandleDereference allow_deference_for_print_code;
- bool print_code = Isolate::Current()->bootstrapper()->IsActive()
+ bool print_code = info->isolate()->bootstrapper()->IsActive()
? FLAG_print_builtin_code
: (FLAG_print_code ||
(info->IsStub() && FLAG_print_code_stubs) ||
@@ -173,9 +171,8 @@ void CodeGenerator::PrintCode(Handle<Code> code, CompilationInfo* info) {
}
-bool CodeGenerator::ShouldGenerateLog(Expression* type) {
+bool CodeGenerator::ShouldGenerateLog(Isolate* isolate, Expression* type) {
ASSERT(type != NULL);
- Isolate* isolate = Isolate::Current();
if (!isolate->logger()->is_logging() &&
!isolate->cpu_profiler()->is_profiling()) {
return false;
diff --git a/deps/v8/src/compiler.cc b/deps/v8/src/compiler.cc
index f6e5daac8..47634ec22 100644
--- a/deps/v8/src/compiler.cc
+++ b/deps/v8/src/compiler.cc
@@ -58,7 +58,8 @@ CompilationInfo::CompilationInfo(Handle<Script> script,
Zone* zone)
: flags_(LanguageModeField::encode(CLASSIC_MODE)),
script_(script),
- osr_ast_id_(BailoutId::None()) {
+ osr_ast_id_(BailoutId::None()),
+ osr_pc_offset_(0) {
Initialize(script->GetIsolate(), BASE, zone);
}
@@ -68,7 +69,8 @@ CompilationInfo::CompilationInfo(Handle<SharedFunctionInfo> shared_info,
: flags_(LanguageModeField::encode(CLASSIC_MODE) | IsLazy::encode(true)),
shared_info_(shared_info),
script_(Handle<Script>(Script::cast(shared_info->script()))),
- osr_ast_id_(BailoutId::None()) {
+ osr_ast_id_(BailoutId::None()),
+ osr_pc_offset_(0) {
Initialize(script_->GetIsolate(), BASE, zone);
}
@@ -80,7 +82,8 @@ CompilationInfo::CompilationInfo(Handle<JSFunction> closure,
shared_info_(Handle<SharedFunctionInfo>(closure->shared())),
script_(Handle<Script>(Script::cast(shared_info_->script()))),
context_(closure->context()),
- osr_ast_id_(BailoutId::None()) {
+ osr_ast_id_(BailoutId::None()),
+ osr_pc_offset_(0) {
Initialize(script_->GetIsolate(), BASE, zone);
}
@@ -90,7 +93,8 @@ CompilationInfo::CompilationInfo(HydrogenCodeStub* stub,
Zone* zone)
: flags_(LanguageModeField::encode(CLASSIC_MODE) |
IsLazy::encode(true)),
- osr_ast_id_(BailoutId::None()) {
+ osr_ast_id_(BailoutId::None()),
+ osr_pc_offset_(0) {
Initialize(isolate, STUB, zone);
code_stub_ = stub;
}
@@ -119,7 +123,7 @@ void CompilationInfo::Initialize(Isolate* isolate,
mode_ = STUB;
return;
}
- mode_ = V8::UseCrankshaft() ? mode : NONOPT;
+ mode_ = isolate->use_crankshaft() ? mode : NONOPT;
abort_due_to_dependency_ = false;
if (script_->type()->value() == Script::TYPE_NATIVE) {
MarkAsNative();
@@ -226,18 +230,12 @@ bool CompilationInfo::ShouldSelfOptimize() {
return FLAG_self_optimization &&
FLAG_crankshaft &&
!function()->flags()->Contains(kDontSelfOptimize) &&
- !function()->flags()->Contains(kDontOptimize) &&
+ !function()->dont_optimize() &&
function()->scope()->AllowsLazyCompilation() &&
(shared_info().is_null() || !shared_info()->optimization_disabled());
}
-void CompilationInfo::AbortOptimization() {
- Handle<Code> code(shared_info()->code());
- SetCode(code);
-}
-
-
// Determine whether to use the full compiler for all code. If the flag
// --always-full-compiler is specified this is the case. For the virtual frame
// based compiler the full compiler is also used if a debugger is connected, as
@@ -248,7 +246,7 @@ void CompilationInfo::AbortOptimization() {
// break points has actually been set.
static bool IsDebuggerActive(Isolate* isolate) {
#ifdef ENABLE_DEBUGGER_SUPPORT
- return V8::UseCrankshaft() ?
+ return isolate->use_crankshaft() ?
isolate->debug()->has_break_points() :
isolate->debugger()->IsDebuggerActive();
#else
@@ -266,10 +264,9 @@ void OptimizingCompiler::RecordOptimizationStats() {
Handle<JSFunction> function = info()->closure();
int opt_count = function->shared()->opt_count();
function->shared()->set_opt_count(opt_count + 1);
- double ms_creategraph =
- static_cast<double>(time_taken_to_create_graph_) / 1000;
- double ms_optimize = static_cast<double>(time_taken_to_optimize_) / 1000;
- double ms_codegen = static_cast<double>(time_taken_to_codegen_) / 1000;
+ double ms_creategraph = time_taken_to_create_graph_.InMillisecondsF();
+ double ms_optimize = time_taken_to_optimize_.InMillisecondsF();
+ double ms_codegen = time_taken_to_codegen_.InMillisecondsF();
if (FLAG_trace_opt) {
PrintF("[optimizing ");
function->ShortPrint();
@@ -317,14 +314,13 @@ static bool MakeCrankshaftCode(CompilationInfo* info) {
OptimizingCompiler::Status OptimizingCompiler::CreateGraph() {
- ASSERT(V8::UseCrankshaft());
+ ASSERT(isolate()->use_crankshaft());
ASSERT(info()->IsOptimizing());
ASSERT(!info()->IsCompilingForDebugging());
// We should never arrive here if there is no code object on the
// shared function object.
- Handle<Code> code(info()->shared_info()->code());
- ASSERT(code->kind() == Code::FUNCTION);
+ ASSERT(info()->shared_info()->code()->kind() == Code::FUNCTION);
// We should never arrive here if optimization has been disabled on the
// shared function info.
@@ -334,7 +330,7 @@ OptimizingCompiler::Status OptimizingCompiler::CreateGraph() {
// to use the Hydrogen-based optimizing compiler. We already have
// generated code for this from the shared function object.
if (AlwaysFullCompiler(isolate())) {
- info()->SetCode(code);
+ info()->AbortOptimization();
return SetLastStatus(BAILED_OUT);
}
@@ -362,16 +358,16 @@ OptimizingCompiler::Status OptimizingCompiler::CreateGraph() {
}
const int locals_limit = LUnallocated::kMaxFixedSlotIndex;
- if (!info()->osr_ast_id().IsNone() &&
+ if (info()->is_osr() &&
scope->num_parameters() + 1 + scope->num_stack_slots() > locals_limit) {
info()->set_bailout_reason(kTooManyParametersLocals);
return AbortOptimization();
}
// Take --hydrogen-filter into account.
- if (!info()->closure()->PassesHydrogenFilter()) {
- info()->SetCode(code);
- return SetLastStatus(BAILED_OUT);
+ if (!info()->closure()->PassesFilter(FLAG_hydrogen_filter)) {
+ info()->AbortOptimization();
+ return SetLastStatus(BAILED_OUT);
}
// Recompile the unoptimized version of the code if the current version
@@ -380,9 +376,9 @@ OptimizingCompiler::Status OptimizingCompiler::CreateGraph() {
// performance of the hydrogen-based compiler.
bool should_recompile = !info()->shared_info()->has_deoptimization_support();
if (should_recompile || FLAG_hydrogen_stats) {
- int64_t start_ticks = 0;
+ ElapsedTimer timer;
if (FLAG_hydrogen_stats) {
- start_ticks = OS::Ticks();
+ timer.Start();
}
CompilationInfoWithZone unoptimized(info()->shared_info());
// Note that we use the same AST that we will use for generating the
@@ -401,8 +397,7 @@ OptimizingCompiler::Status OptimizingCompiler::CreateGraph() {
Logger::LAZY_COMPILE_TAG, &unoptimized, shared);
}
if (FLAG_hydrogen_stats) {
- int64_t ticks = OS::Ticks() - start_ticks;
- isolate()->GetHStatistics()->IncrementFullCodeGen(ticks);
+ isolate()->GetHStatistics()->IncrementFullCodeGen(timer.Elapsed());
}
}
@@ -411,7 +406,7 @@ OptimizingCompiler::Status OptimizingCompiler::CreateGraph() {
// optimizable marker in the code object and optimize anyway. This
// is safe as long as the unoptimized code has deoptimization
// support.
- ASSERT(FLAG_always_opt || code->optimizable());
+ ASSERT(FLAG_always_opt || info()->shared_info()->code()->optimizable());
ASSERT(info()->shared_info()->has_deoptimization_support());
if (FLAG_trace_hydrogen) {
@@ -503,12 +498,14 @@ OptimizingCompiler::Status OptimizingCompiler::GenerateAndInstallCode() {
info()->SetCode(optimized_code);
}
RecordOptimizationStats();
+ // Add to the weak list of optimized code objects.
+ info()->context()->native_context()->AddOptimizedCode(*info()->code());
return SetLastStatus(SUCCEEDED);
}
static bool GenerateCode(CompilationInfo* info) {
- bool is_optimizing = V8::UseCrankshaft() &&
+ bool is_optimizing = info->isolate()->use_crankshaft() &&
!info->IsCompilingForDebugging() &&
info->IsOptimizing();
if (is_optimizing) {
@@ -728,7 +725,7 @@ Handle<SharedFunctionInfo> Compiler::Compile(Handle<String> source,
}
script->set_is_shared_cross_origin(is_shared_cross_origin);
- script->set_data(script_data.is_null() ? HEAP->undefined_value()
+ script->set_data(script_data.is_null() ? isolate->heap()->undefined_value()
: *script_data);
// Compile the function and add it to the cache.
@@ -745,8 +742,8 @@ Handle<SharedFunctionInfo> Compiler::Compile(Handle<String> source,
compilation_cache->PutScript(source, context, result);
}
} else {
- if (result->ic_age() != HEAP->global_ic_age()) {
- result->ResetForNewContext(HEAP->global_ic_age());
+ if (result->ic_age() != isolate->heap()->global_ic_age()) {
+ result->ResetForNewContext(isolate->heap()->global_ic_age());
}
}
@@ -808,8 +805,8 @@ Handle<SharedFunctionInfo> Compiler::CompileEval(Handle<String> source,
}
}
} else {
- if (result->ic_age() != HEAP->global_ic_age()) {
- result->ResetForNewContext(HEAP->global_ic_age());
+ if (result->ic_age() != isolate->heap()->global_ic_age()) {
+ result->ResetForNewContext(isolate->heap()->global_ic_age());
}
}
@@ -843,18 +840,18 @@ static bool InstallFullCode(CompilationInfo* info) {
// Check the function has compiled code.
ASSERT(shared->is_compiled());
- shared->set_dont_optimize(lit->flags()->Contains(kDontOptimize));
+ shared->set_dont_optimize_reason(lit->dont_optimize_reason());
shared->set_dont_inline(lit->flags()->Contains(kDontInline));
shared->set_ast_node_count(lit->ast_node_count());
- if (V8::UseCrankshaft() &&
+ if (info->isolate()->use_crankshaft() &&
!function.is_null() &&
!shared->optimization_disabled()) {
// If we're asked to always optimize, we compile the optimized
// version of the function right away - unless the debugger is
// active as it makes no sense to compile optimized code then.
if (FLAG_always_opt &&
- !Isolate::Current()->DebuggerHasBreakPoints()) {
+ !info->isolate()->DebuggerHasBreakPoints()) {
CompilationInfoWithZone optimized(function);
optimized.SetOptimizing(BailoutId::None());
return Compiler::CompileLazy(&optimized);
@@ -884,9 +881,10 @@ static void InstallCodeCommon(CompilationInfo* info) {
static void InsertCodeIntoOptimizedCodeMap(CompilationInfo* info) {
Handle<Code> code = info->code();
- if (FLAG_cache_optimized_code &&
- info->osr_ast_id().IsNone() &&
- code->kind() == Code::OPTIMIZED_FUNCTION) {
+ if (code->kind() != Code::OPTIMIZED_FUNCTION) return; // Nothing to do.
+
+ // Cache non-OSR optimized code.
+ if (FLAG_cache_optimized_code && !info->is_osr()) {
Handle<JSFunction> function = info->closure();
Handle<SharedFunctionInfo> shared(function->shared());
Handle<FixedArray> literals(function->literals());
@@ -898,9 +896,10 @@ static void InsertCodeIntoOptimizedCodeMap(CompilationInfo* info) {
static bool InstallCodeFromOptimizedCodeMap(CompilationInfo* info) {
- if (FLAG_cache_optimized_code &&
- info->osr_ast_id().IsNone() &&
- info->IsOptimizing()) {
+ if (!info->IsOptimizing()) return false; // Nothing to look up.
+
+ // Lookup non-OSR optimized code.
+ if (FLAG_cache_optimized_code && !info->is_osr()) {
Handle<SharedFunctionInfo> shared = info->shared_info();
Handle<JSFunction> function = info->closure();
ASSERT(!function.is_null());
@@ -956,12 +955,15 @@ bool Compiler::CompileLazy(CompilationInfo* info) {
InstallCodeCommon(info);
if (info->IsOptimizing()) {
+ // Optimized code successfully created.
Handle<Code> code = info->code();
ASSERT(shared->scope_info() != ScopeInfo::Empty(isolate));
+ // TODO(titzer): Only replace the code if it was not an OSR compile.
info->closure()->ReplaceCode(*code);
InsertCodeIntoOptimizedCodeMap(info);
return true;
- } else {
+ } else if (!info->is_osr()) {
+ // Compilation failed. Replace with full code if not OSR compile.
return InstallFullCode(info);
}
}
@@ -972,38 +974,55 @@ bool Compiler::CompileLazy(CompilationInfo* info) {
}
-void Compiler::RecompileParallel(Handle<JSFunction> closure) {
- ASSERT(closure->IsMarkedForParallelRecompilation());
+bool Compiler::RecompileConcurrent(Handle<JSFunction> closure,
+ uint32_t osr_pc_offset) {
+ bool compiling_for_osr = (osr_pc_offset != 0);
Isolate* isolate = closure->GetIsolate();
- // Here we prepare compile data for the parallel recompilation thread, but
+ // Here we prepare compile data for the concurrent recompilation thread, but
// this still happens synchronously and interrupts execution.
Logger::TimerEventScope timer(
isolate, Logger::TimerEventScope::v8_recompile_synchronous);
if (!isolate->optimizing_compiler_thread()->IsQueueAvailable()) {
- if (FLAG_trace_parallel_recompilation) {
+ if (FLAG_trace_concurrent_recompilation) {
PrintF(" ** Compilation queue full, will retry optimizing ");
closure->PrintName();
PrintF(" on next run.\n");
}
- return;
+ return false;
}
SmartPointer<CompilationInfo> info(new CompilationInfoWithZone(closure));
+ Handle<SharedFunctionInfo> shared = info->shared_info();
+
+ if (compiling_for_osr) {
+ BailoutId osr_ast_id =
+ shared->code()->TranslatePcOffsetToAstId(osr_pc_offset);
+ ASSERT(!osr_ast_id.IsNone());
+ info->SetOptimizing(osr_ast_id);
+ info->set_osr_pc_offset(osr_pc_offset);
+
+ if (FLAG_trace_osr) {
+ PrintF("[COSR - attempt to queue ");
+ closure->PrintName();
+ PrintF(" at AST id %d]\n", osr_ast_id.ToInt());
+ }
+ } else {
+ info->SetOptimizing(BailoutId::None());
+ }
+
VMState<COMPILER> state(isolate);
PostponeInterruptsScope postpone(isolate);
- Handle<SharedFunctionInfo> shared = info->shared_info();
int compiled_size = shared->end_position() - shared->start_position();
isolate->counters()->total_compile_size()->Increment(compiled_size);
- info->SetOptimizing(BailoutId::None());
{
CompilationHandleScope handle_scope(*info);
- if (InstallCodeFromOptimizedCodeMap(*info)) {
- return;
+ if (!compiling_for_osr && InstallCodeFromOptimizedCodeMap(*info)) {
+ return true;
}
if (Parser::Parse(*info)) {
@@ -1020,6 +1039,8 @@ void Compiler::RecompileParallel(Handle<JSFunction> closure) {
info.Detach();
shared->code()->set_profiler_ticks(0);
isolate->optimizing_compiler_thread()->QueueForOptimization(compiler);
+ ASSERT(!isolate->has_pending_exception());
+ return true;
} else if (status == OptimizingCompiler::BAILED_OUT) {
isolate->clear_pending_exception();
InstallFullCode(*info);
@@ -1028,38 +1049,26 @@ void Compiler::RecompileParallel(Handle<JSFunction> closure) {
}
}
- if (shared->code()->back_edges_patched_for_osr()) {
- // At this point we either put the function on recompilation queue or
- // aborted optimization. In either case we want to continue executing
- // the unoptimized code without running into OSR. If the unoptimized
- // code has been patched for OSR, unpatch it.
- InterruptStub interrupt_stub;
- Handle<Code> interrupt_code = interrupt_stub.GetCode(isolate);
- Handle<Code> replacement_code =
- isolate->builtins()->OnStackReplacement();
- Deoptimizer::RevertInterruptCode(shared->code(),
- *interrupt_code,
- *replacement_code);
- }
-
if (isolate->has_pending_exception()) isolate->clear_pending_exception();
+ return false;
}
-void Compiler::InstallOptimizedCode(OptimizingCompiler* optimizing_compiler) {
+Handle<Code> Compiler::InstallOptimizedCode(
+ OptimizingCompiler* optimizing_compiler) {
SmartPointer<CompilationInfo> info(optimizing_compiler->info());
// The function may have already been optimized by OSR. Simply continue.
// Except when OSR already disabled optimization for some reason.
if (info->shared_info()->optimization_disabled()) {
info->AbortOptimization();
InstallFullCode(*info);
- if (FLAG_trace_parallel_recompilation) {
+ if (FLAG_trace_concurrent_recompilation) {
PrintF(" ** aborting optimization for ");
info->closure()->PrintName();
PrintF(" as it has been disabled.\n");
}
- ASSERT(!info->closure()->IsMarkedForInstallingRecompiledCode());
- return;
+ ASSERT(!info->closure()->IsInRecompileQueue());
+ return Handle<Code>::null();
}
Isolate* isolate = info->isolate();
@@ -1093,19 +1102,21 @@ void Compiler::InstallOptimizedCode(OptimizingCompiler* optimizing_compiler) {
info->closure()->context()->native_context()) == -1) {
InsertCodeIntoOptimizedCodeMap(*info);
}
- if (FLAG_trace_parallel_recompilation) {
+ if (FLAG_trace_concurrent_recompilation) {
PrintF(" ** Optimized code for ");
info->closure()->PrintName();
PrintF(" installed.\n");
}
} else {
- info->SetCode(Handle<Code>(info->shared_info()->code()));
+ info->AbortOptimization();
InstallFullCode(*info);
}
// Optimized code is finally replacing unoptimized code. Reset the latter's
// profiler ticks to prevent too soon re-opt after a deopt.
info->shared_info()->code()->set_profiler_ticks(0);
- ASSERT(!info->closure()->IsMarkedForInstallingRecompiledCode());
+ ASSERT(!info->closure()->IsInRecompileQueue());
+ return (status == OptimizingCompiler::SUCCEEDED) ? info->code()
+ : Handle<Code>::null();
}
@@ -1193,7 +1204,7 @@ void Compiler::SetFunctionInfo(Handle<SharedFunctionInfo> function_info,
function_info->set_has_duplicate_parameters(lit->has_duplicate_parameters());
function_info->set_ast_node_count(lit->ast_node_count());
function_info->set_is_function(lit->is_function());
- function_info->set_dont_optimize(lit->flags()->Contains(kDontOptimize));
+ function_info->set_dont_optimize_reason(lit->dont_optimize_reason());
function_info->set_dont_inline(lit->flags()->Contains(kDontInline));
function_info->set_dont_cache(lit->flags()->Contains(kDontCache));
function_info->set_is_generator(lit->is_generator());
@@ -1216,6 +1227,8 @@ void Compiler::RecordFunctionCompilation(Logger::LogEventsAndTags tag,
if (*code == info->isolate()->builtins()->builtin(Builtins::kLazyCompile))
return;
int line_num = GetScriptLineNumber(script, shared->start_position()) + 1;
+ int column_num =
+ GetScriptColumnNumber(script, shared->start_position()) + 1;
USE(line_num);
if (script->name()->IsString()) {
PROFILE(info->isolate(),
@@ -1224,7 +1237,8 @@ void Compiler::RecordFunctionCompilation(Logger::LogEventsAndTags tag,
*shared,
info,
String::cast(script->name()),
- line_num));
+ line_num,
+ column_num));
} else {
PROFILE(info->isolate(),
CodeCreateEvent(Logger::ToNativeByScript(tag, *script),
@@ -1232,7 +1246,8 @@ void Compiler::RecordFunctionCompilation(Logger::LogEventsAndTags tag,
*shared,
info,
info->isolate()->heap()->empty_string(),
- line_num));
+ line_num,
+ column_num));
}
}
@@ -1247,7 +1262,7 @@ CompilationPhase::CompilationPhase(const char* name, CompilationInfo* info)
: name_(name), info_(info), zone_(info->isolate()) {
if (FLAG_hydrogen_stats) {
info_zone_start_allocation_size_ = info->zone()->allocation_size();
- start_ticks_ = OS::Ticks();
+ timer_.Start();
}
}
@@ -1256,8 +1271,7 @@ CompilationPhase::~CompilationPhase() {
if (FLAG_hydrogen_stats) {
unsigned size = zone()->allocation_size();
size += info_->zone()->allocation_size() - info_zone_start_allocation_size_;
- int64_t ticks = OS::Ticks() - start_ticks_;
- isolate()->GetHStatistics()->SaveTiming(name_, ticks, size);
+ isolate()->GetHStatistics()->SaveTiming(name_, timer_.Elapsed(), size);
}
}
@@ -1265,9 +1279,11 @@ CompilationPhase::~CompilationPhase() {
bool CompilationPhase::ShouldProduceTraceOutput() const {
// Trace if the appropriate trace flag is set and the phase name's first
// character is in the FLAG_trace_phase command line parameter.
- bool tracing_on = info()->IsStub() ?
- FLAG_trace_hydrogen_stubs :
- FLAG_trace_hydrogen;
+ AllowHandleDereference allow_deref;
+ bool tracing_on = info()->IsStub()
+ ? FLAG_trace_hydrogen_stubs
+ : (FLAG_trace_hydrogen &&
+ info()->closure()->PassesFilter(FLAG_trace_hydrogen_filter));
return (tracing_on &&
OS::StrChr(const_cast<char*>(FLAG_trace_phase), name_[0]) != NULL);
}
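
Throughout compiler.cc, raw OS::Ticks() int64 arithmetic is replaced by the ElapsedTimer and TimeDelta value types. For comparison only, a minimal stand-in with the same Start()/elapsed-style surface can be sketched on std::chrono; the class below models just the calls this diff uses and is not V8's implementation:

    #include <chrono>
    #include <cstdio>

    // Hypothetical stand-in for the ElapsedTimer interface used above;
    // only Start() and a fractional-milliseconds query are modeled.
    class ElapsedTimer {
     public:
      void Start() { start_ = std::chrono::steady_clock::now(); }
      // Fractional milliseconds since Start(), akin to
      // TimeDelta::InMillisecondsF() in the diff.
      double ElapsedMillisecondsF() const {
        std::chrono::duration<double, std::milli> d =
            std::chrono::steady_clock::now() - start_;
        return d.count();
      }
     private:
      std::chrono::steady_clock::time_point start_;
    };

    int main() {
      ElapsedTimer timer;
      timer.Start();
      // ... the compilation phase being measured ...
      std::printf("phase took %.3f ms\n", timer.ElapsedMillisecondsF());
    }
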
diff --git a/deps/v8/src/compiler.h b/deps/v8/src/compiler.h
index 7d442f9d4..8ceb61db9 100644
--- a/deps/v8/src/compiler.h
+++ b/deps/v8/src/compiler.h
@@ -60,11 +60,11 @@ class CompilationInfo {
CompilationInfo(Handle<JSFunction> closure, Zone* zone);
virtual ~CompilationInfo();
- Isolate* isolate() {
- ASSERT(Isolate::Current() == isolate_);
+ Isolate* isolate() const {
return isolate_;
}
Zone* zone() { return zone_; }
+ bool is_osr() const { return !osr_ast_id_.IsNone(); }
bool is_lazy() const { return IsLazy::decode(flags_); }
bool is_eval() const { return IsEval::decode(flags_); }
bool is_global() const { return IsGlobal::decode(flags_); }
@@ -235,9 +235,10 @@ class CompilationInfo {
// Determines whether or not to insert a self-optimization header.
bool ShouldSelfOptimize();
- // Disable all optimization attempts of this info for the rest of the
- // current compilation pipeline.
- void AbortOptimization();
+ // Reset code to the unoptimized version when optimization is aborted.
+ void AbortOptimization() {
+ SetCode(handle(shared_info()->code()));
+ }
void set_deferred_handles(DeferredHandles* deferred_handles) {
ASSERT(deferred_handles_ == NULL);
@@ -307,6 +308,14 @@ class CompilationInfo {
return abort_due_to_dependency_;
}
+ void set_osr_pc_offset(uint32_t pc_offset) {
+ osr_pc_offset_ = pc_offset;
+ }
+
+ bool HasSameOsrEntry(Handle<JSFunction> function, uint32_t pc_offset) {
+ return osr_pc_offset_ == pc_offset && function.is_identical_to(closure_);
+ }
+
protected:
CompilationInfo(Handle<Script> script,
Zone* zone);
@@ -334,7 +343,7 @@ class CompilationInfo {
void Initialize(Isolate* isolate, Mode mode, Zone* zone);
void SetMode(Mode mode) {
- ASSERT(V8::UseCrankshaft());
+ ASSERT(isolate()->use_crankshaft());
mode_ = mode;
}
@@ -401,6 +410,9 @@ class CompilationInfo {
// Compilation mode flag and whether deoptimization is allowed.
Mode mode_;
BailoutId osr_ast_id_;
+ // The pc_offset corresponding to osr_ast_id_ in unoptimized code.
+ // We can look this up in the back edge table, but cache it for quick access.
+ uint32_t osr_pc_offset_;
// Flag whether compilation needs to be aborted due to dependency change.
bool abort_due_to_dependency_;
@@ -500,9 +512,6 @@ class OptimizingCompiler: public ZoneObject {
graph_builder_(NULL),
graph_(NULL),
chunk_(NULL),
- time_taken_to_create_graph_(0),
- time_taken_to_optimize_(0),
- time_taken_to_codegen_(0),
last_status_(FAILED) { }
enum Status {
@@ -528,9 +537,9 @@ class OptimizingCompiler: public ZoneObject {
HOptimizedGraphBuilder* graph_builder_;
HGraph* graph_;
LChunk* chunk_;
- int64_t time_taken_to_create_graph_;
- int64_t time_taken_to_optimize_;
- int64_t time_taken_to_codegen_;
+ TimeDelta time_taken_to_create_graph_;
+ TimeDelta time_taken_to_optimize_;
+ TimeDelta time_taken_to_codegen_;
Status last_status_;
MUST_USE_RESULT Status SetLastStatus(Status status) {
@@ -540,18 +549,20 @@ class OptimizingCompiler: public ZoneObject {
void RecordOptimizationStats();
struct Timer {
- Timer(OptimizingCompiler* compiler, int64_t* location)
+ Timer(OptimizingCompiler* compiler, TimeDelta* location)
: compiler_(compiler),
- start_(OS::Ticks()),
- location_(location) { }
+ location_(location) {
+ ASSERT(location_ != NULL);
+ timer_.Start();
+ }
~Timer() {
- *location_ += (OS::Ticks() - start_);
+ *location_ += timer_.Elapsed();
}
OptimizingCompiler* compiler_;
- int64_t start_;
- int64_t* location_;
+ ElapsedTimer timer_;
+ TimeDelta* location_;
};
};
@@ -600,7 +611,8 @@ class Compiler : public AllStatic {
// success and false if the compilation resulted in a stack overflow.
static bool CompileLazy(CompilationInfo* info);
- static void RecompileParallel(Handle<JSFunction> function);
+ static bool RecompileConcurrent(Handle<JSFunction> function,
+ uint32_t osr_pc_offset = 0);
// Compile a shared function info object (the function is possibly lazily
// compiled).
@@ -613,7 +625,7 @@ class Compiler : public AllStatic {
bool is_toplevel,
Handle<Script> script);
- static void InstallOptimizedCode(OptimizingCompiler* info);
+ static Handle<Code> InstallOptimizedCode(OptimizingCompiler* info);
#ifdef ENABLE_DEBUGGER_SUPPORT
static bool MakeCodeForLiveEdit(CompilationInfo* info);
@@ -643,7 +655,7 @@ class CompilationPhase BASE_EMBEDDED {
CompilationInfo* info_;
Zone zone_;
unsigned info_zone_start_allocation_size_;
- int64_t start_ticks_;
+ ElapsedTimer timer_;
DISALLOW_COPY_AND_ASSIGN(CompilationPhase);
};
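
The new osr_pc_offset_ field and HasSameOsrEntry() exist so the runtime can recognize that an equivalent on-stack-replacement compile is already queued. A hedged sketch of that dedupe check over a generic pending-job list (all names are illustrative, not V8's):

    #include <cstdint>
    #include <vector>

    // A pending concurrent OSR compile, identified by an opaque function
    // id plus the pc offset of the back edge being replaced.
    struct PendingOsrCompile {
      int function_id;
      uint32_t osr_pc_offset;
      bool HasSameOsrEntry(int fn, uint32_t pc) const {
        return function_id == fn && osr_pc_offset == pc;
      }
    };

    // True if an equivalent OSR compile is already in flight, in which
    // case the caller should not queue a duplicate request.
    bool AlreadyQueued(const std::vector<PendingOsrCompile>& queue,
                       int fn, uint32_t pc) {
      for (const PendingOsrCompile& job : queue) {
        if (job.HasSameOsrEntry(fn, pc)) return true;
      }
      return false;
    }

    int main() {
      std::vector<PendingOsrCompile> queue = {{1, 24}, {2, 96}};
      return AlreadyQueued(queue, 2, 96) ? 0 : 1;  // already queued
    }
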
diff --git a/deps/v8/src/contexts.cc b/deps/v8/src/contexts.cc
index 0fddfdf50..441ef9d9c 100644
--- a/deps/v8/src/contexts.cc
+++ b/deps/v8/src/contexts.cc
@@ -74,7 +74,7 @@ Context* Context::native_context() {
// During bootstrapping, the global object might not be set and we
// have to search the context chain to find the native context.
- ASSERT(Isolate::Current()->bootstrapper()->IsActive());
+ ASSERT(this->GetIsolate()->bootstrapper()->IsActive());
Context* current = this;
while (!current->IsNativeContext()) {
JSFunction* closure = JSFunction::cast(current->closure());
@@ -319,14 +319,48 @@ void Context::RemoveOptimizedFunction(JSFunction* function) {
}
+void Context::SetOptimizedFunctionsListHead(Object* head) {
+ ASSERT(IsNativeContext());
+ set(OPTIMIZED_FUNCTIONS_LIST, head);
+}
+
+
Object* Context::OptimizedFunctionsListHead() {
ASSERT(IsNativeContext());
return get(OPTIMIZED_FUNCTIONS_LIST);
}
-void Context::ClearOptimizedFunctions() {
- set(OPTIMIZED_FUNCTIONS_LIST, GetHeap()->undefined_value());
+void Context::AddOptimizedCode(Code* code) {
+ ASSERT(IsNativeContext());
+ ASSERT(code->kind() == Code::OPTIMIZED_FUNCTION);
+ ASSERT(code->next_code_link()->IsUndefined());
+ code->set_next_code_link(get(OPTIMIZED_CODE_LIST));
+ set(OPTIMIZED_CODE_LIST, code);
+}
+
+
+void Context::SetOptimizedCodeListHead(Object* head) {
+ ASSERT(IsNativeContext());
+ set(OPTIMIZED_CODE_LIST, head);
+}
+
+
+Object* Context::OptimizedCodeListHead() {
+ ASSERT(IsNativeContext());
+ return get(OPTIMIZED_CODE_LIST);
+}
+
+
+void Context::SetDeoptimizedCodeListHead(Object* head) {
+ ASSERT(IsNativeContext());
+ set(DEOPTIMIZED_CODE_LIST, head);
+}
+
+
+Object* Context::DeoptimizedCodeListHead() {
+ ASSERT(IsNativeContext());
+ return get(DEOPTIMIZED_CODE_LIST);
}
@@ -352,10 +386,9 @@ bool Context::IsBootstrappingOrValidParentContext(
}
-bool Context::IsBootstrappingOrGlobalObject(Object* object) {
+bool Context::IsBootstrappingOrGlobalObject(Isolate* isolate, Object* object) {
// During bootstrapping we allow all objects to pass as global
// objects. This is necessary to fix circular dependencies.
- Isolate* isolate = Isolate::Current();
return isolate->heap()->gc_state() != Heap::NOT_IN_GC ||
isolate->bootstrapper()->IsActive() ||
object->IsGlobalObject();
diff --git a/deps/v8/src/contexts.h b/deps/v8/src/contexts.h
index fdf6d27ef..189c215e6 100644
--- a/deps/v8/src/contexts.h
+++ b/deps/v8/src/contexts.h
@@ -337,8 +337,10 @@ class Context: public FixedArray {
// Properties from here are treated as weak references by the full GC.
// Scavenge treats them as strong references.
OPTIMIZED_FUNCTIONS_LIST, // Weak.
- MAP_CACHE_INDEX, // Weak.
- NEXT_CONTEXT_LINK, // Weak.
+ OPTIMIZED_CODE_LIST, // Weak.
+ DEOPTIMIZED_CODE_LIST, // Weak.
+ MAP_CACHE_INDEX, // Weak.
+ NEXT_CONTEXT_LINK, // Weak.
// Total number of slots.
NATIVE_CONTEXT_SLOTS,
@@ -370,7 +372,7 @@ class Context: public FixedArray {
GlobalObject* global_object() {
Object* result = get(GLOBAL_OBJECT_INDEX);
- ASSERT(IsBootstrappingOrGlobalObject(result));
+ ASSERT(IsBootstrappingOrGlobalObject(this->GetIsolate(), result));
return reinterpret_cast<GlobalObject*>(result);
}
void set_global_object(GlobalObject* object) {
@@ -428,11 +430,19 @@ class Context: public FixedArray {
// Mark the native context with out of memory.
inline void mark_out_of_memory();
- // A native context hold a list of all functions which have been optimized.
+ // A native context holds a list of all functions with optimized code.
void AddOptimizedFunction(JSFunction* function);
void RemoveOptimizedFunction(JSFunction* function);
+ void SetOptimizedFunctionsListHead(Object* head);
Object* OptimizedFunctionsListHead();
- void ClearOptimizedFunctions();
+
+ // The native context also stores a list of all optimized code and a
+ // list of all deoptimized code, which are needed by the deoptimizer.
+ void AddOptimizedCode(Code* code);
+ void SetOptimizedCodeListHead(Object* head);
+ Object* OptimizedCodeListHead();
+ void SetDeoptimizedCodeListHead(Object* head);
+ Object* DeoptimizedCodeListHead();
Handle<Object> ErrorMessageForCodeGenerationFromStrings();
@@ -508,7 +518,7 @@ class Context: public FixedArray {
#ifdef DEBUG
// Bootstrapping-aware type checks.
static bool IsBootstrappingOrValidParentContext(Object* object, Context* kid);
- static bool IsBootstrappingOrGlobalObject(Object* object);
+ static bool IsBootstrappingOrGlobalObject(Isolate* isolate, Object* object);
#endif
STATIC_CHECK(kHeaderSize == Internals::kContextHeaderSize);
diff --git a/deps/v8/src/counters.cc b/deps/v8/src/counters.cc
index 183941206..e0a6a60a0 100644
--- a/deps/v8/src/counters.cc
+++ b/deps/v8/src/counters.cc
@@ -41,7 +41,7 @@ StatsTable::StatsTable()
int* StatsCounter::FindLocationInStatsTable() const {
- return Isolate::Current()->stats_table()->FindLocation(name_);
+ return isolate_->stats_table()->FindLocation(name_);
}
@@ -60,8 +60,7 @@ void* Histogram::CreateHistogram() const {
// Start the timer.
void HistogramTimer::Start() {
if (Enabled()) {
- stop_time_ = 0;
- start_time_ = OS::Ticks();
+ timer_.Start();
}
if (FLAG_log_internal_timer_events) {
LOG(isolate(), TimerEvent(Logger::START, name()));
@@ -72,10 +71,9 @@ void HistogramTimer::Start() {
// Stop the timer and record the results.
void HistogramTimer::Stop() {
if (Enabled()) {
- stop_time_ = OS::Ticks();
// Compute the delta between start and stop, in milliseconds.
- int milliseconds = static_cast<int>(stop_time_ - start_time_) / 1000;
- AddSample(milliseconds);
+ AddSample(static_cast<int>(timer_.Elapsed().InMilliseconds()));
+ timer_.Stop();
}
if (FLAG_log_internal_timer_events) {
LOG(isolate(), TimerEvent(Logger::END, name()));
diff --git a/deps/v8/src/counters.h b/deps/v8/src/counters.h
index a633fea77..93911d721 100644
--- a/deps/v8/src/counters.h
+++ b/deps/v8/src/counters.h
@@ -116,8 +116,8 @@ class StatsTable {
class StatsCounter {
public:
StatsCounter() { }
- explicit StatsCounter(const char* name)
- : name_(name), ptr_(NULL), lookup_done_(false) { }
+ explicit StatsCounter(Isolate* isolate, const char* name)
+ : isolate_(isolate), name_(name), ptr_(NULL), lookup_done_(false) { }
// Sets the counter to a specific value.
void Set(int value) {
@@ -175,6 +175,7 @@ class StatsCounter {
private:
int* FindLocationInStatsTable() const;
+ Isolate* isolate_;
const char* name_;
int* ptr_;
bool lookup_done_;
@@ -245,9 +246,7 @@ class HistogramTimer : public Histogram {
int max,
int num_buckets,
Isolate* isolate)
- : Histogram(name, min, max, num_buckets, isolate),
- start_time_(0),
- stop_time_(0) { }
+ : Histogram(name, min, max, num_buckets, isolate) {}
// Start the timer.
void Start();
@@ -257,12 +256,11 @@ class HistogramTimer : public Histogram {
// Returns true if the timer is running.
bool Running() {
- return Enabled() && (start_time_ != 0) && (stop_time_ == 0);
+ return Enabled() && timer_.IsStarted();
}
private:
- int64_t start_time_;
- int64_t stop_time_;
+ ElapsedTimer timer_;
};
// Helper class for scoping a HistogramTimer.
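
The scoping helper mentioned in that comment pairs Start() and Stop() with a C++ scope. A generic RAII sketch of the pattern (the template and dummy timer are illustrative, not V8's helper class):

    // Calls Start() on entry and Stop() on exit; TimerT is any type
    // exposing that pair, such as the HistogramTimer above.
    template <typename TimerT>
    class TimerScope {
     public:
      explicit TimerScope(TimerT* timer) : timer_(timer) { timer_->Start(); }
      ~TimerScope() { timer_->Stop(); }
     private:
      TimerT* timer_;
    };

    // Minimal stand-in so the sketch is self-contained.
    struct DummyTimer {
      bool running = false;
      void Start() { running = true; }
      void Stop() { running = false; }
    };

    int main() {
      DummyTimer t;
      {
        TimerScope<DummyTimer> scope(&t);  // t.running == true here
        // ... timed region ...
      }                                    // t.running == false again
    }
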
diff --git a/deps/v8/src/cpu-profiler-inl.h b/deps/v8/src/cpu-profiler-inl.h
index 868ec64fd..7bfbf5c57 100644
--- a/deps/v8/src/cpu-profiler-inl.h
+++ b/deps/v8/src/cpu-profiler-inl.h
@@ -67,13 +67,30 @@ void ReportBuiltinEventRecord::UpdateCodeMap(CodeMap* code_map) {
}
-TickSample* ProfilerEventsProcessor::TickSampleEvent() {
+TickSample* CpuProfiler::StartTickSample() {
+ if (is_profiling_) return processor_->StartTickSample();
+ return NULL;
+}
+
+
+void CpuProfiler::FinishTickSample() {
+ processor_->FinishTickSample();
+}
+
+
+TickSample* ProfilerEventsProcessor::StartTickSample() {
+ void* address = ticks_buffer_.StartEnqueue();
+ if (address == NULL) return NULL;
TickSampleEventRecord* evt =
- new(ticks_buffer_.Enqueue()) TickSampleEventRecord(last_code_event_id_);
+ new(address) TickSampleEventRecord(last_code_event_id_);
return &evt->sample;
}
+void ProfilerEventsProcessor::FinishTickSample() {
+ ticks_buffer_.FinishEnqueue();
+}
+
} } // namespace v8::internal
#endif // V8_CPU_PROFILER_INL_H_
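
StartTickSample()/FinishTickSample() split enqueueing into "reserve a slot" and "commit", so the sampler can construct the record in place with placement new instead of copying it. A single-threaded toy with the same reserve/commit surface (the real SamplingCircularQueue is lock-free; this sketch is not):

    #include <cstddef>
    #include <new>

    struct TickSample { int frames = 0; };

    // Toy ring buffer modeled on StartEnqueue()/FinishEnqueue().
    template <typename T, size_t N>
    class TwoPhaseQueue {
     public:
      // Reserve the next slot, or return nullptr when the queue is full.
      void* StartEnqueue() {
        size_t next = (tail_ + 1) % N;
        if (next == head_) return nullptr;  // full
        return &slots_[tail_];
      }
      // Publish the record that was written into the reserved slot.
      void FinishEnqueue() { tail_ = (tail_ + 1) % N; }
     private:
      T slots_[N];
      size_t head_ = 0;
      size_t tail_ = 0;
    };

    int main() {
      TwoPhaseQueue<TickSample, 16> queue;
      if (void* address = queue.StartEnqueue()) {
        TickSample* sample = new (address) TickSample();  // construct in place
        sample->frames = 3;                               // fill, no copy
        queue.FinishEnqueue();                            // commit
      }
    }
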
diff --git a/deps/v8/src/cpu-profiler.cc b/deps/v8/src/cpu-profiler.cc
index 747542f73..e0f7aea18 100644
--- a/deps/v8/src/cpu-profiler.cc
+++ b/deps/v8/src/cpu-profiler.cc
@@ -40,18 +40,18 @@
namespace v8 {
namespace internal {
-static const int kTickSamplesBufferChunkSize = 64 * KB;
-static const int kTickSamplesBufferChunksCount = 16;
static const int kProfilerStackSize = 64 * KB;
-ProfilerEventsProcessor::ProfilerEventsProcessor(ProfileGenerator* generator)
+ProfilerEventsProcessor::ProfilerEventsProcessor(
+ ProfileGenerator* generator,
+ Sampler* sampler,
+ TimeDelta period)
: Thread(Thread::Options("v8:ProfEvntProc", kProfilerStackSize)),
generator_(generator),
+ sampler_(sampler),
running_(true),
- ticks_buffer_(sizeof(TickSampleEventRecord),
- kTickSamplesBufferChunkSize,
- kTickSamplesBufferChunksCount),
+ period_(period),
last_code_event_id_(0), last_processed_code_event_id_(0) {
}
@@ -103,54 +103,54 @@ bool ProfilerEventsProcessor::ProcessCodeEvent() {
return false;
}
-
-bool ProfilerEventsProcessor::ProcessTicks() {
- while (true) {
- while (!ticks_from_vm_buffer_.IsEmpty()
- && ticks_from_vm_buffer_.Peek()->order ==
- last_processed_code_event_id_) {
- TickSampleEventRecord record;
- ticks_from_vm_buffer_.Dequeue(&record);
- generator_->RecordTickSample(record.sample);
- }
-
- const TickSampleEventRecord* rec =
- TickSampleEventRecord::cast(ticks_buffer_.StartDequeue());
- if (rec == NULL) return !ticks_from_vm_buffer_.IsEmpty();
- // Make a local copy of tick sample record to ensure that it won't
- // be modified as we are processing it. This is possible as the
- // sampler writes w/o any sync to the queue, so if the processor
- // will get far behind, a record may be modified right under its
- // feet.
- TickSampleEventRecord record = *rec;
- if (record.order != last_processed_code_event_id_) return true;
-
- // A paranoid check to make sure that we don't get a memory overrun
- // in case of frames_count having a wild value.
- if (record.sample.frames_count < 0
- || record.sample.frames_count > TickSample::kMaxFramesCount)
- record.sample.frames_count = 0;
+ProfilerEventsProcessor::SampleProcessingResult
+ ProfilerEventsProcessor::ProcessOneSample() {
+ if (!ticks_from_vm_buffer_.IsEmpty()
+ && ticks_from_vm_buffer_.Peek()->order ==
+ last_processed_code_event_id_) {
+ TickSampleEventRecord record;
+ ticks_from_vm_buffer_.Dequeue(&record);
generator_->RecordTickSample(record.sample);
- ticks_buffer_.FinishDequeue();
+ return OneSampleProcessed;
+ }
+
+ const TickSampleEventRecord* record = ticks_buffer_.Peek();
+ if (record == NULL) {
+ if (ticks_from_vm_buffer_.IsEmpty()) return NoSamplesInQueue;
+ return FoundSampleForNextCodeEvent;
+ }
+ if (record->order != last_processed_code_event_id_) {
+ return FoundSampleForNextCodeEvent;
}
+ generator_->RecordTickSample(record->sample);
+ ticks_buffer_.Remove();
+ return OneSampleProcessed;
}
void ProfilerEventsProcessor::Run() {
while (running_) {
- // Process ticks until we have any.
- if (ProcessTicks()) {
- // All ticks of the current last_processed_code_event_id_ are processed,
- // proceed to the next code event.
- ProcessCodeEvent();
- }
- YieldCPU();
+ ElapsedTimer timer;
+ timer.Start();
+    // Keep processing existing events until we need to do the next sample.
+ do {
+ if (FoundSampleForNextCodeEvent == ProcessOneSample()) {
+ // All ticks of the current last_processed_code_event_id_ are
+ // processed, proceed to the next code event.
+ ProcessCodeEvent();
+ }
+ } while (!timer.HasExpired(period_));
+
+ // Schedule next sample. sampler_ is NULL in tests.
+ if (sampler_) sampler_->DoSample();
}
// Process remaining tick events.
- ticks_buffer_.FlushResidualRecords();
do {
- ProcessTicks();
+ SampleProcessingResult result;
+ do {
+ result = ProcessOneSample();
+ } while (result == OneSampleProcessed);
} while (ProcessCodeEvent());
}
@@ -166,12 +166,6 @@ CpuProfile* CpuProfiler::GetProfile(int index) {
}
-TickSample* CpuProfiler::TickSampleEvent() {
- if (is_profiling_) return processor_->TickSampleEvent();
- return NULL;
-}
-
-
void CpuProfiler::DeleteAllProfiles() {
if (is_profiling_) StopProcessor();
ResetProfiles();
@@ -253,6 +247,8 @@ void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
ASSERT(Script::cast(shared->script()));
Script* script = Script::cast(shared->script());
rec->entry->set_script_id(script->id()->value());
+ rec->entry->set_bailout_reason(
+ GetBailoutReason(shared->DisableOptimizationReason()));
}
rec->size = code->ExecutableSize();
rec->shared = shared->address();
@@ -283,6 +279,8 @@ void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
rec->entry->set_script_id(script->id()->value());
rec->size = code->ExecutableSize();
rec->shared = shared->address();
+ rec->entry->set_bailout_reason(
+ GetBailoutReason(shared->DisableOptimizationReason()));
processor_->Enqueue(evt_rec);
}
@@ -373,11 +371,12 @@ void CpuProfiler::SetterCallbackEvent(Name* name, Address entry_point) {
CpuProfiler::CpuProfiler(Isolate* isolate)
: isolate_(isolate),
- profiles_(new CpuProfilesCollection()),
+ sampling_interval_(TimeDelta::FromMicroseconds(
+ FLAG_cpu_profiler_sampling_interval)),
+ profiles_(new CpuProfilesCollection(isolate->heap())),
next_profile_uid_(1),
generator_(NULL),
processor_(NULL),
- need_to_stop_sampler_(false),
is_profiling_(false) {
}
@@ -387,11 +386,12 @@ CpuProfiler::CpuProfiler(Isolate* isolate,
ProfileGenerator* test_generator,
ProfilerEventsProcessor* test_processor)
: isolate_(isolate),
+ sampling_interval_(TimeDelta::FromMicroseconds(
+ FLAG_cpu_profiler_sampling_interval)),
profiles_(test_profiles),
next_profile_uid_(1),
generator_(test_generator),
processor_(test_processor),
- need_to_stop_sampler_(false),
is_profiling_(false) {
}
@@ -402,9 +402,15 @@ CpuProfiler::~CpuProfiler() {
}
+void CpuProfiler::set_sampling_interval(TimeDelta value) {
+ ASSERT(!is_profiling_);
+ sampling_interval_ = value;
+}
+
+
void CpuProfiler::ResetProfiles() {
delete profiles_;
- profiles_ = new CpuProfilesCollection();
+ profiles_ = new CpuProfilesCollection(isolate()->heap());
}
@@ -425,12 +431,13 @@ void CpuProfiler::StartProcessorIfNotStarted() {
if (processor_ == NULL) {
Logger* logger = isolate_->logger();
// Disable logging when using the new implementation.
- saved_logging_nesting_ = logger->logging_nesting_;
- logger->logging_nesting_ = 0;
+ saved_is_logging_ = logger->is_logging_;
+ logger->is_logging_ = false;
generator_ = new ProfileGenerator(profiles_);
- processor_ = new ProfilerEventsProcessor(generator_);
+ Sampler* sampler = logger->sampler();
+ processor_ = new ProfilerEventsProcessor(
+ generator_, sampler, sampling_interval_);
is_profiling_ = true;
- processor_->StartSynchronously();
// Enumerate stuff we already have in the heap.
ASSERT(isolate_->heap()->HasBeenSetUp());
if (!FLAG_prof_browser_mode) {
@@ -440,12 +447,9 @@ void CpuProfiler::StartProcessorIfNotStarted() {
logger->LogAccessorCallbacks();
LogBuiltins();
// Enable stack sampling.
- Sampler* sampler = logger->sampler();
+ sampler->SetHasProcessingThread(true);
sampler->IncreaseProfilingDepth();
- if (!sampler->IsActive()) {
- sampler->Start();
- need_to_stop_sampler_ = true;
- }
+ processor_->StartSynchronously();
}
}
@@ -477,18 +481,15 @@ void CpuProfiler::StopProcessorIfLastProfile(const char* title) {
void CpuProfiler::StopProcessor() {
Logger* logger = isolate_->logger();
Sampler* sampler = reinterpret_cast<Sampler*>(logger->ticker_);
- sampler->DecreaseProfilingDepth();
- if (need_to_stop_sampler_) {
- sampler->Stop();
- need_to_stop_sampler_ = false;
- }
is_profiling_ = false;
processor_->StopSynchronously();
delete processor_;
delete generator_;
processor_ = NULL;
generator_ = NULL;
- logger->logging_nesting_ = saved_logging_nesting_;
+ sampler->SetHasProcessingThread(false);
+ sampler->DecreaseProfilingDepth();
+ logger->is_logging_ = saved_is_logging_;
}
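
The rewritten Run() loop inverts control: rather than the sampler firing on its own OS timer, the processor thread drains queued events for one sampling period and then asks the sampler for the next tick. A schematic of that loop shape, with stubbed stages and a fixed iteration count standing in for the running_ flag (everything here is a stand-in):

    #include <chrono>

    enum SampleProcessingResult {
      OneSampleProcessed,
      FoundSampleForNextCodeEvent,
      NoSamplesInQueue
    };

    // Stubbed stages; the real processor drains lock-free queues here.
    static SampleProcessingResult ProcessOneSample() { return NoSamplesInQueue; }
    static void ProcessCodeEvent() {}
    static void DoSample() { /* would signal the sampler to take a tick */ }

    // Drain events until one sampling period has elapsed, then request
    // the next sample, matching the shape of the Run() loop above.
    void RunForPeriods(int periods, std::chrono::microseconds period) {
      for (int i = 0; i < periods; ++i) {
        auto deadline = std::chrono::steady_clock::now() + period;
        do {
          if (ProcessOneSample() == FoundSampleForNextCodeEvent) {
            ProcessCodeEvent();
          }
        } while (std::chrono::steady_clock::now() < deadline);
        DoSample();
      }
    }

    int main() { RunForPeriods(3, std::chrono::microseconds(1000)); }
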
diff --git a/deps/v8/src/cpu-profiler.h b/deps/v8/src/cpu-profiler.h
index 1dd405e5d..8aba5426d 100644
--- a/deps/v8/src/cpu-profiler.h
+++ b/deps/v8/src/cpu-profiler.h
@@ -31,6 +31,7 @@
#include "allocation.h"
#include "atomicops.h"
#include "circular-queue.h"
+#include "platform/time.h"
#include "sampler.h"
#include "unbound-queue.h"
@@ -114,10 +115,6 @@ class TickSampleEventRecord {
unsigned order;
TickSample sample;
-
- static TickSampleEventRecord* cast(void* value) {
- return reinterpret_cast<TickSampleEventRecord*>(value);
- }
};
@@ -140,7 +137,9 @@ class CodeEventsContainer {
// methods called by event producers: VM and stack sampler threads.
class ProfilerEventsProcessor : public Thread {
public:
- explicit ProfilerEventsProcessor(ProfileGenerator* generator);
+ ProfilerEventsProcessor(ProfileGenerator* generator,
+ Sampler* sampler,
+ TimeDelta period);
virtual ~ProfilerEventsProcessor() {}
// Thread control.
@@ -156,17 +155,31 @@ class ProfilerEventsProcessor : public Thread {
// queue (because the structure is of fixed width, but usually not all
// stack frame entries are filled.) This method returns a pointer to the
// next record of the buffer.
- INLINE(TickSample* TickSampleEvent());
+ inline TickSample* StartTickSample();
+ inline void FinishTickSample();
private:
// Called from events processing thread (Run() method.)
bool ProcessCodeEvent();
- bool ProcessTicks();
+
+ enum SampleProcessingResult {
+ OneSampleProcessed,
+ FoundSampleForNextCodeEvent,
+ NoSamplesInQueue
+ };
+ SampleProcessingResult ProcessOneSample();
ProfileGenerator* generator_;
+ Sampler* sampler_;
bool running_;
+ // Sampling period in microseconds.
+ const TimeDelta period_;
UnboundQueue<CodeEventsContainer> events_buffer_;
- SamplingCircularQueue ticks_buffer_;
+ static const size_t kTickSampleBufferSize = 1 * MB;
+ static const size_t kTickSampleQueueLength =
+ kTickSampleBufferSize / sizeof(TickSampleEventRecord);
+ SamplingCircularQueue<TickSampleEventRecord,
+ kTickSampleQueueLength> ticks_buffer_;
UnboundQueue<TickSampleEventRecord> ticks_from_vm_buffer_;
unsigned last_code_event_id_;
unsigned last_processed_code_event_id_;
@@ -195,6 +208,7 @@ class CpuProfiler : public CodeEventListener {
virtual ~CpuProfiler();
+ void set_sampling_interval(TimeDelta value);
void StartProfiling(const char* title, bool record_samples = false);
void StartProfiling(String* title, bool record_samples);
CpuProfile* StopProfiling(const char* title);
@@ -205,7 +219,8 @@ class CpuProfiler : public CodeEventListener {
void DeleteProfile(CpuProfile* profile);
// Invoked from stack sampler (thread or signal handler.)
- TickSample* TickSampleEvent();
+ inline TickSample* StartTickSample();
+ inline void FinishTickSample();
// Must be called via PROFILE macro, otherwise will crash when
// profiling is not enabled.
@@ -251,12 +266,12 @@ class CpuProfiler : public CodeEventListener {
void LogBuiltins();
Isolate* isolate_;
+ TimeDelta sampling_interval_;
CpuProfilesCollection* profiles_;
unsigned next_profile_uid_;
ProfileGenerator* generator_;
ProfilerEventsProcessor* processor_;
- int saved_logging_nesting_;
- bool need_to_stop_sampler_;
+ bool saved_is_logging_;
bool is_profiling_;
DISALLOW_COPY_AND_ASSIGN(CpuProfiler);
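
ticks_buffer_ is now a fixed-capacity queue whose length is derived from a byte budget. The same sizing arithmetic, checked at compile time (the record layout below is illustrative only):

    #include <cstddef>

    // Pick a byte budget, derive the element count from the record size.
    struct TickSampleEventRecord { unsigned order; char payload[252]; };

    const size_t kTickSampleBufferSize = 1 * 1024 * 1024;  // 1 MB budget
    const size_t kTickSampleQueueLength =
        kTickSampleBufferSize / sizeof(TickSampleEventRecord);

    static_assert(kTickSampleQueueLength >= 1024,
                  "a 1 MB budget should hold at least 1024 such records");

    int main() { return 0; }
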
diff --git a/deps/v8/src/cpu.cc b/deps/v8/src/cpu.cc
new file mode 100644
index 000000000..2bf51a7f6
--- /dev/null
+++ b/deps/v8/src/cpu.cc
@@ -0,0 +1,466 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "cpu.h"
+
+#if V8_CC_MSVC
+#include <intrin.h> // __cpuid()
+#endif
+#if V8_OS_POSIX
+#include <unistd.h> // sysconf()
+#endif
+
+#include <algorithm>
+#include <cctype>
+#include <climits>
+#include <cstdio>
+#include <cstdlib>
+#include <cstring>
+
+#include "checks.h"
+#if V8_OS_WIN
+#include "win32-headers.h"
+#endif
+
+namespace v8 {
+namespace internal {
+
+#if V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64
+
+// Define __cpuid() for non-MSVC compilers.
+#if !V8_CC_MSVC
+
+static V8_INLINE void __cpuid(int cpu_info[4], int info_type) {
+#if defined(__i386__) && defined(__pic__)
+ // Make sure to preserve ebx, which contains the pointer
+ // to the GOT in case we're generating PIC.
+ __asm__ volatile (
+ "mov %%ebx, %%edi\n\t"
+ "cpuid\n\t"
+ "xchg %%edi, %%ebx\n\t"
+ : "=a"(cpu_info[0]), "=D"(cpu_info[1]), "=c"(cpu_info[2]), "=d"(cpu_info[3])
+ : "a"(info_type)
+ );
+#else
+ __asm__ volatile (
+ "cpuid \n\t"
+ : "=a"(cpu_info[0]), "=b"(cpu_info[1]), "=c"(cpu_info[2]), "=d"(cpu_info[3])
+ : "a"(info_type)
+ );
+#endif // defined(__i386__) && defined(__pic__)
+}
+
+#endif // !V8_CC_MSVC
+
+#elif V8_HOST_ARCH_ARM || V8_HOST_ARCH_MIPS
+
+#if V8_HOST_ARCH_ARM
+
+// See <uapi/asm/hwcap.h> kernel header.
+/*
+ * HWCAP flags - for elf_hwcap (in kernel) and AT_HWCAP
+ */
+#define HWCAP_SWP (1 << 0)
+#define HWCAP_HALF (1 << 1)
+#define HWCAP_THUMB (1 << 2)
+#define HWCAP_26BIT (1 << 3) /* Play it safe */
+#define HWCAP_FAST_MULT (1 << 4)
+#define HWCAP_FPA (1 << 5)
+#define HWCAP_VFP (1 << 6)
+#define HWCAP_EDSP (1 << 7)
+#define HWCAP_JAVA (1 << 8)
+#define HWCAP_IWMMXT (1 << 9)
+#define HWCAP_CRUNCH (1 << 10)
+#define HWCAP_THUMBEE (1 << 11)
+#define HWCAP_NEON (1 << 12)
+#define HWCAP_VFPv3 (1 << 13)
+#define HWCAP_VFPv3D16 (1 << 14) /* also set for VFPv4-D16 */
+#define HWCAP_TLS (1 << 15)
+#define HWCAP_VFPv4 (1 << 16)
+#define HWCAP_IDIVA (1 << 17)
+#define HWCAP_IDIVT (1 << 18)
+#define HWCAP_VFPD32 (1 << 19) /* set if VFP has 32 regs (not 16) */
+#define HWCAP_IDIV (HWCAP_IDIVA | HWCAP_IDIVT)
+#define HWCAP_LPAE (1 << 20)
+
+#define AT_HWCAP 16
+
+// Read the ELF HWCAP flags by parsing /proc/self/auxv.
+static uint32_t ReadELFHWCaps() {
+ uint32_t result = 0;
+ FILE* fp = fopen("/proc/self/auxv", "r");
+ if (fp != NULL) {
+ struct { uint32_t tag; uint32_t value; } entry;
+ for (;;) {
+ size_t n = fread(&entry, sizeof(entry), 1, fp);
+ if (n == 0 || (entry.tag == 0 && entry.value == 0)) {
+ break;
+ }
+ if (entry.tag == AT_HWCAP) {
+ result = entry.value;
+ break;
+ }
+ }
+ fclose(fp);
+ }
+ return result;
+}
+
+#endif // V8_HOST_ARCH_ARM
+
+// Extract the information exposed by the kernel via /proc/cpuinfo.
+class CPUInfo V8_FINAL BASE_EMBEDDED {
+ public:
+ CPUInfo() : datalen_(0) {
+ // Get the size of the cpuinfo file by reading it until the end. This is
+ // required because files under /proc do not always return a valid size
+    // when using fseek(0, SEEK_END) + ftell(). Nor can they be mmap()-ed.
+ static const char PATHNAME[] = "/proc/cpuinfo";
+ FILE* fp = fopen(PATHNAME, "r");
+ if (fp != NULL) {
+ for (;;) {
+ char buffer[256];
+ size_t n = fread(buffer, 1, sizeof(buffer), fp);
+ if (n == 0) {
+ break;
+ }
+ datalen_ += n;
+ }
+ fclose(fp);
+ }
+
+ // Read the contents of the cpuinfo file.
+ data_ = new char[datalen_ + 1];
+ fp = fopen(PATHNAME, "r");
+ if (fp != NULL) {
+ for (size_t offset = 0; offset < datalen_; ) {
+ size_t n = fread(data_ + offset, 1, datalen_ - offset, fp);
+ if (n == 0) {
+ break;
+ }
+ offset += n;
+ }
+ fclose(fp);
+ }
+
+ // Zero-terminate the data.
+ data_[datalen_] = '\0';
+ }
+
+ ~CPUInfo() {
+ delete[] data_;
+ }
+
+  // Extract the content of the first occurrence of a given field in
+  // the cpuinfo file and return it as a heap-allocated string that
+  // must be freed by the caller using delete[].
+ // Return NULL if not found.
+ char* ExtractField(const char* field) const {
+ ASSERT(field != NULL);
+
+    // Look for the first field occurrence, and ensure it starts the line.
+ size_t fieldlen = strlen(field);
+ char* p = data_;
+ for (;;) {
+ p = strstr(p, field);
+ if (p == NULL) {
+ return NULL;
+ }
+ if (p == data_ || p[-1] == '\n') {
+ break;
+ }
+ p += fieldlen;
+ }
+
+ // Skip to the first colon followed by a space.
+ p = strchr(p + fieldlen, ':');
+ if (p == NULL || !isspace(p[1])) {
+ return NULL;
+ }
+ p += 2;
+
+ // Find the end of the line.
+ char* q = strchr(p, '\n');
+ if (q == NULL) {
+ q = data_ + datalen_;
+ }
+
+ // Copy the line into a heap-allocated buffer.
+ size_t len = q - p;
+ char* result = new char[len + 1];
+ if (result != NULL) {
+ memcpy(result, p, len);
+ result[len] = '\0';
+ }
+ return result;
+ }
+
+ private:
+ char* data_;
+ size_t datalen_;
+};
+
+
+// Checks that a space-separated list of items contains one given 'item'.
+static bool HasListItem(const char* list, const char* item) {
+ ssize_t item_len = strlen(item);
+ const char* p = list;
+ if (p != NULL) {
+ while (*p != '\0') {
+ // Skip whitespace.
+ while (isspace(*p)) ++p;
+
+ // Find end of current list item.
+ const char* q = p;
+ while (*q != '\0' && !isspace(*q)) ++q;
+
+ if (item_len == q - p && memcmp(p, item, item_len) == 0) {
+ return true;
+ }
+
+ // Skip to next item.
+ p = q;
+ }
+ }
+ return false;
+}
+
+#endif // V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64
+
+CPU::CPU() : stepping_(0),
+ model_(0),
+ ext_model_(0),
+ family_(0),
+ ext_family_(0),
+ type_(0),
+ implementer_(0),
+ architecture_(0),
+ part_(0),
+ has_fpu_(false),
+ has_cmov_(false),
+ has_sahf_(false),
+ has_mmx_(false),
+ has_sse_(false),
+ has_sse2_(false),
+ has_sse3_(false),
+ has_ssse3_(false),
+ has_sse41_(false),
+ has_sse42_(false),
+ has_idiva_(false),
+ has_neon_(false),
+ has_thumbee_(false),
+ has_vfp_(false),
+ has_vfp3_(false),
+ has_vfp3_d32_(false) {
+ memcpy(vendor_, "Unknown", 8);
+#if V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64
+ int cpu_info[4];
+
+ // __cpuid with an InfoType argument of 0 returns the number of
+ // valid Ids in CPUInfo[0] and the CPU identification string in
+ // the other three array elements. The CPU identification string is
+ // not in linear order. The code below arranges the information
+ // in a human readable form. The human readable order is CPUInfo[1] |
+ // CPUInfo[3] | CPUInfo[2]. CPUInfo[2] and CPUInfo[3] are swapped
+ // before using memcpy to copy these three array elements to cpu_string.
+ __cpuid(cpu_info, 0);
+ unsigned num_ids = cpu_info[0];
+ std::swap(cpu_info[2], cpu_info[3]);
+ memcpy(vendor_, cpu_info + 1, 12);
+ vendor_[12] = '\0';
+
+ // Interpret CPU feature information.
+ if (num_ids > 0) {
+ __cpuid(cpu_info, 1);
+ stepping_ = cpu_info[0] & 0xf;
+ model_ = ((cpu_info[0] >> 4) & 0xf) + ((cpu_info[0] >> 12) & 0xf0);
+ family_ = (cpu_info[0] >> 8) & 0xf;
+ type_ = (cpu_info[0] >> 12) & 0x3;
+ ext_model_ = (cpu_info[0] >> 16) & 0xf;
+ ext_family_ = (cpu_info[0] >> 20) & 0xff;
+ has_fpu_ = (cpu_info[3] & 0x00000001) != 0;
+ has_cmov_ = (cpu_info[3] & 0x00008000) != 0;
+ has_mmx_ = (cpu_info[3] & 0x00800000) != 0;
+ has_sse_ = (cpu_info[3] & 0x02000000) != 0;
+ has_sse2_ = (cpu_info[3] & 0x04000000) != 0;
+ has_sse3_ = (cpu_info[2] & 0x00000001) != 0;
+ has_ssse3_ = (cpu_info[2] & 0x00000200) != 0;
+ has_sse41_ = (cpu_info[2] & 0x00080000) != 0;
+ has_sse42_ = (cpu_info[2] & 0x00100000) != 0;
+ }
+
+ // Query extended IDs.
+ __cpuid(cpu_info, 0x80000000);
+ unsigned num_ext_ids = cpu_info[0];
+
+ // Interpret extended CPU feature information.
+ if (num_ext_ids > 0x80000000) {
+ __cpuid(cpu_info, 0x80000001);
+ // SAHF is always available in compat/legacy mode,
+ // but must be probed in long mode.
+#if V8_HOST_ARCH_IA32
+ has_sahf_ = true;
+#else
+ has_sahf_ = (cpu_info[2] & 0x00000001) != 0;
+#endif
+ }
+#elif V8_HOST_ARCH_ARM
+ CPUInfo cpu_info;
+
+ // Extract implementor from the "CPU implementer" field.
+ char* implementer = cpu_info.ExtractField("CPU implementer");
+ if (implementer != NULL) {
+    char* end;
+ implementer_ = strtol(implementer, &end, 0);
+ if (end == implementer) {
+ implementer_ = 0;
+ }
+ delete[] implementer;
+ }
+
+ // Extract part number from the "CPU part" field.
+ char* part = cpu_info.ExtractField("CPU part");
+ if (part != NULL) {
+    char* end;
+ part_ = strtol(part, &end, 0);
+ if (end == part) {
+ part_ = 0;
+ }
+ delete[] part;
+ }
+
+  // Extract architecture from the "CPU architecture" field.
+  // The list is well-known, unlike the output of the 'Processor'
+  // field, which can vary greatly.
+ // See the definition of the 'proc_arch' array in
+ // $KERNEL/arch/arm/kernel/setup.c and the 'c_show' function in
+  // the same file.
+ char* architecture = cpu_info.ExtractField("CPU architecture");
+ if (architecture != NULL) {
+ char* end;
+ architecture_ = strtol(architecture, &end, 10);
+ if (end == architecture) {
+ architecture_ = 0;
+ }
+ delete[] architecture;
+
+ // Unfortunately, it seems that certain ARMv6-based CPUs
+ // report an incorrect architecture number of 7!
+ //
+ // See http://code.google.com/p/android/issues/detail?id=10812
+ //
+ // We try to correct this by looking at the 'elf_format'
+ // field reported by the 'Processor' field, which is of the
+    // form "(v7l)" for an ARMv7-based CPU, and "(v6l)" for
+    // an ARMv6 one. For example, the Raspberry Pi is one popular
+ // ARMv6 device that reports architecture 7.
+ if (architecture_ == 7) {
+ char* processor = cpu_info.ExtractField("Processor");
+ if (HasListItem(processor, "(v6l)")) {
+ architecture_ = 6;
+ }
+ delete[] processor;
+ }
+ }
+
+ // Try to extract the list of CPU features from ELF hwcaps.
+ uint32_t hwcaps = ReadELFHWCaps();
+ if (hwcaps != 0) {
+ has_idiva_ = (hwcaps & HWCAP_IDIVA) != 0;
+ has_neon_ = (hwcaps & HWCAP_NEON) != 0;
+ has_thumbee_ = (hwcaps & HWCAP_THUMBEE) != 0;
+ has_vfp_ = (hwcaps & HWCAP_VFP) != 0;
+ has_vfp3_ = (hwcaps & (HWCAP_VFPv3 | HWCAP_VFPv3D16 | HWCAP_VFPv4)) != 0;
+ has_vfp3_d32_ = (has_vfp3_ && ((hwcaps & HWCAP_VFPv3D16) == 0 ||
+ (hwcaps & HWCAP_VFPD32) != 0));
+ } else {
+ // Try to fallback to "Features" CPUInfo field.
+ char* features = cpu_info.ExtractField("Features");
+ has_idiva_ = HasListItem(features, "idiva");
+ has_neon_ = HasListItem(features, "neon");
+ has_thumbee_ = HasListItem(features, "thumbee");
+ has_vfp_ = HasListItem(features, "vfp");
+ if (HasListItem(features, "vfpv3")) {
+ has_vfp3_ = true;
+ has_vfp3_d32_ = true;
+ } else if (HasListItem(features, "vfpv3d16")) {
+ has_vfp3_ = true;
+ }
+ delete[] features;
+ }
+
+ // Some old kernels will report vfp not vfpv3. Here we make an attempt
+ // to detect vfpv3 by checking for vfp *and* neon, since neon is only
+ // available on architectures with vfpv3. Checking neon on its own is
+ // not enough as it is possible to have neon without vfp.
+ if (has_vfp_ && has_neon_) {
+ has_vfp3_ = true;
+ }
+
+ // VFPv3 implies ARMv7, see ARM DDI 0406B, page A1-6.
+ if (architecture_ < 7 && has_vfp3_) {
+ architecture_ = 7;
+ }
+
+ // ARMv7 implies ThumbEE.
+ if (architecture_ >= 7) {
+ has_thumbee_ = true;
+ }
+
+ // The earliest architecture with ThumbEE is ARMv6T2.
+ if (has_thumbee_ && architecture_ < 6) {
+ architecture_ = 6;
+ }
+
+ // We don't support any FPUs other than VFP.
+ has_fpu_ = has_vfp_;
+#elif V8_HOST_ARCH_MIPS
+ // Simple detection of FPU at runtime for Linux.
+ // It is based on /proc/cpuinfo, which reveals hardware configuration
+ // to user-space applications. According to MIPS (early 2010), no similar
+ // facility is universally available on the MIPS architectures,
+  // so it's up to individual OSes to provide one.
+ CPUInfo cpu_info;
+ char* cpu_model = cpu_info.ExtractField("cpu model");
+ has_fpu_ = HasListItem(cpu_model, "FPU");
+ delete[] cpu_model;
+#endif
+}
+
+
+// static
+int CPU::NumberOfProcessorsOnline() {
+#if V8_OS_WIN
+ SYSTEM_INFO info;
+ GetSystemInfo(&info);
+ return info.dwNumberOfProcessors;
+#else
+ return static_cast<int>(sysconf(_SC_NPROCESSORS_ONLN));
+#endif
+}
+
+} } // namespace v8::internal
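
With CPU now an instantiable class, callers probe the host processor by constructing one and reading accessors. A usage sketch against the interface declared in cpu.h below, assuming it is compiled inside the V8 tree:

    #include <cstdio>

    #include "cpu.h"  // the header shown in the next file of this diff

    int main() {
      v8::internal::CPU cpu;
      std::printf("vendor: %s\n", cpu.vendor());
    #if V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64
      if (cpu.has_sse2()) std::printf("SSE2 available\n");
    #elif V8_HOST_ARCH_ARM
      if (cpu.has_neon()) std::printf("NEON available\n");
    #endif
      std::printf("processors online: %d\n",
                  v8::internal::CPU::NumberOfProcessorsOnline());
      return 0;
    }
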
diff --git a/deps/v8/src/cpu.h b/deps/v8/src/cpu.h
index 247af71aa..b2e9f7da7 100644
--- a/deps/v8/src/cpu.h
+++ b/deps/v8/src/cpu.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2006-2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -44,14 +44,64 @@ namespace internal {
// ----------------------------------------------------------------------------
// CPU
//
-// This class has static methods for the architecture specific functions. Add
-// methods here to cope with differences between the supported architectures.
+// Query information about the processor.
//
-// For each architecture the file cpu_<arch>.cc contains the implementation of
-// these functions.
+// This class also has static methods for the architecture specific functions.
+// Add methods here to cope with differences between the supported
+// architectures. For each architecture the file cpu_<arch>.cc contains the
+// implementation of these static functions.
-class CPU : public AllStatic {
+class CPU V8_FINAL BASE_EMBEDDED {
public:
+ CPU();
+
+ // x86 CPUID information
+ const char* vendor() const { return vendor_; }
+ int stepping() const { return stepping_; }
+ int model() const { return model_; }
+ int ext_model() const { return ext_model_; }
+ int family() const { return family_; }
+ int ext_family() const { return ext_family_; }
+ int type() const { return type_; }
+
+ // arm implementer/part information
+ int implementer() const { return implementer_; }
+ static const int ARM = 0x41;
+ static const int QUALCOMM = 0x51;
+ int architecture() const { return architecture_; }
+ int part() const { return part_; }
+ static const int ARM_CORTEX_A5 = 0xc05;
+ static const int ARM_CORTEX_A7 = 0xc07;
+ static const int ARM_CORTEX_A8 = 0xc08;
+ static const int ARM_CORTEX_A9 = 0xc09;
+ static const int ARM_CORTEX_A12 = 0xc0c;
+ static const int ARM_CORTEX_A15 = 0xc0f;
+
+ // General features
+ bool has_fpu() const { return has_fpu_; }
+
+ // x86 features
+ bool has_cmov() const { return has_cmov_; }
+ bool has_sahf() const { return has_sahf_; }
+ bool has_mmx() const { return has_mmx_; }
+ bool has_sse() const { return has_sse_; }
+ bool has_sse2() const { return has_sse2_; }
+ bool has_sse3() const { return has_sse3_; }
+ bool has_ssse3() const { return has_ssse3_; }
+ bool has_sse41() const { return has_sse41_; }
+ bool has_sse42() const { return has_sse42_; }
+
+ // arm features
+ bool has_idiva() const { return has_idiva_; }
+ bool has_neon() const { return has_neon_; }
+ bool has_thumbee() const { return has_thumbee_; }
+ bool has_vfp() const { return has_vfp_; }
+ bool has_vfp3() const { return has_vfp3_; }
+ bool has_vfp3_d32() const { return has_vfp3_d32_; }
+
+ // Returns the number of processors online.
+ static int NumberOfProcessorsOnline();
+
// Initializes the cpu architecture support. Called once at VM startup.
static void SetUp();
@@ -60,8 +110,33 @@ class CPU : public AllStatic {
// Flush instruction cache.
static void FlushICache(void* start, size_t size);
- // Try to activate a system level debugger.
- static void DebugBreak();
+ private:
+ char vendor_[13];
+ int stepping_;
+ int model_;
+ int ext_model_;
+ int family_;
+ int ext_family_;
+ int type_;
+ int implementer_;
+ int architecture_;
+ int part_;
+ bool has_fpu_;
+ bool has_cmov_;
+ bool has_sahf_;
+ bool has_mmx_;
+ bool has_sse_;
+ bool has_sse2_;
+ bool has_sse3_;
+ bool has_ssse3_;
+ bool has_sse41_;
+ bool has_sse42_;
+ bool has_idiva_;
+ bool has_neon_;
+ bool has_thumbee_;
+ bool has_vfp_;
+ bool has_vfp3_;
+ bool has_vfp3_d32_;
};
} } // namespace v8::internal
diff --git a/deps/v8/src/d8-debug.cc b/deps/v8/src/d8-debug.cc
index 9a72518f4..602ae166b 100644
--- a/deps/v8/src/d8-debug.cc
+++ b/deps/v8/src/d8-debug.cc
@@ -29,8 +29,9 @@
#include "d8.h"
#include "d8-debug.h"
-#include "platform.h"
#include "debug-agent.h"
+#include "platform.h"
+#include "platform/socket.h"
namespace v8 {
@@ -171,21 +172,14 @@ void RunRemoteDebugger(Isolate* isolate, int port) {
void RemoteDebugger::Run() {
bool ok;
- // Make sure that socket support is initialized.
- ok = i::Socket::SetUp();
- if (!ok) {
- printf("Unable to initialize socket support %d\n", i::Socket::LastError());
- return;
- }
-
// Connect to the debugger agent.
- conn_ = i::OS::CreateSocket();
+ conn_ = new i::Socket;
static const int kPortStrSize = 6;
char port_str[kPortStrSize];
i::OS::SNPrintF(i::Vector<char>(port_str, kPortStrSize), "%d", port_);
ok = conn_->Connect("localhost", port_str);
if (!ok) {
- printf("Unable to connect to debug agent %d\n", i::Socket::LastError());
+ printf("Unable to connect to debug agent %d\n", i::Socket::GetLastError());
return;
}
@@ -201,7 +195,7 @@ void RemoteDebugger::Run() {
// Process events received from debugged VM and from the keyboard.
bool terminate = false;
while (!terminate) {
- event_available_->Wait();
+ event_available_.Wait();
RemoteDebuggerEvent* event = GetEvent();
switch (event->type()) {
case RemoteDebuggerEvent::kMessage:
@@ -248,7 +242,7 @@ void RemoteDebugger::ConnectionClosed() {
void RemoteDebugger::AddEvent(RemoteDebuggerEvent* event) {
- i::ScopedLock lock(event_access_);
+ i::LockGuard<i::Mutex> lock_guard(&event_access_);
if (head_ == NULL) {
ASSERT(tail_ == NULL);
head_ = event;
@@ -258,12 +252,12 @@ void RemoteDebugger::AddEvent(RemoteDebuggerEvent* event) {
tail_->set_next(event);
tail_ = event;
}
- event_available_->Signal();
+ event_available_.Signal();
}
RemoteDebuggerEvent* RemoteDebugger::GetEvent() {
- i::ScopedLock lock(event_access_);
+ i::LockGuard<i::Mutex> lock_guard(&event_access_);
ASSERT(head_ != NULL);
RemoteDebuggerEvent* result = head_;
head_ = head_->next();
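
AddEvent() and GetEvent() above form a textbook producer/consumer queue: the mutex guards the intrusive linked list while the semaphore counts pending items, so the consumer blocks instead of polling. The same shape reduced to its essentials, using only the Mutex, Semaphore, and LockGuard primitives this patch introduces (the Event type is illustrative):

    struct Event { Event* next; };

    class EventQueue {
     public:
      EventQueue() : available_(0), head_(NULL), tail_(NULL) {}

      void Put(Event* e) {                    // producer side
        i::LockGuard<i::Mutex> guard(&mutex_);
        e->next = NULL;
        if (tail_ == NULL) head_ = e; else tail_->next = e;
        tail_ = e;
        available_.Signal();                  // one more item is available
      }

      Event* Get() {                          // consumer side
        available_.Wait();                    // blocks until a Put() happened
        i::LockGuard<i::Mutex> guard(&mutex_);
        Event* e = head_;
        head_ = e->next;
        if (head_ == NULL) tail_ = NULL;
        return e;
      }

     private:
      i::Mutex mutex_;                        // guards head_ and tail_
      i::Semaphore available_;                // counts queued events
      Event* head_;
      Event* tail_;
    };
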
diff --git a/deps/v8/src/d8-debug.h b/deps/v8/src/d8-debug.h
index 2386b6bd6..55876229a 100644
--- a/deps/v8/src/d8-debug.h
+++ b/deps/v8/src/d8-debug.h
@@ -53,8 +53,7 @@ class RemoteDebugger {
explicit RemoteDebugger(Isolate* isolate, int port)
: isolate_(isolate),
port_(port),
- event_access_(i::OS::CreateMutex()),
- event_available_(i::OS::CreateSemaphore(0)),
+ event_available_(0),
head_(NULL), tail_(NULL) {}
void Run();
@@ -84,8 +83,8 @@ class RemoteDebugger {
// Linked list of events from debugged V8 and from keyboard input. Access to
// the list is guarded by a mutex and a semaphore signals new items in the
// list.
- i::Mutex* event_access_;
- i::Semaphore* event_available_;
+ i::Mutex event_access_;
+ i::Semaphore event_available_;
RemoteDebuggerEvent* head_;
RemoteDebuggerEvent* tail_;
diff --git a/deps/v8/src/d8.cc b/deps/v8/src/d8.cc
index c7b66c2a1..fb75d81c2 100644
--- a/deps/v8/src/d8.cc
+++ b/deps/v8/src/d8.cc
@@ -157,7 +157,7 @@ CounterMap* Shell::counter_map_;
i::OS::MemoryMappedFile* Shell::counters_file_ = NULL;
CounterCollection Shell::local_counters_;
CounterCollection* Shell::counters_ = &local_counters_;
-i::Mutex* Shell::context_mutex_(i::OS::CreateMutex());
+i::Mutex Shell::context_mutex_;
Persistent<Context> Shell::utility_context_;
#endif // V8_SHARED
@@ -271,10 +271,10 @@ PerIsolateData::RealmScope::RealmScope(PerIsolateData* data) : data_(data) {
PerIsolateData::RealmScope::~RealmScope() {
// Drop realms to avoid keeping them alive.
for (int i = 0; i < data_->realm_count_; ++i)
- data_->realms_[i].Dispose(data_->isolate_);
+ data_->realms_[i].Dispose();
delete[] data_->realms_;
if (!data_->realm_shared_.IsEmpty())
- data_->realm_shared_.Dispose(data_->isolate_);
+ data_->realm_shared_.Dispose();
}
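
The Dispose() calls losing their Isolate argument reflect the 3.21 Persistent handle API: a Persistent now remembers the isolate it was created in, so disposal takes no parameter. In sketch form:

    v8::Persistent<v8::Context> realm;
    // pre-3.21:  realm.Dispose(isolate);
    realm.Dispose();   // 3.21.x: the isolate is implicit in the handle
    realm.Clear();     // leave the handle empty, as RealmDispose() does
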
@@ -361,7 +361,7 @@ void Shell::RealmDispose(const v8::FunctionCallbackInfo<v8::Value>& args) {
Throw("Invalid realm index");
return;
}
- data->realms_[index].Dispose(isolate);
+ data->realms_[index].Dispose();
data->realms_[index].Clear();
}
@@ -420,7 +420,7 @@ void Shell::RealmSharedSet(Local<String> property,
const PropertyCallbackInfo<void>& info) {
Isolate* isolate = info.GetIsolate();
PerIsolateData* data = PerIsolateData::Get(isolate);
- if (!data->realm_shared_.IsEmpty()) data->realm_shared_.Dispose(isolate);
+ if (!data->realm_shared_.IsEmpty()) data->realm_shared_.Dispose();
data->realm_shared_.Reset(isolate, value);
}
@@ -766,13 +766,14 @@ void Shell::InstallUtilityScript(Isolate* isolate) {
#ifdef ENABLE_DEBUGGER_SUPPORT
if (i::FLAG_debugger) printf("JavaScript debugger enabled\n");
// Install the debugger object in the utility scope
- i::Debug* debug = i::Isolate::Current()->debug();
+ i::Debug* debug = reinterpret_cast<i::Isolate*>(isolate)->debug();
debug->Load();
i::Handle<i::JSObject> js_debug
= i::Handle<i::JSObject>(debug->debug_context()->global_object());
utility_context->Global()->Set(String::New("$debug"),
Utils::ToLocal(js_debug));
- debug->debug_context()->set_security_token(HEAP->undefined_value());
+ debug->debug_context()->set_security_token(
+ reinterpret_cast<i::Isolate*>(isolate)->heap()->undefined_value());
#endif // ENABLE_DEBUGGER_SUPPORT
// Run the d8 shell utility script in the utility context
@@ -925,7 +926,7 @@ void Shell::InitializeDebugger(Isolate* isolate) {
Local<Context> Shell::CreateEvaluationContext(Isolate* isolate) {
#ifndef V8_SHARED
// This needs to be a critical section since this is not thread-safe
- i::ScopedLock lock(context_mutex_);
+ i::LockGuard<i::Mutex> lock_guard(&context_mutex_);
#endif // V8_SHARED
// Initialize the global objects
Handle<ObjectTemplate> global_template = CreateGlobalTemplate(isolate);
@@ -935,7 +936,7 @@ Local<Context> Shell::CreateEvaluationContext(Isolate* isolate) {
Context::Scope scope(context);
#ifndef V8_SHARED
- i::Factory* factory = i::Isolate::Current()->factory();
+ i::Factory* factory = reinterpret_cast<i::Isolate*>(isolate)->factory();
i::JSArguments js_args = i::FLAG_js_arguments;
i::Handle<i::FixedArray> arguments_array =
factory->NewFixedArray(js_args.argc());
@@ -1011,7 +1012,6 @@ void Shell::OnExit() {
"-------------+\n");
delete [] counters;
}
- delete context_mutex_;
delete counters_file_;
delete counter_map_;
#endif // V8_SHARED
@@ -1221,10 +1221,6 @@ void ShellThread::Run() {
SourceGroup::~SourceGroup() {
#ifndef V8_SHARED
- delete next_semaphore_;
- next_semaphore_ = NULL;
- delete done_semaphore_;
- done_semaphore_ = NULL;
delete thread_;
thread_ = NULL;
#endif // V8_SHARED
@@ -1285,7 +1281,7 @@ i::Thread::Options SourceGroup::GetThreadOptions() {
void SourceGroup::ExecuteInThread() {
Isolate* isolate = Isolate::New();
do {
- if (next_semaphore_ != NULL) next_semaphore_->Wait();
+ next_semaphore_.Wait();
{
Isolate::Scope iscope(isolate);
Locker lock(isolate);
@@ -1305,7 +1301,7 @@ void SourceGroup::ExecuteInThread() {
V8::IdleNotification(kLongIdlePauseInMs);
}
}
- if (done_semaphore_ != NULL) done_semaphore_->Signal();
+ done_semaphore_.Signal();
} while (!Shell::options.last_run);
isolate->Dispose();
}
@@ -1316,7 +1312,7 @@ void SourceGroup::StartExecuteInThread() {
thread_ = new IsolateThread(this);
thread_->Start();
}
- next_semaphore_->Signal();
+ next_semaphore_.Signal();
}
@@ -1325,7 +1321,7 @@ void SourceGroup::WaitForThread() {
if (Shell::options.last_run) {
thread_->Join();
} else {
- done_semaphore_->Wait();
+ done_semaphore_.Wait();
}
}
#endif // V8_SHARED
@@ -1556,11 +1552,10 @@ int Shell::RunMain(Isolate* isolate, int argc, char* argv[]) {
#ifdef V8_SHARED
static void SetStandaloneFlagsViaCommandLine() {
- int fake_argc = 3;
- char **fake_argv = new char*[3];
+ int fake_argc = 2;
+ char **fake_argv = new char*[2];
fake_argv[0] = NULL;
- fake_argv[1] = strdup("--harmony-typed-arrays");
- fake_argv[2] = strdup("--trace-hydrogen-file=hydrogen.cfg");
+ fake_argv[1] = strdup("--trace-hydrogen-file=hydrogen.cfg");
v8::V8::SetFlagsFromCommandLine(&fake_argc, fake_argv, false);
free(fake_argv[1]);
delete[] fake_argv;
@@ -1649,8 +1644,6 @@ int Shell::Main(int argc, char* argv[]) {
if (!SetOptions(argc, argv)) return 1;
v8::V8::InitializeICU();
#ifndef V8_SHARED
- i::FLAG_harmony_array_buffer = true;
- i::FLAG_harmony_typed_arrays = true;
i::FLAG_trace_hydrogen_file = "hydrogen.cfg";
#else
SetStandaloneFlagsViaCommandLine();
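
SetStandaloneFlagsViaCommandLine() is the only way to set internal flags in a V8_SHARED build, where the i::FLAG_* variables are not visible: synthesize a command line and hand it to the public API. The same trick, generalized into a hypothetical helper:

    #include <cstdlib>   // free
    #include <cstring>   // strdup

    static void SetFlag(const char* flag) {
      int argc = 2;
      char** argv = new char*[2];
      argv[0] = NULL;              // program-name slot, ignored by the parser
      argv[1] = strdup(flag);
      v8::V8::SetFlagsFromCommandLine(&argc, argv, false);
      free(argv[1]);
      delete[] argv;
    }
    // e.g. SetFlag("--trace-hydrogen-file=hydrogen.cfg");
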
diff --git a/deps/v8/src/d8.h b/deps/v8/src/d8.h
index 3b06985ca..1ae1bcfe6 100644
--- a/deps/v8/src/d8.h
+++ b/deps/v8/src/d8.h
@@ -140,8 +140,8 @@ class SourceGroup {
public:
SourceGroup() :
#ifndef V8_SHARED
- next_semaphore_(v8::internal::OS::CreateSemaphore(0)),
- done_semaphore_(v8::internal::OS::CreateSemaphore(0)),
+ next_semaphore_(0),
+ done_semaphore_(0),
thread_(NULL),
#endif // V8_SHARED
argv_(NULL),
@@ -180,8 +180,8 @@ class SourceGroup {
static i::Thread::Options GetThreadOptions();
void ExecuteInThread();
- i::Semaphore* next_semaphore_;
- i::Semaphore* done_semaphore_;
+ i::Semaphore next_semaphore_;
+ i::Semaphore done_semaphore_;
i::Thread* thread_;
#endif // V8_SHARED
@@ -390,7 +390,7 @@ class Shell : public i::AllStatic {
static CounterCollection local_counters_;
static CounterCollection* counters_;
static i::OS::MemoryMappedFile* counters_file_;
- static i::Mutex* context_mutex_;
+ static i::Mutex context_mutex_;
static Counter* GetCounter(const char* name, bool is_histogram);
static void InstallUtilityScript(Isolate* isolate);
@@ -400,8 +400,8 @@ class Shell : public i::AllStatic {
static void RunShell(Isolate* isolate);
static bool SetOptions(int argc, char* argv[]);
static Handle<ObjectTemplate> CreateGlobalTemplate(Isolate* isolate);
- static Handle<FunctionTemplate> CreateArrayBufferTemplate(InvocationCallback);
- static Handle<FunctionTemplate> CreateArrayTemplate(InvocationCallback);
+ static Handle<FunctionTemplate> CreateArrayBufferTemplate(FunctionCallback);
+ static Handle<FunctionTemplate> CreateArrayTemplate(FunctionCallback);
static Handle<Value> CreateExternalArrayBuffer(Isolate* isolate,
Handle<Object> buffer,
int32_t size);
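
The InvocationCallback → FunctionCallback substitution in this header is the new callback ABI: instead of returning a Handle<Value>, a callback writes its result through the callback info. Both shapes, with CreateBuffer as an illustrative name:

    // Old signature (pre-3.21):
    //   static v8::Handle<v8::Value> CreateBuffer(const v8::Arguments& args);

    // New signature:
    static void CreateBuffer(const v8::FunctionCallbackInfo<v8::Value>& args) {
      v8::Isolate* isolate = args.GetIsolate();
      v8::HandleScope scope(isolate);
      // ... construct the result object here ...
      args.GetReturnValue().Set(v8::Int32::New(0));  // placeholder result
    }
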
diff --git a/deps/v8/src/debug-agent.cc b/deps/v8/src/debug-agent.cc
index 811c00e0c..51823aaf2 100644
--- a/deps/v8/src/debug-agent.cc
+++ b/deps/v8/src/debug-agent.cc
@@ -25,12 +25,12 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#ifdef ENABLE_DEBUGGER_SUPPORT
#include "v8.h"
#include "debug.h"
#include "debug-agent.h"
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
+#include "platform/socket.h"
namespace v8 {
namespace internal {
@@ -38,16 +38,36 @@ namespace internal {
// Public V8 debugger API message handler function. This function just delegates
// to the debugger agent through its data parameter.
void DebuggerAgentMessageHandler(const v8::Debug::Message& message) {
- DebuggerAgent* agent = Isolate::Current()->debugger_agent_instance();
+ Isolate* isolate = reinterpret_cast<Isolate*>(message.GetIsolate());
+ DebuggerAgent* agent = isolate->debugger_agent_instance();
ASSERT(agent != NULL);
agent->DebuggerMessage(message);
}
+DebuggerAgent::DebuggerAgent(Isolate* isolate, const char* name, int port)
+ : Thread(name),
+ isolate_(isolate),
+ name_(StrDup(name)),
+ port_(port),
+ server_(new Socket),
+ terminate_(false),
+ session_(NULL),
+ terminate_now_(0),
+ listening_(0) {
+ ASSERT(isolate_->debugger_agent_instance() == NULL);
+ isolate_->set_debugger_agent_instance(this);
+}
+
+
+DebuggerAgent::~DebuggerAgent() {
+ isolate_->set_debugger_agent_instance(NULL);
+ delete server_;
+}
+
+
// Debugger agent main thread.
void DebuggerAgent::Run() {
- const int kOneSecondInMicros = 1000000;
-
// Allow this socket to reuse port even if still in TIME_WAIT.
server_->SetReuseAddress(true);
@@ -60,16 +80,20 @@ void DebuggerAgent::Run() {
// would be that the port is already in use so this avoids a busy loop and
// makes the agent take over the port when it becomes free.
if (!bound) {
+ const TimeDelta kTimeout = TimeDelta::FromSeconds(1);
PrintF("Failed to open socket on port %d, "
- "waiting %d ms before retrying\n", port_, kOneSecondInMicros / 1000);
- terminate_now_->Wait(kOneSecondInMicros);
+ "waiting %d ms before retrying\n", port_,
+ static_cast<int>(kTimeout.InMilliseconds()));
+ if (!terminate_now_.WaitFor(kTimeout)) {
+ if (terminate_) return;
+ }
}
}
// Accept connections on the bound port.
while (!terminate_) {
bool ok = server_->Listen(1);
- listening_->Signal();
+ listening_.Signal();
if (ok) {
// Accept the new connection.
Socket* client = server_->Accept();
@@ -89,7 +113,7 @@ void DebuggerAgent::Shutdown() {
// Signal termination and make the server exit either its listen call or its
// binding loop. This makes sure that no new sessions can be established.
- terminate_now_->Signal();
+ terminate_now_.Signal();
server_->Shutdown();
Join();
@@ -99,19 +123,21 @@ void DebuggerAgent::Shutdown() {
void DebuggerAgent::WaitUntilListening() {
- listening_->Wait();
+ listening_.Wait();
}
static const char* kCreateSessionMessage =
"Remote debugging session already active\r\n";
void DebuggerAgent::CreateSession(Socket* client) {
- ScopedLock with(session_access_);
+ LockGuard<RecursiveMutex> session_access_guard(&session_access_);
// If another session is already established terminate this one.
if (session_ != NULL) {
- client->Send(kCreateSessionMessage, StrLength(kCreateSessionMessage));
+ int len = StrLength(kCreateSessionMessage);
+ int res = client->Send(kCreateSessionMessage, len);
delete client;
+ USE(res);
return;
}
@@ -123,7 +149,7 @@ void DebuggerAgent::CreateSession(Socket* client) {
void DebuggerAgent::CloseSession() {
- ScopedLock with(session_access_);
+ LockGuard<RecursiveMutex> session_access_guard(&session_access_);
// Terminate the session.
if (session_ != NULL) {
@@ -136,7 +162,7 @@ void DebuggerAgent::CloseSession() {
void DebuggerAgent::DebuggerMessage(const v8::Debug::Message& message) {
- ScopedLock with(session_access_);
+ LockGuard<RecursiveMutex> session_access_guard(&session_access_);
// Forward the message handling to the session.
if (session_ != NULL) {
@@ -154,7 +180,7 @@ void DebuggerAgent::OnSessionClosed(DebuggerAgentSession* session) {
}
// Terminate the session.
- ScopedLock with(session_access_);
+ LockGuard<RecursiveMutex> session_access_guard(&session_access_);
ASSERT(session == session_);
if (session == session_) {
session_->Shutdown();
@@ -226,7 +252,7 @@ void DebuggerAgentSession::Shutdown() {
const char* const DebuggerAgentUtil::kContentLength = "Content-Length";
-SmartArrayPointer<char> DebuggerAgentUtil::ReceiveMessage(const Socket* conn) {
+SmartArrayPointer<char> DebuggerAgentUtil::ReceiveMessage(Socket* conn) {
int received;
// Read header.
@@ -243,7 +269,7 @@ SmartArrayPointer<char> DebuggerAgentUtil::ReceiveMessage(const Socket* conn) {
prev_c = c;
received = conn->Receive(&c, 1);
if (received == 0) {
- PrintF("Error %d\n", Socket::LastError());
+ PrintF("Error %d\n", Socket::GetLastError());
return SmartArrayPointer<char>();
}
@@ -305,7 +331,7 @@ SmartArrayPointer<char> DebuggerAgentUtil::ReceiveMessage(const Socket* conn) {
char* buffer = NewArray<char>(content_length + 1);
received = ReceiveAll(conn, buffer, content_length);
if (received < content_length) {
- PrintF("Error %d\n", Socket::LastError());
+ PrintF("Error %d\n", Socket::GetLastError());
return SmartArrayPointer<char>();
}
buffer[content_length] = '\0';
@@ -314,7 +340,7 @@ SmartArrayPointer<char> DebuggerAgentUtil::ReceiveMessage(const Socket* conn) {
}
-bool DebuggerAgentUtil::SendConnectMessage(const Socket* conn,
+bool DebuggerAgentUtil::SendConnectMessage(Socket* conn,
const char* embedding_host) {
static const int kBufferSize = 80;
char buffer[kBufferSize]; // Sending buffer.
@@ -360,7 +386,7 @@ bool DebuggerAgentUtil::SendConnectMessage(const Socket* conn,
}
-bool DebuggerAgentUtil::SendMessage(const Socket* conn,
+bool DebuggerAgentUtil::SendMessage(Socket* conn,
const Vector<uint16_t> message) {
static const int kBufferSize = 80;
char buffer[kBufferSize]; // Sending buffer both for header and body.
@@ -375,14 +401,17 @@ bool DebuggerAgentUtil::SendMessage(const Socket* conn,
}
// Send the header.
- int len;
- len = OS::SNPrintF(Vector<char>(buffer, kBufferSize),
- "%s: %d\r\n", kContentLength, utf8_len);
- conn->Send(buffer, len);
+ int len = OS::SNPrintF(Vector<char>(buffer, kBufferSize),
+ "%s: %d\r\n", kContentLength, utf8_len);
+ if (conn->Send(buffer, len) < len) {
+ return false;
+ }
// Terminate header with empty line.
len = OS::SNPrintF(Vector<char>(buffer, kBufferSize), "\r\n");
- conn->Send(buffer, len);
+ if (conn->Send(buffer, len) < len) {
+ return false;
+ }
// Send message body as UTF-8.
int buffer_position = 0; // Current buffer position.
@@ -402,13 +431,19 @@ bool DebuggerAgentUtil::SendMessage(const Socket* conn,
const int kEncodedSurrogateLength =
unibrow::Utf16::kUtf8BytesToCodeASurrogate;
ASSERT(buffer_position >= kEncodedSurrogateLength);
- conn->Send(buffer, buffer_position - kEncodedSurrogateLength);
+ len = buffer_position - kEncodedSurrogateLength;
+ if (conn->Send(buffer, len) < len) {
+ return false;
+ }
for (int i = 0; i < kEncodedSurrogateLength; i++) {
buffer[i] = buffer[buffer_position + i];
}
buffer_position = kEncodedSurrogateLength;
} else {
- conn->Send(buffer, buffer_position);
+ len = buffer_position;
+ if (conn->Send(buffer, len) < len) {
+ return false;
+ }
buffer_position = 0;
}
}
@@ -419,7 +454,7 @@ bool DebuggerAgentUtil::SendMessage(const Socket* conn,
}
-bool DebuggerAgentUtil::SendMessage(const Socket* conn,
+bool DebuggerAgentUtil::SendMessage(Socket* conn,
const v8::Handle<v8::String> request) {
static const int kBufferSize = 80;
char buffer[kBufferSize]; // Sending buffer both for header and body.
@@ -428,24 +463,30 @@ bool DebuggerAgentUtil::SendMessage(const Socket* conn,
v8::String::Utf8Value utf8_request(request);
// Send the header.
- int len;
- len = OS::SNPrintF(Vector<char>(buffer, kBufferSize),
- "Content-Length: %d\r\n", utf8_request.length());
- conn->Send(buffer, len);
+ int len = OS::SNPrintF(Vector<char>(buffer, kBufferSize),
+ "Content-Length: %d\r\n", utf8_request.length());
+ if (conn->Send(buffer, len) < len) {
+ return false;
+ }
// Terminate header with empty line.
len = OS::SNPrintF(Vector<char>(buffer, kBufferSize), "\r\n");
- conn->Send(buffer, len);
+ if (conn->Send(buffer, len) < len) {
+ return false;
+ }
// Send message body as UTF-8.
- conn->Send(*utf8_request, utf8_request.length());
+ len = utf8_request.length();
+ if (conn->Send(*utf8_request, len) < len) {
+ return false;
+ }
return true;
}
// Receive the full buffer before returning unless an error occurs.
-int DebuggerAgentUtil::ReceiveAll(const Socket* conn, char* data, int len) {
+int DebuggerAgentUtil::ReceiveAll(Socket* conn, char* data, int len) {
int total_received = 0;
while (total_received < len) {
int received = conn->Receive(data + total_received, len - total_received);
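
The bind-retry loop earlier in this file shows the new timeout idiom: a typed TimeDelta plus Semaphore::WaitFor() replaces raw microsecond integers and Wait(int). Reduced to the primitive's contract (true means signalled, false means the wait timed out):

    const TimeDelta kTimeout = TimeDelta::FromSeconds(1);
    PrintF("retrying in %d ms\n", static_cast<int>(kTimeout.InMilliseconds()));
    if (terminate_now_.WaitFor(kTimeout)) {
      // Signalled before the deadline, e.g. by Shutdown().
    } else {
      // Timed out; loop around and retry the bind.
    }
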
diff --git a/deps/v8/src/debug-agent.h b/deps/v8/src/debug-agent.h
index 61151900f..138e51acc 100644
--- a/deps/v8/src/debug-agent.h
+++ b/deps/v8/src/debug-agent.h
@@ -37,27 +37,15 @@ namespace internal {
// Forward declarations.
class DebuggerAgentSession;
+class Socket;
// Debugger agent which starts a socket listener on the debugger port and
// handles connection from a remote debugger.
class DebuggerAgent: public Thread {
public:
- DebuggerAgent(const char* name, int port)
- : Thread(name),
- isolate_(Isolate::Current()),
- name_(StrDup(name)), port_(port),
- server_(OS::CreateSocket()), terminate_(false),
- session_access_(OS::CreateMutex()), session_(NULL),
- terminate_now_(OS::CreateSemaphore(0)),
- listening_(OS::CreateSemaphore(0)) {
- ASSERT(isolate_->debugger_agent_instance() == NULL);
- isolate_->set_debugger_agent_instance(this);
- }
- ~DebuggerAgent() {
- isolate_->set_debugger_agent_instance(NULL);
- delete server_;
- }
+ DebuggerAgent(Isolate* isolate, const char* name, int port);
+ ~DebuggerAgent();
void Shutdown();
void WaitUntilListening();
@@ -76,10 +64,10 @@ class DebuggerAgent: public Thread {
int port_; // Port to use for the agent.
Socket* server_; // Server socket for listen/accept.
bool terminate_; // Termination flag.
- Mutex* session_access_; // Mutex guarging access to session_.
+ RecursiveMutex session_access_; // Mutex guarding access to session_.
DebuggerAgentSession* session_; // Current active session if any.
- Semaphore* terminate_now_; // Semaphore to signal termination.
- Semaphore* listening_;
+ Semaphore terminate_now_; // Semaphore to signal termination.
+ Semaphore listening_;
friend class DebuggerAgentSession;
friend void DebuggerAgentMessageHandler(const v8::Debug::Message& message);
@@ -116,13 +104,11 @@ class DebuggerAgentUtil {
public:
static const char* const kContentLength;
- static SmartArrayPointer<char> ReceiveMessage(const Socket* conn);
- static bool SendConnectMessage(const Socket* conn,
- const char* embedding_host);
- static bool SendMessage(const Socket* conn, const Vector<uint16_t> message);
- static bool SendMessage(const Socket* conn,
- const v8::Handle<v8::String> message);
- static int ReceiveAll(const Socket* conn, char* data, int len);
+ static SmartArrayPointer<char> ReceiveMessage(Socket* conn);
+ static bool SendConnectMessage(Socket* conn, const char* embedding_host);
+ static bool SendMessage(Socket* conn, const Vector<uint16_t> message);
+ static bool SendMessage(Socket* conn, const v8::Handle<v8::String> message);
+ static int ReceiveAll(Socket* conn, char* data, int len);
};
} } // namespace v8::internal
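
Throughout DebuggerAgentUtil the patch stops discarding Send() results and treats a short write as failure. ReceiveAll() already loops until the requested byte count arrives; a symmetric write-side helper would look like this (SendAll is hypothetical, not part of the patch):

    static bool SendAll(Socket* conn, const char* data, int len) {
      int total_sent = 0;
      while (total_sent < len) {
        int sent = conn->Send(data + total_sent, len - total_sent);
        if (sent <= 0) return false;   // error or the peer closed the socket
        total_sent += sent;
      }
      return true;
    }
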
diff --git a/deps/v8/src/debug-debugger.js b/deps/v8/src/debug-debugger.js
index a588b4c21..19209d4b9 100644
--- a/deps/v8/src/debug-debugger.js
+++ b/deps/v8/src/debug-debugger.js
@@ -957,12 +957,17 @@ function ExecutionState(break_id) {
this.selected_frame = 0;
}
-ExecutionState.prototype.prepareStep = function(opt_action, opt_count) {
+ExecutionState.prototype.prepareStep = function(opt_action, opt_count,
+ opt_callframe) {
var action = Debug.StepAction.StepIn;
if (!IS_UNDEFINED(opt_action)) action = %ToNumber(opt_action);
var count = opt_count ? %ToNumber(opt_count) : 1;
+ var callFrameId = 0;
+ if (!IS_UNDEFINED(opt_callframe)) {
+ callFrameId = opt_callframe.details_.frameId();
+ }
- return %PrepareStep(this.break_id, action, count);
+ return %PrepareStep(this.break_id, action, count, callFrameId);
};
ExecutionState.prototype.evaluateGlobal = function(source, disable_break,
diff --git a/deps/v8/src/debug.cc b/deps/v8/src/debug.cc
index 990a9a5c7..0496b8cb0 100644
--- a/deps/v8/src/debug.cc
+++ b/deps/v8/src/debug.cc
@@ -86,8 +86,9 @@ static void PrintLn(v8::Local<v8::Value> value) {
}
-static Handle<Code> ComputeCallDebugPrepareStepIn(int argc, Code::Kind kind) {
- Isolate* isolate = Isolate::Current();
+static Handle<Code> ComputeCallDebugPrepareStepIn(Isolate* isolate,
+ int argc,
+ Code::Kind kind) {
return isolate->stub_cache()->ComputeCallDebugPrepareStepIn(argc, kind);
}
@@ -403,11 +404,11 @@ void BreakLocationIterator::ClearDebugBreak() {
bool BreakLocationIterator::IsStepInLocation(Isolate* isolate) {
- if (RelocInfo::IsConstructCall(rmode())) {
+ if (RelocInfo::IsConstructCall(original_rmode())) {
return true;
} else if (RelocInfo::IsCodeTarget(rmode())) {
HandleScope scope(debug_info_->GetIsolate());
- Address target = rinfo()->target_address();
+ Address target = original_rinfo()->target_address();
Handle<Code> target_code(Code::GetCodeFromTargetAddress(target));
if (target_code->kind() == Code::STUB) {
return target_code->major_key() == CodeStub::CallFunction;
@@ -433,7 +434,7 @@ void BreakLocationIterator::PrepareStepIn(Isolate* isolate) {
// the call in the original code as it is the code there that will be
// executed in place of the debug break call.
Handle<Code> stub = ComputeCallDebugPrepareStepIn(
- target_code->arguments_count(), target_code->kind());
+ isolate, target_code->arguments_count(), target_code->kind());
if (IsDebugBreak()) {
original_rinfo()->set_target_address(stub->entry());
} else {
@@ -633,7 +634,7 @@ const int Debug::kFrameDropperFrameSize = 4;
void ScriptCache::Add(Handle<Script> script) {
- GlobalHandles* global_handles = Isolate::Current()->global_handles();
+ GlobalHandles* global_handles = isolate_->global_handles();
// Create an entry in the hash map for the script.
int id = script->id()->value();
HashMap::Entry* entry =
@@ -655,7 +656,7 @@ void ScriptCache::Add(Handle<Script> script) {
Handle<FixedArray> ScriptCache::GetScripts() {
- Factory* factory = Isolate::Current()->factory();
+ Factory* factory = isolate_->factory();
Handle<FixedArray> instances = factory->NewFixedArray(occupancy());
int count = 0;
for (HashMap::Entry* entry = Start(); entry != NULL; entry = Next(entry)) {
@@ -670,7 +671,7 @@ Handle<FixedArray> ScriptCache::GetScripts() {
void ScriptCache::ProcessCollectedScripts() {
- Debugger* debugger = Isolate::Current()->debugger();
+ Debugger* debugger = isolate_->debugger();
for (int i = 0; i < collected_scripts_.length(); i++) {
debugger->OnScriptCollected(collected_scripts_[i]);
}
@@ -679,7 +680,7 @@ void ScriptCache::ProcessCollectedScripts() {
void ScriptCache::Clear() {
- GlobalHandles* global_handles = Isolate::Current()->global_handles();
+ GlobalHandles* global_handles = isolate_->global_handles();
// Iterate the script cache to get rid of all the weak handles.
for (HashMap::Entry* entry = Start(); entry != NULL; entry = Next(entry)) {
ASSERT(entry != NULL);
@@ -708,7 +709,7 @@ void ScriptCache::HandleWeakScript(v8::Isolate* isolate,
script_cache->collected_scripts_.Add(id);
// Clear the weak handle.
- obj->Dispose(isolate);
+ obj->Dispose();
}
@@ -750,7 +751,7 @@ void Debug::HandleWeakDebugInfo(v8::Isolate* isolate,
DebugInfoListNode::DebugInfoListNode(DebugInfo* debug_info): next_(NULL) {
- GlobalHandles* global_handles = Isolate::Current()->global_handles();
+ GlobalHandles* global_handles = debug_info->GetIsolate()->global_handles();
// Globalize the request debug info object and make it weak.
debug_info_ = Handle<DebugInfo>::cast(
(global_handles->Create(debug_info)));
@@ -761,13 +762,12 @@ DebugInfoListNode::DebugInfoListNode(DebugInfo* debug_info): next_(NULL) {
DebugInfoListNode::~DebugInfoListNode() {
- Isolate::Current()->global_handles()->Destroy(
+ debug_info_->GetIsolate()->global_handles()->Destroy(
reinterpret_cast<Object**>(debug_info_.location()));
}
-bool Debug::CompileDebuggerScript(int index) {
- Isolate* isolate = Isolate::Current();
+bool Debug::CompileDebuggerScript(Isolate* isolate, int index) {
Factory* factory = isolate->factory();
HandleScope scope(isolate);
@@ -824,7 +824,7 @@ bool Debug::CompileDebuggerScript(int index) {
ASSERT(!isolate->has_pending_exception());
if (!exception.is_null()) {
isolate->set_pending_exception(*exception);
- MessageHandler::ReportMessage(Isolate::Current(), NULL, message);
+ MessageHandler::ReportMessage(isolate, NULL, message);
isolate->clear_pending_exception();
}
return false;
@@ -852,7 +852,7 @@ bool Debug::Load() {
// Disable breakpoints and interrupts while compiling and running the
// debugger scripts including the context creation code.
- DisableBreak disable(true);
+ DisableBreak disable(isolate_, true);
PostponeInterruptsScope postpone(isolate_);
// Create the debugger context.
@@ -886,12 +886,12 @@ bool Debug::Load() {
// Compile the JavaScript for the debugger in the debugger context.
debugger->set_compiling_natives(true);
bool caught_exception =
- !CompileDebuggerScript(Natives::GetIndex("mirror")) ||
- !CompileDebuggerScript(Natives::GetIndex("debug"));
+ !CompileDebuggerScript(isolate_, Natives::GetIndex("mirror")) ||
+ !CompileDebuggerScript(isolate_, Natives::GetIndex("debug"));
if (FLAG_enable_liveedit) {
caught_exception = caught_exception ||
- !CompileDebuggerScript(Natives::GetIndex("liveedit"));
+ !CompileDebuggerScript(isolate_, Natives::GetIndex("liveedit"));
}
debugger->set_compiling_natives(false);
@@ -958,7 +958,7 @@ Object* Debug::Break(Arguments args) {
}
// Enter the debugger.
- EnterDebugger debugger;
+ EnterDebugger debugger(isolate_);
if (debugger.FailedToEnter()) {
return heap->undefined_value();
}
@@ -1017,7 +1017,7 @@ Object* Debug::Break(Arguments args) {
// Clear queue
thread_local_.queued_step_count_ = 0;
- PrepareStep(StepNext, step_count);
+ PrepareStep(StepNext, step_count, StackFrame::NO_ID);
} else {
// Notify the debug event listeners.
isolate_->debugger()->OnDebugBreak(break_points_hit, false);
@@ -1055,7 +1055,7 @@ Object* Debug::Break(Arguments args) {
ClearStepping();
// Set up for the remaining steps.
- PrepareStep(step_action, step_count);
+ PrepareStep(step_action, step_count, StackFrame::NO_ID);
}
if (thread_local_.frame_drop_mode_ == FRAMES_UNTOUCHED) {
@@ -1376,7 +1376,9 @@ bool Debug::IsBreakOnException(ExceptionBreakType type) {
}
-void Debug::PrepareStep(StepAction step_action, int step_count) {
+void Debug::PrepareStep(StepAction step_action,
+ int step_count,
+ StackFrame::Id frame_id) {
HandleScope scope(isolate_);
PrepareForBreakPoints();
@@ -1402,6 +1404,9 @@ void Debug::PrepareStep(StepAction step_action, int step_count) {
// If there is no JavaScript stack don't do anything.
return;
}
+ if (frame_id != StackFrame::NO_ID) {
+ id = frame_id;
+ }
JavaScriptFrameIterator frames_it(isolate_, id);
JavaScriptFrame* frame = frames_it.frame();
@@ -1649,7 +1654,7 @@ bool Debug::IsBreakStub(Code* code) {
// Find the builtin to use for invoking the debug break
Handle<Code> Debug::FindDebugBreak(Handle<Code> code, RelocInfo::Mode mode) {
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = code->GetIsolate();
// Find the builtin debug break function matching the calling convention
// used by the call site.
@@ -1704,7 +1709,7 @@ Handle<Code> Debug::FindDebugBreak(Handle<Code> code, RelocInfo::Mode mode) {
Handle<Object> Debug::GetSourceBreakLocations(
Handle<SharedFunctionInfo> shared,
BreakPositionAlignment position_alignment) {
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = shared->GetIsolate();
Heap* heap = isolate->heap();
if (!HasDebugInfo(shared)) {
return Handle<Object>(heap->undefined_value(), isolate);
@@ -1883,7 +1888,7 @@ static bool CompileFullCodeForDebugging(Handle<JSFunction> function,
// Use compile lazy which will end up compiling the full code in the
// configuration configured above.
bool result = Compiler::CompileLazy(&info);
- ASSERT(result != Isolate::Current()->has_pending_exception());
+ ASSERT(result != info.isolate()->has_pending_exception());
info.isolate()->clear_pending_exception();
#if DEBUG
if (result) {
@@ -2047,7 +2052,7 @@ void Debug::PrepareForBreakPoints() {
// If preparing for the first break point make sure to deoptimize all
// functions as debugging does not work with optimized code.
if (!has_break_points_) {
- if (FLAG_parallel_recompilation) {
+ if (FLAG_concurrent_recompilation) {
isolate_->optimizing_compiler_thread()->Flush();
}
@@ -2105,10 +2110,9 @@ void Debug::PrepareForBreakPoints() {
function->set_code(*lazy_compile);
function->shared()->set_code(*lazy_compile);
} else if (kind == Code::BUILTIN &&
- (function->IsMarkedForInstallingRecompiledCode() ||
- function->IsInRecompileQueue() ||
+ (function->IsInRecompileQueue() ||
function->IsMarkedForLazyRecompilation() ||
- function->IsMarkedForParallelRecompilation())) {
+ function->IsMarkedForConcurrentRecompilation())) {
// Abort in-flight compilation.
Code* shared_code = function->shared()->code();
if (shared_code->kind() == Code::FUNCTION &&
@@ -2537,7 +2541,7 @@ void Debug::CreateScriptCache() {
"Debug::CreateScriptCache");
ASSERT(script_cache_ == NULL);
- script_cache_ = new ScriptCache();
+ script_cache_ = new ScriptCache(isolate_);
// Scan heap for Script objects.
int count = 0;
@@ -2612,24 +2616,18 @@ Debugger::Debugger(Isolate* isolate)
message_handler_(NULL),
debugger_unload_pending_(false),
host_dispatch_handler_(NULL),
- dispatch_handler_access_(OS::CreateMutex()),
debug_message_dispatch_handler_(NULL),
message_dispatch_helper_thread_(NULL),
- host_dispatch_micros_(100 * 1000),
+ host_dispatch_period_(TimeDelta::FromMilliseconds(100)),
agent_(NULL),
command_queue_(isolate->logger(), kQueueInitialSize),
- command_received_(OS::CreateSemaphore(0)),
+ command_received_(0),
event_command_queue_(isolate->logger(), kQueueInitialSize),
isolate_(isolate) {
}
-Debugger::~Debugger() {
- delete dispatch_handler_access_;
- dispatch_handler_access_ = 0;
- delete command_received_;
- command_received_ = 0;
-}
+Debugger::~Debugger() {}
Handle<Object> Debugger::MakeJSObject(Vector<const char> constructor_name,
@@ -2760,7 +2758,7 @@ void Debugger::OnException(Handle<Object> exception, bool uncaught) {
}
// Enter the debugger.
- EnterDebugger debugger;
+ EnterDebugger debugger(isolate_);
if (debugger.FailedToEnter()) return;
// Clear all current stepping setup.
@@ -2826,7 +2824,7 @@ void Debugger::OnBeforeCompile(Handle<Script> script) {
if (!EventActive(v8::BeforeCompile)) return;
// Enter the debugger.
- EnterDebugger debugger;
+ EnterDebugger debugger(isolate_);
if (debugger.FailedToEnter()) return;
// Create the event data object.
@@ -2863,7 +2861,7 @@ void Debugger::OnAfterCompile(Handle<Script> script,
bool in_debugger = debug->InDebugger();
// Enter the debugger.
- EnterDebugger debugger;
+ EnterDebugger debugger(isolate_);
if (debugger.FailedToEnter()) return;
// If debugging there might be script break points registered for this
@@ -2891,7 +2889,7 @@ void Debugger::OnAfterCompile(Handle<Script> script,
bool caught_exception;
Handle<Object> argv[] = { wrapper };
Execution::TryCall(Handle<JSFunction>::cast(update_script_break_points),
- Isolate::Current()->js_builtins_object(),
+ isolate_->js_builtins_object(),
ARRAY_SIZE(argv),
argv,
&caught_exception);
@@ -2926,7 +2924,7 @@ void Debugger::OnScriptCollected(int id) {
if (!Debugger::EventActive(v8::ScriptCollected)) return;
// Enter the debugger.
- EnterDebugger debugger;
+ EnterDebugger debugger(isolate_);
if (debugger.FailedToEnter()) return;
// Create the script collected state object.
@@ -3043,7 +3041,7 @@ void Debugger::CallJSEventCallback(v8::DebugEvent event,
Handle<Context> Debugger::GetDebugContext() {
never_unload_debugger_ = true;
- EnterDebugger debugger;
+ EnterDebugger debugger(isolate_);
return isolate_->debug()->debug_context();
}
@@ -3152,14 +3150,14 @@ void Debugger::NotifyMessageHandler(v8::DebugEvent event,
// Wait for new command in the queue.
if (Debugger::host_dispatch_handler_) {
// In case there is a host dispatch - do periodic dispatches.
- if (!command_received_->Wait(host_dispatch_micros_)) {
+ if (!command_received_.WaitFor(host_dispatch_period_)) {
// Timeout expired, do the dispatch.
Debugger::host_dispatch_handler_();
continue;
}
} else {
// In case there is no host dispatch - just wait.
- command_received_->Wait();
+ command_received_.Wait();
}
// Get the command from the queue.
@@ -3272,7 +3270,7 @@ void Debugger::SetEventListener(Handle<Object> callback,
void Debugger::SetMessageHandler(v8::Debug::MessageHandler2 handler) {
- ScopedLock with(debugger_access_);
+ LockGuard<RecursiveMutex> with(debugger_access_);
message_handler_ = handler;
ListenersChanged();
@@ -3301,15 +3299,15 @@ void Debugger::ListenersChanged() {
void Debugger::SetHostDispatchHandler(v8::Debug::HostDispatchHandler handler,
- int period) {
+ TimeDelta period) {
host_dispatch_handler_ = handler;
- host_dispatch_micros_ = period * 1000;
+ host_dispatch_period_ = period;
}
void Debugger::SetDebugMessageDispatchHandler(
v8::Debug::DebugMessageDispatchHandler handler, bool provide_locker) {
- ScopedLock with(dispatch_handler_access_);
+ LockGuard<Mutex> lock_guard(&dispatch_handler_access_);
debug_message_dispatch_handler_ = handler;
if (provide_locker && message_dispatch_helper_thread_ == NULL) {
@@ -3322,7 +3320,7 @@ void Debugger::SetDebugMessageDispatchHandler(
// Calls the registered debug message handler. This callback is part of the
// public API.
void Debugger::InvokeMessageHandler(MessageImpl message) {
- ScopedLock with(debugger_access_);
+ LockGuard<RecursiveMutex> with(debugger_access_);
if (message_handler_ != NULL) {
message_handler_(message);
@@ -3343,7 +3341,7 @@ void Debugger::ProcessCommand(Vector<const uint16_t> command,
client_data);
isolate_->logger()->DebugTag("Put command on command_queue.");
command_queue_.Put(message);
- command_received_->Signal();
+ command_received_.Signal();
// Set the debug command break flag to have the command processed.
if (!isolate_->debug()->InDebugger()) {
@@ -3352,7 +3350,7 @@ void Debugger::ProcessCommand(Vector<const uint16_t> command,
MessageDispatchHelperThread* dispatch_thread;
{
- ScopedLock with(dispatch_handler_access_);
+ LockGuard<Mutex> lock_guard(&dispatch_handler_access_);
dispatch_thread = message_dispatch_helper_thread_;
}
@@ -3381,7 +3379,7 @@ void Debugger::EnqueueDebugCommand(v8::Debug::ClientData* client_data) {
bool Debugger::IsDebuggerActive() {
- ScopedLock with(debugger_access_);
+ LockGuard<RecursiveMutex> with(debugger_access_);
return message_handler_ != NULL ||
!event_listener_.is_null() ||
@@ -3396,7 +3394,7 @@ Handle<Object> Debugger::Call(Handle<JSFunction> fun,
Debugger::never_unload_debugger_ = true;
// Enter the debugger.
- EnterDebugger debugger;
+ EnterDebugger debugger(isolate_);
if (debugger.FailedToEnter()) {
return isolate_->factory()->undefined_value();
}
@@ -3410,6 +3408,7 @@ Handle<Object> Debugger::Call(Handle<JSFunction> fun,
Handle<Object> argv[] = { exec_state, data };
Handle<Object> result = Execution::Call(
+ isolate_,
fun,
Handle<Object>(isolate_->debug()->debug_context_->global_proxy(),
isolate_),
@@ -3427,7 +3426,6 @@ static void StubMessageHandler2(const v8::Debug::Message& message) {
bool Debugger::StartAgent(const char* name, int port,
bool wait_for_connection) {
- ASSERT(Isolate::Current() == isolate_);
if (wait_for_connection) {
// Suspend V8 if it is already running or set V8 to suspend whenever
// it starts.
@@ -3439,20 +3437,15 @@ bool Debugger::StartAgent(const char* name, int port,
v8::Debug::DebugBreak();
}
- if (Socket::SetUp()) {
- if (agent_ == NULL) {
- agent_ = new DebuggerAgent(name, port);
- agent_->Start();
- }
- return true;
+ if (agent_ == NULL) {
+ agent_ = new DebuggerAgent(isolate_, name, port);
+ agent_->Start();
}
-
- return false;
+ return true;
}
void Debugger::StopAgent() {
- ASSERT(Isolate::Current() == isolate_);
if (agent_ != NULL) {
agent_->Shutdown();
agent_->Join();
@@ -3463,7 +3456,6 @@ void Debugger::StopAgent() {
void Debugger::WaitForAgent() {
- ASSERT(Isolate::Current() == isolate_);
if (agent_ != NULL)
agent_->WaitUntilListening();
}
@@ -3472,7 +3464,7 @@ void Debugger::WaitForAgent() {
void Debugger::CallMessageDispatchHandler() {
v8::Debug::DebugMessageDispatchHandler handler;
{
- ScopedLock with(dispatch_handler_access_);
+ LockGuard<Mutex> lock_guard(&dispatch_handler_access_);
handler = Debugger::debug_message_dispatch_handler_;
}
if (handler != NULL) {
@@ -3481,8 +3473,8 @@ void Debugger::CallMessageDispatchHandler() {
}
-EnterDebugger::EnterDebugger()
- : isolate_(Isolate::Current()),
+EnterDebugger::EnterDebugger(Isolate* isolate)
+ : isolate_(isolate),
prev_(isolate_->debug()->debugger_entry()),
it_(isolate_),
has_js_frames_(!it_.done()),
@@ -3517,7 +3509,6 @@ EnterDebugger::EnterDebugger()
EnterDebugger::~EnterDebugger() {
- ASSERT(Isolate::Current() == isolate_);
Debug* debug = isolate_->debug();
// Restore to the previous break state.
@@ -3632,6 +3623,11 @@ v8::Handle<v8::Object> MessageImpl::GetExecutionState() const {
}
+v8::Isolate* MessageImpl::GetIsolate() const {
+ return reinterpret_cast<v8::Isolate*>(exec_state_->GetIsolate());
+}
+
+
v8::Handle<v8::Object> MessageImpl::GetEventData() const {
return v8::Utils::ToLocal(event_data_);
}
@@ -3662,7 +3658,7 @@ v8::Handle<v8::String> MessageImpl::GetJSON() const {
v8::Handle<v8::Context> MessageImpl::GetEventContext() const {
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = event_data_->GetIsolate();
v8::Handle<v8::Context> context = GetDebugEventContext(isolate);
// Isolate::context() may be NULL when "script collected" event occurs.
ASSERT(!context.IsEmpty() || event_ == v8::ScriptCollected);
@@ -3703,7 +3699,7 @@ v8::Handle<v8::Object> EventDetailsImpl::GetEventData() const {
v8::Handle<v8::Context> EventDetailsImpl::GetEventContext() const {
- return GetDebugEventContext(Isolate::Current());
+ return GetDebugEventContext(exec_state_->GetIsolate());
}
@@ -3793,24 +3789,17 @@ void CommandMessageQueue::Expand() {
LockingCommandMessageQueue::LockingCommandMessageQueue(Logger* logger, int size)
- : logger_(logger), queue_(size) {
- lock_ = OS::CreateMutex();
-}
-
-
-LockingCommandMessageQueue::~LockingCommandMessageQueue() {
- delete lock_;
-}
+ : logger_(logger), queue_(size) {}
bool LockingCommandMessageQueue::IsEmpty() const {
- ScopedLock sl(lock_);
+ LockGuard<Mutex> lock_guard(&mutex_);
return queue_.IsEmpty();
}
CommandMessage LockingCommandMessageQueue::Get() {
- ScopedLock sl(lock_);
+ LockGuard<Mutex> lock_guard(&mutex_);
CommandMessage result = queue_.Get();
logger_->DebugEvent("Get", result.text());
return result;
@@ -3818,48 +3807,42 @@ CommandMessage LockingCommandMessageQueue::Get() {
void LockingCommandMessageQueue::Put(const CommandMessage& message) {
- ScopedLock sl(lock_);
+ LockGuard<Mutex> lock_guard(&mutex_);
queue_.Put(message);
logger_->DebugEvent("Put", message.text());
}
void LockingCommandMessageQueue::Clear() {
- ScopedLock sl(lock_);
+ LockGuard<Mutex> lock_guard(&mutex_);
queue_.Clear();
}
MessageDispatchHelperThread::MessageDispatchHelperThread(Isolate* isolate)
: Thread("v8:MsgDispHelpr"),
- isolate_(isolate), sem_(OS::CreateSemaphore(0)),
- mutex_(OS::CreateMutex()), already_signalled_(false) {
-}
-
-
-MessageDispatchHelperThread::~MessageDispatchHelperThread() {
- delete mutex_;
- delete sem_;
+ isolate_(isolate), sem_(0),
+ already_signalled_(false) {
}
void MessageDispatchHelperThread::Schedule() {
{
- ScopedLock lock(mutex_);
+ LockGuard<Mutex> lock_guard(&mutex_);
if (already_signalled_) {
return;
}
already_signalled_ = true;
}
- sem_->Signal();
+ sem_.Signal();
}
void MessageDispatchHelperThread::Run() {
while (true) {
- sem_->Wait();
+ sem_.Wait();
{
- ScopedLock lock(mutex_);
+ LockGuard<Mutex> lock_guard(&mutex_);
already_signalled_ = false;
}
{
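
The dominant refactor in debug.cc is dependency injection of the Isolate: scope objects and helpers that used to read the thread-local Isolate::Current() now receive the isolate as an argument, which is also why the ASSERT(Isolate::Current() == isolate_) checks disappear. The patched DisableBreak (defined in the debug.h hunk below) shows the whole pattern in miniature:

    class DisableBreak BASE_EMBEDDED {
     public:
      DisableBreak(Isolate* isolate, bool disable_break)
          : isolate_(isolate) {                 // injected, not read from TLS
        prev_disable_break_ = isolate_->debug()->disable_break();
        isolate_->debug()->set_disable_break(disable_break);
      }
      ~DisableBreak() {
        // No Isolate::Current() assert needed: the member is the right one.
        isolate_->debug()->set_disable_break(prev_disable_break_);
      }
     private:
      Isolate* isolate_;
      bool prev_disable_break_;
    };
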
diff --git a/deps/v8/src/debug.h b/deps/v8/src/debug.h
index 67debc754..2b5f43ab4 100644
--- a/deps/v8/src/debug.h
+++ b/deps/v8/src/debug.h
@@ -174,7 +174,8 @@ class BreakLocationIterator {
// the cache is the script id.
class ScriptCache : private HashMap {
public:
- ScriptCache() : HashMap(ScriptMatch), collected_scripts_(10) {}
+ explicit ScriptCache(Isolate* isolate)
+ : HashMap(ScriptMatch), isolate_(isolate), collected_scripts_(10) {}
virtual ~ScriptCache() { Clear(); }
// Add script to the cache.
@@ -203,6 +204,7 @@ class ScriptCache : private HashMap {
v8::Persistent<v8::Value>* obj,
void* data);
+ Isolate* isolate_;
// List used during GC to temporarily store id's of collected scripts.
List<int> collected_scripts_;
};
@@ -259,7 +261,9 @@ class Debug {
void FloodHandlerWithOneShot();
void ChangeBreakOnException(ExceptionBreakType type, bool enable);
bool IsBreakOnException(ExceptionBreakType type);
- void PrepareStep(StepAction step_action, int step_count);
+ void PrepareStep(StepAction step_action,
+ int step_count,
+ StackFrame::Id frame_id);
void ClearStepping();
void ClearStepOut();
bool IsStepping() { return thread_local_.step_count_ > 0; }
@@ -532,7 +536,7 @@ class Debug {
explicit Debug(Isolate* isolate);
~Debug();
- static bool CompileDebuggerScript(int index);
+ static bool CompileDebuggerScript(Isolate* isolate, int index);
void ClearOneShot();
void ActivateStepIn(StackFrame* frame);
void ClearStepIn();
@@ -664,6 +668,7 @@ class MessageImpl: public v8::Debug::Message {
virtual v8::Handle<v8::String> GetJSON() const;
virtual v8::Handle<v8::Context> GetEventContext() const;
virtual v8::Debug::ClientData* GetClientData() const;
+ virtual v8::Isolate* GetIsolate() const;
private:
MessageImpl(bool is_event,
@@ -762,7 +767,6 @@ class MessageDispatchHelperThread;
class LockingCommandMessageQueue BASE_EMBEDDED {
public:
LockingCommandMessageQueue(Logger* logger, int size);
- ~LockingCommandMessageQueue();
bool IsEmpty() const;
CommandMessage Get();
void Put(const CommandMessage& message);
@@ -770,7 +774,7 @@ class LockingCommandMessageQueue BASE_EMBEDDED {
private:
Logger* logger_;
CommandMessageQueue queue_;
- Mutex* lock_;
+ mutable Mutex mutex_;
DISALLOW_COPY_AND_ASSIGN(LockingCommandMessageQueue);
};
@@ -821,7 +825,7 @@ class Debugger {
void SetEventListener(Handle<Object> callback, Handle<Object> data);
void SetMessageHandler(v8::Debug::MessageHandler2 handler);
void SetHostDispatchHandler(v8::Debug::HostDispatchHandler handler,
- int period);
+ TimeDelta period);
void SetDebugMessageDispatchHandler(
v8::Debug::DebugMessageDispatchHandler handler,
bool provide_locker);
@@ -863,7 +867,7 @@ class Debugger {
friend void ForceUnloadDebugger(); // In test-debug.cc
inline bool EventActive(v8::DebugEvent event) {
- ScopedLock with(debugger_access_);
+ LockGuard<RecursiveMutex> lock_guard(debugger_access_);
// Check whether the message handler has been cleared.
if (debugger_unload_pending_) {
@@ -918,7 +922,7 @@ class Debugger {
Handle<Object> event_data);
void ListenersChanged();
- Mutex* debugger_access_; // Mutex guarding debugger variables.
+ RecursiveMutex* debugger_access_; // Mutex guarding debugger variables.
Handle<Object> event_listener_; // Global handle to listener.
Handle<Object> event_listener_data_;
bool compiling_natives_; // Are we compiling natives?
@@ -929,16 +933,16 @@ class Debugger {
v8::Debug::MessageHandler2 message_handler_;
bool debugger_unload_pending_; // Was message handler cleared?
v8::Debug::HostDispatchHandler host_dispatch_handler_;
- Mutex* dispatch_handler_access_; // Mutex guarding dispatch handler.
+ Mutex dispatch_handler_access_; // Mutex guarding dispatch handler.
v8::Debug::DebugMessageDispatchHandler debug_message_dispatch_handler_;
MessageDispatchHelperThread* message_dispatch_helper_thread_;
- int host_dispatch_micros_;
+ TimeDelta host_dispatch_period_;
DebuggerAgent* agent_;
static const int kQueueInitialSize = 4;
LockingCommandMessageQueue command_queue_;
- Semaphore* command_received_; // Signaled for each command received.
+ Semaphore command_received_; // Signaled for each command received.
LockingCommandMessageQueue event_command_queue_;
Isolate* isolate_;
@@ -956,7 +960,7 @@ class Debugger {
// some reason could not be entered FailedToEnter will return true.
class EnterDebugger BASE_EMBEDDED {
public:
- EnterDebugger();
+ explicit EnterDebugger(Isolate* isolate);
~EnterDebugger();
// Check whether the debugger could be entered.
@@ -983,12 +987,12 @@ class EnterDebugger BASE_EMBEDDED {
// Stack allocated class for disabling break.
class DisableBreak BASE_EMBEDDED {
public:
- explicit DisableBreak(bool disable_break) : isolate_(Isolate::Current()) {
+ explicit DisableBreak(Isolate* isolate, bool disable_break)
+ : isolate_(isolate) {
prev_disable_break_ = isolate_->debug()->disable_break();
isolate_->debug()->set_disable_break(disable_break);
}
~DisableBreak() {
- ASSERT(Isolate::Current() == isolate_);
isolate_->debug()->set_disable_break(prev_disable_break_);
}
@@ -1047,7 +1051,7 @@ class Debug_Address {
class MessageDispatchHelperThread: public Thread {
public:
explicit MessageDispatchHelperThread(Isolate* isolate);
- ~MessageDispatchHelperThread();
+ ~MessageDispatchHelperThread() {}
void Schedule();
@@ -1055,8 +1059,8 @@ class MessageDispatchHelperThread: public Thread {
void Run();
Isolate* isolate_;
- Semaphore* const sem_;
- Mutex* const mutex_;
+ Semaphore sem_;
+ Mutex mutex_;
bool already_signalled_;
DISALLOW_COPY_AND_ASSIGN(MessageDispatchHelperThread);
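
LockingCommandMessageQueue's lock becomes 'mutable Mutex mutex_' because IsEmpty() is const: a const member function sees const members, and LockGuard needs a non-const Mutex to lock. Marking the mutex mutable is the standard idiom for locks (and caches) that must be touched from const accessors. A minimal sketch with an illustrative Counter class:

    class Counter {
     public:
      Counter() : value_(0) {}
      int Get() const {
        LockGuard<Mutex> guard(&mutex_);  // compiles only because of 'mutable'
        return value_;
      }
      void Increment() {
        LockGuard<Mutex> guard(&mutex_);
        ++value_;
      }
     private:
      mutable Mutex mutex_;   // lockable even from const member functions
      int value_;
    };
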
diff --git a/deps/v8/src/deoptimizer.cc b/deps/v8/src/deoptimizer.cc
index dc9ffc511..c979a534d 100644
--- a/deps/v8/src/deoptimizer.cc
+++ b/deps/v8/src/deoptimizer.cc
@@ -56,11 +56,10 @@ static MemoryChunk* AllocateCodeChunk(MemoryAllocator* allocator) {
DeoptimizerData::DeoptimizerData(MemoryAllocator* allocator)
: allocator_(allocator),
- current_(NULL),
#ifdef ENABLE_DEBUGGER_SUPPORT
deoptimized_frame_info_(NULL),
#endif
- deoptimizing_code_list_(NULL) {
+ current_(NULL) {
for (int i = 0; i < Deoptimizer::kBailoutTypesWithCodeEntry; ++i) {
deopt_entry_code_entries_[i] = -1;
deopt_entry_code_[i] = AllocateCodeChunk(allocator);
@@ -73,14 +72,6 @@ DeoptimizerData::~DeoptimizerData() {
allocator_->Free(deopt_entry_code_[i]);
deopt_entry_code_[i] = NULL;
}
-
- DeoptimizingCodeListNode* current = deoptimizing_code_list_;
- while (current != NULL) {
- DeoptimizingCodeListNode* prev = current;
- current = current->next();
- delete prev;
- }
- deoptimizing_code_list_ = NULL;
}
@@ -93,33 +84,19 @@ void DeoptimizerData::Iterate(ObjectVisitor* v) {
#endif
-Code* DeoptimizerData::FindDeoptimizingCode(Address addr) {
- for (DeoptimizingCodeListNode* node = deoptimizing_code_list_;
- node != NULL;
- node = node->next()) {
- if (node->code()->contains(addr)) return *node->code();
- }
- return NULL;
-}
-
-
-void DeoptimizerData::RemoveDeoptimizingCode(Code* code) {
- for (DeoptimizingCodeListNode *prev = NULL, *cur = deoptimizing_code_list_;
- cur != NULL;
- prev = cur, cur = cur->next()) {
- if (*cur->code() == code) {
- if (prev == NULL) {
- deoptimizing_code_list_ = cur->next();
- } else {
- prev->set_next(cur->next());
- }
- delete cur;
- return;
+Code* Deoptimizer::FindDeoptimizingCode(Address addr) {
+ if (function_->IsHeapObject()) {
+ // Search all deoptimizing code in the native context of the function.
+ Context* native_context = function_->context()->native_context();
+ Object* element = native_context->DeoptimizedCodeListHead();
+ while (!element->IsUndefined()) {
+ Code* code = Code::cast(element);
+ ASSERT(code->kind() == Code::OPTIMIZED_FUNCTION);
+ if (code->contains(addr)) return code;
+ element = code->next_code_link();
}
}
- // Deoptimizing code is removed through weak callback. Each object is expected
- // to be removed once and only once.
- UNREACHABLE();
+ return NULL;
}
@@ -289,27 +266,42 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
void Deoptimizer::VisitAllOptimizedFunctionsForContext(
Context* context, OptimizedFunctionVisitor* visitor) {
- Isolate* isolate = context->GetIsolate();
- Zone zone(isolate);
DisallowHeapAllocation no_allocation;
ASSERT(context->IsNativeContext());
visitor->EnterContext(context);
- // Create a snapshot of the optimized functions list. This is needed because
- // visitors might remove more than one link from the list at once.
- ZoneList<JSFunction*> snapshot(1, &zone);
+ // Visit the list of optimized functions, removing elements that
+ // no longer refer to optimized code.
+ JSFunction* prev = NULL;
Object* element = context->OptimizedFunctionsListHead();
while (!element->IsUndefined()) {
- JSFunction* element_function = JSFunction::cast(element);
- snapshot.Add(element_function, &zone);
- element = element_function->next_function_link();
- }
-
- // Run through the snapshot of optimized functions and visit them.
- for (int i = 0; i < snapshot.length(); ++i) {
- visitor->VisitFunction(snapshot.at(i));
+ JSFunction* function = JSFunction::cast(element);
+ Object* next = function->next_function_link();
+ if (function->code()->kind() != Code::OPTIMIZED_FUNCTION ||
+ (visitor->VisitFunction(function),
+ function->code()->kind() != Code::OPTIMIZED_FUNCTION)) {
+ // The function no longer refers to optimized code, or the visitor
+ // replaced its code so that it is no longer optimized code.
+ // Remove the function from this list.
+ if (prev != NULL) {
+ prev->set_next_function_link(next);
+ } else {
+ context->SetOptimizedFunctionsListHead(next);
+ }
+ // The visitor should not alter the link directly.
+ ASSERT(function->next_function_link() == next);
+ // Set the next function link to undefined to indicate it is no longer
+ // in the optimized functions list.
+ function->set_next_function_link(context->GetHeap()->undefined_value());
+ } else {
+ // The visitor should not alter the link directly.
+ ASSERT(function->next_function_link() == next);
+ // Preserve this element.
+ prev = function;
+ }
+ element = next;
}
visitor->LeaveContext(context);
@@ -321,7 +313,7 @@ void Deoptimizer::VisitAllOptimizedFunctions(
OptimizedFunctionVisitor* visitor) {
DisallowHeapAllocation no_allocation;
- // Run through the list of all native contexts and deoptimize.
+ // Run through the list of all native contexts.
Object* context = isolate->heap()->native_contexts_list();
while (!context->IsUndefined()) {
VisitAllOptimizedFunctionsForContext(Context::cast(context), visitor);
@@ -330,217 +322,161 @@ void Deoptimizer::VisitAllOptimizedFunctions(
}
-// Removes the functions selected by the given filter from the optimized
-// function list of the given context and adds their code to the list of
-// code objects to be deoptimized.
-static void SelectCodeToDeoptimize(Context* context,
- OptimizedFunctionFilter* filter,
- ZoneList<Code*>* codes,
- Zone* zone,
- Object* undefined) {
+// Unlink functions referring to code marked for deoptimization, then move
+// marked code from the optimized code list to the deoptimized code list,
+// and patch code for lazy deopt.
+void Deoptimizer::DeoptimizeMarkedCodeForContext(Context* context) {
DisallowHeapAllocation no_allocation;
- Object* current = context->get(Context::OPTIMIZED_FUNCTIONS_LIST);
- Object* remainder_head = undefined;
- Object* remainder_tail = undefined;
-
- // TODO(titzer): rewrite to not modify unselected functions.
- while (current != undefined) {
- JSFunction* function = JSFunction::cast(current);
- current = function->next_function_link();
- if (filter->TakeFunction(function)) {
- // Extract this function from the context's list and remember the code.
+
+ // A "closure" that unlinks optimized code that is going to be
+ // deoptimized from the functions that refer to it.
+ class SelectedCodeUnlinker: public OptimizedFunctionVisitor {
+ public:
+ virtual void EnterContext(Context* context) { } // Don't care.
+ virtual void LeaveContext(Context* context) { } // Don't care.
+ virtual void VisitFunction(JSFunction* function) {
Code* code = function->code();
- ASSERT(code->kind() == Code::OPTIMIZED_FUNCTION);
- if (code->marked_for_deoptimization()) {
- ASSERT(codes->Contains(code));
- } else {
- code->set_marked_for_deoptimization(true);
- codes->Add(code, zone);
- }
+ if (!code->marked_for_deoptimization()) return;
+
+ // Unlink this function and evict from optimized code map.
SharedFunctionInfo* shared = function->shared();
- // Replace the function's code with the shared code.
function->set_code(shared->code());
- // Evict the code from the optimized code map.
shared->EvictFromOptimizedCodeMap(code, "deoptimized function");
- // Remove the function from the optimized functions list.
- function->set_next_function_link(undefined);
if (FLAG_trace_deopt) {
- PrintF("[forced deoptimization: ");
+ PrintF("[deoptimizer unlinked: ");
function->PrintName();
PrintF(" / %" V8PRIxPTR "]\n", reinterpret_cast<intptr_t>(function));
}
- } else {
- // Don't select this function; link it back into the list.
- if (remainder_head == undefined) {
- remainder_head = function;
- } else {
- JSFunction::cast(remainder_tail)->set_next_function_link(function);
- }
- remainder_tail = function;
}
- }
- if (remainder_tail != undefined) {
- JSFunction::cast(remainder_tail)->set_next_function_link(undefined);
- }
- context->set(Context::OPTIMIZED_FUNCTIONS_LIST, remainder_head);
-}
+ };
+ // Unlink all functions that refer to marked code.
+ SelectedCodeUnlinker unlinker;
+ VisitAllOptimizedFunctionsForContext(context, &unlinker);
-class DeoptimizeAllFilter : public OptimizedFunctionFilter {
- public:
- virtual bool TakeFunction(JSFunction* function) {
- return true;
- }
-};
+ // Move marked code from the optimized code list to the deoptimized
+ // code list, collecting them into a ZoneList.
+ Isolate* isolate = context->GetHeap()->isolate();
+ Zone zone(isolate);
+ ZoneList<Code*> codes(10, &zone);
+ // Walk over all optimized code objects in this native context.
+ Code* prev = NULL;
+ Object* element = context->OptimizedCodeListHead();
+ while (!element->IsUndefined()) {
+ Code* code = Code::cast(element);
+ ASSERT(code->kind() == Code::OPTIMIZED_FUNCTION);
+ Object* next = code->next_code_link();
+ if (code->marked_for_deoptimization()) {
+ // Put the code into the list for later patching.
+ codes.Add(code, &zone);
+
+ if (prev != NULL) {
+ // Skip this code in the optimized code list.
+ prev->set_next_code_link(next);
+ } else {
+ // There was no previous node, the next node is the new head.
+ context->SetOptimizedCodeListHead(next);
+ }
-class DeoptimizeWithMatchingCodeFilter : public OptimizedFunctionFilter {
- public:
- explicit DeoptimizeWithMatchingCodeFilter(Code* code) : code_(code) {}
- virtual bool TakeFunction(JSFunction* function) {
- return function->code() == code_;
+ // Move the code to the _deoptimized_ code list.
+ code->set_next_code_link(context->DeoptimizedCodeListHead());
+ context->SetDeoptimizedCodeListHead(code);
+ } else {
+ // Not marked; preserve this element.
+ prev = code;
+ }
+ element = next;
}
- private:
- Code* code_;
-};
+ // TODO(titzer): we need a handle scope only because of the macro assembler,
+ // which is only used in EnsureCodeForDeoptimizationEntry.
+ HandleScope scope(isolate);
+ // Now patch all the codes for deoptimization.
+ for (int i = 0; i < codes.length(); i++) {
+ // It is finally time to die, code object.
+ // Do platform-specific patching to force any activations to lazy deopt.
+ PatchCodeForDeoptimization(isolate, codes[i]);
-class DeoptimizeMarkedCodeFilter : public OptimizedFunctionFilter {
- public:
- virtual bool TakeFunction(JSFunction* function) {
- return function->code()->marked_for_deoptimization();
+ // We might be in the middle of incremental marking with compaction.
+ // Tell collector to treat this code object in a special way and
+ // ignore all slots that might have been recorded on it.
+ isolate->heap()->mark_compact_collector()->InvalidateCode(codes[i]);
}
-};
+}
void Deoptimizer::DeoptimizeAll(Isolate* isolate) {
- DisallowHeapAllocation no_allocation;
-
if (FLAG_trace_deopt) {
- PrintF("[deoptimize all contexts]\n");
+ PrintF("[deoptimize all code in all contexts]\n");
}
-
- DeoptimizeAllFilter filter;
- DeoptimizeAllFunctionsWith(isolate, &filter);
-}
-
-
-void Deoptimizer::DeoptimizeGlobalObject(JSObject* object) {
DisallowHeapAllocation no_allocation;
- DeoptimizeAllFilter filter;
- if (object->IsJSGlobalProxy()) {
- Object* proto = object->GetPrototype();
- ASSERT(proto->IsJSGlobalObject());
- DeoptimizeAllFunctionsForContext(
- GlobalObject::cast(proto)->native_context(), &filter);
- } else if (object->IsGlobalObject()) {
- DeoptimizeAllFunctionsForContext(
- GlobalObject::cast(object)->native_context(), &filter);
+ // For all contexts, mark all code, then deoptimize.
+ Object* context = isolate->heap()->native_contexts_list();
+ while (!context->IsUndefined()) {
+ Context* native_context = Context::cast(context);
+ MarkAllCodeForContext(native_context);
+ DeoptimizeMarkedCodeForContext(native_context);
+ context = native_context->get(Context::NEXT_CONTEXT_LINK);
}
}
-void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
- Code* code = function->code();
- if (code->kind() != Code::OPTIMIZED_FUNCTION) return;
- DeoptimizeWithMatchingCodeFilter filter(code);
- DeoptimizeAllFunctionsForContext(
- function->context()->native_context(), &filter);
-}
-
-
-void Deoptimizer::DeoptimizeAllFunctionsForContext(
- Context* context, OptimizedFunctionFilter* filter) {
- ASSERT(context->IsNativeContext());
- Isolate* isolate = context->GetIsolate();
- Object* undefined = isolate->heap()->undefined_value();
- Zone zone(isolate);
- ZoneList<Code*> codes(4, &zone);
- SelectCodeToDeoptimize(context, filter, &codes, &zone, undefined);
- for (int i = 0; i < codes.length(); i++) {
- DeoptimizeCode(isolate, codes.at(i));
+void Deoptimizer::DeoptimizeMarkedCode(Isolate* isolate) {
+ if (FLAG_trace_deopt) {
+ PrintF("[deoptimize marked code in all contexts]\n");
}
-}
-
-
-void Deoptimizer::DeoptimizeAllFunctionsWith(Isolate* isolate,
- OptimizedFunctionFilter* filter) {
DisallowHeapAllocation no_allocation;
-
- // Run through the list of all native contexts and deoptimize.
+ // For all contexts, deoptimize code already marked.
Object* context = isolate->heap()->native_contexts_list();
while (!context->IsUndefined()) {
- DeoptimizeAllFunctionsForContext(Context::cast(context), filter);
- context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
+ Context* native_context = Context::cast(context);
+ DeoptimizeMarkedCodeForContext(native_context);
+ context = native_context->get(Context::NEXT_CONTEXT_LINK);
}
}
-void Deoptimizer::DeoptimizeCodeList(Isolate* isolate, ZoneList<Code*>* codes) {
- if (codes->length() == 0) return; // Nothing to do.
-
- // Mark the code; any functions refering to this code will be selected.
- for (int i = 0; i < codes->length(); i++) {
- ASSERT(!codes->at(i)->marked_for_deoptimization());
- codes->at(i)->set_marked_for_deoptimization(true);
- }
-
- // For all contexts, remove optimized functions that refer to the selected
- // code from the optimized function lists.
- Object* undefined = isolate->heap()->undefined_value();
- Zone zone(isolate);
- Object* list = isolate->heap()->native_contexts_list();
- DeoptimizeMarkedCodeFilter filter;
- while (!list->IsUndefined()) {
- Context* context = Context::cast(list);
- // Note that selecting code unlinks the functions that refer to it.
- SelectCodeToDeoptimize(context, &filter, codes, &zone, undefined);
- list = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
+void Deoptimizer::DeoptimizeGlobalObject(JSObject* object) {
+ if (FLAG_trace_deopt) {
+ PrintF("[deoptimize global object @ 0x%08" V8PRIxPTR "]\n",
+ reinterpret_cast<intptr_t>(object));
}
-
- // Now deoptimize all the code.
- for (int i = 0; i < codes->length(); i++) {
- DeoptimizeCode(isolate, codes->at(i));
+ if (object->IsJSGlobalProxy()) {
+ Object* proto = object->GetPrototype();
+ ASSERT(proto->IsJSGlobalObject());
+ Context* native_context = GlobalObject::cast(proto)->native_context();
+ MarkAllCodeForContext(native_context);
+ DeoptimizeMarkedCodeForContext(native_context);
+ } else if (object->IsGlobalObject()) {
+ Context* native_context = GlobalObject::cast(object)->native_context();
+ MarkAllCodeForContext(native_context);
+ DeoptimizeMarkedCodeForContext(native_context);
}
}
-void Deoptimizer::DeoptimizeCode(Isolate* isolate, Code* code) {
- HandleScope scope(isolate);
- DisallowHeapAllocation nha;
-
- // Do platform-specific patching of the optimized code.
- PatchCodeForDeoptimization(isolate, code);
-
- // Add the deoptimizing code to the list.
- DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code);
- DeoptimizerData* data = isolate->deoptimizer_data();
- node->set_next(data->deoptimizing_code_list_);
- data->deoptimizing_code_list_ = node;
-
- // We might be in the middle of incremental marking with compaction.
- // Tell collector to treat this code object in a special way and
- // ignore all slots that might have been recorded on it.
- isolate->heap()->mark_compact_collector()->InvalidateCode(code);
+void Deoptimizer::MarkAllCodeForContext(Context* context) {
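+  // Walk the context's optimized code list and mark every entry; the actual
+  // unlinking and patching happens later in DeoptimizeMarkedCodeForContext.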
+ Object* element = context->OptimizedCodeListHead();
+ while (!element->IsUndefined()) {
+ Code* code = Code::cast(element);
+ ASSERT(code->kind() == Code::OPTIMIZED_FUNCTION);
+ code->set_marked_for_deoptimization(true);
+ element = code->next_code_link();
+ }
}
-void Deoptimizer::HandleWeakDeoptimizedCode(v8::Isolate* isolate,
- v8::Persistent<v8::Value>* obj,
- void* parameter) {
- DeoptimizingCodeListNode* node =
- reinterpret_cast<DeoptimizingCodeListNode*>(parameter);
- DeoptimizerData* data =
- reinterpret_cast<Isolate*>(isolate)->deoptimizer_data();
- data->RemoveDeoptimizingCode(*node->code());
-#ifdef DEBUG
- for (DeoptimizingCodeListNode* current = data->deoptimizing_code_list_;
- current != NULL;
- current = current->next()) {
- ASSERT(current != node);
+void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
+ Code* code = function->code();
+ if (code->kind() == Code::OPTIMIZED_FUNCTION) {
+ // Mark the code for deoptimization and unlink any functions that also
+ // refer to that code. The code cannot be shared across native contexts,
+ // so we only need to search one.
+ code->set_marked_for_deoptimization(true);
+ DeoptimizeMarkedCodeForContext(function->context()->native_context());
}
-#endif
}
@@ -559,8 +495,6 @@ bool Deoptimizer::TraceEnabledFor(BailoutType deopt_type,
return (frame_type == StackFrame::STUB)
? FLAG_trace_stub_failures
: FLAG_trace_deopt;
- case OSR:
- return FLAG_trace_osr;
}
UNREACHABLE();
return false;
@@ -573,7 +507,6 @@ const char* Deoptimizer::MessageFor(BailoutType type) {
case SOFT: return "soft";
case LAZY: return "lazy";
case DEBUGGER: return "debugger";
- case OSR: return "OSR";
}
UNREACHABLE();
return NULL;
@@ -627,6 +560,14 @@ Deoptimizer::Deoptimizer(Isolate* isolate,
}
}
compiled_code_ = FindOptimizedCode(function, optimized_code);
+
+#if DEBUG
+ ASSERT(compiled_code_ != NULL);
+ if (type == EAGER || type == SOFT || type == LAZY) {
+ ASSERT(compiled_code_->kind() != Code::FUNCTION);
+ }
+#endif
+
StackFrame::Type frame_type = function == NULL
? StackFrame::STUB
: StackFrame::JAVA_SCRIPT;
@@ -647,21 +588,11 @@ Code* Deoptimizer::FindOptimizedCode(JSFunction* function,
case Deoptimizer::SOFT:
case Deoptimizer::EAGER:
case Deoptimizer::LAZY: {
- Code* compiled_code =
- isolate_->deoptimizer_data()->FindDeoptimizingCode(from_);
+ Code* compiled_code = FindDeoptimizingCode(from_);
return (compiled_code == NULL)
? static_cast<Code*>(isolate_->FindCodeObject(from_))
: compiled_code;
}
- case Deoptimizer::OSR: {
- // The function has already been optimized and we're transitioning
- // from the unoptimized shared version to the optimized one in the
- // function. The return address (from_) points to unoptimized code.
- Code* compiled_code = function->code();
- ASSERT(compiled_code->kind() == Code::OPTIMIZED_FUNCTION);
- ASSERT(!compiled_code->contains(from_));
- return compiled_code;
- }
case Deoptimizer::DEBUGGER:
ASSERT(optimized_code->contains(from_));
return optimized_code;
@@ -765,11 +696,18 @@ int Deoptimizer::GetOutputInfo(DeoptimizationOutputData* data,
int Deoptimizer::GetDeoptimizedCodeCount(Isolate* isolate) {
int length = 0;
- DeoptimizingCodeListNode* node =
- isolate->deoptimizer_data()->deoptimizing_code_list_;
- while (node != NULL) {
- length++;
- node = node->next();
+  // Count all entries in the deoptimized code list of every context.
+ Object* context = isolate->heap()->native_contexts_list();
+ while (!context->IsUndefined()) {
+ Context* native_context = Context::cast(context);
+ Object* element = native_context->DeoptimizedCodeListHead();
+ while (!element->IsUndefined()) {
+ Code* code = Code::cast(element);
+ ASSERT(code->kind() == Code::OPTIMIZED_FUNCTION);
+ length++;
+ element = code->next_code_link();
+ }
+ context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
}
return length;
}
@@ -778,18 +716,14 @@ int Deoptimizer::GetDeoptimizedCodeCount(Isolate* isolate) {
// We rely on this function not causing a GC. It is called from generated code
// without having a real stack frame in place.
void Deoptimizer::DoComputeOutputFrames() {
- if (bailout_type_ == OSR) {
- DoComputeOsrOutputFrame();
- return;
- }
-
// Print some helpful diagnostic information.
- int64_t start = OS::Ticks();
if (FLAG_log_timer_events &&
compiled_code_->kind() == Code::OPTIMIZED_FUNCTION) {
LOG(isolate(), CodeDeoptEvent(compiled_code_));
}
+ ElapsedTimer timer;
if (trace_) {
+ timer.Start();
PrintF("[deoptimizing (DEOPT %s): begin 0x%08" V8PRIxPTR " ",
MessageFor(bailout_type_),
reinterpret_cast<intptr_t>(function_));
@@ -870,7 +804,7 @@ void Deoptimizer::DoComputeOutputFrames() {
// Print some helpful diagnostic information.
if (trace_) {
- double ms = static_cast<double>(OS::Ticks() - start) / 1000;
+ double ms = timer.Elapsed().InMillisecondsF();
int index = output_count_ - 1; // Index of the topmost frame.
JSFunction* function = output_[index]->GetFunction();
PrintF("[deoptimizing (%s): end 0x%08" V8PRIxPTR " ",
@@ -1696,13 +1630,25 @@ Handle<Object> Deoptimizer::MaterializeNextHeapObject() {
Handle<Object> properties = MaterializeNextValue();
Handle<Object> elements = MaterializeNextValue();
object->set_properties(FixedArray::cast(*properties));
- object->set_elements(FixedArray::cast(*elements));
+ object->set_elements(FixedArrayBase::cast(*elements));
for (int i = 0; i < length - 3; ++i) {
Handle<Object> value = MaterializeNextValue();
object->FastPropertyAtPut(i, *value);
}
break;
}
+ case JS_ARRAY_TYPE: {
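+        // Re-create the array, then fill in its properties, elements, and
+        // length in the order they appear in the translation.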
+ Handle<JSArray> object =
+ isolate_->factory()->NewJSArray(0, map->elements_kind());
+ materialized_objects_->Add(object);
+ Handle<Object> properties = MaterializeNextValue();
+ Handle<Object> elements = MaterializeNextValue();
+ Handle<Object> length = MaterializeNextValue();
+ object->set_properties(FixedArray::cast(*properties));
+ object->set_elements(FixedArrayBase::cast(*elements));
+ object->set_length(*length);
+ break;
+ }
default:
PrintF("[couldn't handle instance type %d]\n", map->instance_type());
UNREACHABLE();
@@ -2391,252 +2337,69 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
}
-bool Deoptimizer::DoOsrTranslateCommand(TranslationIterator* iterator,
- int* input_offset) {
- disasm::NameConverter converter;
- FrameDescription* output = output_[0];
-
- // The input values are all part of the unoptimized frame so they
- // are all tagged pointers.
- uintptr_t input_value = input_->GetFrameSlot(*input_offset);
- Object* input_object = reinterpret_cast<Object*>(input_value);
-
- Translation::Opcode opcode =
- static_cast<Translation::Opcode>(iterator->Next());
-
- switch (opcode) {
- case Translation::BEGIN:
- case Translation::JS_FRAME:
- case Translation::ARGUMENTS_ADAPTOR_FRAME:
- case Translation::CONSTRUCT_STUB_FRAME:
- case Translation::GETTER_STUB_FRAME:
- case Translation::SETTER_STUB_FRAME:
- case Translation::COMPILED_STUB_FRAME:
- UNREACHABLE(); // Malformed input.
- return false;
-
- case Translation::REGISTER: {
- int output_reg = iterator->Next();
- if (FLAG_trace_osr) {
- PrintF(" %s <- 0x%08" V8PRIxPTR " ; [sp + %d]\n",
- converter.NameOfCPURegister(output_reg),
- input_value,
- *input_offset);
- }
- output->SetRegister(output_reg, input_value);
- break;
- }
-
- case Translation::INT32_REGISTER: {
- int32_t int32_value = 0;
- if (!input_object->ToInt32(&int32_value)) return false;
-
- int output_reg = iterator->Next();
- if (FLAG_trace_osr) {
- PrintF(" %s <- %d (int32) ; [sp + %d]\n",
- converter.NameOfCPURegister(output_reg),
- int32_value,
- *input_offset);
- }
- output->SetRegister(output_reg, int32_value);
- break;
- }
-
- case Translation::UINT32_REGISTER: {
- uint32_t uint32_value = 0;
- if (!input_object->ToUint32(&uint32_value)) return false;
-
- int output_reg = iterator->Next();
- if (FLAG_trace_osr) {
- PrintF(" %s <- %u (uint32) ; [sp + %d]\n",
- converter.NameOfCPURegister(output_reg),
- uint32_value,
- *input_offset);
- }
- output->SetRegister(output_reg, static_cast<int32_t>(uint32_value));
- }
-
-
- case Translation::DOUBLE_REGISTER: {
- // Abort OSR if we don't have a number.
- if (!input_object->IsNumber()) return false;
-
- int output_reg = iterator->Next();
- double double_value = input_object->Number();
- if (FLAG_trace_osr) {
- PrintF(" %s <- %g (double) ; [sp + %d]\n",
- DoubleRegister::AllocationIndexToString(output_reg),
- double_value,
- *input_offset);
- }
- output->SetDoubleRegister(output_reg, double_value);
- break;
- }
-
- case Translation::STACK_SLOT: {
- int output_index = iterator->Next();
- unsigned output_offset =
- output->GetOffsetFromSlotIndex(output_index);
- if (FLAG_trace_osr) {
- PrintF(" [sp + %d] <- 0x%08" V8PRIxPTR " ; [sp + %d] ",
- output_offset,
- input_value,
- *input_offset);
- reinterpret_cast<Object*>(input_value)->ShortPrint();
- PrintF("\n");
- }
- output->SetFrameSlot(output_offset, input_value);
- break;
- }
-
- case Translation::INT32_STACK_SLOT: {
- int32_t int32_value = 0;
- if (!input_object->ToInt32(&int32_value)) return false;
-
- int output_index = iterator->Next();
- unsigned output_offset =
- output->GetOffsetFromSlotIndex(output_index);
- if (FLAG_trace_osr) {
- PrintF(" [sp + %d] <- %d (int32) ; [sp + %d]\n",
- output_offset,
- int32_value,
- *input_offset);
- }
- output->SetFrameSlot(output_offset, int32_value);
- break;
- }
-
- case Translation::UINT32_STACK_SLOT: {
- uint32_t uint32_value = 0;
- if (!input_object->ToUint32(&uint32_value)) return false;
-
- int output_index = iterator->Next();
- unsigned output_offset =
- output->GetOffsetFromSlotIndex(output_index);
- if (FLAG_trace_osr) {
- PrintF(" [sp + %d] <- %u (uint32) ; [sp + %d]\n",
- output_offset,
- uint32_value,
- *input_offset);
- }
- output->SetFrameSlot(output_offset, static_cast<int32_t>(uint32_value));
- break;
- }
-
- case Translation::DOUBLE_STACK_SLOT: {
- static const int kLowerOffset = 0 * kPointerSize;
- static const int kUpperOffset = 1 * kPointerSize;
-
- // Abort OSR if we don't have a number.
- if (!input_object->IsNumber()) return false;
-
- int output_index = iterator->Next();
- unsigned output_offset =
- output->GetOffsetFromSlotIndex(output_index);
- double double_value = input_object->Number();
- uint64_t int_value = BitCast<uint64_t, double>(double_value);
- int32_t lower = static_cast<int32_t>(int_value);
- int32_t upper = static_cast<int32_t>(int_value >> kBitsPerInt);
- if (FLAG_trace_osr) {
- PrintF(" [sp + %d] <- 0x%08x (upper bits of %g) ; [sp + %d]\n",
- output_offset + kUpperOffset,
- upper,
- double_value,
- *input_offset);
- PrintF(" [sp + %d] <- 0x%08x (lower bits of %g) ; [sp + %d]\n",
- output_offset + kLowerOffset,
- lower,
- double_value,
- *input_offset);
- }
- output->SetFrameSlot(output_offset + kLowerOffset, lower);
- output->SetFrameSlot(output_offset + kUpperOffset, upper);
- break;
- }
-
- case Translation::LITERAL: {
- // Just ignore non-materialized literals.
- iterator->Next();
- break;
- }
-
- case Translation::DUPLICATED_OBJECT:
- case Translation::ARGUMENTS_OBJECT:
- case Translation::CAPTURED_OBJECT: {
- // Optimized code assumes that the argument object has not been
- // materialized and so bypasses it when doing arguments access.
- // We should have bailed out before starting the frame
- // translation.
- UNREACHABLE();
- return false;
- }
- }
-
- *input_offset -= kPointerSize;
- return true;
-}
-
+void Deoptimizer::PatchInterruptCode(Isolate* isolate,
+ Code* unoptimized) {
+ DisallowHeapAllocation no_gc;
+ Code* replacement_code =
+ isolate->builtins()->builtin(Builtins::kOnStackReplacement);
-void Deoptimizer::PatchInterruptCode(Code* unoptimized_code,
- Code* interrupt_code,
- Code* replacement_code) {
// Iterate over the back edge table and patch every interrupt
// call to an unconditional call to the replacement code.
- int loop_nesting_level = unoptimized_code->allow_osr_at_loop_nesting_level();
+ int loop_nesting_level = unoptimized->allow_osr_at_loop_nesting_level();
- for (FullCodeGenerator::BackEdgeTableIterator back_edges(unoptimized_code);
+ for (FullCodeGenerator::BackEdgeTableIterator back_edges(unoptimized, &no_gc);
!back_edges.Done();
back_edges.Next()) {
if (static_cast<int>(back_edges.loop_depth()) == loop_nesting_level) {
- PatchInterruptCodeAt(unoptimized_code,
+ ASSERT_EQ(NOT_PATCHED, GetInterruptPatchState(isolate,
+ unoptimized,
+ back_edges.pc()));
+ PatchInterruptCodeAt(unoptimized,
back_edges.pc(),
- interrupt_code,
replacement_code);
}
}
- unoptimized_code->set_back_edges_patched_for_osr(true);
-#ifdef DEBUG
- Deoptimizer::VerifyInterruptCode(
- unoptimized_code, interrupt_code, replacement_code, loop_nesting_level);
-#endif // DEBUG
+ unoptimized->set_back_edges_patched_for_osr(true);
+ ASSERT(Deoptimizer::VerifyInterruptCode(
+ isolate, unoptimized, loop_nesting_level));
}
-void Deoptimizer::RevertInterruptCode(Code* unoptimized_code,
- Code* interrupt_code,
- Code* replacement_code) {
+void Deoptimizer::RevertInterruptCode(Isolate* isolate,
+ Code* unoptimized) {
+ DisallowHeapAllocation no_gc;
+ Code* interrupt_code =
+ isolate->builtins()->builtin(Builtins::kInterruptCheck);
+
// Iterate over the back edge table and revert the patched interrupt calls.
- ASSERT(unoptimized_code->back_edges_patched_for_osr());
- int loop_nesting_level = unoptimized_code->allow_osr_at_loop_nesting_level();
+ ASSERT(unoptimized->back_edges_patched_for_osr());
+ int loop_nesting_level = unoptimized->allow_osr_at_loop_nesting_level();
- for (FullCodeGenerator::BackEdgeTableIterator back_edges(unoptimized_code);
+ for (FullCodeGenerator::BackEdgeTableIterator back_edges(unoptimized, &no_gc);
!back_edges.Done();
back_edges.Next()) {
if (static_cast<int>(back_edges.loop_depth()) <= loop_nesting_level) {
- RevertInterruptCodeAt(unoptimized_code,
- back_edges.pc(),
- interrupt_code,
- replacement_code);
+ ASSERT_EQ(PATCHED_FOR_OSR, GetInterruptPatchState(isolate,
+ unoptimized,
+ back_edges.pc()));
+ RevertInterruptCodeAt(unoptimized, back_edges.pc(), interrupt_code);
}
}
- unoptimized_code->set_back_edges_patched_for_osr(false);
- unoptimized_code->set_allow_osr_at_loop_nesting_level(0);
-#ifdef DEBUG
+ unoptimized->set_back_edges_patched_for_osr(false);
+ unoptimized->set_allow_osr_at_loop_nesting_level(0);
// Assert that none of the back edges are patched anymore.
- Deoptimizer::VerifyInterruptCode(
- unoptimized_code, interrupt_code, replacement_code, -1);
-#endif // DEBUG
+ ASSERT(Deoptimizer::VerifyInterruptCode(isolate, unoptimized, -1));
}
#ifdef DEBUG
-void Deoptimizer::VerifyInterruptCode(Code* unoptimized_code,
- Code* interrupt_code,
- Code* replacement_code,
+bool Deoptimizer::VerifyInterruptCode(Isolate* isolate,
+ Code* unoptimized,
int loop_nesting_level) {
- for (FullCodeGenerator::BackEdgeTableIterator back_edges(unoptimized_code);
+ DisallowHeapAllocation no_gc;
+ for (FullCodeGenerator::BackEdgeTableIterator back_edges(unoptimized, &no_gc);
!back_edges.Done();
back_edges.Next()) {
uint32_t loop_depth = back_edges.loop_depth();
@@ -2644,11 +2407,11 @@ void Deoptimizer::VerifyInterruptCode(Code* unoptimized_code,
// Assert that all back edges for shallower loops (and only those)
// have already been patched.
CHECK_EQ((static_cast<int>(loop_depth) <= loop_nesting_level),
- InterruptCodeIsPatched(unoptimized_code,
- back_edges.pc(),
- interrupt_code,
- replacement_code));
+ GetInterruptPatchState(isolate,
+ unoptimized,
+ back_edges.pc()) != NOT_PATCHED);
}
+ return true;
}
#endif // DEBUG
@@ -2659,12 +2422,7 @@ unsigned Deoptimizer::ComputeInputFrameSize() const {
// into account so we have to avoid double counting them (-2).
unsigned result = fixed_size + fp_to_sp_delta_ - (2 * kPointerSize);
#ifdef DEBUG
- if (bailout_type_ == OSR) {
- // TODO(kasperl): It would be nice if we could verify that the
- // size matches with the stack height we can compute based on the
- // environment at the OSR entry. The code for that his built into
- // the DoComputeOsrOutputFrame function for now.
- } else if (compiled_code_->kind() == Code::OPTIMIZED_FUNCTION) {
+ if (compiled_code_->kind() == Code::OPTIMIZED_FUNCTION) {
unsigned stack_slots = compiled_code_->stack_slots();
unsigned outgoing_size = ComputeOutgoingArgumentSize();
ASSERT(result == fixed_size + (stack_slots * kPointerSize) + outgoing_size);
@@ -3103,22 +2861,6 @@ const char* Translation::StringFor(Opcode opcode) {
#endif
-DeoptimizingCodeListNode::DeoptimizingCodeListNode(Code* code): next_(NULL) {
- GlobalHandles* global_handles = code->GetIsolate()->global_handles();
- // Globalize the code object and make it weak.
- code_ = Handle<Code>::cast(global_handles->Create(code));
- global_handles->MakeWeak(reinterpret_cast<Object**>(code_.location()),
- this,
- Deoptimizer::HandleWeakDeoptimizedCode);
-}
-
-
-DeoptimizingCodeListNode::~DeoptimizingCodeListNode() {
- GlobalHandles* global_handles = code_->GetIsolate()->global_handles();
- global_handles->Destroy(reinterpret_cast<Object**>(code_.location()));
-}
-
-
// We can't intermix stack decoding and allocations because
// deoptimization infrastructure is not GC safe.
// Thus we build a temporary structure in malloced space.
diff --git a/deps/v8/src/deoptimizer.h b/deps/v8/src/deoptimizer.h
index e5afd1ae6..7ee5908f7 100644
--- a/deps/v8/src/deoptimizer.h
+++ b/deps/v8/src/deoptimizer.h
@@ -58,7 +58,6 @@ static inline double read_double_value(Address p) {
class FrameDescription;
class TranslationIterator;
-class DeoptimizingCodeListNode;
class DeoptimizedFrameInfo;
class HeapNumberMaterializationDescriptor BASE_EMBEDDED {
@@ -121,29 +120,22 @@ class OptimizedFunctionVisitor BASE_EMBEDDED {
};
-class OptimizedFunctionFilter BASE_EMBEDDED {
- public:
- virtual ~OptimizedFunctionFilter() {}
-
- virtual bool TakeFunction(JSFunction* function) = 0;
-};
-
-
-class Deoptimizer;
-
-
class Deoptimizer : public Malloced {
public:
enum BailoutType {
EAGER,
LAZY,
SOFT,
- OSR,
// This last bailout type is not really a bailout, but used by the
// debugger to deoptimize stack frames to allow inspection.
DEBUGGER
};
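+  // Whether the back edges of unoptimized code still call the interrupt
+  // check builtin (NOT_PATCHED) or have been redirected to the
+  // on-stack-replacement builtin (PATCHED_FOR_OSR).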
+ enum InterruptPatchState {
+ NOT_PATCHED,
+ PATCHED_FOR_OSR
+ };
+
static const int kBailoutTypesWithCodeEntry = SOFT + 1;
struct JumpTableEntry {
@@ -203,68 +195,54 @@ class Deoptimizer : public Malloced {
// execution returns.
static void DeoptimizeFunction(JSFunction* function);
- // Iterate over all the functions which share the same code object
- // and make them use unoptimized version.
- static void ReplaceCodeForRelatedFunctions(JSFunction* function, Code* code);
-
- // Deoptimize all functions in the heap.
+ // Deoptimize all code in the given isolate.
static void DeoptimizeAll(Isolate* isolate);
+ // Deoptimize code associated with the given global object.
static void DeoptimizeGlobalObject(JSObject* object);
- static void DeoptimizeAllFunctionsWith(Isolate* isolate,
- OptimizedFunctionFilter* filter);
-
- static void DeoptimizeCodeList(Isolate* isolate, ZoneList<Code*>* codes);
-
- static void DeoptimizeAllFunctionsForContext(
- Context* context, OptimizedFunctionFilter* filter);
-
- static void VisitAllOptimizedFunctionsForContext(
- Context* context, OptimizedFunctionVisitor* visitor);
+ // Deoptimizes all optimized code that has been previously marked
+ // (via code->set_marked_for_deoptimization) and unlinks all functions that
+ // refer to that code.
+ static void DeoptimizeMarkedCode(Isolate* isolate);
- static void VisitAllOptimizedFunctions(Isolate* isolate,
- OptimizedFunctionVisitor* visitor);
+ // Visit all the known optimized functions in a given isolate.
+ static void VisitAllOptimizedFunctions(
+ Isolate* isolate, OptimizedFunctionVisitor* visitor);
// The size in bytes of the code required at a lazy deopt patch site.
static int patch_size();
// Patch all interrupts with allowed loop depth in the unoptimized code to
// unconditionally call replacement_code.
- static void PatchInterruptCode(Code* unoptimized_code,
- Code* interrupt_code,
- Code* replacement_code);
+ static void PatchInterruptCode(Isolate* isolate,
+ Code* unoptimized_code);
// Patch the interrupt at the instruction before pc_after in
// the unoptimized code to unconditionally call replacement_code.
static void PatchInterruptCodeAt(Code* unoptimized_code,
Address pc_after,
- Code* interrupt_code,
Code* replacement_code);
// Change all interrupts patched in the unoptimized code
// back to normal interrupts.
- static void RevertInterruptCode(Code* unoptimized_code,
- Code* interrupt_code,
- Code* replacement_code);
+ static void RevertInterruptCode(Isolate* isolate,
+ Code* unoptimized_code);
// Change a patched interrupt in the unoptimized code
// back to a normal interrupt.
static void RevertInterruptCodeAt(Code* unoptimized_code,
Address pc_after,
- Code* interrupt_code,
- Code* replacement_code);
+ Code* interrupt_code);
#ifdef DEBUG
- static bool InterruptCodeIsPatched(Code* unoptimized_code,
- Address pc_after,
- Code* interrupt_code,
- Code* replacement_code);
+ static InterruptPatchState GetInterruptPatchState(Isolate* isolate,
+ Code* unoptimized_code,
+ Address pc_after);
// Verify that all back edges of a certain loop depth are patched.
- static void VerifyInterruptCode(Code* unoptimized_code,
- Code* interrupt_code,
- Code* replacement_code,
+ static bool VerifyInterruptCode(Isolate* isolate,
+ Code* unoptimized_code,
int loop_nesting_level);
#endif // DEBUG
@@ -377,7 +355,6 @@ class Deoptimizer : public Malloced {
void DeleteFrameDescriptions();
void DoComputeOutputFrames();
- void DoComputeOsrOutputFrame();
void DoComputeJSFrame(TranslationIterator* iterator, int frame_index);
void DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
int frame_index);
@@ -403,13 +380,6 @@ class Deoptimizer : public Malloced {
unsigned output_offset,
DeoptimizerTranslatedValueType value_type = TRANSLATED_VALUE_IS_TAGGED);
- // Translate a command for OSR. Updates the input offset to be used for
- // the next command. Returns false if translation of the command failed
- // (e.g., a number conversion failed) and may or may not have updated the
- // input offset.
- bool DoOsrTranslateCommand(TranslationIterator* iterator,
- int* input_offset);
-
unsigned ComputeInputFrameSize() const;
unsigned ComputeFixedSize(JSFunction* function) const;
@@ -443,17 +413,24 @@ class Deoptimizer : public Malloced {
static void GenerateDeoptimizationEntries(
MacroAssembler* masm, int count, BailoutType type);
- // Weak handle callback for deoptimizing code objects.
- static void HandleWeakDeoptimizedCode(v8::Isolate* isolate,
- v8::Persistent<v8::Value>* obj,
- void* data);
+ // Marks all the code in the given context for deoptimization.
+ static void MarkAllCodeForContext(Context* native_context);
- // Deoptimize the given code and add to appropriate deoptimization lists.
- static void DeoptimizeCode(Isolate* isolate, Code* code);
+ // Visit all the known optimized functions in a given context.
+ static void VisitAllOptimizedFunctionsForContext(
+ Context* context, OptimizedFunctionVisitor* visitor);
+
+ // Deoptimizes all code marked in the given context.
+ static void DeoptimizeMarkedCodeForContext(Context* native_context);
// Patch the given code so that it will deoptimize itself.
static void PatchCodeForDeoptimization(Isolate* isolate, Code* code);
+ // Searches the list of known deoptimizing code for a Code object
+ // containing the given address (which is supposedly faster than
+ // searching all code objects).
+ Code* FindDeoptimizingCode(Address addr);
+
// Fill the input from a JavaScript frame. This is used when
// the debugger needs to inspect an optimized frame. For normal
// deoptimizations the input frame is filled in generated code.
@@ -515,7 +492,6 @@ class Deoptimizer : public Malloced {
static const int table_entry_size_;
friend class FrameDescription;
- friend class DeoptimizingCodeListNode;
friend class DeoptimizedFrameInfo;
};
@@ -689,24 +665,16 @@ class DeoptimizerData {
void Iterate(ObjectVisitor* v);
#endif
- Code* FindDeoptimizingCode(Address addr);
- void RemoveDeoptimizingCode(Code* code);
-
private:
MemoryAllocator* allocator_;
int deopt_entry_code_entries_[Deoptimizer::kBailoutTypesWithCodeEntry];
MemoryChunk* deopt_entry_code_[Deoptimizer::kBailoutTypesWithCodeEntry];
- Deoptimizer* current_;
#ifdef ENABLE_DEBUGGER_SUPPORT
DeoptimizedFrameInfo* deoptimized_frame_info_;
#endif
- // List of deoptimized code which still have references from active stack
- // frames. These code objects are needed by the deoptimizer when deoptimizing
- // a frame for which the code object for the function function has been
- // changed from the code present when deoptimizing was done.
- DeoptimizingCodeListNode* deoptimizing_code_list_;
+ Deoptimizer* current_;
friend class Deoptimizer;
@@ -824,26 +792,6 @@ class Translation BASE_EMBEDDED {
};
-// Linked list holding deoptimizing code objects. The deoptimizing code objects
-// are kept as weak handles until they are no longer activated on the stack.
-class DeoptimizingCodeListNode : public Malloced {
- public:
- explicit DeoptimizingCodeListNode(Code* code);
- ~DeoptimizingCodeListNode();
-
- DeoptimizingCodeListNode* next() const { return next_; }
- void set_next(DeoptimizingCodeListNode* next) { next_ = next; }
- Handle<Code> code() const { return code_; }
-
- private:
- // Global (weak) handle to the deoptimizing code object.
- Handle<Code> code_;
-
- // Next pointer for linked list.
- DeoptimizingCodeListNode* next_;
-};
-
-
class SlotRef BASE_EMBEDDED {
public:
enum SlotRepresentation {
diff --git a/deps/v8/src/disassembler.cc b/deps/v8/src/disassembler.cc
index fa8ae1ffc..dd620fb34 100644
--- a/deps/v8/src/disassembler.cc
+++ b/deps/v8/src/disassembler.cc
@@ -71,7 +71,7 @@ class V8NameConverter: public disasm::NameConverter {
const char* V8NameConverter::NameOfAddress(byte* pc) const {
- const char* name = Isolate::Current()->builtins()->Lookup(pc);
+ const char* name = code_->GetIsolate()->builtins()->Lookup(pc);
if (name != NULL) {
OS::SNPrintF(v8_buffer_, "%s (%p)", name, pc);
return v8_buffer_.start();
@@ -117,8 +117,8 @@ static int DecodeIt(Isolate* isolate,
byte* end) {
SealHandleScope shs(isolate);
DisallowHeapAllocation no_alloc;
- ExternalReferenceEncoder ref_encoder;
- Heap* heap = HEAP;
+ ExternalReferenceEncoder ref_encoder(isolate);
+ Heap* heap = isolate->heap();
v8::internal::EmbeddedVector<char, 128> decode_buffer;
v8::internal::EmbeddedVector<char, kOutBufferSize> out_buffer;
diff --git a/deps/v8/src/effects.h b/deps/v8/src/effects.h
index 8e8236347..afb8f9e54 100644
--- a/deps/v8/src/effects.h
+++ b/deps/v8/src/effects.h
@@ -55,7 +55,7 @@ struct Effect {
Modality modality;
Bounds bounds;
- Effect() {}
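+  // Initialize the modality explicitly; the old default constructor left
+  // it uninitialized.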
+ Effect() : modality(DEFINITE) {}
Effect(Bounds b, Modality m = DEFINITE) : modality(m), bounds(b) {}
// The unknown effect.
diff --git a/deps/v8/src/elements.cc b/deps/v8/src/elements.cc
index 77abf4e42..89621cb36 100644
--- a/deps/v8/src/elements.cc
+++ b/deps/v8/src/elements.cc
@@ -154,7 +154,8 @@ static void CopyObjectToObjectElements(FixedArrayBase* from_base,
ElementsKind to_kind,
uint32_t to_start,
int raw_copy_size) {
- ASSERT(to_base->map() != HEAP->fixed_cow_array_map());
+ ASSERT(to_base->map() !=
+ from_base->GetIsolate()->heap()->fixed_cow_array_map());
DisallowHeapAllocation no_allocation;
int copy_size = raw_copy_size;
if (raw_copy_size < 0) {
@@ -492,7 +493,6 @@ static void TraceTopFrame(Isolate* isolate) {
}
StackFrame* raw_frame = it.frame();
if (raw_frame->is_internal()) {
- Isolate* isolate = Isolate::Current();
Code* apply_builtin = isolate->builtins()->builtin(
Builtins::kFunctionApply);
if (raw_frame->unchecked_code() == apply_builtin) {
@@ -581,14 +581,8 @@ class ElementsAccessorBase : public ElementsAccessor {
// When objects are first allocated, their elements are Failures.
if (fixed_array_base->IsFailure()) return;
if (!fixed_array_base->IsHeapObject()) return;
- Map* map = fixed_array_base->map();
// Arrays that have been shifted in place can't be verified.
- Heap* heap = holder->GetHeap();
- if (map == heap->one_pointer_filler_map() ||
- map == heap->two_pointer_filler_map() ||
- map == heap->free_space_map()) {
- return;
- }
+ if (fixed_array_base->IsFiller()) return;
int length = 0;
if (holder->IsJSArray()) {
Object* length_obj = JSArray::cast(holder)->length();
diff --git a/deps/v8/src/execution.cc b/deps/v8/src/execution.cc
index ecfa1db1e..979641a9d 100644
--- a/deps/v8/src/execution.cc
+++ b/deps/v8/src/execution.cc
@@ -148,7 +148,8 @@ static Handle<Object> Invoke(bool is_construct,
}
-Handle<Object> Execution::Call(Handle<Object> callable,
+Handle<Object> Execution::Call(Isolate* isolate,
+ Handle<Object> callable,
Handle<Object> receiver,
int argc,
Handle<Object> argv[],
@@ -157,7 +158,7 @@ Handle<Object> Execution::Call(Handle<Object> callable,
*pending_exception = false;
if (!callable->IsJSFunction()) {
- callable = TryGetFunctionDelegate(callable, pending_exception);
+ callable = TryGetFunctionDelegate(isolate, callable, pending_exception);
if (*pending_exception) return callable;
}
Handle<JSFunction> func = Handle<JSFunction>::cast(callable);
@@ -174,7 +175,7 @@ Handle<Object> Execution::Call(Handle<Object> callable,
receiver = Handle<Object>(global, func->GetIsolate());
}
} else {
- receiver = ToObject(receiver, pending_exception);
+ receiver = ToObject(isolate, receiver, pending_exception);
}
if (*pending_exception) return callable;
}
@@ -234,9 +235,9 @@ Handle<Object> Execution::TryCall(Handle<JSFunction> func,
}
-Handle<Object> Execution::GetFunctionDelegate(Handle<Object> object) {
+Handle<Object> Execution::GetFunctionDelegate(Isolate* isolate,
+ Handle<Object> object) {
ASSERT(!object->IsJSFunction());
- Isolate* isolate = Isolate::Current();
Factory* factory = isolate->factory();
// If you return a function from here, it will be called when an
@@ -261,10 +262,10 @@ Handle<Object> Execution::GetFunctionDelegate(Handle<Object> object) {
}
-Handle<Object> Execution::TryGetFunctionDelegate(Handle<Object> object,
+Handle<Object> Execution::TryGetFunctionDelegate(Isolate* isolate,
+ Handle<Object> object,
bool* has_pending_exception) {
ASSERT(!object->IsJSFunction());
- Isolate* isolate = Isolate::Current();
// If object is a function proxy, get its handler. Iterate if necessary.
Object* fun = *object;
@@ -292,9 +293,9 @@ Handle<Object> Execution::TryGetFunctionDelegate(Handle<Object> object,
}
-Handle<Object> Execution::GetConstructorDelegate(Handle<Object> object) {
+Handle<Object> Execution::GetConstructorDelegate(Isolate* isolate,
+ Handle<Object> object) {
ASSERT(!object->IsJSFunction());
- Isolate* isolate = Isolate::Current();
// If you return a function from here, it will be called when an
// attempt is made to call the given object as a constructor.
@@ -319,10 +320,10 @@ Handle<Object> Execution::GetConstructorDelegate(Handle<Object> object) {
Handle<Object> Execution::TryGetConstructorDelegate(
+ Isolate* isolate,
Handle<Object> object,
bool* has_pending_exception) {
ASSERT(!object->IsJSFunction());
- Isolate* isolate = Isolate::Current();
// If you return a function from here, it will be called when an
// attempt is made to call the given object as a constructor.
@@ -458,6 +459,22 @@ void StackGuard::RequestGC() {
}
+bool StackGuard::IsInstallCodeRequest() {
+ ExecutionAccess access(isolate_);
+ return (thread_local_.interrupt_flags_ & INSTALL_CODE) != 0;
+}
+
+
+void StackGuard::RequestInstallCode() {
+ ExecutionAccess access(isolate_);
+ thread_local_.interrupt_flags_ |= INSTALL_CODE;
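+  // Unless interrupts are postponed, trip the stack limit so the request
+  // is serviced at the next stack check.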
+ if (thread_local_.postpone_interrupts_nesting_ == 0) {
+ thread_local_.jslimit_ = thread_local_.climit_ = kInterruptLimit;
+ isolate_->heap()->SetStackLimits();
+ }
+}
+
+
bool StackGuard::IsFullDeopt() {
ExecutionAccess access(isolate_);
return (thread_local_.interrupt_flags_ & FULL_DEOPT) != 0;
@@ -596,54 +613,60 @@ void StackGuard::InitThread(const ExecutionAccess& lock) {
#define RETURN_NATIVE_CALL(name, args, has_pending_exception) \
do { \
- Isolate* isolate = Isolate::Current(); \
Handle<Object> argv[] = args; \
ASSERT(has_pending_exception != NULL); \
- return Call(isolate->name##_fun(), \
+ return Call(isolate, \
+ isolate->name##_fun(), \
isolate->js_builtins_object(), \
ARRAY_SIZE(argv), argv, \
has_pending_exception); \
} while (false)
-Handle<Object> Execution::ToNumber(Handle<Object> obj, bool* exc) {
+Handle<Object> Execution::ToNumber(
+ Isolate* isolate, Handle<Object> obj, bool* exc) {
RETURN_NATIVE_CALL(to_number, { obj }, exc);
}
-Handle<Object> Execution::ToString(Handle<Object> obj, bool* exc) {
+Handle<Object> Execution::ToString(
+ Isolate* isolate, Handle<Object> obj, bool* exc) {
RETURN_NATIVE_CALL(to_string, { obj }, exc);
}
-Handle<Object> Execution::ToDetailString(Handle<Object> obj, bool* exc) {
+Handle<Object> Execution::ToDetailString(
+ Isolate* isolate, Handle<Object> obj, bool* exc) {
RETURN_NATIVE_CALL(to_detail_string, { obj }, exc);
}
-Handle<Object> Execution::ToObject(Handle<Object> obj, bool* exc) {
+Handle<Object> Execution::ToObject(
+ Isolate* isolate, Handle<Object> obj, bool* exc) {
if (obj->IsSpecObject()) return obj;
RETURN_NATIVE_CALL(to_object, { obj }, exc);
}
-Handle<Object> Execution::ToInteger(Handle<Object> obj, bool* exc) {
+Handle<Object> Execution::ToInteger(
+ Isolate* isolate, Handle<Object> obj, bool* exc) {
RETURN_NATIVE_CALL(to_integer, { obj }, exc);
}
-Handle<Object> Execution::ToUint32(Handle<Object> obj, bool* exc) {
+Handle<Object> Execution::ToUint32(
+ Isolate* isolate, Handle<Object> obj, bool* exc) {
RETURN_NATIVE_CALL(to_uint32, { obj }, exc);
}
-Handle<Object> Execution::ToInt32(Handle<Object> obj, bool* exc) {
+Handle<Object> Execution::ToInt32(
+ Isolate* isolate, Handle<Object> obj, bool* exc) {
RETURN_NATIVE_CALL(to_int32, { obj }, exc);
}
-Handle<Object> Execution::NewDate(double time, bool* exc) {
- Isolate* isolate = Isolate::Current();
+Handle<Object> Execution::NewDate(Isolate* isolate, double time, bool* exc) {
Handle<Object> time_obj = isolate->factory()->NewNumber(time);
RETURN_NATIVE_CALL(create_date, { time_obj }, exc);
}
@@ -698,15 +721,18 @@ Handle<JSFunction> Execution::InstantiateFunction(
Handle<FunctionTemplateInfo> data,
bool* exc) {
Isolate* isolate = data->GetIsolate();
- // Fast case: see if the function has already been instantiated
- int serial_number = Smi::cast(data->serial_number())->value();
- Object* elm =
- isolate->native_context()->function_cache()->
- GetElementNoExceptionThrown(serial_number);
- if (elm->IsJSFunction()) return Handle<JSFunction>(JSFunction::cast(elm));
+ if (!data->do_not_cache()) {
+ // Fast case: see if the function has already been instantiated
+ int serial_number = Smi::cast(data->serial_number())->value();
+ Object* elm =
+ isolate->native_context()->function_cache()->
+ GetElementNoExceptionThrown(isolate, serial_number);
+ if (elm->IsJSFunction()) return Handle<JSFunction>(JSFunction::cast(elm));
+ }
// The function has not yet been instantiated in this context; do it.
Handle<Object> args[] = { data };
- Handle<Object> result = Call(isolate->instantiate_fun(),
+ Handle<Object> result = Call(isolate,
+ isolate->instantiate_fun(),
isolate->js_builtins_object(),
ARRAY_SIZE(args),
args,
@@ -738,7 +764,8 @@ Handle<JSObject> Execution::InstantiateObject(Handle<ObjectTemplateInfo> data,
return Handle<JSObject>(JSObject::cast(result));
} else {
Handle<Object> args[] = { data };
- Handle<Object> result = Call(isolate->instantiate_fun(),
+ Handle<Object> result = Call(isolate,
+ isolate->instantiate_fun(),
isolate->js_builtins_object(),
ARRAY_SIZE(args),
args,
@@ -749,12 +776,13 @@ Handle<JSObject> Execution::InstantiateObject(Handle<ObjectTemplateInfo> data,
}
-void Execution::ConfigureInstance(Handle<Object> instance,
+void Execution::ConfigureInstance(Isolate* isolate,
+ Handle<Object> instance,
Handle<Object> instance_template,
bool* exc) {
- Isolate* isolate = Isolate::Current();
Handle<Object> args[] = { instance, instance_template };
- Execution::Call(isolate->configure_instance_fun(),
+ Execution::Call(isolate,
+ isolate->configure_instance_fun(),
isolate->js_builtins_object(),
ARRAY_SIZE(args),
args,
@@ -782,9 +810,7 @@ Handle<String> Execution::GetStackTraceLine(Handle<Object> recv,
}
-static Object* RuntimePreempt() {
- Isolate* isolate = Isolate::Current();
-
+static Object* RuntimePreempt(Isolate* isolate) {
// Clear the preempt request flag.
isolate->stack_guard()->Continue(PREEMPT);
@@ -813,9 +839,7 @@ static Object* RuntimePreempt() {
#ifdef ENABLE_DEBUGGER_SUPPORT
-Object* Execution::DebugBreakHelper() {
- Isolate* isolate = Isolate::Current();
-
+Object* Execution::DebugBreakHelper(Isolate* isolate) {
// Just continue if breaks are disabled.
if (isolate->debug()->disable_break()) {
return isolate->heap()->undefined_value();
@@ -861,15 +885,15 @@ Object* Execution::DebugBreakHelper() {
// Clear the debug break request flag.
isolate->stack_guard()->Continue(DEBUGBREAK);
- ProcessDebugMessages(debug_command_only);
+ ProcessDebugMessages(isolate, debug_command_only);
// Return to continue execution.
return isolate->heap()->undefined_value();
}
-void Execution::ProcessDebugMessages(bool debug_command_only) {
- Isolate* isolate = Isolate::Current();
+void Execution::ProcessDebugMessages(Isolate* isolate,
+ bool debug_command_only) {
// Clear the debug command request flag.
isolate->stack_guard()->Continue(DEBUGCOMMAND);
@@ -880,7 +904,7 @@ void Execution::ProcessDebugMessages(bool debug_command_only) {
HandleScope scope(isolate);
// Enter the debugger. Just continue if we fail to enter the debugger.
- EnterDebugger debugger;
+ EnterDebugger debugger(isolate);
if (debugger.FailedToEnter()) {
return;
}
@@ -908,13 +932,12 @@ MaybeObject* Execution::HandleStackGuardInterrupt(Isolate* isolate) {
isolate->counters()->stack_interrupts()->Increment();
isolate->counters()->runtime_profiler_ticks()->Increment();
- isolate->runtime_profiler()->OptimizeNow();
#ifdef ENABLE_DEBUGGER_SUPPORT
if (stack_guard->IsDebugBreak() || stack_guard->IsDebugCommand()) {
- DebugBreakHelper();
+ DebugBreakHelper(isolate);
}
#endif
- if (stack_guard->IsPreempted()) RuntimePreempt();
+ if (stack_guard->IsPreempted()) RuntimePreempt(isolate);
if (stack_guard->IsTerminateExecution()) {
stack_guard->Continue(TERMINATE);
return isolate->TerminateExecution();
@@ -927,6 +950,12 @@ MaybeObject* Execution::HandleStackGuardInterrupt(Isolate* isolate) {
stack_guard->Continue(FULL_DEOPT);
Deoptimizer::DeoptimizeAll(isolate);
}
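+  // Install any optimized functions queued by the concurrent recompilation
+  // thread while we are on the main thread.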
+ if (stack_guard->IsInstallCodeRequest()) {
+ ASSERT(FLAG_concurrent_recompilation);
+ stack_guard->Continue(INSTALL_CODE);
+ isolate->optimizing_compiler_thread()->InstallOptimizedFunctions();
+ }
+ isolate->runtime_profiler()->OptimizeNow();
return isolate->heap()->undefined_value();
}
diff --git a/deps/v8/src/execution.h b/deps/v8/src/execution.h
index c6bf63d72..371ea309d 100644
--- a/deps/v8/src/execution.h
+++ b/deps/v8/src/execution.h
@@ -42,7 +42,8 @@ enum InterruptFlag {
PREEMPT = 1 << 3,
TERMINATE = 1 << 4,
GC_REQUEST = 1 << 5,
- FULL_DEOPT = 1 << 6
+ FULL_DEOPT = 1 << 6,
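+  // Requested by the concurrent recompiler when optimized code is ready
+  // to be installed on the main thread.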
+ INSTALL_CODE = 1 << 7
};
@@ -62,7 +63,8 @@ class Execution : public AllStatic {
// and the function called is not in strict mode, receiver is converted to
// an object.
//
- static Handle<Object> Call(Handle<Object> callable,
+ static Handle<Object> Call(Isolate* isolate,
+ Handle<Object> callable,
Handle<Object> receiver,
int argc,
Handle<Object> argv[],
@@ -92,28 +94,36 @@ class Execution : public AllStatic {
bool* caught_exception);
// ECMA-262 9.3
- static Handle<Object> ToNumber(Handle<Object> obj, bool* exc);
+ static Handle<Object> ToNumber(
+ Isolate* isolate, Handle<Object> obj, bool* exc);
// ECMA-262 9.4
- static Handle<Object> ToInteger(Handle<Object> obj, bool* exc);
+ static Handle<Object> ToInteger(
+ Isolate* isolate, Handle<Object> obj, bool* exc);
// ECMA-262 9.5
- static Handle<Object> ToInt32(Handle<Object> obj, bool* exc);
+ static Handle<Object> ToInt32(
+ Isolate* isolate, Handle<Object> obj, bool* exc);
// ECMA-262 9.6
- static Handle<Object> ToUint32(Handle<Object> obj, bool* exc);
+ static Handle<Object> ToUint32(
+ Isolate* isolate, Handle<Object> obj, bool* exc);
// ECMA-262 9.8
- static Handle<Object> ToString(Handle<Object> obj, bool* exc);
+ static Handle<Object> ToString(
+ Isolate* isolate, Handle<Object> obj, bool* exc);
// ECMA-262 9.8
- static Handle<Object> ToDetailString(Handle<Object> obj, bool* exc);
+ static Handle<Object> ToDetailString(
+ Isolate* isolate, Handle<Object> obj, bool* exc);
// ECMA-262 9.9
- static Handle<Object> ToObject(Handle<Object> obj, bool* exc);
+ static Handle<Object> ToObject(
+ Isolate* isolate, Handle<Object> obj, bool* exc);
// Create a new date object from 'time'.
- static Handle<Object> NewDate(double time, bool* exc);
+ static Handle<Object> NewDate(
+ Isolate* isolate, double time, bool* exc);
// Create a new regular expression object from 'pattern' and 'flags'.
static Handle<JSRegExp> NewJSRegExp(Handle<String> pattern,
@@ -128,7 +138,8 @@ class Execution : public AllStatic {
Handle<FunctionTemplateInfo> data, bool* exc);
static Handle<JSObject> InstantiateObject(Handle<ObjectTemplateInfo> data,
bool* exc);
- static void ConfigureInstance(Handle<Object> instance,
+ static void ConfigureInstance(Isolate* isolate,
+ Handle<Object> instance,
Handle<Object> data,
bool* exc);
static Handle<String> GetStackTraceLine(Handle<Object> recv,
@@ -136,8 +147,8 @@ class Execution : public AllStatic {
Handle<Object> pos,
Handle<Object> is_global);
#ifdef ENABLE_DEBUGGER_SUPPORT
- static Object* DebugBreakHelper();
- static void ProcessDebugMessages(bool debug_command_only);
+ static Object* DebugBreakHelper(Isolate* isolate);
+ static void ProcessDebugMessages(Isolate* isolate, bool debug_command_only);
#endif
// If the stack guard is triggered, but it is not an actual
@@ -147,14 +158,18 @@ class Execution : public AllStatic {
// Get a function delegate (or undefined) for the given non-function
// object. Used for support calling objects as functions.
- static Handle<Object> GetFunctionDelegate(Handle<Object> object);
- static Handle<Object> TryGetFunctionDelegate(Handle<Object> object,
+ static Handle<Object> GetFunctionDelegate(Isolate* isolate,
+ Handle<Object> object);
+ static Handle<Object> TryGetFunctionDelegate(Isolate* isolate,
+ Handle<Object> object,
bool* has_pending_exception);
// Get a function delegate (or undefined) for the given non-function
// object. Used for support calling objects as constructors.
- static Handle<Object> GetConstructorDelegate(Handle<Object> object);
- static Handle<Object> TryGetConstructorDelegate(Handle<Object> object,
+ static Handle<Object> GetConstructorDelegate(Isolate* isolate,
+ Handle<Object> object);
+ static Handle<Object> TryGetConstructorDelegate(Isolate* isolate,
+ Handle<Object> object,
bool* has_pending_exception);
};
@@ -199,6 +214,8 @@ class StackGuard {
#endif
bool IsGCRequest();
void RequestGC();
+ bool IsInstallCodeRequest();
+ void RequestInstallCode();
bool IsFullDeopt();
void FullDeopt();
void Continue(InterruptFlag after_what);
diff --git a/deps/v8/src/extensions/externalize-string-extension.cc b/deps/v8/src/extensions/externalize-string-extension.cc
index a3630fb9f..5fd821b9c 100644
--- a/deps/v8/src/extensions/externalize-string-extension.cc
+++ b/deps/v8/src/extensions/externalize-string-extension.cc
@@ -103,7 +103,8 @@ void ExternalizeStringExtension::Externalize(
reinterpret_cast<char*>(data), string->length());
result = string->MakeExternal(resource);
if (result && !string->IsInternalizedString()) {
- HEAP->external_string_table()->AddString(*string);
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(args.GetIsolate());
+ isolate->heap()->external_string_table()->AddString(*string);
}
if (!result) delete resource;
} else {
@@ -113,7 +114,8 @@ void ExternalizeStringExtension::Externalize(
data, string->length());
result = string->MakeExternal(resource);
if (result && !string->IsInternalizedString()) {
- HEAP->external_string_table()->AddString(*string);
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(args.GetIsolate());
+ isolate->heap()->external_string_table()->AddString(*string);
}
if (!result) delete resource;
}
diff --git a/deps/v8/src/extensions/gc-extension.cc b/deps/v8/src/extensions/gc-extension.cc
index 036b60cb2..308879115 100644
--- a/deps/v8/src/extensions/gc-extension.cc
+++ b/deps/v8/src/extensions/gc-extension.cc
@@ -39,10 +39,11 @@ v8::Handle<v8::FunctionTemplate> GCExtension::GetNativeFunction(
void GCExtension::GC(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(args.GetIsolate());
if (args[0]->BooleanValue()) {
- HEAP->CollectGarbage(NEW_SPACE, "gc extension");
+ isolate->heap()->CollectGarbage(NEW_SPACE, "gc extension");
} else {
- HEAP->CollectAllGarbage(Heap::kNoGCFlags, "gc extension");
+ isolate->heap()->CollectAllGarbage(Heap::kNoGCFlags, "gc extension");
}
}
diff --git a/deps/v8/src/extensions/i18n/break-iterator.cc b/deps/v8/src/extensions/i18n/break-iterator.cc
deleted file mode 100644
index 0681e264a..000000000
--- a/deps/v8/src/extensions/i18n/break-iterator.cc
+++ /dev/null
@@ -1,333 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "break-iterator.h"
-
-#include <string.h>
-
-#include "i18n-utils.h"
-#include "unicode/brkiter.h"
-#include "unicode/locid.h"
-#include "unicode/rbbi.h"
-
-namespace v8_i18n {
-
-static v8::Handle<v8::Value> ThrowUnexpectedObjectError();
-static icu::UnicodeString* ResetAdoptedText(v8::Handle<v8::Object>,
- v8::Handle<v8::Value>);
-static icu::BreakIterator* InitializeBreakIterator(v8::Handle<v8::String>,
- v8::Handle<v8::Object>,
- v8::Handle<v8::Object>);
-static icu::BreakIterator* CreateICUBreakIterator(const icu::Locale&,
- v8::Handle<v8::Object>);
-static void SetResolvedSettings(const icu::Locale&,
- icu::BreakIterator*,
- v8::Handle<v8::Object>);
-
-icu::BreakIterator* BreakIterator::UnpackBreakIterator(
- v8::Handle<v8::Object> obj) {
- v8::HandleScope handle_scope;
-
- // v8::ObjectTemplate doesn't have HasInstance method so we can't check
- // if obj is an instance of BreakIterator class. We'll check for a property
- // that has to be in the object. The same applies to other services, like
- // Collator and DateTimeFormat.
- if (obj->HasOwnProperty(v8::String::New("breakIterator"))) {
- return static_cast<icu::BreakIterator*>(
- obj->GetAlignedPointerFromInternalField(0));
- }
-
- return NULL;
-}
-
-void BreakIterator::DeleteBreakIterator(v8::Isolate* isolate,
- v8::Persistent<v8::Object>* object,
- void* param) {
- // First delete the hidden C++ object.
- // Unpacking should never return NULL here. That would only happen if
- // this method is used as the weak callback for persistent handles not
- // pointing to a break iterator.
- v8::HandleScope handle_scope(isolate);
- v8::Local<v8::Object> handle = v8::Local<v8::Object>::New(isolate, *object);
- delete UnpackBreakIterator(handle);
-
- delete static_cast<icu::UnicodeString*>(
- handle->GetAlignedPointerFromInternalField(1));
-
- // Then dispose of the persistent handle to JS object.
- object->Dispose(isolate);
-}
-
-
-// Throws a JavaScript exception.
-static v8::Handle<v8::Value> ThrowUnexpectedObjectError() {
- // Returns undefined, and schedules an exception to be thrown.
- return v8::ThrowException(v8::Exception::Error(
- v8::String::New("BreakIterator method called on an object "
- "that is not a BreakIterator.")));
-}
-
-
-// Deletes the old value and sets the adopted text in corresponding
-// JavaScript object.
-icu::UnicodeString* ResetAdoptedText(
- v8::Handle<v8::Object> obj, v8::Handle<v8::Value> value) {
- // Get the previous value from the internal field.
- icu::UnicodeString* text = static_cast<icu::UnicodeString*>(
- obj->GetAlignedPointerFromInternalField(1));
- delete text;
-
- // Assign new value to the internal pointer.
- v8::String::Value text_value(value);
- text = new icu::UnicodeString(
- reinterpret_cast<const UChar*>(*text_value), text_value.length());
- obj->SetAlignedPointerInInternalField(1, text);
-
- // Return new unicode string pointer.
- return text;
-}
-
-void BreakIterator::JSInternalBreakIteratorAdoptText(
- const v8::FunctionCallbackInfo<v8::Value>& args) {
- if (args.Length() != 2 || !args[0]->IsObject() || !args[1]->IsString()) {
- v8::ThrowException(v8::Exception::Error(
- v8::String::New(
- "Internal error. Iterator and text have to be specified.")));
- return;
- }
-
- icu::BreakIterator* break_iterator = UnpackBreakIterator(args[0]->ToObject());
- if (!break_iterator) {
- ThrowUnexpectedObjectError();
- return;
- }
-
- break_iterator->setText(*ResetAdoptedText(args[0]->ToObject(), args[1]));
-}
-
-void BreakIterator::JSInternalBreakIteratorFirst(
- const v8::FunctionCallbackInfo<v8::Value>& args) {
- icu::BreakIterator* break_iterator = UnpackBreakIterator(args[0]->ToObject());
- if (!break_iterator) {
- ThrowUnexpectedObjectError();
- return;
- }
-
- args.GetReturnValue().Set(static_cast<int32_t>(break_iterator->first()));
-}
-
-void BreakIterator::JSInternalBreakIteratorNext(
- const v8::FunctionCallbackInfo<v8::Value>& args) {
- icu::BreakIterator* break_iterator = UnpackBreakIterator(args[0]->ToObject());
- if (!break_iterator) {
- ThrowUnexpectedObjectError();
- return;
- }
-
- args.GetReturnValue().Set(static_cast<int32_t>(break_iterator->next()));
-}
-
-void BreakIterator::JSInternalBreakIteratorCurrent(
- const v8::FunctionCallbackInfo<v8::Value>& args) {
- icu::BreakIterator* break_iterator = UnpackBreakIterator(args[0]->ToObject());
- if (!break_iterator) {
- ThrowUnexpectedObjectError();
- return;
- }
-
- args.GetReturnValue().Set(static_cast<int32_t>(break_iterator->current()));
-}
-
-void BreakIterator::JSInternalBreakIteratorBreakType(
- const v8::FunctionCallbackInfo<v8::Value>& args) {
- icu::BreakIterator* break_iterator = UnpackBreakIterator(args[0]->ToObject());
- if (!break_iterator) {
- ThrowUnexpectedObjectError();
- return;
- }
-
- // TODO(cira): Remove cast once ICU fixes base BreakIterator class.
- icu::RuleBasedBreakIterator* rule_based_iterator =
- static_cast<icu::RuleBasedBreakIterator*>(break_iterator);
- int32_t status = rule_based_iterator->getRuleStatus();
- // Keep return values in sync with JavaScript BreakType enum.
- v8::Handle<v8::String> result;
- if (status >= UBRK_WORD_NONE && status < UBRK_WORD_NONE_LIMIT) {
- result = v8::String::New("none");
- } else if (status >= UBRK_WORD_NUMBER && status < UBRK_WORD_NUMBER_LIMIT) {
- result = v8::String::New("number");
- } else if (status >= UBRK_WORD_LETTER && status < UBRK_WORD_LETTER_LIMIT) {
- result = v8::String::New("letter");
- } else if (status >= UBRK_WORD_KANA && status < UBRK_WORD_KANA_LIMIT) {
- result = v8::String::New("kana");
- } else if (status >= UBRK_WORD_IDEO && status < UBRK_WORD_IDEO_LIMIT) {
- result = v8::String::New("ideo");
- } else {
- result = v8::String::New("unknown");
- }
- args.GetReturnValue().Set(result);
-}
-
-void BreakIterator::JSCreateBreakIterator(
- const v8::FunctionCallbackInfo<v8::Value>& args) {
- if (args.Length() != 3 || !args[0]->IsString() || !args[1]->IsObject() ||
- !args[2]->IsObject()) {
- v8::ThrowException(v8::Exception::Error(
- v8::String::New("Internal error, wrong parameters.")));
- return;
- }
-
- v8::Isolate* isolate = args.GetIsolate();
- v8::Local<v8::ObjectTemplate> break_iterator_template =
- Utils::GetTemplate2(isolate);
-
- // Create an empty object wrapper.
- v8::Local<v8::Object> local_object = break_iterator_template->NewInstance();
-  // The handle can be empty if there was a stack overflow
-  // when creating the object.
- if (local_object.IsEmpty()) {
- args.GetReturnValue().Set(local_object);
- return;
- }
-
- // Set break iterator as internal field of the resulting JS object.
- icu::BreakIterator* break_iterator = InitializeBreakIterator(
- args[0]->ToString(), args[1]->ToObject(), args[2]->ToObject());
-
- if (!break_iterator) {
- v8::ThrowException(v8::Exception::Error(v8::String::New(
- "Internal error. Couldn't create ICU break iterator.")));
- return;
- } else {
- local_object->SetAlignedPointerInInternalField(0, break_iterator);
- // Make sure that the pointer to adopted text is NULL.
- local_object->SetAlignedPointerInInternalField(1, NULL);
-
- v8::TryCatch try_catch;
- local_object->Set(v8::String::New("breakIterator"),
- v8::String::New("valid"));
- if (try_catch.HasCaught()) {
- v8::ThrowException(v8::Exception::Error(
- v8::String::New("Internal error, couldn't set property.")));
- return;
- }
- }
-
- v8::Persistent<v8::Object> wrapper(isolate, local_object);
- // Make object handle weak so we can delete iterator once GC kicks in.
- wrapper.MakeWeak<void>(NULL, &DeleteBreakIterator);
- args.GetReturnValue().Set(wrapper);
- wrapper.ClearAndLeak();
-}
-
-static icu::BreakIterator* InitializeBreakIterator(
- v8::Handle<v8::String> locale,
- v8::Handle<v8::Object> options,
- v8::Handle<v8::Object> resolved) {
- // Convert BCP47 into ICU locale format.
- UErrorCode status = U_ZERO_ERROR;
- icu::Locale icu_locale;
- char icu_result[ULOC_FULLNAME_CAPACITY];
- int icu_length = 0;
- v8::String::AsciiValue bcp47_locale(locale);
- if (bcp47_locale.length() != 0) {
- uloc_forLanguageTag(*bcp47_locale, icu_result, ULOC_FULLNAME_CAPACITY,
- &icu_length, &status);
- if (U_FAILURE(status) || icu_length == 0) {
- return NULL;
- }
- icu_locale = icu::Locale(icu_result);
- }
-
- icu::BreakIterator* break_iterator =
- CreateICUBreakIterator(icu_locale, options);
- if (!break_iterator) {
- // Remove extensions and try again.
- icu::Locale no_extension_locale(icu_locale.getBaseName());
- break_iterator = CreateICUBreakIterator(no_extension_locale, options);
-
- // Set resolved settings (locale).
- SetResolvedSettings(no_extension_locale, break_iterator, resolved);
- } else {
- SetResolvedSettings(icu_locale, break_iterator, resolved);
- }
-
- return break_iterator;
-}
-
-static icu::BreakIterator* CreateICUBreakIterator(
- const icu::Locale& icu_locale, v8::Handle<v8::Object> options) {
- UErrorCode status = U_ZERO_ERROR;
- icu::BreakIterator* break_iterator = NULL;
- icu::UnicodeString type;
- if (!Utils::ExtractStringSetting(options, "type", &type)) {
-    // Type has to be in the options; its absence is an internal error.
- return NULL;
- }
-
- if (type == UNICODE_STRING_SIMPLE("character")) {
- break_iterator =
- icu::BreakIterator::createCharacterInstance(icu_locale, status);
- } else if (type == UNICODE_STRING_SIMPLE("sentence")) {
- break_iterator =
- icu::BreakIterator::createSentenceInstance(icu_locale, status);
- } else if (type == UNICODE_STRING_SIMPLE("line")) {
- break_iterator =
- icu::BreakIterator::createLineInstance(icu_locale, status);
- } else {
-    // Default is the word iterator.
- break_iterator =
- icu::BreakIterator::createWordInstance(icu_locale, status);
- }
-
- if (U_FAILURE(status)) {
- delete break_iterator;
- return NULL;
- }
-
- return break_iterator;
-}
-
-static void SetResolvedSettings(const icu::Locale& icu_locale,
- icu::BreakIterator* date_format,
- v8::Handle<v8::Object> resolved) {
- UErrorCode status = U_ZERO_ERROR;
-
- // Set the locale
- char result[ULOC_FULLNAME_CAPACITY];
- status = U_ZERO_ERROR;
- uloc_toLanguageTag(
- icu_locale.getName(), result, ULOC_FULLNAME_CAPACITY, FALSE, &status);
- if (U_SUCCESS(status)) {
- resolved->Set(v8::String::New("locale"), v8::String::New(result));
- } else {
-    // This should never happen, since we got the locale from ICU.
- resolved->Set(v8::String::New("locale"), v8::String::New("und"));
- }
-}
-
-} // namespace v8_i18n
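Taken together, the natives above back the script-visible Intl.v8BreakIterator
API: internal field 0 of the wrapper holds the icu::BreakIterator*, field 1 the
adopted icu::UnicodeString*, and the weak persistent handle lets
DeleteBreakIterator free both once GC collects the wrapper. A minimal usage
sketch of the JS surface these callbacks implement (the index values are
illustrative, not normative):

    var it = new Intl.v8BreakIterator(['en'], {type: 'word'});
    it.adoptText('Jack and Jill');  // JSInternalBreakIteratorAdoptText
    it.first();                     // 0, via ...First
    it.next();                      // 4 (end of 'Jack'), via ...Next
    it.current();                   // 4, via ...Current
    it.breakType();                 // 'letter', via ...BreakType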
diff --git a/deps/v8/src/extensions/i18n/break-iterator.h b/deps/v8/src/extensions/i18n/break-iterator.h
deleted file mode 100644
index c44c20fbc..000000000
--- a/deps/v8/src/extensions/i18n/break-iterator.h
+++ /dev/null
@@ -1,85 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_EXTENSIONS_I18N_BREAK_ITERATOR_H_
-#define V8_EXTENSIONS_I18N_BREAK_ITERATOR_H_
-
-#include "unicode/uversion.h"
-#include "v8.h"
-
-namespace U_ICU_NAMESPACE {
-class BreakIterator;
-class UnicodeString;
-}
-
-namespace v8_i18n {
-
-class BreakIterator {
- public:
- static void JSCreateBreakIterator(
- const v8::FunctionCallbackInfo<v8::Value>& args);
-
- // Helper methods for various bindings.
-
-  // Unpacks the iterator object from the corresponding JavaScript object.
- static icu::BreakIterator* UnpackBreakIterator(v8::Handle<v8::Object> obj);
-
- // Release memory we allocated for the BreakIterator once the JS object that
- // holds the pointer gets garbage collected.
- static void DeleteBreakIterator(v8::Isolate* isolate,
- v8::Persistent<v8::Object>* object,
- void* param);
-
- // Assigns new text to the iterator.
- static void JSInternalBreakIteratorAdoptText(
- const v8::FunctionCallbackInfo<v8::Value>& args);
-
- // Moves iterator to the beginning of the string and returns new position.
- static void JSInternalBreakIteratorFirst(
- const v8::FunctionCallbackInfo<v8::Value>& args);
-
- // Moves iterator to the next position and returns it.
- static void JSInternalBreakIteratorNext(
- const v8::FunctionCallbackInfo<v8::Value>& args);
-
-  // Returns the iterator's current position.
- static void JSInternalBreakIteratorCurrent(
- const v8::FunctionCallbackInfo<v8::Value>& args);
-
-  // Returns the type of the break at the current position.
- // This call is only valid for word break iterators. Others just return 0.
- static void JSInternalBreakIteratorBreakType(
- const v8::FunctionCallbackInfo<v8::Value>& args);
-
- private:
- BreakIterator() {}
-};
-
-} // namespace v8_i18n
-
-#endif // V8_EXTENSIONS_I18N_BREAK_ITERATOR_H_
diff --git a/deps/v8/src/extensions/i18n/break-iterator.js b/deps/v8/src/extensions/i18n/break-iterator.js
deleted file mode 100644
index eefd8c2ab..000000000
--- a/deps/v8/src/extensions/i18n/break-iterator.js
+++ /dev/null
@@ -1,197 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// ECMAScript 402 API implementation is broken into separate files for
-// each service. The build system combines them together into one
-// Intl namespace.
-
-/**
- * Initializes the given object so it's a valid BreakIterator instance.
- * Useful for subclassing.
- */
-function initializeBreakIterator(iterator, locales, options) {
- native function NativeJSCreateBreakIterator();
-
- if (iterator.hasOwnProperty('__initializedIntlObject')) {
- throw new TypeError('Trying to re-initialize v8BreakIterator object.');
- }
-
- if (options === undefined) {
- options = {};
- }
-
- var getOption = getGetOption(options, 'breakiterator');
-
- var internalOptions = {};
-
- defineWEProperty(internalOptions, 'type', getOption(
- 'type', 'string', ['character', 'word', 'sentence', 'line'], 'word'));
-
- var locale = resolveLocale('breakiterator', locales, options);
- var resolved = Object.defineProperties({}, {
- requestedLocale: {value: locale.locale, writable: true},
- type: {value: internalOptions.type, writable: true},
- locale: {writable: true}
- });
-
- var internalIterator = NativeJSCreateBreakIterator(locale.locale,
- internalOptions,
- resolved);
-
- Object.defineProperty(iterator, 'iterator', {value: internalIterator});
- Object.defineProperty(iterator, 'resolved', {value: resolved});
- Object.defineProperty(iterator, '__initializedIntlObject',
- {value: 'breakiterator'});
-
- return iterator;
-}
-
-
-/**
- * Constructs Intl.v8BreakIterator object given optional locales and options
- * parameters.
- *
- * @constructor
- */
-%SetProperty(Intl, 'v8BreakIterator', function() {
- var locales = arguments[0];
- var options = arguments[1];
-
- if (!this || this === Intl) {
- // Constructor is called as a function.
- return new Intl.v8BreakIterator(locales, options);
- }
-
- return initializeBreakIterator(toObject(this), locales, options);
- },
- ATTRIBUTES.DONT_ENUM
-);
-
-
-/**
- * BreakIterator resolvedOptions method.
- */
-%SetProperty(Intl.v8BreakIterator.prototype, 'resolvedOptions', function() {
- if (%_IsConstructCall()) {
- throw new TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
- }
-
- if (!this || typeof this !== 'object' ||
- this.__initializedIntlObject !== 'breakiterator') {
- throw new TypeError('resolvedOptions method called on a non-object or ' +
-        'on an object that is not Intl.v8BreakIterator.');
- }
-
- var segmenter = this;
- var locale = getOptimalLanguageTag(segmenter.resolved.requestedLocale,
- segmenter.resolved.locale);
-
- return {
- locale: locale,
- type: segmenter.resolved.type
- };
- },
- ATTRIBUTES.DONT_ENUM
-);
-%FunctionSetName(Intl.v8BreakIterator.prototype.resolvedOptions,
- 'resolvedOptions');
-%FunctionRemovePrototype(Intl.v8BreakIterator.prototype.resolvedOptions);
-%SetNativeFlag(Intl.v8BreakIterator.prototype.resolvedOptions);
-
-
-/**
- * Returns the subset of the given locale list for which this service
- * has a matching (possibly fallback) locale. Locales appear in the same
- * order in the returned list as in the input list.
- * The options argument is optional.
- */
-%SetProperty(Intl.v8BreakIterator, 'supportedLocalesOf', function(locales) {
- if (%_IsConstructCall()) {
- throw new TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
- }
-
- return supportedLocalesOf('breakiterator', locales, arguments[1]);
- },
- ATTRIBUTES.DONT_ENUM
-);
-%FunctionSetName(Intl.v8BreakIterator.supportedLocalesOf, 'supportedLocalesOf');
-%FunctionRemovePrototype(Intl.v8BreakIterator.supportedLocalesOf);
-%SetNativeFlag(Intl.v8BreakIterator.supportedLocalesOf);
-
-
-/**
- * Adopts text to segment using the iterator. Old text, if present,
- * gets discarded.
- */
-function adoptText(iterator, text) {
- native function NativeJSBreakIteratorAdoptText();
- NativeJSBreakIteratorAdoptText(iterator.iterator, String(text));
-}
-
-
-/**
- * Returns index of the first break in the string and moves current pointer.
- */
-function first(iterator) {
- native function NativeJSBreakIteratorFirst();
- return NativeJSBreakIteratorFirst(iterator.iterator);
-}
-
-
-/**
- * Returns the index of the next break and moves the pointer.
- */
-function next(iterator) {
- native function NativeJSBreakIteratorNext();
- return NativeJSBreakIteratorNext(iterator.iterator);
-}
-
-
-/**
- * Returns index of the current break.
- */
-function current(iterator) {
- native function NativeJSBreakIteratorCurrent();
- return NativeJSBreakIteratorCurrent(iterator.iterator);
-}
-
-
-/**
- * Returns type of the current break.
- */
-function breakType(iterator) {
- native function NativeJSBreakIteratorBreakType();
- return NativeJSBreakIteratorBreakType(iterator.iterator);
-}
-
-
-addBoundMethod(Intl.v8BreakIterator, 'adoptText', adoptText, 1);
-addBoundMethod(Intl.v8BreakIterator, 'first', first, 0);
-addBoundMethod(Intl.v8BreakIterator, 'next', next, 0);
-addBoundMethod(Intl.v8BreakIterator, 'current', current, 0);
-addBoundMethod(Intl.v8BreakIterator, 'breakType', breakType, 0);
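addBoundMethod itself is defined elsewhere in the combined Intl source and is
not part of this diff. A rough sketch of the contract these five calls assume
(a hypothetical illustration, not the actual helper): install a non-enumerable
prototype method that prepends the receiver, so iterator.first() forwards to
first(iterator).

    function addBoundMethod(ctor, name, implementation, length) {
      Object.defineProperty(ctor.prototype, name, {
        value: function() {
          // Prepend the receiver to the caller-supplied arguments.
          var args = [this];
          for (var i = 0; i < arguments.length; i++) args.push(arguments[i]);
          return implementation.apply(undefined, args);
        },
        writable: true,
        enumerable: false,
        configurable: true
      });
    }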
diff --git a/deps/v8/src/extensions/i18n/collator.js b/deps/v8/src/extensions/i18n/collator.js
deleted file mode 100644
index d8d247b36..000000000
--- a/deps/v8/src/extensions/i18n/collator.js
+++ /dev/null
@@ -1,209 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// ECMAScript 402 API implementation is broken into separate files for
-// each service. The build system combines them together into one
-// Intl namespace.
-
-/**
- * Initializes the given object so it's a valid Collator instance.
- * Useful for subclassing.
- */
-function initializeCollator(collator, locales, options) {
- if (collator.hasOwnProperty('__initializedIntlObject')) {
- throw new TypeError('Trying to re-initialize Collator object.');
- }
-
- if (options === undefined) {
- options = {};
- }
-
- var getOption = getGetOption(options, 'collator');
-
- var internalOptions = {};
-
- defineWEProperty(internalOptions, 'usage', getOption(
- 'usage', 'string', ['sort', 'search'], 'sort'));
-
- var sensitivity = getOption('sensitivity', 'string',
- ['base', 'accent', 'case', 'variant']);
- if (sensitivity === undefined && internalOptions.usage === 'sort') {
- sensitivity = 'variant';
- }
- defineWEProperty(internalOptions, 'sensitivity', sensitivity);
-
- defineWEProperty(internalOptions, 'ignorePunctuation', getOption(
- 'ignorePunctuation', 'boolean', undefined, false));
-
- var locale = resolveLocale('collator', locales, options);
-
- // ICU can't take kb, kc... parameters through localeID, so we need to pass
- // them as options.
-  // One exception is -co-, which has to be part of the extension, but only
-  // for usage 'sort', and its value can't be 'standard' or 'search'.
- var extensionMap = parseExtension(locale.extension);
- setOptions(
- options, extensionMap, COLLATOR_KEY_MAP, getOption, internalOptions);
-
- var collation = 'default';
- var extension = '';
- if (extensionMap.hasOwnProperty('co') && internalOptions.usage === 'sort') {
- if (ALLOWED_CO_VALUES.indexOf(extensionMap.co) !== -1) {
- extension = '-u-co-' + extensionMap.co;
- // ICU can't tell us what the collation is, so save user's input.
- collation = extensionMap.co;
- }
- } else if (internalOptions.usage === 'search') {
- extension = '-u-co-search';
- }
- defineWEProperty(internalOptions, 'collation', collation);
-
- var requestedLocale = locale.locale + extension;
-
-  // We define all properties the C++ code may produce, to prevent security
-  // problems. If a malicious user redefines Object.prototype.locale,
-  // we can't just use plain x.locale = 'us' or, in C++, Set("locale", "us").
- // Object.defineProperties will either succeed defining or throw an error.
- var resolved = Object.defineProperties({}, {
- caseFirst: {writable: true},
- collation: {value: internalOptions.collation, writable: true},
- ignorePunctuation: {writable: true},
- locale: {writable: true},
- numeric: {writable: true},
- requestedLocale: {value: requestedLocale, writable: true},
- sensitivity: {writable: true},
- strength: {writable: true},
- usage: {value: internalOptions.usage, writable: true}
- });
-
- var internalCollator = %CreateCollator(requestedLocale,
- internalOptions,
- resolved);
-
- // Writable, configurable and enumerable are set to false by default.
- Object.defineProperty(collator, 'collator', {value: internalCollator});
- Object.defineProperty(collator, '__initializedIntlObject',
- {value: 'collator'});
- Object.defineProperty(collator, 'resolved', {value: resolved});
-
- return collator;
-}
-
-
-/**
- * Constructs Intl.Collator object given optional locales and options
- * parameters.
- *
- * @constructor
- */
-%SetProperty(Intl, 'Collator', function() {
- var locales = arguments[0];
- var options = arguments[1];
-
- if (!this || this === Intl) {
- // Constructor is called as a function.
- return new Intl.Collator(locales, options);
- }
-
- return initializeCollator(toObject(this), locales, options);
- },
- ATTRIBUTES.DONT_ENUM
-);
-
-
-/**
- * Collator resolvedOptions method.
- */
-%SetProperty(Intl.Collator.prototype, 'resolvedOptions', function() {
- if (%_IsConstructCall()) {
- throw new TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
- }
-
- if (!this || typeof this !== 'object' ||
- this.__initializedIntlObject !== 'collator') {
- throw new TypeError('resolvedOptions method called on a non-object ' +
-                        'or on an object that is not Intl.Collator.');
- }
-
- var coll = this;
- var locale = getOptimalLanguageTag(coll.resolved.requestedLocale,
- coll.resolved.locale);
-
- return {
- locale: locale,
- usage: coll.resolved.usage,
- sensitivity: coll.resolved.sensitivity,
- ignorePunctuation: coll.resolved.ignorePunctuation,
- numeric: coll.resolved.numeric,
- caseFirst: coll.resolved.caseFirst,
- collation: coll.resolved.collation
- };
- },
- ATTRIBUTES.DONT_ENUM
-);
-%FunctionSetName(Intl.Collator.prototype.resolvedOptions, 'resolvedOptions');
-%FunctionRemovePrototype(Intl.Collator.prototype.resolvedOptions);
-%SetNativeFlag(Intl.Collator.prototype.resolvedOptions);
-
-
-/**
- * Returns the subset of the given locale list for which this service
- * has a matching (possibly fallback) locale. Locales appear in the same
- * order in the returned list as in the input list.
- * The options argument is optional.
- */
-%SetProperty(Intl.Collator, 'supportedLocalesOf', function(locales) {
- if (%_IsConstructCall()) {
- throw new TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
- }
-
- return supportedLocalesOf('collator', locales, arguments[1]);
- },
- ATTRIBUTES.DONT_ENUM
-);
-%FunctionSetName(Intl.Collator.supportedLocalesOf, 'supportedLocalesOf');
-%FunctionRemovePrototype(Intl.Collator.supportedLocalesOf);
-%SetNativeFlag(Intl.Collator.supportedLocalesOf);
-
-
-/**
- * When the compare method is called with two arguments x and y, it returns a
- * Number other than NaN that represents the result of a locale-sensitive
- * String comparison of x with y.
- * The result is intended to order String values in the sort order specified
- * by the effective locale and collation options computed during construction
- * of this Collator object, and will be negative, zero, or positive, depending
- * on whether x comes before y in the sort order, the Strings are equal under
- * the sort order, or x comes after y in the sort order, respectively.
- */
-function compare(collator, x, y) {
- return %InternalCompare(collator.collator, String(x), String(y));
-};
-
-
-addBoundMethod(Intl.Collator, 'compare', compare, 2);
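Because compare is bound with the receiver prepended, collator.compare(x, y)
works directly as an Array.prototype.sort comparator. A small sketch of the
-u-co- handling described above (the actual ordering depends on the ICU data
compiled in):

    var c = new Intl.Collator(['de-u-co-phonebk'], {usage: 'sort'});
    ['Müller', 'Mahler', 'Mueller'].sort(c.compare);
    c.resolvedOptions().collation;  // 'phonebk': the user's input is saved,
                                    // since ICU can't report the collation.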
diff --git a/deps/v8/src/extensions/i18n/date-format.js b/deps/v8/src/extensions/i18n/date-format.js
deleted file mode 100644
index b1d28e535..000000000
--- a/deps/v8/src/extensions/i18n/date-format.js
+++ /dev/null
@@ -1,474 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// ECMAScript 402 API implementation is broken into separate files for
-// each service. The build system combines them together into one
-// Intl namespace.
-
-/**
- * Returns a string that matches the LDML representation of the options
- * object.
- */
-function toLDMLString(options) {
- var getOption = getGetOption(options, 'dateformat');
-
- var ldmlString = '';
-
- var option = getOption('weekday', 'string', ['narrow', 'short', 'long']);
- ldmlString += appendToLDMLString(
- option, {narrow: 'EEEEE', short: 'EEE', long: 'EEEE'});
-
- option = getOption('era', 'string', ['narrow', 'short', 'long']);
- ldmlString += appendToLDMLString(
- option, {narrow: 'GGGGG', short: 'GGG', long: 'GGGG'});
-
- option = getOption('year', 'string', ['2-digit', 'numeric']);
- ldmlString += appendToLDMLString(option, {'2-digit': 'yy', 'numeric': 'y'});
-
- option = getOption('month', 'string',
- ['2-digit', 'numeric', 'narrow', 'short', 'long']);
- ldmlString += appendToLDMLString(option, {'2-digit': 'MM', 'numeric': 'M',
- 'narrow': 'MMMMM', 'short': 'MMM', 'long': 'MMMM'});
-
- option = getOption('day', 'string', ['2-digit', 'numeric']);
- ldmlString += appendToLDMLString(
- option, {'2-digit': 'dd', 'numeric': 'd'});
-
- var hr12 = getOption('hour12', 'boolean');
- option = getOption('hour', 'string', ['2-digit', 'numeric']);
- if (hr12 === undefined) {
- ldmlString += appendToLDMLString(option, {'2-digit': 'jj', 'numeric': 'j'});
- } else if (hr12 === true) {
- ldmlString += appendToLDMLString(option, {'2-digit': 'hh', 'numeric': 'h'});
- } else {
- ldmlString += appendToLDMLString(option, {'2-digit': 'HH', 'numeric': 'H'});
- }
-
- option = getOption('minute', 'string', ['2-digit', 'numeric']);
- ldmlString += appendToLDMLString(option, {'2-digit': 'mm', 'numeric': 'm'});
-
- option = getOption('second', 'string', ['2-digit', 'numeric']);
- ldmlString += appendToLDMLString(option, {'2-digit': 'ss', 'numeric': 's'});
-
- option = getOption('timeZoneName', 'string', ['short', 'long']);
- ldmlString += appendToLDMLString(option, {short: 'v', long: 'vv'});
-
- return ldmlString;
-}
-
-
-/**
- * Returns either the LDML equivalent of the given option or an empty string.
- */
-function appendToLDMLString(option, pairs) {
- if (option !== undefined) {
- return pairs[option];
- } else {
- return '';
- }
-}
-
-
-/**
- * Returns an options object that matches the LDML representation of the date.
- */
-function fromLDMLString(ldmlString) {
-  // First remove ''-quoted literal text, so we drop strings like 'Uhr'.
- ldmlString = ldmlString.replace(QUOTED_STRING_RE, '');
-
- var options = {};
- var match = ldmlString.match(/E{3,5}/g);
- options = appendToDateTimeObject(
- options, 'weekday', match, {EEEEE: 'narrow', EEE: 'short', EEEE: 'long'});
-
- match = ldmlString.match(/G{3,5}/g);
- options = appendToDateTimeObject(
- options, 'era', match, {GGGGG: 'narrow', GGG: 'short', GGGG: 'long'});
-
- match = ldmlString.match(/y{1,2}/g);
- options = appendToDateTimeObject(
- options, 'year', match, {y: 'numeric', yy: '2-digit'});
-
- match = ldmlString.match(/M{1,5}/g);
- options = appendToDateTimeObject(options, 'month', match, {MM: '2-digit',
- M: 'numeric', MMMMM: 'narrow', MMM: 'short', MMMM: 'long'});
-
-  // Sometimes we get L instead of M for the month (standalone name).
- match = ldmlString.match(/L{1,5}/g);
- options = appendToDateTimeObject(options, 'month', match, {LL: '2-digit',
- L: 'numeric', LLLLL: 'narrow', LLL: 'short', LLLL: 'long'});
-
- match = ldmlString.match(/d{1,2}/g);
- options = appendToDateTimeObject(
- options, 'day', match, {d: 'numeric', dd: '2-digit'});
-
- match = ldmlString.match(/h{1,2}/g);
- if (match !== null) {
- options['hour12'] = true;
- }
- options = appendToDateTimeObject(
- options, 'hour', match, {h: 'numeric', hh: '2-digit'});
-
- match = ldmlString.match(/H{1,2}/g);
- if (match !== null) {
- options['hour12'] = false;
- }
- options = appendToDateTimeObject(
- options, 'hour', match, {H: 'numeric', HH: '2-digit'});
-
- match = ldmlString.match(/m{1,2}/g);
- options = appendToDateTimeObject(
- options, 'minute', match, {m: 'numeric', mm: '2-digit'});
-
- match = ldmlString.match(/s{1,2}/g);
- options = appendToDateTimeObject(
- options, 'second', match, {s: 'numeric', ss: '2-digit'});
-
- match = ldmlString.match(/v{1,2}/g);
- options = appendToDateTimeObject(
- options, 'timeZoneName', match, {v: 'short', vv: 'long'});
-
- return options;
-}
-
-
-function appendToDateTimeObject(options, option, match, pairs) {
- if (match === null) {
- if (!options.hasOwnProperty(option)) {
- defineWEProperty(options, option, undefined);
- }
- return options;
- }
-
- var property = match[0];
- defineWEProperty(options, option, pairs[property]);
-
- return options;
-}
-
-
-/**
- * Returns an options object with at least the default values filled in.
- */
-function toDateTimeOptions(options, required, defaults) {
- if (options === undefined) {
- options = null;
- } else {
- options = toObject(options);
- }
-
- options = Object.apply(this, [options]);
-
- var needsDefault = true;
- if ((required === 'date' || required === 'any') &&
- (options.weekday !== undefined || options.year !== undefined ||
- options.month !== undefined || options.day !== undefined)) {
- needsDefault = false;
- }
-
- if ((required === 'time' || required === 'any') &&
- (options.hour !== undefined || options.minute !== undefined ||
- options.second !== undefined)) {
- needsDefault = false;
- }
-
- if (needsDefault && (defaults === 'date' || defaults === 'all')) {
- Object.defineProperty(options, 'year', {value: 'numeric',
- writable: true,
- enumerable: true,
- configurable: true});
- Object.defineProperty(options, 'month', {value: 'numeric',
- writable: true,
- enumerable: true,
- configurable: true});
- Object.defineProperty(options, 'day', {value: 'numeric',
- writable: true,
- enumerable: true,
- configurable: true});
- }
-
- if (needsDefault && (defaults === 'time' || defaults === 'all')) {
- Object.defineProperty(options, 'hour', {value: 'numeric',
- writable: true,
- enumerable: true,
- configurable: true});
- Object.defineProperty(options, 'minute', {value: 'numeric',
- writable: true,
- enumerable: true,
- configurable: true});
- Object.defineProperty(options, 'second', {value: 'numeric',
- writable: true,
- enumerable: true,
- configurable: true});
- }
-
- return options;
-}
-
-
-/**
- * Initializes the given object so it's a valid DateTimeFormat instance.
- * Useful for subclassing.
- */
-function initializeDateTimeFormat(dateFormat, locales, options) {
-
- if (dateFormat.hasOwnProperty('__initializedIntlObject')) {
- throw new TypeError('Trying to re-initialize DateTimeFormat object.');
- }
-
- if (options === undefined) {
- options = {};
- }
-
- var locale = resolveLocale('dateformat', locales, options);
-
- options = toDateTimeOptions(options, 'any', 'date');
-
- var getOption = getGetOption(options, 'dateformat');
-
-  // We implement only the 'best fit' algorithm, but we still need to
-  // check that the formatMatcher value is in range.
- var matcher = getOption('formatMatcher', 'string',
- ['basic', 'best fit'], 'best fit');
-
- // Build LDML string for the skeleton that we pass to the formatter.
- var ldmlString = toLDMLString(options);
-
- // Filter out supported extension keys so we know what to put in resolved
- // section later on.
- // We need to pass calendar and number system to the method.
- var tz = canonicalizeTimeZoneID(options.timeZone);
-
- // ICU prefers options to be passed using -u- extension key/values, so
- // we need to build that.
- var internalOptions = {};
- var extensionMap = parseExtension(locale.extension);
- var extension = setOptions(options, extensionMap, DATETIME_FORMAT_KEY_MAP,
- getOption, internalOptions);
-
- var requestedLocale = locale.locale + extension;
- var resolved = Object.defineProperties({}, {
- calendar: {writable: true},
- day: {writable: true},
- era: {writable: true},
- hour12: {writable: true},
- hour: {writable: true},
- locale: {writable: true},
- minute: {writable: true},
- month: {writable: true},
- numberingSystem: {writable: true},
- pattern: {writable: true},
- requestedLocale: {value: requestedLocale, writable: true},
- second: {writable: true},
- timeZone: {writable: true},
- timeZoneName: {writable: true},
- tz: {value: tz, writable: true},
- weekday: {writable: true},
- year: {writable: true}
- });
-
- var formatter = %CreateDateTimeFormat(
- requestedLocale, {skeleton: ldmlString, timeZone: tz}, resolved);
-
- if (tz !== undefined && tz !== resolved.timeZone) {
- throw new RangeError('Unsupported time zone specified ' + tz);
- }
-
- Object.defineProperty(dateFormat, 'formatter', {value: formatter});
- Object.defineProperty(dateFormat, 'resolved', {value: resolved});
- Object.defineProperty(dateFormat, '__initializedIntlObject',
- {value: 'dateformat'});
-
- return dateFormat;
-}
-
-
-/**
- * Constructs Intl.DateTimeFormat object given optional locales and options
- * parameters.
- *
- * @constructor
- */
-%SetProperty(Intl, 'DateTimeFormat', function() {
- var locales = arguments[0];
- var options = arguments[1];
-
- if (!this || this === Intl) {
- // Constructor is called as a function.
- return new Intl.DateTimeFormat(locales, options);
- }
-
- return initializeDateTimeFormat(toObject(this), locales, options);
- },
- ATTRIBUTES.DONT_ENUM
-);
-
-
-/**
- * DateTimeFormat resolvedOptions method.
- */
-%SetProperty(Intl.DateTimeFormat.prototype, 'resolvedOptions', function() {
- if (%_IsConstructCall()) {
- throw new TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
- }
-
- if (!this || typeof this !== 'object' ||
- this.__initializedIntlObject !== 'dateformat') {
- throw new TypeError('resolvedOptions method called on a non-object or ' +
-        'on an object that is not Intl.DateTimeFormat.');
- }
-
- var format = this;
- var fromPattern = fromLDMLString(format.resolved.pattern);
- var userCalendar = ICU_CALENDAR_MAP[format.resolved.calendar];
- if (userCalendar === undefined) {
- // Use ICU name if we don't have a match. It shouldn't happen, but
- // it would be too strict to throw for this.
- userCalendar = format.resolved.calendar;
- }
-
- var locale = getOptimalLanguageTag(format.resolved.requestedLocale,
- format.resolved.locale);
-
- var result = {
- locale: locale,
- numberingSystem: format.resolved.numberingSystem,
- calendar: userCalendar,
- timeZone: format.resolved.timeZone
- };
-
- addWECPropertyIfDefined(result, 'timeZoneName', fromPattern.timeZoneName);
- addWECPropertyIfDefined(result, 'era', fromPattern.era);
- addWECPropertyIfDefined(result, 'year', fromPattern.year);
- addWECPropertyIfDefined(result, 'month', fromPattern.month);
- addWECPropertyIfDefined(result, 'day', fromPattern.day);
- addWECPropertyIfDefined(result, 'weekday', fromPattern.weekday);
- addWECPropertyIfDefined(result, 'hour12', fromPattern.hour12);
- addWECPropertyIfDefined(result, 'hour', fromPattern.hour);
- addWECPropertyIfDefined(result, 'minute', fromPattern.minute);
- addWECPropertyIfDefined(result, 'second', fromPattern.second);
-
- return result;
- },
- ATTRIBUTES.DONT_ENUM
-);
-%FunctionSetName(Intl.DateTimeFormat.prototype.resolvedOptions,
- 'resolvedOptions');
-%FunctionRemovePrototype(Intl.DateTimeFormat.prototype.resolvedOptions);
-%SetNativeFlag(Intl.DateTimeFormat.prototype.resolvedOptions);
-
-
-/**
- * Returns the subset of the given locale list for which this service
- * has a matching (possibly fallback) locale. Locales appear in the same
- * order in the returned list as in the input list.
- * The options argument is optional.
- */
-%SetProperty(Intl.DateTimeFormat, 'supportedLocalesOf', function(locales) {
- if (%_IsConstructCall()) {
- throw new TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
- }
-
- return supportedLocalesOf('dateformat', locales, arguments[1]);
- },
- ATTRIBUTES.DONT_ENUM
-);
-%FunctionSetName(Intl.DateTimeFormat.supportedLocalesOf, 'supportedLocalesOf');
-%FunctionRemovePrototype(Intl.DateTimeFormat.supportedLocalesOf);
-%SetNativeFlag(Intl.DateTimeFormat.supportedLocalesOf);
-
-
-/**
- * Returns a String value representing the result of calling ToNumber(date),
- * formatted according to the effective locale and the formatting options of
- * this DateTimeFormat.
- */
-function formatDate(formatter, dateValue) {
- var dateMs;
- if (dateValue === undefined) {
- dateMs = Date.now();
- } else {
- dateMs = Number(dateValue);
- }
-
- if (!isFinite(dateMs)) {
- throw new RangeError('Provided date is not in valid range.');
- }
-
- return %InternalDateFormat(formatter.formatter, new Date(dateMs));
-}
-
-
-/**
- * Returns a Date object obtained by parsing ToString(value) according to
- * the effective locale and the formatting options of this DateTimeFormat.
- * Returns undefined if the date string cannot be parsed.
- */
-function parseDate(formatter, value) {
- return %InternalDateParse(formatter.formatter, String(value));
-}
-
-
-// 0 because the date argument is optional.
-addBoundMethod(Intl.DateTimeFormat, 'format', formatDate, 0);
-addBoundMethod(Intl.DateTimeFormat, 'v8Parse', parseDate, 1);
-
-
-/**
- * Returns the canonical Area/Location name, or throws an exception if the
- * zone name is not a valid IANA name.
- */
-function canonicalizeTimeZoneID(tzID) {
- // Skip undefined zones.
- if (tzID === undefined) {
- return tzID;
- }
-
- // Special case handling (UTC, GMT).
- var upperID = tzID.toUpperCase();
- if (upperID === 'UTC' || upperID === 'GMT' ||
- upperID === 'ETC/UTC' || upperID === 'ETC/GMT') {
- return 'UTC';
- }
-
-  // We expect only _ and / besides ASCII letters.
- // All inputs should conform to Area/Location from now on.
- var match = TIMEZONE_NAME_CHECK_RE.exec(tzID);
- if (match === null) {
- throw new RangeError('Expected Area/Location for time zone, got ' + tzID);
- }
-
- var result = toTitleCaseWord(match[1]) + '/' + toTitleCaseWord(match[2]);
- var i = 3;
- while (match[i] !== undefined && i < match.length) {
- result = result + '_' + toTitleCaseWord(match[i]);
- i++;
- }
-
- return result;
-}
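To make the two mappings in this file concrete: toLDMLString folds the options
bag into an ICU skeleton string, and canonicalizeTimeZoneID title-cases
Area/Location input. A short sketch (the results follow directly from the
tables and regular expressions above):

    // toLDMLString({year: 'numeric', month: 'short', day: '2-digit',
    //               hour: '2-digit', hour12: false})  ==>  'yMMMddHH'
    canonicalizeTimeZoneID('america/new_york');  // 'America/New_York'
    canonicalizeTimeZoneID('etc/utc');           // 'UTC' (special-cased)
    canonicalizeTimeZoneID('Not a zone');        // throws RangeError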
diff --git a/deps/v8/src/extensions/i18n/footer.js b/deps/v8/src/extensions/i18n/footer.js
deleted file mode 100644
index adaa63346..000000000
--- a/deps/v8/src/extensions/i18n/footer.js
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// ECMAScript 402 API implementation is broken into separate files for
-// each service. The build system combines them together into one
-// Intl namespace.
-
-// Fix RegExp global state so we don't fail WebKit layout test:
-// fast/js/regexp-caching.html
-// It seems that 'g' or test() operations leave state changed.
-var CLEANUP_RE = new RegExp('');
-CLEANUP_RE.test('');
-
-return Intl;
-}())});
diff --git a/deps/v8/src/extensions/i18n/globals.js b/deps/v8/src/extensions/i18n/globals.js
deleted file mode 100644
index 68fabe777..000000000
--- a/deps/v8/src/extensions/i18n/globals.js
+++ /dev/null
@@ -1,168 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-/**
- * List of available services.
- */
-var AVAILABLE_SERVICES = ['collator',
- 'numberformat',
- 'dateformat',
- 'breakiterator'];
-
-/**
- * Caches available locales for each service.
- */
-var AVAILABLE_LOCALES = {
- 'collator': undefined,
- 'numberformat': undefined,
- 'dateformat': undefined,
- 'breakiterator': undefined
-};
-
-/**
- * Caches default ICU locale.
- */
-var DEFAULT_ICU_LOCALE = undefined;
-
-/**
- * Unicode extension regular expression.
- */
-var UNICODE_EXTENSION_RE = new RegExp('-u(-[a-z0-9]{2,8})+', 'g');
-
-/**
- * Matches any extension (from the first singleton to the end of the tag).
- */
-var ANY_EXTENSION_RE = new RegExp('-[a-z0-9]{1}-.*', 'g');
-
-/**
- * Matches ''-quoted text (a quote, anything but a quote, and a closing
- * quote), so it can be stripped out.
- */
-var QUOTED_STRING_RE = new RegExp("'[^']+'", 'g');
-
-/**
- * Matches valid service name.
- */
-var SERVICE_RE =
- new RegExp('^(collator|numberformat|dateformat|breakiterator)$');
-
-/**
- * Validates a language tag against bcp47 spec.
- * Actual value is assigned on first run.
- */
-var LANGUAGE_TAG_RE = undefined;
-
-/**
- * Helps find duplicate variants in the language tag.
- */
-var LANGUAGE_VARIANT_RE = undefined;
-
-/**
- * Helps find duplicate singletons in the language tag.
- */
-var LANGUAGE_SINGLETON_RE = undefined;
-
-/**
- * Matches valid IANA time zone names.
- */
-var TIMEZONE_NAME_CHECK_RE =
- new RegExp('^([A-Za-z]+)/([A-Za-z]+)(?:_([A-Za-z]+))*$');
-
-/**
- * Maps ICU calendar names into LDML type.
- */
-var ICU_CALENDAR_MAP = {
- 'gregorian': 'gregory',
- 'japanese': 'japanese',
- 'buddhist': 'buddhist',
- 'roc': 'roc',
- 'persian': 'persian',
- 'islamic-civil': 'islamicc',
- 'islamic': 'islamic',
- 'hebrew': 'hebrew',
- 'chinese': 'chinese',
- 'indian': 'indian',
- 'coptic': 'coptic',
- 'ethiopic': 'ethiopic',
- 'ethiopic-amete-alem': 'ethioaa'
-};
-
-/**
- * Map of Unicode extensions to option properties, and their values and types,
- * for a collator.
- */
-var COLLATOR_KEY_MAP = {
- 'kn': {'property': 'numeric', 'type': 'boolean'},
- 'kf': {'property': 'caseFirst', 'type': 'string',
- 'values': ['false', 'lower', 'upper']}
-};
-
-/**
- * Map of Unicode extensions to option properties, and their values and types,
- * for a number format.
- */
-var NUMBER_FORMAT_KEY_MAP = {
- 'nu': {'property': undefined, 'type': 'string'}
-};
-
-/**
- * Map of Unicode extensions to option properties, and their values and types,
- * for a date/time format.
- */
-var DATETIME_FORMAT_KEY_MAP = {
- 'ca': {'property': undefined, 'type': 'string'},
- 'nu': {'property': undefined, 'type': 'string'}
-};
-
-/**
- * Allowed -u-co- values. List taken from:
- * http://unicode.org/repos/cldr/trunk/common/bcp47/collation.xml
- */
-var ALLOWED_CO_VALUES = [
- 'big5han', 'dict', 'direct', 'ducet', 'gb2312', 'phonebk', 'phonetic',
- 'pinyin', 'reformed', 'searchjl', 'stroke', 'trad', 'unihan', 'zhuyin'
-];
-
-/**
- * Object attributes (configurable, writable, enumerable).
- * To combine attributes, OR them.
- * Values/names are copied from v8/include/v8.h:PropertyAttribute
- */
-var ATTRIBUTES = {
- 'NONE': 0,
- 'READ_ONLY': 1,
- 'DONT_ENUM': 2,
- 'DONT_DELETE': 4
-};
-
-/**
- * Error message for when a function object is created with new and it's
- * not a constructor.
- */
-var ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR =
- 'Function object that\'s not a constructor was created with new';
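Two of the constants above are easy to sanity-check in isolation: the
attribute values OR together as the comment says, and UNICODE_EXTENSION_RE
picks the -u- sequence out of a language tag:

    var attrs = ATTRIBUTES.DONT_ENUM | ATTRIBUTES.DONT_DELETE;  // 2 | 4 === 6
    'de-DE-u-co-phonebk'.match(UNICODE_EXTENSION_RE);  // ['-u-co-phonebk']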
diff --git a/deps/v8/src/extensions/i18n/header.js b/deps/v8/src/extensions/i18n/header.js
deleted file mode 100644
index b854ce5ea..000000000
--- a/deps/v8/src/extensions/i18n/header.js
+++ /dev/null
@@ -1,41 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// ECMAScript 402 API implementation is broken into separate files for
-// each service. The build system combines them together into one
-// Intl namespace.
-
-/**
- * The Intl object is a single object that has some named properties,
- * all of which are constructors.
- */
-Object.defineProperty(this, "Intl", { enumerable: false, value: (function() {
-
-'use strict';
-
-var Intl = {};
diff --git a/deps/v8/src/extensions/i18n/i18n-extension.cc b/deps/v8/src/extensions/i18n/i18n-extension.cc
deleted file mode 100644
index e2cba8eb9..000000000
--- a/deps/v8/src/extensions/i18n/i18n-extension.cc
+++ /dev/null
@@ -1,77 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "i18n-extension.h"
-
-#include "break-iterator.h"
-#include "natives.h"
-
-using v8::internal::I18NNatives;
-
-namespace v8_i18n {
-
-Extension::Extension()
- : v8::Extension("v8/i18n",
- reinterpret_cast<const char*>(
- I18NNatives::GetScriptsSource().start()),
- 0,
- 0,
- I18NNatives::GetScriptsSource().length()) {}
-
-v8::Handle<v8::FunctionTemplate> Extension::GetNativeFunction(
- v8::Handle<v8::String> name) {
- // Break iterator.
- if (name->Equals(v8::String::New("NativeJSCreateBreakIterator"))) {
- return v8::FunctionTemplate::New(BreakIterator::JSCreateBreakIterator);
- } else if (name->Equals(v8::String::New("NativeJSBreakIteratorAdoptText"))) {
- return v8::FunctionTemplate::New(
- BreakIterator::JSInternalBreakIteratorAdoptText);
- } else if (name->Equals(v8::String::New("NativeJSBreakIteratorFirst"))) {
- return v8::FunctionTemplate::New(
- BreakIterator::JSInternalBreakIteratorFirst);
- } else if (name->Equals(v8::String::New("NativeJSBreakIteratorNext"))) {
- return v8::FunctionTemplate::New(
- BreakIterator::JSInternalBreakIteratorNext);
- } else if (name->Equals(v8::String::New("NativeJSBreakIteratorCurrent"))) {
- return v8::FunctionTemplate::New(
- BreakIterator::JSInternalBreakIteratorCurrent);
- } else if (name->Equals(v8::String::New("NativeJSBreakIteratorBreakType"))) {
- return v8::FunctionTemplate::New(
- BreakIterator::JSInternalBreakIteratorBreakType);
- }
-
- return v8::Handle<v8::FunctionTemplate>();
-}
-
-
-void Extension::Register() {
- static Extension i18n_extension;
- static v8::DeclareExtension extension_declaration(&i18n_extension);
-}
-
-} // namespace v8_i18n
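GetNativeFunction is the bridge behind the 'native function' declarations in
the JS files earlier in this diff: when the combined Intl source is compiled
as a v8 extension, each declared name is resolved here and bound to the
matching C++ callback. The pattern, as already used in break-iterator.js:

    // Declared inside the extension script; v8 resolves the name through
    // Extension::GetNativeFunction and binds the C++ callback.
    native function NativeJSCreateBreakIterator();
    var internalIterator = NativeJSCreateBreakIterator(locale, options,
                                                       resolved);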
diff --git a/deps/v8/src/extensions/i18n/i18n-extension.h b/deps/v8/src/extensions/i18n/i18n-extension.h
deleted file mode 100644
index 050c336a6..000000000
--- a/deps/v8/src/extensions/i18n/i18n-extension.h
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_EXTENSIONS_I18N_I18N_EXTENSION_H_
-#define V8_EXTENSIONS_I18N_I18N_EXTENSION_H_
-
-#include "v8.h"
-
-namespace v8_i18n {
-
-class Extension : public v8::Extension {
- public:
- Extension();
-
- virtual v8::Handle<v8::FunctionTemplate> GetNativeFunction(
- v8::Handle<v8::String> name);
-
- static void Register();
-
- private:
- static Extension* extension_;
-};
-
-} // namespace v8_i18n
-
-#endif // V8_EXTENSIONS_I18N_I18N_EXTENSION_H_
diff --git a/deps/v8/src/extensions/i18n/i18n-utils.cc b/deps/v8/src/extensions/i18n/i18n-utils.cc
deleted file mode 100644
index 8c87f0715..000000000
--- a/deps/v8/src/extensions/i18n/i18n-utils.cc
+++ /dev/null
@@ -1,177 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "i18n-utils.h"
-
-#include <string.h>
-
-#include "unicode/unistr.h"
-
-namespace v8_i18n {
-
-// static
-void Utils::StrNCopy(char* dest, int length, const char* src) {
- if (!dest || !src) return;
-
- strncpy(dest, src, length);
- dest[length - 1] = '\0';
-}
-
-
-// static
-bool Utils::V8StringToUnicodeString(const v8::Handle<v8::Value>& input,
- icu::UnicodeString* output) {
- v8::String::Utf8Value utf8_value(input);
-
- if (*utf8_value == NULL) return false;
-
- output->setTo(icu::UnicodeString::fromUTF8(*utf8_value));
-
- return true;
-}
-
-
-// static
-bool Utils::ExtractStringSetting(const v8::Handle<v8::Object>& settings,
- const char* setting,
- icu::UnicodeString* result) {
- if (!setting || !result) return false;
-
- v8::HandleScope handle_scope;
- v8::TryCatch try_catch;
- v8::Handle<v8::Value> value = settings->Get(v8::String::New(setting));
- if (try_catch.HasCaught()) {
- return false;
- }
- // No need to check if |value| is empty because it's taken care of
- // by TryCatch above.
- if (!value->IsUndefined() && !value->IsNull() && value->IsString()) {
- return V8StringToUnicodeString(value, result);
- }
- return false;
-}
-
-
-// static
-bool Utils::ExtractIntegerSetting(const v8::Handle<v8::Object>& settings,
- const char* setting,
- int32_t* result) {
- if (!setting || !result) return false;
-
- v8::HandleScope handle_scope;
- v8::TryCatch try_catch;
- v8::Handle<v8::Value> value = settings->Get(v8::String::New(setting));
- if (try_catch.HasCaught()) {
- return false;
- }
- // No need to check if |value| is empty because it's taken care of
- // by TryCatch above.
- if (!value->IsUndefined() && !value->IsNull() && value->IsNumber()) {
- *result = static_cast<int32_t>(value->Int32Value());
- return true;
- }
- return false;
-}
-
-
-// static
-bool Utils::ExtractBooleanSetting(const v8::Handle<v8::Object>& settings,
- const char* setting,
- bool* result) {
- if (!setting || !result) return false;
-
- v8::HandleScope handle_scope;
- v8::TryCatch try_catch;
- v8::Handle<v8::Value> value = settings->Get(v8::String::New(setting));
- if (try_catch.HasCaught()) {
- return false;
- }
- // No need to check if |value| is empty because it's taken care of
- // by TryCatch above.
- if (!value->IsUndefined() && !value->IsNull() && value->IsBoolean()) {
- *result = static_cast<bool>(value->BooleanValue());
- return true;
- }
- return false;
-}
-
-
-// static
-void Utils::AsciiToUChar(const char* source,
- int32_t source_length,
- UChar* target,
- int32_t target_length) {
- int32_t length =
- source_length < target_length ? source_length : target_length;
-
- if (length <= 0) {
- return;
- }
-
- for (int32_t i = 0; i < length - 1; ++i) {
- target[i] = static_cast<UChar>(source[i]);
- }
-
- target[length - 1] = 0x0u;
-}
-
-
-static v8::Local<v8::ObjectTemplate> ToLocal(i::Handle<i::Object> handle) {
- return v8::Utils::ToLocal(i::Handle<i::ObjectTemplateInfo>::cast(handle));
-}
-
-
-template<int internal_fields, i::EternalHandles::SingletonHandle field>
-static v8::Local<v8::ObjectTemplate> GetEternal(v8::Isolate* external) {
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(external);
- if (isolate->eternal_handles()->Exists(field)) {
- return ToLocal(isolate->eternal_handles()->GetSingleton(field));
- }
- v8::Local<v8::ObjectTemplate> raw_template(v8::ObjectTemplate::New());
- raw_template->SetInternalFieldCount(internal_fields);
- return ToLocal(
- isolate->eternal_handles()->CreateSingleton(
- isolate,
- *v8::Utils::OpenHandle(*raw_template),
- field));
-}
-
-
-// static
-v8::Local<v8::ObjectTemplate> Utils::GetTemplate(v8::Isolate* isolate) {
- return GetEternal<1, i::EternalHandles::I18N_TEMPLATE_ONE>(isolate);
-}
-
-
-// static
-v8::Local<v8::ObjectTemplate> Utils::GetTemplate2(v8::Isolate* isolate) {
- return GetEternal<2, i::EternalHandles::I18N_TEMPLATE_TWO>(isolate);
-}
-
-
-} // namespace v8_i18n
diff --git a/deps/v8/src/extensions/i18n/i18n-utils.h b/deps/v8/src/extensions/i18n/i18n-utils.h
deleted file mode 100644
index db5d1b6ac..000000000
--- a/deps/v8/src/extensions/i18n/i18n-utils.h
+++ /dev/null
@@ -1,91 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_EXTENSIONS_I18N_SRC_UTILS_H_
-#define V8_EXTENSIONS_I18N_SRC_UTILS_H_
-
-#include "unicode/uversion.h"
-#include "v8.h"
-
-namespace U_ICU_NAMESPACE {
-class UnicodeString;
-}
-
-namespace v8_i18n {
-
-class Utils {
- public:
- // Safe string copy. Null terminates the destination. Copies at most
- // (length - 1) bytes.
- // We can't use snprintf since it's not supported on all relevant platforms.
- // We can't use OS::SNPrintF; it's only for internal code.
- static void StrNCopy(char* dest, int length, const char* src);
-
- // Converts a v8::String into an ICU UnicodeString. Returns false if the
- // input can't be converted to UTF-8.
- static bool V8StringToUnicodeString(const v8::Handle<v8::Value>& input,
- icu::UnicodeString* output);
-
- // Extract the String setting named |setting| from |settings| into |result|.
- // Return true if it's specified. Otherwise, return false.
- static bool ExtractStringSetting(const v8::Handle<v8::Object>& settings,
- const char* setting,
- icu::UnicodeString* result);
-
- // Extract the Integer setting named |setting| from |settings| into |result|.
- // Return true if it's specified. Otherwise, return false.
- static bool ExtractIntegerSetting(const v8::Handle<v8::Object>& settings,
- const char* setting,
- int32_t* result);
-
- // Extract the Boolean setting named |setting| from |settings| into |result|.
- // Return true if it's specified. Otherwise, return false.
- static bool ExtractBooleanSetting(const v8::Handle<v8::Object>& settings,
- const char* setting,
- bool* result);
-
- // Converts an ASCII array into a UChar array.
- // The target is always \0-terminated.
- static void AsciiToUChar(const char* source,
- int32_t source_length,
- UChar* target,
- int32_t target_length);
-
- // Creates an ObjectTemplate with one internal field.
- static v8::Local<v8::ObjectTemplate> GetTemplate(v8::Isolate* isolate);
-
- // Creates an ObjectTemplate with two internal fields.
- static v8::Local<v8::ObjectTemplate> GetTemplate2(v8::Isolate* isolate);
-
- private:
- Utils() {}
-};
-
-} // namespace v8_i18n
-
-#endif // V8_EXTENSIONS_I18N_UTILS_H_
diff --git a/deps/v8/src/extensions/i18n/i18n-utils.js b/deps/v8/src/extensions/i18n/i18n-utils.js
deleted file mode 100644
index 545082ecb..000000000
--- a/deps/v8/src/extensions/i18n/i18n-utils.js
+++ /dev/null
@@ -1,536 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// ECMAScript 402 API implementation is broken into separate files for
-// each service. The build system combines them together into one
-// Intl namespace.
-
-/**
- * Adds a bound method to the prototype of the given object.
- */
-function addBoundMethod(obj, methodName, implementation, length) {
- function getter() {
- if (!this || typeof this !== 'object' ||
- this.__initializedIntlObject === undefined) {
- throw new TypeError('Method ' + methodName + ' called on a ' +
- 'non-object or on a wrong type of object.');
- }
- var internalName = '__bound' + methodName + '__';
- if (this[internalName] === undefined) {
- var that = this;
- var boundMethod;
- if (length === undefined || length === 2) {
- boundMethod = function(x, y) {
- if (%_IsConstructCall()) {
- throw new TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
- }
- return implementation(that, x, y);
- }
- } else if (length === 1) {
- boundMethod = function(x) {
- if (%_IsConstructCall()) {
- throw new TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
- }
- return implementation(that, x);
- }
- } else {
- boundMethod = function() {
- if (%_IsConstructCall()) {
- throw new TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
- }
- // DateTimeFormat.format needs to be a 0-arg method, but can still
- // receive an optional dateValue param. If one was provided, pass it
- // along.
- if (arguments.length > 0) {
- return implementation(that, arguments[0]);
- } else {
- return implementation(that);
- }
- }
- }
- %FunctionSetName(boundMethod, internalName);
- %FunctionRemovePrototype(boundMethod);
- %SetNativeFlag(boundMethod);
- this[internalName] = boundMethod;
- }
- return this[internalName];
- }
-
- %FunctionSetName(getter, methodName);
- %FunctionRemovePrototype(getter);
- %SetNativeFlag(getter);
-
- Object.defineProperty(obj.prototype, methodName, {
- get: getter,
- enumerable: false,
- configurable: true
- });
-}
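
The deleted addBoundMethod above implements a lazy-binding getter: the bound
closure is created on first property read and cached on the instance. A
minimal standalone sketch of the same pattern, in plain ES5 and without the
%-prefixed runtime natives (%FunctionSetName and friends exist only inside
the V8 runtime); the cache property name is illustrative:

  function addBoundMethodSketch(ctor, methodName, implementation) {
    function getter() {
      var cacheName = '__bound' + methodName + '__';
      if (this[cacheName] === undefined) {
        var that = this;
        // Bind the receiver once; later reads reuse the cached closure.
        this[cacheName] = function(x, y) {
          return implementation(that, x, y);
        };
      }
      return this[cacheName];
    }
    Object.defineProperty(ctor.prototype, methodName, {
      get: getter,
      enumerable: false,
      configurable: true
    });
  }
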
-
-
-/**
- * Returns an intersection of locales and service supported locales.
- * Parameter locales is treated as a priority list.
- */
-function supportedLocalesOf(service, locales, options) {
- if (service.match(SERVICE_RE) === null) {
- throw new Error('Internal error, wrong service type: ' + service);
- }
-
- // Provide default options if none were specified.
- if (options === undefined) {
- options = {};
- } else {
- options = toObject(options);
- }
-
- var matcher = options.localeMatcher;
- if (matcher !== undefined) {
- matcher = String(matcher);
- if (matcher !== 'lookup' && matcher !== 'best fit') {
- throw new RangeError('Illegal value for localeMatcher:' + matcher);
- }
- } else {
- matcher = 'best fit';
- }
-
- var requestedLocales = initializeLocaleList(locales);
-
- // Cache these; they never change for a given service.
- if (AVAILABLE_LOCALES[service] === undefined) {
- AVAILABLE_LOCALES[service] = getAvailableLocalesOf(service);
- }
-
- // Use either best fit or lookup algorithm to match locales.
- if (matcher === 'best fit') {
- return initializeLocaleList(bestFitSupportedLocalesOf(
- requestedLocales, AVAILABLE_LOCALES[service]));
- }
-
- return initializeLocaleList(lookupSupportedLocalesOf(
- requestedLocales, AVAILABLE_LOCALES[service]));
-}
-
-
-/**
- * Returns the subset of the provided BCP 47 language priority list for which
- * this service has a matching locale when using the BCP 47 Lookup algorithm.
- * Locales appear in the same order in the returned list as in the input list.
- */
-function lookupSupportedLocalesOf(requestedLocales, availableLocales) {
- var matchedLocales = [];
- for (var i = 0; i < requestedLocales.length; ++i) {
- // Remove -u- extension.
- var locale = requestedLocales[i].replace(UNICODE_EXTENSION_RE, '');
- do {
- if (availableLocales[locale] !== undefined) {
- // Push requested locale not the resolved one.
- matchedLocales.push(requestedLocales[i]);
- break;
- }
- // Truncate the locale if possible; if not, break.
- var pos = locale.lastIndexOf('-');
- if (pos === -1) {
- break;
- }
- locale = locale.substring(0, pos);
- } while (true);
- }
-
- return matchedLocales;
-}
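
The truncation loop above is the RFC 4647 Lookup fallback: strip subtags from
the right until a supported prefix is found, then report the original request.
A self-contained sketch of one request (availableLocales modeled as an object
keyed by tag, as in the surrounding code; the extension regex is an
approximation of UNICODE_EXTENSION_RE):

  function lookupOne(requested, availableLocales) {
    var locale = requested.replace(/-u(-[a-z0-9]{2,8})+/, '');
    while (true) {
      if (availableLocales[locale] !== undefined) return requested;
      var pos = locale.lastIndexOf('-');
      if (pos === -1) return undefined;
      locale = locale.substring(0, pos);  // de-DE-u-co-phonebk -> de-DE -> de
    }
  }

  lookupOne('de-DE-u-co-phonebk', {'de': null});  // -> 'de-DE-u-co-phonebk'
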
-
-
-/**
- * Returns the subset of the provided BCP 47 language priority list for which
- * this service has a matching locale when using the
- * implementation-dependent algorithm.
- * Locales appear in the same order in the returned list as in the input list.
- */
-function bestFitSupportedLocalesOf(requestedLocales, availableLocales) {
- // TODO(cira): implement better best fit algorithm.
- return lookupSupportedLocalesOf(requestedLocales, availableLocales);
-}
-
-
-/**
- * Returns a getOption function that extracts a property value from the
- * given options object. If the property is missing, it returns defaultValue.
- * If the value is out of range for that property, it throws RangeError.
- */
-function getGetOption(options, caller) {
- if (options === undefined) {
- throw new Error('Internal ' + caller + ' error. ' +
- 'Default options are missing.');
- }
-
- var getOption = function getOption(property, type, values, defaultValue) {
- if (options[property] !== undefined) {
- var value = options[property];
- switch (type) {
- case 'boolean':
- value = Boolean(value);
- break;
- case 'string':
- value = String(value);
- break;
- case 'number':
- value = Number(value);
- break;
- default:
- throw new Error('Internal error. Wrong value type.');
- }
- if (values !== undefined && values.indexOf(value) === -1) {
- throw new RangeError('Value ' + value + ' out of range for ' + caller +
- ' options property ' + property);
- }
-
- return value;
- }
-
- return defaultValue;
- }
-
- return getOption;
-}
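
Usage sketch for the closure above: coercion, validation against the allowed
values, and defaulting happen in one call.

  var getOption = getGetOption({localeMatcher: 'lookup'}, 'collator');
  getOption('localeMatcher', 'string', ['lookup', 'best fit'], 'best fit');
  // -> 'lookup'; an unlisted value such as 'fuzzy' would throw a RangeError,
  // and a missing property falls back to 'best fit'.
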
-
-
-/**
- * Compares a BCP 47 language priority list requestedLocales against the locales
- * in availableLocales and determines the best available language to meet the
- * request. Two algorithms are available to match the locales: the Lookup
- * algorithm described in RFC 4647 section 3.4, and an implementation dependent
- * best-fit algorithm. Independent of the locale matching algorithm, options
- * specified through Unicode locale extension sequences are negotiated
- * separately, taking the caller's relevant extension keys and locale data as
- * well as client-provided options into consideration. Returns an object with
- * a locale property whose value is the language tag of the selected locale,
- * and properties for each key in relevantExtensionKeys providing the selected
- * value for that key.
- */
-function resolveLocale(service, requestedLocales, options) {
- requestedLocales = initializeLocaleList(requestedLocales);
-
- var getOption = getGetOption(options, service);
- var matcher = getOption('localeMatcher', 'string',
- ['lookup', 'best fit'], 'best fit');
- var resolved;
- if (matcher === 'lookup') {
- resolved = lookupMatcher(service, requestedLocales);
- } else {
- resolved = bestFitMatcher(service, requestedLocales);
- }
-
- return resolved;
-}
-
-
-/**
- * Returns best matched supported locale and extension info using basic
- * lookup algorithm.
- */
-function lookupMatcher(service, requestedLocales) {
- if (service.match(SERVICE_RE) === null) {
- throw new Error('Internal error, wrong service type: ' + service);
- }
-
- // Cache these; they never change for a given service.
- if (AVAILABLE_LOCALES[service] === undefined) {
- AVAILABLE_LOCALES[service] = getAvailableLocalesOf(service);
- }
-
- for (var i = 0; i < requestedLocales.length; ++i) {
- // Remove all extensions.
- var locale = requestedLocales[i].replace(ANY_EXTENSION_RE, '');
- do {
- if (AVAILABLE_LOCALES[service][locale] !== undefined) {
- // Return the resolved locale and extension.
- var extensionMatch = requestedLocales[i].match(UNICODE_EXTENSION_RE);
- var extension = (extensionMatch === null) ? '' : extensionMatch[0];
- return {'locale': locale, 'extension': extension, 'position': i};
- }
- // Truncate locale if possible.
- var pos = locale.lastIndexOf('-');
- if (pos === -1) {
- break;
- }
- locale = locale.substring(0, pos);
- } while (true);
- }
-
- // Didn't find a match, return default.
- if (DEFAULT_ICU_LOCALE === undefined) {
- DEFAULT_ICU_LOCALE = %GetDefaultICULocale();
- }
-
- return {'locale': DEFAULT_ICU_LOCALE, 'extension': '', 'position': -1};
-}
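
Worked example of the return shape above: assuming AVAILABLE_LOCALES['collator']
contains 'de' but no 'fr', a request of ['fr-XX', 'de-DE-u-co-phonebk']
resolves to

  { locale: 'de', extension: '-u-co-phonebk', position: 1 }

'fr-XX' truncates to 'fr' without a hit; 'de-DE' truncates to the supported
'de', and the extension is re-read from the original request.
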
-
-
-/**
- * Returns the best-matched supported locale and extension info using an
- * implementation-dependent algorithm.
- */
-function bestFitMatcher(service, requestedLocales) {
- // TODO(cira): implement better best fit algorithm.
- return lookupMatcher(service, requestedLocales);
-}
-
-
-/**
- * Parses a Unicode extension into a key-value map.
- * Returns an empty object if the extension string is invalid.
- * We are not concerned with the validity of the values at this point.
- */
-function parseExtension(extension) {
- var extensionSplit = extension.split('-');
-
- // Assume ['', 'u', ...] input, but don't throw.
- if (extensionSplit.length <= 2 ||
- (extensionSplit[0] !== '' && extensionSplit[1] !== 'u')) {
- return {};
- }
-
- // Key is {2}alphanum, value is {3,8}alphanum.
- // Some keys may not have explicit values (booleans).
- var extensionMap = {};
- var previousKey = undefined;
- for (var i = 2; i < extensionSplit.length; ++i) {
- var length = extensionSplit[i].length;
- var element = extensionSplit[i];
- if (length === 2) {
- extensionMap[element] = undefined;
- previousKey = element;
- } else if (length >= 3 && length <= 8 && previousKey !== undefined) {
- extensionMap[previousKey] = element;
- previousKey = undefined;
- } else {
- // There is a value that's too long, or that doesn't have a key.
- return {};
- }
- }
-
- return extensionMap;
-}
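
For example, the extension matched from 'de-u-co-phonebk-kn' is
'-u-co-phonebk-kn'; splitting on '-' yields ['', 'u', 'co', 'phonebk', 'kn'],
so:

  parseExtension('-u-co-phonebk-kn');
  // -> { co: 'phonebk', kn: undefined }  ('kn' is a boolean key, no value)
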
-
-
-/**
- * Converts the parameter to an Object if possible.
- */
-function toObject(value) {
- if (value === undefined || value === null) {
- throw new TypeError('Value cannot be converted to an Object.');
- }
-
- return Object(value);
-}
-
-
-/**
- * Populates internalOptions object with boolean key-value pairs
- * from extensionMap and options.
- * Returns the filtered extension (number and date format constructors use
- * Unicode extensions to pass parameters to ICU).
- * It's used for extension-option pairs only, e.g. kn-normalization, but not
- * for 'sensitivity', since it has no extension equivalent.
- * Extensions like nu and ca have no options equivalent, so we place
- * undefined in map.property to denote that.
- */
-function setOptions(inOptions, extensionMap, keyValues, getOption, outOptions) {
- var extension = '';
-
- var updateExtension = function updateExtension(key, value) {
- return '-' + key + '-' + String(value);
- }
-
- var updateProperty = function updateProperty(property, type, value) {
- if (type === 'boolean' && (typeof value === 'string')) {
- value = (value === 'true') ? true : false;
- }
-
- if (property !== undefined) {
- defineWEProperty(outOptions, property, value);
- }
- }
-
- for (var key in keyValues) {
- if (keyValues.hasOwnProperty(key)) {
- var value = undefined;
- var map = keyValues[key];
- if (map.property !== undefined) {
- // This may return true if the user specifies numeric: 'false', since
- // Boolean('nonempty') === true.
- value = getOption(map.property, map.type, map.values);
- }
- if (value !== undefined) {
- updateProperty(map.property, map.type, value);
- extension += updateExtension(key, value);
- continue;
- }
- // User options didn't have it, check Unicode extension.
- // Here we want to convert strings 'true', 'false' into proper Boolean
- // values (not a user error).
- if (extensionMap.hasOwnProperty(key)) {
- value = extensionMap[key];
- if (value !== undefined) {
- updateProperty(map.property, map.type, value);
- extension += updateExtension(key, value);
- } else if (map.type === 'boolean') {
- // Boolean keys are allowed not to have values in Unicode extension.
- // Those default to true.
- updateProperty(map.property, map.type, true);
- extension += updateExtension(key, true);
- }
- }
- }
- }
-
- return extension === '' ? '' : '-u' + extension;
-}
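
A sketch of the round trip, assuming an illustrative key map entry
{kn: {property: 'numeric', type: 'boolean'}} (the real maps live in the
per-service files):

  var out = {};
  var keyMap = {kn: {property: 'numeric', type: 'boolean'}};
  var ext = setOptions({numeric: true}, {}, keyMap,
                       getGetOption({numeric: true}, 'collator'), out);
  // ext === '-u-kn-true', and out.numeric === true
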
-
-
-/**
- * Converts all OwnProperties into
- * configurable: false, writable: false, enumerable: true.
- */
-function freezeArray(array) {
- array.forEach(function(element, index) {
- Object.defineProperty(array, index, {value: element,
- configurable: false,
- writable: false,
- enumerable: true});
- });
-
- Object.defineProperty(array, 'length', {value: array.length,
- writable: false});
-
- return array;
-}
-
-
-/**
- * It's sometimes desirable to keep the user-requested locale instead of the
- * ICU-supported one (zh-TW is equivalent to zh-Hant-TW, so we should keep the
- * shorter one if that is what the user requested).
- * This function returns the user-specified tag if its maximized form matches
- * the ICU-resolved locale. If not, we return the ICU result.
- */
-function getOptimalLanguageTag(original, resolved) {
- // Returns Array<Object>, where each object has maximized and base properties.
- // Maximized: zh -> zh-Hans-CN
- // Base: zh-CN-u-ca-gregory -> zh-CN
- // Take care of grandfathered or simple cases.
- if (original === resolved) {
- return original;
- }
-
- var locales = %GetLanguageTagVariants([original, resolved]);
- if (locales[0].maximized !== locales[1].maximized) {
- return resolved;
- }
-
- // Preserve extensions of resolved locale, but swap base tags with original.
- var resolvedBase = new RegExp('^' + locales[1].base);
- return resolved.replace(resolvedBase, locales[0].base);
-}
-
-
-/**
- * Returns an Object that contains all of supported locales for a given
- * service.
- * In addition to the supported locales, we add an xx-ZZ locale for each
- * supported xx-Yyyy-ZZ locale. This is required by the spec.
- */
-function getAvailableLocalesOf(service) {
- var available = %AvailableLocalesOf(service);
-
- for (var i in available) {
- if (available.hasOwnProperty(i)) {
- var parts = i.match(/^([a-z]{2,3})-([A-Z][a-z]{3})-([A-Z]{2})$/);
- if (parts !== null) {
- // Build xx-ZZ. We don't care about the actual value,
- // as long as it's not undefined.
- available[parts[1] + '-' + parts[3]] = null;
- }
- }
- }
-
- return available;
-}
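
E.g. 'zh-Hant-TW' matches the pattern with parts[1] === 'zh' and
parts[3] === 'TW', so an extra 'zh-TW' key is added:

  var parts = 'zh-Hant-TW'.match(/^([a-z]{2,3})-([A-Z][a-z]{3})-([A-Z]{2})$/);
  // parts -> ['zh-Hant-TW', 'zh', 'Hant', 'TW'];  available['zh-TW'] = null;
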
-
-
-/**
- * Defines a property and sets writable and enumerable to true.
- * Configurable is false by default.
- */
-function defineWEProperty(object, property, value) {
- Object.defineProperty(object, property,
- {value: value, writable: true, enumerable: true});
-}
-
-
-/**
- * Adds property to an object if the value is not undefined.
- * Sets configurable descriptor to false.
- */
-function addWEPropertyIfDefined(object, property, value) {
- if (value !== undefined) {
- defineWEProperty(object, property, value);
- }
-}
-
-
-/**
- * Defines a property and sets writable, enumerable and configurable to true.
- */
-function defineWECProperty(object, property, value) {
- Object.defineProperty(object, property,
- {value: value,
- writable: true,
- enumerable: true,
- configurable: true});
-}
-
-
-/**
- * Adds property to an object if the value is not undefined.
- * Sets all descriptors to true.
- */
-function addWECPropertyIfDefined(object, property, value) {
- if (value !== undefined) {
- defineWECProperty(object, property, value);
- }
-}
-
-
-/**
- * Returns the titlecased word, e.g. aMeRicA -> America.
- */
-function toTitleCaseWord(word) {
- return word.substr(0, 1).toUpperCase() + word.substr(1).toLowerCase();
-}
diff --git a/deps/v8/src/extensions/i18n/locale.js b/deps/v8/src/extensions/i18n/locale.js
deleted file mode 100644
index e4783277e..000000000
--- a/deps/v8/src/extensions/i18n/locale.js
+++ /dev/null
@@ -1,190 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// ECMAScript 402 API implementation is broken into separate files for
-// each service. The build system combines them together into one
-// Intl namespace.
-
-/**
- * Canonicalizes the language tag, or throws in case the tag is invalid.
- */
-function canonicalizeLanguageTag(localeID) {
- // typeof null is 'object', so we have to do an extra check.
- if (typeof localeID !== 'string' && typeof localeID !== 'object' ||
- localeID === null) {
- throw new TypeError('Language ID should be string or object.');
- }
-
- var localeString = String(localeID);
-
- if (isValidLanguageTag(localeString) === false) {
- throw new RangeError('Invalid language tag: ' + localeString);
- }
-
- // This call will strip -kn but not -kn-true extensions.
- // ICU bug filed: http://bugs.icu-project.org/trac/ticket/9265.
- // TODO(cira): check if -u-kn-true-kc-true-kh-true still throws after
- // upgrade to ICU 4.9.
- var tag = %CanonicalizeLanguageTag(localeString);
- if (tag === 'invalid-tag') {
- throw new RangeError('Invalid language tag: ' + localeString);
- }
-
- return tag;
-}
-
-
-/**
- * Returns an array where all locales are canonicalized and duplicates removed.
- * Throws on locales that are not well-formed BCP 47 tags.
- */
-function initializeLocaleList(locales) {
- var seen = [];
- if (locales === undefined) {
- // Constructor is called without arguments.
- seen = [];
- } else {
- // We allow a single string localeID.
- if (typeof locales === 'string') {
- seen.push(canonicalizeLanguageTag(locales));
- return freezeArray(seen);
- }
-
- var o = toObject(locales);
- // Converts it to UInt32 (>>> is shift-right on 32-bit integers).
- var len = o.length >>> 0;
-
- for (var k = 0; k < len; k++) {
- if (k in o) {
- var value = o[k];
-
- var tag = canonicalizeLanguageTag(value);
-
- if (seen.indexOf(tag) === -1) {
- seen.push(tag);
- }
- }
- }
- }
-
- return freezeArray(seen);
-}
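
The dedupe-and-freeze behaviour in isolation (canonicalization itself goes
through the %CanonicalizeLanguageTag runtime call, so it is stubbed here as
the identity function, and Object.freeze stands in for the per-index
freezeArray above):

  function initializeLocaleListSketch(tags, canonicalize) {
    var seen = [];
    for (var k = 0; k < tags.length; k++) {
      var tag = canonicalize(tags[k]);
      if (seen.indexOf(tag) === -1) seen.push(tag);  // drop duplicates, keep order
    }
    return Object.freeze(seen);
  }

  initializeLocaleListSketch(['de-DE', 'de-DE', 'en-US'],
                             function(t) { return t; });
  // -> frozen ['de-DE', 'en-US']
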
-
-
-/**
- * Validates the language tag. Section 2.2.9 of the BCP 47 spec
- * defines a valid tag.
- *
- * ICU is too permissive and lets invalid tags, like
- * hant-cmn-cn, through.
- *
- * Returns false if the language tag is invalid.
- */
-function isValidLanguageTag(locale) {
- // Check if it's well-formed, including grandfathered tags.
- if (LANGUAGE_TAG_RE.test(locale) === false) {
- return false;
- }
-
- // Just return if it's an x- form. It's all private.
- if (locale.indexOf('x-') === 0) {
- return true;
- }
-
- // Check if there are any duplicate variants or singletons (extensions).
-
- // Remove private use section.
- locale = locale.split(/-x-/)[0];
-
- // Skip the language subtag since it can match the variant regex, so we
- // start from 1. We are matching i-klingon here, but that's OK, since
- // i-klingon-klingon is not valid and would fail the LANGUAGE_TAG_RE test.
- var variants = [];
- var extensions = [];
- var parts = locale.split(/-/);
- for (var i = 1; i < parts.length; i++) {
- var value = parts[i];
- if (LANGUAGE_VARIANT_RE.test(value) === true && extensions.length === 0) {
- if (variants.indexOf(value) === -1) {
- variants.push(value);
- } else {
- return false;
- }
- }
-
- if (LANGUAGE_SINGLETON_RE.test(value) === true) {
- if (extensions.indexOf(value) === -1) {
- extensions.push(value);
- } else {
- return false;
- }
- }
- }
-
- return true;
-}
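
Both tags below are well-formed per LANGUAGE_TAG_RE (the ABNF allows repeated
variants and singletons syntactically), which is exactly why this extra check
exists:

  isValidLanguageTag('de-1996-1996');               // false: duplicate variant
  isValidLanguageTag('en-u-ca-gregory-u-nu-latn');  // false: duplicate singleton 'u'
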
-
-
-/**
- * Builds a regular expression that validates the language tag
- * against bcp47 spec.
- * Uses http://tools.ietf.org/html/bcp47, section 2.1, ABNF.
- * Runs on load and initializes the global REs.
- */
-(function() {
- var alpha = '[a-zA-Z]';
- var digit = '[0-9]';
- var alphanum = '(' + alpha + '|' + digit + ')';
- var regular = '(art-lojban|cel-gaulish|no-bok|no-nyn|zh-guoyu|zh-hakka|' +
- 'zh-min|zh-min-nan|zh-xiang)';
- var irregular = '(en-GB-oed|i-ami|i-bnn|i-default|i-enochian|i-hak|' +
- 'i-klingon|i-lux|i-mingo|i-navajo|i-pwn|i-tao|i-tay|' +
- 'i-tsu|sgn-BE-FR|sgn-BE-NL|sgn-CH-DE)';
- var grandfathered = '(' + irregular + '|' + regular + ')';
- var privateUse = '(x(-' + alphanum + '{1,8})+)';
-
- var singleton = '(' + digit + '|[A-WY-Za-wy-z])';
- LANGUAGE_SINGLETON_RE = new RegExp('^' + singleton + '$', 'i');
-
- var extension = '(' + singleton + '(-' + alphanum + '{2,8})+)';
-
- var variant = '(' + alphanum + '{5,8}|(' + digit + alphanum + '{3}))';
- LANGUAGE_VARIANT_RE = new RegExp('^' + variant + '$', 'i');
-
- var region = '(' + alpha + '{2}|' + digit + '{3})';
- var script = '(' + alpha + '{4})';
- var extLang = '(' + alpha + '{3}(-' + alpha + '{3}){0,2})';
- var language = '(' + alpha + '{2,3}(-' + extLang + ')?|' + alpha + '{4}|' +
- alpha + '{5,8})';
- var langTag = language + '(-' + script + ')?(-' + region + ')?(-' +
- variant + ')*(-' + extension + ')*(-' + privateUse + ')?';
-
- var languageTag =
- '^(' + langTag + '|' + privateUse + '|' + grandfathered + ')$';
- LANGUAGE_TAG_RE = new RegExp(languageTag, 'i');
-})();
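
A few probes of the resulting well-formedness test (validity additionally
requires the duplicate checks in isValidLanguageTag above):

  LANGUAGE_TAG_RE.test('zh-Hant-TW');   // true
  LANGUAGE_TAG_RE.test('i-klingon');    // true: grandfathered irregular tag
  LANGUAGE_TAG_RE.test('hant-cmn-cn');  // false: the case ICU alone lets through
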
diff --git a/deps/v8/src/extensions/i18n/number-format.js b/deps/v8/src/extensions/i18n/number-format.js
deleted file mode 100644
index 5722a5dc1..000000000
--- a/deps/v8/src/extensions/i18n/number-format.js
+++ /dev/null
@@ -1,289 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// ECMAScript 402 API implementation is broken into separate files for
-// each service. The build system combines them together into one
-// Intl namespace.
-
-/**
- * Verifies that the input is a well-formed ISO 4217 currency code.
- * Don't uppercase to test. It could convert an invalid code into a valid one.
- * For example \u00DFP (Eszett+P) becomes SSP.
- */
-function isWellFormedCurrencyCode(currency) {
- return typeof currency == "string" &&
- currency.length == 3 &&
- currency.match(/[^A-Za-z]/) == null;
-}
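
Quick probes, including the case the comment above warns about:

  isWellFormedCurrencyCode('USD');      // true
  isWellFormedCurrencyCode('usd');      // true: only [A-Za-z]{3} is required
  isWellFormedCurrencyCode('\u00DFP');  // false; uppercasing first would
                                        // wrongly turn it into the valid 'SSP'
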
-
-
-/**
- * Returns the valid digit count for a property, or throws RangeError on
- * a value out of range.
- */
-function getNumberOption(options, property, min, max, fallback) {
- var value = options[property];
- if (value !== undefined) {
- value = Number(value);
- if (isNaN(value) || value < min || value > max) {
- throw new RangeError(property + ' value is out of range.');
- }
- return Math.floor(value);
- }
-
- return fallback;
-}
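
For example:

  getNumberOption({maximumFractionDigits: '2.7'},
                  'maximumFractionDigits', 0, 20, 3);  // -> 2 (floored)
  // A value of 25 would throw RangeError; an absent property returns 3.
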
-
-
-/**
- * Initializes the given object so it's a valid NumberFormat instance.
- * Useful for subclassing.
- */
-function initializeNumberFormat(numberFormat, locales, options) {
- if (numberFormat.hasOwnProperty('__initializedIntlObject')) {
- throw new TypeError('Trying to re-initialize NumberFormat object.');
- }
-
- if (options === undefined) {
- options = {};
- }
-
- var getOption = getGetOption(options, 'numberformat');
-
- var locale = resolveLocale('numberformat', locales, options);
-
- var internalOptions = {};
- defineWEProperty(internalOptions, 'style', getOption(
- 'style', 'string', ['decimal', 'percent', 'currency'], 'decimal'));
-
- var currency = getOption('currency', 'string');
- if (currency !== undefined && !isWellFormedCurrencyCode(currency)) {
- throw new RangeError('Invalid currency code: ' + currency);
- }
-
- if (internalOptions.style === 'currency' && currency === undefined) {
- throw new TypeError('Currency code is required with currency style.');
- }
-
- var currencyDisplay = getOption(
- 'currencyDisplay', 'string', ['code', 'symbol', 'name'], 'symbol');
- if (internalOptions.style === 'currency') {
- defineWEProperty(internalOptions, 'currency', currency.toUpperCase());
- defineWEProperty(internalOptions, 'currencyDisplay', currencyDisplay);
- }
-
- // Digit ranges.
- var mnid = getNumberOption(options, 'minimumIntegerDigits', 1, 21, 1);
- defineWEProperty(internalOptions, 'minimumIntegerDigits', mnid);
-
- var mnfd = getNumberOption(options, 'minimumFractionDigits', 0, 20, 0);
- defineWEProperty(internalOptions, 'minimumFractionDigits', mnfd);
-
- var mxfd = getNumberOption(options, 'maximumFractionDigits', mnfd, 20, 3);
- defineWEProperty(internalOptions, 'maximumFractionDigits', mxfd);
-
- var mnsd = options['minimumSignificantDigits'];
- var mxsd = options['maximumSignificantDigits'];
- if (mnsd !== undefined || mxsd !== undefined) {
- mnsd = getNumberOption(options, 'minimumSignificantDigits', 1, 21, 0);
- defineWEProperty(internalOptions, 'minimumSignificantDigits', mnsd);
-
- mxsd = getNumberOption(options, 'maximumSignificantDigits', mnsd, 21, 21);
- defineWEProperty(internalOptions, 'maximumSignificantDigits', mxsd);
- }
-
- // Grouping.
- defineWEProperty(internalOptions, 'useGrouping', getOption(
- 'useGrouping', 'boolean', undefined, true));
-
- // ICU prefers options to be passed using -u- extension key/values for
- // number format, so we need to build that.
- var extensionMap = parseExtension(locale.extension);
- var extension = setOptions(options, extensionMap, NUMBER_FORMAT_KEY_MAP,
- getOption, internalOptions);
-
- var requestedLocale = locale.locale + extension;
- var resolved = Object.defineProperties({}, {
- currency: {writable: true},
- currencyDisplay: {writable: true},
- locale: {writable: true},
- maximumFractionDigits: {writable: true},
- minimumFractionDigits: {writable: true},
- minimumIntegerDigits: {writable: true},
- numberingSystem: {writable: true},
- requestedLocale: {value: requestedLocale, writable: true},
- style: {value: internalOptions.style, writable: true},
- useGrouping: {writable: true}
- });
- if (internalOptions.hasOwnProperty('minimumSignificantDigits')) {
- defineWEProperty(resolved, 'minimumSignificantDigits', undefined);
- }
- if (internalOptions.hasOwnProperty('maximumSignificantDigits')) {
- defineWEProperty(resolved, 'maximumSignificantDigits', undefined);
- }
- var formatter = %CreateNumberFormat(requestedLocale,
- internalOptions,
- resolved);
-
- // We can't get information about number or currency style from ICU, so we
- // assume user request was fulfilled.
- if (internalOptions.style === 'currency') {
- Object.defineProperty(resolved, 'currencyDisplay', {value: currencyDisplay,
- writable: true});
- }
-
- Object.defineProperty(numberFormat, 'formatter', {value: formatter});
- Object.defineProperty(numberFormat, 'resolved', {value: resolved});
- Object.defineProperty(numberFormat, '__initializedIntlObject',
- {value: 'numberformat'});
-
- return numberFormat;
-}
-
-
-/**
- * Constructs Intl.NumberFormat object given optional locales and options
- * parameters.
- *
- * @constructor
- */
-%SetProperty(Intl, 'NumberFormat', function() {
- var locales = arguments[0];
- var options = arguments[1];
-
- if (!this || this === Intl) {
- // Constructor is called as a function.
- return new Intl.NumberFormat(locales, options);
- }
-
- return initializeNumberFormat(toObject(this), locales, options);
- },
- ATTRIBUTES.DONT_ENUM
-);
-
-
-/**
- * NumberFormat resolvedOptions method.
- */
-%SetProperty(Intl.NumberFormat.prototype, 'resolvedOptions', function() {
- if (%_IsConstructCall()) {
- throw new TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
- }
-
- if (!this || typeof this !== 'object' ||
- this.__initializedIntlObject !== 'numberformat') {
- throw new TypeError('resolvedOptions method called on a non-object' +
- ' or on an object that is not Intl.NumberFormat.');
- }
-
- var format = this;
- var locale = getOptimalLanguageTag(format.resolved.requestedLocale,
- format.resolved.locale);
-
- var result = {
- locale: locale,
- numberingSystem: format.resolved.numberingSystem,
- style: format.resolved.style,
- useGrouping: format.resolved.useGrouping,
- minimumIntegerDigits: format.resolved.minimumIntegerDigits,
- minimumFractionDigits: format.resolved.minimumFractionDigits,
- maximumFractionDigits: format.resolved.maximumFractionDigits,
- };
-
- if (result.style === 'currency') {
- defineWECProperty(result, 'currency', format.resolved.currency);
- defineWECProperty(result, 'currencyDisplay',
- format.resolved.currencyDisplay);
- }
-
- if (format.resolved.hasOwnProperty('minimumSignificantDigits')) {
- defineWECProperty(result, 'minimumSignificantDigits',
- format.resolved.minimumSignificantDigits);
- }
-
- if (format.resolved.hasOwnProperty('maximumSignificantDigits')) {
- defineWECProperty(result, 'maximumSignificantDigits',
- format.resolved.maximumSignificantDigits);
- }
-
- return result;
- },
- ATTRIBUTES.DONT_ENUM
-);
-%FunctionSetName(Intl.NumberFormat.prototype.resolvedOptions,
- 'resolvedOptions');
-%FunctionRemovePrototype(Intl.NumberFormat.prototype.resolvedOptions);
-%SetNativeFlag(Intl.NumberFormat.prototype.resolvedOptions);
-
-
-/**
- * Returns the subset of the given locale list for which this locale list
- * has a matching (possibly fallback) locale. Locales appear in the same
- * order in the returned list as in the input list.
- * The options argument is optional.
- */
-%SetProperty(Intl.NumberFormat, 'supportedLocalesOf', function(locales) {
- if (%_IsConstructCall()) {
- throw new TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
- }
-
- return supportedLocalesOf('numberformat', locales, arguments[1]);
- },
- ATTRIBUTES.DONT_ENUM
-);
-%FunctionSetName(Intl.NumberFormat.supportedLocalesOf, 'supportedLocalesOf');
-%FunctionRemovePrototype(Intl.NumberFormat.supportedLocalesOf);
-%SetNativeFlag(Intl.NumberFormat.supportedLocalesOf);
-
-
-/**
- * Returns a String value representing the result of calling ToNumber(value)
- * according to the effective locale and the formatting options of this
- * NumberFormat.
- */
-function formatNumber(formatter, value) {
- // Spec treats -0 and +0 as 0.
- var number = Number(value);
- if (number === -0) {
- number = 0;
- }
-
- return %InternalNumberFormat(formatter.formatter, number);
-}
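
A note on the zero check above: in JavaScript 0 === -0, so the branch also
(harmlessly) matches +0. A strict negative-zero test needs the sign of the
reciprocal:

  function isNegativeZero(x) {
    return x === 0 && 1 / x === -Infinity;  // 1 / -0 -> -Infinity
  }
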
-
-
-/**
- * Returns a Number that represents the string value that was passed in.
- */
-function parseNumber(formatter, value) {
- return %InternalNumberParse(formatter.formatter, String(value));
-}
-
-
-addBoundMethod(Intl.NumberFormat, 'format', formatNumber, 1);
-addBoundMethod(Intl.NumberFormat, 'v8Parse', parseNumber, 1);
diff --git a/deps/v8/src/extensions/i18n/overrides.js b/deps/v8/src/extensions/i18n/overrides.js
deleted file mode 100644
index b2d60b3fc..000000000
--- a/deps/v8/src/extensions/i18n/overrides.js
+++ /dev/null
@@ -1,220 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// ECMAScript 402 API implementation is broken into separate files for
-// each service. The build system combines them together into one
-// Intl namespace.
-
-
-// Save references to Intl objects and methods we use, for added security.
-var savedObjects = {
- 'collator': Intl.Collator,
- 'numberformat': Intl.NumberFormat,
- 'dateformatall': Intl.DateTimeFormat,
- 'dateformatdate': Intl.DateTimeFormat,
- 'dateformattime': Intl.DateTimeFormat
-};
-
-
-// Default (created with undefined locales and options parameters) collator,
-// number and date format instances. They'll be created as needed.
-var defaultObjects = {
- 'collator': undefined,
- 'numberformat': undefined,
- 'dateformatall': undefined,
- 'dateformatdate': undefined,
- 'dateformattime': undefined,
-};
-
-
-/**
- * Returns a cached or newly created instance of a given service.
- * We cache only default instances (where no locales or options are provided).
- */
-function cachedOrNewService(service, locales, options, defaults) {
- var useOptions = (defaults === undefined) ? options : defaults;
- if (locales === undefined && options === undefined) {
- if (defaultObjects[service] === undefined) {
- defaultObjects[service] = new savedObjects[service](locales, useOptions);
- }
- return defaultObjects[service];
- }
- return new savedObjects[service](locales, useOptions);
-}
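
Usage sketch: only the no-argument path hits the cache.

  var a = cachedOrNewService('collator', undefined, undefined);
  var b = cachedOrNewService('collator', undefined, undefined);  // a === b
  var c = cachedOrNewService('collator', ['de'], undefined);     // fresh instance
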
-
-
-/**
- * Compares this and that, and returns a value less than, equal to, or greater than 0.
- * Overrides the built-in method.
- */
-Object.defineProperty(String.prototype, 'localeCompare', {
- value: function(that) {
- if (%_IsConstructCall()) {
- throw new TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
- }
-
- if (this === undefined || this === null) {
- throw new TypeError('Method invoked on undefined or null value.');
- }
-
- var locales = arguments[1];
- var options = arguments[2];
- var collator = cachedOrNewService('collator', locales, options);
- return compare(collator, this, that);
- },
- writable: true,
- configurable: true,
- enumerable: false
-});
-%FunctionSetName(String.prototype.localeCompare, 'localeCompare');
-%FunctionRemovePrototype(String.prototype.localeCompare);
-%SetNativeFlag(String.prototype.localeCompare);
-
-
-/**
- * Formats a Number object (this) using locale and options values.
- * If locale or options are omitted, defaults are used.
- */
-Object.defineProperty(Number.prototype, 'toLocaleString', {
- value: function() {
- if (%_IsConstructCall()) {
- throw new TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
- }
-
- if (!(this instanceof Number) && typeof(this) !== 'number') {
- throw new TypeError('Method invoked on an object that is not Number.');
- }
-
- var locales = arguments[0];
- var options = arguments[1];
- var numberFormat = cachedOrNewService('numberformat', locales, options);
- return formatNumber(numberFormat, this);
- },
- writable: true,
- configurable: true,
- enumerable: false
-});
-%FunctionSetName(Number.prototype.toLocaleString, 'toLocaleString');
-%FunctionRemovePrototype(Number.prototype.toLocaleString);
-%SetNativeFlag(Number.prototype.toLocaleString);
-
-
-/**
- * Returns the formatted date. Throws if the date parameter is not a Date;
- * NaN dates yield 'Invalid Date'.
- */
-function toLocaleDateTime(date, locales, options, required, defaults, service) {
- if (!(date instanceof Date)) {
- throw new TypeError('Method invoked on an object that is not Date.');
- }
-
- if (isNaN(date)) {
- return 'Invalid Date';
- }
-
- var internalOptions = toDateTimeOptions(options, required, defaults);
-
- var dateFormat =
- cachedOrNewService(service, locales, options, internalOptions);
-
- return formatDate(dateFormat, date);
-}
-
-
-/**
- * Formats a Date object (this) using locale and options values.
- * If locale or options are omitted, defaults are used - both date and time are
- * present in the output.
- */
-Object.defineProperty(Date.prototype, 'toLocaleString', {
- value: function() {
- if (%_IsConstructCall()) {
- throw new TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
- }
-
- var locales = arguments[0];
- var options = arguments[1];
- return toLocaleDateTime(
- this, locales, options, 'any', 'all', 'dateformatall');
- },
- writable: true,
- configurable: true,
- enumerable: false
-});
-%FunctionSetName(Date.prototype.toLocaleString, 'toLocaleString');
-%FunctionRemovePrototype(Date.prototype.toLocaleString);
-%SetNativeFlag(Date.prototype.toLocaleString);
-
-
-/**
- * Formats a Date object (this) using locale and options values.
- * If locale or options are omitted, defaults are used - only date is present
- * in the output.
- */
-Object.defineProperty(Date.prototype, 'toLocaleDateString', {
- value: function() {
- if (%_IsConstructCall()) {
- throw new TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
- }
-
- var locales = arguments[0];
- var options = arguments[1];
- return toLocaleDateTime(
- this, locales, options, 'date', 'date', 'dateformatdate');
- },
- writable: true,
- configurable: true,
- enumerable: false
-});
-%FunctionSetName(Date.prototype.toLocaleDateString, 'toLocaleDateString');
-%FunctionRemovePrototype(Date.prototype.toLocaleDateString);
-%SetNativeFlag(Date.prototype.toLocaleDateString);
-
-
-/**
- * Formats a Date object (this) using locale and options values.
- * If locale or options are omitted, defaults are used - only time is present
- * in the output.
- */
-Object.defineProperty(Date.prototype, 'toLocaleTimeString', {
- value: function() {
- if (%_IsConstructCall()) {
- throw new TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
- }
-
- var locales = arguments[0];
- var options = arguments[1];
- return toLocaleDateTime(
- this, locales, options, 'time', 'time', 'dateformattime');
- },
- writable: true,
- configurable: true,
- enumerable: false
-});
-%FunctionSetName(Date.prototype.toLocaleTimeString, 'toLocaleTimeString');
-%FunctionRemovePrototype(Date.prototype.toLocaleTimeString);
-%SetNativeFlag(Date.prototype.toLocaleTimeString);
diff --git a/deps/v8/src/extensions/statistics-extension.cc b/deps/v8/src/extensions/statistics-extension.cc
index 32bc07de8..651d99d45 100644
--- a/deps/v8/src/extensions/statistics-extension.cc
+++ b/deps/v8/src/extensions/statistics-extension.cc
@@ -60,7 +60,7 @@ static void AddNumber(v8::Local<v8::Object> object,
void StatisticsExtension::GetCounters(
const v8::FunctionCallbackInfo<v8::Value>& args) {
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = reinterpret_cast<Isolate*>(args.GetIsolate());
Heap* heap = isolate->heap();
if (args.Length() > 0) { // GC if first argument evaluates to true.
diff --git a/deps/v8/src/factory.cc b/deps/v8/src/factory.cc
index 9323c2f1f..acbaf3c86 100644
--- a/deps/v8/src/factory.cc
+++ b/deps/v8/src/factory.cc
@@ -130,7 +130,8 @@ Handle<DescriptorArray> Factory::NewDescriptorArray(int number_of_descriptors,
int slack) {
ASSERT(0 <= number_of_descriptors);
CALL_HEAP_FUNCTION(isolate(),
- DescriptorArray::Allocate(number_of_descriptors, slack),
+ DescriptorArray::Allocate(
+ isolate(), number_of_descriptors, slack),
DescriptorArray);
}
@@ -140,7 +141,8 @@ Handle<DeoptimizationInputData> Factory::NewDeoptimizationInputData(
PretenureFlag pretenure) {
ASSERT(deopt_entry_count > 0);
CALL_HEAP_FUNCTION(isolate(),
- DeoptimizationInputData::Allocate(deopt_entry_count,
+ DeoptimizationInputData::Allocate(isolate(),
+ deopt_entry_count,
pretenure),
DeoptimizationInputData);
}
@@ -151,7 +153,8 @@ Handle<DeoptimizationOutputData> Factory::NewDeoptimizationOutputData(
PretenureFlag pretenure) {
ASSERT(deopt_entry_count > 0);
CALL_HEAP_FUNCTION(isolate(),
- DeoptimizationOutputData::Allocate(deopt_entry_count,
+ DeoptimizationOutputData::Allocate(isolate(),
+ deopt_entry_count,
pretenure),
DeoptimizationOutputData);
}
@@ -664,7 +667,7 @@ Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
return result;
}
- if (V8::UseCrankshaft() &&
+ if (isolate()->use_crankshaft() &&
FLAG_always_opt &&
result->is_compiled() &&
!function_info->is_toplevel() &&
@@ -806,7 +809,7 @@ Handle<String> Factory::EmergencyNewError(const char* message,
*p++ = ' ';
space--;
if (space > 0) {
- MaybeObject* maybe_arg = args->GetElement(i);
+ MaybeObject* maybe_arg = args->GetElement(isolate(), i);
Handle<String> arg_str(reinterpret_cast<String*>(maybe_arg));
const char* arg = *arg_str->ToCString();
Vector<char> v2(p, static_cast<int>(space));
@@ -1080,13 +1083,6 @@ void Factory::SetContent(Handle<JSArray> array,
}
-void Factory::EnsureCanContainHeapObjectElements(Handle<JSArray> array) {
- CALL_HEAP_FUNCTION_VOID(
- isolate(),
- array->EnsureCanContainHeapObjectElements());
-}
-
-
void Factory::EnsureCanContainElements(Handle<JSArray> array,
Handle<FixedArrayBase> elements,
uint32_t length,
@@ -1190,13 +1186,6 @@ void Factory::BecomeJSFunction(Handle<JSReceiver> object) {
}
-void Factory::SetIdentityHash(Handle<JSObject> object, Smi* hash) {
- CALL_HEAP_FUNCTION_VOID(
- isolate(),
- object->SetIdentityHash(hash, ALLOW_CREATION));
-}
-
-
Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
Handle<String> name,
int number_of_literals,
@@ -1328,7 +1317,7 @@ Handle<JSFunction> Factory::NewFunctionWithoutPrototype(
Handle<Object> Factory::ToObject(Handle<Object> object) {
- CALL_HEAP_FUNCTION(isolate(), object->ToObject(), Object);
+ CALL_HEAP_FUNCTION(isolate(), object->ToObject(isolate()), Object);
}
@@ -1467,15 +1456,29 @@ Handle<JSFunction> Factory::CreateApiFunction(
result->shared()->set_construct_stub(*construct_stub);
result->shared()->DontAdaptArguments();
- // Recursively copy parent templates' accessors, 'data' may be modified.
+ // Recursively copy parent instance templates' accessors;
+ // 'data' may be modified.
int max_number_of_additional_properties = 0;
+ int max_number_of_static_properties = 0;
FunctionTemplateInfo* info = *obj;
while (true) {
- Object* props = info->property_accessors();
- if (!props->IsUndefined()) {
- Handle<Object> props_handle(props, isolate());
- NeanderArray props_array(props_handle);
- max_number_of_additional_properties += props_array.length();
+ if (!info->instance_template()->IsUndefined()) {
+ Object* props =
+ ObjectTemplateInfo::cast(
+ info->instance_template())->property_accessors();
+ if (!props->IsUndefined()) {
+ Handle<Object> props_handle(props, isolate());
+ NeanderArray props_array(props_handle);
+ max_number_of_additional_properties += props_array.length();
+ }
+ }
+ if (!info->property_accessors()->IsUndefined()) {
+ Object* props = info->property_accessors();
+ if (!props->IsUndefined()) {
+ Handle<Object> props_handle(props, isolate());
+ NeanderArray props_array(props_handle);
+ max_number_of_static_properties += props_array.length();
+ }
}
Object* parent = info->parent_template();
if (parent->IsUndefined()) break;
@@ -1484,17 +1487,44 @@ Handle<JSFunction> Factory::CreateApiFunction(
Map::EnsureDescriptorSlack(map, max_number_of_additional_properties);
+ // Use a temporary FixedArray to accumulate static accessors
+ int valid_descriptors = 0;
+ Handle<FixedArray> array;
+ if (max_number_of_static_properties > 0) {
+ array = NewFixedArray(max_number_of_static_properties);
+ }
+
while (true) {
- Handle<Object> props = Handle<Object>(obj->property_accessors(),
- isolate());
- if (!props->IsUndefined()) {
- Map::AppendCallbackDescriptors(map, props);
+ // Install instance descriptors
+ if (!obj->instance_template()->IsUndefined()) {
+ Handle<ObjectTemplateInfo> instance =
+ Handle<ObjectTemplateInfo>(
+ ObjectTemplateInfo::cast(obj->instance_template()), isolate());
+ Handle<Object> props = Handle<Object>(instance->property_accessors(),
+ isolate());
+ if (!props->IsUndefined()) {
+ Map::AppendCallbackDescriptors(map, props);
+ }
+ }
+ // Accumulate static accessors
+ if (!obj->property_accessors()->IsUndefined()) {
+ Handle<Object> props = Handle<Object>(obj->property_accessors(),
+ isolate());
+ valid_descriptors =
+ AccessorInfo::AppendUnique(props, array, valid_descriptors);
}
+ // Climb parent chain
Handle<Object> parent = Handle<Object>(obj->parent_template(), isolate());
if (parent->IsUndefined()) break;
obj = Handle<FunctionTemplateInfo>::cast(parent);
}
+ // Install accumulated static accessors
+ for (int i = 0; i < valid_descriptors; i++) {
+ Handle<AccessorInfo> accessor(AccessorInfo::cast(array->get(i)));
+ JSObject::SetAccessor(result, accessor);
+ }
+
ASSERT(result->shared()->IsApiFunction());
return result;
}
@@ -1593,7 +1623,8 @@ void Factory::ConfigureInstance(Handle<FunctionTemplateInfo> desc,
// instance template.
Handle<Object> instance_template(desc->instance_template(), isolate());
if (!instance_template->IsUndefined()) {
- Execution::ConfigureInstance(instance,
+ Execution::ConfigureInstance(isolate(),
+ instance,
instance_template,
pending_exception);
} else {
diff --git a/deps/v8/src/factory.h b/deps/v8/src/factory.h
index 02c9a4d2e..1bdf47433 100644
--- a/deps/v8/src/factory.h
+++ b/deps/v8/src/factory.h
@@ -328,7 +328,6 @@ class Factory {
void SetContent(Handle<JSArray> array, Handle<FixedArrayBase> elements);
- void EnsureCanContainHeapObjectElements(Handle<JSArray> array);
void EnsureCanContainElements(Handle<JSArray> array,
Handle<FixedArrayBase> elements,
uint32_t length,
@@ -346,8 +345,6 @@ class Factory {
void BecomeJSObject(Handle<JSReceiver> object);
void BecomeJSFunction(Handle<JSReceiver> object);
- void SetIdentityHash(Handle<JSObject> object, Smi* hash);
-
Handle<JSFunction> NewFunction(Handle<String> name,
Handle<Object> prototype);
@@ -642,6 +639,24 @@ class IdempotentPointerToHandleCodeTrampoline {
: reinterpret_cast<MaybeObject*>(Failure::RetryAfterGC());
}
+ template<typename R, typename P1, typename P2, typename P3, typename P4,
+ typename P5, typename P6, typename P7>
+ MUST_USE_RESULT MaybeObject* CallWithReturnValue(
+ R (*function)(P1, P2, P3, P4, P5, P6, P7),
+ P1 p1,
+ P2 p2,
+ P3 p3,
+ P4 p4,
+ P5 p5,
+ P6 p6,
+ P7 p7) {
+ int collections = isolate_->heap()->gc_count();
+ Handle<Object> result = (*function)(p1, p2, p3, p4, p5, p6, p7);
+ return (collections == isolate_->heap()->gc_count())
+ ? *result
+ : reinterpret_cast<MaybeObject*>(Failure::RetryAfterGC());
+ }
+
private:
Isolate* isolate_;
};
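
The new seven-argument overload follows the same idempotence trick as its siblings: record the heap's GC count, run the handle-based callee, and return RetryAfterGC if a collection happened in between, since any raw pointer derived during the call may have moved. A compact sketch of the pattern, with Heap and gc_count as stand-ins rather than V8's real API:

    #include <cstdio>

    struct Heap { int gc_count = 0; };

    enum class Status { kOk, kRetryAfterGC };

    // Run a handle-based callee; if a GC ran meanwhile, raw pointers derived
    // during the call may be stale, so report retry instead of returning them.
    template <typename F>
    Status CallIdempotent(Heap* heap, F f) {
      int collections = heap->gc_count;
      f();
      return collections == heap->gc_count ? Status::kOk
                                           : Status::kRetryAfterGC;
    }

    int main() {
      Heap heap;
      Status s = CallIdempotent(&heap, [&] { heap.gc_count++; /* a GC ran */ });
      std::printf("%s\n", s == Status::kOk ? "ok" : "retry after gc");
    }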
diff --git a/deps/v8/src/flag-definitions.h b/deps/v8/src/flag-definitions.h
index c0ad4a8e1..08cd8304e 100644
--- a/deps/v8/src/flag-definitions.h
+++ b/deps/v8/src/flag-definitions.h
@@ -41,15 +41,12 @@
extern ctype FLAG_##nam;
#define FLAG_READONLY(ftype, ctype, nam, def, cmt) \
static ctype const FLAG_##nam = def;
-#define DEFINE_implication(whenflag, thenflag)
// We want to supply the actual storage and value for the flag variable in the
// .cc file. We only do this for writable flags.
#elif defined(FLAG_MODE_DEFINE)
#define FLAG_FULL(ftype, ctype, nam, def, cmt) \
ctype FLAG_##nam = def;
-#define FLAG_READONLY(ftype, ctype, nam, def, cmt)
-#define DEFINE_implication(whenflag, thenflag)
// We need to define all of our default values so that the Flag structure can
// access them by pointer. These are just used internally inside of one .cc,
@@ -57,21 +54,18 @@
#elif defined(FLAG_MODE_DEFINE_DEFAULTS)
#define FLAG_FULL(ftype, ctype, nam, def, cmt) \
static ctype const FLAGDEFAULT_##nam = def;
-#define FLAG_READONLY(ftype, ctype, nam, def, cmt)
-#define DEFINE_implication(whenflag, thenflag)
// We want to write entries into our meta data table, for internal parsing and
// printing / etc in the flag parser code. We only do this for writable flags.
#elif defined(FLAG_MODE_META)
#define FLAG_FULL(ftype, ctype, nam, def, cmt) \
{ Flag::TYPE_##ftype, #nam, &FLAG_##nam, &FLAGDEFAULT_##nam, cmt, false },
-#define FLAG_READONLY(ftype, ctype, nam, def, cmt)
-#define DEFINE_implication(whenflag, thenflag)
+#define FLAG_ALIAS(ftype, ctype, alias, nam) \
+ { Flag::TYPE_##ftype, #alias, &FLAG_##nam, &FLAGDEFAULT_##nam, \
+ "alias for --"#nam, false },
// We produce the code to set flags when it is implied by another flag.
#elif defined(FLAG_MODE_DEFINE_IMPLICATIONS)
-#define FLAG_FULL(ftype, ctype, nam, def, cmt)
-#define FLAG_READONLY(ftype, ctype, nam, def, cmt)
#define DEFINE_implication(whenflag, thenflag) \
if (FLAG_##whenflag) FLAG_##thenflag = true;
@@ -79,6 +73,24 @@
#error No mode supplied when including flags.defs
#endif
+// Dummy defines for modes where it is not relevant.
+#ifndef FLAG_FULL
+#define FLAG_FULL(ftype, ctype, nam, def, cmt)
+#endif
+
+#ifndef FLAG_READONLY
+#define FLAG_READONLY(ftype, ctype, nam, def, cmt)
+#endif
+
+#ifndef FLAG_ALIAS
+#define FLAG_ALIAS(ftype, ctype, alias, nam)
+#endif
+
+#ifndef DEFINE_implication
+#define DEFINE_implication(whenflag, thenflag)
+#endif
+
+
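+// The #ifndef block above replaces the per-mode empty defines that the
+// earlier hunks delete: each FLAG_MODE_* branch now defines only the macros
+// it actually uses, and this single fallback section supplies no-op
+// expansions for the rest. A toy illustration of the pattern (macro names
+// invented for the example):
+//
+//   // Pretend the selected mode defined only HANDLE_FLAG.
+//   #define HANDLE_FLAG(name, def) std::printf(#name " defaults to %d\n", def);
+//
+//   // Centralized no-op fallbacks for whatever the mode left undefined.
+//   #ifndef HANDLE_FLAG
+//   #define HANDLE_FLAG(name, def)
+//   #endif
+//   #ifndef HANDLE_ALIAS
+//   #define HANDLE_ALIAS(alias, name)
+//   #endif
+//
+//   int main() {
+//     HANDLE_FLAG(gc_greedy, 0)    // expands to a printf in this mode
+//     HANDLE_ALIAS(gc, gc_greedy)  // expands to nothing: no mode defined it
+//   }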
#ifdef FLAG_MODE_DECLARE
// Structure used to hold a collection of arguments to the JavaScript code.
#define JSARGUMENTS_INIT {{}}
@@ -135,11 +147,18 @@ public:
# define ENABLE_32DREGS_DEFAULT false
#endif
-#define DEFINE_bool(nam, def, cmt) FLAG(BOOL, bool, nam, def, cmt)
-#define DEFINE_int(nam, def, cmt) FLAG(INT, int, nam, def, cmt)
-#define DEFINE_float(nam, def, cmt) FLAG(FLOAT, double, nam, def, cmt)
+#define DEFINE_bool(nam, def, cmt) FLAG(BOOL, bool, nam, def, cmt)
+#define DEFINE_int(nam, def, cmt) FLAG(INT, int, nam, def, cmt)
+#define DEFINE_float(nam, def, cmt) FLAG(FLOAT, double, nam, def, cmt)
#define DEFINE_string(nam, def, cmt) FLAG(STRING, const char*, nam, def, cmt)
-#define DEFINE_args(nam, def, cmt) FLAG(ARGS, JSArguments, nam, def, cmt)
+#define DEFINE_args(nam, def, cmt) FLAG(ARGS, JSArguments, nam, def, cmt)
+
+#define DEFINE_ALIAS_bool(alias, nam) FLAG_ALIAS(BOOL, bool, alias, nam)
+#define DEFINE_ALIAS_int(alias, nam) FLAG_ALIAS(INT, int, alias, nam)
+#define DEFINE_ALIAS_float(alias, nam) FLAG_ALIAS(FLOAT, double, alias, nam)
+#define DEFINE_ALIAS_string(alias, nam) \
+ FLAG_ALIAS(STRING, const char*, alias, nam)
+#define DEFINE_ALIAS_args(alias, nam) FLAG_ALIAS(ARGS, JSArguments, alias, nam)
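
A DEFINE_ALIAS_* entry expands (in FLAG_MODE_META) to a table row whose storage pointer is the canonical flag's variable, so writing through either name updates the same value. A hedged sketch of that shared-storage idea, reusing the concurrent/parallel rename from this patch but with an invented table layout:

    #include <cstring>
    #include <iostream>

    static bool FLAG_concurrent_recompilation = true;

    struct Flag { const char* name; bool* storage; };

    // Both rows point at the same variable, like FLAG_ALIAS pointing the
    // alias entry at &FLAG_##nam in the macro above.
    static Flag flags[] = {
      {"concurrent_recompilation", &FLAG_concurrent_recompilation},
      {"parallel_recompilation",   &FLAG_concurrent_recompilation},  // alias
    };

    int main() {
      for (Flag& f : flags)
        if (std::strcmp(f.name, "parallel_recompilation") == 0)
          *f.storage = false;  // set via the alias name
      std::cout << FLAG_concurrent_recompilation << '\n';  // prints 0
    }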
//
// Flags in all modes.
@@ -164,9 +183,9 @@ DEFINE_bool(harmony_collections, false,
"enable harmony collections (sets, maps, and weak maps)")
DEFINE_bool(harmony_observation, false,
"enable harmony object observation (implies harmony collections")
-DEFINE_bool(harmony_typed_arrays, false,
+DEFINE_bool(harmony_typed_arrays, true,
"enable harmony typed arrays")
-DEFINE_bool(harmony_array_buffer, false,
+DEFINE_bool(harmony_array_buffer, true,
"enable harmony array buffer")
DEFINE_implication(harmony_typed_arrays, harmony_array_buffer)
DEFINE_bool(harmony_generators, false, "enable harmony generators")
@@ -194,8 +213,6 @@ DEFINE_implication(harmony_observation, harmony_collections)
// Flags for experimental implementation features.
DEFINE_bool(packed_arrays, true, "optimizes arrays that have no holes")
DEFINE_bool(smi_only_arrays, true, "tracks arrays with only smi values")
-DEFINE_bool(compiled_keyed_stores, true, "use optimizing compiler to "
- "generate keyed store stubs")
DEFINE_bool(clever_optimizations,
true,
"Optimize object size, Array shift, DOM strings and string +")
@@ -239,6 +256,7 @@ DEFINE_bool(collect_megamorphic_maps_from_stub_cache,
"crankshaft harvests type feedback from stub cache")
DEFINE_bool(hydrogen_stats, false, "print statistics for hydrogen")
DEFINE_bool(trace_hydrogen, false, "trace generated hydrogen to file")
+DEFINE_string(trace_hydrogen_filter, "*", "hydrogen tracing filter")
DEFINE_bool(trace_hydrogen_stubs, false, "trace generated hydrogen for stubs")
DEFINE_string(trace_hydrogen_file, NULL, "trace hydrogen to given file name")
DEFINE_string(trace_phase, "HLZ", "trace generated IR for specified phases")
@@ -305,13 +323,17 @@ DEFINE_bool(opt_safe_uint32_operations, true,
"allow uint32 values on optimize frames if they are used only in "
"safe operations")
-DEFINE_bool(parallel_recompilation, true,
+DEFINE_bool(concurrent_recompilation, true,
"optimizing hot functions asynchronously on a separate thread")
-DEFINE_bool(trace_parallel_recompilation, false, "track parallel recompilation")
-DEFINE_int(parallel_recompilation_queue_length, 8,
- "the length of the parallel compilation queue")
-DEFINE_int(parallel_recompilation_delay, 0,
+DEFINE_bool(trace_concurrent_recompilation, false,
+ "track concurrent recompilation")
+DEFINE_int(concurrent_recompilation_queue_length, 8,
+ "the length of the concurrent compilation queue")
+DEFINE_int(concurrent_recompilation_delay, 0,
"artificial compilation delay in ms")
+DEFINE_bool(concurrent_osr, false,
+ "concurrent on-stack replacement")
+
DEFINE_bool(omit_map_checks_for_leaf_maps, true,
"do not emit check maps for constant values that have a leaf map, "
"deoptimize the optimized code if the layout of the maps changes.")
@@ -358,8 +380,6 @@ DEFINE_bool(enable_sse4_1, true,
"enable use of SSE4.1 instructions if available")
DEFINE_bool(enable_cmov, true,
"enable use of CMOV instruction if available")
-DEFINE_bool(enable_rdtsc, true,
- "enable use of RDTSC instruction if available")
DEFINE_bool(enable_sahf, true,
"enable use of SAHF instruction if available (X64 only)")
DEFINE_bool(enable_vfp3, ENABLE_VFP3_DEFAULT,
@@ -381,7 +401,6 @@ DEFINE_bool(enable_vldr_imm, false,
"enable use of constant pools for double immediate (ARM only)")
// bootstrapper.cc
-DEFINE_bool(enable_i18n, true, "enable i18n extension")
DEFINE_string(expose_natives_as, NULL, "expose natives in global object")
DEFINE_string(expose_debug_as, NULL, "expose debug in global object")
DEFINE_bool(expose_gc, false, "expose gc extension")
@@ -436,6 +455,10 @@ DEFINE_bool(compilation_cache, true, "enable compilation cache")
DEFINE_bool(cache_prototype_transitions, true, "cache prototype transitions")
+// cpu-profiler.cc
+DEFINE_int(cpu_profiler_sampling_interval, 1000,
+ "CPU profiler sampling interval in microseconds")
+
// debug.cc
DEFINE_bool(trace_debug_json, false, "trace debugging JSON request/response")
DEFINE_bool(trace_js_array_abuse, false,
@@ -521,6 +544,7 @@ DEFINE_bool(use_idle_notification, true,
"Use idle notification to reduce memory footprint.")
// ic.cc
DEFINE_bool(use_ic, true, "use inline caching")
+DEFINE_bool(js_accessor_ics, false, "create ics for js accessors")
// macro-assembler-ia32.cc
DEFINE_bool(native_code_counters, false,
@@ -590,7 +614,7 @@ DEFINE_int(testing_int_flag, 13, "testing_int_flag")
DEFINE_float(testing_float_flag, 2.5, "float-flag")
DEFINE_string(testing_string_flag, "Hello, world!", "string-flag")
DEFINE_int(testing_prng_seed, 42, "Seed used for threading test randomness")
-#ifdef WIN32
+#ifdef _WIN32
DEFINE_string(testing_serialization_file, "C:\\Windows\\Temp\\serdes",
"file in which to testing_serialize heap")
#else
@@ -665,13 +689,14 @@ DEFINE_bool(stress_compaction, false,
DEFINE_bool(enable_slow_asserts, false,
"enable asserts that are slow to execute")
-// codegen-ia32.cc / codegen-arm.cc
+// codegen-ia32.cc / codegen-arm.cc / macro-assembler-*.cc
DEFINE_bool(print_source, false, "pretty print source code")
DEFINE_bool(print_builtin_source, false,
"pretty print source code for builtins")
DEFINE_bool(print_ast, false, "print source AST")
DEFINE_bool(print_builtin_ast, false, "print source AST for builtins")
DEFINE_string(stop_at, "", "function name where to insert a breakpoint")
+DEFINE_bool(trap_on_abort, false, "replace aborts by breakpoints")
// compiler.cc
DEFINE_bool(print_builtin_scopes, false, "print scopes for builtins")
@@ -744,9 +769,6 @@ DEFINE_bool(log_snapshot_positions, false,
DEFINE_bool(log_suspect, false, "Log suspect operations.")
DEFINE_bool(prof, false,
"Log statistical profiling information (implies --log-code).")
-DEFINE_bool(prof_lazy, false,
- "Used with --prof, only does sampling and logging"
- " when profiler is active.")
DEFINE_bool(prof_browser_mode, true,
"Used with --prof, turns on browser-compatible mode for profiling.")
DEFINE_bool(log_regexp, false, "Log regular expression execution.")
@@ -809,11 +831,19 @@ DEFINE_implication(print_all_code, trace_codegen)
#undef FLAG_FULL
#undef FLAG_READONLY
#undef FLAG
+#undef FLAG_ALIAS
#undef DEFINE_bool
#undef DEFINE_int
#undef DEFINE_string
+#undef DEFINE_float
+#undef DEFINE_args
#undef DEFINE_implication
+#undef DEFINE_ALIAS_bool
+#undef DEFINE_ALIAS_int
+#undef DEFINE_ALIAS_string
+#undef DEFINE_ALIAS_float
+#undef DEFINE_ALIAS_args
#undef FLAG_MODE_DECLARE
#undef FLAG_MODE_DEFINE
diff --git a/deps/v8/src/flags.cc b/deps/v8/src/flags.cc
index 855e20712..4e18cc8c8 100644
--- a/deps/v8/src/flags.cc
+++ b/deps/v8/src/flags.cc
@@ -268,6 +268,11 @@ List<const char*>* FlagList::argv() {
}
+inline char NormalizeChar(char ch) {
+ return ch == '_' ? '-' : ch;
+}
+
+
// Helper function to parse flags: Takes an argument arg and splits it into
// a flag name and flag value (or NULL if they are missing). is_bool is set
// if the arg started with "-no" or "--no". The buffer may be used to NUL-
@@ -295,6 +300,7 @@ static void SplitArgument(const char* arg,
}
if (arg[0] == 'n' && arg[1] == 'o') {
arg += 2; // remove "no"
+ if (NormalizeChar(arg[0]) == '-') arg++; // remove dash after "no".
*is_bool = true;
}
*name = arg;
@@ -318,11 +324,6 @@ static void SplitArgument(const char* arg,
}
-inline char NormalizeChar(char ch) {
- return ch == '_' ? '-' : ch;
-}
-
-
static bool EqualNames(const char* a, const char* b) {
for (int i = 0; NormalizeChar(a[i]) == NormalizeChar(b[i]); i++) {
if (a[i] == '\0') {
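
Moving NormalizeChar above SplitArgument lets the parser accept a dash or underscore right after the "no" prefix, so --no-gc_greedy, --no_gc_greedy, and --nogc_greedy all negate the same boolean flag. A self-contained sketch of just that normalization step (SplitBool is a simplified stand-in for SplitArgument, which also handles values):

    #include <iostream>
    #include <string>

    inline char NormalizeChar(char ch) { return ch == '_' ? '-' : ch; }

    // Simplified stand-in for SplitArgument: only the bool-negation path.
    static void SplitBool(const char* arg, std::string* name, bool* is_negated) {
      while (*arg == '-') arg++;  // strip leading dashes
      *is_negated = false;
      if (arg[0] == 'n' && arg[1] == 'o') {
        arg += 2;                                 // remove "no"
        if (NormalizeChar(arg[0]) == '-') arg++;  // remove '-' or '_' after "no"
        *is_negated = true;
      }
      *name = arg;
    }

    int main() {
      std::string name;
      bool negated;
      for (const char* arg :
           {"--no-gc_greedy", "--no_gc_greedy", "--nogc_greedy"}) {
        SplitBool(arg, &name, &negated);
        std::cout << arg << " -> " << name << (negated ? " (off)\n" : " (on)\n");
      }
    }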
diff --git a/deps/v8/src/flags.h b/deps/v8/src/flags.h
index f0b239b6f..fe182e522 100644
--- a/deps/v8/src/flags.h
+++ b/deps/v8/src/flags.h
@@ -24,9 +24,12 @@
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
#ifndef V8_FLAGS_H_
#define V8_FLAGS_H_
+#include "atomicops.h"
+
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/frames.cc b/deps/v8/src/frames.cc
index c17a9d5f8..167277f79 100644
--- a/deps/v8/src/frames.cc
+++ b/deps/v8/src/frames.cc
@@ -489,7 +489,7 @@ Address StackFrame::UnpaddedFP() const {
Code* EntryFrame::unchecked_code() const {
- return HEAP->js_entry_code();
+ return isolate()->heap()->js_entry_code();
}
@@ -512,7 +512,7 @@ StackFrame::Type EntryFrame::GetCallerState(State* state) const {
Code* EntryConstructFrame::unchecked_code() const {
- return HEAP->js_construct_entry_code();
+ return isolate()->heap()->js_construct_entry_code();
}
@@ -814,8 +814,7 @@ void JavaScriptFrame::PrintTop(Isolate* isolate,
PrintF("+%d", code_offset);
SharedFunctionInfo* shared = fun->shared();
if (print_line_number) {
- Code* code = Code::cast(
- v8::internal::Isolate::Current()->FindCodeObject(pc));
+ Code* code = Code::cast(isolate->FindCodeObject(pc));
int source_pos = code->SourcePosition(pc);
Object* maybe_script = shared->script();
if (maybe_script->IsScript()) {
diff --git a/deps/v8/src/full-codegen.cc b/deps/v8/src/full-codegen.cc
index f5539e8b1..91a51731a 100644
--- a/deps/v8/src/full-codegen.cc
+++ b/deps/v8/src/full-codegen.cc
@@ -333,7 +333,7 @@ bool FullCodeGenerator::MakeCode(CompilationInfo* info) {
Code::Flags flags = Code::ComputeFlags(Code::FUNCTION);
Handle<Code> code = CodeGenerator::MakeCodeEpilogue(&masm, flags, info);
code->set_optimizable(info->IsOptimizable() &&
- !info->function()->flags()->Contains(kDontOptimize) &&
+ !info->function()->dont_optimize() &&
info->function()->scope()->AllowsLazyCompilation());
cgen.PopulateDeoptimizationData(code);
cgen.PopulateTypeFeedbackInfo(code);
@@ -350,21 +350,17 @@ bool FullCodeGenerator::MakeCode(CompilationInfo* info) {
code->set_back_edge_table_offset(table_offset);
code->set_back_edges_patched_for_osr(false);
CodeGenerator::PrintCode(code, info);
- info->SetCode(code); // May be an empty handle.
+ info->SetCode(code);
#ifdef ENABLE_GDB_JIT_INTERFACE
- if (FLAG_gdbjit && !code.is_null()) {
+ if (FLAG_gdbjit) {
GDBJITLineInfo* lineinfo =
masm.positions_recorder()->DetachGDBJITLineInfo();
-
GDBJIT(RegisterDetailedLineInfo(*code, lineinfo));
}
#endif
- if (!code.is_null()) {
- void* line_info =
- masm.positions_recorder()->DetachJITHandlerData();
- LOG_CODE_EVENT(isolate, CodeEndLinePosInfoRecordEvent(*code, line_info));
- }
- return !code.is_null();
+ void* line_info = masm.positions_recorder()->DetachJITHandlerData();
+ LOG_CODE_EVENT(isolate, CodeEndLinePosInfoRecordEvent(*code, line_info));
+ return true;
}
@@ -419,7 +415,7 @@ void FullCodeGenerator::Initialize() {
!Snapshot::HaveASnapshotToStartFrom();
masm_->set_emit_debug_code(generate_debug_code_);
masm_->set_predictable_code_size(true);
- InitializeAstVisitor();
+ InitializeAstVisitor(info_->isolate());
}
@@ -834,7 +830,7 @@ void FullCodeGenerator::SetStatementPosition(Statement* stmt) {
} else {
// Check if the statement will be breakable without adding a debug break
// slot.
- BreakableStatementChecker checker;
+ BreakableStatementChecker checker(isolate());
checker.Check(stmt);
// Record the statement position right here if the statement is not
// breakable. For breakable statements the actual recording of the
@@ -860,7 +856,7 @@ void FullCodeGenerator::SetExpressionPosition(Expression* expr, int pos) {
} else {
// Check if the expression will be breakable without adding a debug break
// slot.
- BreakableStatementChecker checker;
+ BreakableStatementChecker checker(isolate());
checker.Check(expr);
// Record a statement position right here if the expression is not
// breakable. For breakable expressions the actual recording of the
diff --git a/deps/v8/src/full-codegen.h b/deps/v8/src/full-codegen.h
index af63aedfb..5580cb3e8 100644
--- a/deps/v8/src/full-codegen.h
+++ b/deps/v8/src/full-codegen.h
@@ -52,8 +52,8 @@ class JumpPatchSite;
// debugger to piggyback on.
class BreakableStatementChecker: public AstVisitor {
public:
- BreakableStatementChecker() : is_breakable_(false) {
- InitializeAstVisitor();
+ explicit BreakableStatementChecker(Isolate* isolate) : is_breakable_(false) {
+ InitializeAstVisitor(isolate);
}
void Check(Statement* stmt);
@@ -141,7 +141,8 @@ class FullCodeGenerator: public AstVisitor {
class BackEdgeTableIterator {
public:
- explicit BackEdgeTableIterator(Code* unoptimized) {
+ explicit BackEdgeTableIterator(Code* unoptimized,
+ DisallowHeapAllocation* required) {
ASSERT(unoptimized->kind() == Code::FUNCTION);
instruction_start_ = unoptimized->instruction_start();
cursor_ = instruction_start_ + unoptimized->back_edge_table_offset();
@@ -192,7 +193,6 @@ class FullCodeGenerator: public AstVisitor {
Address end_;
Address instruction_start_;
uint32_t table_length_;
- DisallowHeapAllocation no_gc_while_iterating_over_raw_addresses_;
DISALLOW_COPY_AND_ASSIGN(BackEdgeTableIterator);
};
diff --git a/deps/v8/src/gdb-jit.cc b/deps/v8/src/gdb-jit.cc
index 74db807fb..21cfd2233 100644
--- a/deps/v8/src/gdb-jit.cc
+++ b/deps/v8/src/gdb-jit.cc
@@ -1872,7 +1872,7 @@ static void DestroyCodeEntry(JITCodeEntry* entry) {
static void RegisterCodeEntry(JITCodeEntry* entry,
bool dump_if_enabled,
const char* name_hint) {
-#if defined(DEBUG) && !defined(WIN32)
+#if defined(DEBUG) && !V8_OS_WIN
static int file_num = 0;
if (FLAG_gdbjit_dump && dump_if_enabled) {
static const int kMaxFileNameSize = 64;
@@ -2063,7 +2063,7 @@ void GDBJITInterface::AddCode(const char* name,
CompilationInfo* info) {
if (!FLAG_gdbjit) return;
- ScopedLock lock(mutex.Pointer());
+ LockGuard<Mutex> lock_guard(mutex.Pointer());
DisallowHeapAllocation no_gc;
HashMap::Entry* e = GetEntries()->Lookup(code, HashForCodeObject(code), true);
@@ -2149,7 +2149,7 @@ void GDBJITInterface::AddCode(GDBJITInterface::CodeTag tag, Code* code) {
void GDBJITInterface::RemoveCode(Code* code) {
if (!FLAG_gdbjit) return;
- ScopedLock lock(mutex.Pointer());
+ LockGuard<Mutex> lock_guard(mutex.Pointer());
HashMap::Entry* e = GetEntries()->Lookup(code,
HashForCodeObject(code),
false);
@@ -2187,7 +2187,7 @@ void GDBJITInterface::RemoveCodeRange(Address start, Address end) {
void GDBJITInterface::RegisterDetailedLineInfo(Code* code,
GDBJITLineInfo* line_info) {
- ScopedLock lock(mutex.Pointer());
+ LockGuard<Mutex> lock_guard(mutex.Pointer());
ASSERT(!IsLineInfoTagged(line_info));
HashMap::Entry* e = GetEntries()->Lookup(code, HashForCodeObject(code), true);
ASSERT(e->value == NULL);
diff --git a/deps/v8/src/global-handles.cc b/deps/v8/src/global-handles.cc
index 5df9dd4c6..1a98e49ff 100644
--- a/deps/v8/src/global-handles.cc
+++ b/deps/v8/src/global-handles.cc
@@ -90,7 +90,7 @@ class GlobalHandles::Node {
set_partially_dependent(false);
set_in_new_space_list(false);
parameter_or_next_free_.next_free = NULL;
- weak_reference_callback_ = NULL;
+ weak_callback_ = NULL;
}
#endif
@@ -111,21 +111,19 @@ class GlobalHandles::Node {
set_partially_dependent(false);
set_state(NORMAL);
parameter_or_next_free_.parameter = NULL;
- weak_reference_callback_ = NULL;
+ weak_callback_ = NULL;
IncreaseBlockUses();
}
void Release() {
ASSERT(state() != FREE);
set_state(FREE);
-#ifdef ENABLE_EXTRA_CHECKS
// Zap the values for eager trapping.
object_ = reinterpret_cast<Object*>(kGlobalHandleZapValue);
class_id_ = v8::HeapProfiler::kPersistentHandleNoClassId;
set_independent(false);
set_partially_dependent(false);
- weak_reference_callback_ = NULL;
-#endif
+ weak_callback_ = NULL;
DecreaseBlockUses();
}
@@ -171,6 +169,13 @@ class GlobalHandles::Node {
flags_ = IsInNewSpaceList::update(flags_, v);
}
+ bool is_revivable_callback() {
+ return IsRevivableCallback::decode(flags_);
+ }
+ void set_revivable_callback(bool v) {
+ flags_ = IsRevivableCallback::update(flags_, v);
+ }
+
bool IsNearDeath() const {
// Check for PENDING to ensure correct answer when processing callbacks.
return state() == PENDING || state() == NEAR_DEATH;
@@ -230,11 +235,20 @@ class GlobalHandles::Node {
}
void MakeWeak(void* parameter,
- RevivableCallback weak_reference_callback) {
+ WeakCallback weak_callback,
+ RevivableCallback revivable_callback) {
+ ASSERT((weak_callback == NULL) != (revivable_callback == NULL));
ASSERT(state() != FREE);
set_state(WEAK);
set_parameter(parameter);
- weak_reference_callback_ = weak_reference_callback;
+ if (weak_callback != NULL) {
+ weak_callback_ = weak_callback;
+ set_revivable_callback(false);
+ } else {
+ weak_callback_ =
+ reinterpret_cast<WeakCallback>(revivable_callback);
+ set_revivable_callback(true);
+ }
}
void ClearWeakness() {
@@ -245,7 +259,7 @@ class GlobalHandles::Node {
bool PostGarbageCollectionProcessing(Isolate* isolate) {
if (state() != Node::PENDING) return false;
- if (weak_reference_callback_ == NULL) {
+ if (weak_callback_ == NULL) {
Release();
return false;
}
@@ -264,9 +278,20 @@ class GlobalHandles::Node {
// Leaving V8.
VMState<EXTERNAL> state(isolate);
HandleScope handle_scope(isolate);
- weak_reference_callback_(reinterpret_cast<v8::Isolate*>(isolate),
- reinterpret_cast<Persistent<Value>*>(&object),
- par);
+ if (is_revivable_callback()) {
+ RevivableCallback revivable =
+ reinterpret_cast<RevivableCallback>(weak_callback_);
+ revivable(reinterpret_cast<v8::Isolate*>(isolate),
+ reinterpret_cast<Persistent<Value>*>(&object),
+ par);
+ } else {
+ Handle<Object> handle(*object, isolate);
+ v8::WeakCallbackData<v8::Value, void> data(
+ reinterpret_cast<v8::Isolate*>(isolate),
+ v8::Utils::ToLocal(handle),
+ par);
+ weak_callback_(data);
+ }
}
// Absence of explicit cleanup or revival of weak handle
// in most of the cases would lead to memory leak.
@@ -274,9 +299,10 @@ class GlobalHandles::Node {
return true;
}
+ inline GlobalHandles* GetGlobalHandles();
+
private:
inline NodeBlock* FindBlock();
- inline GlobalHandles* GetGlobalHandles();
inline void IncreaseBlockUses();
inline void DecreaseBlockUses();
@@ -299,11 +325,12 @@ class GlobalHandles::Node {
class IsIndependent: public BitField<bool, 4, 1> {};
class IsPartiallyDependent: public BitField<bool, 5, 1> {};
class IsInNewSpaceList: public BitField<bool, 6, 1> {};
+ class IsRevivableCallback: public BitField<bool, 7, 1> {};
uint8_t flags_;
  // Handle-specific callback - might be a weak reference in disguise.
- RevivableCallback weak_reference_callback_;
+ WeakCallback weak_callback_;
// Provided data for callback. In FREE state, this is used for
// the free list link.
@@ -482,6 +509,12 @@ Handle<Object> GlobalHandles::Create(Object* value) {
}
+Handle<Object> GlobalHandles::CopyGlobal(Object** location) {
+ ASSERT(location != NULL);
+ return Node::FromLocation(location)->GetGlobalHandles()->Create(*location);
+}
+
+
void GlobalHandles::Destroy(Object** location) {
if (location != NULL) Node::FromLocation(location)->Release();
}
@@ -489,9 +522,10 @@ void GlobalHandles::Destroy(Object** location) {
void GlobalHandles::MakeWeak(Object** location,
void* parameter,
- RevivableCallback weak_reference_callback) {
- ASSERT(weak_reference_callback != NULL);
- Node::FromLocation(location)->MakeWeak(parameter, weak_reference_callback);
+ WeakCallback weak_callback,
+ RevivableCallback revivable_callback) {
+ Node::FromLocation(location)->MakeWeak(
+ parameter, weak_callback, revivable_callback);
}
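
MakeWeak now stores a single function pointer for both callback flavors and records which signature it holds in a flag bit; PostGarbageCollectionProcessing casts back to the right type before invoking. A minimal sketch of that tagged-callback dispatch, with int/double payloads standing in for V8's callback data types:

    #include <iostream>

    using WeakCallback = void (*)(int);
    using RevivableCallback = void (*)(double);

    struct Node {
      WeakCallback weak_callback = nullptr;
      bool is_revivable = false;  // records which cast of weak_callback is valid

      void MakeWeak(WeakCallback weak, RevivableCallback revivable) {
        // Exactly one of the two may be non-null, as the ASSERT above enforces.
        if (weak != nullptr) {
          weak_callback = weak;
          is_revivable = false;
        } else {
          weak_callback = reinterpret_cast<WeakCallback>(revivable);
          is_revivable = true;
        }
      }

      void Invoke() {
        if (is_revivable)
          reinterpret_cast<RevivableCallback>(weak_callback)(2.5);
        else
          weak_callback(42);
      }
    };

    int main() {
      Node n;
      n.MakeWeak(nullptr,
                 [](double d) { std::cout << "revivable: " << d << '\n'; });
      n.Invoke();  // dispatches through the recorded flag bit
    }

Casting a function pointer to a different function-pointer type and back is well defined as long as the call goes through the original type, which is exactly what the flag bit guarantees.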
@@ -1020,7 +1054,6 @@ void GlobalHandles::ComputeObjectGroupsAndImplicitReferences() {
EternalHandles::EternalHandles() : size_(0) {
- STATIC_ASSERT(v8::kUninitializedEternalIndex == kInvalidIndex);
for (unsigned i = 0; i < ARRAY_SIZE(singleton_handles_); i++) {
singleton_handles_[i] = kInvalidIndex;
}
@@ -1062,8 +1095,9 @@ void EternalHandles::PostGarbageCollectionProcessing(Heap* heap) {
}
-int EternalHandles::Create(Isolate* isolate, Object* object) {
- if (object == NULL) return kInvalidIndex;
+void EternalHandles::Create(Isolate* isolate, Object* object, int* index) {
+ ASSERT_EQ(kInvalidIndex, *index);
+ if (object == NULL) return;
ASSERT_NE(isolate->heap()->the_hole_value(), object);
int block = size_ >> kShift;
int offset = size_ & kMask;
@@ -1079,7 +1113,7 @@ int EternalHandles::Create(Isolate* isolate, Object* object) {
if (isolate->heap()->InNewSpace(object)) {
new_space_indices_.Add(size_);
}
- return size_++;
+ *index = size_++;
}
diff --git a/deps/v8/src/global-handles.h b/deps/v8/src/global-handles.h
index 5a4ad13e2..4b46aac05 100644
--- a/deps/v8/src/global-handles.h
+++ b/deps/v8/src/global-handles.h
@@ -128,9 +128,13 @@ class GlobalHandles {
// Creates a new global handle that is alive until Destroy is called.
Handle<Object> Create(Object* value);
+ // Copy a global handle
+ static Handle<Object> CopyGlobal(Object** location);
+
// Destroy a global handle.
static void Destroy(Object** location);
+ typedef WeakCallbackData<v8::Value, void>::Callback WeakCallback;
typedef WeakReferenceCallbacks<v8::Value, void>::Revivable RevivableCallback;
// Make the global handle weak and set the callback parameter for the
@@ -141,7 +145,14 @@ class GlobalHandles {
// reason is that Smi::FromInt(0) does not change during garbage collection.
static void MakeWeak(Object** location,
void* parameter,
- RevivableCallback weak_reference_callback);
+ WeakCallback weak_callback,
+ RevivableCallback revivable_callback);
+
+ static inline void MakeWeak(Object** location,
+ void* parameter,
+ RevivableCallback revivable_callback) {
+ MakeWeak(location, parameter, NULL, revivable_callback);
+ }
void RecordStats(HeapStats* stats);
@@ -346,8 +357,8 @@ class EternalHandles {
int NumberOfHandles() { return size_; }
- // Create an EternalHandle, returning the index.
- int Create(Isolate* isolate, Object* object);
+ // Create an EternalHandle, overwriting the index.
+ void Create(Isolate* isolate, Object* object, int* index);
// Grab the handle for an existing EternalHandle.
inline Handle<Object> Get(int index) {
@@ -369,8 +380,7 @@ class EternalHandles {
Handle<Object> CreateSingleton(Isolate* isolate,
Object* object,
SingletonHandle singleton) {
- ASSERT(singleton_handles_[singleton] == kInvalidIndex);
- singleton_handles_[singleton] = Create(isolate, object);
+ Create(isolate, object, &singleton_handles_[singleton]);
return Get(singleton_handles_[singleton]);
}
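
EternalHandles::Create now writes through an out-parameter instead of returning an index, which lets it assert that the caller's slot still holds kInvalidIndex (no double initialization) and leave the slot untouched for NULL objects, as CreateSingleton relies on. A small sketch of that protocol with plain ints:

    #include <cassert>
    #include <iostream>

    static const int kInvalidIndex = -1;

    // Caller owns the slot; Create() fills it at most once.
    static void Create(const void* object, int* index, int* next_index) {
      assert(*index == kInvalidIndex);  // slot must not already be initialized
      if (object == nullptr) return;    // NULL object: leave the slot invalid
      *index = (*next_index)++;
    }

    int main() {
      int next_index = 0;
      int slot = kInvalidIndex;
      Create("some object", &slot, &next_index);
      std::cout << slot << '\n';   // 0: first handle created
      int empty = kInvalidIndex;
      Create(nullptr, &empty, &next_index);
      std::cout << empty << '\n';  // -1: slot left untouched
    }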
diff --git a/deps/v8/src/globals.h b/deps/v8/src/globals.h
index 26fd53114..1977e68c8 100644
--- a/deps/v8/src/globals.h
+++ b/deps/v8/src/globals.h
@@ -28,37 +28,22 @@
#ifndef V8_GLOBALS_H_
#define V8_GLOBALS_H_
-// Define V8_INFINITY
-#define V8_INFINITY INFINITY
-
-// GCC specific stuff
-#ifdef __GNUC__
-
-#define __GNUC_VERSION_FOR_INFTY__ (__GNUC__ * 10000 + __GNUC_MINOR__ * 100)
+#include "../include/v8stdint.h"
// Unfortunately, the INFINITY macro cannot be used with the '-pedantic'
// warning flag and certain versions of GCC due to a bug:
// http://gcc.gnu.org/bugzilla/show_bug.cgi?id=11931
// For now, we use the more involved template-based version from <limits>, but
// only when compiling with GCC versions affected by the bug (2.96.x - 4.0.x)
-// __GNUC_PREREQ is not defined in GCC for Mac OS X, so we define our own macro
-#if __GNUC_VERSION_FOR_INFTY__ >= 29600 && __GNUC_VERSION_FOR_INFTY__ < 40100
-#include <limits>
-#undef V8_INFINITY
-#define V8_INFINITY std::numeric_limits<double>::infinity()
-#endif
-#undef __GNUC_VERSION_FOR_INFTY__
-
-#endif // __GNUC__
-
-#ifdef _MSC_VER
-#undef V8_INFINITY
-#define V8_INFINITY HUGE_VAL
+#if V8_CC_GNU && V8_GNUC_PREREQ(2, 96, 0) && !V8_GNUC_PREREQ(4, 1, 0)
+# include <limits> // NOLINT
+# define V8_INFINITY std::numeric_limits<double>::infinity()
+#elif V8_CC_MSVC
+# define V8_INFINITY HUGE_VAL
+#else
+# define V8_INFINITY INFINITY
#endif
-
-#include "../include/v8stdint.h"
-
namespace v8 {
namespace internal {
@@ -186,27 +171,32 @@ typedef byte* Address;
// Define our own macros for writing 64-bit constants. This is less fragile
// than defining __STDC_CONSTANT_MACROS before including <stdint.h>, and it
// works on compilers that don't have it (like MSVC).
-#if V8_HOST_ARCH_64_BIT
-#if defined(_MSC_VER)
-#define V8_UINT64_C(x) (x ## UI64)
-#define V8_INT64_C(x) (x ## I64)
-#define V8_INTPTR_C(x) (x ## I64)
-#define V8_PTR_PREFIX "ll"
-#elif defined(__MINGW64__)
-#define V8_UINT64_C(x) (x ## ULL)
-#define V8_INT64_C(x) (x ## LL)
-#define V8_INTPTR_C(x) (x ## LL)
-#define V8_PTR_PREFIX "I64"
+#if V8_CC_MSVC
+# define V8_UINT64_C(x) (x ## UI64)
+# define V8_INT64_C(x) (x ## I64)
+# if V8_HOST_ARCH_64_BIT
+# define V8_INTPTR_C(x) (x ## I64)
+# define V8_PTR_PREFIX "ll"
+# else
+# define V8_INTPTR_C(x) (x)
+# define V8_PTR_PREFIX ""
+# endif // V8_HOST_ARCH_64_BIT
+#elif V8_CC_MINGW64
+# define V8_UINT64_C(x) (x ## ULL)
+# define V8_INT64_C(x) (x ## LL)
+# define V8_INTPTR_C(x) (x ## LL)
+# define V8_PTR_PREFIX "I64"
+#elif V8_HOST_ARCH_64_BIT
+# define V8_UINT64_C(x) (x ## UL)
+# define V8_INT64_C(x) (x ## L)
+# define V8_INTPTR_C(x) (x ## L)
+# define V8_PTR_PREFIX "l"
#else
-#define V8_UINT64_C(x) (x ## UL)
-#define V8_INT64_C(x) (x ## L)
-#define V8_INTPTR_C(x) (x ## L)
-#define V8_PTR_PREFIX "l"
+# define V8_UINT64_C(x) (x ## ULL)
+# define V8_INT64_C(x) (x ## LL)
+# define V8_INTPTR_C(x) (x)
+# define V8_PTR_PREFIX ""
#endif
-#else // V8_HOST_ARCH_64_BIT
-#define V8_INTPTR_C(x) (x)
-#define V8_PTR_PREFIX ""
-#endif // V8_HOST_ARCH_64_BIT
// The following macro works on both 32 and 64-bit platforms.
// Usage: instead of writing 0x1234567890123456
@@ -292,6 +282,10 @@ const int kOneByteSize = kCharSize;
const int kUC16Size = sizeof(uc16); // NOLINT
+// Round up n to be a multiple of sz, where sz is a power of 2.
+#define ROUND_UP(n, sz) (((n) + ((sz) - 1)) & ~((sz) - 1))
+
+
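+// The ROUND_UP macro relies on sz being a power of two: adding sz-1 and
+// clearing the low bits rounds n up to the next multiple. A quick check:
+//
+//   #include <iostream>
+//   #define ROUND_UP(n, sz) (((n) + ((sz) - 1)) & ~((sz) - 1))
+//   int main() {
+//     std::cout << ROUND_UP(13, 8) << '\n';  // 16
+//     std::cout << ROUND_UP(16, 8) << '\n';  // 16: already a multiple of 8
+//     std::cout << ROUND_UP(1, 4) << '\n';   // 4
+//   }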
// The expression OFFSET_OF(type, field) computes the byte-offset
// of the specified field relative to the containing type. This
// corresponds to 'offsetof' (in stddef.h), except that it doesn't
@@ -330,18 +324,11 @@ F FUNCTION_CAST(Address addr) {
}
-#if __cplusplus >= 201103L
-#define DISALLOW_BY_DELETE = delete
-#else
-#define DISALLOW_BY_DELETE
-#endif
-
-
// A macro to disallow the evil copy constructor and operator= functions
// This should be used in the private: declarations for a class
-#define DISALLOW_COPY_AND_ASSIGN(TypeName) \
- TypeName(const TypeName&) DISALLOW_BY_DELETE; \
- void operator=(const TypeName&) DISALLOW_BY_DELETE
+#define DISALLOW_COPY_AND_ASSIGN(TypeName) \
+ TypeName(const TypeName&) V8_DELETE; \
+ void operator=(const TypeName&) V8_DELETE
// A macro to disallow all the implicit constructors, namely the
@@ -350,36 +337,18 @@ F FUNCTION_CAST(Address addr) {
// This should be used in the private: declarations for a class
// that wants to prevent anyone from instantiating it. This is
// especially useful for classes containing only static methods.
-#define DISALLOW_IMPLICIT_CONSTRUCTORS(TypeName) \
- TypeName() DISALLOW_BY_DELETE; \
+#define DISALLOW_IMPLICIT_CONSTRUCTORS(TypeName) \
+ TypeName() V8_DELETE; \
DISALLOW_COPY_AND_ASSIGN(TypeName)
-// Define used for helping GCC to make better inlining. Don't bother for debug
-// builds. On GCC 3.4.5 using __attribute__((always_inline)) causes compilation
-// errors in debug build.
-#if defined(__GNUC__) && !defined(DEBUG)
-#if (__GNUC__ >= 4)
-#define INLINE(header) inline header __attribute__((always_inline))
-#define NO_INLINE(header) header __attribute__((noinline))
-#else
-#define INLINE(header) inline __attribute__((always_inline)) header
-#define NO_INLINE(header) __attribute__((noinline)) header
-#endif
-#elif defined(_MSC_VER) && !defined(DEBUG)
-#define INLINE(header) __forceinline header
-#define NO_INLINE(header) header
-#else
-#define INLINE(header) inline header
-#define NO_INLINE(header) header
-#endif
+// Newly written code should use V8_INLINE and V8_NOINLINE directly.
+#define INLINE(declarator) V8_INLINE declarator
+#define NO_INLINE(declarator) V8_NOINLINE declarator
-#if defined(__GNUC__) && __GNUC__ >= 4
-#define MUST_USE_RESULT __attribute__ ((warn_unused_result))
-#else
-#define MUST_USE_RESULT
-#endif
+// Newly written code should use V8_WARN_UNUSED_RESULT.
+#define MUST_USE_RESULT V8_WARN_UNUSED_RESULT
// Define DISABLE_ASAN macros.
@@ -424,18 +393,6 @@ enum LanguageMode {
};
-// A simple Maybe type, that can be passed by value.
-template<class T>
-struct Maybe {
- Maybe() : has_value(false) {}
- explicit Maybe(T t) : has_value(true), value(t) {}
- Maybe(bool has, T t) : has_value(has), value(t) {}
-
- bool has_value;
- T value;
-};
-
-
// The Strict Mode (ECMA-262 5th edition, 4.2.2).
//
// This flag is used in the backend to represent the language mode. So far
diff --git a/deps/v8/src/handles-inl.h b/deps/v8/src/handles-inl.h
index 4f4490b75..5b879d8f0 100644
--- a/deps/v8/src/handles-inl.h
+++ b/deps/v8/src/handles-inl.h
@@ -32,6 +32,7 @@
#include "api.h"
#include "apiutils.h"
#include "handles.h"
+#include "heap.h"
#include "isolate.h"
namespace v8 {
@@ -85,11 +86,13 @@ bool Handle<T>::IsDereferenceAllowed(DereferenceCheckMode mode) const {
Object* object = *BitCast<T**>(location_);
if (object->IsSmi()) return true;
HeapObject* heap_object = HeapObject::cast(object);
- Isolate* isolate = heap_object->GetIsolate();
+ Heap* heap = heap_object->GetHeap();
Object** handle = reinterpret_cast<Object**>(location_);
- Object** roots_array_start = isolate->heap()->roots_array_start();
+ Object** roots_array_start = heap->roots_array_start();
if (roots_array_start <= handle &&
- handle < roots_array_start + Heap::kStrongRootListLength) {
+ handle < roots_array_start + Heap::kStrongRootListLength &&
+ heap->RootCanBeTreatedAsConstant(
+ static_cast<Heap::RootListIndex>(handle - roots_array_start))) {
return true;
}
if (!AllowHandleDereference::IsAllowed()) return false;
@@ -98,7 +101,7 @@ bool Handle<T>::IsDereferenceAllowed(DereferenceCheckMode mode) const {
// Accessing maps and internalized strings is safe.
if (heap_object->IsMap()) return true;
if (heap_object->IsInternalizedString()) return true;
- return !isolate->IsDeferredHandle(handle);
+ return !heap->isolate()->IsDeferredHandle(handle);
}
return true;
}
diff --git a/deps/v8/src/handles.cc b/deps/v8/src/handles.cc
index 48114d91a..b3704df69 100644
--- a/deps/v8/src/handles.cc
+++ b/deps/v8/src/handles.cc
@@ -208,17 +208,6 @@ Handle<String> FlattenGetString(Handle<String> string) {
}
-Handle<Object> SetPrototype(Handle<JSFunction> function,
- Handle<Object> prototype) {
- ASSERT(function->should_have_prototype());
- CALL_HEAP_FUNCTION(function->GetIsolate(),
- Accessors::FunctionSetPrototype(*function,
- *prototype,
- NULL),
- Object);
-}
-
-
Handle<Object> SetProperty(Isolate* isolate,
Handle<Object> object,
Handle<Object> key,
@@ -320,11 +309,6 @@ Handle<JSObject> DeepCopy(Handle<JSObject> obj) {
}
-Handle<Object> SetAccessor(Handle<JSObject> obj, Handle<AccessorInfo> info) {
- CALL_HEAP_FUNCTION(obj->GetIsolate(), obj->DefineAccessor(*info), Object);
-}
-
-
// Wrappers for scripts are kept alive and cached in weak global
// handles referred from foreign objects held by the scripts as long as
// they are used. When they are not used anymore, the garbage
@@ -543,8 +527,9 @@ v8::Handle<v8::Array> GetKeysForNamedInterceptor(Handle<JSReceiver> receiver,
args(isolate, interceptor->data(), *receiver, *object);
v8::Handle<v8::Array> result;
if (!interceptor->enumerator()->IsUndefined()) {
- v8::NamedPropertyEnumerator enum_fun =
- v8::ToCData<v8::NamedPropertyEnumerator>(interceptor->enumerator());
+ v8::NamedPropertyEnumeratorCallback enum_fun =
+ v8::ToCData<v8::NamedPropertyEnumeratorCallback>(
+ interceptor->enumerator());
LOG(isolate, ApiObjectAccess("interceptor-named-enum", *object));
result = args.Call(enum_fun);
}
@@ -565,8 +550,9 @@ v8::Handle<v8::Array> GetKeysForIndexedInterceptor(Handle<JSReceiver> receiver,
args(isolate, interceptor->data(), *receiver, *object);
v8::Handle<v8::Array> result;
if (!interceptor->enumerator()->IsUndefined()) {
- v8::IndexedPropertyEnumerator enum_fun =
- v8::ToCData<v8::IndexedPropertyEnumerator>(interceptor->enumerator());
+ v8::IndexedPropertyEnumeratorCallback enum_fun =
+ v8::ToCData<v8::IndexedPropertyEnumeratorCallback>(
+ interceptor->enumerator());
LOG(isolate, ApiObjectAccess("interceptor-indexed-enum", *object));
result = args.Call(enum_fun);
#if ENABLE_EXTRA_CHECKS
@@ -629,8 +615,12 @@ Handle<FixedArray> GetKeysInFixedArrayFor(Handle<JSReceiver> object,
if (p->IsJSProxy()) {
Handle<JSProxy> proxy(JSProxy::cast(*p), isolate);
Handle<Object> args[] = { proxy };
- Handle<Object> names = Execution::Call(
- isolate->proxy_enumerate(), object, ARRAY_SIZE(args), args, threw);
+ Handle<Object> names = Execution::Call(isolate,
+ isolate->proxy_enumerate(),
+ object,
+ ARRAY_SIZE(args),
+ args,
+ threw);
if (*threw) return content;
content = AddKeysFromJSArray(content, Handle<JSArray>::cast(names));
break;
diff --git a/deps/v8/src/handles.h b/deps/v8/src/handles.h
index 90db7d121..c3e4dca1a 100644
--- a/deps/v8/src/handles.h
+++ b/deps/v8/src/handles.h
@@ -259,8 +259,6 @@ Handle<JSObject> Copy(Handle<JSObject> obj);
Handle<JSObject> DeepCopy(Handle<JSObject> obj);
-Handle<Object> SetAccessor(Handle<JSObject> obj, Handle<AccessorInfo> info);
-
Handle<FixedArray> AddKeysFromJSArray(Handle<FixedArray>,
Handle<JSArray> array);
@@ -322,9 +320,6 @@ Handle<JSGlobalProxy> ReinitializeJSGlobalProxy(
Handle<JSFunction> constructor,
Handle<JSGlobalProxy> global);
-Handle<Object> SetPrototype(Handle<JSFunction> function,
- Handle<Object> prototype);
-
Handle<ObjectHashSet> ObjectHashSetAdd(Handle<ObjectHashSet> table,
Handle<Object> key);
diff --git a/deps/v8/src/heap-inl.h b/deps/v8/src/heap-inl.h
index 6caa742f5..4f1960386 100644
--- a/deps/v8/src/heap-inl.h
+++ b/deps/v8/src/heap-inl.h
@@ -69,7 +69,7 @@ void PromotionQueue::insert(HeapObject* target, int size) {
*(--rear_) = size;
// Assert no overflow into live objects.
#ifdef DEBUG
- SemiSpace::AssertValidRange(HEAP->new_space()->top(),
+ SemiSpace::AssertValidRange(target->GetIsolate()->heap()->new_space()->top(),
reinterpret_cast<Address>(rear_));
#endif
}
@@ -508,7 +508,7 @@ void Heap::ScavengePointer(HeapObject** p) {
void Heap::ScavengeObject(HeapObject** p, HeapObject* object) {
- ASSERT(HEAP->InFromSpace(object));
+ ASSERT(object->GetIsolate()->heap()->InFromSpace(object));
// We use the first word (where the map pointer usually is) of a heap
// object to record the forwarding pointer. A forwarding pointer can
@@ -520,11 +520,13 @@ void Heap::ScavengeObject(HeapObject** p, HeapObject* object) {
// copied.
if (first_word.IsForwardingAddress()) {
HeapObject* dest = first_word.ToForwardingAddress();
- ASSERT(HEAP->InFromSpace(*p));
+ ASSERT(object->GetIsolate()->heap()->InFromSpace(*p));
*p = dest;
return;
}
+ // AllocationMementos are unrooted and shouldn't survive a scavenge
+ ASSERT(object->map() != object->GetHeap()->allocation_memento_map());
// Call the slow part of scavenge object.
return ScavengeObjectSlow(p, object);
}
@@ -613,10 +615,10 @@ Isolate* Heap::isolate() {
#ifdef DEBUG
-#define GC_GREEDY_CHECK() \
- if (FLAG_gc_greedy) HEAP->GarbageCollectionGreedyCheck()
+#define GC_GREEDY_CHECK(ISOLATE) \
+ if (FLAG_gc_greedy) (ISOLATE)->heap()->GarbageCollectionGreedyCheck()
#else
-#define GC_GREEDY_CHECK() { }
+#define GC_GREEDY_CHECK(ISOLATE) { }
#endif
// Calls the FUNCTION_CALL function and retries it up to three times
@@ -628,7 +630,7 @@ Isolate* Heap::isolate() {
#define CALL_AND_RETRY(ISOLATE, FUNCTION_CALL, RETURN_VALUE, RETURN_EMPTY, OOM)\
do { \
- GC_GREEDY_CHECK(); \
+ GC_GREEDY_CHECK(ISOLATE); \
MaybeObject* __maybe_object__ = FUNCTION_CALL; \
Object* __object__ = NULL; \
if (__maybe_object__->ToObject(&__object__)) RETURN_VALUE; \
@@ -636,7 +638,7 @@ Isolate* Heap::isolate() {
OOM; \
} \
if (!__maybe_object__->IsRetryAfterGC()) RETURN_EMPTY; \
- ISOLATE->heap()->CollectGarbage(Failure::cast(__maybe_object__)-> \
+ (ISOLATE)->heap()->CollectGarbage(Failure::cast(__maybe_object__)-> \
allocation_space(), \
"allocation failure"); \
__maybe_object__ = FUNCTION_CALL; \
@@ -645,8 +647,8 @@ Isolate* Heap::isolate() {
OOM; \
} \
if (!__maybe_object__->IsRetryAfterGC()) RETURN_EMPTY; \
- ISOLATE->counters()->gc_last_resort_from_handles()->Increment(); \
- ISOLATE->heap()->CollectAllAvailableGarbage("last resort gc"); \
+ (ISOLATE)->counters()->gc_last_resort_from_handles()->Increment(); \
+ (ISOLATE)->heap()->CollectAllAvailableGarbage("last resort gc"); \
{ \
AlwaysAllocateScope __scope__; \
__maybe_object__ = FUNCTION_CALL; \
@@ -718,15 +720,13 @@ void ExternalStringTable::Verify() {
#ifdef DEBUG
for (int i = 0; i < new_space_strings_.length(); ++i) {
Object* obj = Object::cast(new_space_strings_[i]);
- // TODO(yangguo): check that the object is indeed an external string.
ASSERT(heap_->InNewSpace(obj));
- ASSERT(obj != HEAP->the_hole_value());
+ ASSERT(obj != heap_->the_hole_value());
}
for (int i = 0; i < old_space_strings_.length(); ++i) {
Object* obj = Object::cast(old_space_strings_[i]);
- // TODO(yangguo): check that the object is indeed an external string.
ASSERT(!heap_->InNewSpace(obj));
- ASSERT(obj != HEAP->the_hole_value());
+ ASSERT(obj != heap_->the_hole_value());
}
#endif
}
@@ -768,7 +768,7 @@ void Heap::CompletelyClearInstanceofCache() {
MaybeObject* TranscendentalCache::Get(Type type, double input) {
SubCache* cache = caches_[type];
if (cache == NULL) {
- caches_[type] = cache = new SubCache(type);
+ caches_[type] = cache = new SubCache(isolate_, type);
}
return cache->Get(input);
}
@@ -833,25 +833,29 @@ AlwaysAllocateScope::AlwaysAllocateScope() {
// non-handle code to call handle code. The code still works but
// performance will degrade, so we want to catch this situation
// in debug mode.
- ASSERT(HEAP->always_allocate_scope_depth_ == 0);
- HEAP->always_allocate_scope_depth_++;
+ Isolate* isolate = Isolate::Current();
+ ASSERT(isolate->heap()->always_allocate_scope_depth_ == 0);
+ isolate->heap()->always_allocate_scope_depth_++;
}
AlwaysAllocateScope::~AlwaysAllocateScope() {
- HEAP->always_allocate_scope_depth_--;
- ASSERT(HEAP->always_allocate_scope_depth_ == 0);
+ Isolate* isolate = Isolate::Current();
+ isolate->heap()->always_allocate_scope_depth_--;
+ ASSERT(isolate->heap()->always_allocate_scope_depth_ == 0);
}
#ifdef VERIFY_HEAP
NoWeakEmbeddedMapsVerificationScope::NoWeakEmbeddedMapsVerificationScope() {
- HEAP->no_weak_embedded_maps_verification_scope_depth_++;
+ Isolate* isolate = Isolate::Current();
+ isolate->heap()->no_weak_embedded_maps_verification_scope_depth_++;
}
NoWeakEmbeddedMapsVerificationScope::~NoWeakEmbeddedMapsVerificationScope() {
- HEAP->no_weak_embedded_maps_verification_scope_depth_--;
+ Isolate* isolate = Isolate::Current();
+ isolate->heap()->no_weak_embedded_maps_verification_scope_depth_--;
}
#endif
@@ -860,7 +864,7 @@ void VerifyPointersVisitor::VisitPointers(Object** start, Object** end) {
for (Object** current = start; current < end; current++) {
if ((*current)->IsHeapObject()) {
HeapObject* object = HeapObject::cast(*current);
- CHECK(HEAP->Contains(object));
+ CHECK(object->GetIsolate()->heap()->Contains(object));
CHECK(object->map()->IsMap());
}
}
@@ -868,21 +872,23 @@ void VerifyPointersVisitor::VisitPointers(Object** start, Object** end) {
double GCTracer::SizeOfHeapObjects() {
- return (static_cast<double>(HEAP->SizeOfObjects())) / MB;
+ return (static_cast<double>(heap_->SizeOfObjects())) / MB;
}
DisallowAllocationFailure::DisallowAllocationFailure() {
#ifdef DEBUG
- old_state_ = HEAP->disallow_allocation_failure_;
- HEAP->disallow_allocation_failure_ = true;
+ Isolate* isolate = Isolate::Current();
+ old_state_ = isolate->heap()->disallow_allocation_failure_;
+ isolate->heap()->disallow_allocation_failure_ = true;
#endif
}
DisallowAllocationFailure::~DisallowAllocationFailure() {
#ifdef DEBUG
- HEAP->disallow_allocation_failure_ = old_state_;
+ Isolate* isolate = Isolate::Current();
+ isolate->heap()->disallow_allocation_failure_ = old_state_;
#endif
}
diff --git a/deps/v8/src/heap-snapshot-generator.cc b/deps/v8/src/heap-snapshot-generator.cc
index 1c8a7b3dc..bd47eec63 100644
--- a/deps/v8/src/heap-snapshot-generator.cc
+++ b/deps/v8/src/heap-snapshot-generator.cc
@@ -175,6 +175,8 @@ const char* HeapEntry::TypeAsString() {
case kHeapNumber: return "/number/";
case kNative: return "/native/";
case kSynthetic: return "/synthetic/";
+ case kConsString: return "/concatenated string/";
+ case kSlicedString: return "/sliced string/";
default: return "???";
}
}
@@ -470,7 +472,7 @@ void HeapObjectsMap::StopHeapObjectsTracking() {
void HeapObjectsMap::UpdateHeapObjectsMap() {
- HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask,
+ heap_->CollectAllGarbage(Heap::kMakeHeapIterableMask,
"HeapSnapshotsCollection::UpdateHeapObjectsMap");
HeapIterator iterator(heap_);
for (HeapObject* obj = iterator.next();
@@ -558,12 +560,13 @@ void HeapObjectsMap::RemoveDeadEntries() {
}
-SnapshotObjectId HeapObjectsMap::GenerateId(v8::RetainedObjectInfo* info) {
+SnapshotObjectId HeapObjectsMap::GenerateId(Heap* heap,
+ v8::RetainedObjectInfo* info) {
SnapshotObjectId id = static_cast<SnapshotObjectId>(info->GetHash());
const char* label = info->GetLabel();
id ^= StringHasher::HashSequentialString(label,
static_cast<int>(strlen(label)),
- HEAP->HashSeed());
+ heap->HashSeed());
intptr_t element_count = info->GetElementCount();
if (element_count != -1)
id ^= ComputeIntegerHash(static_cast<uint32_t>(element_count),
@@ -583,6 +586,7 @@ size_t HeapObjectsMap::GetUsedMemorySize() const {
HeapSnapshotsCollection::HeapSnapshotsCollection(Heap* heap)
: is_tracking_objects_(false),
+ names_(heap),
ids_(heap) {
}
@@ -621,7 +625,7 @@ void HeapSnapshotsCollection::RemoveSnapshot(HeapSnapshot* snapshot) {
Handle<HeapObject> HeapSnapshotsCollection::FindHeapObjectById(
SnapshotObjectId id) {
// First perform a full GC in order to avoid dead objects.
- HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask,
+ heap()->CollectAllGarbage(Heap::kMakeHeapIterableMask,
"HeapSnapshotsCollection::FindHeapObjectById");
DisallowHeapAllocation no_allocation;
HeapObject* object = NULL;
@@ -732,7 +736,7 @@ V8HeapExplorer::V8HeapExplorer(
HeapSnapshot* snapshot,
SnapshottingProgressReportingInterface* progress,
v8::HeapProfiler::ObjectNameResolver* resolver)
- : heap_(Isolate::Current()->heap()),
+ : heap_(snapshot->collection()->heap()),
snapshot_(snapshot),
collection_(snapshot_->collection()),
progress_(progress),
@@ -782,6 +786,15 @@ HeapEntry* V8HeapExplorer::AddEntry(HeapObject* object) {
}
return AddEntry(object, HeapEntry::kObject, name);
} else if (object->IsString()) {
+ String* string = String::cast(object);
+ if (string->IsConsString())
+ return AddEntry(object,
+ HeapEntry::kConsString,
+ "(concatenated string)");
+ if (string->IsSlicedString())
+ return AddEntry(object,
+ HeapEntry::kSlicedString,
+ "(sliced string)");
return AddEntry(object,
HeapEntry::kString,
collection_->names()->GetName(String::cast(object)));
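
With this change the snapshot explorer distinguishes plain, concatenated (cons), and sliced strings when adding entries. A trivial sketch of the classification order, with boolean predicates standing in for V8's IsConsString/IsSlicedString checks:

    #include <iostream>

    enum EntryType { kString, kConsString, kSlicedString };

    struct Str { bool is_cons; bool is_sliced; };

    static EntryType Classify(const Str& s) {
      if (s.is_cons) return kConsString;      // "(concatenated string)"
      if (s.is_sliced) return kSlicedString;  // "(sliced string)"
      return kString;
    }

    int main() {
      std::cout << Classify(Str{true, false}) << '\n';   // 1: cons string
      std::cout << Classify(Str{false, false}) << '\n';  // 0: plain string
    }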
@@ -1852,7 +1865,7 @@ class GlobalObjectsEnumerator : public ObjectVisitor {
// Modifies heap. Must not be run during heap traversal.
void V8HeapExplorer::TagGlobalObjects() {
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = heap_->isolate();
HandleScope scope(isolate);
GlobalObjectsEnumerator enumerator;
isolate->global_handles()->IterateAllRoots(&enumerator);
@@ -1921,14 +1934,16 @@ HeapEntry* BasicHeapEntriesAllocator::AllocateEntry(HeapThing ptr) {
return snapshot_->AddEntry(
entries_type_,
name,
- HeapObjectsMap::GenerateId(info),
+ HeapObjectsMap::GenerateId(collection_->heap(), info),
size != -1 ? static_cast<int>(size) : 0);
}
NativeObjectsExplorer::NativeObjectsExplorer(
- HeapSnapshot* snapshot, SnapshottingProgressReportingInterface* progress)
- : snapshot_(snapshot),
+ HeapSnapshot* snapshot,
+ SnapshottingProgressReportingInterface* progress)
+ : isolate_(snapshot->collection()->heap()->isolate()),
+ snapshot_(snapshot),
collection_(snapshot_->collection()),
progress_(progress),
embedder_queried_(false),
@@ -1973,7 +1988,7 @@ int NativeObjectsExplorer::EstimateObjectsCount() {
void NativeObjectsExplorer::FillRetainedObjects() {
if (embedder_queried_) return;
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = isolate_;
const GCType major_gc_type = kGCTypeMarkSweepCompact;
// Record objects that are joined into ObjectGroups.
isolate->heap()->CallGCPrologueCallbacks(
@@ -2000,7 +2015,7 @@ void NativeObjectsExplorer::FillRetainedObjects() {
void NativeObjectsExplorer::FillImplicitReferences() {
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = isolate_;
List<ImplicitRefGroup*>* groups =
isolate->global_handles()->implicit_ref_groups();
for (int i = 0; i < groups->length(); ++i) {
@@ -2095,7 +2110,7 @@ NativeGroupRetainedObjectInfo* NativeObjectsExplorer::FindOrAddGroupInfo(
uint32_t hash = StringHasher::HashSequentialString(
label_copy,
static_cast<int>(strlen(label_copy)),
- HEAP->HashSeed());
+ isolate_->heap()->HashSeed());
HashMap::Entry* entry = native_groups_.Lookup(const_cast<char*>(label_copy),
hash, true);
if (entry->value == NULL) {
@@ -2157,7 +2172,7 @@ void NativeObjectsExplorer::SetRootNativeRootsReference() {
void NativeObjectsExplorer::VisitSubtreeWrapper(Object** p, uint16_t class_id) {
if (in_groups_.Contains(*p)) return;
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = isolate_;
v8::RetainedObjectInfo* info =
isolate->heap_profiler()->ExecuteWrapperClassCallback(class_id, p);
if (info == NULL) return;
@@ -2243,15 +2258,15 @@ bool HeapSnapshotGenerator::GenerateSnapshot() {
// full GC is reachable from the root when computing dominators.
// This is not true for weakly reachable objects.
// As a temporary solution we call GC twice.
- Isolate::Current()->heap()->CollectAllGarbage(
+ heap_->CollectAllGarbage(
Heap::kMakeHeapIterableMask,
"HeapSnapshotGenerator::GenerateSnapshot");
- Isolate::Current()->heap()->CollectAllGarbage(
+ heap_->CollectAllGarbage(
Heap::kMakeHeapIterableMask,
"HeapSnapshotGenerator::GenerateSnapshot");
#ifdef VERIFY_HEAP
- Heap* debug_heap = Isolate::Current()->heap();
+ Heap* debug_heap = heap_;
CHECK(!debug_heap->old_data_space()->was_swept_conservatively());
CHECK(!debug_heap->old_pointer_space()->was_swept_conservatively());
CHECK(!debug_heap->code_space()->was_swept_conservatively());
@@ -2583,7 +2598,9 @@ void HeapSnapshotJSONSerializer::SerializeSnapshot() {
JSON_S("regexp") ","
JSON_S("number") ","
JSON_S("native") ","
- JSON_S("synthetic")) ","
+ JSON_S("synthetic") ","
+ JSON_S("concatenated string") ","
+ JSON_S("sliced string")) ","
JSON_S("string") ","
JSON_S("number") ","
JSON_S("number") ","
diff --git a/deps/v8/src/heap-snapshot-generator.h b/deps/v8/src/heap-snapshot-generator.h
index cea995820..7b0cf8f02 100644
--- a/deps/v8/src/heap-snapshot-generator.h
+++ b/deps/v8/src/heap-snapshot-generator.h
@@ -100,7 +100,9 @@ class HeapEntry BASE_EMBEDDED {
kRegExp = v8::HeapGraphNode::kRegExp,
kHeapNumber = v8::HeapGraphNode::kHeapNumber,
kNative = v8::HeapGraphNode::kNative,
- kSynthetic = v8::HeapGraphNode::kSynthetic
+ kSynthetic = v8::HeapGraphNode::kSynthetic,
+ kConsString = v8::HeapGraphNode::kConsString,
+ kSlicedString = v8::HeapGraphNode::kSlicedString
};
static const int kNoEntry;
@@ -235,7 +237,7 @@ class HeapObjectsMap {
SnapshotObjectId PushHeapObjectsStats(OutputStream* stream);
size_t GetUsedMemorySize() const;
- static SnapshotObjectId GenerateId(v8::RetainedObjectInfo* info);
+ static SnapshotObjectId GenerateId(Heap* heap, v8::RetainedObjectInfo* info);
static inline SnapshotObjectId GetNthGcSubrootId(int delta);
static const int kObjectIdStep = 2;
@@ -538,7 +540,7 @@ class NativeGroupRetainedObjectInfo;
class NativeObjectsExplorer {
public:
NativeObjectsExplorer(HeapSnapshot* snapshot,
- SnapshottingProgressReportingInterface* progress);
+ SnapshottingProgressReportingInterface* progress);
virtual ~NativeObjectsExplorer();
void AddRootEntries(SnapshotFillerInterface* filler);
int EstimateObjectsCount();
@@ -570,6 +572,7 @@ class NativeObjectsExplorer {
NativeGroupRetainedObjectInfo* FindOrAddGroupInfo(const char* label);
+ Isolate* isolate_;
HeapSnapshot* snapshot_;
HeapSnapshotsCollection* collection_;
SnapshottingProgressReportingInterface* progress_;
diff --git a/deps/v8/src/heap.cc b/deps/v8/src/heap.cc
index 36d1a1e7b..24e403942 100644
--- a/deps/v8/src/heap.cc
+++ b/deps/v8/src/heap.cc
@@ -38,6 +38,7 @@
#include "global-handles.h"
#include "heap-profiler.h"
#include "incremental-marking.h"
+#include "isolate-inl.h"
#include "mark-compact.h"
#include "natives.h"
#include "objects-visiting.h"
@@ -47,6 +48,7 @@
#include "scopeinfo.h"
#include "snapshot.h"
#include "store-buffer.h"
+#include "utils/random-number-generator.h"
#include "v8threads.h"
#include "v8utils.h"
#include "vm-state-inl.h"
@@ -704,7 +706,7 @@ bool Heap::CollectGarbage(AllocationSpace space,
int Heap::NotifyContextDisposed() {
- if (FLAG_parallel_recompilation) {
+ if (FLAG_concurrent_recompilation) {
// Flush the queued recompilation tasks.
isolate()->optimizing_compiler_thread()->Flush();
}
@@ -729,7 +731,7 @@ void Heap::MoveElements(FixedArray* array,
int len) {
if (len == 0) return;
- ASSERT(array->map() != HEAP->fixed_cow_array_map());
+ ASSERT(array->map() != fixed_cow_array_map());
Object** dst_objects = array->data_start() + dst_index;
OS::MemMove(dst_objects,
array->data_start() + src_index,
@@ -763,9 +765,9 @@ class StringTableVerifier : public ObjectVisitor {
};
-static void VerifyStringTable() {
+static void VerifyStringTable(Heap* heap) {
StringTableVerifier verifier;
- HEAP->string_table()->IterateElements(&verifier);
+ heap->string_table()->IterateElements(&verifier);
}
#endif // VERIFY_HEAP
@@ -920,7 +922,7 @@ bool Heap::PerformGarbageCollection(GarbageCollector collector,
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
- VerifyStringTable();
+ VerifyStringTable(this);
}
#endif
@@ -1027,7 +1029,7 @@ bool Heap::PerformGarbageCollection(GarbageCollector collector,
isolate_->eternal_handles()->PostGarbageCollectionProcessing(this);
// Update relocatables.
- Relocatable::PostGarbageCollectionProcessing();
+ Relocatable::PostGarbageCollectionProcessing(isolate_);
if (collector == MARK_COMPACTOR) {
// Register the amount of external allocated memory.
@@ -1044,7 +1046,7 @@ bool Heap::PerformGarbageCollection(GarbageCollector collector,
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
- VerifyStringTable();
+ VerifyStringTable(this);
}
#endif
@@ -1152,29 +1154,33 @@ class ScavengeVisitor: public ObjectVisitor {
// new space.
class VerifyNonPointerSpacePointersVisitor: public ObjectVisitor {
public:
+ explicit VerifyNonPointerSpacePointersVisitor(Heap* heap) : heap_(heap) {}
void VisitPointers(Object** start, Object**end) {
for (Object** current = start; current < end; current++) {
if ((*current)->IsHeapObject()) {
- CHECK(!HEAP->InNewSpace(HeapObject::cast(*current)));
+ CHECK(!heap_->InNewSpace(HeapObject::cast(*current)));
}
}
}
+
+ private:
+ Heap* heap_;
};
-static void VerifyNonPointerSpacePointers() {
+static void VerifyNonPointerSpacePointers(Heap* heap) {
// Verify that there are no pointers to new space in spaces where we
// do not expect them.
- VerifyNonPointerSpacePointersVisitor v;
- HeapObjectIterator code_it(HEAP->code_space());
+ VerifyNonPointerSpacePointersVisitor v(heap);
+ HeapObjectIterator code_it(heap->code_space());
for (HeapObject* object = code_it.Next();
object != NULL; object = code_it.Next())
object->Iterate(&v);
  // The old data space is normally swept conservatively, so the iterator
  // does not work there; in that case we skip this part.
- if (!HEAP->old_data_space()->was_swept_conservatively()) {
- HeapObjectIterator data_it(HEAP->old_data_space());
+ if (!heap->old_data_space()->was_swept_conservatively()) {
+ HeapObjectIterator data_it(heap->old_data_space());
for (HeapObject* object = data_it.Next();
object != NULL; object = data_it.Next())
object->Iterate(&v);
@@ -1321,7 +1327,7 @@ void Heap::Scavenge() {
RelocationLock relocation_lock(this);
#ifdef VERIFY_HEAP
- if (FLAG_verify_heap) VerifyNonPointerSpacePointers();
+ if (FLAG_verify_heap) VerifyNonPointerSpacePointers(this);
#endif
gc_state_ = SCAVENGE;
@@ -1616,6 +1622,29 @@ struct WeakListVisitor<JSFunction> {
template<>
+struct WeakListVisitor<Code> {
+ static void SetWeakNext(Code* code, Object* next) {
+ code->set_next_code_link(next);
+ }
+
+ static Object* WeakNext(Code* code) {
+ return code->next_code_link();
+ }
+
+ static int WeakNextOffset() {
+ return Code::kNextCodeLinkOffset;
+ }
+
+ static void VisitLiveObject(Heap*, Code*,
+ WeakObjectRetainer*, bool) {
+ }
+
+ static void VisitPhantomObject(Heap*, Code*) {
+ }
+};
+
+
+template<>
struct WeakListVisitor<Context> {
static void SetWeakNext(Context* context, Object* next) {
context->set(Context::NEXT_CONTEXT_LINK,
@@ -1631,22 +1660,34 @@ struct WeakListVisitor<Context> {
Context* context,
WeakObjectRetainer* retainer,
bool record_slots) {
- // Process the weak list of optimized functions for the context.
- Object* function_list_head =
- VisitWeakList<JSFunction>(
- heap,
- context->get(Context::OPTIMIZED_FUNCTIONS_LIST),
- retainer,
- record_slots);
- context->set(Context::OPTIMIZED_FUNCTIONS_LIST,
- function_list_head,
- UPDATE_WRITE_BARRIER);
+ // Process the three weak lists linked off the context.
+ DoWeakList<JSFunction>(heap, context, retainer, record_slots,
+ Context::OPTIMIZED_FUNCTIONS_LIST);
+ DoWeakList<Code>(heap, context, retainer, record_slots,
+ Context::OPTIMIZED_CODE_LIST);
+ DoWeakList<Code>(heap, context, retainer, record_slots,
+ Context::DEOPTIMIZED_CODE_LIST);
+ }
+
+ template<class T>
+ static void DoWeakList(Heap* heap,
+ Context* context,
+ WeakObjectRetainer* retainer,
+ bool record_slots,
+ int index) {
+ // Visit the weak list, removing dead intermediate elements.
+ Object* list_head = VisitWeakList<T>(heap, context->get(index), retainer,
+ record_slots);
+
+ // Update the list head.
+ context->set(index, list_head, UPDATE_WRITE_BARRIER);
+
if (record_slots) {
- Object** optimized_functions =
- HeapObject::RawField(
- context, FixedArray::SizeFor(Context::OPTIMIZED_FUNCTIONS_LIST));
+ // Record the updated slot if necessary.
+ Object** head_slot = HeapObject::RawField(
+ context, FixedArray::SizeFor(index));
heap->mark_compact_collector()->RecordSlot(
- optimized_functions, optimized_functions, function_list_head);
+ head_slot, head_slot, list_head);
}
}
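The WeakListVisitor<Code> specialization above plugs into the same generic machinery as the JSFunction and Context lists: a traits struct names the "next" link, and one shared walker prunes dead elements from any intrusive weak list. A compilable toy of that traits pattern, assuming a hypothetical Node type with an explicit alive flag in place of the GC's liveness query:

#include <cstdio>

struct Node {
  bool alive;
  Node* next_link;
};

template <class T> struct WeakTraits;

template <> struct WeakTraits<Node> {
  static Node* WeakNext(Node* n) { return n->next_link; }
  static void SetWeakNext(Node* n, Node* next) { n->next_link = next; }
};

// Walks the list once, relinking live elements and skipping dead ones,
// and returns the new head (the role VisitWeakList<T> plays above).
template <class T>
T* VisitWeakList(T* head) {
  T* new_head = nullptr;
  T* last_live = nullptr;
  for (T* cur = head; cur != nullptr; cur = WeakTraits<T>::WeakNext(cur)) {
    if (!cur->alive) continue;  // dead element: unlink by skipping it
    if (last_live == nullptr) new_head = cur;
    else WeakTraits<T>::SetWeakNext(last_live, cur);
    last_live = cur;
  }
  if (last_live != nullptr) WeakTraits<T>::SetWeakNext(last_live, nullptr);
  return new_head;
}

int main() {
  Node c{true, nullptr}, b{false, &c}, a{true, &b};
  Node* head = VisitWeakList(&a);  // b is dead, so the list becomes a -> c
  for (Node* n = head; n != nullptr; n = n->next_link) std::printf("live\n");
}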
@@ -2340,7 +2381,7 @@ void Heap::SelectScavengingVisitorsTable() {
void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
- SLOW_ASSERT(HEAP->InFromSpace(object));
+ SLOW_ASSERT(object->GetIsolate()->heap()->InFromSpace(object));
MapWord first_word = object->map_word();
SLOW_ASSERT(!first_word.IsForwardingAddress());
Map* map = first_word.ToMap();
@@ -2429,6 +2470,7 @@ MaybeObject* Heap::AllocateAccessorPair() {
}
accessors->set_getter(the_hole_value(), SKIP_WRITE_BARRIER);
accessors->set_setter(the_hole_value(), SKIP_WRITE_BARRIER);
+ accessors->set_access_flags(Smi::FromInt(0), SKIP_WRITE_BARRIER);
return accessors;
}
@@ -2937,7 +2979,7 @@ MaybeObject* Heap::CreateOddball(const char* to_string,
{ MaybeObject* maybe_result = Allocate(oddball_map(), OLD_POINTER_SPACE);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
- return Oddball::cast(result)->Initialize(to_string, to_number, kind);
+ return Oddball::cast(result)->Initialize(this, to_string, to_number, kind);
}
@@ -3041,15 +3083,16 @@ bool Heap::CreateInitialObjects() {
// Finish initializing oddballs after creating the string table.
{ MaybeObject* maybe_obj =
- undefined_value()->Initialize("undefined",
+ undefined_value()->Initialize(this,
+ "undefined",
nan_value(),
Oddball::kUndefined);
if (!maybe_obj->ToObject(&obj)) return false;
}
// Initialize the null_value.
- { MaybeObject* maybe_obj =
- null_value()->Initialize("null", Smi::FromInt(0), Oddball::kNull);
+ { MaybeObject* maybe_obj = null_value()->Initialize(
+ this, "null", Smi::FromInt(0), Oddball::kNull);
if (!maybe_obj->ToObject(&obj)) return false;
}
@@ -3413,7 +3456,7 @@ void Heap::FlushNumberStringCache() {
// Flush the number to string cache.
int len = number_string_cache()->length();
for (int i = 0; i < len; i++) {
- number_string_cache()->set_undefined(this, i);
+ number_string_cache()->set_undefined(i);
}
}
@@ -3644,7 +3687,7 @@ MaybeObject* Heap::AllocateSharedFunctionInfo(Object* name) {
share->set_function_token_position(0);
// All compiler hints default to false or 0.
share->set_compiler_hints(0);
- share->set_opt_count(0);
+ share->set_opt_count_and_bailout_reason(0);
return share;
}
@@ -4328,7 +4371,7 @@ MaybeObject* Heap::AllocateFunctionPrototype(JSFunction* function) {
if (!function->shared()->is_generator()) {
MaybeObject* maybe_failure =
- JSObject::cast(prototype)->SetLocalPropertyIgnoreAttributes(
+ JSObject::cast(prototype)->SetLocalPropertyIgnoreAttributesTrampoline(
constructor_string(), function, DONT_ENUM);
if (maybe_failure->IsFailure()) return maybe_failure;
}
@@ -5725,7 +5768,7 @@ MaybeObject* Heap::AllocateSymbol() {
int hash;
int attempts = 0;
do {
- hash = V8::RandomPrivate(isolate()) & Name::kHashBitMask;
+ hash = isolate()->random_number_generator()->NextInt() & Name::kHashBitMask;
attempts++;
} while (hash == 0 && attempts < 30);
if (hash == 0) hash = 1; // never return 0
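The loop above is a bounded rejection-sampling idiom: draw random bits, mask them to the hash width, retry (at most 30 times) because 0 is reserved, and force a nonzero fallback. A standalone sketch of the same idiom, using std::mt19937 as a stand-in for the isolate's random number generator and assuming a 30-bit mask:

#include <cstdint>
#include <random>

int main() {
  std::mt19937 rng(42);                      // stand-in for the isolate RNG
  const uint32_t kHashBitMask = 0x3fffffff;  // assumed 30-bit hash width
  uint32_t hash = 0;
  int attempts = 0;
  do {
    hash = static_cast<uint32_t>(rng()) & kHashBitMask;
    attempts++;
  } while (hash == 0 && attempts < 30);
  if (hash == 0) hash = 1;  // never return 0, as in the code above
  return hash == 0;         // always exits 0
}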
@@ -6114,12 +6157,12 @@ void Heap::Print() {
void Heap::ReportCodeStatistics(const char* title) {
PrintF(">>>>>> Code Stats (%s) >>>>>>\n", title);
- PagedSpace::ResetCodeStatistics();
+ PagedSpace::ResetCodeStatistics(isolate());
// We do not look for code in new space, map space, or old space. If code
// somehow ends up in those spaces, we would miss it here.
code_space_->CollectCodeStatistics();
lo_space_->CollectCodeStatistics();
- PagedSpace::ReportCodeStatistics();
+ PagedSpace::ReportCodeStatistics(isolate());
}
@@ -6167,7 +6210,7 @@ bool Heap::Contains(HeapObject* value) {
bool Heap::Contains(Address addr) {
- if (OS::IsOutsideAllocatedSpace(addr)) return false;
+ if (isolate_->memory_allocator()->IsOutsideAllocatedSpace(addr)) return false;
return HasBeenSetUp() &&
(new_space_.ToSpaceContains(addr) ||
old_pointer_space_->Contains(addr) ||
@@ -6186,7 +6229,7 @@ bool Heap::InSpace(HeapObject* value, AllocationSpace space) {
bool Heap::InSpace(Address addr, AllocationSpace space) {
- if (OS::IsOutsideAllocatedSpace(addr)) return false;
+ if (isolate_->memory_allocator()->IsOutsideAllocatedSpace(addr)) return false;
if (!HasBeenSetUp()) return false;
switch (space) {
@@ -6573,7 +6616,7 @@ void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
v->Synchronize(VisitorSynchronization::kBootstrapper);
isolate_->Iterate(v);
v->Synchronize(VisitorSynchronization::kTop);
- Relocatable::Iterate(v);
+ Relocatable::Iterate(isolate_, v);
v->Synchronize(VisitorSynchronization::kRelocatable);
#ifdef ENABLE_DEBUGGER_SUPPORT
@@ -6634,7 +6677,7 @@ void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
// serialization this does nothing, since the partial snapshot cache is
  // empty. However, the next thing we do is create the partial snapshot,
// filling up the partial snapshot cache with objects it needs as we go.
- SerializerDeserializer::Iterate(v);
+ SerializerDeserializer::Iterate(isolate_, v);
// We don't do a v->Synchronize call here, because in debug mode that will
  // output a flag to the snapshot. However, at this point the serializer and
// deserializer are deliberately a little unsynchronized (see above) so the
@@ -6715,6 +6758,12 @@ bool Heap::ConfigureHeap(int max_semispace_size,
RoundUp(max_old_generation_size_,
Page::kPageSize));
+ // We rely on being able to allocate new arrays in paged spaces.
+ ASSERT(MaxRegularSpaceAllocationSize() >=
+ (JSArray::kSize +
+ FixedArray::SizeFor(JSObject::kInitialMaxFastElementArray) +
+ AllocationMemento::kSize));
+
configured_ = true;
return true;
}
@@ -6884,8 +6933,8 @@ bool Heap::SetUp() {
ASSERT(hash_seed() == 0);
if (FLAG_randomize_hashes) {
if (FLAG_hash_seed == 0) {
- set_hash_seed(
- Smi::FromInt(V8::RandomPrivate(isolate()) & 0x3fffffff));
+ int rnd = isolate()->random_number_generator()->NextInt();
+ set_hash_seed(Smi::FromInt(rnd & Name::kHashBitMask));
} else {
set_hash_seed(Smi::FromInt(FLAG_hash_seed));
}
@@ -6896,7 +6945,7 @@ bool Heap::SetUp() {
store_buffer()->SetUp();
- if (FLAG_parallel_recompilation) relocation_mutex_ = OS::CreateMutex();
+ if (FLAG_concurrent_recompilation) relocation_mutex_ = new Mutex;
#ifdef DEBUG
relocation_mutex_locked_by_optimizer_thread_ = false;
#endif // DEBUG
@@ -7232,12 +7281,12 @@ class HeapObjectsFilter {
class UnreachableObjectsFilter : public HeapObjectsFilter {
public:
- UnreachableObjectsFilter() {
+ explicit UnreachableObjectsFilter(Heap* heap) : heap_(heap) {
MarkReachableObjects();
}
~UnreachableObjectsFilter() {
- Isolate::Current()->heap()->mark_compact_collector()->ClearMarkbits();
+ heap_->mark_compact_collector()->ClearMarkbits();
}
bool SkipObject(HeapObject* object) {
@@ -7274,12 +7323,12 @@ class UnreachableObjectsFilter : public HeapObjectsFilter {
};
void MarkReachableObjects() {
- Heap* heap = Isolate::Current()->heap();
MarkingVisitor visitor;
- heap->IterateRoots(&visitor, VISIT_ALL);
+ heap_->IterateRoots(&visitor, VISIT_ALL);
visitor.TransitiveClosure();
}
+ Heap* heap_;
DisallowHeapAllocation no_allocation_;
};
@@ -7311,7 +7360,7 @@ void HeapIterator::Init() {
space_iterator_ = new SpaceIterator(heap_);
switch (filtering_) {
case kFilterUnreachable:
- filter_ = new UnreachableObjectsFilter;
+ filter_ = new UnreachableObjectsFilter(heap_);
break;
default:
break;
@@ -7796,7 +7845,7 @@ int KeyedLookupCache::Lookup(Map* map, Name* name) {
void KeyedLookupCache::Update(Map* map, Name* name, int field_offset) {
if (!name->IsUniqueName()) {
String* internalized_string;
- if (!HEAP->InternalizeStringIfExists(
+ if (!map->GetIsolate()->heap()->InternalizeStringIfExists(
String::cast(name), &internalized_string)) {
return;
}
@@ -7804,7 +7853,7 @@ void KeyedLookupCache::Update(Map* map, Name* name, int field_offset) {
}
// This cache is cleared only between mark compact passes, so we expect the
// cache to only contain old space names.
- ASSERT(!HEAP->InNewSpace(name));
+ ASSERT(!map->GetIsolate()->heap()->InNewSpace(name));
int index = (Hash(map, name) & kHashMask);
// After a GC there will be free slots, so we use them in order (this may
@@ -7856,9 +7905,9 @@ void Heap::GarbageCollectionGreedyCheck() {
#endif
-TranscendentalCache::SubCache::SubCache(Type t)
+TranscendentalCache::SubCache::SubCache(Isolate* isolate, Type t)
: type_(t),
- isolate_(Isolate::Current()) {
+ isolate_(isolate) {
uint32_t in0 = 0xffffffffu; // Bit-pattern for a NaN that isn't
uint32_t in1 = 0xffffffffu; // generated by the FPU.
for (int i = 0; i < kCacheSize; i++) {
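The 0xffffffff seed words above exploit IEEE-754 encoding: an all-ones bit pattern decodes to a NaN payload the FPU never produces on its own, so an untouched cache slot can never be mistaken for a real cached input. A quick standalone check that the pattern really decodes to NaN:

#include <cmath>
#include <cstdint>
#include <cstdio>
#include <cstring>

int main() {
  uint32_t words[2] = {0xffffffffu, 0xffffffffu};  // the seed pattern above
  double value;
  static_assert(sizeof(words) == sizeof(value), "double must be 64 bits");
  std::memcpy(&value, words, sizeof(value));  // well-defined type pun
  std::printf("isnan = %d\n", std::isnan(value));  // prints isnan = 1
}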
@@ -7885,6 +7934,7 @@ void ExternalStringTable::CleanUp() {
if (new_space_strings_[i] == heap_->the_hole_value()) {
continue;
}
+ ASSERT(new_space_strings_[i]->IsExternalString());
if (heap_->InNewSpace(new_space_strings_[i])) {
new_space_strings_[last++] = new_space_strings_[i];
} else {
@@ -7899,6 +7949,7 @@ void ExternalStringTable::CleanUp() {
if (old_space_strings_[i] == heap_->the_hole_value()) {
continue;
}
+ ASSERT(old_space_strings_[i]->IsExternalString());
ASSERT(!heap_->InNewSpace(old_space_strings_[i]));
old_space_strings_[last++] = old_space_strings_[i];
}
@@ -8001,7 +8052,7 @@ static LazyMutex checkpoint_object_stats_mutex = LAZY_MUTEX_INITIALIZER;
void Heap::CheckpointObjectStats() {
- ScopedLock lock(checkpoint_object_stats_mutex.Pointer());
+ LockGuard<Mutex> lock_guard(checkpoint_object_stats_mutex.Pointer());
Counters* counters = isolate()->counters();
#define ADJUST_LAST_TIME_OBJECT_COUNT(name) \
counters->count_of_##name()->Increment( \
@@ -8047,7 +8098,7 @@ void Heap::CheckpointObjectStats() {
Heap::RelocationLock::RelocationLock(Heap* heap) : heap_(heap) {
- if (FLAG_parallel_recompilation) {
+ if (FLAG_concurrent_recompilation) {
heap_->relocation_mutex_->Lock();
#ifdef DEBUG
heap_->relocation_mutex_locked_by_optimizer_thread_ =
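RelocationLock pairs with the heap.h hunk below: an RAII guard that acquires the mutex only when concurrent recompilation is enabled, keeping the common path lock-free. A minimal sketch of that conditional-guard shape using the standard library (FLAG_concurrent is a stand-in for FLAG_concurrent_recompilation, assumed constant for the guard's lifetime):

#include <mutex>

static bool FLAG_concurrent = true;

class ConditionalLock {
 public:
  explicit ConditionalLock(std::mutex* mutex) : mutex_(mutex) {
    if (FLAG_concurrent) mutex_->lock();
  }
  ~ConditionalLock() {
    if (FLAG_concurrent) mutex_->unlock();  // released on scope exit
  }
 private:
  std::mutex* mutex_;
};

int main() {
  std::mutex relocation_mutex;
  ConditionalLock lock(&relocation_mutex);
}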
diff --git a/deps/v8/src/heap.h b/deps/v8/src/heap.h
index 3472ec093..4dfa076eb 100644
--- a/deps/v8/src/heap.h
+++ b/deps/v8/src/heap.h
@@ -78,9 +78,9 @@ namespace internal {
V(DescriptorArray, empty_descriptor_array, EmptyDescriptorArray) \
V(Smi, stack_limit, StackLimit) \
V(Oddball, arguments_marker, ArgumentsMarker) \
- /* The first 32 roots above this line should be boring from a GC point of */ \
- /* view. This means they are never in new space and never on a page that */ \
- /* is being compacted. */ \
+ /* The roots above this line should be boring from a GC point of view. */ \
+ /* This means they are never in new space and never on a page that is */ \
+ /* being compacted. */ \
V(FixedArray, number_string_cache, NumberStringCache) \
V(Object, instanceof_cache_function, InstanceofCacheFunction) \
V(Object, instanceof_cache_map, InstanceofCacheMap) \
@@ -199,7 +199,6 @@ namespace internal {
V(Array_string, "Array") \
V(Object_string, "Object") \
V(proto_string, "__proto__") \
- V(StringImpl_string, "StringImpl") \
V(arguments_string, "arguments") \
V(Arguments_string, "Arguments") \
V(call_string, "call") \
@@ -209,12 +208,10 @@ namespace internal {
V(Boolean_string, "Boolean") \
V(callee_string, "callee") \
V(constructor_string, "constructor") \
- V(code_string, ".code") \
V(result_string, ".result") \
V(dot_for_string, ".for.") \
- V(catch_var_string, ".catch-var") \
- V(empty_string, "") \
V(eval_string, "eval") \
+ V(empty_string, "") \
V(function_string, "function") \
V(length_string, "length") \
V(module_string, "module") \
@@ -233,12 +230,10 @@ namespace internal {
V(index_string, "index") \
V(last_index_string, "lastIndex") \
V(object_string, "object") \
- V(payload_string, "payload") \
V(literals_string, "literals") \
V(prototype_string, "prototype") \
V(string_string, "string") \
V(String_string, "String") \
- V(unknown_field_string, "unknownField") \
V(symbol_string, "symbol") \
V(Symbol_string, "Symbol") \
V(Date_string, "Date") \
@@ -257,7 +252,6 @@ namespace internal {
"KeyedStoreElementMonomorphic") \
V(stack_overflow_string, "kStackOverflowBoilerplate") \
V(illegal_access_string, "illegal access") \
- V(out_of_memory_string, "out-of-memory") \
V(illegal_execution_state_string, "illegal execution state") \
V(get_string, "get") \
V(set_string, "set") \
@@ -266,8 +260,6 @@ namespace internal {
V(length_field_string, "%length") \
V(cell_value_string, "%cell_value") \
V(function_class_string, "Function") \
- V(properties_field_symbol, "%properties") \
- V(payload_field_symbol, "%payload") \
V(illegal_argument_string, "illegal argument") \
V(MakeReferenceError_string, "MakeReferenceError") \
V(MakeSyntaxError_string, "MakeSyntaxError") \
@@ -281,7 +273,6 @@ namespace internal {
V(illegal_continue_string, "illegal_continue") \
V(unknown_label_string, "unknown_label") \
V(redeclaration_string, "redeclaration") \
- V(failure_string, "<failure>") \
V(space_string, " ") \
V(exec_string, "exec") \
V(zero_string, "0") \
@@ -523,7 +514,7 @@ class Heap {
int InitialSemiSpaceSize() { return initial_semispace_size_; }
intptr_t MaxOldGenerationSize() { return max_old_generation_size_; }
intptr_t MaxExecutableSize() { return max_executable_size_; }
- int MaxRegularSpaceAllocationSize() { return InitialSemiSpaceSize() * 3/4; }
+ int MaxRegularSpaceAllocationSize() { return InitialSemiSpaceSize() * 4/5; }
// Returns the capacity of the heap in bytes w/o growing. Heap grows when
// more spaces are needed until it reaches the limit.
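Raising the cap from 3/4 to 4/5 of the initial semispace enlarges the largest object the regular paged spaces accept, which the new ASSERT in ConfigureHeap (in the heap.cc hunk above) relies on. A worked example with a hypothetical 512 KB initial semispace, showing the integer arithmetic:

#include <cstdio>

int main() {
  int initial_semispace_size = 512 * 1024;       // hypothetical configuration
  int old_cap = initial_semispace_size * 3 / 4;  // 393216 bytes
  int new_cap = initial_semispace_size * 4 / 5;  // 419430 bytes
  std::printf("old=%d new=%d\n", old_cap, new_cap);
}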
@@ -1871,14 +1862,14 @@ class Heap {
void CheckpointObjectStats();
- // We don't use a ScopedLock here since we want to lock the heap
- // only when FLAG_parallel_recompilation is true.
+ // We don't use a LockGuard here since we want to lock the heap
+ // only when FLAG_concurrent_recompilation is true.
class RelocationLock {
public:
explicit RelocationLock(Heap* heap);
~RelocationLock() {
- if (FLAG_parallel_recompilation) {
+ if (FLAG_concurrent_recompilation) {
#ifdef DEBUG
heap_->relocation_mutex_locked_by_optimizer_thread_ = false;
#endif // DEBUG
@@ -2881,7 +2872,7 @@ class TranscendentalCache {
class SubCache {
static const int kCacheSize = 512;
- explicit SubCache(Type t);
+ explicit SubCache(Isolate* isolate, Type t);
MUST_USE_RESULT inline MaybeObject* Get(double input);
@@ -2918,7 +2909,7 @@ class TranscendentalCache {
DISALLOW_COPY_AND_ASSIGN(SubCache);
};
- TranscendentalCache() {
+ explicit TranscendentalCache(Isolate* isolate) : isolate_(isolate) {
for (int i = 0; i < kNumberOfCaches; ++i) caches_[i] = NULL;
}
@@ -2936,6 +2927,7 @@ class TranscendentalCache {
// Allow access to the caches_ array as an ExternalReference.
friend class ExternalReference;
+ Isolate* isolate_;
SubCache* caches_[kNumberOfCaches];
DISALLOW_COPY_AND_ASSIGN(TranscendentalCache);
};
diff --git a/deps/v8/src/hydrogen-alias-analysis.h b/deps/v8/src/hydrogen-alias-analysis.h
new file mode 100644
index 000000000..73e116e63
--- /dev/null
+++ b/deps/v8/src/hydrogen-alias-analysis.h
@@ -0,0 +1,105 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_HYDROGEN_ALIAS_ANALYSIS_H_
+#define V8_HYDROGEN_ALIAS_ANALYSIS_H_
+
+#include "hydrogen.h"
+
+namespace v8 {
+namespace internal {
+
+enum HAliasing {
+ kMustAlias,
+ kMayAlias,
+ kNoAlias
+};
+
+
+// Defines the interface to alias analysis for the rest of the compiler.
+// A simple implementation can use only local reasoning, but a more powerful
+// analysis might employ points-to analysis.
+class HAliasAnalyzer : public ZoneObject {
+ public:
+ // Simple alias analysis distinguishes allocations, parameters,
+ // and constants using only local reasoning.
+ HAliasing Query(HValue* a, HValue* b) {
+ // The same SSA value always references the same object.
+ if (a == b) return kMustAlias;
+
+ if (a->IsAllocate() || a->IsInnerAllocatedObject()) {
+ // Two non-identical allocations can never be aliases.
+ if (b->IsAllocate()) return kNoAlias;
+ if (b->IsInnerAllocatedObject()) return kNoAlias;
+ // An allocation can never alias a parameter or a constant.
+ if (b->IsParameter()) return kNoAlias;
+ if (b->IsConstant()) return kNoAlias;
+ }
+ if (b->IsAllocate() || b->IsInnerAllocatedObject()) {
+ // An allocation can never alias a parameter or a constant.
+ if (a->IsParameter()) return kNoAlias;
+ if (a->IsConstant()) return kNoAlias;
+ }
+
+ // Constant objects can be distinguished statically.
+ if (a->IsConstant()) {
+ // TODO(titzer): DataEquals() is more efficient, but that's protected.
+ return a->Equals(b) ? kMustAlias : kNoAlias;
+ }
+ return kMayAlias;
+ }
+
+ // Checks whether the objects referred to by the given instructions may
+ // ever be aliases. Note that this is more conservative than checking
+ // {Query(a, b) == kMayAlias}, since this method considers kMustAlias
+ // objects to also be may-aliasing.
+ inline bool MayAlias(HValue* a, HValue* b) {
+ return Query(a, b) != kNoAlias;
+ }
+
+ inline bool MustAlias(HValue* a, HValue* b) {
+ return Query(a, b) == kMustAlias;
+ }
+
+ inline bool NoAlias(HValue* a, HValue* b) {
+ return Query(a, b) == kNoAlias;
+ }
+
+ // Returns the actual value of an instruction. In the case of a chain
+  // of informative definitions, returns the root of the chain.
+ HValue* ActualValue(HValue* obj) {
+ while (obj->IsInformativeDefinition()) { // Walk a chain of idefs.
+ obj = obj->RedefinedOperand();
+ }
+ return obj;
+ }
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_HYDROGEN_ALIAS_ANALYSIS_H_
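A client phase would typically call ActualValue() on both operands and then branch on Query(). Since HValue cannot be reproduced here, the following toy re-derives the same three-valued local reasoning over a hypothetical ToyValue covering only allocations, parameters, and constants:

#include <cstdio>

enum Aliasing { kToyMustAlias, kToyMayAlias, kToyNoAlias };

struct ToyValue {
  enum Kind { kAllocation, kParameter, kConstant } kind;
  int constant_value;  // only meaningful for kConstant
};

Aliasing Query(const ToyValue* a, const ToyValue* b) {
  if (a == b) return kToyMustAlias;  // same SSA value, same object
  // A fresh allocation cannot alias a distinct allocation, a parameter,
  // or a constant (local reasoning, as in HAliasAnalyzer::Query above).
  if (a->kind == ToyValue::kAllocation || b->kind == ToyValue::kAllocation)
    return kToyNoAlias;
  // Constants can be distinguished statically.
  if (a->kind == ToyValue::kConstant && b->kind == ToyValue::kConstant)
    return a->constant_value == b->constant_value ? kToyMustAlias
                                                  : kToyNoAlias;
  return kToyMayAlias;  // e.g. two parameters may name the same object
}

int main() {
  ToyValue alloc{ToyValue::kAllocation, 0};
  ToyValue param{ToyValue::kParameter, 0};
  ToyValue c1{ToyValue::kConstant, 7}, c2{ToyValue::kConstant, 7};
  std::printf("%d %d %d\n",
              Query(&alloc, &param),  // kToyNoAlias
              Query(&param, &param),  // kToyMustAlias
              Query(&c1, &c2));       // kToyMustAlias (equal constants)
}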
diff --git a/deps/v8/src/hydrogen-bce.cc b/deps/v8/src/hydrogen-bce.cc
index 7c81ec145..869db54a2 100644
--- a/deps/v8/src/hydrogen-bce.cc
+++ b/deps/v8/src/hydrogen-bce.cc
@@ -318,12 +318,54 @@ void BoundsCheckTable::Delete(BoundsCheckKey* key) {
}
+class HBoundsCheckEliminationState {
+ public:
+ HBasicBlock* block_;
+ BoundsCheckBbData* bb_data_list_;
+ int index_;
+};
+
+
// Eliminates checks in bb and recursively in the dominated blocks.
// Also replace the results of check instructions with the original value, if
// the result is used. This is safe now, since we don't do code motion after
// this point. It enables better register allocation since the value produced
// by check instructions is really a copy of the original value.
void HBoundsCheckEliminationPhase::EliminateRedundantBoundsChecks(
+ HBasicBlock* entry) {
+ // Allocate the stack.
+ HBoundsCheckEliminationState* stack =
+ zone()->NewArray<HBoundsCheckEliminationState>(graph()->blocks()->length());
+
+ // Explicitly push the entry block.
+ stack[0].block_ = entry;
+ stack[0].bb_data_list_ = PreProcessBlock(entry);
+ stack[0].index_ = 0;
+ int stack_depth = 1;
+
+ // Implement depth-first traversal with a stack.
+ while (stack_depth > 0) {
+ int current = stack_depth - 1;
+ HBoundsCheckEliminationState* state = &stack[current];
+ const ZoneList<HBasicBlock*>* children = state->block_->dominated_blocks();
+
+ if (state->index_ < children->length()) {
+      // Visit the next child block by pushing it onto the stack.
+ HBasicBlock* child = children->at(state->index_++);
+ int next = stack_depth++;
+ stack[next].block_ = child;
+ stack[next].bb_data_list_ = PreProcessBlock(child);
+ stack[next].index_ = 0;
+ } else {
+      // Finished with all children; post-process the block.
+ PostProcessBlock(state->block_, state->bb_data_list_);
+ stack_depth--;
+ }
+ }
+}
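The rewrite above replaces recursion over dominated_blocks() with an explicit stack, so native stack usage stays bounded on deep dominator trees while the pre/post ordering is preserved. The same traversal shape in miniature (Block and children are hypothetical stand-ins for HBasicBlock and its dominated blocks):

#include <cstdio>
#include <vector>

struct Block {
  int id;
  std::vector<Block*> children;
};

void Traverse(Block* entry) {
  struct Frame { Block* block; size_t index; };
  std::vector<Frame> stack;
  std::printf("pre %d\n", entry->id);           // PreProcessBlock(entry)
  stack.push_back({entry, 0});
  while (!stack.empty()) {
    Frame& top = stack.back();
    if (top.index < top.block->children.size()) {
      Block* child = top.block->children[top.index++];
      std::printf("pre %d\n", child->id);       // PreProcessBlock(child)
      stack.push_back({child, 0});              // descend instead of recursing
    } else {
      std::printf("post %d\n", top.block->id);  // PostProcessBlock(block)
      stack.pop_back();
    }
  }
}

int main() {
  Block c{3, {}}, b{2, {&c}}, a{1, {&b}};
  Traverse(&a);  // pre 1, pre 2, pre 3, post 3, post 2, post 1
}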
+
+
+BoundsCheckBbData* HBoundsCheckEliminationPhase::PreProcessBlock(
HBasicBlock* bb) {
BoundsCheckBbData* bb_data_list = NULL;
@@ -375,19 +417,20 @@ void HBoundsCheckEliminationPhase::EliminateRedundantBoundsChecks(
}
}
- for (int i = 0; i < bb->dominated_blocks()->length(); ++i) {
- EliminateRedundantBoundsChecks(bb->dominated_blocks()->at(i));
- }
+ return bb_data_list;
+}
+
- for (BoundsCheckBbData* data = bb_data_list;
- data != NULL;
- data = data->NextInBasicBlock()) {
+void HBoundsCheckEliminationPhase::PostProcessBlock(
+ HBasicBlock* block, BoundsCheckBbData* data) {
+ while (data != NULL) {
data->RemoveZeroOperations();
if (data->FatherInDominatorTree()) {
table_.Insert(data->Key(), data->FatherInDominatorTree(), zone());
} else {
table_.Delete(data->Key());
}
+ data = data->NextInBasicBlock();
}
}
diff --git a/deps/v8/src/hydrogen-bce.h b/deps/v8/src/hydrogen-bce.h
index d91997bda..c55dea7b7 100644
--- a/deps/v8/src/hydrogen-bce.h
+++ b/deps/v8/src/hydrogen-bce.h
@@ -60,6 +60,8 @@ class HBoundsCheckEliminationPhase : public HPhase {
private:
void EliminateRedundantBoundsChecks(HBasicBlock* bb);
+ BoundsCheckBbData* PreProcessBlock(HBasicBlock* bb);
+ void PostProcessBlock(HBasicBlock* bb, BoundsCheckBbData* data);
BoundsCheckTable table_;
diff --git a/deps/v8/src/hydrogen-bch.cc b/deps/v8/src/hydrogen-bch.cc
index 137d62954..a0a0fee71 100644
--- a/deps/v8/src/hydrogen-bch.cc
+++ b/deps/v8/src/hydrogen-bch.cc
@@ -102,10 +102,11 @@ class InductionVariableBlocksTable BASE_EMBEDDED {
int current_dominated_block_;
};
- HGraph* graph() { return graph_; }
- HBasicBlock* loop_header() { return loop_header_; }
- Element* at(int index) { return &(elements_.at(index)); }
- Element* at(HBasicBlock* block) { return at(block->block_id()); }
+ HGraph* graph() const { return graph_; }
+ Counters* counters() const { return graph()->isolate()->counters(); }
+ HBasicBlock* loop_header() const { return loop_header_; }
+ Element* at(int index) const { return &(elements_.at(index)); }
+ Element* at(HBasicBlock* block) const { return at(block->block_id()); }
void AddCheckAt(HBasicBlock* block) {
at(block->block_id())->set_has_check();
@@ -258,23 +259,17 @@ class InductionVariableBlocksTable BASE_EMBEDDED {
    // all checks are done on constants: if all checks are done against the same
    // constant limit, we will use that instead of the induction limit.
bool has_upper_constant_limit = true;
- InductionVariableData::InductionVariableCheck* current_check = check;
int32_t upper_constant_limit =
- current_check != NULL && current_check->HasUpperLimit() ?
- current_check->upper_limit() : 0;
- while (current_check != NULL) {
- if (check->HasUpperLimit()) {
- if (check->upper_limit() != upper_constant_limit) {
- has_upper_constant_limit = false;
- }
- } else {
- has_upper_constant_limit = false;
- }
-
- current_check->check()->block()->graph()->isolate()->counters()->
- bounds_checks_eliminated()->Increment();
+ check != NULL && check->HasUpperLimit() ? check->upper_limit() : 0;
+ for (InductionVariableData::InductionVariableCheck* current_check = check;
+ current_check != NULL;
+ current_check = current_check->next()) {
+      has_upper_constant_limit =
+          has_upper_constant_limit &&
+          current_check->HasUpperLimit() &&
+          current_check->upper_limit() == upper_constant_limit;
+ counters()->bounds_checks_eliminated()->Increment();
current_check->check()->set_skip_check();
- current_check = current_check->next();
}
// Choose the appropriate limit.
@@ -303,8 +298,7 @@ class InductionVariableBlocksTable BASE_EMBEDDED {
zone, context, limit, check->check()->length());
hoisted_check->InsertBefore(pre_header->end());
hoisted_check->set_allow_equality(true);
- hoisted_check->block()->graph()->isolate()->counters()->
- bounds_checks_hoisted()->Increment();
+ counters()->bounds_checks_hoisted()->Increment();
}
void CollectInductionVariableData(HBasicBlock* bb) {
@@ -341,8 +335,7 @@ class InductionVariableBlocksTable BASE_EMBEDDED {
// TODO(mmassi): skip OSR values for check->length().
if (check->length() == data->limit() ||
check->length() == data->additional_upper_limit()) {
- check->block()->graph()->isolate()->counters()->
- bounds_checks_eliminated()->Increment();
+ counters()->bounds_checks_eliminated()->Increment();
check->set_skip_check();
continue;
}
@@ -407,4 +400,3 @@ void HBoundsCheckHoistingPhase::HoistRedundantBoundsChecks() {
}
} } // namespace v8::internal
-
diff --git a/deps/v8/src/hydrogen-canonicalize.cc b/deps/v8/src/hydrogen-canonicalize.cc
index 643234392..4d96415e6 100644
--- a/deps/v8/src/hydrogen-canonicalize.cc
+++ b/deps/v8/src/hydrogen-canonicalize.cc
@@ -48,6 +48,10 @@ void HCanonicalizePhase::Run() {
if (instr->HasAtLeastOneUseWithFlagAndNoneWithout(
HInstruction::kTruncatingToSmi)) {
instr->SetFlag(HInstruction::kAllUsesTruncatingToSmi);
+ } else if (instr->HasAtLeastOneUseWithFlagAndNoneWithout(
+ HInstruction::kTruncatingToInt32)) {
+        // Avoid a redundant minus-zero check.
+ instr->SetFlag(HInstruction::kAllUsesTruncatingToInt32);
}
}
}
diff --git a/deps/v8/src/hydrogen-escape-analysis.cc b/deps/v8/src/hydrogen-escape-analysis.cc
index 0359678ef..997e4f944 100644
--- a/deps/v8/src/hydrogen-escape-analysis.cc
+++ b/deps/v8/src/hydrogen-escape-analysis.cc
@@ -31,21 +31,33 @@ namespace v8 {
namespace internal {
-void HEscapeAnalysisPhase::CollectIfNoEscapingUses(HInstruction* instr) {
- for (HUseIterator it(instr->uses()); !it.Done(); it.Advance()) {
+bool HEscapeAnalysisPhase::HasNoEscapingUses(HValue* value, int size) {
+ for (HUseIterator it(value->uses()); !it.Done(); it.Advance()) {
HValue* use = it.value();
if (use->HasEscapingOperandAt(it.index())) {
if (FLAG_trace_escape_analysis) {
- PrintF("#%d (%s) escapes through #%d (%s) @%d\n", instr->id(),
- instr->Mnemonic(), use->id(), use->Mnemonic(), it.index());
+ PrintF("#%d (%s) escapes through #%d (%s) @%d\n", value->id(),
+ value->Mnemonic(), use->id(), use->Mnemonic(), it.index());
}
- return;
+ return false;
+ }
+ if (use->HasOutOfBoundsAccess(size)) {
+ if (FLAG_trace_escape_analysis) {
+ PrintF("#%d (%s) out of bounds at #%d (%s) @%d\n", value->id(),
+ value->Mnemonic(), use->id(), use->Mnemonic(), it.index());
+ }
+ return false;
+ }
+ int redefined_index = use->RedefinedOperandIndex();
+ if (redefined_index == it.index() && !HasNoEscapingUses(use, size)) {
+ if (FLAG_trace_escape_analysis) {
+ PrintF("#%d (%s) escapes redefinition #%d (%s) @%d\n", value->id(),
+ value->Mnemonic(), use->id(), use->Mnemonic(), it.index());
+ }
+ return false;
}
}
- if (FLAG_trace_escape_analysis) {
- PrintF("#%d (%s) is being captured\n", instr->id(), instr->Mnemonic());
- }
- captured_.Add(instr, zone());
+ return true;
}
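The predicate now also chases "redefinitions", uses that merely give the same object a new name, so an allocation counts as captured only if no transitive use lets it escape or touch memory out of bounds. A toy model of just the transitive part (Use and Value are hypothetical stand-ins; the out-of-bounds check is omitted):

#include <vector>

struct Value;

struct Use {
  Value* user;
  bool escapes;      // models HasEscapingOperandAt()
  bool redefines;    // models RedefinedOperandIndex() matching this use
};

struct Value {
  std::vector<Use> uses;
};

bool HasNoEscapingUses(const Value* value) {
  for (const Use& use : value->uses) {
    if (use.escapes) return false;
    // A redefinition stands for the same object: recurse into its uses.
    if (use.redefines && !HasNoEscapingUses(use.user)) return false;
  }
  return true;
}

int main() {
  Value sink, alias, alloc;
  alias.uses.push_back({&sink, true, false});   // the alias escapes...
  alloc.uses.push_back({&alias, false, true});  // ...via a redefinition
  return HasNoEscapingUses(&alloc) ? 1 : 0;     // escapes, so exits with 0
}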
@@ -55,8 +67,16 @@ void HEscapeAnalysisPhase::CollectCapturedValues() {
HBasicBlock* block = graph()->blocks()->at(i);
for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
HInstruction* instr = it.Current();
- if (instr->IsAllocate()) {
- CollectIfNoEscapingUses(instr);
+ if (!instr->IsAllocate()) continue;
+ HAllocate* allocate = HAllocate::cast(instr);
+ if (!allocate->size()->IsInteger32Constant()) continue;
+ int size_in_bytes = allocate->size()->GetInteger32Constant();
+ if (HasNoEscapingUses(instr, size_in_bytes)) {
+ if (FLAG_trace_escape_analysis) {
+ PrintF("#%d (%s) is being captured\n", instr->id(),
+ instr->Mnemonic());
+ }
+ captured_.Add(instr, zone());
}
}
}
@@ -65,7 +85,8 @@ void HEscapeAnalysisPhase::CollectCapturedValues() {
HCapturedObject* HEscapeAnalysisPhase::NewState(HInstruction* previous) {
Zone* zone = graph()->zone();
- HCapturedObject* state = new(zone) HCapturedObject(number_of_values_, zone);
+ HCapturedObject* state =
+ new(zone) HCapturedObject(number_of_values_, number_of_objects_, zone);
state->InsertAfter(previous);
return state;
}
@@ -85,7 +106,8 @@ HCapturedObject* HEscapeAnalysisPhase::NewStateForAllocation(
// Create a new state full of phis for loop header entries.
HCapturedObject* HEscapeAnalysisPhase::NewStateForLoopHeader(
- HInstruction* previous, HCapturedObject* old_state) {
+ HInstruction* previous,
+ HCapturedObject* old_state) {
HBasicBlock* block = previous->block();
HCapturedObject* state = NewState(previous);
for (int index = 0; index < number_of_values_; index++) {
@@ -99,7 +121,8 @@ HCapturedObject* HEscapeAnalysisPhase::NewStateForLoopHeader(
// Create a new state by copying an existing one.
HCapturedObject* HEscapeAnalysisPhase::NewStateCopy(
- HInstruction* previous, HCapturedObject* old_state) {
+ HInstruction* previous,
+ HCapturedObject* old_state) {
HCapturedObject* state = NewState(previous);
for (int index = 0; index < number_of_values_; index++) {
HValue* operand = old_state->OperandAt(index);
@@ -111,8 +134,9 @@ HCapturedObject* HEscapeAnalysisPhase::NewStateCopy(
// Insert a newly created phi into the given block and fill all incoming
// edges with the given value.
-HPhi* HEscapeAnalysisPhase::NewPhiAndInsert(
- HBasicBlock* block, HValue* incoming_value, int index) {
+HPhi* HEscapeAnalysisPhase::NewPhiAndInsert(HBasicBlock* block,
+ HValue* incoming_value,
+ int index) {
Zone* zone = graph()->zone();
HPhi* phi = new(zone) HPhi(HPhi::kInvalidMergedIndex, zone);
for (int i = 0; i < block->predecessors()->length(); i++) {
@@ -123,6 +147,21 @@ HPhi* HEscapeAnalysisPhase::NewPhiAndInsert(
}
+// Insert a newly created value check as a replacement for map checks.
+HValue* HEscapeAnalysisPhase::NewMapCheckAndInsert(HCapturedObject* state,
+ HCheckMaps* mapcheck) {
+ Zone* zone = graph()->zone();
+ HValue* value = state->map_value();
+ // TODO(mstarzinger): This will narrow a map check against a set of maps
+ // down to the first element in the set. Revisit and fix this.
+ Handle<Map> map_object = mapcheck->map_set()->first();
+ UniqueValueId map_id = mapcheck->map_unique_ids()->first();
+ HCheckValue* check = HCheckValue::New(zone, NULL, value, map_object, map_id);
+ check->InsertBefore(mapcheck);
+ return check;
+}
+
+
// Performs a forward data-flow analysis of all loads and stores on the
// given captured allocation. This uses a reverse post-order iteration
// over affected basic blocks. All non-escaping instructions are handled
@@ -171,30 +210,24 @@ void HEscapeAnalysisPhase::AnalyzeDataFlow(HInstruction* allocate) {
int index = store->access().offset() / kPointerSize;
if (store->object() != allocate) continue;
ASSERT(store->access().IsInobject());
- state = NewStateCopy(store, state);
+ state = NewStateCopy(store->previous(), state);
state->SetOperandAt(index, store->value());
if (store->has_transition()) {
state->SetOperandAt(0, store->transition());
}
- store->DeleteAndReplaceWith(NULL);
+ if (store->HasObservableSideEffects()) {
+ state->ReuseSideEffectsFromStore(store);
+ }
+ store->DeleteAndReplaceWith(store->ActualValue());
if (FLAG_trace_escape_analysis) {
PrintF("Replacing store #%d%s\n", instr->id(),
store->has_transition() ? " (with transition)" : "");
}
break;
}
- case HValue::kSimulate: {
- HSimulate* simulate = HSimulate::cast(instr);
- // TODO(mstarzinger): This doesn't track deltas for values on the
- // operand stack yet. Find a repro test case and fix this.
- for (int i = 0; i < simulate->OperandCount(); i++) {
- if (simulate->OperandAt(i) != allocate) continue;
- simulate->SetOperandAt(i, state);
- }
- break;
- }
case HValue::kArgumentsObject:
- case HValue::kCapturedObject: {
+ case HValue::kCapturedObject:
+ case HValue::kSimulate: {
for (int i = 0; i < instr->OperandCount(); i++) {
if (instr->OperandAt(i) != allocate) continue;
instr->SetOperandAt(i, state);
@@ -204,23 +237,14 @@ void HEscapeAnalysisPhase::AnalyzeDataFlow(HInstruction* allocate) {
case HValue::kCheckHeapObject: {
HCheckHeapObject* check = HCheckHeapObject::cast(instr);
if (check->value() != allocate) continue;
- check->DeleteAndReplaceWith(NULL);
+ check->DeleteAndReplaceWith(check->ActualValue());
break;
}
case HValue::kCheckMaps: {
HCheckMaps* mapcheck = HCheckMaps::cast(instr);
if (mapcheck->value() != allocate) continue;
- // TODO(mstarzinger): This approach breaks if the tracked map value
- // is not a HConstant. Find a repro test case and fix this.
- for (HUseIterator it(mapcheck->uses()); !it.Done(); it.Advance()) {
- if (!it.value()->IsLoadNamedField()) continue;
- HLoadNamedField* load = HLoadNamedField::cast(it.value());
- ASSERT(load->typecheck() == mapcheck);
- load->ClearTypeCheck();
- }
- ASSERT(mapcheck->HasNoUses());
-
- mapcheck->DeleteAndReplaceWith(NULL);
+ NewMapCheckAndInsert(state, mapcheck);
+ mapcheck->DeleteAndReplaceWith(mapcheck->ActualValue());
break;
}
default:
@@ -278,9 +302,9 @@ void HEscapeAnalysisPhase::PerformScalarReplacement() {
HAllocate* allocate = HAllocate::cast(captured_.at(i));
// Compute number of scalar values and start with clean slate.
- if (!allocate->size()->IsInteger32Constant()) continue;
int size_in_bytes = allocate->size()->GetInteger32Constant();
number_of_values_ = size_in_bytes / kPointerSize;
+ number_of_objects_++;
block_states_.Clear();
// Perform actual analysis steps.
@@ -293,4 +317,13 @@ void HEscapeAnalysisPhase::PerformScalarReplacement() {
}
+void HEscapeAnalysisPhase::Run() {
+ // TODO(mstarzinger): We disable escape analysis with OSR for now, because
+ // spill slots might be uninitialized. Needs investigation.
+ if (graph()->has_osr()) return;
+ CollectCapturedValues();
+ PerformScalarReplacement();
+}
+
+
} } // namespace v8::internal
diff --git a/deps/v8/src/hydrogen-escape-analysis.h b/deps/v8/src/hydrogen-escape-analysis.h
index 123da214e..3e27cc1b4 100644
--- a/deps/v8/src/hydrogen-escape-analysis.h
+++ b/deps/v8/src/hydrogen-escape-analysis.h
@@ -40,18 +40,16 @@ class HEscapeAnalysisPhase : public HPhase {
explicit HEscapeAnalysisPhase(HGraph* graph)
: HPhase("H_Escape analysis", graph),
captured_(0, zone()),
+ number_of_objects_(0),
number_of_values_(0),
cumulative_values_(0),
block_states_(graph->blocks()->length(), zone()) { }
- void Run() {
- CollectCapturedValues();
- PerformScalarReplacement();
- }
+ void Run();
private:
void CollectCapturedValues();
- void CollectIfNoEscapingUses(HInstruction* instr);
+ bool HasNoEscapingUses(HValue* value, int size);
void PerformScalarReplacement();
void AnalyzeDataFlow(HInstruction* instr);
@@ -62,6 +60,8 @@ class HEscapeAnalysisPhase : public HPhase {
HPhi* NewPhiAndInsert(HBasicBlock* block, HValue* incoming_value, int index);
+ HValue* NewMapCheckAndInsert(HCapturedObject* state, HCheckMaps* mapcheck);
+
HCapturedObject* StateAt(HBasicBlock* block) {
return block_states_.at(block->block_id());
}
@@ -73,6 +73,9 @@ class HEscapeAnalysisPhase : public HPhase {
// List of allocations captured during collection phase.
ZoneList<HInstruction*> captured_;
+ // Number of captured objects on which scalar replacement was done.
+ int number_of_objects_;
+
// Number of scalar values tracked during scalar replacement phase.
int number_of_values_;
int cumulative_values_;
diff --git a/deps/v8/src/hydrogen-infer-representation.cc b/deps/v8/src/hydrogen-infer-representation.cc
index 95c341285..f61649a68 100644
--- a/deps/v8/src/hydrogen-infer-representation.cc
+++ b/deps/v8/src/hydrogen-infer-representation.cc
@@ -82,24 +82,36 @@ void HInferRepresentationPhase::Run() {
if (done.Contains(i)) continue;
// Check if all uses of all connected phis in this group are truncating.
- bool all_uses_everywhere_truncating = true;
+ bool all_uses_everywhere_truncating_int32 = true;
+ bool all_uses_everywhere_truncating_smi = true;
for (BitVector::Iterator it(connected_phis[i]);
!it.Done();
it.Advance()) {
int index = it.Current();
- all_uses_everywhere_truncating &=
+ all_uses_everywhere_truncating_int32 &=
phi_list->at(index)->CheckFlag(HInstruction::kTruncatingToInt32);
+ all_uses_everywhere_truncating_smi &=
+ phi_list->at(index)->CheckFlag(HInstruction::kTruncatingToSmi);
done.Add(index);
}
- if (all_uses_everywhere_truncating) {
- continue; // Great, nothing to do.
+
+ if (!all_uses_everywhere_truncating_int32) {
+ // Clear truncation flag of this group of connected phis.
+ for (BitVector::Iterator it(connected_phis[i]);
+ !it.Done();
+ it.Advance()) {
+ int index = it.Current();
+ phi_list->at(index)->ClearFlag(HInstruction::kTruncatingToInt32);
+ }
}
- // Clear truncation flag of this group of connected phis.
- for (BitVector::Iterator it(connected_phis[i]);
- !it.Done();
- it.Advance()) {
- int index = it.Current();
- phi_list->at(index)->ClearFlag(HInstruction::kTruncatingToInt32);
+ if (!all_uses_everywhere_truncating_smi) {
+ // Clear truncation flag of this group of connected phis.
+ for (BitVector::Iterator it(connected_phis[i]);
+ !it.Done();
+ it.Advance()) {
+ int index = it.Current();
+ phi_list->at(index)->ClearFlag(HInstruction::kTruncatingToSmi);
+ }
}
}
}
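The phase now tracks smi and int32 truncation independently: within each group of connected phis, a flag survives only if every member carries it, and otherwise it is cleared from the whole group. A toy reduction of that AND-then-clear step over plain structs:

#include <cstdio>
#include <vector>

struct Phi {
  bool truncating_int32;
  bool truncating_smi;
};

void NormalizeGroup(const std::vector<Phi*>& group) {
  bool all_int32 = true, all_smi = true;
  for (Phi* phi : group) {
    all_int32 &= phi->truncating_int32;
    all_smi &= phi->truncating_smi;
  }
  for (Phi* phi : group) {
    if (!all_int32) phi->truncating_int32 = false;
    if (!all_smi) phi->truncating_smi = false;
  }
}

int main() {
  Phi a{true, true}, b{true, false};
  std::vector<Phi*> group = {&a, &b};
  NormalizeGroup(group);
  // int32 truncation survives (both phis had it); smi truncation is cleared.
  std::printf("%d%d %d%d\n", a.truncating_int32, a.truncating_smi,
              b.truncating_int32, b.truncating_smi);  // 10 10
}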
@@ -140,8 +152,8 @@ void HInferRepresentationPhase::Run() {
// Do a fixed point iteration, trying to improve representations
while (!worklist_.is_empty()) {
HValue* current = worklist_.RemoveLast();
- in_worklist_.Remove(current->id());
current->InferRepresentation(this);
+ in_worklist_.Remove(current->id());
}
// Lastly: any instruction that we don't have representation information
diff --git a/deps/v8/src/hydrogen-instructions.cc b/deps/v8/src/hydrogen-instructions.cc
index e4599e1e8..cca95b9b5 100644
--- a/deps/v8/src/hydrogen-instructions.cc
+++ b/deps/v8/src/hydrogen-instructions.cc
@@ -397,6 +397,18 @@ bool HValue::CheckUsesForFlag(Flag f) const {
}
+bool HValue::CheckUsesForFlag(Flag f, HValue** value) const {
+ for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
+ if (it.value()->IsSimulate()) continue;
+ if (!it.value()->CheckFlag(f)) {
+ *value = it.value();
+ return false;
+ }
+ }
+ return true;
+}
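The overload added above reports the first offending use through an out parameter, so a caller can say which use blocked the flag rather than merely that one did. The same out-parameter idiom in miniature over plain integers:

#include <cstdio>
#include <vector>

// Returns true if every element is even; otherwise stores the first odd
// element in *offender and returns false, mirroring CheckUsesForFlag above.
bool AllEven(const std::vector<int>& values, int* offender) {
  for (int v : values) {
    if (v % 2 != 0) {
      *offender = v;
      return false;
    }
  }
  return true;
}

int main() {
  std::vector<int> values = {2, 4, 7, 8};
  int offender = 0;
  if (!AllEven(values, &offender)) {
    std::printf("first offender: %d\n", offender);  // prints 7
  }
}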
+
+
bool HValue::HasAtLeastOneUseWithFlagAndNoneWithout(Flag f) const {
bool return_value = false;
for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
@@ -1231,6 +1243,7 @@ HValue* HMod::Canonicalize() {
HValue* HDiv::Canonicalize() {
+ if (IsIdentityOperation(left(), right(), 1)) return left();
return this;
}
@@ -1438,15 +1451,16 @@ void HCheckMaps::PrintDataTo(StringStream* stream) {
}
-void HCheckFunction::PrintDataTo(StringStream* stream) {
+void HCheckValue::PrintDataTo(StringStream* stream) {
value()->PrintNameTo(stream);
- stream->Add(" %p", *target());
+ stream->Add(" ");
+ object()->ShortPrint(stream);
}
-HValue* HCheckFunction::Canonicalize() {
+HValue* HCheckValue::Canonicalize() {
return (value()->IsConstant() &&
- HConstant::cast(value())->UniqueValueIdsMatch(target_unique_id_))
+ HConstant::cast(value())->UniqueValueIdsMatch(object_unique_id_))
? NULL
: this;
}
@@ -1477,6 +1491,15 @@ void HCallStub::PrintDataTo(StringStream* stream) {
}
+void HUnknownOSRValue::PrintDataTo(StringStream* stream) {
+ const char* type = "expression";
+ if (environment_->is_local_index(index_)) type = "local";
+ if (environment_->is_special_index(index_)) type = "special";
+ if (environment_->is_parameter_index(index_)) type = "parameter";
+ stream->Add("%s @ %d", type, index_);
+}
+
+
void HInstanceOf::PrintDataTo(StringStream* stream) {
left()->PrintNameTo(stream);
stream->Add(" ");
@@ -2289,6 +2312,38 @@ void HSimulate::PrintDataTo(StringStream* stream) {
}
+void HSimulate::ReplayEnvironment(HEnvironment* env) {
+ ASSERT(env != NULL);
+ env->set_ast_id(ast_id());
+ env->Drop(pop_count());
+ for (int i = values()->length() - 1; i >= 0; --i) {
+ HValue* value = values()->at(i);
+ if (HasAssignedIndexAt(i)) {
+ env->Bind(GetAssignedIndexAt(i), value);
+ } else {
+ env->Push(value);
+ }
+ }
+}
+
+
+// Replay captured objects by replacing all captured objects with the
+// same capture id in the current and all outer environments.
+void HCapturedObject::ReplayEnvironment(HEnvironment* env) {
+ ASSERT(env != NULL);
+ while (env != NULL) {
+ for (int i = 0; i < env->length(); ++i) {
+ HValue* value = env->values()->at(i);
+ if (value->IsCapturedObject() &&
+ HCapturedObject::cast(value)->capture_id() == this->capture_id()) {
+ env->SetValueAt(i, this);
+ }
+ }
+ env = env->outer();
+ }
+}
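ReplayEnvironment walks outward through the chain of environments, substituting the replayed object for every slot that holds a captured object with a matching capture id. A toy model of that substitution (Env and Slot are hypothetical stand-ins for HEnvironment and its values):

#include <cstdio>
#include <vector>

struct Slot {
  bool is_captured;   // models value->IsCapturedObject()
  int capture_id;
  const void* value;
};

struct Env {
  std::vector<Slot> slots;
  Env* outer;
};

void Replay(Env* env, int capture_id, const void* replacement) {
  for (; env != nullptr; env = env->outer) {  // current and all outer envs
    for (Slot& slot : env->slots) {
      if (slot.is_captured && slot.capture_id == capture_id) {
        slot.value = replacement;
      }
    }
  }
}

int main() {
  Env outer{{{true, 7, nullptr}}, nullptr};
  Env inner{{{true, 7, nullptr}, {false, 0, nullptr}}, &outer};
  int replayed = 42;
  Replay(&inner, 7, &replayed);
  std::printf("%d %d\n", inner.slots[0].value == &replayed,
              outer.slots[0].value == &replayed);  // 1 1
}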
+
+
void HEnterInlined::RegisterReturnTarget(HBasicBlock* return_target,
Zone* zone) {
ASSERT(return_target->IsInlineReturnTarget());
@@ -2451,7 +2506,7 @@ static void PrepareConstant(Handle<Object> object) {
void HConstant::Initialize(Representation r) {
if (r.IsNone()) {
- if (has_smi_value_ && kSmiValueSize == 31) {
+ if (has_smi_value_ && SmiValuesAre31Bits()) {
r = Representation::Smi();
} else if (has_int32_value_) {
r = Representation::Integer32();
@@ -2529,12 +2584,13 @@ Maybe<HConstant*> HConstant::CopyToTruncatedInt32(Zone* zone) {
Maybe<HConstant*> HConstant::CopyToTruncatedNumber(Zone* zone) {
HConstant* res = NULL;
- if (handle()->IsBoolean()) {
- res = handle()->BooleanValue() ?
+ Handle<Object> handle = this->handle(zone->isolate());
+ if (handle->IsBoolean()) {
+ res = handle->BooleanValue() ?
new(zone) HConstant(1) : new(zone) HConstant(0);
- } else if (handle()->IsUndefined()) {
+ } else if (handle->IsUndefined()) {
res = new(zone) HConstant(OS::nan_value());
- } else if (handle()->IsNull()) {
+ } else if (handle->IsNull()) {
res = new(zone) HConstant(0);
}
return Maybe<HConstant*>(res != NULL, res);
@@ -2550,7 +2606,7 @@ void HConstant::PrintDataTo(StringStream* stream) {
stream->Add("%p ", reinterpret_cast<void*>(
external_reference_value_.address()));
} else {
- handle()->ShortPrint(stream);
+ handle(Isolate::Current())->ShortPrint(stream);
}
}
@@ -2867,10 +2923,6 @@ void HParameter::PrintDataTo(StringStream* stream) {
void HLoadNamedField::PrintDataTo(StringStream* stream) {
object()->PrintNameTo(stream);
access_.PrintTo(stream);
- if (HasTypeCheck()) {
- stream->Add(" ");
- typecheck()->PrintNameTo(stream);
- }
}
@@ -2884,7 +2936,7 @@ HCheckMaps* HCheckMaps::New(Zone* zone,
check_map->Add(map, zone);
if (map->CanOmitMapChecks() &&
value->IsConstant() &&
- HConstant::cast(value)->InstanceOf(map)) {
+ HConstant::cast(value)->HasMap(map)) {
check_map->omit(info);
}
return check_map;
@@ -3103,6 +3155,7 @@ void HTransitionElementsKind::PrintDataTo(StringStream* stream) {
ElementsAccessor::ForKind(from_kind)->name(),
*transitioned_map(),
ElementsAccessor::ForKind(to_kind)->name());
+ if (IsSimpleMapChangeTransition(from_kind, to_kind)) stream->Add(" (simple)");
}
@@ -3402,7 +3455,7 @@ void HAllocate::CreateFreeSpaceFiller(int32_t free_space_size) {
zone,
context(),
isolate()->factory()->free_space_map(),
- UniqueValueId(isolate()->heap()->free_space_map()));
+ UniqueValueId::free_space_map(isolate()->heap()));
filler_map->InsertAfter(free_space_instr);
HInstruction* store_map = HStoreNamedField::New(zone, context(),
free_space_instr, HObjectAccess::ForMap(), filler_map);
@@ -3625,7 +3678,7 @@ HInstruction* HStringCharFromCode::New(
Zone* zone, HValue* context, HValue* char_code) {
if (FLAG_fold_constants && char_code->IsConstant()) {
HConstant* c_code = HConstant::cast(char_code);
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = zone->isolate();
if (c_code->HasNumberValue()) {
if (std::isfinite(c_code->DoubleValue())) {
uint32_t code = c_code->NumberValueAsInteger32() & 0xffff;
@@ -3950,6 +4003,9 @@ Representation HPhi::RepresentationFromInputs() {
Representation HValue::RepresentationFromUseRequirements() {
Representation rep = Representation::None();
for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
+    // Ignore the use requirement from code that never runs.
+ if (it.value()->block()->IsDeoptimizing()) continue;
+
// We check for observed_input_representation elsewhere.
Representation use_rep =
it.value()->RequiredInputRepresentation(it.index());
@@ -4010,7 +4066,7 @@ void HCheckHeapObject::Verify() {
}
-void HCheckFunction::Verify() {
+void HCheckValue::Verify() {
HInstruction::Verify();
ASSERT(HasNoUses());
}
@@ -4040,6 +4096,15 @@ HObjectAccess HObjectAccess::ForJSObjectOffset(int offset,
}
+HObjectAccess HObjectAccess::ForContextSlot(int index) {
+ ASSERT(index >= 0);
+ Portion portion = kInobject;
+ int offset = Context::kHeaderSize + index * kPointerSize;
+ ASSERT_EQ(offset, Context::SlotOffset(index) + kHeapObjectTag);
+ return HObjectAccess(portion, offset, Representation::Tagged());
+}
+
+
HObjectAccess HObjectAccess::ForJSArrayOffset(int offset) {
ASSERT(offset >= 0);
Portion portion = kInobject;
diff --git a/deps/v8/src/hydrogen-instructions.h b/deps/v8/src/hydrogen-instructions.h
index 60d288525..7d33141a4 100644
--- a/deps/v8/src/hydrogen-instructions.h
+++ b/deps/v8/src/hydrogen-instructions.h
@@ -87,12 +87,12 @@ class LChunkBuilder;
V(CallStub) \
V(CapturedObject) \
V(Change) \
- V(CheckFunction) \
V(CheckHeapObject) \
V(CheckInstanceType) \
V(CheckMaps) \
V(CheckMapValue) \
V(CheckSmi) \
+ V(CheckValue) \
V(ClampToUint8) \
V(ClassOfTestAndBranch) \
V(CompareNumericAndBranch) \
@@ -163,6 +163,7 @@ class LChunkBuilder;
V(Shr) \
V(Simulate) \
V(StackCheck) \
+ V(StoreCodeEntry) \
V(StoreContextSlot) \
V(StoreGlobalCell) \
V(StoreGlobalGeneric) \
@@ -209,24 +210,27 @@ class LChunkBuilder;
V(ExternalMemory)
-#define DECLARE_ABSTRACT_INSTRUCTION(type) \
- virtual bool Is##type() const { return true; } \
- static H##type* cast(HValue* value) { \
- ASSERT(value->Is##type()); \
- return reinterpret_cast<H##type*>(value); \
+#define DECLARE_ABSTRACT_INSTRUCTION(type) \
+ virtual bool Is##type() const V8_FINAL V8_OVERRIDE { return true; } \
+ static H##type* cast(HValue* value) { \
+ ASSERT(value->Is##type()); \
+ return reinterpret_cast<H##type*>(value); \
}
-#define DECLARE_CONCRETE_INSTRUCTION(type) \
- virtual LInstruction* CompileToLithium(LChunkBuilder* builder); \
- static H##type* cast(HValue* value) { \
- ASSERT(value->Is##type()); \
- return reinterpret_cast<H##type*>(value); \
- } \
- virtual Opcode opcode() const { return HValue::k##type; }
+#define DECLARE_CONCRETE_INSTRUCTION(type) \
+ virtual LInstruction* CompileToLithium( \
+ LChunkBuilder* builder) V8_FINAL V8_OVERRIDE; \
+ static H##type* cast(HValue* value) { \
+ ASSERT(value->Is##type()); \
+ return reinterpret_cast<H##type*>(value); \
+ } \
+ virtual Opcode opcode() const V8_FINAL V8_OVERRIDE { \
+ return HValue::k##type; \
+ }
-class Range: public ZoneObject {
+class Range V8_FINAL : public ZoneObject {
public:
Range()
: lower_(kMinInt),
@@ -301,16 +305,12 @@ class Range: public ZoneObject {
};
-class UniqueValueId {
+class UniqueValueId V8_FINAL {
public:
UniqueValueId() : raw_address_(NULL) { }
- explicit UniqueValueId(Object* object) {
- raw_address_ = reinterpret_cast<Address>(object);
- ASSERT(IsInitialized());
- }
-
explicit UniqueValueId(Handle<Object> handle) {
+ ASSERT(!AllowHeapAllocation::IsAllowed());
static const Address kEmptyHandleSentinel = reinterpret_cast<Address>(1);
if (handle.is_null()) {
raw_address_ = kEmptyHandleSentinel;
@@ -338,12 +338,32 @@ class UniqueValueId {
return reinterpret_cast<intptr_t>(raw_address_);
}
+#define IMMOVABLE_UNIQUE_VALUE_ID(name) \
+ static UniqueValueId name(Heap* heap) { return UniqueValueId(heap->name()); }
+
+ IMMOVABLE_UNIQUE_VALUE_ID(free_space_map)
+ IMMOVABLE_UNIQUE_VALUE_ID(minus_zero_value)
+ IMMOVABLE_UNIQUE_VALUE_ID(nan_value)
+ IMMOVABLE_UNIQUE_VALUE_ID(undefined_value)
+ IMMOVABLE_UNIQUE_VALUE_ID(null_value)
+ IMMOVABLE_UNIQUE_VALUE_ID(true_value)
+ IMMOVABLE_UNIQUE_VALUE_ID(false_value)
+ IMMOVABLE_UNIQUE_VALUE_ID(the_hole_value)
+ IMMOVABLE_UNIQUE_VALUE_ID(empty_string)
+
+#undef IMMOVABLE_UNIQUE_VALUE_ID
+
private:
Address raw_address_;
+
+ explicit UniqueValueId(Object* object) {
+ raw_address_ = reinterpret_cast<Address>(object);
+ ASSERT(IsInitialized());
+ }
};
-class HType {
+class HType V8_FINAL {
public:
static HType None() { return HType(kNone); }
static HType Tagged() { return HType(kTagged); }
@@ -370,10 +390,6 @@ class HType {
return Combine(other).Equals(other);
}
- bool IsTagged() const {
- return ((type_ & kTagged) == kTagged);
- }
-
bool IsTaggedPrimitive() const {
return ((type_ & kTaggedPrimitive) == kTaggedPrimitive);
}
@@ -494,7 +510,7 @@ class HUseListNode: public ZoneObject {
// We reuse use list nodes behind the scenes as uses are added and deleted.
// This class is the safe way to iterate uses while deleting them.
-class HUseIterator BASE_EMBEDDED {
+class HUseIterator V8_FINAL BASE_EMBEDDED {
public:
bool Done() { return current_ == NULL; }
void Advance();
@@ -538,7 +554,7 @@ enum GVNFlag {
};
-class DecompositionResult BASE_EMBEDDED {
+class DecompositionResult V8_FINAL BASE_EMBEDDED {
public:
DecompositionResult() : base_(NULL), offset_(0), scale_(0) {}
@@ -586,7 +602,7 @@ class DecompositionResult BASE_EMBEDDED {
typedef EnumSet<GVNFlag> GVNFlagSet;
-class HValue: public ZoneObject {
+class HValue : public ZoneObject {
public:
static const int kNoNumber = -1;
@@ -802,6 +818,8 @@ class HValue: public ZoneObject {
// Returns true if the flag specified is set for all uses, false otherwise.
bool CheckUsesForFlag(Flag f) const;
+  // Same as above, but the first use without the flag is returned in |value|.
+ bool CheckUsesForFlag(Flag f, HValue** value) const;
// Returns true if the flag specified is set for all uses, and this set
// of uses is non-empty.
bool HasAtLeastOneUseWithFlagAndNoneWithout(Flag f) const;
@@ -861,6 +879,7 @@ class HValue: public ZoneObject {
// Escape analysis helpers.
virtual bool HasEscapingOperandAt(int index) { return true; }
+ virtual bool HasOutOfBoundsAccess(int size) { return false; }
// Representation helpers.
virtual Representation observed_input_representation(int index) {
@@ -878,7 +897,7 @@ class HValue: public ZoneObject {
bool Equals(HValue* other);
virtual intptr_t Hashcode();
- // Compute unique ids upfront that is safe wrt GC and parallel recompilation.
+  // Compute unique ids upfront; this is safe wrt GC and concurrent compilation.
virtual void FinalizeUniqueValueId() { }
// Printing support.
@@ -941,6 +960,11 @@ class HValue: public ZoneObject {
return type().ToStringOrToNumberCanBeObserved(representation());
}
+ MinusZeroMode GetMinusZeroMode() {
+ return CheckFlag(kBailoutOnMinusZero)
+ ? FAIL_ON_MINUS_ZERO : TREAT_MINUS_ZERO_AS_ZERO;
+ }
+
protected:
// This function must be overridden for instructions with flag kUseGVN, to
// compare the non-Operand parts of the instruction.
@@ -1081,12 +1105,12 @@ class HValue: public ZoneObject {
}
-class HInstruction: public HValue {
+class HInstruction : public HValue {
public:
HInstruction* next() const { return next_; }
HInstruction* previous() const { return previous_; }
- virtual void PrintTo(StringStream* stream);
+ virtual void PrintTo(StringStream* stream) V8_OVERRIDE;
virtual void PrintDataTo(StringStream* stream);
bool IsLinked() const { return block() != NULL; }
@@ -1108,7 +1132,7 @@ class HInstruction: public HValue {
virtual LInstruction* CompileToLithium(LChunkBuilder* builder) = 0;
#ifdef DEBUG
- virtual void Verify();
+ virtual void Verify() V8_OVERRIDE;
#endif
virtual bool IsCall() { return false; }
@@ -1124,7 +1148,7 @@ class HInstruction: public HValue {
SetGVNFlag(kDependsOnOsrEntries);
}
- virtual void DeleteFromGraph() { Unlink(); }
+ virtual void DeleteFromGraph() V8_OVERRIDE { Unlink(); }
private:
void InitializeAsFirst(HBasicBlock* block) {
@@ -1145,26 +1169,30 @@ class HInstruction: public HValue {
template<int V>
class HTemplateInstruction : public HInstruction {
public:
- int OperandCount() { return V; }
- HValue* OperandAt(int i) const { return inputs_[i]; }
+ virtual int OperandCount() V8_FINAL V8_OVERRIDE { return V; }
+ virtual HValue* OperandAt(int i) const V8_FINAL V8_OVERRIDE {
+ return inputs_[i];
+ }
protected:
HTemplateInstruction(HType type = HType::Tagged()) : HInstruction(type) {}
- void InternalSetOperandAt(int i, HValue* value) { inputs_[i] = value; }
+ virtual void InternalSetOperandAt(int i, HValue* value) V8_FINAL V8_OVERRIDE {
+ inputs_[i] = value;
+ }
private:
EmbeddedContainer<HValue*, V> inputs_;
};
-class HControlInstruction: public HInstruction {
+class HControlInstruction : public HInstruction {
public:
virtual HBasicBlock* SuccessorAt(int i) = 0;
virtual int SuccessorCount() = 0;
virtual void SetSuccessorAt(int i, HBasicBlock* block) = 0;
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
HBasicBlock* FirstSuccessor() {
return SuccessorCount() > 0 ? SuccessorAt(0) : NULL;
@@ -1177,7 +1205,7 @@ class HControlInstruction: public HInstruction {
};
-class HSuccessorIterator BASE_EMBEDDED {
+class HSuccessorIterator V8_FINAL BASE_EMBEDDED {
public:
explicit HSuccessorIterator(HControlInstruction* instr)
: instr_(instr), current_(0) { }
@@ -1193,18 +1221,22 @@ class HSuccessorIterator BASE_EMBEDDED {
template<int S, int V>
-class HTemplateControlInstruction: public HControlInstruction {
+class HTemplateControlInstruction : public HControlInstruction {
public:
- int SuccessorCount() { return S; }
- HBasicBlock* SuccessorAt(int i) { return successors_[i]; }
- void SetSuccessorAt(int i, HBasicBlock* block) { successors_[i] = block; }
+ int SuccessorCount() V8_OVERRIDE { return S; }
+ HBasicBlock* SuccessorAt(int i) V8_OVERRIDE { return successors_[i]; }
+ void SetSuccessorAt(int i, HBasicBlock* block) V8_OVERRIDE {
+ successors_[i] = block;
+ }
- int OperandCount() { return V; }
- HValue* OperandAt(int i) const { return inputs_[i]; }
+ int OperandCount() V8_OVERRIDE { return V; }
+ HValue* OperandAt(int i) const V8_OVERRIDE { return inputs_[i]; }
protected:
- void InternalSetOperandAt(int i, HValue* value) { inputs_[i] = value; }
+ void InternalSetOperandAt(int i, HValue* value) V8_OVERRIDE {
+ inputs_[i] = value;
+ }
private:
EmbeddedContainer<HBasicBlock*, S> successors_;
@@ -1212,9 +1244,9 @@ class HTemplateControlInstruction: public HControlInstruction {
};
-class HBlockEntry: public HTemplateInstruction<0> {
+class HBlockEntry V8_FINAL : public HTemplateInstruction<0> {
public:
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::None();
}
@@ -1222,7 +1254,7 @@ class HBlockEntry: public HTemplateInstruction<0> {
};
-class HDummyUse: public HTemplateInstruction<1> {
+class HDummyUse V8_FINAL : public HTemplateInstruction<1> {
public:
explicit HDummyUse(HValue* value)
: HTemplateInstruction<1>(HType::Smi()) {
@@ -1234,23 +1266,23 @@ class HDummyUse: public HTemplateInstruction<1> {
HValue* value() { return OperandAt(0); }
- virtual bool HasEscapingOperandAt(int index) { return false; }
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual bool HasEscapingOperandAt(int index) V8_OVERRIDE { return false; }
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::None();
}
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(DummyUse);
};
-class HDeoptimize: public HTemplateInstruction<0> {
+class HDeoptimize V8_FINAL : public HTemplateInstruction<0> {
public:
DECLARE_INSTRUCTION_FACTORY_P2(HDeoptimize, const char*,
Deoptimizer::BailoutType);
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::None();
}
@@ -1269,9 +1301,9 @@ class HDeoptimize: public HTemplateInstruction<0> {
// Inserts an int3/stop break instruction for debugging purposes.
-class HDebugBreak: public HTemplateInstruction<0> {
+class HDebugBreak V8_FINAL : public HTemplateInstruction<0> {
public:
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::None();
}
@@ -1279,23 +1311,23 @@ class HDebugBreak: public HTemplateInstruction<0> {
};
-class HGoto: public HTemplateControlInstruction<1, 0> {
+class HGoto V8_FINAL : public HTemplateControlInstruction<1, 0> {
public:
explicit HGoto(HBasicBlock* target) {
SetSuccessorAt(0, target);
}
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::None();
}
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(Goto)
};
-class HUnaryControlInstruction: public HTemplateControlInstruction<2, 1> {
+class HUnaryControlInstruction : public HTemplateControlInstruction<2, 1> {
public:
HUnaryControlInstruction(HValue* value,
HBasicBlock* true_target,
@@ -1305,13 +1337,13 @@ class HUnaryControlInstruction: public HTemplateControlInstruction<2, 1> {
SetSuccessorAt(1, false_target);
}
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
HValue* value() { return OperandAt(0); }
};
-class HBranch: public HUnaryControlInstruction {
+class HBranch V8_FINAL : public HUnaryControlInstruction {
public:
HBranch(HValue* value,
ToBooleanStub::Types expected_input_types = ToBooleanStub::Types(),
@@ -1322,10 +1354,10 @@ class HBranch: public HUnaryControlInstruction {
SetFlag(kAllowUndefinedAsNaN);
}
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::None();
}
- virtual Representation observed_input_representation(int index);
+ virtual Representation observed_input_representation(int index) V8_OVERRIDE;
ToBooleanStub::Types expected_input_types() const {
return expected_input_types_;
@@ -1338,7 +1370,7 @@ class HBranch: public HUnaryControlInstruction {
};
-class HCompareMap: public HUnaryControlInstruction {
+class HCompareMap V8_FINAL : public HUnaryControlInstruction {
public:
HCompareMap(HValue* value,
Handle<Map> map,
@@ -1349,35 +1381,38 @@ class HCompareMap: public HUnaryControlInstruction {
ASSERT(!map.is_null());
}
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
Handle<Map> map() const { return map_; }
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
DECLARE_CONCRETE_INSTRUCTION(CompareMap)
+ protected:
+ virtual int RedefinedOperandIndex() { return 0; }
+
private:
Handle<Map> map_;
};
-class HContext: public HTemplateInstruction<0> {
+class HContext V8_FINAL : public HTemplateInstruction<0> {
public:
static HContext* New(Zone* zone) {
return new(zone) HContext();
}
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::None();
}
DECLARE_CONCRETE_INSTRUCTION(Context)
protected:
- virtual bool DataEquals(HValue* other) { return true; }
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
private:
HContext() {
@@ -1385,11 +1420,11 @@ class HContext: public HTemplateInstruction<0> {
SetFlag(kUseGVN);
}
- virtual bool IsDeletable() const { return true; }
+ virtual bool IsDeletable() const V8_OVERRIDE { return true; }
};
-class HReturn: public HTemplateControlInstruction<0, 3> {
+class HReturn V8_FINAL : public HTemplateControlInstruction<0, 3> {
public:
static HInstruction* New(Zone* zone,
HValue* context,
@@ -1404,11 +1439,11 @@ class HReturn: public HTemplateControlInstruction<0, 3> {
return new(zone) HReturn(value, context, 0);
}
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
HValue* value() { return OperandAt(0); }
HValue* context() { return OperandAt(1); }
@@ -1425,7 +1460,7 @@ class HReturn: public HTemplateControlInstruction<0, 3> {
};
-class HUnaryOperation: public HTemplateInstruction<1> {
+class HUnaryOperation : public HTemplateInstruction<1> {
public:
HUnaryOperation(HValue* value, HType type = HType::Tagged())
: HTemplateInstruction<1>(type) {
@@ -1437,11 +1472,11 @@ class HUnaryOperation: public HTemplateInstruction<1> {
}
HValue* value() const { return OperandAt(0); }
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class HThrow: public HTemplateInstruction<2> {
+class HThrow V8_FINAL : public HTemplateInstruction<2> {
public:
static HThrow* New(Zone* zone,
HValue* context,
@@ -1449,7 +1484,7 @@ class HThrow: public HTemplateInstruction<2> {
return new(zone) HThrow(context, value);
}
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
@@ -1467,11 +1502,11 @@ class HThrow: public HTemplateInstruction<2> {
};
-class HUseConst: public HUnaryOperation {
+class HUseConst V8_FINAL : public HUnaryOperation {
public:
DECLARE_INSTRUCTION_FACTORY_P1(HUseConst, HValue*);
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::None();
}
@@ -1482,19 +1517,20 @@ class HUseConst: public HUnaryOperation {
};
-class HForceRepresentation: public HTemplateInstruction<1> {
+class HForceRepresentation V8_FINAL : public HTemplateInstruction<1> {
public:
DECLARE_INSTRUCTION_FACTORY_P2(HForceRepresentation, HValue*, Representation);
HValue* value() { return OperandAt(0); }
- virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
+ virtual HValue* EnsureAndPropagateNotMinusZero(
+ BitVector* visited) V8_OVERRIDE;
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return representation(); // Same as the output representation.
}
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(ForceRepresentation)
@@ -1506,7 +1542,7 @@ class HForceRepresentation: public HTemplateInstruction<1> {
};
-class HChange: public HUnaryOperation {
+class HChange V8_FINAL : public HUnaryOperation {
public:
HChange(HValue* value,
Representation to,
@@ -1518,7 +1554,10 @@ class HChange: public HUnaryOperation {
ASSERT(!value->representation().Equals(to));
set_representation(to);
SetFlag(kUseGVN);
- if (is_truncating_to_smi) SetFlag(kTruncatingToSmi);
+ if (is_truncating_to_smi) {
+ SetFlag(kTruncatingToSmi);
+ SetFlag(kTruncatingToInt32);
+ }
if (is_truncating_to_int32) SetFlag(kTruncatingToInt32);
if (value->representation().IsSmi() || value->type().IsSmi()) {
set_type(HType::Smi());
@@ -1532,47 +1571,48 @@ class HChange: public HUnaryOperation {
return CheckUsesForFlag(kAllowUndefinedAsNaN);
}
- virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
- virtual HType CalculateInferredType();
- virtual HValue* Canonicalize();
+ virtual HValue* EnsureAndPropagateNotMinusZero(
+ BitVector* visited) V8_OVERRIDE;
+ virtual HType CalculateInferredType() V8_OVERRIDE;
+ virtual HValue* Canonicalize() V8_OVERRIDE;
Representation from() const { return value()->representation(); }
Representation to() const { return representation(); }
bool deoptimize_on_minus_zero() const {
return CheckFlag(kBailoutOnMinusZero);
}
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return from();
}
- virtual Range* InferRange(Zone* zone);
+ virtual Range* InferRange(Zone* zone) V8_OVERRIDE;
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(Change)
protected:
- virtual bool DataEquals(HValue* other) { return true; }
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
private:
- virtual bool IsDeletable() const {
+ virtual bool IsDeletable() const V8_OVERRIDE {
return !from().IsTagged() || value()->type().IsSmi();
}
};
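
// [editor's note] The HChange constructor change above makes kTruncatingToSmi
// imply kTruncatingToInt32: any value truncated to the smi range necessarily
// fits the int32 range, so both flags can be set together. A self-contained
// sketch of the range containment (31-bit smi payload assumed):

#include <cassert>
#include <cstdint>

int main() {
  const int32_t kSmiMax = (INT32_C(1) << 30) - 1;  // 2^30 - 1
  const int32_t kSmiMin = -(INT32_C(1) << 30);     // -2^30
  assert(kSmiMin >= INT32_MIN && kSmiMax <= INT32_MAX);
  return 0;
}
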
-class HClampToUint8: public HUnaryOperation {
+class HClampToUint8 V8_FINAL : public HUnaryOperation {
public:
DECLARE_INSTRUCTION_FACTORY_P1(HClampToUint8, HValue*);
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::None();
}
DECLARE_CONCRETE_INSTRUCTION(ClampToUint8)
protected:
- virtual bool DataEquals(HValue* other) { return true; }
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
private:
explicit HClampToUint8(HValue* value)
@@ -1582,7 +1622,7 @@ class HClampToUint8: public HUnaryOperation {
SetFlag(kUseGVN);
}
- virtual bool IsDeletable() const { return true; }
+ virtual bool IsDeletable() const V8_OVERRIDE { return true; }
};
@@ -1592,7 +1632,7 @@ enum RemovableSimulate {
};
-class HSimulate: public HInstruction {
+class HSimulate V8_FINAL : public HInstruction {
public:
HSimulate(BailoutId ast_id,
int pop_count,
@@ -1604,9 +1644,9 @@ class HSimulate: public HInstruction {
assigned_indexes_(2, zone),
zone_(zone),
removable_(removable) {}
- virtual ~HSimulate() {}
+ ~HSimulate() {}
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
bool HasAstId() const { return !ast_id_.IsNone(); }
BailoutId ast_id() const { return ast_id_; }
@@ -1636,27 +1676,32 @@ class HSimulate: public HInstruction {
}
return -1;
}
- virtual int OperandCount() { return values_.length(); }
- virtual HValue* OperandAt(int index) const { return values_[index]; }
+ virtual int OperandCount() V8_OVERRIDE { return values_.length(); }
+ virtual HValue* OperandAt(int index) const V8_OVERRIDE {
+ return values_[index];
+ }
- virtual bool HasEscapingOperandAt(int index) { return false; }
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual bool HasEscapingOperandAt(int index) V8_OVERRIDE { return false; }
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::None();
}
void MergeWith(ZoneList<HSimulate*>* list);
bool is_candidate_for_removal() { return removable_ == REMOVABLE_SIMULATE; }
+ // Replay effects of this instruction on the given environment.
+ void ReplayEnvironment(HEnvironment* env);
+
DECLARE_CONCRETE_INSTRUCTION(Simulate)
#ifdef DEBUG
- virtual void Verify();
+ virtual void Verify() V8_OVERRIDE;
void set_closure(Handle<JSFunction> closure) { closure_ = closure; }
Handle<JSFunction> closure() const { return closure_; }
#endif
protected:
- virtual void InternalSetOperandAt(int index, HValue* value) {
+ virtual void InternalSetOperandAt(int index, HValue* value) V8_OVERRIDE {
values_[index] = value;
}
@@ -1689,7 +1734,7 @@ class HSimulate: public HInstruction {
};
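
// [editor's note] The new HSimulate::ReplayEnvironment(HEnvironment*)
// declared above lets later phases re-apply a simulate's recorded effects to
// an environment. HEnvironment is out of scope here, so this is a toy,
// self-contained analogue of the replay shape (all names illustrative):

#include <vector>

struct MiniEnv {
  std::vector<int> slots;
  void Drop(int n) { slots.resize(slots.size() - static_cast<size_t>(n)); }
  void Push(int v) { slots.push_back(v); }
};

// Replay: drop what the simulate popped, then re-push the recorded values
// in order, mirroring the environment state at the simulate point.
void Replay(MiniEnv* env, int pop_count, const std::vector<int>& pushed) {
  env->Drop(pop_count);
  for (int v : pushed) env->Push(v);
}
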
-class HEnvironmentMarker: public HTemplateInstruction<1> {
+class HEnvironmentMarker V8_FINAL : public HTemplateInstruction<1> {
public:
enum Kind { BIND, LOOKUP };
@@ -1703,11 +1748,11 @@ class HEnvironmentMarker: public HTemplateInstruction<1> {
next_simulate_ = simulate;
}
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::None();
}
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
#ifdef DEBUG
void set_closure(Handle<JSFunction> closure) {
@@ -1731,7 +1776,7 @@ class HEnvironmentMarker: public HTemplateInstruction<1> {
};
-class HStackCheck: public HTemplateInstruction<1> {
+class HStackCheck V8_FINAL : public HTemplateInstruction<1> {
public:
enum Type {
kFunctionEntry,
@@ -1742,7 +1787,7 @@ class HStackCheck: public HTemplateInstruction<1> {
HValue* context() { return OperandAt(0); }
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
@@ -1781,7 +1826,7 @@ enum InliningKind {
class HArgumentsObject;
-class HEnterInlined: public HTemplateInstruction<0> {
+class HEnterInlined V8_FINAL : public HTemplateInstruction<0> {
public:
static HEnterInlined* New(Zone* zone,
HValue* context,
@@ -1800,7 +1845,7 @@ class HEnterInlined: public HTemplateInstruction<0> {
void RegisterReturnTarget(HBasicBlock* return_target, Zone* zone);
ZoneList<HBasicBlock*>* return_targets() { return &return_targets_; }
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
Handle<JSFunction> closure() const { return closure_; }
int arguments_count() const { return arguments_count_; }
@@ -1810,7 +1855,7 @@ class HEnterInlined: public HTemplateInstruction<0> {
InliningKind inlining_kind() const { return inlining_kind_; }
bool undefined_receiver() const { return undefined_receiver_; }
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::None();
}
@@ -1851,11 +1896,11 @@ class HEnterInlined: public HTemplateInstruction<0> {
};
-class HLeaveInlined: public HTemplateInstruction<0> {
+class HLeaveInlined V8_FINAL : public HTemplateInstruction<0> {
public:
HLeaveInlined() { }
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::None();
}
@@ -1863,11 +1908,11 @@ class HLeaveInlined: public HTemplateInstruction<0> {
};
-class HPushArgument: public HUnaryOperation {
+class HPushArgument V8_FINAL : public HUnaryOperation {
public:
DECLARE_INSTRUCTION_FACTORY_P1(HPushArgument, HValue*);
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
@@ -1882,39 +1927,39 @@ class HPushArgument: public HUnaryOperation {
};
-class HThisFunction: public HTemplateInstruction<0> {
+class HThisFunction V8_FINAL : public HTemplateInstruction<0> {
public:
HThisFunction() {
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
}
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::None();
}
DECLARE_CONCRETE_INSTRUCTION(ThisFunction)
protected:
- virtual bool DataEquals(HValue* other) { return true; }
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
private:
- virtual bool IsDeletable() const { return true; }
+ virtual bool IsDeletable() const V8_OVERRIDE { return true; }
};
-class HOuterContext: public HUnaryOperation {
+class HOuterContext V8_FINAL : public HUnaryOperation {
public:
DECLARE_INSTRUCTION_FACTORY_P1(HOuterContext, HValue*);
DECLARE_CONCRETE_INSTRUCTION(OuterContext);
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
protected:
- virtual bool DataEquals(HValue* other) { return true; }
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
private:
explicit HOuterContext(HValue* inner) : HUnaryOperation(inner) {
@@ -1922,11 +1967,11 @@ class HOuterContext: public HUnaryOperation {
SetFlag(kUseGVN);
}
- virtual bool IsDeletable() const { return true; }
+ virtual bool IsDeletable() const V8_OVERRIDE { return true; }
};
-class HDeclareGlobals: public HUnaryOperation {
+class HDeclareGlobals V8_FINAL : public HUnaryOperation {
public:
HDeclareGlobals(HValue* context,
Handle<FixedArray> pairs,
@@ -1951,7 +1996,7 @@ class HDeclareGlobals: public HUnaryOperation {
DECLARE_CONCRETE_INSTRUCTION(DeclareGlobals)
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
@@ -1961,7 +2006,7 @@ class HDeclareGlobals: public HUnaryOperation {
};
-class HGlobalObject: public HUnaryOperation {
+class HGlobalObject V8_FINAL : public HUnaryOperation {
public:
explicit HGlobalObject(HValue* context) : HUnaryOperation(context) {
set_representation(Representation::Tagged());
@@ -1974,30 +2019,30 @@ class HGlobalObject: public HUnaryOperation {
DECLARE_CONCRETE_INSTRUCTION(GlobalObject)
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
protected:
- virtual bool DataEquals(HValue* other) { return true; }
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
private:
- virtual bool IsDeletable() const { return true; }
+ virtual bool IsDeletable() const V8_OVERRIDE { return true; }
};
-class HGlobalReceiver: public HUnaryOperation {
+class HGlobalReceiver V8_FINAL : public HUnaryOperation {
public:
DECLARE_INSTRUCTION_FACTORY_P1(HGlobalReceiver, HValue*);
DECLARE_CONCRETE_INSTRUCTION(GlobalReceiver)
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
protected:
- virtual bool DataEquals(HValue* other) { return true; }
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
private:
explicit HGlobalReceiver(HValue* global_object)
@@ -2006,12 +2051,12 @@ class HGlobalReceiver: public HUnaryOperation {
SetFlag(kUseGVN);
}
- virtual bool IsDeletable() const { return true; }
+ virtual bool IsDeletable() const V8_OVERRIDE { return true; }
};
template <int V>
-class HCall: public HTemplateInstruction<V> {
+class HCall : public HTemplateInstruction<V> {
public:
// The argument count includes the receiver.
explicit HCall<V>(int argument_count) : argument_count_(argument_count) {
@@ -2019,35 +2064,38 @@ class HCall: public HTemplateInstruction<V> {
this->SetAllSideEffects();
}
- virtual HType CalculateInferredType() { return HType::Tagged(); }
+ virtual HType CalculateInferredType() V8_FINAL V8_OVERRIDE {
+ return HType::Tagged();
+ }
virtual int argument_count() const { return argument_count_; }
- virtual bool IsCall() { return true; }
+ virtual bool IsCall() V8_FINAL V8_OVERRIDE { return true; }
private:
int argument_count_;
};
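
// [editor's note] HCall itself stays derivable (HUnaryCall and HBinaryCall
// inherit from it), but individual virtuals are sealed at method granularity
// with V8_FINAL V8_OVERRIDE. The same pattern in plain C++11:

struct Base {
  virtual ~Base() {}
  virtual bool IsCall() { return false; }
};
struct Call : Base {
  bool IsCall() final override { return true; }  // subclasses allowed...
};
struct NamedCall : Call {
  // bool IsCall() override;  // ...but re-overriding would not compile
};
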
-class HUnaryCall: public HCall<1> {
+class HUnaryCall : public HCall<1> {
public:
HUnaryCall(HValue* value, int argument_count)
: HCall<1>(argument_count) {
SetOperandAt(0, value);
}
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(
+ int index) V8_FINAL V8_OVERRIDE {
return Representation::Tagged();
}
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
HValue* value() { return OperandAt(0); }
};
-class HBinaryCall: public HCall<2> {
+class HBinaryCall : public HCall<2> {
public:
HBinaryCall(HValue* first, HValue* second, int argument_count)
: HCall<2>(argument_count) {
@@ -2055,9 +2103,10 @@ class HBinaryCall: public HCall<2> {
SetOperandAt(1, second);
}
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(
+ int index) V8_FINAL V8_OVERRIDE {
return Representation::Tagged();
}
@@ -2066,7 +2115,7 @@ class HBinaryCall: public HCall<2> {
};
-class HInvokeFunction: public HBinaryCall {
+class HInvokeFunction V8_FINAL : public HBinaryCall {
public:
HInvokeFunction(HValue* context, HValue* function, int argument_count)
: HBinaryCall(context, function, argument_count) {
@@ -2098,10 +2147,6 @@ class HInvokeFunction: public HBinaryCall {
known_function, argument_count);
}
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
-
HValue* context() { return first(); }
HValue* function() { return second(); }
Handle<JSFunction> known_function() { return known_function_; }
@@ -2115,7 +2160,7 @@ class HInvokeFunction: public HBinaryCall {
};
-class HCallConstantFunction: public HCall<0> {
+class HCallConstantFunction V8_FINAL : public HCall<0> {
public:
HCallConstantFunction(Handle<JSFunction> function, int argument_count)
: HCall<0>(argument_count),
@@ -2127,12 +2172,12 @@ class HCallConstantFunction: public HCall<0> {
bool IsApplyFunction() const {
return function_->code() ==
- Isolate::Current()->builtins()->builtin(Builtins::kFunctionApply);
+ function_->GetIsolate()->builtins()->builtin(Builtins::kFunctionApply);
}
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::None();
}
@@ -2144,16 +2189,12 @@ class HCallConstantFunction: public HCall<0> {
};
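
// [editor's note] IsApplyFunction() above swaps Isolate::Current() (a
// thread-local lookup) for the isolate reachable from the function handle
// itself, part of a wider Isolate::Current() cleanup in this upgrade. The
// general shape of the pattern, with illustrative stand-in types:

struct Isolate { /* per-VM-instance state */ };
struct HeapObjectLike {
  Isolate* isolate;
  Isolate* GetIsolate() const { return isolate; }  // derived from the object,
                                                   // no TLS lookup involved
};
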
-class HCallKeyed: public HBinaryCall {
+class HCallKeyed V8_FINAL : public HBinaryCall {
public:
HCallKeyed(HValue* context, HValue* key, int argument_count)
: HBinaryCall(context, key, argument_count) {
}
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
-
HValue* context() { return first(); }
HValue* key() { return second(); }
@@ -2161,29 +2202,25 @@ class HCallKeyed: public HBinaryCall {
};
-class HCallNamed: public HUnaryCall {
+class HCallNamed V8_FINAL : public HUnaryCall {
public:
HCallNamed(HValue* context, Handle<String> name, int argument_count)
: HUnaryCall(context, argument_count), name_(name) {
}
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
HValue* context() { return value(); }
Handle<String> name() const { return name_; }
DECLARE_CONCRETE_INSTRUCTION(CallNamed)
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
-
private:
Handle<String> name_;
};
-class HCallFunction: public HBinaryCall {
+class HCallFunction V8_FINAL : public HBinaryCall {
public:
HCallFunction(HValue* context, HValue* function, int argument_count)
: HBinaryCall(context, function, argument_count) {
@@ -2199,15 +2236,11 @@ class HCallFunction: public HBinaryCall {
HValue* context() { return first(); }
HValue* function() { return second(); }
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
-
DECLARE_CONCRETE_INSTRUCTION(CallFunction)
};
-class HCallGlobal: public HUnaryCall {
+class HCallGlobal V8_FINAL : public HUnaryCall {
public:
HCallGlobal(HValue* context, Handle<String> name, int argument_count)
: HUnaryCall(context, argument_count), name_(name) {
@@ -2220,15 +2253,11 @@ class HCallGlobal: public HUnaryCall {
return new(zone) HCallGlobal(context, name, argument_count);
}
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
HValue* context() { return value(); }
Handle<String> name() const { return name_; }
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
-
DECLARE_CONCRETE_INSTRUCTION(CallGlobal)
private:
@@ -2236,19 +2265,19 @@ class HCallGlobal: public HUnaryCall {
};
-class HCallKnownGlobal: public HCall<0> {
+class HCallKnownGlobal V8_FINAL : public HCall<0> {
public:
HCallKnownGlobal(Handle<JSFunction> target, int argument_count)
: HCall<0>(argument_count),
target_(target),
formal_parameter_count_(target->shared()->formal_parameter_count()) { }
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
Handle<JSFunction> target() const { return target_; }
int formal_parameter_count() const { return formal_parameter_count_; }
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::None();
}
@@ -2260,15 +2289,10 @@ class HCallKnownGlobal: public HCall<0> {
};
-class HCallNew: public HBinaryCall {
+class HCallNew V8_FINAL : public HBinaryCall {
public:
HCallNew(HValue* context, HValue* constructor, int argument_count)
- : HBinaryCall(context, constructor, argument_count) {
- }
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
+ : HBinaryCall(context, constructor, argument_count) {}
HValue* context() { return first(); }
HValue* constructor() { return second(); }
@@ -2277,15 +2301,18 @@ class HCallNew: public HBinaryCall {
};
-class HCallNewArray: public HCallNew {
+class HCallNewArray V8_FINAL : public HBinaryCall {
public:
HCallNewArray(HValue* context, HValue* constructor, int argument_count,
Handle<Cell> type_cell, ElementsKind elements_kind)
- : HCallNew(context, constructor, argument_count),
+ : HBinaryCall(context, constructor, argument_count),
elements_kind_(elements_kind),
type_cell_(type_cell) {}
- virtual void PrintDataTo(StringStream* stream);
+ HValue* context() { return first(); }
+ HValue* constructor() { return second(); }
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
Handle<Cell> property_cell() const {
return type_cell_;
@@ -2301,7 +2328,7 @@ class HCallNewArray: public HCallNew {
};
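
// [editor's note] HCallNew is now V8_FINAL, so HCallNewArray can no longer
// extend it; it is rebased directly onto HBinaryCall and re-declares the
// context()/constructor() accessors. Why: a 'final' class cannot be a base.

struct Base { virtual ~Base() {} };
struct CallNew final : Base {};
// struct CallNewArray : CallNew {};  // error: cannot derive from final class
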
-class HCallRuntime: public HCall<1> {
+class HCallRuntime V8_FINAL : public HCall<1> {
public:
static HCallRuntime* New(Zone* zone,
HValue* context,
@@ -2311,13 +2338,13 @@ class HCallRuntime: public HCall<1> {
return new(zone) HCallRuntime(context, name, c_function, argument_count);
}
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
HValue* context() { return OperandAt(0); }
const Runtime::Function* function() const { return c_function_; }
Handle<String> name() const { return name_; }
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
@@ -2337,18 +2364,18 @@ class HCallRuntime: public HCall<1> {
};
-class HMapEnumLength: public HUnaryOperation {
+class HMapEnumLength V8_FINAL : public HUnaryOperation {
public:
DECLARE_INSTRUCTION_FACTORY_P1(HMapEnumLength, HValue*);
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
DECLARE_CONCRETE_INSTRUCTION(MapEnumLength)
protected:
- virtual bool DataEquals(HValue* other) { return true; }
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
private:
explicit HMapEnumLength(HValue* value)
@@ -2358,11 +2385,11 @@ class HMapEnumLength: public HUnaryOperation {
SetGVNFlag(kDependsOnMaps);
}
- virtual bool IsDeletable() const { return true; }
+ virtual bool IsDeletable() const V8_OVERRIDE { return true; }
};
-class HElementsKind: public HUnaryOperation {
+class HElementsKind V8_FINAL : public HUnaryOperation {
public:
explicit HElementsKind(HValue* value) : HUnaryOperation(value) {
set_representation(Representation::Integer32());
@@ -2370,21 +2397,21 @@ class HElementsKind: public HUnaryOperation {
SetGVNFlag(kDependsOnElementsKind);
}
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
DECLARE_CONCRETE_INSTRUCTION(ElementsKind)
protected:
- virtual bool DataEquals(HValue* other) { return true; }
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
private:
- virtual bool IsDeletable() const { return true; }
+ virtual bool IsDeletable() const V8_OVERRIDE { return true; }
};
-class HUnaryMathOperation: public HTemplateInstruction<2> {
+class HUnaryMathOperation V8_FINAL : public HTemplateInstruction<2> {
public:
static HInstruction* New(Zone* zone,
HValue* context,
@@ -2394,11 +2421,12 @@ class HUnaryMathOperation: public HTemplateInstruction<2> {
HValue* context() { return OperandAt(0); }
HValue* value() { return OperandAt(1); }
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
+ virtual HValue* EnsureAndPropagateNotMinusZero(
+ BitVector* visited) V8_OVERRIDE;
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
if (index == 0) {
return Representation::Tagged();
} else {
@@ -2422,10 +2450,10 @@ class HUnaryMathOperation: public HTemplateInstruction<2> {
}
}
- virtual Range* InferRange(Zone* zone);
+ virtual Range* InferRange(Zone* zone) V8_OVERRIDE;
- virtual HValue* Canonicalize();
- virtual Representation RepresentationFromInputs();
+ virtual HValue* Canonicalize() V8_OVERRIDE;
+ virtual Representation RepresentationFromInputs() V8_OVERRIDE;
BuiltinFunctionId op() const { return op_; }
const char* OpName() const;
@@ -2433,7 +2461,7 @@ class HUnaryMathOperation: public HTemplateInstruction<2> {
DECLARE_CONCRETE_INSTRUCTION(UnaryMathOperation)
protected:
- virtual bool DataEquals(HValue* other) {
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE {
HUnaryMathOperation* b = HUnaryMathOperation::cast(other);
return op_ == b->op();
}
@@ -2475,28 +2503,28 @@ class HUnaryMathOperation: public HTemplateInstruction<2> {
SetFlag(kAllowUndefinedAsNaN);
}
- virtual bool IsDeletable() const { return true; }
+ virtual bool IsDeletable() const V8_OVERRIDE { return true; }
BuiltinFunctionId op_;
};
-class HLoadExternalArrayPointer: public HUnaryOperation {
+class HLoadExternalArrayPointer V8_FINAL : public HUnaryOperation {
public:
DECLARE_INSTRUCTION_FACTORY_P1(HLoadExternalArrayPointer, HValue*);
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
- virtual HType CalculateInferredType() {
+ virtual HType CalculateInferredType() V8_OVERRIDE {
return HType::None();
}
DECLARE_CONCRETE_INSTRUCTION(LoadExternalArrayPointer)
protected:
- virtual bool DataEquals(HValue* other) { return true; }
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
private:
explicit HLoadExternalArrayPointer(HValue* value)
@@ -2509,11 +2537,11 @@ class HLoadExternalArrayPointer: public HUnaryOperation {
SetFlag(kUseGVN);
}
- virtual bool IsDeletable() const { return true; }
+ virtual bool IsDeletable() const V8_OVERRIDE { return true; }
};
-class HCheckMaps: public HTemplateInstruction<2> {
+class HCheckMaps V8_FINAL : public HTemplateInstruction<2> {
public:
static HCheckMaps* New(Zone* zone, HValue* context, HValue* value,
Handle<Map> map, CompilationInfo* info,
@@ -2531,27 +2559,28 @@ class HCheckMaps: public HTemplateInstruction<2> {
bool CanOmitMapChecks() { return omit_; }
- virtual bool HasEscapingOperandAt(int index) { return false; }
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual bool HasEscapingOperandAt(int index) V8_OVERRIDE { return false; }
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
virtual void HandleSideEffectDominator(GVNFlag side_effect,
- HValue* dominator);
- virtual void PrintDataTo(StringStream* stream);
+ HValue* dominator) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
HValue* value() { return OperandAt(0); }
SmallMapList* map_set() { return &map_set_; }
+ ZoneList<UniqueValueId>* map_unique_ids() { return &map_unique_ids_; }
bool has_migration_target() {
return has_migration_target_;
}
- virtual void FinalizeUniqueValueId();
+ virtual void FinalizeUniqueValueId() V8_OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(CheckMaps)
protected:
- virtual bool DataEquals(HValue* other) {
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE {
ASSERT_EQ(map_set_.length(), map_unique_ids_.length());
HCheckMaps* b = HCheckMaps::cast(other);
// Relies on the fact that map_set has been sorted before.
@@ -2566,6 +2595,8 @@ class HCheckMaps: public HTemplateInstruction<2> {
return true;
}
+ virtual int RedefinedOperandIndex() { return 0; }
+
private:
void Add(Handle<Map> map, Zone* zone) {
map_set_.Add(map, zone);
@@ -2594,6 +2625,7 @@ class HCheckMaps: public HTemplateInstruction<2> {
omit_ = true;
for (int i = 0; i < map_set_.length(); i++) {
Handle<Map> map = map_set_.at(i);
+ if (!map->CanTransition()) continue;
map->AddDependentCompilationInfo(DependentCode::kPrototypeCheckGroup,
info);
}
@@ -2606,52 +2638,62 @@ class HCheckMaps: public HTemplateInstruction<2> {
};
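
// [editor's note] The new `if (!map->CanTransition()) continue;` guard in
// CanOmitMapChecks registers a compilation dependency only for maps that can
// actually change; a stable map can never invalidate the omitted check. A
// self-contained sketch of the filter (types illustrative):

#include <vector>

struct MapInfo { bool can_transition; };

int CountDependencies(const std::vector<MapInfo>& maps) {
  int deps = 0;
  for (const MapInfo& m : maps) {
    if (!m.can_transition) continue;  // mirrors the guard in the hunk above
    ++deps;                           // would record a deopt dependency
  }
  return deps;
}
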
-class HCheckFunction: public HUnaryOperation {
+class HCheckValue V8_FINAL : public HUnaryOperation {
public:
- DECLARE_INSTRUCTION_FACTORY_P2(HCheckFunction, HValue*, Handle<JSFunction>);
+ static HCheckValue* New(Zone* zone, HValue* context,
+ HValue* value, Handle<JSFunction> target) {
+ bool in_new_space = zone->isolate()->heap()->InNewSpace(*target);
+ HCheckValue* check = new(zone) HCheckValue(value, target, in_new_space);
+ return check;
+ }
+ static HCheckValue* New(Zone* zone, HValue* context,
+ HValue* value, Handle<Map> map, UniqueValueId id) {
+ HCheckValue* check = new(zone) HCheckValue(value, map, false);
+ check->object_unique_id_ = id;
+ return check;
+ }
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- virtual HValue* Canonicalize();
+ virtual HValue* Canonicalize() V8_OVERRIDE;
#ifdef DEBUG
- virtual void Verify();
+ virtual void Verify() V8_OVERRIDE;
#endif
- virtual void FinalizeUniqueValueId() {
- target_unique_id_ = UniqueValueId(target_);
+ virtual void FinalizeUniqueValueId() V8_OVERRIDE {
+ object_unique_id_ = UniqueValueId(object_);
}
- Handle<JSFunction> target() const { return target_; }
- bool target_in_new_space() const { return target_in_new_space_; }
+ Handle<HeapObject> object() const { return object_; }
+ bool object_in_new_space() const { return object_in_new_space_; }
- DECLARE_CONCRETE_INSTRUCTION(CheckFunction)
+ DECLARE_CONCRETE_INSTRUCTION(CheckValue)
protected:
- virtual bool DataEquals(HValue* other) {
- HCheckFunction* b = HCheckFunction::cast(other);
- return target_unique_id_ == b->target_unique_id_;
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE {
+ HCheckValue* b = HCheckValue::cast(other);
+ return object_unique_id_ == b->object_unique_id_;
}
private:
- HCheckFunction(HValue* value, Handle<JSFunction> function)
+ HCheckValue(HValue* value, Handle<HeapObject> object, bool in_new_space)
: HUnaryOperation(value, value->type()),
- target_(function), target_unique_id_() {
+ object_(object), object_in_new_space_(in_new_space) {
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
- target_in_new_space_ = Isolate::Current()->heap()->InNewSpace(*function);
}
- Handle<JSFunction> target_;
- UniqueValueId target_unique_id_;
- bool target_in_new_space_;
+ Handle<HeapObject> object_;
+ UniqueValueId object_unique_id_;
+ bool object_in_new_space_;
};
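
// [editor's note] HCheckFunction becomes HCheckValue: the guard widens from
// "this exact JSFunction" to "this exact HeapObject", and GVN equality is
// decided by a stable UniqueValueId rather than a raw handle. A toy analogue
// of the id-based DataEquals:

#include <cstdint>

struct UniqueId {
  uintptr_t raw = 0;
  friend bool operator==(UniqueId a, UniqueId b) { return a.raw == b.raw; }
};

struct CheckValueLike {
  UniqueId expected;
  // Two checks against the same object are redundant and can be GVN'd.
  bool SameCheckAs(const CheckValueLike& other) const {
    return expected == other.expected;
  }
};
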
-class HCheckInstanceType: public HUnaryOperation {
+class HCheckInstanceType V8_FINAL : public HUnaryOperation {
public:
static HCheckInstanceType* NewIsSpecObject(HValue* value, Zone* zone) {
return new(zone) HCheckInstanceType(value, IS_SPEC_OBJECT);
@@ -2667,13 +2709,13 @@ class HCheckInstanceType: public HUnaryOperation {
return new(zone) HCheckInstanceType(value, IS_INTERNALIZED_STRING);
}
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
- virtual HValue* Canonicalize();
+ virtual HValue* Canonicalize() V8_OVERRIDE;
bool is_interval_check() const { return check_ <= LAST_INTERVAL_CHECK; }
void GetCheckInterval(InstanceType* first, InstanceType* last);
@@ -2685,11 +2727,13 @@ class HCheckInstanceType: public HUnaryOperation {
  // TODO(ager): It could be nice to allow the omission of instance
// type checks if we have already performed an instance type check
// with a larger range.
- virtual bool DataEquals(HValue* other) {
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE {
HCheckInstanceType* b = HCheckInstanceType::cast(other);
return check_ == b->check_;
}
+ virtual int RedefinedOperandIndex() { return 0; }
+
private:
enum Check {
IS_SPEC_OBJECT,
@@ -2711,15 +2755,15 @@ class HCheckInstanceType: public HUnaryOperation {
};
-class HCheckSmi: public HUnaryOperation {
+class HCheckSmi V8_FINAL : public HUnaryOperation {
public:
DECLARE_INSTRUCTION_FACTORY_P1(HCheckSmi, HValue*);
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
- virtual HValue* Canonicalize() {
+ virtual HValue* Canonicalize() V8_OVERRIDE {
HType value_type = value()->type();
if (value_type.IsSmi()) {
return NULL;
@@ -2730,7 +2774,7 @@ class HCheckSmi: public HUnaryOperation {
DECLARE_CONCRETE_INSTRUCTION(CheckSmi)
protected:
- virtual bool DataEquals(HValue* other) { return true; }
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
private:
explicit HCheckSmi(HValue* value) : HUnaryOperation(value, HType::Smi()) {
@@ -2740,14 +2784,14 @@ class HCheckSmi: public HUnaryOperation {
};
-class HIsNumberAndBranch: public HUnaryControlInstruction {
+class HIsNumberAndBranch V8_FINAL : public HUnaryControlInstruction {
public:
explicit HIsNumberAndBranch(HValue* value)
: HUnaryControlInstruction(value, NULL, NULL) {
SetFlag(kFlexibleRepresentation);
}
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::None();
}
@@ -2755,27 +2799,27 @@ class HIsNumberAndBranch: public HUnaryControlInstruction {
};
-class HCheckHeapObject: public HUnaryOperation {
+class HCheckHeapObject V8_FINAL : public HUnaryOperation {
public:
DECLARE_INSTRUCTION_FACTORY_P1(HCheckHeapObject, HValue*);
- virtual bool HasEscapingOperandAt(int index) { return false; }
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual bool HasEscapingOperandAt(int index) V8_OVERRIDE { return false; }
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
#ifdef DEBUG
- virtual void Verify();
+ virtual void Verify() V8_OVERRIDE;
#endif
- virtual HValue* Canonicalize() {
+ virtual HValue* Canonicalize() V8_OVERRIDE {
return value()->type().IsHeapObject() ? NULL : this;
}
DECLARE_CONCRETE_INSTRUCTION(CheckHeapObject)
protected:
- virtual bool DataEquals(HValue* other) { return true; }
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
private:
explicit HCheckHeapObject(HValue* value)
@@ -2807,7 +2851,7 @@ class HConstant;
class HBitwise;
-class InductionVariableData : public ZoneObject {
+class InductionVariableData V8_FINAL : public ZoneObject {
public:
class InductionVariableCheck : public ZoneObject {
public:
@@ -3007,7 +3051,7 @@ class InductionVariableData : public ZoneObject {
};
-class HPhi: public HValue {
+class HPhi V8_FINAL : public HValue {
public:
HPhi(int merged_index, Zone* zone)
: inputs_(2, zone),
@@ -3023,19 +3067,22 @@ class HPhi: public HValue {
SetFlag(kAllowUndefinedAsNaN);
}
- virtual Representation RepresentationFromInputs();
+ virtual Representation RepresentationFromInputs() V8_OVERRIDE;
- virtual Range* InferRange(Zone* zone);
- virtual void InferRepresentation(HInferRepresentationPhase* h_infer);
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Range* InferRange(Zone* zone) V8_OVERRIDE;
+ virtual void InferRepresentation(
+ HInferRepresentationPhase* h_infer) V8_OVERRIDE;
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return representation();
}
- virtual Representation KnownOptimalRepresentation() {
+ virtual Representation KnownOptimalRepresentation() V8_OVERRIDE {
return representation();
}
- virtual HType CalculateInferredType();
- virtual int OperandCount() { return inputs_.length(); }
- virtual HValue* OperandAt(int index) const { return inputs_[index]; }
+ virtual HType CalculateInferredType() V8_OVERRIDE;
+ virtual int OperandCount() V8_OVERRIDE { return inputs_.length(); }
+ virtual HValue* OperandAt(int index) const V8_OVERRIDE {
+ return inputs_[index];
+ }
HValue* GetRedundantReplacement();
void AddInput(HValue* value);
bool HasRealUses();
@@ -3060,10 +3107,10 @@ class HPhi: public HValue {
induction_variable_data_ = InductionVariableData::ExaminePhi(this);
}
- virtual void PrintTo(StringStream* stream);
+ virtual void PrintTo(StringStream* stream) V8_OVERRIDE;
#ifdef DEBUG
- virtual void Verify();
+ virtual void Verify() V8_OVERRIDE;
#endif
void InitRealUses(int id);
@@ -3100,7 +3147,7 @@ class HPhi: public HValue {
ASSERT(value->IsPhi());
return reinterpret_cast<HPhi*>(value);
}
- virtual Opcode opcode() const { return HValue::kPhi; }
+ virtual Opcode opcode() const V8_OVERRIDE { return HValue::kPhi; }
void SimplifyConstantInputs();
@@ -3108,8 +3155,8 @@ class HPhi: public HValue {
static const int kInvalidMergedIndex = -1;
protected:
- virtual void DeleteFromGraph();
- virtual void InternalSetOperandAt(int index, HValue* value) {
+ virtual void DeleteFromGraph() V8_OVERRIDE;
+ virtual void InternalSetOperandAt(int index, HValue* value) V8_OVERRIDE {
inputs_[index] = value;
}
@@ -3123,25 +3170,31 @@ class HPhi: public HValue {
InductionVariableData* induction_variable_data_;
  // TODO(titzer): we can't eliminate the receiver; it's needed for generating backtraces
- virtual bool IsDeletable() const { return !IsReceiver(); }
+ virtual bool IsDeletable() const V8_OVERRIDE { return !IsReceiver(); }
};
// Common base class for HArgumentsObject and HCapturedObject.
-class HDematerializedObject: public HTemplateInstruction<0> {
+class HDematerializedObject : public HInstruction {
public:
HDematerializedObject(int count, Zone* zone) : values_(count, zone) {}
- virtual int OperandCount() { return values_.length(); }
- virtual HValue* OperandAt(int index) const { return values_[index]; }
+ virtual int OperandCount() V8_FINAL V8_OVERRIDE { return values_.length(); }
+ virtual HValue* OperandAt(int index) const V8_FINAL V8_OVERRIDE {
+ return values_[index];
+ }
- virtual bool HasEscapingOperandAt(int index) { return false; }
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual bool HasEscapingOperandAt(int index) V8_FINAL V8_OVERRIDE {
+ return false;
+ }
+ virtual Representation RequiredInputRepresentation(
+ int index) V8_FINAL V8_OVERRIDE {
return Representation::None();
}
protected:
- virtual void InternalSetOperandAt(int index, HValue* value) {
+ virtual void InternalSetOperandAt(int index,
+ HValue* value) V8_FINAL V8_OVERRIDE {
values_[index] = value;
}
@@ -3149,11 +3202,11 @@ class HDematerializedObject: public HTemplateInstruction<0> {
ZoneList<HValue*> values_;
private:
- virtual bool IsDeletable() const { return true; }
+ virtual bool IsDeletable() const V8_FINAL V8_OVERRIDE { return true; }
};
-class HArgumentsObject: public HDematerializedObject {
+class HArgumentsObject V8_FINAL : public HDematerializedObject {
public:
static HArgumentsObject* New(Zone* zone, HValue* context, int count) {
return new(zone) HArgumentsObject(count, zone);
@@ -3180,10 +3233,10 @@ class HArgumentsObject: public HDematerializedObject {
};
-class HCapturedObject: public HDematerializedObject {
+class HCapturedObject V8_FINAL : public HDematerializedObject {
public:
- HCapturedObject(int length, Zone* zone)
- : HDematerializedObject(length, zone) {
+ HCapturedObject(int length, int id, Zone* zone)
+ : HDematerializedObject(length, zone), capture_id_(id) {
set_representation(Representation::Tagged());
values_.AddBlock(NULL, length, zone); // Resize list.
}
@@ -3193,12 +3246,28 @@ class HCapturedObject: public HDematerializedObject {
// properties or elements backing store are not tracked here.
const ZoneList<HValue*>* values() const { return &values_; }
int length() const { return values_.length(); }
+ int capture_id() const { return capture_id_; }
+
+ // Shortcut for the map value of this captured object.
+ HValue* map_value() const { return values()->first(); }
+
+ void ReuseSideEffectsFromStore(HInstruction* store) {
+ ASSERT(store->HasObservableSideEffects());
+ ASSERT(store->IsStoreNamedField());
+ gvn_flags_.Add(store->gvn_flags());
+ }
+
+ // Replay effects of this instruction on the given environment.
+ void ReplayEnvironment(HEnvironment* env);
DECLARE_CONCRETE_INSTRUCTION(CapturedObject)
+
+ private:
+ int capture_id_;
};
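
// [editor's note] HCapturedObject now carries a capture_id plus helpers used
// by escape analysis: slot 0 holds the map (map_value()), stores fold their
// GVN flags into the captured object, and ReplayEnvironment re-materializes
// the state at deopt points. Toy sketch of the slot-layout convention:

#include <vector>

struct CapturedObjectLike {
  int capture_id;
  std::vector<int> slots;                       // slot 0 is the map value
  int map_value() const { return slots.front(); }
};
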
-class HConstant: public HTemplateInstruction<0> {
+class HConstant V8_FINAL : public HTemplateInstruction<0> {
public:
DECLARE_INSTRUCTION_FACTORY_P1(HConstant, int32_t);
DECLARE_INSTRUCTION_FACTORY_P2(HConstant, int32_t, Representation);
@@ -3229,9 +3298,9 @@ class HConstant: public HTemplateInstruction<0> {
return new_constant;
}
- Handle<Object> handle() {
+ Handle<Object> handle(Isolate* isolate) {
if (handle_.is_null()) {
- Factory* factory = Isolate::Current()->factory();
+ Factory* factory = isolate->factory();
// Default arguments to is_not_in_new_space depend on this heap number
      // to be tenured so that it's guaranteed not to be located in new space.
handle_ = factory->NewNumber(double_value_, TENURED);
@@ -3241,10 +3310,10 @@ class HConstant: public HTemplateInstruction<0> {
return handle_;
}
- bool InstanceOf(Handle<Map> map) {
- Handle<Object> constant_object = handle();
- return constant_object->IsJSObject() &&
- Handle<JSObject>::cast(constant_object)->map() == *map;
+ bool HasMap(Handle<Map> map) {
+ Handle<Object> constant_object = handle(map->GetIsolate());
+ return constant_object->IsHeapObject() &&
+ Handle<HeapObject>::cast(constant_object)->map() == *map;
}
bool IsSpecialDouble() const {
@@ -3274,35 +3343,34 @@ class HConstant: public HTemplateInstruction<0> {
ASSERT(!handle_.is_null());
Heap* heap = isolate()->heap();
- ASSERT(unique_id_ != UniqueValueId(heap->minus_zero_value()));
- ASSERT(unique_id_ != UniqueValueId(heap->nan_value()));
- return unique_id_ == UniqueValueId(heap->undefined_value()) ||
- unique_id_ == UniqueValueId(heap->null_value()) ||
- unique_id_ == UniqueValueId(heap->true_value()) ||
- unique_id_ == UniqueValueId(heap->false_value()) ||
- unique_id_ == UniqueValueId(heap->the_hole_value()) ||
- unique_id_ == UniqueValueId(heap->empty_string());
+ ASSERT(unique_id_ != UniqueValueId::minus_zero_value(heap));
+ ASSERT(unique_id_ != UniqueValueId::nan_value(heap));
+ return unique_id_ == UniqueValueId::undefined_value(heap) ||
+ unique_id_ == UniqueValueId::null_value(heap) ||
+ unique_id_ == UniqueValueId::true_value(heap) ||
+ unique_id_ == UniqueValueId::false_value(heap) ||
+ unique_id_ == UniqueValueId::the_hole_value(heap) ||
+ unique_id_ == UniqueValueId::empty_string(heap);
}
bool IsCell() const {
return is_cell_;
}
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::None();
}
- virtual Representation KnownOptimalRepresentation() {
- if (HasSmiValue() && kSmiValueSize == 31) return Representation::Smi();
+ virtual Representation KnownOptimalRepresentation() V8_OVERRIDE {
+ if (HasSmiValue() && SmiValuesAre31Bits()) return Representation::Smi();
if (HasInteger32Value()) return Representation::Integer32();
if (HasNumberValue()) return Representation::Double();
if (HasExternalReferenceValue()) return Representation::External();
return Representation::Tagged();
}
- virtual bool EmitAtUses();
- virtual void PrintDataTo(StringStream* stream);
- bool IsInteger() { return handle()->IsSmi(); }
+ virtual bool EmitAtUses() V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
HConstant* CopyToRepresentation(Representation r, Zone* zone) const;
Maybe<HConstant*> CopyToTruncatedInt32(Zone* zone);
Maybe<HConstant*> CopyToTruncatedNumber(Zone* zone);
@@ -3358,7 +3426,7 @@ class HConstant: public HTemplateInstruction<0> {
bool HasBooleanValue() const { return type_.IsBoolean(); }
bool BooleanValue() const { return boolean_value_; }
- virtual intptr_t Hashcode() {
+ virtual intptr_t Hashcode() V8_OVERRIDE {
if (has_int32_value_) {
return static_cast<intptr_t>(int32_value_);
} else if (has_double_value_) {
@@ -3371,7 +3439,7 @@ class HConstant: public HTemplateInstruction<0> {
}
}
- virtual void FinalizeUniqueValueId() {
+ virtual void FinalizeUniqueValueId() V8_OVERRIDE {
if (!has_double_value_ && !has_external_reference_value_) {
ASSERT(!handle_.is_null());
unique_id_ = UniqueValueId(handle_);
@@ -3384,15 +3452,15 @@ class HConstant: public HTemplateInstruction<0> {
}
#ifdef DEBUG
- virtual void Verify() { }
+ virtual void Verify() V8_OVERRIDE { }
#endif
DECLARE_CONCRETE_INSTRUCTION(Constant)
protected:
- virtual Range* InferRange(Zone* zone);
+ virtual Range* InferRange(Zone* zone) V8_OVERRIDE;
- virtual bool DataEquals(HValue* other) {
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE {
HConstant* other_constant = HConstant::cast(other);
if (has_int32_value_) {
return other_constant->has_int32_value_ &&
@@ -3437,7 +3505,7 @@ class HConstant: public HTemplateInstruction<0> {
void Initialize(Representation r);
- virtual bool IsDeletable() const { return true; }
+ virtual bool IsDeletable() const V8_OVERRIDE { return true; }
  // If this is a numerical constant, handle_ either points to the
// HeapObject the constant originated from or is null. If the
@@ -3465,7 +3533,7 @@ class HConstant: public HTemplateInstruction<0> {
};
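
// [editor's note] Two HConstant changes worth flagging: handle() now takes
// the Isolate explicitly (the lazily created heap number no longer relies on
// TLS), and the magic-number test `kSmiValueSize == 31` becomes a named
// predicate. Sketch of such a predicate, assuming 31-bit smis on 32-bit
// targets and 32-bit smis on 64-bit targets:

#include <cstddef>

constexpr int kSmiValueSize = sizeof(void*) == 8 ? 32 : 31;
constexpr bool SmiValuesAre31Bits() { return kSmiValueSize == 31; }
constexpr bool SmiValuesAre32Bits() { return kSmiValueSize == 32; }
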
-class HBinaryOperation: public HTemplateInstruction<3> {
+class HBinaryOperation : public HTemplateInstruction<3> {
public:
HBinaryOperation(HValue* context, HValue* left, HValue* right,
HType type = HType::Tagged())
@@ -3515,29 +3583,30 @@ class HBinaryOperation: public HTemplateInstruction<3> {
observed_output_representation_ = observed;
}
- virtual Representation observed_input_representation(int index) {
+ virtual Representation observed_input_representation(int index) V8_OVERRIDE {
if (index == 0) return Representation::Tagged();
return observed_input_representation_[index - 1];
}
virtual void UpdateRepresentation(Representation new_rep,
HInferRepresentationPhase* h_infer,
- const char* reason) {
+ const char* reason) V8_OVERRIDE {
Representation rep = !FLAG_smi_binop && new_rep.IsSmi()
? Representation::Integer32() : new_rep;
HValue::UpdateRepresentation(rep, h_infer, reason);
}
- virtual void InferRepresentation(HInferRepresentationPhase* h_infer);
- virtual Representation RepresentationFromInputs();
+ virtual void InferRepresentation(
+ HInferRepresentationPhase* h_infer) V8_OVERRIDE;
+ virtual Representation RepresentationFromInputs() V8_OVERRIDE;
Representation RepresentationFromOutput();
- virtual void AssumeRepresentation(Representation r);
+ virtual void AssumeRepresentation(Representation r) V8_OVERRIDE;
virtual bool IsCommutative() const { return false; }
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
if (index == 0) return Representation::Tagged();
return representation();
}
@@ -3552,20 +3621,20 @@ class HBinaryOperation: public HTemplateInstruction<3> {
};
-class HWrapReceiver: public HTemplateInstruction<2> {
+class HWrapReceiver V8_FINAL : public HTemplateInstruction<2> {
public:
DECLARE_INSTRUCTION_FACTORY_P2(HWrapReceiver, HValue*, HValue*);
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
HValue* receiver() { return OperandAt(0); }
HValue* function() { return OperandAt(1); }
- virtual HValue* Canonicalize();
+ virtual HValue* Canonicalize() V8_OVERRIDE;
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(WrapReceiver)
@@ -3578,7 +3647,7 @@ class HWrapReceiver: public HTemplateInstruction<2> {
};
-class HApplyArguments: public HTemplateInstruction<4> {
+class HApplyArguments V8_FINAL : public HTemplateInstruction<4> {
public:
HApplyArguments(HValue* function,
HValue* receiver,
@@ -3592,7 +3661,7 @@ class HApplyArguments: public HTemplateInstruction<4> {
SetAllSideEffects();
}
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
    // The length is untagged; all other inputs are tagged.
return (index == 2)
? Representation::Integer32()
@@ -3608,20 +3677,20 @@ class HApplyArguments: public HTemplateInstruction<4> {
};
-class HArgumentsElements: public HTemplateInstruction<0> {
+class HArgumentsElements V8_FINAL : public HTemplateInstruction<0> {
public:
DECLARE_INSTRUCTION_FACTORY_P1(HArgumentsElements, bool);
DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements)
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::None();
}
bool from_inlined() const { return from_inlined_; }
protected:
- virtual bool DataEquals(HValue* other) { return true; }
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
private:
explicit HArgumentsElements(bool from_inlined) : from_inlined_(from_inlined) {
@@ -3631,24 +3700,24 @@ class HArgumentsElements: public HTemplateInstruction<0> {
SetFlag(kUseGVN);
}
- virtual bool IsDeletable() const { return true; }
+ virtual bool IsDeletable() const V8_OVERRIDE { return true; }
bool from_inlined_;
};
-class HArgumentsLength: public HUnaryOperation {
+class HArgumentsLength V8_FINAL : public HUnaryOperation {
public:
DECLARE_INSTRUCTION_FACTORY_P1(HArgumentsLength, HValue*);
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
DECLARE_CONCRETE_INSTRUCTION(ArgumentsLength)
protected:
- virtual bool DataEquals(HValue* other) { return true; }
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
private:
explicit HArgumentsLength(HValue* value) : HUnaryOperation(value) {
@@ -3656,11 +3725,11 @@ class HArgumentsLength: public HUnaryOperation {
SetFlag(kUseGVN);
}
- virtual bool IsDeletable() const { return true; }
+ virtual bool IsDeletable() const V8_OVERRIDE { return true; }
};
-class HAccessArgumentsAt: public HTemplateInstruction<3> {
+class HAccessArgumentsAt V8_FINAL : public HTemplateInstruction<3> {
public:
HAccessArgumentsAt(HValue* arguments, HValue* length, HValue* index) {
set_representation(Representation::Tagged());
@@ -3670,9 +3739,9 @@ class HAccessArgumentsAt: public HTemplateInstruction<3> {
SetOperandAt(2, index);
}
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
    // The arguments elements input is considered tagged.
return index == 0
? Representation::Tagged()
@@ -3685,14 +3754,14 @@ class HAccessArgumentsAt: public HTemplateInstruction<3> {
DECLARE_CONCRETE_INSTRUCTION(AccessArgumentsAt)
- virtual bool DataEquals(HValue* other) { return true; }
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
};
class HBoundsCheckBaseIndexInformation;
-class HBoundsCheck: public HTemplateInstruction<2> {
+class HBoundsCheck V8_FINAL : public HTemplateInstruction<2> {
public:
DECLARE_INSTRUCTION_FACTORY_P2(HBoundsCheck, HValue*, HValue*);
@@ -3721,27 +3790,30 @@ class HBoundsCheck: public HTemplateInstruction<2> {
}
}
- virtual Representation RequiredInputRepresentation(int arg_index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return representation();
}
- virtual void PrintDataTo(StringStream* stream);
- virtual void InferRepresentation(HInferRepresentationPhase* h_infer);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void InferRepresentation(
+ HInferRepresentationPhase* h_infer) V8_OVERRIDE;
HValue* index() { return OperandAt(0); }
HValue* length() { return OperandAt(1); }
bool allow_equality() { return allow_equality_; }
void set_allow_equality(bool v) { allow_equality_ = v; }
- virtual int RedefinedOperandIndex() { return 0; }
- virtual bool IsPurelyInformativeDefinition() { return skip_check(); }
+ virtual int RedefinedOperandIndex() V8_OVERRIDE { return 0; }
+ virtual bool IsPurelyInformativeDefinition() V8_OVERRIDE {
+ return skip_check();
+ }
DECLARE_CONCRETE_INSTRUCTION(BoundsCheck)
protected:
friend class HBoundsCheckBaseIndexInformation;
- virtual bool DataEquals(HValue* other) { return true; }
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
bool skip_check_;
HValue* base_;
int offset_;
@@ -3763,13 +3835,14 @@ class HBoundsCheck: public HTemplateInstruction<2> {
SetFlag(kUseGVN);
}
- virtual bool IsDeletable() const {
+ virtual bool IsDeletable() const V8_OVERRIDE {
return skip_check() && !FLAG_debug_code;
}
};
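// Note on IsDeletable() above: a bounds check that analysis has proven
// redundant (skip_check()) can be dropped entirely, except under
// --debug-code, where it is kept as a runtime assertion so that bugs in the
// bounds analysis still trap.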
-class HBoundsCheckBaseIndexInformation: public HTemplateInstruction<2> {
+class HBoundsCheckBaseIndexInformation V8_FINAL
+ : public HTemplateInstruction<2> {
public:
explicit HBoundsCheckBaseIndexInformation(HBoundsCheck* check) {
DecompositionResult decomposition;
@@ -3786,18 +3859,18 @@ class HBoundsCheckBaseIndexInformation: public HTemplateInstruction<2> {
DECLARE_CONCRETE_INSTRUCTION(BoundsCheckBaseIndexInformation)
- virtual Representation RequiredInputRepresentation(int arg_index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return representation();
}
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- virtual int RedefinedOperandIndex() { return 0; }
- virtual bool IsPurelyInformativeDefinition() { return true; }
+ virtual int RedefinedOperandIndex() V8_OVERRIDE { return 0; }
+ virtual bool IsPurelyInformativeDefinition() V8_OVERRIDE { return true; }
};
-class HBitwiseBinaryOperation: public HBinaryOperation {
+class HBitwiseBinaryOperation : public HBinaryOperation {
public:
HBitwiseBinaryOperation(HValue* context, HValue* left, HValue* right,
HType type = HType::Tagged())
@@ -3808,7 +3881,7 @@ class HBitwiseBinaryOperation: public HBinaryOperation {
SetAllSideEffects();
}
- virtual void RepresentationChanged(Representation to) {
+ virtual void RepresentationChanged(Representation to) V8_OVERRIDE {
if (!to.IsTagged()) {
ASSERT(to.IsSmiOrInteger32());
ClearAllSideEffects();
@@ -3821,13 +3894,13 @@ class HBitwiseBinaryOperation: public HBinaryOperation {
virtual void UpdateRepresentation(Representation new_rep,
HInferRepresentationPhase* h_infer,
- const char* reason) {
+ const char* reason) V8_OVERRIDE {
// We only generate either int32 or generic tagged bitwise operations.
if (new_rep.IsDouble()) new_rep = Representation::Integer32();
HBinaryOperation::UpdateRepresentation(new_rep, h_infer, reason);
}
- virtual Representation observed_input_representation(int index) {
+ virtual Representation observed_input_representation(int index) V8_OVERRIDE {
Representation r = HBinaryOperation::observed_input_representation(index);
if (r.IsDouble()) return Representation::Integer32();
return r;
@@ -3841,11 +3914,11 @@ class HBitwiseBinaryOperation: public HBinaryOperation {
DECLARE_ABSTRACT_INSTRUCTION(BitwiseBinaryOperation)
private:
- virtual bool IsDeletable() const { return true; }
+ virtual bool IsDeletable() const V8_OVERRIDE { return true; }
};
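// Illustrative sketch (not part of this patch): JS bitwise operators apply
// ES5 ToInt32 to their operands, so a Double representation can never
// survive into the result; that is why UpdateRepresentation() above
// downgrades Double to Integer32. A simplified standalone version of the
// coercion:
#include <cmath>    // std::isfinite, std::trunc, std::fmod
#include <cstdint>  // int32_t, uint32_t
static int32_t ToInt32Sketch(double d) {
  if (!std::isfinite(d)) return 0;        // NaN and +/-Infinity map to 0.
  double m = std::fmod(std::trunc(d), 4294967296.0);  // Wrap modulo 2^32.
  if (m < 0) m += 4294967296.0;           // Normalize into [0, 2^32).
  return static_cast<int32_t>(static_cast<uint32_t>(m));
}
// e.g. ToInt32Sketch(1.9) == 1 and ToInt32Sketch(2147483648.5) == INT32_MIN.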
-class HMathFloorOfDiv: public HBinaryOperation {
+class HMathFloorOfDiv V8_FINAL : public HBinaryOperation {
public:
static HMathFloorOfDiv* New(Zone* zone,
HValue* context,
@@ -3854,16 +3927,13 @@ class HMathFloorOfDiv: public HBinaryOperation {
return new(zone) HMathFloorOfDiv(context, left, right);
}
- virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Integer32();
- }
+ virtual HValue* EnsureAndPropagateNotMinusZero(
+ BitVector* visited) V8_OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(MathFloorOfDiv)
protected:
- virtual bool DataEquals(HValue* other) { return true; }
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
private:
HMathFloorOfDiv(HValue* context, HValue* left, HValue* right)
@@ -3877,11 +3947,11 @@ class HMathFloorOfDiv: public HBinaryOperation {
SetFlag(kAllowUndefinedAsNaN);
}
- virtual bool IsDeletable() const { return true; }
+ virtual bool IsDeletable() const V8_OVERRIDE { return true; }
};
-class HArithmeticBinaryOperation: public HBinaryOperation {
+class HArithmeticBinaryOperation : public HBinaryOperation {
public:
HArithmeticBinaryOperation(HValue* context, HValue* left, HValue* right)
: HBinaryOperation(context, left, right, HType::TaggedNumber()) {
@@ -3890,7 +3960,7 @@ class HArithmeticBinaryOperation: public HBinaryOperation {
SetFlag(kAllowUndefinedAsNaN);
}
- virtual void RepresentationChanged(Representation to) {
+ virtual void RepresentationChanged(Representation to) V8_OVERRIDE {
if (to.IsTagged()) {
SetAllSideEffects();
ClearFlag(kUseGVN);
@@ -3903,11 +3973,11 @@ class HArithmeticBinaryOperation: public HBinaryOperation {
DECLARE_ABSTRACT_INSTRUCTION(ArithmeticBinaryOperation)
private:
- virtual bool IsDeletable() const { return true; }
+ virtual bool IsDeletable() const V8_OVERRIDE { return true; }
};
-class HCompareGeneric: public HBinaryOperation {
+class HCompareGeneric V8_FINAL : public HBinaryOperation {
public:
HCompareGeneric(HValue* context,
HValue* left,
@@ -3920,14 +3990,14 @@ class HCompareGeneric: public HBinaryOperation {
SetAllSideEffects();
}
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return index == 0
? Representation::Tagged()
: representation();
}
Token::Value token() const { return token_; }
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(CompareGeneric)
@@ -3936,7 +4006,7 @@ class HCompareGeneric: public HBinaryOperation {
};
-class HCompareNumericAndBranch: public HTemplateControlInstruction<2, 2> {
+class HCompareNumericAndBranch : public HTemplateControlInstruction<2, 2> {
public:
HCompareNumericAndBranch(HValue* left, HValue* right, Token::Value token)
: token_(token) {
@@ -3956,15 +4026,16 @@ class HCompareNumericAndBranch: public HTemplateControlInstruction<2, 2> {
observed_input_representation_[1] = right;
}
- virtual void InferRepresentation(HInferRepresentationPhase* h_infer);
+ virtual void InferRepresentation(
+ HInferRepresentationPhase* h_infer) V8_OVERRIDE;
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return representation();
}
- virtual Representation observed_input_representation(int index) {
+ virtual Representation observed_input_representation(int index) V8_OVERRIDE {
return observed_input_representation_[index];
}
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(CompareNumericAndBranch)
@@ -3974,7 +4045,8 @@ class HCompareNumericAndBranch: public HTemplateControlInstruction<2, 2> {
};
-class HCompareHoleAndBranch: public HTemplateControlInstruction<2, 1> {
+class HCompareHoleAndBranch V8_FINAL
+ : public HTemplateControlInstruction<2, 1> {
public:
// TODO(danno): make this private when the IfBuilder properly constructs
// control flow instructions.
@@ -3988,19 +4060,20 @@ class HCompareHoleAndBranch: public HTemplateControlInstruction<2, 1> {
HValue* object() { return OperandAt(0); }
- virtual void InferRepresentation(HInferRepresentationPhase* h_infer);
+ virtual void InferRepresentation(
+ HInferRepresentationPhase* h_infer) V8_OVERRIDE;
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return representation();
}
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(CompareHoleAndBranch)
};
-class HCompareObjectEqAndBranch: public HTemplateControlInstruction<2, 2> {
+class HCompareObjectEqAndBranch : public HTemplateControlInstruction<2, 2> {
public:
// TODO(danno): make this private when the IfBuilder properly constructs
// control flow instructions.
@@ -4015,13 +4088,13 @@ class HCompareObjectEqAndBranch: public HTemplateControlInstruction<2, 2> {
HValue* left() { return OperandAt(0); }
HValue* right() { return OperandAt(1); }
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
- virtual Representation observed_input_representation(int index) {
+ virtual Representation observed_input_representation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
@@ -4029,24 +4102,24 @@ class HCompareObjectEqAndBranch: public HTemplateControlInstruction<2, 2> {
};
-class HIsObjectAndBranch: public HUnaryControlInstruction {
+class HIsObjectAndBranch V8_FINAL : public HUnaryControlInstruction {
public:
explicit HIsObjectAndBranch(HValue* value)
: HUnaryControlInstruction(value, NULL, NULL) { }
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch)
};
-class HIsStringAndBranch: public HUnaryControlInstruction {
+class HIsStringAndBranch V8_FINAL : public HUnaryControlInstruction {
public:
explicit HIsStringAndBranch(HValue* value)
: HUnaryControlInstruction(value, NULL, NULL) { }
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
@@ -4054,28 +4127,28 @@ class HIsStringAndBranch: public HUnaryControlInstruction {
};
-class HIsSmiAndBranch: public HUnaryControlInstruction {
+class HIsSmiAndBranch V8_FINAL : public HUnaryControlInstruction {
public:
explicit HIsSmiAndBranch(HValue* value)
: HUnaryControlInstruction(value, NULL, NULL) { }
DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch)
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
protected:
- virtual bool DataEquals(HValue* other) { return true; }
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
};
-class HIsUndetectableAndBranch: public HUnaryControlInstruction {
+class HIsUndetectableAndBranch V8_FINAL : public HUnaryControlInstruction {
public:
explicit HIsUndetectableAndBranch(HValue* value)
: HUnaryControlInstruction(value, NULL, NULL) { }
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
@@ -4083,7 +4156,7 @@ class HIsUndetectableAndBranch: public HUnaryControlInstruction {
};
-class HStringCompareAndBranch: public HTemplateControlInstruction<2, 3> {
+class HStringCompareAndBranch : public HTemplateControlInstruction<2, 3> {
public:
HStringCompareAndBranch(HValue* context,
HValue* left,
@@ -4103,9 +4176,9 @@ class HStringCompareAndBranch: public HTemplateControlInstruction<2, 3> {
HValue* right() { return OperandAt(2); }
Token::Value token() const { return token_; }
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
@@ -4120,9 +4193,9 @@ class HStringCompareAndBranch: public HTemplateControlInstruction<2, 3> {
};
-class HIsConstructCallAndBranch: public HTemplateControlInstruction<2, 0> {
+class HIsConstructCallAndBranch : public HTemplateControlInstruction<2, 0> {
public:
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::None();
}
@@ -4130,7 +4203,7 @@ class HIsConstructCallAndBranch: public HTemplateControlInstruction<2, 0> {
};
-class HHasInstanceTypeAndBranch: public HUnaryControlInstruction {
+class HHasInstanceTypeAndBranch V8_FINAL : public HUnaryControlInstruction {
public:
HHasInstanceTypeAndBranch(HValue* value, InstanceType type)
: HUnaryControlInstruction(value, NULL, NULL), from_(type), to_(type) { }
@@ -4142,9 +4215,9 @@ class HHasInstanceTypeAndBranch: public HUnaryControlInstruction {
InstanceType from() { return from_; }
InstanceType to() { return to_; }
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
@@ -4156,12 +4229,12 @@ class HHasInstanceTypeAndBranch: public HUnaryControlInstruction {
};
-class HHasCachedArrayIndexAndBranch: public HUnaryControlInstruction {
+class HHasCachedArrayIndexAndBranch V8_FINAL : public HUnaryControlInstruction {
public:
explicit HHasCachedArrayIndexAndBranch(HValue* value)
: HUnaryControlInstruction(value, NULL, NULL) { }
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
@@ -4169,28 +4242,28 @@ class HHasCachedArrayIndexAndBranch: public HUnaryControlInstruction {
};
-class HGetCachedArrayIndex: public HUnaryOperation {
+class HGetCachedArrayIndex V8_FINAL : public HUnaryOperation {
public:
explicit HGetCachedArrayIndex(HValue* value) : HUnaryOperation(value) {
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
}
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
DECLARE_CONCRETE_INSTRUCTION(GetCachedArrayIndex)
protected:
- virtual bool DataEquals(HValue* other) { return true; }
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
private:
- virtual bool IsDeletable() const { return true; }
+ virtual bool IsDeletable() const V8_OVERRIDE { return true; }
};
-class HClassOfTestAndBranch: public HUnaryControlInstruction {
+class HClassOfTestAndBranch V8_FINAL : public HUnaryControlInstruction {
public:
HClassOfTestAndBranch(HValue* value, Handle<String> class_name)
: HUnaryControlInstruction(value, NULL, NULL),
@@ -4198,11 +4271,11 @@ class HClassOfTestAndBranch: public HUnaryControlInstruction {
DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch)
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
Handle<String> class_name() const { return class_name_; }
@@ -4211,18 +4284,18 @@ class HClassOfTestAndBranch: public HUnaryControlInstruction {
};
-class HTypeofIsAndBranch: public HUnaryControlInstruction {
+class HTypeofIsAndBranch V8_FINAL : public HUnaryControlInstruction {
public:
HTypeofIsAndBranch(HValue* value, Handle<String> type_literal)
: HUnaryControlInstruction(value, NULL, NULL),
type_literal_(type_literal) { }
Handle<String> type_literal() { return type_literal_; }
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch)
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
@@ -4231,7 +4304,7 @@ class HTypeofIsAndBranch: public HUnaryControlInstruction {
};
-class HInstanceOf: public HBinaryOperation {
+class HInstanceOf V8_FINAL : public HBinaryOperation {
public:
HInstanceOf(HValue* context, HValue* left, HValue* right)
: HBinaryOperation(context, left, right, HType::Boolean()) {
@@ -4239,17 +4312,17 @@ class HInstanceOf: public HBinaryOperation {
SetAllSideEffects();
}
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(InstanceOf)
};
-class HInstanceOfKnownGlobal: public HTemplateInstruction<2> {
+class HInstanceOfKnownGlobal V8_FINAL : public HTemplateInstruction<2> {
public:
HInstanceOfKnownGlobal(HValue* context,
HValue* left,
@@ -4265,7 +4338,7 @@ class HInstanceOfKnownGlobal: public HTemplateInstruction<2> {
HValue* left() { return OperandAt(1); }
Handle<JSFunction> function() { return function_; }
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
@@ -4279,7 +4352,7 @@ class HInstanceOfKnownGlobal: public HTemplateInstruction<2> {
// TODO(mstarzinger): This instruction should be modeled as a load of the map
// field followed by a load of the instance size field once HLoadNamedField is
// flexible enough to accommodate byte-field loads.
-class HInstanceSize: public HTemplateInstruction<1> {
+class HInstanceSize V8_FINAL : public HTemplateInstruction<1> {
public:
explicit HInstanceSize(HValue* object) {
SetOperandAt(0, object);
@@ -4288,7 +4361,7 @@ class HInstanceSize: public HTemplateInstruction<1> {
HValue* object() { return OperandAt(0); }
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
@@ -4296,7 +4369,7 @@ class HInstanceSize: public HTemplateInstruction<1> {
};
-class HPower: public HTemplateInstruction<2> {
+class HPower V8_FINAL : public HTemplateInstruction<2> {
public:
static HInstruction* New(Zone* zone,
HValue* context,
@@ -4306,19 +4379,19 @@ class HPower: public HTemplateInstruction<2> {
HValue* left() { return OperandAt(0); }
HValue* right() const { return OperandAt(1); }
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return index == 0
? Representation::Double()
: Representation::None();
}
- virtual Representation observed_input_representation(int index) {
+ virtual Representation observed_input_representation(int index) V8_OVERRIDE {
return RequiredInputRepresentation(index);
}
DECLARE_CONCRETE_INSTRUCTION(Power)
protected:
- virtual bool DataEquals(HValue* other) { return true; }
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
private:
HPower(HValue* left, HValue* right) {
@@ -4329,13 +4402,13 @@ class HPower: public HTemplateInstruction<2> {
SetGVNFlag(kChangesNewSpacePromotion);
}
- virtual bool IsDeletable() const {
+ virtual bool IsDeletable() const V8_OVERRIDE {
return !right()->representation().IsTagged();
}
};
-class HRandom: public HTemplateInstruction<1> {
+class HRandom V8_FINAL : public HTemplateInstruction<1> {
public:
explicit HRandom(HValue* global_object) {
SetOperandAt(0, global_object);
@@ -4344,18 +4417,18 @@ class HRandom: public HTemplateInstruction<1> {
HValue* global_object() { return OperandAt(0); }
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
DECLARE_CONCRETE_INSTRUCTION(Random)
private:
- virtual bool IsDeletable() const { return true; }
+ virtual bool IsDeletable() const V8_OVERRIDE { return true; }
};
-class HAdd: public HArithmeticBinaryOperation {
+class HAdd V8_FINAL : public HArithmeticBinaryOperation {
public:
static HInstruction* New(Zone* zone,
HValue* context,
@@ -4364,15 +4437,16 @@ class HAdd: public HArithmeticBinaryOperation {
// Add is only commutative if two integer values are added and not if two
// tagged values are added (because it might be a String concatenation).
- virtual bool IsCommutative() const {
+ virtual bool IsCommutative() const V8_OVERRIDE {
return !representation().IsTagged();
}
- virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
+ virtual HValue* EnsureAndPropagateNotMinusZero(
+ BitVector* visited) V8_OVERRIDE;
- virtual HValue* Canonicalize();
+ virtual HValue* Canonicalize() V8_OVERRIDE;
- virtual bool TryDecompose(DecompositionResult* decomposition) {
+ virtual bool TryDecompose(DecompositionResult* decomposition) V8_OVERRIDE {
if (left()->IsInteger32Constant()) {
decomposition->Apply(right(), left()->GetInteger32Constant());
return true;
@@ -4384,7 +4458,7 @@ class HAdd: public HArithmeticBinaryOperation {
}
}
- virtual void RepresentationChanged(Representation to) {
+ virtual void RepresentationChanged(Representation to) V8_OVERRIDE {
if (to.IsTagged()) ClearFlag(kAllowUndefinedAsNaN);
HArithmeticBinaryOperation::RepresentationChanged(to);
}
@@ -4392,9 +4466,9 @@ class HAdd: public HArithmeticBinaryOperation {
DECLARE_CONCRETE_INSTRUCTION(Add)
protected:
- virtual bool DataEquals(HValue* other) { return true; }
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
- virtual Range* InferRange(Zone* zone);
+ virtual Range* InferRange(Zone* zone) V8_OVERRIDE;
private:
HAdd(HValue* context, HValue* left, HValue* right)
@@ -4404,18 +4478,19 @@ class HAdd: public HArithmeticBinaryOperation {
};
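// Note on IsCommutative() above: tagged '+' may be string concatenation,
// which is order-sensitive, while integer addition commutes; a C++ analogy:
//   std::string("a") + "b";  // "ab"
//   std::string("b") + "a";  // "ba" (operands swapped, different result)
// hence the !representation().IsTagged() test.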
-class HSub: public HArithmeticBinaryOperation {
+class HSub V8_FINAL : public HArithmeticBinaryOperation {
public:
static HInstruction* New(Zone* zone,
HValue* context,
HValue* left,
HValue* right);
- virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
+ virtual HValue* EnsureAndPropagateNotMinusZero(
+ BitVector* visited) V8_OVERRIDE;
- virtual HValue* Canonicalize();
+ virtual HValue* Canonicalize() V8_OVERRIDE;
- virtual bool TryDecompose(DecompositionResult* decomposition) {
+ virtual bool TryDecompose(DecompositionResult* decomposition) V8_OVERRIDE {
if (right()->IsInteger32Constant()) {
decomposition->Apply(left(), -right()->GetInteger32Constant());
return true;
@@ -4427,9 +4502,9 @@ class HSub: public HArithmeticBinaryOperation {
DECLARE_CONCRETE_INSTRUCTION(Sub)
protected:
- virtual bool DataEquals(HValue* other) { return true; }
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
- virtual Range* InferRange(Zone* zone);
+ virtual Range* InferRange(Zone* zone) V8_OVERRIDE;
private:
HSub(HValue* context, HValue* left, HValue* right)
@@ -4439,7 +4514,7 @@ class HSub: public HArithmeticBinaryOperation {
};
-class HMul: public HArithmeticBinaryOperation {
+class HMul V8_FINAL : public HArithmeticBinaryOperation {
public:
static HInstruction* New(Zone* zone,
HValue* context,
@@ -4457,28 +4532,28 @@ class HMul: public HArithmeticBinaryOperation {
return mul;
}
- virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
+ virtual HValue* EnsureAndPropagateNotMinusZero(
+ BitVector* visited) V8_OVERRIDE;
- virtual HValue* Canonicalize();
+ virtual HValue* Canonicalize() V8_OVERRIDE;
// Only commutative if it is certain that no two objects are being multiplied.
- virtual bool IsCommutative() const {
+ virtual bool IsCommutative() const V8_OVERRIDE {
return !representation().IsTagged();
}
virtual void UpdateRepresentation(Representation new_rep,
HInferRepresentationPhase* h_infer,
- const char* reason) {
- if (new_rep.IsSmi()) new_rep = Representation::Integer32();
+ const char* reason) V8_OVERRIDE {
HArithmeticBinaryOperation::UpdateRepresentation(new_rep, h_infer, reason);
}
DECLARE_CONCRETE_INSTRUCTION(Mul)
protected:
- virtual bool DataEquals(HValue* other) { return true; }
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
- virtual Range* InferRange(Zone* zone);
+ virtual Range* InferRange(Zone* zone) V8_OVERRIDE;
private:
HMul(HValue* context, HValue* left, HValue* right)
@@ -4488,7 +4563,7 @@ class HMul: public HArithmeticBinaryOperation {
};
-class HMod: public HArithmeticBinaryOperation {
+class HMod V8_FINAL : public HArithmeticBinaryOperation {
public:
static HInstruction* New(Zone* zone,
HValue* context,
@@ -4508,13 +4583,14 @@ class HMod: public HArithmeticBinaryOperation {
return false;
}
- virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
+ virtual HValue* EnsureAndPropagateNotMinusZero(
+ BitVector* visited) V8_OVERRIDE;
- virtual HValue* Canonicalize();
+ virtual HValue* Canonicalize() V8_OVERRIDE;
virtual void UpdateRepresentation(Representation new_rep,
HInferRepresentationPhase* h_infer,
- const char* reason) {
+ const char* reason) V8_OVERRIDE {
if (new_rep.IsSmi()) new_rep = Representation::Integer32();
HArithmeticBinaryOperation::UpdateRepresentation(new_rep, h_infer, reason);
}
@@ -4522,9 +4598,9 @@ class HMod: public HArithmeticBinaryOperation {
DECLARE_CONCRETE_INSTRUCTION(Mod)
protected:
- virtual bool DataEquals(HValue* other) { return true; }
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
- virtual Range* InferRange(Zone* zone);
+ virtual Range* InferRange(Zone* zone) V8_OVERRIDE;
private:
HMod(HValue* context,
@@ -4541,7 +4617,7 @@ class HMod: public HArithmeticBinaryOperation {
};
-class HDiv: public HArithmeticBinaryOperation {
+class HDiv V8_FINAL : public HArithmeticBinaryOperation {
public:
static HInstruction* New(Zone* zone,
HValue* context,
@@ -4557,13 +4633,14 @@ class HDiv: public HArithmeticBinaryOperation {
return false;
}
- virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
+ virtual HValue* EnsureAndPropagateNotMinusZero(
+ BitVector* visited) V8_OVERRIDE;
- virtual HValue* Canonicalize();
+ virtual HValue* Canonicalize() V8_OVERRIDE;
virtual void UpdateRepresentation(Representation new_rep,
HInferRepresentationPhase* h_infer,
- const char* reason) {
+ const char* reason) V8_OVERRIDE {
if (new_rep.IsSmi()) new_rep = Representation::Integer32();
HArithmeticBinaryOperation::UpdateRepresentation(new_rep, h_infer, reason);
}
@@ -4571,9 +4648,9 @@ class HDiv: public HArithmeticBinaryOperation {
DECLARE_CONCRETE_INSTRUCTION(Div)
protected:
- virtual bool DataEquals(HValue* other) { return true; }
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
- virtual Range* InferRange(Zone* zone);
+ virtual Range* InferRange(Zone* zone) V8_OVERRIDE;
private:
HDiv(HValue* context, HValue* left, HValue* right)
@@ -4584,7 +4661,7 @@ class HDiv: public HArithmeticBinaryOperation {
};
-class HMathMinMax: public HArithmeticBinaryOperation {
+class HMathMinMax V8_FINAL : public HArithmeticBinaryOperation {
public:
enum Operation { kMathMin, kMathMax };
@@ -4594,18 +4671,14 @@ class HMathMinMax: public HArithmeticBinaryOperation {
HValue* right,
Operation op);
- virtual Representation RequiredInputRepresentation(int index) {
- return index == 0 ? Representation::Tagged()
- : representation();
- }
-
- virtual Representation observed_input_representation(int index) {
+ virtual Representation observed_input_representation(int index) V8_OVERRIDE {
return RequiredInputRepresentation(index);
}
- virtual void InferRepresentation(HInferRepresentationPhase* h_infer);
+ virtual void InferRepresentation(
+ HInferRepresentationPhase* h_infer) V8_OVERRIDE;
- virtual Representation RepresentationFromInputs() {
+ virtual Representation RepresentationFromInputs() V8_OVERRIDE {
Representation left_rep = left()->representation();
Representation right_rep = right()->representation();
Representation result = Representation::Smi();
@@ -4615,19 +4688,19 @@ class HMathMinMax: public HArithmeticBinaryOperation {
return result;
}
- virtual bool IsCommutative() const { return true; }
+ virtual bool IsCommutative() const V8_OVERRIDE { return true; }
Operation operation() { return operation_; }
DECLARE_CONCRETE_INSTRUCTION(MathMinMax)
protected:
- virtual bool DataEquals(HValue* other) {
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE {
return other->IsMathMinMax() &&
HMathMinMax::cast(other)->operation_ == operation_;
}
- virtual Range* InferRange(Zone* zone);
+ virtual Range* InferRange(Zone* zone) V8_OVERRIDE;
private:
HMathMinMax(HValue* context, HValue* left, HValue* right, Operation op)
@@ -4638,7 +4711,7 @@ class HMathMinMax: public HArithmeticBinaryOperation {
};
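// Note on RepresentationFromInputs() above: the result starts at Smi and,
// in the elided lines, is assumed to be generalized over both operands, so
// the instruction never narrows its inputs, e.g.
//   generalize(Smi, Smi)          -> Smi
//   generalize(Smi, Integer32)    -> Integer32
//   generalize(Integer32, Double) -> Double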
-class HBitwise: public HBitwiseBinaryOperation {
+class HBitwise V8_FINAL : public HBitwiseBinaryOperation {
public:
static HInstruction* New(Zone* zone,
HValue* context,
@@ -4648,20 +4721,20 @@ class HBitwise: public HBitwiseBinaryOperation {
Token::Value op() const { return op_; }
- virtual bool IsCommutative() const { return true; }
+ virtual bool IsCommutative() const V8_OVERRIDE { return true; }
- virtual HValue* Canonicalize();
+ virtual HValue* Canonicalize() V8_OVERRIDE;
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(Bitwise)
protected:
- virtual bool DataEquals(HValue* other) {
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE {
return op() == HBitwise::cast(other)->op();
}
- virtual Range* InferRange(Zone* zone);
+ virtual Range* InferRange(Zone* zone) V8_OVERRIDE;
private:
HBitwise(HValue* context,
@@ -4681,6 +4754,7 @@ class HBitwise: public HBitwiseBinaryOperation {
right->representation().IsSmi() &&
HConstant::cast(right)->Integer32Value() >= 0))) {
SetFlag(kTruncatingToSmi);
+ SetFlag(kTruncatingToInt32);
// BIT_OR with a smi-range negative value will always set the entire
// sign-extension of the smi-sign.
} else if (op == Token::BIT_OR &&
@@ -4691,6 +4765,7 @@ class HBitwise: public HBitwiseBinaryOperation {
right->representation().IsSmi() &&
HConstant::cast(right)->Integer32Value() < 0))) {
SetFlag(kTruncatingToSmi);
+ SetFlag(kTruncatingToInt32);
}
}
@@ -4698,18 +4773,18 @@ class HBitwise: public HBitwiseBinaryOperation {
};
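// Worked example for the truncation flags set in the constructor above
// (assuming a 31-bit smi payload, as on 32-bit targets):
//   x & 7   // BIT_AND with a non-negative smi: result lies in [0, 7],
//           // so it always fits a smi.
//   x | -4  // BIT_OR with a negative smi: the set sign bits of -4 force
//           // the high bits of the result to one, so the result keeps a
//           // valid smi sign-extension.
// Neither case can leave the smi range, which is why kTruncatingToSmi and
// kTruncatingToInt32 are safe to set.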
-class HShl: public HBitwiseBinaryOperation {
+class HShl V8_FINAL : public HBitwiseBinaryOperation {
public:
static HInstruction* New(Zone* zone,
HValue* context,
HValue* left,
HValue* right);
- virtual Range* InferRange(Zone* zone);
+ virtual Range* InferRange(Zone* zone) V8_OVERRIDE;
virtual void UpdateRepresentation(Representation new_rep,
HInferRepresentationPhase* h_infer,
- const char* reason) {
+ const char* reason) V8_OVERRIDE {
if (new_rep.IsSmi() &&
!(right()->IsInteger32Constant() &&
right()->GetInteger32Constant() >= 0)) {
@@ -4721,7 +4796,7 @@ class HShl: public HBitwiseBinaryOperation {
DECLARE_CONCRETE_INSTRUCTION(Shl)
protected:
- virtual bool DataEquals(HValue* other) { return true; }
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
private:
HShl(HValue* context, HValue* left, HValue* right)
@@ -4729,14 +4804,14 @@ class HShl: public HBitwiseBinaryOperation {
};
-class HShr: public HBitwiseBinaryOperation {
+class HShr V8_FINAL : public HBitwiseBinaryOperation {
public:
static HInstruction* New(Zone* zone,
HValue* context,
HValue* left,
HValue* right);
- virtual bool TryDecompose(DecompositionResult* decomposition) {
+ virtual bool TryDecompose(DecompositionResult* decomposition) V8_OVERRIDE {
if (right()->IsInteger32Constant()) {
if (decomposition->Apply(left(), 0, right()->GetInteger32Constant())) {
// This is intended to look for HAdd and HSub, to handle compounds
@@ -4748,11 +4823,11 @@ class HShr: public HBitwiseBinaryOperation {
return false;
}
- virtual Range* InferRange(Zone* zone);
+ virtual Range* InferRange(Zone* zone) V8_OVERRIDE;
virtual void UpdateRepresentation(Representation new_rep,
HInferRepresentationPhase* h_infer,
- const char* reason) {
+ const char* reason) V8_OVERRIDE {
if (new_rep.IsSmi()) new_rep = Representation::Integer32();
HBitwiseBinaryOperation::UpdateRepresentation(new_rep, h_infer, reason);
}
@@ -4760,7 +4835,7 @@ class HShr: public HBitwiseBinaryOperation {
DECLARE_CONCRETE_INSTRUCTION(Shr)
protected:
- virtual bool DataEquals(HValue* other) { return true; }
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
private:
HShr(HValue* context, HValue* left, HValue* right)
@@ -4768,14 +4843,14 @@ class HShr: public HBitwiseBinaryOperation {
};
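// Note: TryDecompose() above feeds bounds-check elimination. For a pattern
// such as (index + 32) >> 5, HAdd and HShr together decompose the value
// into base = index, offset = 32, scale = 5, assuming DecompositionResult
// encodes value == (base + offset) >> scale, so the checker can reason
// about the original index rather than the shifted value.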
-class HSar: public HBitwiseBinaryOperation {
+class HSar V8_FINAL : public HBitwiseBinaryOperation {
public:
static HInstruction* New(Zone* zone,
HValue* context,
HValue* left,
HValue* right);
- virtual bool TryDecompose(DecompositionResult* decomposition) {
+ virtual bool TryDecompose(DecompositionResult* decomposition) V8_OVERRIDE {
if (right()->IsInteger32Constant()) {
if (decomposition->Apply(left(), 0, right()->GetInteger32Constant())) {
// This is intended to look for HAdd and HSub, to handle compounds
@@ -4787,11 +4862,11 @@ class HSar: public HBitwiseBinaryOperation {
return false;
}
- virtual Range* InferRange(Zone* zone);
+ virtual Range* InferRange(Zone* zone) V8_OVERRIDE;
virtual void UpdateRepresentation(Representation new_rep,
HInferRepresentationPhase* h_infer,
- const char* reason) {
+ const char* reason) V8_OVERRIDE {
if (new_rep.IsSmi()) new_rep = Representation::Integer32();
HBitwiseBinaryOperation::UpdateRepresentation(new_rep, h_infer, reason);
}
@@ -4799,7 +4874,7 @@ class HSar: public HBitwiseBinaryOperation {
DECLARE_CONCRETE_INSTRUCTION(Sar)
protected:
- virtual bool DataEquals(HValue* other) { return true; }
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
private:
HSar(HValue* context, HValue* left, HValue* right)
@@ -4807,7 +4882,7 @@ class HSar: public HBitwiseBinaryOperation {
};
-class HRor: public HBitwiseBinaryOperation {
+class HRor V8_FINAL : public HBitwiseBinaryOperation {
public:
HRor(HValue* context, HValue* left, HValue* right)
: HBitwiseBinaryOperation(context, left, right) {
@@ -4816,7 +4891,7 @@ class HRor: public HBitwiseBinaryOperation {
virtual void UpdateRepresentation(Representation new_rep,
HInferRepresentationPhase* h_infer,
- const char* reason) {
+ const char* reason) V8_OVERRIDE {
if (new_rep.IsSmi()) new_rep = Representation::Integer32();
HBitwiseBinaryOperation::UpdateRepresentation(new_rep, h_infer, reason);
}
@@ -4824,17 +4899,17 @@ class HRor: public HBitwiseBinaryOperation {
DECLARE_CONCRETE_INSTRUCTION(Ror)
protected:
- virtual bool DataEquals(HValue* other) { return true; }
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
};
-class HOsrEntry: public HTemplateInstruction<0> {
+class HOsrEntry V8_FINAL : public HTemplateInstruction<0> {
public:
DECLARE_INSTRUCTION_FACTORY_P1(HOsrEntry, BailoutId);
BailoutId ast_id() const { return ast_id_; }
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::None();
}
@@ -4850,7 +4925,7 @@ class HOsrEntry: public HTemplateInstruction<0> {
};
-class HParameter: public HTemplateInstruction<0> {
+class HParameter V8_FINAL : public HTemplateInstruction<0> {
public:
enum ParameterKind {
STACK_PARAMETER,
@@ -4865,9 +4940,9 @@ class HParameter: public HTemplateInstruction<0> {
unsigned index() const { return index_; }
ParameterKind kind() const { return kind_; }
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::None();
}
@@ -4894,7 +4969,7 @@ class HParameter: public HTemplateInstruction<0> {
};
-class HCallStub: public HUnaryCall {
+class HCallStub V8_FINAL : public HUnaryCall {
public:
HCallStub(HValue* context, CodeStub::Major major_key, int argument_count)
: HUnaryCall(context, argument_count),
@@ -4913,11 +4988,7 @@ class HCallStub: public HUnaryCall {
return transcendental_type_;
}
- virtual void PrintDataTo(StringStream* stream);
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(CallStub)
@@ -4927,23 +4998,22 @@ class HCallStub: public HUnaryCall {
};
-class HUnknownOSRValue: public HTemplateInstruction<0> {
+class HUnknownOSRValue V8_FINAL : public HTemplateInstruction<0> {
public:
- DECLARE_INSTRUCTION_FACTORY_P0(HUnknownOSRValue)
+ DECLARE_INSTRUCTION_FACTORY_P2(HUnknownOSRValue, HEnvironment*, int);
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::None();
- }
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- void set_incoming_value(HPhi* value) {
- incoming_value_ = value;
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ return Representation::None();
}
- HPhi* incoming_value() {
- return incoming_value_;
- }
+ void set_incoming_value(HPhi* value) { incoming_value_ = value; }
+ HPhi* incoming_value() { return incoming_value_; }
+ HEnvironment* environment() { return environment_; }
+ int index() { return index_; }
- virtual Representation KnownOptimalRepresentation() {
+ virtual Representation KnownOptimalRepresentation() V8_OVERRIDE {
if (incoming_value_ == NULL) return Representation::None();
return incoming_value_->KnownOptimalRepresentation();
}
@@ -4951,16 +5021,20 @@ class HUnknownOSRValue: public HTemplateInstruction<0> {
DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue)
private:
- HUnknownOSRValue()
- : incoming_value_(NULL) {
+ HUnknownOSRValue(HEnvironment* environment, int index)
+ : environment_(environment),
+ index_(index),
+ incoming_value_(NULL) {
set_representation(Representation::Tagged());
}
+ HEnvironment* environment_;
+ int index_;
HPhi* incoming_value_;
};
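// Note: HUnknownOSRValue now records which environment slot it stands for
// (environment_ and index_), letting the OSR machinery wire the placeholder
// to the real incoming HPhi for that slot later; until then,
// KnownOptimalRepresentation() conservatively answers None.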
-class HLoadGlobalCell: public HTemplateInstruction<0> {
+class HLoadGlobalCell V8_FINAL : public HTemplateInstruction<0> {
public:
HLoadGlobalCell(Handle<Cell> cell, PropertyDetails details)
: cell_(cell), details_(details), unique_id_() {
@@ -4972,30 +5046,30 @@ class HLoadGlobalCell: public HTemplateInstruction<0> {
Handle<Cell> cell() const { return cell_; }
bool RequiresHoleCheck() const;
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- virtual intptr_t Hashcode() {
+ virtual intptr_t Hashcode() V8_OVERRIDE {
return unique_id_.Hashcode();
}
- virtual void FinalizeUniqueValueId() {
+ virtual void FinalizeUniqueValueId() V8_OVERRIDE {
unique_id_ = UniqueValueId(cell_);
}
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::None();
}
DECLARE_CONCRETE_INSTRUCTION(LoadGlobalCell)
protected:
- virtual bool DataEquals(HValue* other) {
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE {
HLoadGlobalCell* b = HLoadGlobalCell::cast(other);
return unique_id_ == b->unique_id_;
}
private:
- virtual bool IsDeletable() const { return !RequiresHoleCheck(); }
+ virtual bool IsDeletable() const V8_OVERRIDE { return !RequiresHoleCheck(); }
Handle<Cell> cell_;
PropertyDetails details_;
@@ -5003,7 +5077,7 @@ class HLoadGlobalCell: public HTemplateInstruction<0> {
};
-class HLoadGlobalGeneric: public HTemplateInstruction<2> {
+class HLoadGlobalGeneric V8_FINAL : public HTemplateInstruction<2> {
public:
HLoadGlobalGeneric(HValue* context,
HValue* global_object,
@@ -5022,9 +5096,9 @@ class HLoadGlobalGeneric: public HTemplateInstruction<2> {
Handle<Object> name() const { return name_; }
bool for_typeof() const { return for_typeof_; }
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
@@ -5036,7 +5110,7 @@ class HLoadGlobalGeneric: public HTemplateInstruction<2> {
};
-class HAllocate: public HTemplateInstruction<2> {
+class HAllocate V8_FINAL : public HTemplateInstruction<2> {
public:
static HAllocate* New(Zone* zone,
HValue* context,
@@ -5054,7 +5128,7 @@ class HAllocate: public HTemplateInstruction<2> {
HValue* context() { return OperandAt(0); }
HValue* size() { return OperandAt(1); }
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
if (index == 0) {
return Representation::Tagged();
} else {
@@ -5098,14 +5172,10 @@ class HAllocate: public HTemplateInstruction<2> {
flags_ = static_cast<HAllocate::Flags>(flags_ | ALLOCATE_DOUBLE_ALIGNED);
}
- void UpdateSize(HValue* size) {
- SetOperandAt(1, size);
- }
-
virtual void HandleSideEffectDominator(GVNFlag side_effect,
- HValue* dominator);
+ HValue* dominator) V8_OVERRIDE;
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(Allocate)
@@ -5152,6 +5222,10 @@ class HAllocate: public HTemplateInstruction<2> {
AllocationSite::CanTrack(instance_type);
}
+ void UpdateSize(HValue* size) {
+ SetOperandAt(1, size);
+ }
+
HAllocate* GetFoldableDominator(HAllocate* dominator);
void UpdateFreeSpaceFiller(int32_t filler_size);
@@ -5175,7 +5249,33 @@ class HAllocate: public HTemplateInstruction<2> {
};
-class HInnerAllocatedObject: public HTemplateInstruction<1> {
+class HStoreCodeEntry V8_FINAL : public HTemplateInstruction<2> {
+ public:
+ static HStoreCodeEntry* New(Zone* zone,
+ HValue* context,
+ HValue* function,
+ HValue* code) {
+ return new(zone) HStoreCodeEntry(function, code);
+ }
+
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ return Representation::Tagged();
+ }
+
+ HValue* function() { return OperandAt(0); }
+ HValue* code_object() { return OperandAt(1); }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreCodeEntry)
+
+ private:
+ HStoreCodeEntry(HValue* function, HValue* code) {
+ SetOperandAt(0, function);
+ SetOperandAt(1, code);
+ }
+};
+
+
+class HInnerAllocatedObject V8_FINAL : public HTemplateInstruction<1> {
public:
static HInnerAllocatedObject* New(Zone* zone,
HValue* context,
@@ -5188,11 +5288,11 @@ class HInnerAllocatedObject: public HTemplateInstruction<1> {
HValue* base_object() { return OperandAt(0); }
int offset() { return offset_; }
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(InnerAllocatedObject)
@@ -5239,7 +5339,7 @@ inline bool ReceiverObjectNeedsWriteBarrier(HValue* object,
}
-class HStoreGlobalCell: public HUnaryOperation {
+class HStoreGlobalCell V8_FINAL : public HUnaryOperation {
public:
DECLARE_INSTRUCTION_FACTORY_P3(HStoreGlobalCell, HValue*,
Handle<PropertyCell>, PropertyDetails);
@@ -5252,10 +5352,10 @@ class HStoreGlobalCell: public HUnaryOperation {
return StoringValueNeedsWriteBarrier(value());
}
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(StoreGlobalCell)
@@ -5274,7 +5374,7 @@ class HStoreGlobalCell: public HUnaryOperation {
};
-class HStoreGlobalGeneric: public HTemplateInstruction<3> {
+class HStoreGlobalGeneric : public HTemplateInstruction<3> {
public:
inline static HStoreGlobalGeneric* New(Zone* zone,
HValue* context,
@@ -5292,9 +5392,9 @@ class HStoreGlobalGeneric: public HTemplateInstruction<3> {
HValue* value() { return OperandAt(2); }
StrictModeFlag strict_mode_flag() { return strict_mode_flag_; }
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
@@ -5320,7 +5420,7 @@ class HStoreGlobalGeneric: public HTemplateInstruction<3> {
};
-class HLoadContextSlot: public HUnaryOperation {
+class HLoadContextSlot V8_FINAL : public HUnaryOperation {
public:
enum Mode {
// Perform a normal load of the context slot without checking its value.
@@ -5365,29 +5465,29 @@ class HLoadContextSlot: public HUnaryOperation {
return mode_ != kNoCheck;
}
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot)
protected:
- virtual bool DataEquals(HValue* other) {
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE {
HLoadContextSlot* b = HLoadContextSlot::cast(other);
return (slot_index() == b->slot_index());
}
private:
- virtual bool IsDeletable() const { return !RequiresHoleCheck(); }
+ virtual bool IsDeletable() const V8_OVERRIDE { return !RequiresHoleCheck(); }
int slot_index_;
Mode mode_;
};
-class HStoreContextSlot: public HTemplateInstruction<2> {
+class HStoreContextSlot V8_FINAL : public HTemplateInstruction<2> {
public:
enum Mode {
// Perform a normal store to the context slot without checking its previous
@@ -5422,11 +5522,11 @@ class HStoreContextSlot: public HTemplateInstruction<2> {
return mode_ != kNoCheck;
}
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(StoreContextSlot)
@@ -5445,7 +5545,7 @@ class HStoreContextSlot: public HTemplateInstruction<2> {
// Represents an access to a portion of an object, such as the map pointer,
// array elements pointer, etc, but not accesses to array elements themselves.
-class HObjectAccess {
+class HObjectAccess V8_FINAL {
public:
inline bool IsInobject() const {
return portion() != kBackingStore && portion() != kExternalMemory;
@@ -5484,6 +5584,14 @@ class HObjectAccess {
return HObjectAccess(kElementsPointer, JSObject::kElementsOffset);
}
+ static HObjectAccess ForLiteralsPointer() {
+ return HObjectAccess(kInobject, JSFunction::kLiteralsOffset);
+ }
+
+ static HObjectAccess ForNextFunctionLinkPointer() {
+ return HObjectAccess(kInobject, JSFunction::kNextFunctionLinkOffset);
+ }
+
static HObjectAccess ForArrayLength(ElementsKind elements_kind) {
return HObjectAccess(
kArrayLengths,
@@ -5528,6 +5636,35 @@ class HObjectAccess {
return HObjectAccess(kInobject, JSFunction::kPrototypeOrInitialMapOffset);
}
+ static HObjectAccess ForSharedFunctionInfoPointer() {
+ return HObjectAccess(kInobject, JSFunction::kSharedFunctionInfoOffset);
+ }
+
+ static HObjectAccess ForCodeEntryPointer() {
+ return HObjectAccess(kInobject, JSFunction::kCodeEntryOffset);
+ }
+
+ static HObjectAccess ForCodeOffset() {
+ return HObjectAccess(kInobject, SharedFunctionInfo::kCodeOffset);
+ }
+
+ static HObjectAccess ForFirstCodeSlot() {
+ return HObjectAccess(kInobject, SharedFunctionInfo::kFirstCodeSlot);
+ }
+
+ static HObjectAccess ForFirstContextSlot() {
+ return HObjectAccess(kInobject, SharedFunctionInfo::kFirstContextSlot);
+ }
+
+ static HObjectAccess ForOptimizedCodeMap() {
+ return HObjectAccess(kInobject,
+ SharedFunctionInfo::kOptimizedCodeMapOffset);
+ }
+
+ static HObjectAccess ForFunctionContextPointer() {
+ return HObjectAccess(kInobject, JSFunction::kContextOffset);
+ }
+
static HObjectAccess ForMap() {
return HObjectAccess(kMaps, JSObject::kMapOffset);
}
@@ -5558,6 +5695,8 @@ class HObjectAccess {
// Create an access to an in-object property in a JSArray.
static HObjectAccess ForJSArrayOffset(int offset);
+ static HObjectAccess ForContextSlot(int index);
+
// Create an access to the backing store of an object.
static HObjectAccess ForBackingStoreOffset(int offset,
Representation representation = Representation::Tagged());
@@ -5620,52 +5759,43 @@ class HObjectAccess {
};
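// Hypothetical usage sketch (the factory signature shown is the one assumed
// to be generated by DECLARE_INSTRUCTION_FACTORY_P2 below):
//   HObjectAccess access = HObjectAccess::ForMap();
//   HLoadNamedField* load =
//       HLoadNamedField::New(zone, context, object, access);
// An HObjectAccess value only names which slice of an object is touched;
// the load/store instructions below consume it and derive representations
// and GVN flags from it.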
-class HLoadNamedField: public HTemplateInstruction<2> {
+class HLoadNamedField V8_FINAL : public HTemplateInstruction<1> {
public:
DECLARE_INSTRUCTION_FACTORY_P2(HLoadNamedField, HValue*, HObjectAccess);
- DECLARE_INSTRUCTION_FACTORY_P3(HLoadNamedField, HValue*, HObjectAccess,
- HValue*);
HValue* object() { return OperandAt(0); }
- HValue* typecheck() {
- ASSERT(HasTypeCheck());
- return OperandAt(1);
- }
-
- bool HasTypeCheck() const { return OperandAt(0) != OperandAt(1); }
- void ClearTypeCheck() { SetOperandAt(1, object()); }
+ bool HasTypeCheck() { return object()->IsCheckMaps(); }
HObjectAccess access() const { return access_; }
Representation field_representation() const {
return access_.representation();
}
- virtual bool HasEscapingOperandAt(int index) { return false; }
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual bool HasEscapingOperandAt(int index) V8_OVERRIDE { return false; }
+ virtual bool HasOutOfBoundsAccess(int size) V8_OVERRIDE {
+ return !access().IsInobject() || access().offset() >= size;
+ }
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
if (index == 0 && access().IsExternalMemory()) {
// object must be external in case of external memory access
return Representation::External();
}
return Representation::Tagged();
}
- virtual Range* InferRange(Zone* zone);
- virtual void PrintDataTo(StringStream* stream);
+ virtual Range* InferRange(Zone* zone) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(LoadNamedField)
protected:
- virtual bool DataEquals(HValue* other) {
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE {
HLoadNamedField* b = HLoadNamedField::cast(other);
return access_.Equals(b->access_);
}
private:
- HLoadNamedField(HValue* object,
- HObjectAccess access,
- HValue* typecheck = NULL)
- : access_(access) {
+ HLoadNamedField(HValue* object, HObjectAccess access) : access_(access) {
ASSERT(object != NULL);
SetOperandAt(0, object);
- SetOperandAt(1, typecheck != NULL ? typecheck : object);
Representation representation = access.representation();
if (representation.IsSmi()) {
@@ -5685,13 +5815,13 @@ class HLoadNamedField: public HTemplateInstruction<2> {
access.SetGVNFlags(this, false);
}
- virtual bool IsDeletable() const { return true; }
+ virtual bool IsDeletable() const V8_OVERRIDE { return true; }
HObjectAccess access_;
};
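// Note on the shape change above: the explicit second "typecheck" operand,
// and the OperandAt(0) != OperandAt(1) aliasing trick that encoded it, are
// gone; a load now counts as type-checked exactly when its object operand
// is produced by an HCheckMaps (object()->IsCheckMaps()).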
-class HLoadNamedGeneric: public HTemplateInstruction<2> {
+class HLoadNamedGeneric V8_FINAL : public HTemplateInstruction<2> {
public:
HLoadNamedGeneric(HValue* context, HValue* object, Handle<Object> name)
: name_(name) {
@@ -5705,11 +5835,11 @@ class HLoadNamedGeneric: public HTemplateInstruction<2> {
HValue* object() { return OperandAt(1); }
Handle<Object> name() const { return name_; }
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric)
@@ -5718,7 +5848,7 @@ class HLoadNamedGeneric: public HTemplateInstruction<2> {
};
-class HLoadFunctionPrototype: public HUnaryOperation {
+class HLoadFunctionPrototype V8_FINAL : public HUnaryOperation {
public:
explicit HLoadFunctionPrototype(HValue* function)
: HUnaryOperation(function) {
@@ -5729,14 +5859,14 @@ class HLoadFunctionPrototype: public HUnaryOperation {
HValue* function() { return OperandAt(0); }
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
DECLARE_CONCRETE_INSTRUCTION(LoadFunctionPrototype)
protected:
- virtual bool DataEquals(HValue* other) { return true; }
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
};
class ArrayInstructionInterface {
@@ -5749,7 +5879,7 @@ class ArrayInstructionInterface {
virtual ~ArrayInstructionInterface() { };
static Representation KeyedAccessIndexRequirement(Representation r) {
- return r.IsInteger32() || kSmiValueSize != 31
+ return r.IsInteger32() || SmiValuesAre32Bits()
? Representation::Integer32() : Representation::Smi();
}
};
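// Note: SmiValuesAre32Bits() is assumed to hold on 64-bit targets, where
// the smi payload is a full 32 bits, so any int32 key fits in a smi and
// keyed indices can always stay Integer32; on 32-bit targets (31-bit
// payload) an already-smi key keeps Representation::Smi() to avoid an
// untagging operation.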
@@ -5761,7 +5891,7 @@ enum LoadKeyedHoleMode {
};
-class HLoadKeyed
+class HLoadKeyed V8_FINAL
: public HTemplateInstruction<3>, public ArrayInstructionInterface {
public:
DECLARE_INSTRUCTION_FACTORY_P4(HLoadKeyed, HValue*, HValue*, HValue*,
@@ -5796,7 +5926,7 @@ class HLoadKeyed
return HoleModeField::decode(bit_field_);
}
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
// kind_fast: tagged[int32] (none)
// kind_double: tagged[int32] (none)
// kind_external: external[int32] (none)
@@ -5811,22 +5941,22 @@ class HLoadKeyed
return Representation::None();
}
- virtual Representation observed_input_representation(int index) {
+ virtual Representation observed_input_representation(int index) V8_OVERRIDE {
return RequiredInputRepresentation(index);
}
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
bool UsesMustHandleHole() const;
bool AllUsesCanTreatHoleAsNaN() const;
bool RequiresHoleCheck() const;
- virtual Range* InferRange(Zone* zone);
+ virtual Range* InferRange(Zone* zone) V8_OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(LoadKeyed)
protected:
- virtual bool DataEquals(HValue* other) {
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE {
if (!other->IsLoadKeyed()) return false;
HLoadKeyed* other_load = HLoadKeyed::cast(other);
@@ -5886,7 +6016,7 @@ class HLoadKeyed
SetFlag(kUseGVN);
}
- virtual bool IsDeletable() const {
+ virtual bool IsDeletable() const V8_OVERRIDE {
return !RequiresHoleCheck();
}
@@ -5922,7 +6052,7 @@ class HLoadKeyed
};
-class HLoadKeyedGeneric: public HTemplateInstruction<3> {
+class HLoadKeyedGeneric V8_FINAL : public HTemplateInstruction<3> {
public:
HLoadKeyedGeneric(HValue* context, HValue* obj, HValue* key) {
set_representation(Representation::Tagged());
@@ -5936,28 +6066,33 @@ class HLoadKeyedGeneric: public HTemplateInstruction<3> {
HValue* key() { return OperandAt(1); }
HValue* context() { return OperandAt(2); }
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
// tagged[tagged]
return Representation::Tagged();
}
- virtual HValue* Canonicalize();
+ virtual HValue* Canonicalize() V8_OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric)
};
-class HStoreNamedField: public HTemplateInstruction<3> {
+class HStoreNamedField V8_FINAL : public HTemplateInstruction<3> {
public:
DECLARE_INSTRUCTION_FACTORY_P3(HStoreNamedField, HValue*,
HObjectAccess, HValue*);
DECLARE_CONCRETE_INSTRUCTION(StoreNamedField)
- virtual bool HasEscapingOperandAt(int index) { return index == 1; }
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual bool HasEscapingOperandAt(int index) V8_OVERRIDE {
+ return index == 1;
+ }
+ virtual bool HasOutOfBoundsAccess(int size) V8_OVERRIDE {
+ return !access().IsInobject() || access().offset() >= size;
+ }
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
if (index == 0 && access().IsExternalMemory()) {
// object must be external in case of external memory access
return Representation::External();
@@ -5970,11 +6105,11 @@ class HStoreNamedField: public HTemplateInstruction<3> {
return Representation::Tagged();
}
virtual void HandleSideEffectDominator(GVNFlag side_effect,
- HValue* dominator) {
+ HValue* dominator) V8_OVERRIDE {
ASSERT(side_effect == kChangesNewSpacePromotion);
new_space_dominator_ = dominator;
}
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
void SkipWriteBarrier() { write_barrier_mode_ = SKIP_WRITE_BARRIER; }
bool IsSkipWriteBarrier() const {
@@ -5991,7 +6126,8 @@ class HStoreNamedField: public HTemplateInstruction<3> {
Handle<Map> transition_map() const {
if (has_transition()) {
- return Handle<Map>::cast(HConstant::cast(transition())->handle());
+ return Handle<Map>::cast(
+ HConstant::cast(transition())->handle(Isolate::Current()));
} else {
return Handle<Map>();
}
@@ -5999,7 +6135,7 @@ class HStoreNamedField: public HTemplateInstruction<3> {
void SetTransition(HConstant* map_constant, CompilationInfo* info) {
ASSERT(!has_transition()); // Only set once.
- Handle<Map> map = Handle<Map>::cast(map_constant->handle());
+ Handle<Map> map = Handle<Map>::cast(map_constant->handle(info->isolate()));
if (map->CanBeDeprecated()) {
map->AddDependentCompilationInfo(DependentCode::kTransitionGroup, info);
}
@@ -6053,7 +6189,7 @@ class HStoreNamedField: public HTemplateInstruction<3> {
};
-class HStoreNamedGeneric: public HTemplateInstruction<3> {
+class HStoreNamedGeneric V8_FINAL : public HTemplateInstruction<3> {
public:
HStoreNamedGeneric(HValue* context,
HValue* object,
@@ -6074,9 +6210,9 @@ class HStoreNamedGeneric: public HTemplateInstruction<3> {
Handle<String> name() { return name_; }
StrictModeFlag strict_mode_flag() { return strict_mode_flag_; }
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
@@ -6088,13 +6224,13 @@ class HStoreNamedGeneric: public HTemplateInstruction<3> {
};
-class HStoreKeyed
+class HStoreKeyed V8_FINAL
: public HTemplateInstruction<3>, public ArrayInstructionInterface {
public:
DECLARE_INSTRUCTION_FACTORY_P4(HStoreKeyed, HValue*, HValue*, HValue*,
ElementsKind);
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
// kind_fast: tagged[int32] = tagged
// kind_double: tagged[int32] = double
// kind_smi : tagged[int32] = smi
@@ -6124,7 +6260,7 @@ class HStoreKeyed
return IsExternalArrayElementsKind(elements_kind());
}
- virtual Representation observed_input_representation(int index) {
+ virtual Representation observed_input_representation(int index) V8_OVERRIDE {
if (index < 2) return RequiredInputRepresentation(index);
if (IsUninitialized()) {
return Representation::None();
@@ -6165,7 +6301,7 @@ class HStoreKeyed
}
virtual void HandleSideEffectDominator(GVNFlag side_effect,
- HValue* dominator) {
+ HValue* dominator) V8_OVERRIDE {
ASSERT(side_effect == kChangesNewSpacePromotion);
new_space_dominator_ = dominator;
}
@@ -6183,7 +6319,7 @@ class HStoreKeyed
bool NeedsCanonicalization();
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(StoreKeyed)
@@ -6229,7 +6365,7 @@ class HStoreKeyed
};
-class HStoreKeyedGeneric: public HTemplateInstruction<4> {
+class HStoreKeyedGeneric V8_FINAL : public HTemplateInstruction<4> {
public:
HStoreKeyedGeneric(HValue* context,
HValue* object,
@@ -6250,12 +6386,12 @@ class HStoreKeyedGeneric: public HTemplateInstruction<4> {
HValue* context() { return OperandAt(3); }
StrictModeFlag strict_mode_flag() { return strict_mode_flag_; }
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
// tagged[tagged] = tagged
return Representation::Tagged();
}
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric)
@@ -6264,7 +6400,7 @@ class HStoreKeyedGeneric: public HTemplateInstruction<4> {
};
-class HTransitionElementsKind: public HTemplateInstruction<2> {
+class HTransitionElementsKind V8_FINAL : public HTemplateInstruction<2> {
public:
inline static HTransitionElementsKind* New(Zone* zone,
HValue* context,
@@ -6275,7 +6411,7 @@ class HTransitionElementsKind: public HTemplateInstruction<2> {
original_map, transitioned_map);
}
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
@@ -6286,9 +6422,9 @@ class HTransitionElementsKind: public HTemplateInstruction<2> {
ElementsKind from_kind() { return from_kind_; }
ElementsKind to_kind() { return to_kind_; }
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- virtual void FinalizeUniqueValueId() {
+ virtual void FinalizeUniqueValueId() V8_OVERRIDE {
original_map_unique_id_ = UniqueValueId(original_map_);
transitioned_map_unique_id_ = UniqueValueId(transitioned_map_);
}
@@ -6296,7 +6432,7 @@ class HTransitionElementsKind: public HTemplateInstruction<2> {
DECLARE_CONCRETE_INSTRUCTION(TransitionElementsKind)
protected:
- virtual bool DataEquals(HValue* other) {
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE {
HTransitionElementsKind* instr = HTransitionElementsKind::cast(other);
return original_map_unique_id_ == instr->original_map_unique_id_ &&
transitioned_map_unique_id_ == instr->transitioned_map_unique_id_;
@@ -6317,11 +6453,7 @@ class HTransitionElementsKind: public HTemplateInstruction<2> {
SetOperandAt(1, context);
SetFlag(kUseGVN);
SetGVNFlag(kChangesElementsKind);
- if (original_map->has_fast_double_elements()) {
- SetGVNFlag(kChangesElementsPointer);
- SetGVNFlag(kChangesNewSpacePromotion);
- }
- if (transitioned_map->has_fast_double_elements()) {
+ if (!IsSimpleMapChangeTransition(from_kind_, to_kind_)) {
SetGVNFlag(kChangesElementsPointer);
SetGVNFlag(kChangesNewSpacePromotion);
}
@@ -6337,7 +6469,7 @@ class HTransitionElementsKind: public HTemplateInstruction<2> {
};
-class HStringAdd: public HBinaryOperation {
+class HStringAdd V8_FINAL : public HBinaryOperation {
public:
static HInstruction* New(Zone* zone,
HValue* context,
@@ -6347,14 +6479,14 @@ class HStringAdd: public HBinaryOperation {
StringAddFlags flags() const { return flags_; }
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
DECLARE_CONCRETE_INSTRUCTION(StringAdd)
protected:
- virtual bool DataEquals(HValue* other) { return true; }
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
private:
HStringAdd(HValue* context, HValue* left, HValue* right, StringAddFlags flags)
@@ -6367,13 +6499,13 @@ class HStringAdd: public HBinaryOperation {
// No side-effects except possible allocation.
// NOTE: this instruction _does not_ call ToString() on its inputs.
- virtual bool IsDeletable() const { return true; }
+ virtual bool IsDeletable() const V8_OVERRIDE { return true; }
const StringAddFlags flags_;
};
-class HStringCharCodeAt: public HTemplateInstruction<3> {
+class HStringCharCodeAt V8_FINAL : public HTemplateInstruction<3> {
public:
static HStringCharCodeAt* New(Zone* zone,
HValue* context,
@@ -6396,9 +6528,9 @@ class HStringCharCodeAt: public HTemplateInstruction<3> {
DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt)
protected:
- virtual bool DataEquals(HValue* other) { return true; }
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
- virtual Range* InferRange(Zone* zone) {
+ virtual Range* InferRange(Zone* zone) V8_OVERRIDE {
return new(zone) Range(0, String::kMaxUtf16CodeUnit);
}
@@ -6414,17 +6546,17 @@ class HStringCharCodeAt: public HTemplateInstruction<3> {
}
// No side effects: runtime function assumes string + number inputs.
- virtual bool IsDeletable() const { return true; }
+ virtual bool IsDeletable() const V8_OVERRIDE { return true; }
};
-class HStringCharFromCode: public HTemplateInstruction<2> {
+class HStringCharFromCode V8_FINAL : public HTemplateInstruction<2> {
public:
static HInstruction* New(Zone* zone,
HValue* context,
HValue* char_code);
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return index == 0
? Representation::Tagged()
: Representation::Integer32();
@@ -6433,7 +6565,7 @@ class HStringCharFromCode: public HTemplateInstruction<2> {
HValue* context() const { return OperandAt(0); }
HValue* value() const { return OperandAt(1); }
- virtual bool DataEquals(HValue* other) { return true; }
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
DECLARE_CONCRETE_INSTRUCTION(StringCharFromCode)
@@ -6447,14 +6579,14 @@ class HStringCharFromCode: public HTemplateInstruction<2> {
SetGVNFlag(kChangesNewSpacePromotion);
}
- virtual bool IsDeletable() const {
+ virtual bool IsDeletable() const V8_OVERRIDE {
return !value()->ToNumberCanBeObserved();
}
};
template <int V>
-class HMaterializedLiteral: public HTemplateInstruction<V> {
+class HMaterializedLiteral : public HTemplateInstruction<V> {
public:
HMaterializedLiteral<V>(int index, int depth, AllocationSiteMode mode)
: literal_index_(index), depth_(depth), allocation_site_mode_(mode) {
@@ -6474,7 +6606,7 @@ class HMaterializedLiteral: public HTemplateInstruction<V> {
}
private:
- virtual bool IsDeletable() const { return true; }
+ virtual bool IsDeletable() const V8_FINAL V8_OVERRIDE { return true; }
int literal_index_;
int depth_;
@@ -6482,7 +6614,7 @@ class HMaterializedLiteral: public HTemplateInstruction<V> {
};
-class HRegExpLiteral: public HMaterializedLiteral<1> {
+class HRegExpLiteral V8_FINAL : public HMaterializedLiteral<1> {
public:
HRegExpLiteral(HValue* context,
Handle<FixedArray> literals,
@@ -6503,7 +6635,7 @@ class HRegExpLiteral: public HMaterializedLiteral<1> {
Handle<String> pattern() { return pattern_; }
Handle<String> flags() { return flags_; }
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
@@ -6516,7 +6648,7 @@ class HRegExpLiteral: public HMaterializedLiteral<1> {
};
-class HFunctionLiteral: public HTemplateInstruction<1> {
+class HFunctionLiteral V8_FINAL : public HTemplateInstruction<1> {
public:
HFunctionLiteral(HValue* context,
Handle<SharedFunctionInfo> shared,
@@ -6534,7 +6666,7 @@ class HFunctionLiteral: public HTemplateInstruction<1> {
HValue* context() { return OperandAt(0); }
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
@@ -6547,7 +6679,7 @@ class HFunctionLiteral: public HTemplateInstruction<1> {
LanguageMode language_mode() const { return language_mode_; }
private:
- virtual bool IsDeletable() const { return true; }
+ virtual bool IsDeletable() const V8_OVERRIDE { return true; }
Handle<SharedFunctionInfo> shared_info_;
bool pretenure_ : 1;
@@ -6557,7 +6689,7 @@ class HFunctionLiteral: public HTemplateInstruction<1> {
};
-class HTypeof: public HTemplateInstruction<2> {
+class HTypeof V8_FINAL : public HTemplateInstruction<2> {
public:
explicit HTypeof(HValue* context, HValue* value) {
SetOperandAt(0, context);
@@ -6568,24 +6700,24 @@ class HTypeof: public HTemplateInstruction<2> {
HValue* context() { return OperandAt(0); }
HValue* value() { return OperandAt(1); }
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
DECLARE_CONCRETE_INSTRUCTION(Typeof)
private:
- virtual bool IsDeletable() const { return true; }
+ virtual bool IsDeletable() const V8_OVERRIDE { return true; }
};
-class HTrapAllocationMemento : public HTemplateInstruction<1> {
+class HTrapAllocationMemento V8_FINAL : public HTemplateInstruction<1> {
public:
DECLARE_INSTRUCTION_FACTORY_P1(HTrapAllocationMemento, HValue*);
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
@@ -6600,11 +6732,11 @@ class HTrapAllocationMemento : public HTemplateInstruction<1> {
};
-class HToFastProperties: public HUnaryOperation {
+class HToFastProperties V8_FINAL : public HUnaryOperation {
public:
DECLARE_INSTRUCTION_FACTORY_P1(HToFastProperties, HValue*);
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
@@ -6626,28 +6758,28 @@ class HToFastProperties: public HUnaryOperation {
#endif
}
- virtual bool IsDeletable() const { return true; }
+ virtual bool IsDeletable() const V8_OVERRIDE { return true; }
};
-class HValueOf: public HUnaryOperation {
+class HValueOf V8_FINAL : public HUnaryOperation {
public:
explicit HValueOf(HValue* value) : HUnaryOperation(value) {
set_representation(Representation::Tagged());
}
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
DECLARE_CONCRETE_INSTRUCTION(ValueOf)
private:
- virtual bool IsDeletable() const { return true; }
+ virtual bool IsDeletable() const V8_OVERRIDE { return true; }
};
-class HDateField: public HUnaryOperation {
+class HDateField V8_FINAL : public HUnaryOperation {
public:
HDateField(HValue* date, Smi* index)
: HUnaryOperation(date), index_(index) {
@@ -6656,7 +6788,7 @@ class HDateField: public HUnaryOperation {
Smi* index() const { return index_; }
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
@@ -6667,7 +6799,7 @@ class HDateField: public HUnaryOperation {
};
-class HSeqStringSetChar: public HTemplateInstruction<3> {
+class HSeqStringSetChar V8_FINAL : public HTemplateInstruction<3> {
public:
HSeqStringSetChar(String::Encoding encoding,
HValue* string,
@@ -6684,7 +6816,7 @@ class HSeqStringSetChar: public HTemplateInstruction<3> {
HValue* index() { return OperandAt(1); }
HValue* value() { return OperandAt(2); }
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return (index == 0) ? Representation::Tagged()
: Representation::Integer32();
}
@@ -6696,17 +6828,17 @@ class HSeqStringSetChar: public HTemplateInstruction<3> {
};
-class HCheckMapValue: public HTemplateInstruction<2> {
+class HCheckMapValue V8_FINAL : public HTemplateInstruction<2> {
public:
DECLARE_INSTRUCTION_FACTORY_P2(HCheckMapValue, HValue*, HValue*);
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- virtual HType CalculateInferredType() {
+ virtual HType CalculateInferredType() V8_OVERRIDE {
return HType::Tagged();
}
@@ -6716,7 +6848,7 @@ class HCheckMapValue: public HTemplateInstruction<2> {
DECLARE_CONCRETE_INSTRUCTION(CheckMapValue)
protected:
- virtual bool DataEquals(HValue* other) {
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE {
return true;
}
@@ -6733,7 +6865,7 @@ class HCheckMapValue: public HTemplateInstruction<2> {
};
-class HForInPrepareMap : public HTemplateInstruction<2> {
+class HForInPrepareMap V8_FINAL : public HTemplateInstruction<2> {
public:
static HForInPrepareMap* New(Zone* zone,
HValue* context,
@@ -6741,16 +6873,16 @@ class HForInPrepareMap : public HTemplateInstruction<2> {
return new(zone) HForInPrepareMap(context, object);
}
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
HValue* context() { return OperandAt(0); }
HValue* enumerable() { return OperandAt(1); }
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- virtual HType CalculateInferredType() {
+ virtual HType CalculateInferredType() V8_OVERRIDE {
return HType::Tagged();
}
@@ -6767,11 +6899,11 @@ class HForInPrepareMap : public HTemplateInstruction<2> {
};
-class HForInCacheArray : public HTemplateInstruction<2> {
+class HForInCacheArray V8_FINAL : public HTemplateInstruction<2> {
public:
DECLARE_INSTRUCTION_FACTORY_P3(HForInCacheArray, HValue*, HValue*, int);
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
@@ -6787,9 +6919,9 @@ class HForInCacheArray : public HTemplateInstruction<2> {
index_cache_ = index_cache;
}
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- virtual HType CalculateInferredType() {
+ virtual HType CalculateInferredType() V8_OVERRIDE {
return HType::Tagged();
}
@@ -6809,7 +6941,7 @@ class HForInCacheArray : public HTemplateInstruction<2> {
};
-class HLoadFieldByIndex : public HTemplateInstruction<2> {
+class HLoadFieldByIndex V8_FINAL : public HTemplateInstruction<2> {
public:
HLoadFieldByIndex(HValue* object,
HValue* index) {
@@ -6818,23 +6950,23 @@ class HLoadFieldByIndex : public HTemplateInstruction<2> {
set_representation(Representation::Tagged());
}
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
HValue* object() { return OperandAt(0); }
HValue* index() { return OperandAt(1); }
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- virtual HType CalculateInferredType() {
+ virtual HType CalculateInferredType() V8_OVERRIDE {
return HType::Tagged();
}
DECLARE_CONCRETE_INSTRUCTION(LoadFieldByIndex);
private:
- virtual bool IsDeletable() const { return true; }
+ virtual bool IsDeletable() const V8_OVERRIDE { return true; }
};
diff --git a/deps/v8/src/hydrogen-osr.cc b/deps/v8/src/hydrogen-osr.cc
index 73fa40a72..6b1df1e7a 100644
--- a/deps/v8/src/hydrogen-osr.cc
+++ b/deps/v8/src/hydrogen-osr.cc
@@ -80,7 +80,8 @@ HBasicBlock* HOsrBuilder::BuildPossibleOsrLoopEntry(
osr_values_ = new(zone) ZoneList<HUnknownOSRValue*>(length, zone);
for (int i = 0; i < first_expression_index; ++i) {
- HUnknownOSRValue* osr_value = builder_->Add<HUnknownOSRValue>();
+ HUnknownOSRValue* osr_value
+ = builder_->Add<HUnknownOSRValue>(environment, i);
environment->Bind(i, osr_value);
osr_values_->Add(osr_value, zone);
}
@@ -88,12 +89,21 @@ HBasicBlock* HOsrBuilder::BuildPossibleOsrLoopEntry(
if (first_expression_index != length) {
environment->Drop(length - first_expression_index);
for (int i = first_expression_index; i < length; ++i) {
- HUnknownOSRValue* osr_value = builder_->Add<HUnknownOSRValue>();
+ HUnknownOSRValue* osr_value
+ = builder_->Add<HUnknownOSRValue>(environment, i);
environment->Push(osr_value);
osr_values_->Add(osr_value, zone);
}
}
+ unoptimized_frame_slots_ =
+ environment->local_count() + environment->push_count();
+
+ // Keep a copy of the old environment, since the OSR values need it
+ // to figure out where exactly they are located in the unoptimized frame.
+ environment = environment->Copy();
+ builder_->current_block()->UpdateEnvironment(environment);
+
builder_->Add<HSimulate>(osr_entry_id);
builder_->Add<HOsrEntry>(osr_entry_id);
HContext* context = builder_->Add<HContext>();
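// A minimal sketch of the copy-before-continuing pattern above, with
// hypothetical Env/OsrValue types: each OSR value records the environment and
// the slot index it was bound at, so graph building must continue on a copy
// to keep those recorded frame positions stable.
#include <vector>

struct Env {
  std::vector<int> slots;
  Env* Copy() const { return new Env(*this); }
};

struct OsrValue {
  const Env* env;  // frozen snapshot of the unoptimized frame layout
  int index;       // slot this value occupies in that frame
};

Env* FreezeForOsr(Env* env, std::vector<OsrValue>* values) {
  for (int i = 0; i < static_cast<int>(env->slots.size()); ++i) {
    values->push_back(OsrValue{env, i});  // record (environment, slot) pairs
  }
  return env->Copy();  // the builder keeps working on the copy
}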
@@ -117,8 +127,9 @@ void HOsrBuilder::FinishOsrValues() {
const ZoneList<HPhi*>* phis = osr_loop_entry_->phis();
for (int j = 0; j < phis->length(); j++) {
HPhi* phi = phis->at(j);
- ASSERT(phi->HasMergedIndex());
- osr_values_->at(phi->merged_index())->set_incoming_value(phi);
+ if (phi->HasMergedIndex()) {
+ osr_values_->at(phi->merged_index())->set_incoming_value(phi);
+ }
}
}
diff --git a/deps/v8/src/hydrogen-osr.h b/deps/v8/src/hydrogen-osr.h
index 0c6b65d0d..5014a75bd 100644
--- a/deps/v8/src/hydrogen-osr.h
+++ b/deps/v8/src/hydrogen-osr.h
@@ -40,7 +40,8 @@ namespace internal {
class HOsrBuilder : public ZoneObject {
public:
explicit HOsrBuilder(HOptimizedGraphBuilder* builder)
- : builder_(builder),
+ : unoptimized_frame_slots_(0),
+ builder_(builder),
osr_entry_(NULL),
osr_loop_entry_(NULL),
osr_values_(NULL) { }
@@ -55,10 +56,16 @@ class HOsrBuilder : public ZoneObject {
// Process the OSR values and phis after initial graph optimization.
void FinishOsrValues();
+ // Return the number of slots in the unoptimized frame at the entry to OSR.
+ int UnoptimizedFrameSlots() const {
+ return unoptimized_frame_slots_;
+ }
+
private:
HBasicBlock* BuildLoopEntry();
bool HasOsrEntryAt(IterationStatement* statement);
+ int unoptimized_frame_slots_;
HOptimizedGraphBuilder* builder_;
HBasicBlock* osr_entry_;
HBasicBlock* osr_loop_entry_;
diff --git a/deps/v8/src/hydrogen-representation-changes.cc b/deps/v8/src/hydrogen-representation-changes.cc
index 862457db3..960113782 100644
--- a/deps/v8/src/hydrogen-representation-changes.cc
+++ b/deps/v8/src/hydrogen-representation-changes.cc
@@ -99,7 +99,8 @@ void HRepresentationChangesPhase::Run() {
// int32-phis allow truncation and iteratively remove the ones that
// are used in an operation that does not allow a truncating
// conversion.
- ZoneList<HPhi*> worklist(8, zone());
+ ZoneList<HPhi*> int_worklist(8, zone());
+ ZoneList<HPhi*> smi_worklist(8, zone());
const ZoneList<HPhi*>* phi_list(graph()->phi_list());
for (int i = 0; i < phi_list->length(); i++) {
@@ -108,51 +109,64 @@ void HRepresentationChangesPhase::Run() {
phi->SetFlag(HValue::kTruncatingToInt32);
} else if (phi->representation().IsSmi()) {
phi->SetFlag(HValue::kTruncatingToSmi);
+ phi->SetFlag(HValue::kTruncatingToInt32);
}
}
for (int i = 0; i < phi_list->length(); i++) {
HPhi* phi = phi_list->at(i);
- for (HUseIterator it(phi->uses()); !it.Done(); it.Advance()) {
- // If a Phi is used as a non-truncating int32 or as a double,
- // clear its "truncating" flag.
- HValue* use = it.value();
- Representation input_representation =
- use->RequiredInputRepresentation(it.index());
- if ((phi->representation().IsInteger32() &&
- !(input_representation.IsInteger32() &&
- use->CheckFlag(HValue::kTruncatingToInt32))) ||
- (phi->representation().IsSmi() &&
- !(input_representation.IsSmi() &&
- use->CheckFlag(HValue::kTruncatingToSmi)))) {
+ HValue* value = NULL;
+ if (phi->representation().IsSmiOrInteger32() &&
+ !phi->CheckUsesForFlag(HValue::kTruncatingToInt32, &value)) {
+ int_worklist.Add(phi, zone());
+ phi->ClearFlag(HValue::kTruncatingToInt32);
+ if (FLAG_trace_representation) {
+ PrintF("#%d Phi is not truncating Int32 because of #%d %s\n",
+ phi->id(), value->id(), value->Mnemonic());
+ }
+ }
+
+ if (phi->representation().IsSmi() &&
+ !phi->CheckUsesForFlag(HValue::kTruncatingToSmi, &value)) {
+ smi_worklist.Add(phi, zone());
+ phi->ClearFlag(HValue::kTruncatingToSmi);
+ if (FLAG_trace_representation) {
+ PrintF("#%d Phi is not truncating Smi because of #%d %s\n",
+ phi->id(), value->id(), value->Mnemonic());
+ }
+ }
+ }
+
+ while (!int_worklist.is_empty()) {
+ HPhi* current = int_worklist.RemoveLast();
+ for (int i = 0; i < current->OperandCount(); ++i) {
+ HValue* input = current->OperandAt(i);
+ if (input->IsPhi() &&
+ input->representation().IsSmiOrInteger32() &&
+ input->CheckFlag(HValue::kTruncatingToInt32)) {
if (FLAG_trace_representation) {
- PrintF("#%d Phi is not truncating because of #%d %s\n",
- phi->id(), it.value()->id(), it.value()->Mnemonic());
+ PrintF("#%d Phi is not truncating Int32 because of #%d %s\n",
+ input->id(), current->id(), current->Mnemonic());
}
- phi->ClearFlag(HValue::kTruncatingToInt32);
- phi->ClearFlag(HValue::kTruncatingToSmi);
- worklist.Add(phi, zone());
- break;
+ input->ClearFlag(HValue::kTruncatingToInt32);
+ int_worklist.Add(HPhi::cast(input), zone());
}
}
}
- while (!worklist.is_empty()) {
- HPhi* current = worklist.RemoveLast();
+ while (!smi_worklist.is_empty()) {
+ HPhi* current = smi_worklist.RemoveLast();
for (int i = 0; i < current->OperandCount(); ++i) {
HValue* input = current->OperandAt(i);
if (input->IsPhi() &&
- ((input->representation().IsInteger32() &&
- input->CheckFlag(HValue::kTruncatingToInt32)) ||
- (input->representation().IsSmi() &&
- input->CheckFlag(HValue::kTruncatingToSmi)))) {
+ input->representation().IsSmi() &&
+ input->CheckFlag(HValue::kTruncatingToSmi)) {
if (FLAG_trace_representation) {
- PrintF("#%d Phi is not truncating because of #%d %s\n",
+ PrintF("#%d Phi is not truncating Smi because of #%d %s\n",
input->id(), current->id(), current->Mnemonic());
}
- input->ClearFlag(HValue::kTruncatingToInt32);
input->ClearFlag(HValue::kTruncatingToSmi);
- worklist.Add(HPhi::cast(input), zone());
+ smi_worklist.Add(HPhi::cast(input), zone());
}
}
}
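// A minimal sketch of the per-flag worklist fixed point above, with a
// hypothetical Phi type (the pass runs one such loop each for the Int32 and
// Smi truncation flags): clearing a phi's truncating flag can invalidate the
// flag on its phi operands, so those are revisited until nothing changes.
#include <vector>

struct Phi {
  bool truncating = true;
  std::vector<Phi*> phi_operands;  // operands that are themselves phis
};

void PropagateNonTruncating(std::vector<Phi*> worklist) {
  for (Phi* seed : worklist) seed->truncating = false;  // seeded by use analysis
  while (!worklist.empty()) {
    Phi* current = worklist.back();
    worklist.pop_back();
    for (Phi* input : current->phi_operands) {
      if (input->truncating) {
        input->truncating = false;  // demote it and revisit its own operands
        worklist.push_back(input);
      }
    }
  }
}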
diff --git a/deps/v8/src/hydrogen.cc b/deps/v8/src/hydrogen.cc
index f9ee50c1b..15ef5ed0b 100644
--- a/deps/v8/src/hydrogen.cc
+++ b/deps/v8/src/hydrogen.cc
@@ -649,7 +649,7 @@ HConstant* HGraph::GetConstant##Name() { \
if (!constant_##name##_.is_set()) { \
HConstant* constant = new(zone()) HConstant( \
isolate()->factory()->name##_value(), \
- UniqueValueId(isolate()->heap()->name##_value()), \
+ UniqueValueId::name##_value(isolate()->heap()), \
Representation::Tagged(), \
htype, \
false, \
@@ -828,7 +828,6 @@ void HGraphBuilder::IfBuilder::Else() {
ASSERT(!captured_);
ASSERT(!finished_);
last_true_block_ = builder_->current_block();
- ASSERT(first_true_block_ == NULL || !last_true_block_->IsFinished());
builder_->set_current_block(first_false_block_);
did_else_ = true;
}
@@ -864,9 +863,11 @@ void HGraphBuilder::IfBuilder::End() {
if (!did_else_) {
last_true_block_ = builder_->current_block();
}
- if (first_true_block_ == NULL) {
+ if (last_true_block_ == NULL || last_true_block_->IsFinished()) {
+ ASSERT(did_else_);
// Return on true. Nothing to do, just continue the false block.
- } else if (first_false_block_ == NULL) {
+ } else if (first_false_block_ == NULL ||
+ (did_else_ && builder_->current_block()->IsFinished())) {
// Deopt on false. Nothing to do except switching to the true block.
builder_->set_current_block(last_true_block_);
} else {
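// A minimal sketch of the merge decision above, with hypothetical blocks: an
// arm that already ended (returned or deoptimized) contributes no edge to a
// join, so a join block is only needed when both arms are still live.
struct ArmBlock { bool finished; };

enum class EndAction { ContinueFalse, ContinueTrue, CreateJoin };

EndAction DecideEnd(const ArmBlock* true_arm, const ArmBlock* false_arm) {
  if (true_arm == nullptr || true_arm->finished) return EndAction::ContinueFalse;
  if (false_arm == nullptr || false_arm->finished) return EndAction::ContinueTrue;
  return EndAction::CreateJoin;
}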
@@ -906,6 +907,24 @@ HGraphBuilder::LoopBuilder::LoopBuilder(HGraphBuilder* builder,
header_block_ = builder->CreateLoopHeaderBlock();
body_block_ = NULL;
exit_block_ = NULL;
+ exit_trampoline_block_ = NULL;
+ increment_amount_ = builder_->graph()->GetConstant1();
+}
+
+
+HGraphBuilder::LoopBuilder::LoopBuilder(HGraphBuilder* builder,
+ HValue* context,
+ LoopBuilder::Direction direction,
+ HValue* increment_amount)
+ : builder_(builder),
+ context_(context),
+ direction_(direction),
+ finished_(false) {
+ header_block_ = builder->CreateLoopHeaderBlock();
+ body_block_ = NULL;
+ exit_block_ = NULL;
+ exit_trampoline_block_ = NULL;
+ increment_amount_ = increment_amount;
}
@@ -921,12 +940,14 @@ HValue* HGraphBuilder::LoopBuilder::BeginBody(
HEnvironment* body_env = env->Copy();
HEnvironment* exit_env = env->Copy();
- body_block_ = builder_->CreateBasicBlock(body_env);
- exit_block_ = builder_->CreateBasicBlock(exit_env);
// Remove the phi from the expression stack
body_env->Pop();
+ exit_env->Pop();
+ body_block_ = builder_->CreateBasicBlock(body_env);
+ exit_block_ = builder_->CreateBasicBlock(exit_env);
builder_->set_current_block(header_block_);
+ env->Pop();
HCompareNumericAndBranch* compare =
new(zone()) HCompareNumericAndBranch(phi_, terminating, token);
compare->SetSuccessorAt(0, body_block_);
@@ -950,15 +971,26 @@ HValue* HGraphBuilder::LoopBuilder::BeginBody(
}
+void HGraphBuilder::LoopBuilder::Break() {
+ if (exit_trampoline_block_ == NULL) {
+ // It's the first time we've seen a break.
+ HEnvironment* env = exit_block_->last_environment()->Copy();
+ exit_trampoline_block_ = builder_->CreateBasicBlock(env);
+ exit_block_->GotoNoSimulate(exit_trampoline_block_);
+ }
+
+ builder_->current_block()->GotoNoSimulate(exit_trampoline_block_);
+}
+
+
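// A minimal sketch of the lazily created break trampoline above, with
// hypothetical Block/Loop types: the first Break() materializes one shared
// exit-trampoline block, and every later Break() just jumps to it, so the
// loop keeps a single exit merge point.
#include <vector>

struct Block {
  std::vector<Block*> successors;
};

struct LoopSketch {
  Block* exit_block;
  Block* trampoline = nullptr;

  void Break(Block* current) {
    if (trampoline == nullptr) {  // first break seen
      trampoline = new Block();
      exit_block->successors.push_back(trampoline);
    }
    current->successors.push_back(trampoline);
  }
};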
void HGraphBuilder::LoopBuilder::EndBody() {
ASSERT(!finished_);
if (direction_ == kPostIncrement || direction_ == kPostDecrement) {
- HValue* one = builder_->graph()->GetConstant1();
if (direction_ == kPostIncrement) {
- increment_ = HAdd::New(zone(), context_, phi_, one);
+ increment_ = HAdd::New(zone(), context_, phi_, increment_amount_);
} else {
- increment_ = HSub::New(zone(), context_, phi_, one);
+ increment_ = HSub::New(zone(), context_, phi_, increment_amount_);
}
increment_->ClearFlag(HValue::kCanOverflow);
builder_->AddInstruction(increment_);
@@ -970,9 +1002,11 @@ void HGraphBuilder::LoopBuilder::EndBody() {
last_block->GotoNoSimulate(header_block_);
header_block_->loop_information()->RegisterBackEdge(last_block);
- builder_->set_current_block(exit_block_);
- // Pop the phi from the expression stack
- builder_->environment()->Pop();
+ if (exit_trampoline_block_ != NULL) {
+ builder_->set_current_block(exit_trampoline_block_);
+ } else {
+ builder_->set_current_block(exit_block_);
+ }
finished_ = true;
}
@@ -1191,7 +1225,7 @@ void HGraphBuilder::BuildTransitionElementsKind(HValue* object,
}
if (!IsSimpleMapChangeTransition(from_kind, to_kind)) {
- HInstruction* elements = AddLoadElements(object, NULL);
+ HInstruction* elements = AddLoadElements(object);
HInstruction* empty_fixed_array = Add<HConstant>(
isolate()->factory()->empty_fixed_array());
@@ -1219,10 +1253,9 @@ void HGraphBuilder::BuildTransitionElementsKind(HValue* object,
HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
- HValue* object,
+ HValue* checked_object,
HValue* key,
HValue* val,
- HCheckMaps* mapcheck,
bool is_js_array,
ElementsKind elements_kind,
bool is_store,
@@ -1237,13 +1270,12 @@ HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
// generated store code.
if ((elements_kind == FAST_HOLEY_ELEMENTS) ||
(elements_kind == FAST_ELEMENTS && is_store)) {
- if (mapcheck != NULL) {
- mapcheck->ClearGVNFlag(kDependsOnElementsKind);
- }
+ checked_object->ClearGVNFlag(kDependsOnElementsKind);
}
+
bool fast_smi_only_elements = IsFastSmiElementsKind(elements_kind);
bool fast_elements = IsFastObjectElementsKind(elements_kind);
- HValue* elements = AddLoadElements(object, mapcheck);
+ HValue* elements = AddLoadElements(checked_object);
if (is_store && (fast_elements || fast_smi_only_elements) &&
store_mode != STORE_NO_TRANSITION_HANDLE_COW) {
HCheckMaps* check_cow_map = Add<HCheckMaps>(
@@ -1252,8 +1284,8 @@ HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
}
HInstruction* length = NULL;
if (is_js_array) {
- length = Add<HLoadNamedField>(object,
- HObjectAccess::ForArrayLength(elements_kind), mapcheck);
+ length = Add<HLoadNamedField>(
+ checked_object, HObjectAccess::ForArrayLength(elements_kind));
} else {
length = AddLoadFixedArrayLength(elements);
}
@@ -1283,7 +1315,7 @@ HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
Add<HLoadExternalArrayPointer>(elements);
return AddExternalArrayElementAccess(
external_elements, checked_key, val,
- mapcheck, elements_kind, is_store);
+ checked_object, elements_kind, is_store);
}
}
ASSERT(fast_smi_only_elements ||
@@ -1300,8 +1332,9 @@ HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
if (IsGrowStoreMode(store_mode)) {
NoObservableSideEffectsScope no_effects(this);
- elements = BuildCheckForCapacityGrow(object, elements, elements_kind,
- length, key, is_js_array);
+ elements = BuildCheckForCapacityGrow(checked_object, elements,
+ elements_kind, length, key,
+ is_js_array);
checked_key = key;
} else {
checked_key = Add<HBoundsCheck>(key, length);
@@ -1309,9 +1342,8 @@ HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
if (is_store && (fast_elements || fast_smi_only_elements)) {
if (store_mode == STORE_NO_TRANSITION_HANDLE_COW) {
NoObservableSideEffectsScope no_effects(this);
-
- elements = BuildCopyElementsOnWrite(object, elements, elements_kind,
- length);
+ elements = BuildCopyElementsOnWrite(checked_object, elements,
+ elements_kind, length);
} else {
HCheckMaps* check_cow_map = Add<HCheckMaps>(
elements, isolate()->factory()->fixed_array_map(),
@@ -1320,7 +1352,7 @@ HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
}
}
}
- return AddFastElementAccess(elements, checked_key, val, mapcheck,
+ return AddFastElementAccess(elements, checked_key, val, checked_object,
elements_kind, is_store, load_mode, store_mode);
}
@@ -1493,11 +1525,8 @@ HInstruction* HGraphBuilder::AddFastElementAccess(
}
-HLoadNamedField* HGraphBuilder::AddLoadElements(HValue* object,
- HValue* typecheck) {
- return Add<HLoadNamedField>(object,
- HObjectAccess::ForElementsPointer(),
- typecheck);
+HLoadNamedField* HGraphBuilder::AddLoadElements(HValue* object) {
+ return Add<HLoadNamedField>(object, HObjectAccess::ForElementsPointer());
}
@@ -1508,14 +1537,15 @@ HLoadNamedField* HGraphBuilder::AddLoadFixedArrayLength(HValue* object) {
HValue* HGraphBuilder::BuildNewElementsCapacity(HValue* old_capacity) {
- HValue* half_old_capacity = Add<HShr>(old_capacity, graph_->GetConstant1());
+ HValue* half_old_capacity = AddUncasted<HShr>(old_capacity,
+ graph_->GetConstant1());
- HValue* new_capacity = Add<HAdd>(half_old_capacity, old_capacity);
+ HValue* new_capacity = AddUncasted<HAdd>(half_old_capacity, old_capacity);
new_capacity->ClearFlag(HValue::kCanOverflow);
HValue* min_growth = Add<HConstant>(16);
- new_capacity = Add<HAdd>(new_capacity, min_growth);
+ new_capacity = AddUncasted<HAdd>(new_capacity, min_growth);
new_capacity->ClearFlag(HValue::kCanOverflow);
return new_capacity;
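// A minimal scalar sketch of the growth rule above: the new capacity is
// old + old/2 + 16, i.e. roughly 1.5x growth plus a 16-slot minimum step
// (e.g. 16 -> 40, 100 -> 166).
int NewElementsCapacity(int old_capacity) {
  int half_old_capacity = old_capacity >> 1;             // old/2, rounded down
  int new_capacity = half_old_capacity + old_capacity;   // ~1.5x
  return new_capacity + 16;                              // minimum growth of 16
}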
@@ -1679,22 +1709,12 @@ HValue* HGraphBuilder::BuildCloneShallowArray(HValue* boilerplate,
if (mode == TRACK_ALLOCATION_SITE) {
size += AllocationMemento::kSize;
}
- int elems_offset = size;
- InstanceType instance_type = IsFastDoubleElementsKind(kind) ?
- FIXED_DOUBLE_ARRAY_TYPE : FIXED_ARRAY_TYPE;
- if (length > 0) {
- size += IsFastDoubleElementsKind(kind)
- ? FixedDoubleArray::SizeFor(length)
- : FixedArray::SizeFor(length);
- }
- // Allocate both the JS array and the elements array in one big
- // allocation. This avoids multiple limit checks.
HValue* size_in_bytes = Add<HConstant>(size);
HInstruction* object = Add<HAllocate>(size_in_bytes,
HType::JSObject(),
NOT_TENURED,
- instance_type);
+ JS_OBJECT_TYPE);
// Copy the JS array part.
for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
@@ -1711,10 +1731,17 @@ HValue* HGraphBuilder::BuildCloneShallowArray(HValue* boilerplate,
}
if (length > 0) {
- // Get hold of the elements array of the boilerplate and setup the
- // elements pointer in the resulting object.
- HValue* boilerplate_elements = AddLoadElements(boilerplate, NULL);
- HValue* object_elements = Add<HInnerAllocatedObject>(object, elems_offset);
+ HValue* boilerplate_elements = AddLoadElements(boilerplate);
+ HValue* object_elements;
+ if (IsFastDoubleElementsKind(kind)) {
+ HValue* elems_size = Add<HConstant>(FixedDoubleArray::SizeFor(length));
+ object_elements = Add<HAllocate>(elems_size, HType::JSArray(),
+ NOT_TENURED, FIXED_DOUBLE_ARRAY_TYPE);
+ } else {
+ HValue* elems_size = Add<HConstant>(FixedArray::SizeFor(length));
+ object_elements = Add<HAllocate>(elems_size, HType::JSArray(),
+ NOT_TENURED, FIXED_ARRAY_TYPE);
+ }
Add<HStoreNamedField>(object, HObjectAccess::ForElementsPointer(),
object_elements);
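// A minimal sketch of the size selection above, with hypothetical constants:
// a double-elements backing store holds unboxed 64-bit values, so it is sized
// differently from a tagged FixedArray of the same length.
int ElementsSizeFor(bool is_double_kind, int length,
                    int header_size, int pointer_size) {
  int element_size = is_double_kind ? 8 : pointer_size;  // unboxed vs tagged
  return header_size + length * element_size;
}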
@@ -1747,22 +1774,35 @@ void HGraphBuilder::BuildCompareNil(
int position,
HIfContinuation* continuation) {
IfBuilder if_nil(this, position);
- bool needs_or = false;
+ bool some_case_handled = false;
+ bool some_case_missing = false;
+
if (type->Maybe(Type::Null())) {
- if (needs_or) if_nil.Or();
+ if (some_case_handled) if_nil.Or();
if_nil.If<HCompareObjectEqAndBranch>(value, graph()->GetConstantNull());
- needs_or = true;
+ some_case_handled = true;
+ } else {
+ some_case_missing = true;
}
+
if (type->Maybe(Type::Undefined())) {
- if (needs_or) if_nil.Or();
+ if (some_case_handled) if_nil.Or();
if_nil.If<HCompareObjectEqAndBranch>(value,
graph()->GetConstantUndefined());
- needs_or = true;
+ some_case_handled = true;
+ } else {
+ some_case_missing = true;
}
+
if (type->Maybe(Type::Undetectable())) {
- if (needs_or) if_nil.Or();
+ if (some_case_handled) if_nil.Or();
if_nil.If<HIsUndetectableAndBranch>(value);
+ some_case_handled = true;
} else {
+ some_case_missing = true;
+ }
+
+ if (some_case_missing) {
if_nil.Then();
if_nil.Else();
if (type->NumClasses() == 1) {
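// A minimal sketch of the bookkeeping above, with hypothetical flags: each
// nil-like case the type may include is OR-ed into the emitted condition
// (some_case_handled), while each excluded case means a value can fail every
// emitted check (some_case_missing), which is what forces the explicit
// Then/Else split that follows.
struct NilChecks {
  bool handled = false;  // at least one comparison was emitted
  bool missing = false;  // the type admits values no comparison covers
  void Consider(bool type_may_include_case) {
    if (type_may_include_case) handled = true;
    else missing = true;
  }
};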
@@ -1784,7 +1824,8 @@ void HGraphBuilder::BuildCompareNil(
HValue* HGraphBuilder::BuildCreateAllocationMemento(HValue* previous_object,
int previous_object_size,
HValue* alloc_site) {
- ASSERT(alloc_site != NULL);
+ // TODO(mvstanton): ASSERT altered to CHECK to diagnose chromium bug 284577
+ CHECK(alloc_site != NULL);
HInnerAllocatedObject* alloc_memento = Add<HInnerAllocatedObject>(
previous_object, previous_object_size);
Handle<Map> alloc_memento_map(
@@ -1846,7 +1887,7 @@ HValue* HGraphBuilder::JSArrayBuilder::EmitMapCode() {
// map, because we can just load the map in that case.
HObjectAccess access = HObjectAccess::ForPrototypeOrInitialMap();
return builder()->AddInstruction(
- builder()->BuildLoadNamedField(constructor_function_, access, NULL));
+ builder()->BuildLoadNamedField(constructor_function_, access));
}
HInstruction* native_context = builder()->BuildGetNativeContext();
@@ -1867,7 +1908,7 @@ HValue* HGraphBuilder::JSArrayBuilder::EmitInternalMapCode() {
// Find the map near the constructor function
HObjectAccess access = HObjectAccess::ForPrototypeOrInitialMap();
return builder()->AddInstruction(
- builder()->BuildLoadNamedField(constructor_function_, access, NULL));
+ builder()->BuildLoadNamedField(constructor_function_, access));
}
@@ -2000,7 +2041,7 @@ HOptimizedGraphBuilder::HOptimizedGraphBuilder(CompilationInfo* info)
// constructor for the initial state relies on function_state_ == NULL
// to know it's the initial state.
function_state_= &initial_function_state_;
- InitializeAstVisitor();
+ InitializeAstVisitor(info->isolate());
}
@@ -3963,13 +4004,20 @@ void HOptimizedGraphBuilder::VisitRegExpLiteral(RegExpLiteral* expr) {
}
+static bool CanInlinePropertyAccess(Map* type) {
+ return type->IsJSObjectMap() &&
+ !type->is_dictionary_map() &&
+ !type->has_named_interceptor();
+}
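// A minimal sketch of the predicate shape above, assuming hypothetical map
// flags: inlining a named load or store is only safe when the receiver is a
// plain JS object with fast (descriptor-based) properties and no named
// interceptor that could observe the access.
struct MapBits {
  bool is_js_object;
  bool is_dictionary;    // slow/dictionary-mode properties
  bool has_interceptor;  // a named interceptor can observe accesses
};

bool CanInline(const MapBits& m) {
  return m.is_js_object && !m.is_dictionary && !m.has_interceptor;
}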
+
+
static void LookupInPrototypes(Handle<Map> map,
Handle<String> name,
LookupResult* lookup) {
while (map->prototype()->IsJSObject()) {
Handle<JSObject> holder(JSObject::cast(map->prototype()));
- if (!holder->HasFastProperties()) break;
map = Handle<Map>(holder->map());
+ if (!CanInlinePropertyAccess(*map)) break;
map->LookupDescriptor(*holder, *name, lookup);
if (lookup->IsFound()) return;
}
@@ -4049,9 +4097,7 @@ static bool LookupSetter(Handle<Map> map,
// size of all objects that are part of the graph.
static bool IsFastLiteral(Handle<JSObject> boilerplate,
int max_depth,
- int* max_properties,
- int* data_size,
- int* pointer_size) {
+ int* max_properties) {
if (boilerplate->map()->is_deprecated()) {
Handle<Object> result = JSObject::TryMigrateInstance(boilerplate);
if (result->IsSmi()) return false;
@@ -4064,9 +4110,7 @@ static bool IsFastLiteral(Handle<JSObject> boilerplate,
Handle<FixedArrayBase> elements(boilerplate->elements());
if (elements->length() > 0 &&
elements->map() != isolate->heap()->fixed_cow_array_map()) {
- if (boilerplate->HasFastDoubleElements()) {
- *data_size += FixedDoubleArray::SizeFor(elements->length());
- } else if (boilerplate->HasFastObjectElements()) {
+ if (boilerplate->HasFastObjectElements()) {
Handle<FixedArray> fast_elements = Handle<FixedArray>::cast(elements);
int length = elements->length();
for (int i = 0; i < length; i++) {
@@ -4076,15 +4120,12 @@ static bool IsFastLiteral(Handle<JSObject> boilerplate,
Handle<JSObject> value_object = Handle<JSObject>::cast(value);
if (!IsFastLiteral(value_object,
max_depth - 1,
- max_properties,
- data_size,
- pointer_size)) {
+ max_properties)) {
return false;
}
}
}
- *pointer_size += FixedArray::SizeFor(length);
- } else {
+ } else if (!boilerplate->HasFastDoubleElements()) {
return false;
}
}
@@ -4099,7 +4140,6 @@ static bool IsFastLiteral(Handle<JSObject> boilerplate,
for (int i = 0; i < limit; i++) {
PropertyDetails details = descriptors->GetDetails(i);
if (details.type() != FIELD) continue;
- Representation representation = details.representation();
int index = descriptors->GetFieldIndex(i);
if ((*max_properties)-- == 0) return false;
Handle<Object> value(boilerplate->InObjectPropertyAt(index), isolate);
@@ -4107,18 +4147,12 @@ static bool IsFastLiteral(Handle<JSObject> boilerplate,
Handle<JSObject> value_object = Handle<JSObject>::cast(value);
if (!IsFastLiteral(value_object,
max_depth - 1,
- max_properties,
- data_size,
- pointer_size)) {
+ max_properties)) {
return false;
}
- } else if (representation.IsDouble()) {
- *data_size += HeapNumber::kSize;
}
}
}
-
- *pointer_size += boilerplate->map()->instance_size();
return true;
}
@@ -4128,32 +4162,21 @@ void HOptimizedGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
Handle<JSFunction> closure = function_state()->compilation_info()->closure();
- HValue* context = environment()->context();
HInstruction* literal;
// Check whether to use fast or slow deep-copying for boilerplate.
- int data_size = 0;
- int pointer_size = 0;
int max_properties = kMaxFastLiteralProperties;
- Handle<Object> original_boilerplate(closure->literals()->get(
+ Handle<Object> boilerplate(closure->literals()->get(
expr->literal_index()), isolate());
- if (original_boilerplate->IsJSObject() &&
- IsFastLiteral(Handle<JSObject>::cast(original_boilerplate),
+ if (boilerplate->IsJSObject() &&
+ IsFastLiteral(Handle<JSObject>::cast(boilerplate),
kMaxFastLiteralDepth,
- &max_properties,
- &data_size,
- &pointer_size)) {
- Handle<JSObject> original_boilerplate_object =
- Handle<JSObject>::cast(original_boilerplate);
+ &max_properties)) {
Handle<JSObject> boilerplate_object =
- DeepCopy(original_boilerplate_object);
+ Handle<JSObject>::cast(boilerplate);
- literal = BuildFastLiteral(context,
- boilerplate_object,
- original_boilerplate_object,
+ literal = BuildFastLiteral(boilerplate_object,
Handle<Object>::null(),
- data_size,
- pointer_size,
DONT_TRACK_ALLOCATION_SITE);
} else {
NoObservableSideEffectsScope no_effects(this);
@@ -4255,7 +4278,6 @@ void HOptimizedGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
ASSERT(current_block()->HasPredecessor());
ZoneList<Expression*>* subexprs = expr->values();
int length = subexprs->length();
- HValue* context = environment()->context();
HInstruction* literal;
Handle<AllocationSite> site;
@@ -4289,10 +4311,10 @@ void HOptimizedGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
ASSERT(!raw_boilerplate.is_null());
ASSERT(site->IsLiteralSite());
- Handle<JSObject> original_boilerplate_object =
+ Handle<JSObject> boilerplate_object =
Handle<JSObject>::cast(raw_boilerplate);
ElementsKind boilerplate_elements_kind =
- Handle<JSObject>::cast(original_boilerplate_object)->GetElementsKind();
+ Handle<JSObject>::cast(boilerplate_object)->GetElementsKind();
// TODO(mvstanton): This heuristic is only a temporary solution. In the
// end, we want to quit creating allocation site info after a certain number
@@ -4301,26 +4323,12 @@ void HOptimizedGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
boilerplate_elements_kind);
// Check whether to use fast or slow deep-copying for boilerplate.
- int data_size = 0;
- int pointer_size = 0;
int max_properties = kMaxFastLiteralProperties;
- HCheckMaps* type_check = NULL;
- if (IsFastLiteral(original_boilerplate_object,
+ if (IsFastLiteral(boilerplate_object,
kMaxFastLiteralDepth,
- &max_properties,
- &data_size,
- &pointer_size)) {
- if (mode == TRACK_ALLOCATION_SITE) {
- pointer_size += AllocationMemento::kSize;
- }
-
- Handle<JSObject> boilerplate_object = DeepCopy(original_boilerplate_object);
- literal = BuildFastLiteral(context,
- boilerplate_object,
- original_boilerplate_object,
+ &max_properties)) {
+ literal = BuildFastLiteral(boilerplate_object,
site,
- data_size,
- pointer_size,
mode);
} else {
NoObservableSideEffectsScope no_effects(this);
@@ -4340,9 +4348,8 @@ void HOptimizedGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
3);
// De-opt if elements kind changed from boilerplate_elements_kind.
- Handle<Map> map = Handle<Map>(original_boilerplate_object->map(),
- isolate());
- type_check = Add<HCheckMaps>(literal, map, top_info());
+ Handle<Map> map = Handle<Map>(boilerplate_object->map(), isolate());
+ literal = Add<HCheckMaps>(literal, map, top_info());
}
// The array is expected in the bailout environment during computation
@@ -4363,7 +4370,7 @@ void HOptimizedGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
HValue* value = Pop();
if (!Smi::IsValid(i)) return Bailout(kNonSmiKeyInArrayLiteral);
- elements = AddLoadElements(literal, type_check);
+ elements = AddLoadElements(literal);
HValue* key = Add<HConstant>(i);
@@ -4398,8 +4405,8 @@ static bool ComputeLoadStoreField(Handle<Map> type,
LookupResult* lookup,
bool is_store) {
ASSERT(!is_store || !type->is_observed());
- if (type->has_named_interceptor()) {
- lookup->InterceptorResult(NULL);
+ if (!CanInlinePropertyAccess(*type)) {
+ lookup->NotFound();
return false;
}
// If we directly find a field, the access can be inlined.
@@ -4425,7 +4432,7 @@ HCheckMaps* HOptimizedGraphBuilder::AddCheckMap(HValue* object,
HInstruction* HOptimizedGraphBuilder::BuildStoreNamedField(
- HValue* object,
+ HValue* checked_object,
Handle<String> name,
HValue* value,
Handle<Map> map,
@@ -4477,11 +4484,12 @@ HInstruction* HOptimizedGraphBuilder::BuildStoreNamedField(
AddStoreMapConstant(heap_number, isolate()->factory()->heap_number_map());
Add<HStoreNamedField>(heap_number, HObjectAccess::ForHeapNumberValue(),
value);
- instr = New<HStoreNamedField>(object, heap_number_access,
- heap_number);
+ instr = New<HStoreNamedField>(checked_object->ActualValue(),
+ heap_number_access,
+ heap_number);
} else {
// Already holds a HeapNumber; load the box and write its value field.
- HInstruction* heap_number = Add<HLoadNamedField>(object,
+ HInstruction* heap_number = Add<HLoadNamedField>(checked_object,
heap_number_access);
heap_number->set_type(HType::HeapNumber());
instr = New<HStoreNamedField>(heap_number,
@@ -4490,7 +4498,9 @@ HInstruction* HOptimizedGraphBuilder::BuildStoreNamedField(
}
} else {
// This is a normal store.
- instr = New<HStoreNamedField>(object, field_access, value);
+ instr = New<HStoreNamedField>(checked_object->ActualValue(),
+ field_access,
+ value);
}
if (transition_to_field) {
@@ -4527,8 +4537,8 @@ HInstruction* HOptimizedGraphBuilder::BuildStoreNamedMonomorphic(
// Handle a store to a known field.
LookupResult lookup(isolate());
if (ComputeLoadStoreField(map, name, &lookup, true)) {
- AddCheckMap(object, map);
- return BuildStoreNamedField(object, name, value, map, &lookup);
+ HCheckMaps* checked_object = AddCheckMap(object, map);
+ return BuildStoreNamedField(checked_object, name, value, map, &lookup);
}
// No luck, do a generic store.
@@ -4539,8 +4549,7 @@ HInstruction* HOptimizedGraphBuilder::BuildStoreNamedMonomorphic(
static bool CanLoadPropertyFromPrototype(Handle<Map> map,
Handle<Name> name,
LookupResult* lookup) {
- if (map->has_named_interceptor()) return false;
- if (map->is_dictionary_map()) return false;
+ if (!CanInlinePropertyAccess(*map)) return false;
map->LookupDescriptor(NULL, *name, lookup);
if (lookup->IsFound()) return false;
return true;
@@ -4548,7 +4557,6 @@ static bool CanLoadPropertyFromPrototype(Handle<Map> map,
HInstruction* HOptimizedGraphBuilder::TryLoadPolymorphicAsMonomorphic(
- Property* expr,
HValue* object,
SmallMapList* types,
Handle<String> name) {
@@ -4586,8 +4594,8 @@ HInstruction* HOptimizedGraphBuilder::TryLoadPolymorphicAsMonomorphic(
if (count == types->length()) {
// Everything matched; can use monomorphic load.
BuildCheckHeapObject(object);
- HCheckMaps* type_check = Add<HCheckMaps>(object, types);
- return BuildLoadNamedField(object, access, type_check);
+ HCheckMaps* checked_object = Add<HCheckMaps>(object, types);
+ return BuildLoadNamedField(checked_object, access);
}
if (count != 0) return NULL;
@@ -4608,14 +4616,14 @@ HInstruction* HOptimizedGraphBuilder::TryLoadPolymorphicAsMonomorphic(
if (!lookup.IsField()) return NULL;
BuildCheckHeapObject(object);
- HCheckMaps* type_check = Add<HCheckMaps>(object, types);
+ Add<HCheckMaps>(object, types);
Handle<JSObject> holder(lookup.holder());
Handle<Map> holder_map(holder->map());
- BuildCheckPrototypeMaps(Handle<JSObject>::cast(prototype), holder);
- HValue* holder_value = Add<HConstant>(holder);
- return BuildLoadNamedField(holder_value,
- HObjectAccess::ForField(holder_map, &lookup, name), type_check);
+ HValue* checked_holder = BuildCheckPrototypeMaps(
+ Handle<JSObject>::cast(prototype), holder);
+ return BuildLoadNamedField(checked_holder,
+ HObjectAccess::ForField(holder_map, &lookup, name));
}
@@ -4632,9 +4640,8 @@ static bool PrototypeChainCanNeverResolve(
if (current->IsJSGlobalProxy() ||
current->IsGlobalObject() ||
!current->IsJSObject() ||
- JSObject::cast(current)->map()->has_named_interceptor() ||
- JSObject::cast(current)->IsAccessCheckNeeded() ||
- !JSObject::cast(current)->HasFastProperties()) {
+ !CanInlinePropertyAccess(JSObject::cast(current)->map()) ||
+ JSObject::cast(current)->IsAccessCheckNeeded()) {
return false;
}
@@ -4650,15 +4657,15 @@ static bool PrototypeChainCanNeverResolve(
void HOptimizedGraphBuilder::HandlePolymorphicLoadNamedField(
- Property* expr,
+ int position,
+ BailoutId ast_id,
HValue* object,
SmallMapList* types,
Handle<String> name) {
- HInstruction* instr = TryLoadPolymorphicAsMonomorphic(
- expr, object, types, name);
+ HInstruction* instr = TryLoadPolymorphicAsMonomorphic(object, types, name);
if (instr != NULL) {
- instr->set_position(expr->position());
- return ast_context()->ReturnInstruction(instr, expr->id());
+ instr->set_position(position);
+ return ast_context()->ReturnInstruction(instr, ast_id);
}
// Something did not match; must use a polymorphic load.
@@ -4669,8 +4676,7 @@ void HOptimizedGraphBuilder::HandlePolymorphicLoadNamedField(
LookupResult lookup(isolate());
if (ComputeLoadStoreField(map, name, &lookup, false) ||
(lookup.IsCacheable() &&
- !map->is_dictionary_map() &&
- !map->has_named_interceptor() &&
+ CanInlinePropertyAccess(*map) &&
(lookup.IsConstant() ||
(!lookup.IsFound() &&
PrototypeChainCanNeverResolve(map, name))))) {
@@ -4690,8 +4696,8 @@ void HOptimizedGraphBuilder::HandlePolymorphicLoadNamedField(
// TODO(verwaest): Merge logic with BuildLoadNamedMonomorphic.
if (lookup.IsField()) {
HObjectAccess access = HObjectAccess::ForField(map, &lookup, name);
- HLoadNamedField* load = BuildLoadNamedField(object, access, compare);
- load->set_position(expr->position());
+ HLoadNamedField* load = BuildLoadNamedField(compare, access);
+ load->set_position(position);
AddInstruction(load);
if (!ast_context()->IsEffect()) Push(load);
} else if (lookup.IsConstant()) {
@@ -4722,22 +4728,23 @@ void HOptimizedGraphBuilder::HandlePolymorphicLoadNamedField(
if (count == types->length() && FLAG_deoptimize_uncommon_cases) {
FinishExitWithHardDeoptimization("Unknown map in polymorphic load", join);
} else {
- HInstruction* load = BuildLoadNamedGeneric(object, name, expr);
- load->set_position(expr->position());
+ HValue* context = environment()->context();
+ HInstruction* load = new(zone()) HLoadNamedGeneric(context, object, name);
+ load->set_position(position);
AddInstruction(load);
if (!ast_context()->IsEffect()) Push(load);
if (join != NULL) {
current_block()->Goto(join);
} else {
- Add<HSimulate>(expr->id(), REMOVABLE_SIMULATE);
+ Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
if (!ast_context()->IsEffect()) ast_context()->ReturnValue(Pop());
return;
}
}
ASSERT(join != NULL);
- join->SetJoinId(expr->id());
+ join->SetJoinId(ast_id);
set_current_block(join);
if (!ast_context()->IsEffect()) ast_context()->ReturnValue(Pop());
}
@@ -4747,8 +4754,7 @@ bool HOptimizedGraphBuilder::TryStorePolymorphicAsMonomorphic(
int position,
BailoutId assignment_id,
HValue* object,
- HValue* store_value,
- HValue* result_value,
+ HValue* value,
SmallMapList* types,
Handle<String> name) {
// Use monomorphic store if property lookup results in the same field index
@@ -4790,18 +4796,18 @@ bool HOptimizedGraphBuilder::TryStorePolymorphicAsMonomorphic(
// Everything matched; can use monomorphic store.
BuildCheckHeapObject(object);
- Add<HCheckMaps>(object, types);
+ HCheckMaps* checked_object = Add<HCheckMaps>(object, types);
HInstruction* store;
CHECK_ALIVE_OR_RETURN(
store = BuildStoreNamedField(
- object, name, store_value, types->at(count - 1), &lookup),
+ checked_object, name, value, types->at(count - 1), &lookup),
true);
- if (!ast_context()->IsEffect()) Push(result_value);
+ if (!ast_context()->IsEffect()) Push(value);
store->set_position(position);
AddInstruction(store);
Add<HSimulate>(assignment_id);
if (!ast_context()->IsEffect()) Drop(1);
- ast_context()->ReturnValue(result_value);
+ ast_context()->ReturnValue(value);
return true;
}
@@ -4810,13 +4816,11 @@ void HOptimizedGraphBuilder::HandlePolymorphicStoreNamedField(
int position,
BailoutId assignment_id,
HValue* object,
- HValue* store_value,
- HValue* result_value,
+ HValue* value,
SmallMapList* types,
Handle<String> name) {
if (TryStorePolymorphicAsMonomorphic(
- position, assignment_id, object,
- store_value, result_value, types, name)) {
+ position, assignment_id, object, value, types, name)) {
return;
}
@@ -4843,11 +4847,11 @@ void HOptimizedGraphBuilder::HandlePolymorphicStoreNamedField(
set_current_block(if_true);
HInstruction* instr;
CHECK_ALIVE(instr = BuildStoreNamedField(
- object, name, store_value, map, &lookup));
+ compare, name, value, map, &lookup));
instr->set_position(position);
// Goto will add the HSimulate for the store.
AddInstruction(instr);
- if (!ast_context()->IsEffect()) Push(result_value);
+ if (!ast_context()->IsEffect()) Push(value);
current_block()->Goto(join);
set_current_block(if_false);
@@ -4860,13 +4864,13 @@ void HOptimizedGraphBuilder::HandlePolymorphicStoreNamedField(
if (count == types->length() && FLAG_deoptimize_uncommon_cases) {
FinishExitWithHardDeoptimization("Unknown map in polymorphic store", join);
} else {
- HInstruction* instr = BuildStoreNamedGeneric(object, name, store_value);
+ HInstruction* instr = BuildStoreNamedGeneric(object, name, value);
instr->set_position(position);
AddInstruction(instr);
if (join != NULL) {
if (!ast_context()->IsEffect()) {
- Push(result_value);
+ Push(value);
}
current_block()->Goto(join);
} else {
@@ -4877,12 +4881,12 @@ void HOptimizedGraphBuilder::HandlePolymorphicStoreNamedField(
if (ast_context()->IsEffect()) {
Add<HSimulate>(assignment_id, REMOVABLE_SIMULATE);
} else {
- Push(result_value);
+ Push(value);
Add<HSimulate>(assignment_id, REMOVABLE_SIMULATE);
Drop(1);
}
}
- return ast_context()->ReturnValue(result_value);
+ return ast_context()->ReturnValue(value);
}
}
@@ -4895,40 +4899,111 @@ void HOptimizedGraphBuilder::HandlePolymorphicStoreNamedField(
}
-void HOptimizedGraphBuilder::HandlePropertyAssignment(Assignment* expr) {
- Property* prop = expr->target()->AsProperty();
- ASSERT(prop != NULL);
- CHECK_ALIVE(VisitForValue(prop->obj()));
+static bool ComputeReceiverTypes(Expression* expr,
+ HValue* receiver,
+ SmallMapList** t) {
+ SmallMapList* types = expr->GetReceiverTypes();
+ *t = types;
+ bool monomorphic = expr->IsMonomorphic();
+ if (types != NULL && receiver->HasMonomorphicJSObjectType()) {
+ Map* root_map = receiver->GetMonomorphicJSObjectMap()->FindRootMap();
+ types->FilterForPossibleTransitions(root_map);
+ monomorphic = types->length() == 1;
+ }
+ return monomorphic && CanInlinePropertyAccess(*types->first());
+}
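// A minimal sketch of the receiver-type narrowing above, with hypothetical
// Map/feedback types: when the receiver's map is statically known, feedback
// maps that cannot be transition-reached from its root map are filtered out,
// and the access is treated as monomorphic only if exactly one map survives.
#include <algorithm>
#include <vector>

struct MapSketch { const MapSketch* root; };  // root from FindRootMap()

bool IsMonomorphic(std::vector<const MapSketch*>* feedback,
                   const MapSketch* known_receiver_map) {
  if (known_receiver_map != nullptr) {
    const MapSketch* root = known_receiver_map->root;
    feedback->erase(
        std::remove_if(feedback->begin(), feedback->end(),
                       [root](const MapSketch* m) { return m->root != root; }),
        feedback->end());
  }
  return feedback->size() == 1;
}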
- if (prop->key()->IsPropertyName()) {
- // Named store.
- CHECK_ALIVE(VisitForValue(expr->value()));
- HValue* value = environment()->ExpressionStackAt(0);
- HValue* object = environment()->ExpressionStackAt(1);
- if (expr->IsUninitialized()) {
- Add<HDeoptimize>("Insufficient type feedback for property assignment",
- Deoptimizer::SOFT);
- }
- return BuildStoreNamed(expr, expr->id(), expr->position(),
- expr->AssignmentId(), prop, object, value, value);
- } else {
+void HOptimizedGraphBuilder::BuildStore(Expression* expr,
+ Property* prop,
+ BailoutId ast_id,
+ BailoutId return_id,
+ bool is_uninitialized) {
+ HValue* value = environment()->ExpressionStackAt(0);
+
+ if (!prop->key()->IsPropertyName()) {
// Keyed store.
- CHECK_ALIVE(VisitForValue(prop->key()));
- CHECK_ALIVE(VisitForValue(expr->value()));
- HValue* value = environment()->ExpressionStackAt(0);
HValue* key = environment()->ExpressionStackAt(1);
HValue* object = environment()->ExpressionStackAt(2);
bool has_side_effects = false;
- HandleKeyedElementAccess(object, key, value, expr, expr->AssignmentId(),
+ HandleKeyedElementAccess(object, key, value, expr, return_id,
expr->position(),
true, // is_store
&has_side_effects);
Drop(3);
Push(value);
- Add<HSimulate>(expr->AssignmentId(), REMOVABLE_SIMULATE);
+ Add<HSimulate>(return_id, REMOVABLE_SIMULATE);
return ast_context()->ReturnValue(Pop());
}
+
+ // Named store.
+ HValue* object = environment()->ExpressionStackAt(1);
+
+ if (is_uninitialized) {
+ Add<HDeoptimize>("Insufficient type feedback for property assignment",
+ Deoptimizer::SOFT);
+ }
+
+ Literal* key = prop->key()->AsLiteral();
+ Handle<String> name = Handle<String>::cast(key->value());
+ ASSERT(!name.is_null());
+
+ HInstruction* instr = NULL;
+
+ SmallMapList* types;
+ bool monomorphic = ComputeReceiverTypes(expr, object, &types);
+
+ if (monomorphic) {
+ Handle<Map> map = types->first();
+ Handle<JSFunction> setter;
+ Handle<JSObject> holder;
+ if (LookupSetter(map, name, &setter, &holder)) {
+ AddCheckConstantFunction(holder, object, map);
+ if (FLAG_inline_accessors &&
+ TryInlineSetter(setter, ast_id, return_id, value)) {
+ return;
+ }
+ Drop(2);
+ Add<HPushArgument>(object);
+ Add<HPushArgument>(value);
+ instr = new(zone()) HCallConstantFunction(setter, 2);
+ } else {
+ Drop(2);
+ CHECK_ALIVE(instr = BuildStoreNamedMonomorphic(object,
+ name,
+ value,
+ map));
+ }
+ } else if (types != NULL && types->length() > 1) {
+ Drop(2);
+ return HandlePolymorphicStoreNamedField(
+ expr->position(), ast_id, object, value, types, name);
+ } else {
+ Drop(2);
+ instr = BuildStoreNamedGeneric(object, name, value);
+ }
+
+ if (!ast_context()->IsEffect()) Push(value);
+ instr->set_position(expr->position());
+ AddInstruction(instr);
+ if (instr->HasObservableSideEffects()) {
+ Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
+ }
+ if (!ast_context()->IsEffect()) Drop(1);
+ return ast_context()->ReturnValue(value);
+}
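
BuildStore now owns the whole strategy choice for a named store: inline the setter when it is known and accessor inlining is enabled, emit a direct monomorphic store for a single map, hand several maps to the polymorphic handler, and fall back to a generic store when there is no usable feedback. The shape of that decision, as an illustrative sketch rather than V8's actual control flow:

    #include <cstddef>

    enum class StoreKind { kInlinedSetter, kMonomorphic, kPolymorphic, kGeneric };

    // Illustrative: pick a store strategy from the number of receiver maps
    // seen in type feedback and whether a constant setter was found.
    StoreKind ChooseStoreStrategy(std::size_t map_count, bool has_known_setter,
                                  bool inline_accessors) {
      if (map_count == 1) {
        if (has_known_setter && inline_accessors) return StoreKind::kInlinedSetter;
        return StoreKind::kMonomorphic;
      }
      if (map_count > 1) return StoreKind::kPolymorphic;
      return StoreKind::kGeneric;  // no usable feedback
    }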
+
+
+void HOptimizedGraphBuilder::HandlePropertyAssignment(Assignment* expr) {
+ Property* prop = expr->target()->AsProperty();
+ ASSERT(prop != NULL);
+ CHECK_ALIVE(VisitForValue(prop->obj()));
+ if (!prop->key()->IsPropertyName()) {
+ CHECK_ALIVE(VisitForValue(prop->key()));
+ }
+ CHECK_ALIVE(VisitForValue(expr->value()));
+ BuildStore(expr, prop, expr->id(),
+ expr->AssignmentId(), expr->IsUninitialized());
}
@@ -4977,70 +5052,6 @@ void HOptimizedGraphBuilder::HandleGlobalVariableAssignment(
}
-void HOptimizedGraphBuilder::BuildStoreNamed(Expression* expr,
- BailoutId id,
- int position,
- BailoutId assignment_id,
- Property* prop,
- HValue* object,
- HValue* store_value,
- HValue* result_value) {
- Literal* key = prop->key()->AsLiteral();
- Handle<String> name = Handle<String>::cast(key->value());
- ASSERT(!name.is_null());
-
- HInstruction* instr = NULL;
- SmallMapList* types = expr->GetReceiverTypes();
- bool monomorphic = expr->IsMonomorphic();
- Handle<Map> map;
- if (monomorphic) {
- map = types->first();
- if (map->is_dictionary_map()) monomorphic = false;
- }
- if (monomorphic) {
- Handle<JSFunction> setter;
- Handle<JSObject> holder;
- if (LookupSetter(map, name, &setter, &holder)) {
- AddCheckConstantFunction(holder, object, map);
- // Don't try to inline if the result_value is different from the
- // store_value. That case isn't handled yet by the inlining.
- if (result_value == store_value &&
- FLAG_inline_accessors &&
- TryInlineSetter(setter, id, assignment_id, store_value)) {
- return;
- }
- Drop(2);
- Add<HPushArgument>(object);
- Add<HPushArgument>(store_value);
- instr = new(zone()) HCallConstantFunction(setter, 2);
- } else {
- Drop(2);
- CHECK_ALIVE(instr = BuildStoreNamedMonomorphic(object,
- name,
- store_value,
- map));
- }
- } else if (types != NULL && types->length() > 1) {
- Drop(2);
- return HandlePolymorphicStoreNamedField(
- position, id, object,
- store_value, result_value, types, name);
- } else {
- Drop(2);
- instr = BuildStoreNamedGeneric(object, name, store_value);
- }
-
- if (!ast_context()->IsEffect()) Push(result_value);
- instr->set_position(position);
- AddInstruction(instr);
- if (instr->HasObservableSideEffects()) {
- Add<HSimulate>(id, REMOVABLE_SIMULATE);
- }
- if (!ast_context()->IsEffect()) Drop(1);
- return ast_context()->ReturnValue(result_value);
-}
-
-
void HOptimizedGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
Expression* target = expr->target();
VariableProxy* proxy = target->AsVariableProxy();
@@ -5122,89 +5133,30 @@ void HOptimizedGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
return ast_context()->ReturnValue(Pop());
} else if (prop != NULL) {
- if (prop->key()->IsPropertyName()) {
- // Named property.
- CHECK_ALIVE(VisitForValue(prop->obj()));
- HValue* object = Top();
-
- Handle<String> name = prop->key()->AsLiteral()->AsPropertyName();
- Handle<Map> map;
- HInstruction* load = NULL;
- SmallMapList* types = prop->GetReceiverTypes();
- bool monomorphic = prop->IsMonomorphic();
- if (monomorphic) {
- map = types->first();
- // We can't generate code for a monomorphic dict mode load so
- // just pretend it is not monomorphic.
- if (map->is_dictionary_map()) monomorphic = false;
- }
- if (monomorphic) {
- Handle<JSFunction> getter;
- Handle<JSObject> holder;
- if (LookupGetter(map, name, &getter, &holder)) {
- load = BuildCallGetter(object, map, getter, holder);
- } else {
- load = BuildLoadNamedMonomorphic(object, name, prop, map);
- }
- } else if (types != NULL && types->length() > 1) {
- load = TryLoadPolymorphicAsMonomorphic(prop, object, types, name);
- }
- if (load == NULL) load = BuildLoadNamedGeneric(object, name, prop);
- PushAndAdd(load);
- if (load->HasObservableSideEffects()) {
- Add<HSimulate>(prop->LoadId(), REMOVABLE_SIMULATE);
- }
-
- CHECK_ALIVE(VisitForValue(expr->value()));
- HValue* right = Pop();
- HValue* left = Pop();
-
- HInstruction* instr = BuildBinaryOperation(operation, left, right);
- PushAndAdd(instr);
- if (instr->HasObservableSideEffects()) {
- Add<HSimulate>(operation->id(), REMOVABLE_SIMULATE);
- }
-
- return BuildStoreNamed(expr, expr->id(), expr->position(),
- expr->AssignmentId(), prop, object, instr, instr);
- } else {
- // Keyed property.
- CHECK_ALIVE(VisitForValue(prop->obj()));
+ CHECK_ALIVE(VisitForValue(prop->obj()));
+ HValue* object = Top();
+ HValue* key = NULL;
+ if ((!prop->IsStringLength() &&
+ !prop->IsFunctionPrototype() &&
+ !prop->key()->IsPropertyName()) ||
+ prop->IsStringAccess()) {
CHECK_ALIVE(VisitForValue(prop->key()));
- HValue* obj = environment()->ExpressionStackAt(1);
- HValue* key = environment()->ExpressionStackAt(0);
-
- bool has_side_effects = false;
- HValue* load = HandleKeyedElementAccess(
- obj, key, NULL, prop, prop->LoadId(), RelocInfo::kNoPosition,
- false, // is_store
- &has_side_effects);
- Push(load);
- if (has_side_effects) Add<HSimulate>(prop->LoadId(), REMOVABLE_SIMULATE);
-
- CHECK_ALIVE(VisitForValue(expr->value()));
- HValue* right = Pop();
- HValue* left = Pop();
-
- HInstruction* instr = BuildBinaryOperation(operation, left, right);
- PushAndAdd(instr);
- if (instr->HasObservableSideEffects()) {
- Add<HSimulate>(operation->id(), REMOVABLE_SIMULATE);
- }
+ key = Top();
+ }
- HandleKeyedElementAccess(obj, key, instr, expr, expr->AssignmentId(),
- RelocInfo::kNoPosition,
- true, // is_store
- &has_side_effects);
+ CHECK_ALIVE(PushLoad(prop, object, key, expr->position()));
- // Drop the simulated receiver, key, and value. Return the value.
- Drop(3);
- Push(instr);
- ASSERT(has_side_effects); // Stores always have side effects.
- Add<HSimulate>(expr->AssignmentId(), REMOVABLE_SIMULATE);
- return ast_context()->ReturnValue(Pop());
- }
+ CHECK_ALIVE(VisitForValue(expr->value()));
+ HValue* right = Pop();
+ HValue* left = Pop();
+ HInstruction* instr = BuildBinaryOperation(operation, left, right);
+ PushAndAdd(instr);
+ if (instr->HasObservableSideEffects()) {
+ Add<HSimulate>(operation->id(), REMOVABLE_SIMULATE);
+ }
+ BuildStore(expr, prop, expr->id(),
+ expr->AssignmentId(), expr->IsUninitialized());
} else {
return Bailout(kInvalidLhsInCompoundAssignment);
}
@@ -5361,32 +5313,29 @@ void HOptimizedGraphBuilder::VisitThrow(Throw* expr) {
HLoadNamedField* HGraphBuilder::BuildLoadNamedField(HValue* object,
- HObjectAccess access,
- HValue* typecheck) {
+ HObjectAccess access) {
if (FLAG_track_double_fields && access.representation().IsDouble()) {
// load the heap number
HLoadNamedField* heap_number = Add<HLoadNamedField>(
object, access.WithRepresentation(Representation::Tagged()));
heap_number->set_type(HType::HeapNumber());
// load the double value from it
- return New<HLoadNamedField>(heap_number,
- HObjectAccess::ForHeapNumberValue(),
- typecheck);
+ return New<HLoadNamedField>(
+ heap_number, HObjectAccess::ForHeapNumberValue());
}
- return New<HLoadNamedField>(object, access, typecheck);
+ return New<HLoadNamedField>(object, access);
}
HInstruction* HGraphBuilder::BuildLoadStringLength(HValue* object,
- HValue* typecheck) {
+ HValue* checked_string) {
if (FLAG_fold_constants && object->IsConstant()) {
HConstant* constant = HConstant::cast(object);
if (constant->HasStringValue()) {
return New<HConstant>(constant->StringValue()->length());
}
}
- return BuildLoadNamedField(
- object, HObjectAccess::ForStringLength(), typecheck);
+ return BuildLoadNamedField(checked_string, HObjectAccess::ForStringLength());
}
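
BuildLoadStringLength keeps its constant-folding fast path (behind FLAG_fold_constants) and otherwise loads the length field from the already-checked string, which is the point of the typecheck-to-checked_string rename. The fold in isolation, sketched with plain C++ types:

    #include <optional>
    #include <string>

    // Illustrative: if the receiver is a compile-time constant string,
    // its length is a constant too; otherwise the caller must emit a
    // runtime load of the length field.
    std::optional<int> TryFoldStringLength(const std::string* constant_string) {
      if (constant_string != nullptr) {
        return static_cast<int>(constant_string->length());
      }
      return std::nullopt;
    }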
@@ -5395,7 +5344,7 @@ HInstruction* HOptimizedGraphBuilder::BuildLoadNamedGeneric(
Handle<String> name,
Property* expr) {
if (expr->IsUninitialized()) {
- Add<HDeoptimize>("Insufficient feedback for generic named load",
+ Add<HDeoptimize>("Insufficient type feedback for generic named load",
Deoptimizer::SOFT);
}
HValue* context = environment()->context();
@@ -5417,7 +5366,6 @@ HInstruction* HOptimizedGraphBuilder::BuildCallGetter(
HInstruction* HOptimizedGraphBuilder::BuildLoadNamedMonomorphic(
HValue* object,
Handle<String> name,
- Property* expr,
Handle<Map> map) {
// Handle a load from a known field.
ASSERT(!map->is_dictionary_map());
@@ -5425,18 +5373,19 @@ HInstruction* HOptimizedGraphBuilder::BuildLoadNamedMonomorphic(
// Handle access to various length properties
if (name->Equals(isolate()->heap()->length_string())) {
if (map->instance_type() == JS_ARRAY_TYPE) {
- HCheckMaps* type_check = AddCheckMap(object, map);
- return New<HLoadNamedField>(object,
- HObjectAccess::ForArrayLength(map->elements_kind()), type_check);
+ HCheckMaps* checked_object = AddCheckMap(object, map);
+ return New<HLoadNamedField>(
+ checked_object, HObjectAccess::ForArrayLength(map->elements_kind()));
}
}
LookupResult lookup(isolate());
map->LookupDescriptor(NULL, *name, &lookup);
if (lookup.IsField()) {
- HCheckMaps* type_check = AddCheckMap(object, map);
- return BuildLoadNamedField(object,
- HObjectAccess::ForField(map, &lookup, name), type_check);
+ HCheckMaps* checked_object = AddCheckMap(object, map);
+ ASSERT(map->IsJSObjectMap());
+ return BuildLoadNamedField(
+ checked_object, HObjectAccess::ForField(map, &lookup, name));
}
// Handle a load of a constant known function.
@@ -5446,17 +5395,22 @@ HInstruction* HOptimizedGraphBuilder::BuildLoadNamedMonomorphic(
return New<HConstant>(constant);
}
+ if (lookup.IsFound()) {
+ // Cannot handle the property, do a generic load instead.
+ HValue* context = environment()->context();
+ return new(zone()) HLoadNamedGeneric(context, object, name);
+ }
+
// Handle a load from a known field somewhere in the prototype chain.
LookupInPrototypes(map, name, &lookup);
if (lookup.IsField()) {
Handle<JSObject> prototype(JSObject::cast(map->prototype()));
Handle<JSObject> holder(lookup.holder());
Handle<Map> holder_map(holder->map());
- HCheckMaps* type_check = AddCheckMap(object, map);
- BuildCheckPrototypeMaps(prototype, holder);
- HValue* holder_value = Add<HConstant>(holder);
- return BuildLoadNamedField(holder_value,
- HObjectAccess::ForField(holder_map, &lookup, name), type_check);
+ AddCheckMap(object, map);
+ HValue* checked_holder = BuildCheckPrototypeMaps(prototype, holder);
+ return BuildLoadNamedField(
+ checked_holder, HObjectAccess::ForField(holder_map, &lookup, name));
}
// Handle a load of a constant function somewhere in the prototype chain.
@@ -5471,7 +5425,8 @@ HInstruction* HOptimizedGraphBuilder::BuildLoadNamedMonomorphic(
}
// No luck, do a generic load.
- return BuildLoadNamedGeneric(object, name, expr);
+ HValue* context = environment()->context();
+ return new(zone()) HLoadNamedGeneric(context, object, name);
}
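
The monomorphic named-load path checks the receiver's map once and then reads the field at the statically known offset, and it gains a new early-out: if the lookup finds an own property that is not a plain field or constant function, the builder now emits a generic load instead of probing the prototype chain. An illustrative check-then-load, with hypothetical layout types:

    #include <cstdint>
    #include <stdexcept>

    // Illustrative object layout: a map pointer followed by in-object fields.
    struct Obj {
      const void* map;
      std::uintptr_t fields[4];
    };

    // Monomorphic load: verify the map recorded by type feedback, then read
    // the field at the offset the map's descriptors assign to the property.
    std::uintptr_t MonomorphicLoad(const Obj& o, const void* expected_map,
                                   int field_index) {
      if (o.map != expected_map) {
        throw std::runtime_error("map check failed: deoptimize");
      }
      return o.fields[field_index];
    }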
@@ -5482,19 +5437,7 @@ HInstruction* HOptimizedGraphBuilder::BuildLoadKeyedGeneric(HValue* object,
}
-HInstruction* HOptimizedGraphBuilder::BuildMonomorphicElementAccess(
- HValue* object,
- HValue* key,
- HValue* val,
- HValue* dependency,
- Handle<Map> map,
- bool is_store,
- KeyedAccessStoreMode store_mode) {
- HCheckMaps* mapcheck = Add<HCheckMaps>(object, map, top_info(), dependency);
- if (dependency) {
- mapcheck->ClearGVNFlag(kDependsOnElementsKind);
- }
-
+LoadKeyedHoleMode HOptimizedGraphBuilder::BuildKeyedHoleMode(Handle<Map> map) {
  // Loads from "stock" fast holey double arrays can elide the hole check.
LoadKeyedHoleMode load_mode = NEVER_RETURN_HOLE;
if (*map == isolate()->get_initial_js_array_map(FAST_HOLEY_DOUBLE_ELEMENTS) &&
@@ -5506,10 +5449,30 @@ HInstruction* HOptimizedGraphBuilder::BuildMonomorphicElementAccess(
graph()->MarkDependsOnEmptyArrayProtoElements();
}
+ return load_mode;
+}
+
+
+HInstruction* HOptimizedGraphBuilder::BuildMonomorphicElementAccess(
+ HValue* object,
+ HValue* key,
+ HValue* val,
+ HValue* dependency,
+ Handle<Map> map,
+ bool is_store,
+ KeyedAccessStoreMode store_mode) {
+ HCheckMaps* checked_object = Add<HCheckMaps>(object, map, top_info(),
+ dependency);
+ if (dependency) {
+ checked_object->ClearGVNFlag(kDependsOnElementsKind);
+ }
+
+ LoadKeyedHoleMode load_mode = BuildKeyedHoleMode(map);
return BuildUncheckedMonomorphicElementAccess(
- object, key, val,
- mapcheck, map->instance_type() == JS_ARRAY_TYPE,
- map->elements_kind(), is_store, load_mode, store_mode);
+ checked_object, key, val,
+ map->instance_type() == JS_ARRAY_TYPE,
+ map->elements_kind(), is_store,
+ load_mode, store_mode);
}
@@ -5530,6 +5493,7 @@ HInstruction* HOptimizedGraphBuilder::TryBuildConsolidatedElementLoad(
Handle<Map> most_general_consolidated_map;
for (int i = 0; i < maps->length(); ++i) {
Handle<Map> map = maps->at(i);
+ if (!map->IsJSObjectMap()) return NULL;
// Don't allow mixing of JSArrays with JSObjects.
if (map->instance_type() == JS_ARRAY_TYPE) {
if (has_non_js_array_access) return NULL;
@@ -5563,14 +5527,14 @@ HInstruction* HOptimizedGraphBuilder::TryBuildConsolidatedElementLoad(
}
if (!has_double_maps && !has_smi_or_object_maps) return NULL;
- HCheckMaps* check_maps = Add<HCheckMaps>(object, maps);
+ HCheckMaps* checked_object = Add<HCheckMaps>(object, maps);
// FAST_ELEMENTS is considered more general than FAST_HOLEY_SMI_ELEMENTS.
// If we've seen both, the consolidated load must use FAST_HOLEY_ELEMENTS.
ElementsKind consolidated_elements_kind = has_seen_holey_elements
? GetHoleyElementsKind(most_general_consolidated_map->elements_kind())
: most_general_consolidated_map->elements_kind();
HInstruction* instr = BuildUncheckedMonomorphicElementAccess(
- object, key, val, check_maps,
+ checked_object, key, val,
most_general_consolidated_map->instance_type() == JS_ARRAY_TYPE,
consolidated_elements_kind,
false, NEVER_RETURN_HOLE, STANDARD_STORE);
@@ -5582,7 +5546,7 @@ HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess(
HValue* object,
HValue* key,
HValue* val,
- Expression* prop,
+ SmallMapList* maps,
BailoutId ast_id,
int position,
bool is_store,
@@ -5590,7 +5554,6 @@ HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess(
bool* has_side_effects) {
*has_side_effects = false;
BuildCheckHeapObject(object);
- SmallMapList* maps = prop->GetReceiverTypes();
if (!is_store) {
HInstruction* consolidated_load =
@@ -5646,7 +5609,8 @@ HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess(
if (untransitionable_maps.length() == 1) {
Handle<Map> untransitionable_map = untransitionable_maps[0];
HInstruction* instr = NULL;
- if (untransitionable_map->has_slow_elements_kind()) {
+ if (untransitionable_map->has_slow_elements_kind() ||
+ !untransitionable_map->IsJSObjectMap()) {
instr = AddInstruction(is_store ? BuildStoreKeyedGeneric(object, key, val)
: BuildLoadKeyedGeneric(object, key));
} else {
@@ -5659,14 +5623,11 @@ HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess(
return is_store ? NULL : instr;
}
- HInstruction* checkspec =
- AddInstruction(HCheckInstanceType::NewIsSpecObject(object, zone()));
HBasicBlock* join = graph()->CreateBasicBlock();
- HInstruction* elements = AddLoadElements(object, checkspec);
-
for (int i = 0; i < untransitionable_maps.length(); ++i) {
Handle<Map> map = untransitionable_maps[i];
+ if (!map->IsJSObjectMap()) continue;
ElementsKind elements_kind = map->elements_kind();
HBasicBlock* this_map = graph()->CreateBasicBlock();
HBasicBlock* other_map = graph()->CreateBasicBlock();
@@ -5675,40 +5636,22 @@ HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess(
current_block()->Finish(mapcompare);
set_current_block(this_map);
- HInstruction* checked_key = NULL;
HInstruction* access = NULL;
- if (IsFastElementsKind(elements_kind)) {
- if (is_store && !IsFastDoubleElementsKind(elements_kind)) {
- Add<HCheckMaps>(
- elements, isolate()->factory()->fixed_array_map(),
- top_info(), mapcompare);
- }
- if (map->instance_type() == JS_ARRAY_TYPE) {
- HInstruction* length = Add<HLoadNamedField>(
- object, HObjectAccess::ForArrayLength(elements_kind), mapcompare);
- checked_key = Add<HBoundsCheck>(key, length);
- } else {
- HInstruction* length = AddLoadFixedArrayLength(elements);
- checked_key = Add<HBoundsCheck>(key, length);
- }
- access = AddFastElementAccess(
- elements, checked_key, val, mapcompare,
- elements_kind, is_store, NEVER_RETURN_HOLE, STANDARD_STORE);
- } else if (IsDictionaryElementsKind(elements_kind)) {
- if (is_store) {
- access = AddInstruction(BuildStoreKeyedGeneric(object, key, val));
- } else {
- access = AddInstruction(BuildLoadKeyedGeneric(object, key));
- }
+ if (IsDictionaryElementsKind(elements_kind)) {
+ access = is_store
+ ? AddInstruction(BuildStoreKeyedGeneric(object, key, val))
+ : AddInstruction(BuildLoadKeyedGeneric(object, key));
} else {
- ASSERT(IsExternalArrayElementsKind(elements_kind));
- HInstruction* length = AddLoadFixedArrayLength(elements);
- checked_key = Add<HBoundsCheck>(key, length);
- HLoadExternalArrayPointer* external_elements =
- Add<HLoadExternalArrayPointer>(elements);
- access = AddExternalArrayElementAccess(
- external_elements, checked_key, val,
- mapcompare, elements_kind, is_store);
+ ASSERT(IsFastElementsKind(elements_kind) ||
+ IsExternalArrayElementsKind(elements_kind));
+ LoadKeyedHoleMode load_mode = BuildKeyedHoleMode(map);
+      // mapcompare already serves as the checked object here.
+ access = BuildUncheckedMonomorphicElementAccess(
+ mapcompare, key, val,
+ map->instance_type() == JS_ARRAY_TYPE,
+ elements_kind, is_store,
+ load_mode,
+ store_mode);
}
*has_side_effects |= access->HasObservableSideEffects();
// The caller will use has_side_effects and add a correct Simulate.
@@ -5724,7 +5667,7 @@ HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess(
// Deopt if none of the cases matched.
NoObservableSideEffectsScope scope(this);
- FinishExitWithHardDeoptimization("Unknown type in polymorphic element access",
+ FinishExitWithHardDeoptimization("Unknown map in polymorphic element access",
join);
set_current_block(join);
return is_store ? NULL : Pop();
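
For the remaining polymorphic case, each surviving map gets its own compare-and-branch whose true block reuses the unchecked monomorphic element access (the map compare doubles as the checked object), and the fall-through block ends in a hard deoptimization. The dispatch shape, reduced to an illustrative sketch:

    #include <functional>
    #include <stdexcept>
    #include <vector>

    struct MapCase {
      const void* map;
      std::function<int()> access;  // per-elements-kind access for this map
    };

    // Illustrative: try each known map in turn; the first match runs its
    // specialized access, and an unknown map deoptimizes.
    int PolymorphicElementAccess(const void* receiver_map,
                                 const std::vector<MapCase>& cases) {
      for (const MapCase& c : cases) {
        if (receiver_map == c.map) return c.access();
      }
      throw std::runtime_error("Unknown map in polymorphic element access");
    }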
@@ -5742,8 +5685,12 @@ HValue* HOptimizedGraphBuilder::HandleKeyedElementAccess(
bool* has_side_effects) {
ASSERT(!expr->IsPropertyName());
HInstruction* instr = NULL;
- if (expr->IsMonomorphic()) {
- Handle<Map> map = expr->GetMonomorphicReceiverType();
+
+ SmallMapList* types;
+ bool monomorphic = ComputeReceiverTypes(expr, obj, &types);
+
+ if (monomorphic) {
+ Handle<Map> map = types->first();
if (map->has_slow_elements_kind()) {
instr = is_store ? BuildStoreKeyedGeneric(obj, key, val)
: BuildLoadKeyedGeneric(obj, key);
@@ -5753,21 +5700,20 @@ HValue* HOptimizedGraphBuilder::HandleKeyedElementAccess(
instr = BuildMonomorphicElementAccess(
obj, key, val, NULL, map, is_store, expr->GetStoreMode());
}
- } else if (expr->GetReceiverTypes() != NULL &&
- !expr->GetReceiverTypes()->is_empty()) {
+ } else if (types != NULL && !types->is_empty()) {
return HandlePolymorphicElementAccess(
- obj, key, val, expr, ast_id, position, is_store,
+ obj, key, val, types, ast_id, position, is_store,
expr->GetStoreMode(), has_side_effects);
} else {
if (is_store) {
if (expr->IsAssignment() && expr->AsAssignment()->IsUninitialized()) {
- Add<HDeoptimize>("Insufficient feedback for keyed store",
+ Add<HDeoptimize>("Insufficient type feedback for keyed store",
Deoptimizer::SOFT);
}
instr = BuildStoreKeyedGeneric(obj, key, val);
} else {
if (expr->AsProperty()->IsUninitialized()) {
- Add<HDeoptimize>("Insufficient feedback for keyed load",
+ Add<HDeoptimize>("Insufficient type feedback for keyed load",
Deoptimizer::SOFT);
}
instr = BuildLoadKeyedGeneric(obj, key);
@@ -5846,8 +5792,7 @@ bool HOptimizedGraphBuilder::TryArgumentsAccess(Property* expr) {
}
} else {
Push(graph()->GetArgumentsObject());
- VisitForValue(expr->key());
- if (HasStackOverflow() || current_block() == NULL) return true;
+ CHECK_ALIVE_OR_RETURN(VisitForValue(expr->key()), true);
HValue* key = Pop();
Drop(1); // Arguments object.
if (function_state()->outer() == NULL) {
@@ -5872,15 +5817,20 @@ bool HOptimizedGraphBuilder::TryArgumentsAccess(Property* expr) {
}
-void HOptimizedGraphBuilder::VisitProperty(Property* expr) {
- ASSERT(!HasStackOverflow());
- ASSERT(current_block() != NULL);
- ASSERT(current_block()->HasPredecessor());
-
- if (TryArgumentsAccess(expr)) return;
+void HOptimizedGraphBuilder::PushLoad(Property* expr,
+ HValue* object,
+ HValue* key,
+ int position) {
+ ValueContext for_value(this, ARGUMENTS_NOT_ALLOWED);
+ Push(object);
+ if (key != NULL) Push(key);
+ BuildLoad(expr, position, expr->LoadId());
+}
- CHECK_ALIVE(VisitForValue(expr->obj()));
+void HOptimizedGraphBuilder::BuildLoad(Property* expr,
+ int position,
+ BailoutId ast_id) {
HInstruction* instr = NULL;
if (expr->IsStringLength()) {
HValue* string = Pop();
@@ -5889,7 +5839,6 @@ void HOptimizedGraphBuilder::VisitProperty(Property* expr) {
AddInstruction(HCheckInstanceType::NewIsString(string, zone()));
instr = BuildLoadStringLength(string, checkstring);
} else if (expr->IsStringAccess()) {
- CHECK_ALIVE(VisitForValue(expr->key()));
HValue* index = Pop();
HValue* string = Pop();
HValue* context = environment()->context();
@@ -5905,86 +5854,105 @@ void HOptimizedGraphBuilder::VisitProperty(Property* expr) {
} else if (expr->key()->IsPropertyName()) {
Handle<String> name = expr->key()->AsLiteral()->AsPropertyName();
- SmallMapList* types = expr->GetReceiverTypes();
HValue* object = Top();
- Handle<Map> map;
- bool monomorphic = false;
- if (expr->IsMonomorphic()) {
- map = types->first();
- monomorphic = !map->is_dictionary_map();
- } else if (object->HasMonomorphicJSObjectType()) {
- map = object->GetMonomorphicJSObjectMap();
- monomorphic = !map->is_dictionary_map();
- }
+ SmallMapList* types;
+ bool monomorphic = ComputeReceiverTypes(expr, object, &types);
+
if (monomorphic) {
+ Handle<Map> map = types->first();
Handle<JSFunction> getter;
Handle<JSObject> holder;
if (LookupGetter(map, name, &getter, &holder)) {
AddCheckConstantFunction(holder, Top(), map);
- if (FLAG_inline_accessors && TryInlineGetter(getter, expr)) return;
+ if (FLAG_inline_accessors &&
+ TryInlineGetter(getter, ast_id, expr->LoadId())) {
+ return;
+ }
Add<HPushArgument>(Pop());
instr = new(zone()) HCallConstantFunction(getter, 1);
} else {
- instr = BuildLoadNamedMonomorphic(Pop(), name, expr, map);
+ instr = BuildLoadNamedMonomorphic(Pop(), name, map);
}
} else if (types != NULL && types->length() > 1) {
- return HandlePolymorphicLoadNamedField(expr, Pop(), types, name);
+ return HandlePolymorphicLoadNamedField(
+ position, ast_id, Pop(), types, name);
} else {
instr = BuildLoadNamedGeneric(Pop(), name, expr);
}
} else {
- CHECK_ALIVE(VisitForValue(expr->key()));
-
HValue* key = Pop();
HValue* obj = Pop();
bool has_side_effects = false;
HValue* load = HandleKeyedElementAccess(
- obj, key, NULL, expr, expr->id(), expr->position(),
+ obj, key, NULL, expr, ast_id, position,
false, // is_store
&has_side_effects);
if (has_side_effects) {
if (ast_context()->IsEffect()) {
- Add<HSimulate>(expr->id(), REMOVABLE_SIMULATE);
+ Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
} else {
Push(load);
- Add<HSimulate>(expr->id(), REMOVABLE_SIMULATE);
+ Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
Drop(1);
}
}
return ast_context()->ReturnValue(load);
}
- instr->set_position(expr->position());
- return ast_context()->ReturnInstruction(instr, expr->id());
+ instr->set_position(position);
+ return ast_context()->ReturnInstruction(instr, ast_id);
}
-void HGraphBuilder::BuildConstantMapCheck(Handle<JSObject> constant,
- CompilationInfo* info) {
+void HOptimizedGraphBuilder::VisitProperty(Property* expr) {
+ ASSERT(!HasStackOverflow());
+ ASSERT(current_block() != NULL);
+ ASSERT(current_block()->HasPredecessor());
+
+ if (TryArgumentsAccess(expr)) return;
+
+ CHECK_ALIVE(VisitForValue(expr->obj()));
+ if ((!expr->IsStringLength() &&
+ !expr->IsFunctionPrototype() &&
+ !expr->key()->IsPropertyName()) ||
+ expr->IsStringAccess()) {
+ CHECK_ALIVE(VisitForValue(expr->key()));
+ }
+
+ BuildLoad(expr, expr->position(), expr->id());
+}
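
The load path is split in two here: VisitProperty evaluates the receiver, and the key only when the access form actually consumes one, while BuildLoad works purely off the expression stack. That is what lets PushLoad replay a load for compound assignments and count operations without revisiting the sub-expressions. A sketch of the stack protocol, where ints stand in for HValues:

    #include <stack>

    // Illustrative: callers push the receiver, then the key for keyed
    // accesses, and the load pops in reverse order.
    int BuildLoadFromStack(std::stack<int>* env, bool is_keyed) {
      int key = 0;
      if (is_keyed) { key = env->top(); env->pop(); }
      int object = env->top(); env->pop();
      return object + key;  // placeholder for the emitted load instruction
    }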
+
+
+HInstruction* HGraphBuilder::BuildConstantMapCheck(Handle<JSObject> constant,
+ CompilationInfo* info) {
HConstant* constant_value = New<HConstant>(constant);
if (constant->map()->CanOmitMapChecks()) {
constant->map()->AddDependentCompilationInfo(
DependentCode::kPrototypeCheckGroup, info);
- return;
+ return constant_value;
}
AddInstruction(constant_value);
HCheckMaps* check =
Add<HCheckMaps>(constant_value, handle(constant->map()), info);
check->ClearGVNFlag(kDependsOnElementsKind);
+ return check;
}
-void HGraphBuilder::BuildCheckPrototypeMaps(Handle<JSObject> prototype,
- Handle<JSObject> holder) {
- BuildConstantMapCheck(prototype, top_info());
+HInstruction* HGraphBuilder::BuildCheckPrototypeMaps(Handle<JSObject> prototype,
+ Handle<JSObject> holder) {
while (!prototype.is_identical_to(holder)) {
- prototype = handle(JSObject::cast(prototype->GetPrototype()));
BuildConstantMapCheck(prototype, top_info());
+ prototype = handle(JSObject::cast(prototype->GetPrototype()));
}
+
+ HInstruction* checked_object = BuildConstantMapCheck(prototype, top_info());
+ if (!checked_object->IsLinked()) AddInstruction(checked_object);
+ return checked_object;
}
@@ -6384,7 +6352,7 @@ bool HOptimizedGraphBuilder::TryInline(CallKind call_kind,
return false;
}
AstProperties::Flags* flags(function->flags());
- if (flags->Contains(kDontInline) || flags->Contains(kDontOptimize)) {
+ if (flags->Contains(kDontInline) || function->dont_optimize()) {
TraceInline(target, caller, "target contains unsupported syntax [late]");
return false;
}
@@ -6630,13 +6598,14 @@ bool HOptimizedGraphBuilder::TryInlineConstruct(CallNew* expr,
bool HOptimizedGraphBuilder::TryInlineGetter(Handle<JSFunction> getter,
- Property* prop) {
+ BailoutId ast_id,
+ BailoutId return_id) {
return TryInline(CALL_AS_METHOD,
getter,
0,
NULL,
- prop->id(),
- prop->LoadId(),
+ ast_id,
+ return_id,
GETTER_CALL_RETURN);
}
@@ -6810,8 +6779,6 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall(
} else if (exponent == 2.0) {
result = HMul::New(zone(), context, left, left);
}
- } else if (right->EqualsInteger32Constant(2)) {
- result = HMul::New(zone(), context, left, left);
}
if (result == NULL) {
@@ -6893,14 +6860,12 @@ bool HOptimizedGraphBuilder::TryCallApply(Call* expr) {
if (!arg_two_value->CheckFlag(HValue::kIsArguments)) return false;
// Found pattern f.apply(receiver, arguments).
- VisitForValue(prop->obj());
- if (HasStackOverflow() || current_block() == NULL) return true;
+ CHECK_ALIVE_OR_RETURN(VisitForValue(prop->obj()), true);
HValue* function = Top();
AddCheckConstantFunction(expr->holder(), function, function_map);
Drop(1);
- VisitForValue(args->at(0));
- if (HasStackOverflow() || current_block() == NULL) return true;
+ CHECK_ALIVE_OR_RETURN(VisitForValue(args->at(0)), true);
HValue* receiver = Pop();
if (function_state()->outer() == NULL) {
@@ -6931,7 +6896,8 @@ bool HOptimizedGraphBuilder::TryCallApply(Call* expr) {
Handle<JSFunction> known_function;
if (function->IsConstant()) {
HConstant* constant_function = HConstant::cast(function);
- known_function = Handle<JSFunction>::cast(constant_function->handle());
+ known_function = Handle<JSFunction>::cast(
+ constant_function->handle(isolate()));
int args_count = arguments_count - 1; // Excluding receiver.
if (TryInlineApply(known_function, expr, args_count)) return true;
}
@@ -6993,23 +6959,19 @@ void HOptimizedGraphBuilder::VisitCall(Call* expr) {
CHECK_ALIVE(VisitExpressions(expr->arguments()));
Handle<String> name = prop->key()->AsLiteral()->AsPropertyName();
- SmallMapList* types = expr->GetReceiverTypes();
+ HValue* receiver =
+ environment()->ExpressionStackAt(expr->arguments()->length());
- bool monomorphic = expr->IsMonomorphic();
- Handle<Map> receiver_map;
- if (monomorphic) {
- receiver_map = (types == NULL || types->is_empty())
- ? Handle<Map>::null()
- : types->first();
+ SmallMapList* types;
+ bool was_monomorphic = expr->IsMonomorphic();
+ bool monomorphic = ComputeReceiverTypes(expr, receiver, &types);
+ if (!was_monomorphic && monomorphic) {
+ monomorphic = expr->ComputeTarget(types->first(), name);
}
- HValue* receiver =
- environment()->ExpressionStackAt(expr->arguments()->length());
if (monomorphic) {
- if (TryInlineBuiltinMethodCall(expr,
- receiver,
- receiver_map,
- expr->check_type())) {
+ Handle<Map> map = types->first();
+ if (TryInlineBuiltinMethodCall(expr, receiver, map, expr->check_type())) {
if (FLAG_trace_inlining) {
PrintF("Inlining builtin ");
expr->target()->ShortPrint();
@@ -7027,7 +6989,7 @@ void HOptimizedGraphBuilder::VisitCall(Call* expr) {
call = PreProcessCall(
new(zone()) HCallNamed(context, name, argument_count));
} else {
- AddCheckConstantFunction(expr->holder(), receiver, receiver_map);
+ AddCheckConstantFunction(expr->holder(), receiver, map);
if (TryInlineCall(expr)) return;
call = PreProcessCall(
@@ -7075,7 +7037,7 @@ void HOptimizedGraphBuilder::VisitCall(Call* expr) {
CHECK_ALIVE(VisitForValue(expr->expression()));
HValue* function = Pop();
- Add<HCheckFunction>(function, expr->target());
+ Add<HCheckValue>(function, expr->target());
// Replace the global object with the global receiver.
HGlobalReceiver* global_receiver = Add<HGlobalReceiver>(global_object);
@@ -7127,7 +7089,7 @@ void HOptimizedGraphBuilder::VisitCall(Call* expr) {
HGlobalReceiver* receiver = New<HGlobalReceiver>(global);
PushAndAdd(receiver);
CHECK_ALIVE(VisitExpressions(expr->arguments()));
- Add<HCheckFunction>(function, expr->target());
+ Add<HCheckValue>(function, expr->target());
if (TryInlineBuiltinFunctionCall(expr, true)) { // Drop the function.
if (FLAG_trace_inlining) {
@@ -7190,7 +7152,7 @@ void HOptimizedGraphBuilder::VisitCallNew(CallNew* expr) {
HValue* function = Top();
CHECK_ALIVE(VisitExpressions(expr->arguments()));
Handle<JSFunction> constructor = expr->target();
- HValue* check = Add<HCheckFunction>(function, constructor);
+ HValue* check = Add<HCheckValue>(function, constructor);
// Force completion of inobject slack tracking before generating
// allocation code to finalize instance size.
@@ -7279,10 +7241,10 @@ void HOptimizedGraphBuilder::VisitCallNew(CallNew* expr) {
CHECK_ALIVE(VisitArgument(expr->expression()));
HValue* constructor = HPushArgument::cast(Top())->argument();
CHECK_ALIVE(VisitArgumentList(expr->arguments()));
- HCallNew* call;
+ HBinaryCall* call;
if (expr->target().is_identical_to(array_function)) {
Handle<Cell> cell = expr->allocation_info_cell();
- Add<HCheckFunction>(constructor, array_function);
+ Add<HCheckValue>(constructor, array_function);
call = new(zone()) HCallNewArray(context, constructor, argument_count,
cell, expr->elements_kind());
} else {
@@ -7490,13 +7452,33 @@ HInstruction* HOptimizedGraphBuilder::BuildIncrement(
HConstant* delta = (expr->op() == Token::INC)
? graph()->GetConstant1()
: graph()->GetConstantMinus1();
- HInstruction* instr = Add<HAdd>(Top(), delta);
+ HInstruction* instr = AddUncasted<HAdd>(Top(), delta);
+ if (instr->IsAdd()) {
+ HAdd* add = HAdd::cast(instr);
+ add->set_observed_input_representation(1, rep);
+ add->set_observed_input_representation(2, Representation::Smi());
+ }
instr->SetFlag(HInstruction::kCannotBeTagged);
instr->ClearAllSideEffects();
return instr;
}
+void HOptimizedGraphBuilder::BuildStoreForEffect(Expression* expr,
+ Property* prop,
+ BailoutId ast_id,
+ BailoutId return_id,
+ HValue* object,
+ HValue* key,
+ HValue* value) {
+ EffectContext for_effect(this);
+ Push(object);
+ if (key != NULL) Push(key);
+ Push(value);
+ BuildStore(expr, prop, ast_id, return_id);
+}
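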
+
+
void HOptimizedGraphBuilder::VisitCountOperation(CountOperation* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
@@ -7573,86 +7555,42 @@ void HOptimizedGraphBuilder::VisitCountOperation(CountOperation* expr) {
return Bailout(kLookupVariableInCountOperation);
}
- } else {
- // Argument of the count operation is a property.
- ASSERT(prop != NULL);
-
- if (prop->key()->IsPropertyName()) {
- // Named property.
- if (returns_original_input) Push(graph()->GetConstantUndefined());
-
- CHECK_ALIVE(VisitForValue(prop->obj()));
- HValue* object = Top();
-
- Handle<String> name = prop->key()->AsLiteral()->AsPropertyName();
- Handle<Map> map;
- HInstruction* load = NULL;
- bool monomorphic = prop->IsMonomorphic();
- SmallMapList* types = prop->GetReceiverTypes();
- if (monomorphic) {
- map = types->first();
- if (map->is_dictionary_map()) monomorphic = false;
- }
- if (monomorphic) {
- Handle<JSFunction> getter;
- Handle<JSObject> holder;
- if (LookupGetter(map, name, &getter, &holder)) {
- load = BuildCallGetter(object, map, getter, holder);
- } else {
- load = BuildLoadNamedMonomorphic(object, name, prop, map);
- }
- } else if (types != NULL && types->length() > 1) {
- load = TryLoadPolymorphicAsMonomorphic(prop, object, types, name);
- }
- if (load == NULL) load = BuildLoadNamedGeneric(object, name, prop);
- PushAndAdd(load);
- if (load->HasObservableSideEffects()) {
- Add<HSimulate>(prop->LoadId(), REMOVABLE_SIMULATE);
- }
+ Drop(returns_original_input ? 2 : 1);
+ return ast_context()->ReturnValue(expr->is_postfix() ? input : after);
+ }
- after = BuildIncrement(returns_original_input, expr);
- HValue* result = returns_original_input ? Pop() : after;
+ // Argument of the count operation is a property.
+ ASSERT(prop != NULL);
+ if (returns_original_input) Push(graph()->GetConstantUndefined());
- return BuildStoreNamed(expr, expr->id(), expr->position(),
- expr->AssignmentId(), prop, object, after, result);
- } else {
- // Keyed property.
- if (returns_original_input) Push(graph()->GetConstantUndefined());
+ CHECK_ALIVE(VisitForValue(prop->obj()));
+ HValue* object = Top();
- CHECK_ALIVE(VisitForValue(prop->obj()));
- CHECK_ALIVE(VisitForValue(prop->key()));
- HValue* obj = environment()->ExpressionStackAt(1);
- HValue* key = environment()->ExpressionStackAt(0);
-
- bool has_side_effects = false;
- HValue* load = HandleKeyedElementAccess(
- obj, key, NULL, prop, prop->LoadId(), RelocInfo::kNoPosition,
- false, // is_store
- &has_side_effects);
- Push(load);
- if (has_side_effects) Add<HSimulate>(prop->LoadId(), REMOVABLE_SIMULATE);
-
- after = BuildIncrement(returns_original_input, expr);
- input = environment()->ExpressionStackAt(0);
-
- HandleKeyedElementAccess(obj, key, after, expr, expr->AssignmentId(),
- RelocInfo::kNoPosition,
- true, // is_store
- &has_side_effects);
-
- // Drop the key and the original value from the bailout environment.
- // Overwrite the receiver with the result of the operation, and the
- // placeholder with the original value if necessary.
- Drop(2);
- environment()->SetExpressionStackAt(0, after);
- if (returns_original_input) environment()->SetExpressionStackAt(1, input);
- ASSERT(has_side_effects); // Stores always have side effects.
- Add<HSimulate>(expr->AssignmentId(), REMOVABLE_SIMULATE);
- }
+ HValue* key = NULL;
+ if ((!prop->IsStringLength() &&
+ !prop->IsFunctionPrototype() &&
+ !prop->key()->IsPropertyName()) ||
+ prop->IsStringAccess()) {
+ CHECK_ALIVE(VisitForValue(prop->key()));
+ key = Top();
}
- Drop(returns_original_input ? 2 : 1);
- return ast_context()->ReturnValue(expr->is_postfix() ? input : after);
+ CHECK_ALIVE(PushLoad(prop, object, key, expr->position()));
+
+ after = BuildIncrement(returns_original_input, expr);
+
+ if (returns_original_input) {
+ input = Pop();
+ // Drop object and key to push it again in the effect context below.
+ Drop(key == NULL ? 1 : 2);
+ environment()->SetExpressionStackAt(0, input);
+ CHECK_ALIVE(BuildStoreForEffect(
+ expr, prop, expr->id(), expr->AssignmentId(), object, key, after));
+ return ast_context()->ReturnValue(Pop());
+ }
+
+ environment()->SetExpressionStackAt(0, after);
+ return BuildStore(expr, prop, expr->id(), expr->AssignmentId());
}
@@ -7775,13 +7713,13 @@ HInstruction* HOptimizedGraphBuilder::BuildBinaryOperation(
}
if (left_type->Is(Type::None())) {
- Add<HDeoptimize>("Insufficient type feedback for left side",
+ Add<HDeoptimize>("Insufficient type feedback for LHS of binary operation",
Deoptimizer::SOFT);
// TODO(rossberg): we should be able to get rid of non-continuous defaults.
left_type = handle(Type::Any(), isolate());
}
if (right_type->Is(Type::None())) {
- Add<HDeoptimize>("Insufficient type feedback for right side",
+ Add<HDeoptimize>("Insufficient type feedback for RHS of binary operation",
Deoptimizer::SOFT);
right_type = handle(Type::Any(), isolate());
}
@@ -8014,12 +7952,15 @@ void HOptimizedGraphBuilder::HandleLiteralCompareTypeof(CompareOperation* expr,
}
-static bool IsLiteralCompareBool(HValue* left,
+static bool IsLiteralCompareBool(Isolate* isolate,
+ HValue* left,
Token::Value op,
HValue* right) {
return op == Token::EQ_STRICT &&
- ((left->IsConstant() && HConstant::cast(left)->handle()->IsBoolean()) ||
- (right->IsConstant() && HConstant::cast(right)->handle()->IsBoolean()));
+ ((left->IsConstant() &&
+ HConstant::cast(left)->handle(isolate)->IsBoolean()) ||
+ (right->IsConstant() &&
+ HConstant::cast(right)->handle(isolate)->IsBoolean()));
}
@@ -8071,7 +8012,7 @@ void HOptimizedGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
HValue* left = Pop();
Token::Value op = expr->op();
- if (IsLiteralCompareBool(left, op, right)) {
+ if (IsLiteralCompareBool(isolate(), left, op, right)) {
HCompareObjectEqAndBranch* result =
New<HCompareObjectEqAndBranch>(left, right);
result->set_position(expr->position());
@@ -8109,7 +8050,7 @@ void HOptimizedGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
result->set_position(expr->position());
return ast_context()->ReturnInstruction(result, expr->id());
} else {
- Add<HCheckFunction>(right, target);
+ Add<HCheckValue>(right, target);
HInstanceOfKnownGlobal* result =
new(zone()) HInstanceOfKnownGlobal(context, left, target);
result->set_position(expr->position());
@@ -8132,7 +8073,8 @@ void HOptimizedGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
// Cases handled below depend on collected type feedback. They should
// soft deoptimize when there is no type feedback.
if (combined_type->Is(Type::None())) {
- Add<HDeoptimize>("insufficient type feedback for combined type",
+ Add<HDeoptimize>("Insufficient type feedback for combined type "
+ "of binary operation",
Deoptimizer::SOFT);
combined_type = left_type = right_type = handle(Type::Any(), isolate());
}
@@ -8202,21 +8144,23 @@ void HOptimizedGraphBuilder::HandleLiteralCompareNil(CompareOperation* expr,
ASSERT(expr->op() == Token::EQ || expr->op() == Token::EQ_STRICT);
CHECK_ALIVE(VisitForValue(sub_expr));
HValue* value = Pop();
- HIfContinuation continuation;
if (expr->op() == Token::EQ_STRICT) {
- IfBuilder if_nil(this);
- if_nil.If<HCompareObjectEqAndBranch>(
- value, (nil == kNullValue) ? graph()->GetConstantNull()
- : graph()->GetConstantUndefined());
- if_nil.Then();
- if_nil.Else();
- if_nil.CaptureContinuation(&continuation);
+ HConstant* nil_constant = nil == kNullValue
+ ? graph()->GetConstantNull()
+ : graph()->GetConstantUndefined();
+ HCompareObjectEqAndBranch* instr =
+ New<HCompareObjectEqAndBranch>(value, nil_constant);
+ instr->set_position(expr->position());
+ return ast_context()->ReturnControl(instr, expr->id());
+ } else {
+ ASSERT_EQ(Token::EQ, expr->op());
+ Handle<Type> type = expr->combined_type()->Is(Type::None())
+ ? handle(Type::Any(), isolate_)
+ : expr->combined_type();
+ HIfContinuation continuation;
+ BuildCompareNil(value, type, expr->position(), &continuation);
return ast_context()->ReturnContinuation(&continuation, expr->id());
}
- Handle<Type> type = expr->combined_type()->Is(Type::None())
- ? handle(Type::Any(), isolate_) : expr->combined_type();
- BuildCompareNil(value, type, expr->position(), &continuation);
- return ast_context()->ReturnContinuation(&continuation, expr->id());
}
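
Strict equality against null or undefined no longer needs the compare-nil continuation machinery: it compiles to a single object-equality branch against the corresponding singleton constant, while sloppy == keeps the type-driven BuildCompareNil path. The distinction, reduced to its core in an illustrative (and deliberately simplified) form:

    // Illustrative: strict comparison against a nil singleton is identity.
    bool StrictEqualsNil(const void* value, const void* nil_singleton) {
      return value == nil_singleton;  // one pointer compare, no type checks
    }

    // Sloppy ==, by contrast, must at least treat null and undefined as
    // equal to each other (real JS semantics also involve undetectable
    // objects, hence the continuation path in the builder).
    bool SloppyEqualsNil(const void* value, const void* null_obj,
                         const void* undefined_obj) {
      return value == null_obj || value == undefined_obj;  // simplified
    }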
@@ -8233,160 +8177,88 @@ HInstruction* HOptimizedGraphBuilder::BuildThisFunction() {
HInstruction* HOptimizedGraphBuilder::BuildFastLiteral(
- HValue* context,
Handle<JSObject> boilerplate_object,
- Handle<JSObject> original_boilerplate_object,
- Handle<Object> allocation_site,
- int data_size,
- int pointer_size,
+ Handle<Object> allocation_site_object,
AllocationSiteMode mode) {
NoObservableSideEffectsScope no_effects(this);
- InstanceType instance_type = boilerplate_object->map()->instance_type();
- ASSERT(instance_type == JS_ARRAY_TYPE || instance_type == JS_OBJECT_TYPE);
- HType type = instance_type == JS_ARRAY_TYPE
- ? HType::JSArray() : HType::JSObject();
- HInstruction* target = NULL;
- HInstruction* data_target = NULL;
-
- if (isolate()->heap()->GetPretenureMode() == TENURED) {
- if (data_size != 0) {
- HValue* size_in_bytes = Add<HConstant>(data_size);
- data_target = Add<HAllocate>(size_in_bytes, HType::JSObject(), TENURED,
- FIXED_DOUBLE_ARRAY_TYPE);
- Handle<Map> free_space_map = isolate()->factory()->free_space_map();
- AddStoreMapConstant(data_target, free_space_map);
- HObjectAccess access =
- HObjectAccess::ForJSObjectOffset(FreeSpace::kSizeOffset);
- Add<HStoreNamedField>(data_target, access, size_in_bytes);
- }
- if (pointer_size != 0) {
- HValue* size_in_bytes = Add<HConstant>(pointer_size);
- target = Add<HAllocate>(size_in_bytes, type, TENURED, instance_type);
- }
- } else {
- HValue* size_in_bytes = Add<HConstant>(data_size + pointer_size);
- target = Add<HAllocate>(size_in_bytes, type, NOT_TENURED, instance_type);
- }
-
- int offset = 0;
- int data_offset = 0;
- BuildEmitDeepCopy(boilerplate_object, original_boilerplate_object,
- allocation_site, target, &offset, data_target,
- &data_offset, mode);
- return target;
-}
+ Handle<FixedArrayBase> elements(boilerplate_object->elements());
+ int object_size = boilerplate_object->map()->instance_size();
+ int object_offset = object_size;
-void HOptimizedGraphBuilder::BuildEmitDeepCopy(
- Handle<JSObject> boilerplate_object,
- Handle<JSObject> original_boilerplate_object,
- Handle<Object> allocation_site_object,
- HInstruction* target,
- int* offset,
- HInstruction* data_target,
- int* data_offset,
- AllocationSiteMode mode) {
+ InstanceType instance_type = boilerplate_object->map()->instance_type();
bool create_allocation_site_info = mode == TRACK_ALLOCATION_SITE &&
- AllocationSite::CanTrack(boilerplate_object->map()->instance_type());
+ AllocationSite::CanTrack(instance_type);
// If using allocation sites, then the payload on the site should already
// be filled in as a valid (boilerplate) array.
ASSERT(!create_allocation_site_info ||
AllocationSite::cast(*allocation_site_object)->IsLiteralSite());
- HInstruction* allocation_site = NULL;
-
if (create_allocation_site_info) {
- allocation_site = Add<HConstant>(allocation_site_object);
+ object_size += AllocationMemento::kSize;
}
- // Only elements backing stores for non-COW arrays need to be copied.
- Handle<FixedArrayBase> elements(boilerplate_object->elements());
- Handle<FixedArrayBase> original_elements(
- original_boilerplate_object->elements());
- ElementsKind kind = boilerplate_object->map()->elements_kind();
+ ASSERT(instance_type == JS_ARRAY_TYPE || instance_type == JS_OBJECT_TYPE);
+ HType type = instance_type == JS_ARRAY_TYPE
+ ? HType::JSArray() : HType::JSObject();
+ HValue* object_size_constant = Add<HConstant>(object_size);
+ HInstruction* object = Add<HAllocate>(object_size_constant, type,
+ isolate()->heap()->GetPretenureMode(), instance_type);
+
+ BuildEmitObjectHeader(boilerplate_object, object);
+
+ if (create_allocation_site_info) {
+ HInstruction* allocation_site = Add<HConstant>(allocation_site_object);
+ BuildCreateAllocationMemento(object, object_offset, allocation_site);
+ }
- int object_offset = *offset;
- int object_size = boilerplate_object->map()->instance_size();
int elements_size = (elements->length() > 0 &&
elements->map() != isolate()->heap()->fixed_cow_array_map()) ?
elements->Size() : 0;
- int elements_offset = 0;
- if (data_target != NULL && boilerplate_object->HasFastDoubleElements()) {
- elements_offset = *data_offset;
- *data_offset += elements_size;
- } else {
- // Place elements right after this object.
- elements_offset = *offset + object_size;
- *offset += elements_size;
+ HInstruction* object_elements = NULL;
+ if (elements_size > 0) {
+ HValue* object_elements_size = Add<HConstant>(elements_size);
+ if (boilerplate_object->HasFastDoubleElements()) {
+ object_elements = Add<HAllocate>(object_elements_size, HType::JSObject(),
+ isolate()->heap()->GetPretenureMode(), FIXED_DOUBLE_ARRAY_TYPE);
+ } else {
+ object_elements = Add<HAllocate>(object_elements_size, HType::JSObject(),
+ isolate()->heap()->GetPretenureMode(), FIXED_ARRAY_TYPE);
+ }
}
- // Increase the offset so that subsequent objects end up right after this
- // object (and it's elements if they are allocated in the same space).
- *offset += object_size;
+ BuildInitElementsInObjectHeader(boilerplate_object, object, object_elements);
+
// Copy object elements if non-COW.
- HValue* object_elements = BuildEmitObjectHeader(boilerplate_object, target,
- data_target, object_offset, elements_offset, elements_size);
if (object_elements != NULL) {
- BuildEmitElements(elements, original_elements, kind, object_elements,
- target, offset, data_target, data_offset);
+ BuildEmitElements(boilerplate_object, elements, object_elements);
}
// Copy in-object properties.
if (boilerplate_object->map()->NumberOfFields() != 0) {
- HValue* object_properties =
- Add<HInnerAllocatedObject>(target, object_offset);
- BuildEmitInObjectProperties(boilerplate_object, original_boilerplate_object,
- object_properties, target, offset, data_target, data_offset);
- }
-
- // Create allocation site info.
- if (mode == TRACK_ALLOCATION_SITE &&
- AllocationSite::CanTrack(boilerplate_object->map()->instance_type())) {
- elements_offset += AllocationMemento::kSize;
- *offset += AllocationMemento::kSize;
- BuildCreateAllocationMemento(target, JSArray::kSize, allocation_site);
+ BuildEmitInObjectProperties(boilerplate_object, object);
}
+ return object;
}
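
The literal deep-copy changes shape here: instead of one flat, pre-sized allocation that BuildEmitDeepCopy filled via offset bookkeeping (the removed offset/data_offset plumbing), every boilerplate object, including nested object-valued properties and elements, gets its own allocation, with the allocation memento folded into the top object's size. The recursion, as an illustrative sketch:

    #include <memory>
    #include <vector>

    // Illustrative boilerplate tree: scalar fields plus nested
    // object-valued properties.
    struct Boilerplate {
      std::vector<int> fields;
      std::vector<const Boilerplate*> children;
    };

    struct Copy {
      std::vector<int> fields;
      std::vector<std::unique_ptr<Copy>> children;
    };

    // Each level allocates its own object, then recurses for nested
    // literals, mirroring the per-object HAllocate in the new code.
    std::unique_ptr<Copy> FastLiteral(const Boilerplate& b) {
      auto copy = std::make_unique<Copy>();
      copy->fields = b.fields;
      for (const Boilerplate* child : b.children) {
        copy->children.push_back(FastLiteral(*child));
      }
      return copy;
    }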
-HValue* HOptimizedGraphBuilder::BuildEmitObjectHeader(
+void HOptimizedGraphBuilder::BuildEmitObjectHeader(
Handle<JSObject> boilerplate_object,
- HInstruction* target,
- HInstruction* data_target,
- int object_offset,
- int elements_offset,
- int elements_size) {
+ HInstruction* object) {
ASSERT(boilerplate_object->properties()->length() == 0);
- HValue* result = NULL;
- HValue* object_header = Add<HInnerAllocatedObject>(target, object_offset);
Handle<Map> boilerplate_object_map(boilerplate_object->map());
- AddStoreMapConstant(object_header, boilerplate_object_map);
-
- HInstruction* elements;
- if (elements_size == 0) {
- Handle<Object> elements_field =
- Handle<Object>(boilerplate_object->elements(), isolate());
- elements = Add<HConstant>(elements_field);
- } else {
- if (data_target != NULL && boilerplate_object->HasFastDoubleElements()) {
- elements = Add<HInnerAllocatedObject>(data_target, elements_offset);
- } else {
- elements = Add<HInnerAllocatedObject>(target, elements_offset);
- }
- result = elements;
- }
- Add<HStoreNamedField>(object_header, HObjectAccess::ForElementsPointer(),
- elements);
+ AddStoreMapConstant(object, boilerplate_object_map);
Handle<Object> properties_field =
Handle<Object>(boilerplate_object->properties(), isolate());
ASSERT(*properties_field == isolate()->heap()->empty_fixed_array());
HInstruction* properties = Add<HConstant>(properties_field);
HObjectAccess access = HObjectAccess::ForPropertiesPointer();
- Add<HStoreNamedField>(object_header, access, properties);
+ Add<HStoreNamedField>(object, access, properties);
if (boilerplate_object->IsJSArray()) {
Handle<JSArray> boilerplate_array =
@@ -8396,22 +8268,30 @@ HValue* HOptimizedGraphBuilder::BuildEmitObjectHeader(
HInstruction* length = Add<HConstant>(length_field);
ASSERT(boilerplate_array->length()->IsSmi());
- Add<HStoreNamedField>(object_header, HObjectAccess::ForArrayLength(
+ Add<HStoreNamedField>(object, HObjectAccess::ForArrayLength(
boilerplate_array->GetElementsKind()), length);
}
+}
- return result;
+
+void HOptimizedGraphBuilder::BuildInitElementsInObjectHeader(
+ Handle<JSObject> boilerplate_object,
+ HInstruction* object,
+ HInstruction* object_elements) {
+ ASSERT(boilerplate_object->properties()->length() == 0);
+ if (object_elements == NULL) {
+ Handle<Object> elements_field =
+ Handle<Object>(boilerplate_object->elements(), isolate());
+ object_elements = Add<HConstant>(elements_field);
+ }
+ Add<HStoreNamedField>(object, HObjectAccess::ForElementsPointer(),
+ object_elements);
}
void HOptimizedGraphBuilder::BuildEmitInObjectProperties(
Handle<JSObject> boilerplate_object,
- Handle<JSObject> original_boilerplate_object,
- HValue* object_properties,
- HInstruction* target,
- int* offset,
- HInstruction* data_target,
- int* data_offset) {
+ HInstruction* object) {
Handle<DescriptorArray> descriptors(
boilerplate_object->map()->instance_descriptors());
int limit = boilerplate_object->map()->NumberOfOwnDescriptors();
@@ -8435,31 +8315,20 @@ void HOptimizedGraphBuilder::BuildEmitInObjectProperties(
if (value->IsJSObject()) {
Handle<JSObject> value_object = Handle<JSObject>::cast(value);
- Handle<JSObject> original_value_object = Handle<JSObject>::cast(
- Handle<Object>(original_boilerplate_object->InObjectPropertyAt(index),
- isolate()));
- HInstruction* value_instruction = Add<HInnerAllocatedObject>(target,
- *offset);
-
- Add<HStoreNamedField>(object_properties, access, value_instruction);
- BuildEmitDeepCopy(value_object, original_value_object,
- Handle<Object>::null(), target,
- offset, data_target, data_offset,
- DONT_TRACK_ALLOCATION_SITE);
+ HInstruction* result =
+ BuildFastLiteral(value_object,
+ Handle<Object>::null(), DONT_TRACK_ALLOCATION_SITE);
+ Add<HStoreNamedField>(object, access, result);
} else {
Representation representation = details.representation();
HInstruction* value_instruction = Add<HConstant>(value);
if (representation.IsDouble()) {
// Allocate a HeapNumber box and store the value into it.
- HInstruction* double_box;
- if (data_target != NULL) {
- double_box = Add<HInnerAllocatedObject>(data_target, *data_offset);
- *data_offset += HeapNumber::kSize;
- } else {
- double_box = Add<HInnerAllocatedObject>(target, *offset);
- *offset += HeapNumber::kSize;
- }
+ HValue* heap_number_constant = Add<HConstant>(HeapNumber::kSize);
+ HInstruction* double_box =
+ Add<HAllocate>(heap_number_constant, HType::HeapNumber(),
+ isolate()->heap()->GetPretenureMode(), HEAP_NUMBER_TYPE);
AddStoreMapConstant(double_box,
isolate()->factory()->heap_number_map());
Add<HStoreNamedField>(double_box, HObjectAccess::ForHeapNumberValue(),
@@ -8467,7 +8336,7 @@ void HOptimizedGraphBuilder::BuildEmitInObjectProperties(
value_instruction = double_box;
}
- Add<HStoreNamedField>(object_properties, access, value_instruction);
+ Add<HStoreNamedField>(object, access, value_instruction);
}
}
@@ -8478,31 +8347,25 @@ void HOptimizedGraphBuilder::BuildEmitInObjectProperties(
ASSERT(boilerplate_object->IsJSObject());
int property_offset = boilerplate_object->GetInObjectPropertyOffset(i);
HObjectAccess access = HObjectAccess::ForJSObjectOffset(property_offset);
- Add<HStoreNamedField>(object_properties, access, value_instruction);
+ Add<HStoreNamedField>(object, access, value_instruction);
}
}
void HOptimizedGraphBuilder::BuildEmitElements(
+ Handle<JSObject> boilerplate_object,
Handle<FixedArrayBase> elements,
- Handle<FixedArrayBase> original_elements,
- ElementsKind kind,
- HValue* object_elements,
- HInstruction* target,
- int* offset,
- HInstruction* data_target,
- int* data_offset) {
+ HValue* object_elements) {
+ ElementsKind kind = boilerplate_object->map()->elements_kind();
int elements_length = elements->length();
HValue* object_elements_length = Add<HConstant>(elements_length);
-
BuildInitializeElementsHeader(object_elements, kind, object_elements_length);
// Copy elements backing store content.
if (elements->IsFixedDoubleArray()) {
BuildEmitFixedDoubleArray(elements, kind, object_elements);
} else if (elements->IsFixedArray()) {
- BuildEmitFixedArray(elements, original_elements, kind, object_elements,
- target, offset, data_target, data_offset);
+ BuildEmitFixedArray(elements, kind, object_elements);
} else {
UNREACHABLE();
}
@@ -8530,32 +8393,20 @@ void HOptimizedGraphBuilder::BuildEmitFixedDoubleArray(
void HOptimizedGraphBuilder::BuildEmitFixedArray(
Handle<FixedArrayBase> elements,
- Handle<FixedArrayBase> original_elements,
ElementsKind kind,
- HValue* object_elements,
- HInstruction* target,
- int* offset,
- HInstruction* data_target,
- int* data_offset) {
+ HValue* object_elements) {
HInstruction* boilerplate_elements = Add<HConstant>(elements);
int elements_length = elements->length();
Handle<FixedArray> fast_elements = Handle<FixedArray>::cast(elements);
- Handle<FixedArray> original_fast_elements =
- Handle<FixedArray>::cast(original_elements);
for (int i = 0; i < elements_length; i++) {
Handle<Object> value(fast_elements->get(i), isolate());
HValue* key_constant = Add<HConstant>(i);
if (value->IsJSObject()) {
Handle<JSObject> value_object = Handle<JSObject>::cast(value);
- Handle<JSObject> original_value_object = Handle<JSObject>::cast(
- Handle<Object>(original_fast_elements->get(i), isolate()));
- HInstruction* value_instruction = Add<HInnerAllocatedObject>(target,
- *offset);
- Add<HStoreKeyed>(object_elements, key_constant, value_instruction, kind);
- BuildEmitDeepCopy(value_object, original_value_object,
- Handle<Object>::null(), target,
- offset, data_target, data_offset,
- DONT_TRACK_ALLOCATION_SITE);
+ HInstruction* result =
+ BuildFastLiteral(value_object,
+ Handle<Object>::null(), DONT_TRACK_ALLOCATION_SITE);
+ Add<HStoreKeyed>(object_elements, key_constant, result, kind);
} else {
HInstruction* value_instruction =
Add<HLoadKeyed>(boilerplate_elements, key_constant,
@@ -9555,7 +9406,7 @@ void HTracer::TraceCompilation(CompilationInfo* info) {
void HTracer::TraceLithium(const char* name, LChunk* chunk) {
- ASSERT(!FLAG_parallel_recompilation);
+ ASSERT(!FLAG_concurrent_recompilation);
AllowHandleDereference allow_deref;
AllowDeferredHandleDereference allow_deferred_deref;
Trace(name, chunk->graph(), chunk);
@@ -9563,7 +9414,7 @@ void HTracer::TraceLithium(const char* name, LChunk* chunk) {
void HTracer::TraceHydrogen(const char* name, HGraph* graph) {
- ASSERT(!FLAG_parallel_recompilation);
+ ASSERT(!FLAG_concurrent_recompilation);
AllowHandleDereference allow_deref;
AllowDeferredHandleDereference allow_deferred_deref;
Trace(name, graph, NULL);
@@ -9773,15 +9624,15 @@ void HStatistics::Initialize(CompilationInfo* info) {
void HStatistics::Print() {
PrintF("Timing results:\n");
- int64_t sum = 0;
- for (int i = 0; i < timing_.length(); ++i) {
- sum += timing_[i];
+ TimeDelta sum;
+ for (int i = 0; i < times_.length(); ++i) {
+ sum += times_[i];
}
for (int i = 0; i < names_.length(); ++i) {
PrintF("%32s", names_[i]);
- double ms = static_cast<double>(timing_[i]) / 1000;
- double percent = static_cast<double>(timing_[i]) * 100 / sum;
+ double ms = times_[i].InMillisecondsF();
+ double percent = times_[i].PercentOf(sum);
PrintF(" %8.3f ms / %4.1f %% ", ms, percent);
unsigned size = sizes_[i];
@@ -9791,29 +9642,29 @@ void HStatistics::Print() {
PrintF("----------------------------------------"
"---------------------------------------\n");
- int64_t total = create_graph_ + optimize_graph_ + generate_code_;
+ TimeDelta total = create_graph_ + optimize_graph_ + generate_code_;
PrintF("%32s %8.3f ms / %4.1f %% \n",
"Create graph",
- static_cast<double>(create_graph_) / 1000,
- static_cast<double>(create_graph_) * 100 / total);
+ create_graph_.InMillisecondsF(),
+ create_graph_.PercentOf(total));
PrintF("%32s %8.3f ms / %4.1f %% \n",
"Optimize graph",
- static_cast<double>(optimize_graph_) / 1000,
- static_cast<double>(optimize_graph_) * 100 / total);
+ optimize_graph_.InMillisecondsF(),
+ optimize_graph_.PercentOf(total));
PrintF("%32s %8.3f ms / %4.1f %% \n",
"Generate and install code",
- static_cast<double>(generate_code_) / 1000,
- static_cast<double>(generate_code_) * 100 / total);
+ generate_code_.InMillisecondsF(),
+ generate_code_.PercentOf(total));
PrintF("----------------------------------------"
"---------------------------------------\n");
PrintF("%32s %8.3f ms (%.1f times slower than full code gen)\n",
"Total",
- static_cast<double>(total) / 1000,
- static_cast<double>(total) / full_code_gen_);
+ total.InMillisecondsF(),
+ total.TimesOf(full_code_gen_));
double source_size_in_kb = static_cast<double>(source_size_) / 1024;
double normalized_time = source_size_in_kb > 0
- ? (static_cast<double>(total) / 1000) / source_size_in_kb
+ ? total.InMillisecondsF() / source_size_in_kb
: 0;
double normalized_size_in_kb = source_size_in_kb > 0
? total_size_ / 1024 / source_size_in_kb
@@ -9824,17 +9675,17 @@ void HStatistics::Print() {
}
-void HStatistics::SaveTiming(const char* name, int64_t ticks, unsigned size) {
+void HStatistics::SaveTiming(const char* name, TimeDelta time, unsigned size) {
total_size_ += size;
for (int i = 0; i < names_.length(); ++i) {
if (strcmp(names_[i], name) == 0) {
- timing_[i] += ticks;
+ times_[i] += time;
sizes_[i] += size;
return;
}
}
names_.Add(name);
- timing_.Add(ticks);
+ times_.Add(time);
sizes_.Add(size);
}
diff --git a/deps/v8/src/hydrogen.h b/deps/v8/src/hydrogen.h
index 004aa16a8..c1dafa8b5 100644
--- a/deps/v8/src/hydrogen.h
+++ b/deps/v8/src/hydrogen.h
@@ -53,10 +53,10 @@ class LChunk;
class LiveRange;
-class HBasicBlock: public ZoneObject {
+class HBasicBlock V8_FINAL : public ZoneObject {
public:
explicit HBasicBlock(HGraph* graph);
- virtual ~HBasicBlock() { }
+ ~HBasicBlock() { }
// Simple accessors.
int block_id() const { return block_id_; }
@@ -220,7 +220,7 @@ class HBasicBlock: public ZoneObject {
};
-class HPredecessorIterator BASE_EMBEDDED {
+class HPredecessorIterator V8_FINAL BASE_EMBEDDED {
public:
explicit HPredecessorIterator(HBasicBlock* block)
: predecessor_list_(block->predecessors()), current_(0) { }
@@ -235,7 +235,7 @@ class HPredecessorIterator BASE_EMBEDDED {
};
-class HInstructionIterator BASE_EMBEDDED {
+class HInstructionIterator V8_FINAL BASE_EMBEDDED {
public:
explicit HInstructionIterator(HBasicBlock* block)
: instr_(block->first()) {
@@ -255,7 +255,7 @@ class HInstructionIterator BASE_EMBEDDED {
};
-class HLoopInformation: public ZoneObject {
+class HLoopInformation V8_FINAL : public ZoneObject {
public:
HLoopInformation(HBasicBlock* loop_header, Zone* zone)
: back_edges_(4, zone),
@@ -264,7 +264,7 @@ class HLoopInformation: public ZoneObject {
stack_check_(NULL) {
blocks_.Add(loop_header, zone);
}
- virtual ~HLoopInformation() {}
+ ~HLoopInformation() {}
const ZoneList<HBasicBlock*>* back_edges() const { return &back_edges_; }
const ZoneList<HBasicBlock*>* blocks() const { return &blocks_; }
@@ -303,7 +303,7 @@ class HLoopInformation: public ZoneObject {
class BoundsCheckTable;
class InductionVariableBlocksTable;
-class HGraph: public ZoneObject {
+class HGraph V8_FINAL : public ZoneObject {
public:
explicit HGraph(CompilationInfo* info);
@@ -320,7 +320,6 @@ class HGraph: public ZoneObject {
bool ProcessArgumentsObject();
void OrderBlocks();
void AssignDominators();
- void SetupInformativeDefinitions();
void RestoreActualValues();
// Returns false if there are phi-uses of the arguments-object
@@ -468,9 +467,6 @@ class HGraph: public ZoneObject {
phase.Run();
}
- void CheckForBackEdge(HBasicBlock* block, HBasicBlock* successor);
- void SetupInformativeDefinitionsInBlock(HBasicBlock* block);
- void SetupInformativeDefinitionsRecursively(HBasicBlock* block);
void EliminateRedundantBoundsChecksUsingInductionVariables();
Isolate* isolate_;
@@ -523,7 +519,7 @@ enum FrameType {
};
-class HEnvironment: public ZoneObject {
+class HEnvironment V8_FINAL : public ZoneObject {
public:
HEnvironment(HEnvironment* outer,
Scope* scope,
@@ -557,9 +553,6 @@ class HEnvironment: public ZoneObject {
void set_entry(HEnterInlined* entry) { entry_ = entry; }
int length() const { return values_.length(); }
- bool is_special_index(int i) const {
- return i >= parameter_count() && i < parameter_count() + specials_count();
- }
int first_expression_index() const {
return parameter_count() + specials_count() + local_count();
@@ -678,8 +671,15 @@ class HEnvironment: public ZoneObject {
}
bool is_local_index(int i) const {
- return i >= first_local_index() &&
- i < first_expression_index();
+ return i >= first_local_index() && i < first_expression_index();
+ }
+
+ bool is_parameter_index(int i) const {
+ return i >= 0 && i < parameter_count();
+ }
+
+ bool is_special_index(int i) const {
+ return i >= parameter_count() && i < parameter_count() + specials_count();
}
void PrintTo(StringStream* stream);
@@ -793,33 +793,37 @@ class AstContext {
};
-class EffectContext: public AstContext {
+class EffectContext V8_FINAL : public AstContext {
public:
explicit EffectContext(HOptimizedGraphBuilder* owner)
: AstContext(owner, Expression::kEffect) {
}
virtual ~EffectContext();
- virtual void ReturnValue(HValue* value);
- virtual void ReturnInstruction(HInstruction* instr, BailoutId ast_id);
- virtual void ReturnControl(HControlInstruction* instr, BailoutId ast_id);
+ virtual void ReturnValue(HValue* value) V8_OVERRIDE;
+ virtual void ReturnInstruction(HInstruction* instr,
+ BailoutId ast_id) V8_OVERRIDE;
+ virtual void ReturnControl(HControlInstruction* instr,
+ BailoutId ast_id) V8_OVERRIDE;
virtual void ReturnContinuation(HIfContinuation* continuation,
- BailoutId ast_id);
+ BailoutId ast_id) V8_OVERRIDE;
};
-class ValueContext: public AstContext {
+class ValueContext V8_FINAL : public AstContext {
public:
ValueContext(HOptimizedGraphBuilder* owner, ArgumentsAllowedFlag flag)
: AstContext(owner, Expression::kValue), flag_(flag) {
}
virtual ~ValueContext();
- virtual void ReturnValue(HValue* value);
- virtual void ReturnInstruction(HInstruction* instr, BailoutId ast_id);
- virtual void ReturnControl(HControlInstruction* instr, BailoutId ast_id);
+ virtual void ReturnValue(HValue* value) V8_OVERRIDE;
+ virtual void ReturnInstruction(HInstruction* instr,
+ BailoutId ast_id) V8_OVERRIDE;
+ virtual void ReturnControl(HControlInstruction* instr,
+ BailoutId ast_id) V8_OVERRIDE;
virtual void ReturnContinuation(HIfContinuation* continuation,
- BailoutId ast_id);
+ BailoutId ast_id) V8_OVERRIDE;
bool arguments_allowed() { return flag_ == ARGUMENTS_ALLOWED; }
@@ -828,7 +832,7 @@ class ValueContext: public AstContext {
};
-class TestContext: public AstContext {
+class TestContext V8_FINAL : public AstContext {
public:
TestContext(HOptimizedGraphBuilder* owner,
Expression* condition,
@@ -840,11 +844,13 @@ class TestContext: public AstContext {
if_false_(if_false) {
}
- virtual void ReturnValue(HValue* value);
- virtual void ReturnInstruction(HInstruction* instr, BailoutId ast_id);
- virtual void ReturnControl(HControlInstruction* instr, BailoutId ast_id);
+ virtual void ReturnValue(HValue* value) V8_OVERRIDE;
+ virtual void ReturnInstruction(HInstruction* instr,
+ BailoutId ast_id) V8_OVERRIDE;
+ virtual void ReturnControl(HControlInstruction* instr,
+ BailoutId ast_id) V8_OVERRIDE;
virtual void ReturnContinuation(HIfContinuation* continuation,
- BailoutId ast_id);
+ BailoutId ast_id) V8_OVERRIDE;
static TestContext* cast(AstContext* context) {
ASSERT(context->IsTest());
@@ -866,7 +872,7 @@ class TestContext: public AstContext {
};
-class FunctionState {
+class FunctionState V8_FINAL {
public:
FunctionState(HOptimizedGraphBuilder* owner,
CompilationInfo* info,
@@ -933,7 +939,7 @@ class FunctionState {
};
-class HIfContinuation {
+class HIfContinuation V8_FINAL {
public:
HIfContinuation() { continuation_captured_ = false; }
~HIfContinuation() { ASSERT(!continuation_captured_); }
@@ -1056,7 +1062,7 @@ class HGraphBuilder {
template<class I, class P1, class P2>
I* Add(P1 p1, P2 p2) {
- return static_cast<I*>(AddUncasted<I>(p1, p2));
+ return I::cast(AddUncasted<I>(p1, p2));
}
template<class I, class P1, class P2, class P3>
@@ -1223,10 +1229,9 @@ class HGraphBuilder {
bool is_jsarray);
HInstruction* BuildUncheckedMonomorphicElementAccess(
- HValue* object,
+ HValue* checked_object,
HValue* key,
HValue* val,
- HCheckMaps* mapcheck,
bool is_js_array,
ElementsKind elements_kind,
bool is_store,
@@ -1251,13 +1256,10 @@ class HGraphBuilder {
LoadKeyedHoleMode load_mode,
KeyedAccessStoreMode store_mode);
- HLoadNamedField* BuildLoadNamedField(
- HValue* object,
- HObjectAccess access,
- HValue* typecheck);
- HInstruction* BuildLoadStringLength(HValue* object, HValue* typecheck);
- HStoreNamedField* AddStoreMapConstant(HValue *object, Handle<Map>);
- HLoadNamedField* AddLoadElements(HValue *object, HValue *typecheck);
+ HLoadNamedField* BuildLoadNamedField(HValue* object, HObjectAccess access);
+ HInstruction* BuildLoadStringLength(HValue* object, HValue* checked_value);
+ HStoreNamedField* AddStoreMapConstant(HValue* object, Handle<Map>);
+ HLoadNamedField* AddLoadElements(HValue* object);
HLoadNamedField* AddLoadFixedArrayLength(HValue *object);
HValue* AddLoadJSBuiltin(Builtins::JavaScript builtin);
@@ -1272,7 +1274,7 @@ class HGraphBuilder {
void AddIncrementCounter(StatsCounter* counter,
HValue* context);
- class IfBuilder {
+ class IfBuilder V8_FINAL {
public:
explicit IfBuilder(HGraphBuilder* builder,
int position = RelocInfo::kNoPosition);
@@ -1402,7 +1404,7 @@ class HGraphBuilder {
HBasicBlock* merge_block_;
};
- class LoopBuilder {
+ class LoopBuilder V8_FINAL {
public:
enum Direction {
kPreIncrement,
@@ -1414,6 +1416,11 @@ class HGraphBuilder {
LoopBuilder(HGraphBuilder* builder,
HValue* context,
Direction direction);
+ LoopBuilder(HGraphBuilder* builder,
+ HValue* context,
+ Direction direction,
+ HValue* increment_amount);
+
~LoopBuilder() {
ASSERT(finished_);
}
@@ -1422,6 +1429,9 @@ class HGraphBuilder {
HValue* initial,
HValue* terminating,
Token::Value token);
+
+ void Break();
+
void EndBody();
private:
@@ -1429,11 +1439,13 @@ class HGraphBuilder {
HGraphBuilder* builder_;
HValue* context_;
+ HValue* increment_amount_;
HInstruction* increment_;
HPhi* phi_;
HBasicBlock* header_block_;
HBasicBlock* body_block_;
HBasicBlock* exit_block_;
+ HBasicBlock* exit_trampoline_block_;
Direction direction_;
bool finished_;
};
@@ -1443,7 +1455,7 @@ class HGraphBuilder {
void BuildNewSpaceArrayCheck(HValue* length,
ElementsKind kind);
- class JSArrayBuilder {
+ class JSArrayBuilder V8_FINAL {
public:
JSArrayBuilder(HGraphBuilder* builder,
ElementsKind kind,
@@ -1543,9 +1555,10 @@ class HGraphBuilder {
int previous_object_size,
HValue* payload);
- void BuildConstantMapCheck(Handle<JSObject> constant, CompilationInfo* info);
- void BuildCheckPrototypeMaps(Handle<JSObject> prototype,
- Handle<JSObject> holder);
+ HInstruction* BuildConstantMapCheck(Handle<JSObject> constant,
+ CompilationInfo* info);
+ HInstruction* BuildCheckPrototypeMaps(Handle<JSObject> prototype,
+ Handle<JSObject> holder);
HInstruction* BuildGetNativeContext();
HInstruction* BuildGetArrayFunction();
@@ -1599,22 +1612,6 @@ inline HInstruction* HGraphBuilder::AddUncasted<HSimulate>(
template<>
-inline HInstruction* HGraphBuilder::NewUncasted<HLoadNamedField>(
- HValue* object, HObjectAccess access) {
- return NewUncasted<HLoadNamedField>(object, access,
- static_cast<HValue*>(NULL));
-}
-
-
-template<>
-inline HInstruction* HGraphBuilder::AddUncasted<HLoadNamedField>(
- HValue* object, HObjectAccess access) {
- return AddUncasted<HLoadNamedField>(object, access,
- static_cast<HValue*>(NULL));
-}
-
-
-template<>
inline HInstruction* HGraphBuilder::AddUncasted<HSimulate>(BailoutId id) {
return AddUncasted<HSimulate>(id, FIXED_SIMULATE);
}
@@ -1642,12 +1639,13 @@ inline HInstruction* HGraphBuilder::NewUncasted<HContext>() {
}
-class HOptimizedGraphBuilder: public HGraphBuilder, public AstVisitor {
+class HOptimizedGraphBuilder V8_FINAL
+ : public HGraphBuilder, public AstVisitor {
public:
// A class encapsulating (lazily-allocated) break and continue blocks for
// a breakable statement. Separated from BreakAndContinueScope so that it
// can have a separate lifetime.
- class BreakAndContinueInfo BASE_EMBEDDED {
+ class BreakAndContinueInfo V8_FINAL BASE_EMBEDDED {
public:
explicit BreakAndContinueInfo(BreakableStatement* target,
int drop_extra = 0)
@@ -1673,7 +1671,7 @@ class HOptimizedGraphBuilder: public HGraphBuilder, public AstVisitor {
// A helper class to maintain a stack of current BreakAndContinueInfo
// structures mirroring BreakableStatement nesting.
- class BreakAndContinueScope BASE_EMBEDDED {
+ class BreakAndContinueScope V8_FINAL BASE_EMBEDDED {
public:
BreakAndContinueScope(BreakAndContinueInfo* info,
HOptimizedGraphBuilder* owner)
@@ -1699,7 +1697,7 @@ class HOptimizedGraphBuilder: public HGraphBuilder, public AstVisitor {
explicit HOptimizedGraphBuilder(CompilationInfo* info);
- virtual bool BuildGraph();
+ virtual bool BuildGraph() V8_OVERRIDE;
// Simple accessors.
BreakAndContinueScope* break_scope() const { return break_scope_; }
@@ -1885,9 +1883,9 @@ class HOptimizedGraphBuilder: public HGraphBuilder, public AstVisitor {
template <class Instruction> HInstruction* PreProcessCall(Instruction* call);
void SetUpScope(Scope* scope);
- virtual void VisitStatements(ZoneList<Statement*>* statements);
+ virtual void VisitStatements(ZoneList<Statement*>* statements) V8_OVERRIDE;
-#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
+#define DECLARE_VISIT(type) virtual void Visit##type(type* node) V8_OVERRIDE;
AST_NODE_LIST(DECLARE_VISIT)
#undef DECLARE_VISIT
@@ -1917,7 +1915,9 @@ class HOptimizedGraphBuilder: public HGraphBuilder, public AstVisitor {
bool TryInlineCall(Call* expr, bool drop_extra = false);
bool TryInlineConstruct(CallNew* expr, HValue* implicit_return_value);
- bool TryInlineGetter(Handle<JSFunction> getter, Property* prop);
+ bool TryInlineGetter(Handle<JSFunction> getter,
+ BailoutId ast_id,
+ BailoutId return_id);
bool TryInlineSetter(Handle<JSFunction> setter,
BailoutId id,
BailoutId assignment_id,
@@ -1945,26 +1945,24 @@ class HOptimizedGraphBuilder: public HGraphBuilder, public AstVisitor {
void HandlePropertyAssignment(Assignment* expr);
void HandleCompoundAssignment(Assignment* expr);
- void HandlePolymorphicLoadNamedField(Property* expr,
+ void HandlePolymorphicLoadNamedField(int position,
+ BailoutId return_id,
HValue* object,
SmallMapList* types,
Handle<String> name);
- HInstruction* TryLoadPolymorphicAsMonomorphic(Property* expr,
- HValue* object,
+ HInstruction* TryLoadPolymorphicAsMonomorphic(HValue* object,
SmallMapList* types,
Handle<String> name);
void HandlePolymorphicStoreNamedField(int position,
BailoutId assignment_id,
HValue* object,
HValue* value,
- HValue* result,
SmallMapList* types,
Handle<String> name);
bool TryStorePolymorphicAsMonomorphic(int position,
BailoutId assignment_id,
HValue* object,
HValue* value,
- HValue* result,
SmallMapList* types,
Handle<String> name);
void HandlePolymorphicCallNamed(Call* expr,
@@ -1997,6 +1995,8 @@ class HOptimizedGraphBuilder: public HGraphBuilder, public AstVisitor {
HValue* val,
SmallMapList* maps);
+ LoadKeyedHoleMode BuildKeyedHoleMode(Handle<Map> map);
+
HInstruction* BuildMonomorphicElementAccess(HValue* object,
HValue* key,
HValue* val,
@@ -2008,7 +2008,7 @@ class HOptimizedGraphBuilder: public HGraphBuilder, public AstVisitor {
HValue* HandlePolymorphicElementAccess(HValue* object,
HValue* key,
HValue* val,
- Expression* prop,
+ SmallMapList* maps,
BailoutId ast_id,
int position,
bool is_store,
@@ -2033,19 +2033,31 @@ class HOptimizedGraphBuilder: public HGraphBuilder, public AstVisitor {
Handle<JSObject> holder);
HInstruction* BuildLoadNamedMonomorphic(HValue* object,
Handle<String> name,
- Property* expr,
Handle<Map> map);
HCheckMaps* AddCheckMap(HValue* object, Handle<Map> map);
- void BuildStoreNamed(Expression* expression,
- BailoutId id,
- int position,
- BailoutId assignment_id,
- Property* prop,
- HValue* object,
- HValue* store_value,
- HValue* result_value);
+ void BuildLoad(Property* property,
+ int position,
+ BailoutId ast_id);
+ void PushLoad(Property* property,
+ HValue* object,
+ HValue* key,
+ int position);
+
+ void BuildStoreForEffect(Expression* expression,
+ Property* prop,
+ BailoutId ast_id,
+ BailoutId return_id,
+ HValue* object,
+ HValue* key,
+ HValue* value);
+
+ void BuildStore(Expression* expression,
+ Property* prop,
+ BailoutId ast_id,
+ BailoutId return_id,
+ bool is_uninitialized = false);
HInstruction* BuildStoreNamedField(HValue* object,
Handle<String> name,
@@ -2067,60 +2079,31 @@ class HOptimizedGraphBuilder: public HGraphBuilder, public AstVisitor {
HInstruction* BuildThisFunction();
- HInstruction* BuildFastLiteral(HValue* context,
- Handle<JSObject> boilerplate_object,
- Handle<JSObject> original_boilerplate_object,
+ HInstruction* BuildFastLiteral(Handle<JSObject> boilerplate_object,
Handle<Object> allocation_site,
- int data_size,
- int pointer_size,
AllocationSiteMode mode);
- void BuildEmitDeepCopy(Handle<JSObject> boilerplat_object,
- Handle<JSObject> object,
- Handle<Object> allocation_site,
- HInstruction* target,
- int* offset,
- HInstruction* data_target,
- int* data_offset,
- AllocationSiteMode mode);
-
- MUST_USE_RESULT HValue* BuildEmitObjectHeader(
- Handle<JSObject> boilerplat_object,
- HInstruction* target,
- HInstruction* data_target,
- int object_offset,
- int elements_offset,
- int elements_size);
+ void BuildEmitObjectHeader(Handle<JSObject> boilerplate_object,
+ HInstruction* object);
+
+ void BuildInitElementsInObjectHeader(Handle<JSObject> boilerplate_object,
+ HInstruction* object,
+ HInstruction* object_elements);
void BuildEmitInObjectProperties(Handle<JSObject> boilerplate_object,
- Handle<JSObject> original_boilerplate_object,
- HValue* object_properties,
- HInstruction* target,
- int* offset,
- HInstruction* data_target,
- int* data_offset);
-
- void BuildEmitElements(Handle<FixedArrayBase> elements,
- Handle<FixedArrayBase> original_elements,
- ElementsKind kind,
- HValue* object_elements,
- HInstruction* target,
- int* offset,
- HInstruction* data_target,
- int* data_offset);
+ HInstruction* object);
+
+ void BuildEmitElements(Handle<JSObject> boilerplate_object,
+ Handle<FixedArrayBase> elements,
+ HValue* object_elements);
void BuildEmitFixedDoubleArray(Handle<FixedArrayBase> elements,
ElementsKind kind,
HValue* object_elements);
void BuildEmitFixedArray(Handle<FixedArrayBase> elements,
- Handle<FixedArrayBase> original_elements,
ElementsKind kind,
- HValue* object_elements,
- HInstruction* target,
- int* offset,
- HInstruction* data_target,
- int* data_offset);
+ HValue* object_elements);
void AddCheckPrototypeMaps(Handle<JSObject> holder,
Handle<Map> receiver_map);
@@ -2166,44 +2149,40 @@ class HOptimizedGraphBuilder: public HGraphBuilder, public AstVisitor {
Zone* AstContext::zone() const { return owner_->zone(); }
-class HStatistics: public Malloced {
+class HStatistics V8_FINAL : public Malloced {
public:
HStatistics()
- : timing_(5),
+ : times_(5),
names_(5),
sizes_(5),
- create_graph_(0),
- optimize_graph_(0),
- generate_code_(0),
total_size_(0),
- full_code_gen_(0),
source_size_(0) { }
void Initialize(CompilationInfo* info);
void Print();
- void SaveTiming(const char* name, int64_t ticks, unsigned size);
+ void SaveTiming(const char* name, TimeDelta time, unsigned size);
- void IncrementFullCodeGen(int64_t full_code_gen) {
+ void IncrementFullCodeGen(TimeDelta full_code_gen) {
full_code_gen_ += full_code_gen;
}
- void IncrementSubtotals(int64_t create_graph,
- int64_t optimize_graph,
- int64_t generate_code) {
+ void IncrementSubtotals(TimeDelta create_graph,
+ TimeDelta optimize_graph,
+ TimeDelta generate_code) {
create_graph_ += create_graph;
optimize_graph_ += optimize_graph;
generate_code_ += generate_code;
}
private:
- List<int64_t> timing_;
+ List<TimeDelta> times_;
List<const char*> names_;
List<unsigned> sizes_;
- int64_t create_graph_;
- int64_t optimize_graph_;
- int64_t generate_code_;
+ TimeDelta create_graph_;
+ TimeDelta optimize_graph_;
+ TimeDelta generate_code_;
unsigned total_size_;
- int64_t full_code_gen_;
+ TimeDelta full_code_gen_;
double source_size_;
};
@@ -2225,7 +2204,7 @@ class HPhase : public CompilationPhase {
};
-class HTracer: public Malloced {
+class HTracer V8_FINAL : public Malloced {
public:
explicit HTracer(int isolate_id)
: trace_(&string_allocator_), indent_(0) {
@@ -2246,7 +2225,7 @@ class HTracer: public Malloced {
void TraceLiveRanges(const char* name, LAllocator* allocator);
private:
- class Tag BASE_EMBEDDED {
+ class Tag V8_FINAL BASE_EMBEDDED {
public:
Tag(HTracer* tracer, const char* name) {
name_ = name;
@@ -2311,7 +2290,7 @@ class HTracer: public Malloced {
};
-class NoObservableSideEffectsScope {
+class NoObservableSideEffectsScope V8_FINAL {
public:
explicit NoObservableSideEffectsScope(HGraphBuilder* builder) :
builder_(builder) {
diff --git a/deps/v8/src/i18n.cc b/deps/v8/src/i18n.cc
index 5cfe4c43b..0ae19c823 100644
--- a/deps/v8/src/i18n.cc
+++ b/deps/v8/src/i18n.cc
@@ -28,6 +28,7 @@
#include "i18n.h"
+#include "unicode/brkiter.h"
#include "unicode/calendar.h"
#include "unicode/coll.h"
#include "unicode/curramt.h"
@@ -38,6 +39,7 @@
#include "unicode/locid.h"
#include "unicode/numfmt.h"
#include "unicode/numsys.h"
+#include "unicode/rbbi.h"
#include "unicode/smpdtfmt.h"
#include "unicode/timezone.h"
#include "unicode/uchar.h"
@@ -731,6 +733,69 @@ void SetResolvedCollatorSettings(Isolate* isolate,
}
}
+
+icu::BreakIterator* CreateICUBreakIterator(
+ Isolate* isolate,
+ const icu::Locale& icu_locale,
+ Handle<JSObject> options) {
+ UErrorCode status = U_ZERO_ERROR;
+ icu::BreakIterator* break_iterator = NULL;
+ icu::UnicodeString type;
+ if (!ExtractStringSetting(isolate, options, "type", &type)) return NULL;
+
+ if (type == UNICODE_STRING_SIMPLE("character")) {
+ break_iterator =
+ icu::BreakIterator::createCharacterInstance(icu_locale, status);
+ } else if (type == UNICODE_STRING_SIMPLE("sentence")) {
+ break_iterator =
+ icu::BreakIterator::createSentenceInstance(icu_locale, status);
+ } else if (type == UNICODE_STRING_SIMPLE("line")) {
+ break_iterator =
+ icu::BreakIterator::createLineInstance(icu_locale, status);
+ } else {
+    // Default is word iterator.
+ break_iterator =
+ icu::BreakIterator::createWordInstance(icu_locale, status);
+ }
+
+ if (U_FAILURE(status)) {
+ delete break_iterator;
+ return NULL;
+ }
+
+ return break_iterator;
+}
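
The "type" option above maps one-to-one onto the ICU factory functions. A hedged sketch of the JS-facing side; in this era the constructor was exposed as Intl.v8BreakIterator (a V8-specific name, not part of ECMA-402), wired up elsewhere in this patch:

    // Sketch, assuming the Intl.v8BreakIterator wiring exists:
    var it = new Intl.v8BreakIterator(['en'], {type: 'sentence'});
    // An omitted or unknown type falls through to the word iterator,
    // matching the else branch above.
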
+
+
+void SetResolvedBreakIteratorSettings(Isolate* isolate,
+ const icu::Locale& icu_locale,
+ icu::BreakIterator* break_iterator,
+ Handle<JSObject> resolved) {
+ UErrorCode status = U_ZERO_ERROR;
+
+ // Set the locale
+ char result[ULOC_FULLNAME_CAPACITY];
+ status = U_ZERO_ERROR;
+ uloc_toLanguageTag(
+ icu_locale.getName(), result, ULOC_FULLNAME_CAPACITY, FALSE, &status);
+ if (U_SUCCESS(status)) {
+ JSObject::SetProperty(
+ resolved,
+ isolate->factory()->NewStringFromAscii(CStrVector("locale")),
+ isolate->factory()->NewStringFromAscii(CStrVector(result)),
+ NONE,
+ kNonStrictMode);
+ } else {
+    // This should never happen, since we got the locale from ICU.
+ JSObject::SetProperty(
+ resolved,
+ isolate->factory()->NewStringFromAscii(CStrVector("locale")),
+ isolate->factory()->NewStringFromAscii(CStrVector("und")),
+ NONE,
+ kNonStrictMode);
+ }
+}
+
} // namespace
@@ -800,14 +865,14 @@ icu::SimpleDateFormat* DateFormat::UnpackDateFormat(
void DateFormat::DeleteDateFormat(v8::Isolate* isolate,
- Persistent<v8::Object>* object,
+ Persistent<v8::Value>* object,
void* param) {
// First delete the hidden C++ object.
delete reinterpret_cast<icu::SimpleDateFormat*>(Handle<JSObject>::cast(
v8::Utils::OpenPersistent(object))->GetInternalField(0));
// Then dispose of the persistent handle to JS object.
- object->Dispose(isolate);
+ object->Dispose();
}
@@ -864,14 +929,14 @@ icu::DecimalFormat* NumberFormat::UnpackNumberFormat(
void NumberFormat::DeleteNumberFormat(v8::Isolate* isolate,
- Persistent<v8::Object>* object,
+ Persistent<v8::Value>* object,
void* param) {
// First delete the hidden C++ object.
delete reinterpret_cast<icu::DecimalFormat*>(Handle<JSObject>::cast(
v8::Utils::OpenPersistent(object))->GetInternalField(0));
// Then dispose of the persistent handle to JS object.
- object->Dispose(isolate);
+ object->Dispose();
}
@@ -925,14 +990,81 @@ icu::Collator* Collator::UnpackCollator(Isolate* isolate,
void Collator::DeleteCollator(v8::Isolate* isolate,
- Persistent<v8::Object>* object,
+ Persistent<v8::Value>* object,
void* param) {
// First delete the hidden C++ object.
delete reinterpret_cast<icu::Collator*>(Handle<JSObject>::cast(
v8::Utils::OpenPersistent(object))->GetInternalField(0));
// Then dispose of the persistent handle to JS object.
- object->Dispose(isolate);
+ object->Dispose();
+}
+
+
+icu::BreakIterator* BreakIterator::InitializeBreakIterator(
+ Isolate* isolate,
+ Handle<String> locale,
+ Handle<JSObject> options,
+ Handle<JSObject> resolved) {
+ // Convert BCP47 into ICU locale format.
+ UErrorCode status = U_ZERO_ERROR;
+ icu::Locale icu_locale;
+ char icu_result[ULOC_FULLNAME_CAPACITY];
+ int icu_length = 0;
+ v8::String::Utf8Value bcp47_locale(v8::Utils::ToLocal(locale));
+ if (bcp47_locale.length() != 0) {
+ uloc_forLanguageTag(*bcp47_locale, icu_result, ULOC_FULLNAME_CAPACITY,
+ &icu_length, &status);
+ if (U_FAILURE(status) || icu_length == 0) {
+ return NULL;
+ }
+ icu_locale = icu::Locale(icu_result);
+ }
+
+ icu::BreakIterator* break_iterator = CreateICUBreakIterator(
+ isolate, icu_locale, options);
+ if (!break_iterator) {
+ // Remove extensions and try again.
+ icu::Locale no_extension_locale(icu_locale.getBaseName());
+ break_iterator = CreateICUBreakIterator(
+ isolate, no_extension_locale, options);
+
+ // Set resolved settings (locale).
+ SetResolvedBreakIteratorSettings(
+ isolate, no_extension_locale, break_iterator, resolved);
+ } else {
+ SetResolvedBreakIteratorSettings(
+ isolate, icu_locale, break_iterator, resolved);
+ }
+
+ return break_iterator;
+}
+
+
+icu::BreakIterator* BreakIterator::UnpackBreakIterator(Isolate* isolate,
+ Handle<JSObject> obj) {
+ Handle<String> key =
+ isolate->factory()->NewStringFromAscii(CStrVector("breakIterator"));
+ if (obj->HasLocalProperty(*key)) {
+ return reinterpret_cast<icu::BreakIterator*>(obj->GetInternalField(0));
+ }
+
+ return NULL;
+}
+
+
+void BreakIterator::DeleteBreakIterator(v8::Isolate* isolate,
+ Persistent<v8::Value>* object,
+ void* param) {
+ // First delete the hidden C++ object.
+ delete reinterpret_cast<icu::BreakIterator*>(Handle<JSObject>::cast(
+ v8::Utils::OpenPersistent(object))->GetInternalField(0));
+
+ delete reinterpret_cast<icu::UnicodeString*>(Handle<JSObject>::cast(
+ v8::Utils::OpenPersistent(object))->GetInternalField(1));
+
+ // Then dispose of the persistent handle to JS object.
+ object->Dispose();
}
} } // namespace v8::internal
diff --git a/deps/v8/src/i18n.h b/deps/v8/src/i18n.h
index 5825ab6c6..08e7f2b71 100644
--- a/deps/v8/src/i18n.h
+++ b/deps/v8/src/i18n.h
@@ -33,6 +33,7 @@
#include "v8.h"
namespace U_ICU_NAMESPACE {
+class BreakIterator;
class Collator;
class DecimalFormat;
class SimpleDateFormat;
@@ -71,7 +72,7 @@ class DateFormat {
// Release memory we allocated for the DateFormat once the JS object that
// holds the pointer gets garbage collected.
static void DeleteDateFormat(v8::Isolate* isolate,
- Persistent<v8::Object>* object,
+ Persistent<v8::Value>* object,
void* param);
private:
DateFormat();
@@ -95,7 +96,7 @@ class NumberFormat {
// Release memory we allocated for the NumberFormat once the JS object that
// holds the pointer gets garbage collected.
static void DeleteNumberFormat(v8::Isolate* isolate,
- Persistent<v8::Object>* object,
+ Persistent<v8::Value>* object,
void* param);
private:
NumberFormat();
@@ -118,12 +119,36 @@ class Collator {
// Release memory we allocated for the Collator once the JS object that holds
// the pointer gets garbage collected.
static void DeleteCollator(v8::Isolate* isolate,
- Persistent<v8::Object>* object,
+ Persistent<v8::Value>* object,
void* param);
private:
Collator();
};
+class BreakIterator {
+ public:
+  // Create a BreakIterator for the specified locale and options. Returns the
+ // resolved settings for the locale / options.
+ static icu::BreakIterator* InitializeBreakIterator(
+ Isolate* isolate,
+ Handle<String> locale,
+ Handle<JSObject> options,
+ Handle<JSObject> resolved);
+
+ // Unpacks break iterator object from corresponding JavaScript object.
+ static icu::BreakIterator* UnpackBreakIterator(Isolate* isolate,
+ Handle<JSObject> obj);
+
+ // Release memory we allocated for the BreakIterator once the JS object that
+ // holds the pointer gets garbage collected.
+ static void DeleteBreakIterator(v8::Isolate* isolate,
+ Persistent<v8::Value>* object,
+ void* param);
+
+ private:
+ BreakIterator();
+};
+
} } // namespace v8::internal
#endif // V8_I18N_H_
diff --git a/deps/v8/src/i18n.js b/deps/v8/src/i18n.js
new file mode 100644
index 000000000..1798bbba7
--- /dev/null
+++ b/deps/v8/src/i18n.js
@@ -0,0 +1,2116 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// ECMAScript 402 API implementation.
+
+/**
+ * Intl object is a single object that has some named properties,
+ * all of which are constructors.
+ */
+$Object.defineProperty(global, "Intl", { enumerable: false, value: (function() {
+
+'use strict';
+
+var Intl = {};
+
+var undefined = global.undefined;
+
+var AVAILABLE_SERVICES = ['collator',
+ 'numberformat',
+ 'dateformat',
+ 'breakiterator'];
+
+/**
+ * Caches available locales for each service.
+ */
+var AVAILABLE_LOCALES = {
+ 'collator': undefined,
+ 'numberformat': undefined,
+ 'dateformat': undefined,
+ 'breakiterator': undefined
+};
+
+/**
+ * Caches default ICU locale.
+ */
+var DEFAULT_ICU_LOCALE = undefined;
+
+/**
+ * Unicode extension regular expression.
+ */
+var UNICODE_EXTENSION_RE = undefined;
+
+function GetUnicodeExtensionRE() {
+ if (UNICODE_EXTENSION_RE === undefined) {
+ UNICODE_EXTENSION_RE = new $RegExp('-u(-[a-z0-9]{2,8})+', 'g');
+ }
+ return UNICODE_EXTENSION_RE;
+}
+
+/**
+ * Matches any Unicode extension.
+ */
+var ANY_EXTENSION_RE = undefined;
+
+function GetAnyExtensionRE() {
+ if (ANY_EXTENSION_RE === undefined) {
+ ANY_EXTENSION_RE = new $RegExp('-[a-z0-9]{1}-.*', 'g');
+ }
+ return ANY_EXTENSION_RE;
+}
+
+/**
+ * Replace quoted text (single quote, anything but the quote and quote again).
+ */
+var QUOTED_STRING_RE = undefined;
+
+function GetQuotedStringRE() {
+ if (QUOTED_STRING_RE === undefined) {
+ QUOTED_STRING_RE = new $RegExp("'[^']+'", 'g');
+ }
+ return QUOTED_STRING_RE;
+}
+
+/**
+ * Matches valid service name.
+ */
+var SERVICE_RE = undefined;
+
+function GetServiceRE() {
+ if (SERVICE_RE === undefined) {
+ SERVICE_RE =
+ new $RegExp('^(collator|numberformat|dateformat|breakiterator)$');
+ }
+ return SERVICE_RE;
+}
+
+/**
+ * Validates a language tag against bcp47 spec.
+ * Actual value is assigned on first run.
+ */
+var LANGUAGE_TAG_RE = undefined;
+
+function GetLanguageTagRE() {
+ if (LANGUAGE_TAG_RE === undefined) {
+ BuildLanguageTagREs();
+ }
+ return LANGUAGE_TAG_RE;
+}
+
+/**
+ * Helps find duplicate variants in the language tag.
+ */
+var LANGUAGE_VARIANT_RE = undefined;
+
+function GetLanguageVariantRE() {
+ if (LANGUAGE_VARIANT_RE === undefined) {
+ BuildLanguageTagREs();
+ }
+ return LANGUAGE_VARIANT_RE;
+}
+
+/**
+ * Helps find duplicate singletons in the language tag.
+ */
+var LANGUAGE_SINGLETON_RE = undefined;
+
+function GetLanguageSingletonRE() {
+ if (LANGUAGE_SINGLETON_RE === undefined) {
+ BuildLanguageTagREs();
+ }
+ return LANGUAGE_SINGLETON_RE;
+}
+
+/**
+ * Matches valid IANA time zone names.
+ */
+var TIMEZONE_NAME_CHECK_RE = undefined;
+
+function GetTimezoneNameCheckRE() {
+ if (TIMEZONE_NAME_CHECK_RE === undefined) {
+ TIMEZONE_NAME_CHECK_RE =
+ new $RegExp('^([A-Za-z]+)/([A-Za-z]+)(?:_([A-Za-z]+))*$');
+ }
+ return TIMEZONE_NAME_CHECK_RE;
+}
+
+/**
+ * Maps ICU calendar names into LDML type.
+ */
+var ICU_CALENDAR_MAP = {
+ 'gregorian': 'gregory',
+ 'japanese': 'japanese',
+ 'buddhist': 'buddhist',
+ 'roc': 'roc',
+ 'persian': 'persian',
+ 'islamic-civil': 'islamicc',
+ 'islamic': 'islamic',
+ 'hebrew': 'hebrew',
+ 'chinese': 'chinese',
+ 'indian': 'indian',
+ 'coptic': 'coptic',
+ 'ethiopic': 'ethiopic',
+ 'ethiopic-amete-alem': 'ethioaa'
+};
+
+/**
+ * Map of Unicode extensions to option properties, and their values and types,
+ * for a collator.
+ */
+var COLLATOR_KEY_MAP = {
+ 'kn': {'property': 'numeric', 'type': 'boolean'},
+ 'kf': {'property': 'caseFirst', 'type': 'string',
+ 'values': ['false', 'lower', 'upper']}
+};
+
+/**
+ * Map of Unicode extensions to option properties, and their values and types,
+ * for a number format.
+ */
+var NUMBER_FORMAT_KEY_MAP = {
+ 'nu': {'property': undefined, 'type': 'string'}
+};
+
+/**
+ * Map of Unicode extensions to option properties, and their values and types,
+ * for a date/time format.
+ */
+var DATETIME_FORMAT_KEY_MAP = {
+ 'ca': {'property': undefined, 'type': 'string'},
+ 'nu': {'property': undefined, 'type': 'string'}
+};
+
+/**
+ * Allowed -u-co- values. List taken from:
+ * http://unicode.org/repos/cldr/trunk/common/bcp47/collation.xml
+ */
+var ALLOWED_CO_VALUES = [
+ 'big5han', 'dict', 'direct', 'ducet', 'gb2312', 'phonebk', 'phonetic',
+ 'pinyin', 'reformed', 'searchjl', 'stroke', 'trad', 'unihan', 'zhuyin'
+];
+
+/**
+ * Error message for when function object is created with new and it's not
+ * a constructor.
+ */
+var ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR =
+ 'Function object that\'s not a constructor was created with new';
+
+
+/**
+ * Adds bound method to the prototype of the given object.
+ */
+function addBoundMethod(obj, methodName, implementation, length) {
+ function getter() {
+ if (!this || typeof this !== 'object' ||
+ this.__initializedIntlObject === undefined) {
+ throw new $TypeError('Method ' + methodName + ' called on a ' +
+ 'non-object or on a wrong type of object.');
+ }
+ var internalName = '__bound' + methodName + '__';
+ if (this[internalName] === undefined) {
+ var that = this;
+ var boundMethod;
+ if (length === undefined || length === 2) {
+ boundMethod = function(x, y) {
+ if (%_IsConstructCall()) {
+ throw new $TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
+ }
+ return implementation(that, x, y);
+ }
+ } else if (length === 1) {
+ boundMethod = function(x) {
+ if (%_IsConstructCall()) {
+ throw new $TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
+ }
+ return implementation(that, x);
+ }
+ } else {
+ boundMethod = function() {
+ if (%_IsConstructCall()) {
+ throw new $TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
+ }
+          // DateTimeFormat.format needs to be a 0-arg method, but it can
+          // still receive an optional dateValue param. If one was provided,
+          // pass it along.
+ if (arguments.length > 0) {
+ return implementation(that, arguments[0]);
+ } else {
+ return implementation(that);
+ }
+ }
+ }
+ %FunctionSetName(boundMethod, internalName);
+ %FunctionRemovePrototype(boundMethod);
+ %SetNativeFlag(boundMethod);
+ this[internalName] = boundMethod;
+ }
+ return this[internalName];
+ }
+
+ %FunctionSetName(getter, methodName);
+ %FunctionRemovePrototype(getter);
+ %SetNativeFlag(getter);
+
+ $Object.defineProperty(obj.prototype, methodName, {
+ get: getter,
+ enumerable: false,
+ configurable: true
+ });
+}
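
The getter installed here binds lazily and caches per instance, so extracting a method off an Intl object keeps its receiver. A sketch, assuming compare is wired through addBoundMethod later in this file:

    var c = new Intl.Collator('en');
    var cmp = c.compare;      // first access builds and caches __boundcompare__
    cmp('a', 'b');            // still uses `c`; no explicit receiver needed
    c.compare === c.compare;  // true: the cached bound function is reused
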
+
+
+/**
+ * Returns an intersection of locales and service supported locales.
+ * Parameter locales is treated as a priority list.
+ */
+function supportedLocalesOf(service, locales, options) {
+ if (service.match(GetServiceRE()) === null) {
+ throw new $Error('Internal error, wrong service type: ' + service);
+ }
+
+ // Provide defaults if matcher was not specified.
+ if (options === undefined) {
+ options = {};
+ } else {
+ options = toObject(options);
+ }
+
+ var matcher = options.localeMatcher;
+ if (matcher !== undefined) {
+ matcher = $String(matcher);
+ if (matcher !== 'lookup' && matcher !== 'best fit') {
+      throw new $RangeError('Illegal value for localeMatcher: ' + matcher);
+ }
+ } else {
+ matcher = 'best fit';
+ }
+
+ var requestedLocales = initializeLocaleList(locales);
+
+ // Cache these, they don't ever change per service.
+ if (AVAILABLE_LOCALES[service] === undefined) {
+ AVAILABLE_LOCALES[service] = getAvailableLocalesOf(service);
+ }
+
+ // Use either best fit or lookup algorithm to match locales.
+ if (matcher === 'best fit') {
+ return initializeLocaleList(bestFitSupportedLocalesOf(
+ requestedLocales, AVAILABLE_LOCALES[service]));
+ }
+
+ return initializeLocaleList(lookupSupportedLocalesOf(
+ requestedLocales, AVAILABLE_LOCALES[service]));
+}
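
A hedged usage sketch via the per-service wrappers defined later in the file (locale availability is an assumption):

    // Assuming 'de' and 'sr' data are available to the collator service:
    Intl.Collator.supportedLocalesOf(['de-DE', 'sr-RS-u-nu-latn', 'xx']);
    // -> ['de-DE', 'sr-RS-u-nu-latn']; unsupported tags simply drop out
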
+
+
+/**
+ * Returns the subset of the provided BCP 47 language priority list for which
+ * this service has a matching locale when using the BCP 47 Lookup algorithm.
+ * Locales appear in the same order in the returned list as in the input list.
+ */
+function lookupSupportedLocalesOf(requestedLocales, availableLocales) {
+ var matchedLocales = [];
+ for (var i = 0; i < requestedLocales.length; ++i) {
+ // Remove -u- extension.
+ var locale = requestedLocales[i].replace(GetUnicodeExtensionRE(), '');
+ do {
+ if (availableLocales[locale] !== undefined) {
+ // Push requested locale not the resolved one.
+ matchedLocales.push(requestedLocales[i]);
+ break;
+ }
+ // Truncate locale if possible, if not break.
+ var pos = locale.lastIndexOf('-');
+ if (pos === -1) {
+ break;
+ }
+ locale = locale.substring(0, pos);
+ } while (true);
+ }
+
+ return matchedLocales;
+}
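
The truncation loop walks each tag from most to least specific and, on a match, pushes the original request rather than the prefix that matched:

    lookupSupportedLocalesOf(['de-DE-u-co-phonebk'], {'de': null});
    // -u- extension stripped -> 'de-DE' -> truncated to 'de' -> match;
    // result: ['de-DE-u-co-phonebk']
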
+
+
+/**
+ * Returns the subset of the provided BCP 47 language priority list for which
+ * this service has a matching locale when using the implementation
+ * dependent algorithm.
+ * Locales appear in the same order in the returned list as in the input list.
+ */
+function bestFitSupportedLocalesOf(requestedLocales, availableLocales) {
+ // TODO(cira): implement better best fit algorithm.
+ return lookupSupportedLocalesOf(requestedLocales, availableLocales);
+}
+
+
+/**
+ * Returns a getOption function that extracts property value for given
+ * options object. If property is missing it returns defaultValue. If value
+ * is out of range for that property it throws RangeError.
+ */
+function getGetOption(options, caller) {
+ if (options === undefined) {
+ throw new $Error('Internal ' + caller + ' error. ' +
+ 'Default options are missing.');
+ }
+
+ var getOption = function getOption(property, type, values, defaultValue) {
+ if (options[property] !== undefined) {
+ var value = options[property];
+ switch (type) {
+ case 'boolean':
+ value = $Boolean(value);
+ break;
+ case 'string':
+ value = $String(value);
+ break;
+ case 'number':
+ value = $Number(value);
+ break;
+ default:
+ throw new $Error('Internal error. Wrong value type.');
+ }
+ if (values !== undefined && values.indexOf(value) === -1) {
+ throw new $RangeError('Value ' + value + ' out of range for ' + caller +
+ ' options property ' + property);
+ }
+
+ return value;
+ }
+
+ return defaultValue;
+ }
+
+ return getOption;
+}
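
The returned getOption coerces the value, validates it against the allowed list, and falls back to the default; out-of-range values throw. For instance:

    var getOption = getGetOption({usage: 'search'}, 'collator');
    getOption('usage', 'string', ['sort', 'search'], 'sort');  // 'search'
    getOption('sensitivity', 'string',
              ['base', 'accent', 'case', 'variant']);          // undefined
    getOption('usage', 'string', ['sort'], 'sort');            // throws RangeError
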
+
+
+/**
+ * Compares a BCP 47 language priority list requestedLocales against the locales
+ * in availableLocales and determines the best available language to meet the
+ * request. Two algorithms are available to match the locales: the Lookup
+ * algorithm described in RFC 4647 section 3.4, and an implementation dependent
+ * best-fit algorithm. Independent of the locale matching algorithm, options
+ * specified through Unicode locale extension sequences are negotiated
+ * separately, taking the caller's relevant extension keys and locale data as
+ * well as client-provided options into consideration. Returns an object with
+ * a locale property whose value is the language tag of the selected locale,
+ * and properties for each key in relevantExtensionKeys providing the selected
+ * value for that key.
+ */
+function resolveLocale(service, requestedLocales, options) {
+ requestedLocales = initializeLocaleList(requestedLocales);
+
+ var getOption = getGetOption(options, service);
+ var matcher = getOption('localeMatcher', 'string',
+ ['lookup', 'best fit'], 'best fit');
+ var resolved;
+ if (matcher === 'lookup') {
+ resolved = lookupMatcher(service, requestedLocales);
+ } else {
+ resolved = bestFitMatcher(service, requestedLocales);
+ }
+
+ return resolved;
+}
+
+
+/**
+ * Returns the best-matched supported locale and extension info using the
+ * basic lookup algorithm.
+ */
+function lookupMatcher(service, requestedLocales) {
+ if (service.match(GetServiceRE()) === null) {
+ throw new $Error('Internal error, wrong service type: ' + service);
+ }
+
+ // Cache these, they don't ever change per service.
+ if (AVAILABLE_LOCALES[service] === undefined) {
+ AVAILABLE_LOCALES[service] = getAvailableLocalesOf(service);
+ }
+
+ for (var i = 0; i < requestedLocales.length; ++i) {
+ // Remove all extensions.
+ var locale = requestedLocales[i].replace(GetAnyExtensionRE(), '');
+ do {
+ if (AVAILABLE_LOCALES[service][locale] !== undefined) {
+ // Return the resolved locale and extension.
+ var extensionMatch = requestedLocales[i].match(GetUnicodeExtensionRE());
+ var extension = (extensionMatch === null) ? '' : extensionMatch[0];
+ return {'locale': locale, 'extension': extension, 'position': i};
+ }
+ // Truncate locale if possible.
+ var pos = locale.lastIndexOf('-');
+ if (pos === -1) {
+ break;
+ }
+ locale = locale.substring(0, pos);
+ } while (true);
+ }
+
+ // Didn't find a match, return default.
+ if (DEFAULT_ICU_LOCALE === undefined) {
+ DEFAULT_ICU_LOCALE = %GetDefaultICULocale();
+ }
+
+ return {'locale': DEFAULT_ICU_LOCALE, 'extension': '', 'position': -1};
+}
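
The result carries the resolved locale, the original -u- extension, and the index of the request that matched; the availability below is hypothetical:

    // Assuming 'sr' is the longest prefix this service supports:
    lookupMatcher('collator', ['sr-RS-u-nu-latn']);
    // -> {locale: 'sr', extension: '-u-nu-latn', position: 0}
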
+
+
+/**
+ * Returns the best-matched supported locale and extension info using an
+ * implementation-dependent algorithm.
+ */
+function bestFitMatcher(service, requestedLocales) {
+ // TODO(cira): implement better best fit algorithm.
+ return lookupMatcher(service, requestedLocales);
+}
+
+
+/**
+ * Parses Unicode extension into key - value map.
+ * Returns empty object if the extension string is invalid.
+ * We are not concerned with the validity of the values at this point.
+ */
+function parseExtension(extension) {
+ var extensionSplit = extension.split('-');
+
+ // Assume ['', 'u', ...] input, but don't throw.
+ if (extensionSplit.length <= 2 ||
+      (extensionSplit[0] !== '' || extensionSplit[1] !== 'u')) {
+ return {};
+ }
+
+ // Key is {2}alphanum, value is {3,8}alphanum.
+ // Some keys may not have explicit values (booleans).
+ var extensionMap = {};
+ var previousKey = undefined;
+ for (var i = 2; i < extensionSplit.length; ++i) {
+ var length = extensionSplit[i].length;
+ var element = extensionSplit[i];
+ if (length === 2) {
+ extensionMap[element] = undefined;
+ previousKey = element;
+    } else if (length >= 3 && length <= 8 && previousKey !== undefined) {
+ extensionMap[previousKey] = element;
+ previousKey = undefined;
+ } else {
+ // There is a value that's too long, or that doesn't have a key.
+ return {};
+ }
+ }
+
+ return extensionMap;
+}
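
Keys are two alphanumerics and values three to eight; boolean keys may omit their value. For example:

    parseExtension('-u-kn-true-kf-upper-nu');
    // -> {kn: 'true', kf: 'upper', nu: undefined}
    parseExtension('-u-averylongvalue');  // malformed, so -> {}
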
+
+
+/**
+ * Converts parameter to an Object if possible.
+ */
+function toObject(value) {
+ if (value === undefined || value === null) {
+ throw new $TypeError('Value cannot be converted to an Object.');
+ }
+
+ return $Object(value);
+}
+
+
+/**
+ * Populates internalOptions object with boolean key-value pairs
+ * from extensionMap and options.
+ * Returns filtered extension (number and date format constructors use
+ * Unicode extensions for passing parameters to ICU).
+ * It's used for extension-option pairs only, e.g. kn-normalization, but not
+ * for 'sensitivity' since it doesn't have an extension equivalent.
+ * Extensions like nu and ca don't have an options equivalent, so we place
+ * undefined in map.property to denote that.
+ */
+function setOptions(inOptions, extensionMap, keyValues, getOption, outOptions) {
+ var extension = '';
+
+ var updateExtension = function updateExtension(key, value) {
+ return '-' + key + '-' + $String(value);
+ }
+
+ var updateProperty = function updateProperty(property, type, value) {
+ if (type === 'boolean' && (typeof value === 'string')) {
+ value = (value === 'true') ? true : false;
+ }
+
+ if (property !== undefined) {
+ defineWEProperty(outOptions, property, value);
+ }
+ }
+
+ for (var key in keyValues) {
+ if (keyValues.hasOwnProperty(key)) {
+ var value = undefined;
+ var map = keyValues[key];
+ if (map.property !== undefined) {
+ // This may return true if user specifies numeric: 'false', since
+ // Boolean('nonempty') === true.
+ value = getOption(map.property, map.type, map.values);
+ }
+ if (value !== undefined) {
+ updateProperty(map.property, map.type, value);
+ extension += updateExtension(key, value);
+ continue;
+ }
+ // User options didn't have it, check Unicode extension.
+ // Here we want to convert strings 'true', 'false' into proper Boolean
+ // values (not a user error).
+ if (extensionMap.hasOwnProperty(key)) {
+ value = extensionMap[key];
+ if (value !== undefined) {
+ updateProperty(map.property, map.type, value);
+ extension += updateExtension(key, value);
+ } else if (map.type === 'boolean') {
+ // Boolean keys are allowed not to have values in Unicode extension.
+ // Those default to true.
+ updateProperty(map.property, map.type, true);
+ extension += updateExtension(key, true);
+ }
+ }
+ }
+ }
+
+  return extension === '' ? '' : '-u' + extension;
+}
+
+
+/**
+ * Converts all OwnProperties into
+ * configurable: false, writable: false, enumerable: true.
+ */
+function freezeArray(array) {
+ array.forEach(function(element, index) {
+ $Object.defineProperty(array, index, {value: element,
+ configurable: false,
+ writable: false,
+ enumerable: true});
+ });
+
+ $Object.defineProperty(array, 'length', {value: array.length,
+ writable: false});
+
+ return array;
+}
+
+
+/**
+ * It's sometimes desirable to keep the user-requested locale instead of the
+ * ICU-supported one (zh-TW is equivalent to zh-Hant-TW, so we should keep the
+ * shorter one if that is what the user requested).
+ * This function returns the user-specified tag if its maximized form matches
+ * the ICU-resolved locale. If not, we return the ICU result.
+ */
+function getOptimalLanguageTag(original, resolved) {
+ // Returns Array<Object>, where each object has maximized and base properties.
+ // Maximized: zh -> zh-Hans-CN
+ // Base: zh-CN-u-ca-gregory -> zh-CN
+ // Take care of grandfathered or simple cases.
+ if (original === resolved) {
+ return original;
+ }
+
+ var locales = %GetLanguageTagVariants([original, resolved]);
+ if (locales[0].maximized !== locales[1].maximized) {
+ return resolved;
+ }
+
+ // Preserve extensions of resolved locale, but swap base tags with original.
+ var resolvedBase = new $RegExp('^' + locales[1].base);
+ return resolved.replace(resolvedBase, locales[0].base);
+}
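
For example, zh-TW maximizes to the same tag as zh-Hant-TW, so the shorter user request wins, while a genuinely different resolution is kept:

    // Assuming %GetLanguageTagVariants maximizes both tags to zh-Hant-TW:
    getOptimalLanguageTag('zh-TW', 'zh-Hant-TW');  // -> 'zh-TW'
    getOptimalLanguageTag('sr', 'de');             // -> 'de' (forms differ)
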
+
+
+/**
+ * Returns an Object that contains all of supported locales for a given
+ * service.
+ * In addition to the supported locales we add xx-ZZ locale for each xx-Yyyy-ZZ
+ * that is supported. This is required by the spec.
+ */
+function getAvailableLocalesOf(service) {
+ var available = %AvailableLocalesOf(service);
+
+ for (var i in available) {
+ if (available.hasOwnProperty(i)) {
+ var parts = i.match(/^([a-z]{2,3})-([A-Z][a-z]{3})-([A-Z]{2})$/);
+ if (parts !== null) {
+ // Build xx-ZZ. We don't care about the actual value,
+        // as long as it's not undefined.
+ available[parts[1] + '-' + parts[3]] = null;
+ }
+ }
+ }
+
+ return available;
+}
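
The xx-ZZ synthesis leans on the capture groups of the pattern above; the stored value only needs to be defined:

    'sr-Cyrl-RS'.match(/^([a-z]{2,3})-([A-Z][a-z]{3})-([A-Z]{2})$/);
    // -> ['sr-Cyrl-RS', 'sr', 'Cyrl', 'RS'], so available['sr-RS'] = null
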
+
+
+/**
+ * Defines a property and sets writable and enumerable to true.
+ * Configurable is false by default.
+ */
+function defineWEProperty(object, property, value) {
+ $Object.defineProperty(object, property,
+ {value: value, writable: true, enumerable: true});
+}
+
+
+/**
+ * Adds property to an object if the value is not undefined.
+ * Sets configurable descriptor to false.
+ */
+function addWEPropertyIfDefined(object, property, value) {
+ if (value !== undefined) {
+ defineWEProperty(object, property, value);
+ }
+}
+
+
+/**
+ * Defines a property and sets writable, enumerable and configurable to true.
+ */
+function defineWECProperty(object, property, value) {
+ $Object.defineProperty(object, property,
+ {value: value,
+ writable: true,
+ enumerable: true,
+ configurable: true});
+}
+
+
+/**
+ * Adds property to an object if the value is not undefined.
+ * Sets all descriptors to true.
+ */
+function addWECPropertyIfDefined(object, property, value) {
+ if (value !== undefined) {
+ defineWECProperty(object, property, value);
+ }
+}
+
+
+/**
+ * Returns the titlecased word, e.g. aMeRicA -> America.
+ */
+function toTitleCaseWord(word) {
+ return word.substr(0, 1).toUpperCase() + word.substr(1).toLowerCase();
+}
+
+/**
+ * Canonicalizes the language tag, or throws in case the tag is invalid.
+ */
+function canonicalizeLanguageTag(localeID) {
+ // null is typeof 'object' so we have to do extra check.
+  // null is typeof 'object' so we have to do an extra check.
+ localeID === null) {
+ throw new $TypeError('Language ID should be string or object.');
+ }
+
+ var localeString = $String(localeID);
+
+ if (isValidLanguageTag(localeString) === false) {
+ throw new $RangeError('Invalid language tag: ' + localeString);
+ }
+
+ // This call will strip -kn but not -kn-true extensions.
+  // ICU bug filed - http://bugs.icu-project.org/trac/ticket/9265.
+ // TODO(cira): check if -u-kn-true-kc-true-kh-true still throws after
+ // upgrade to ICU 4.9.
+ var tag = %CanonicalizeLanguageTag(localeString);
+ if (tag === 'invalid-tag') {
+ throw new $RangeError('Invalid language tag: ' + localeString);
+ }
+
+ return tag;
+}
+
+
+/**
+ * Returns an array where all locales are canonicalized and duplicates removed.
+ * Throws on locales that are not well formed BCP47 tags.
+ */
+function initializeLocaleList(locales) {
+ var seen = [];
+ if (locales === undefined) {
+ // Constructor is called without arguments.
+ seen = [];
+ } else {
+ // We allow single string localeID.
+ if (typeof locales === 'string') {
+ seen.push(canonicalizeLanguageTag(locales));
+ return freezeArray(seen);
+ }
+
+ var o = toObject(locales);
+ // Converts it to UInt32 (>>> is shr on 32bit integers).
+ var len = o.length >>> 0;
+
+ for (var k = 0; k < len; k++) {
+ if (k in o) {
+ var value = o[k];
+
+ var tag = canonicalizeLanguageTag(value);
+
+ if (seen.indexOf(tag) === -1) {
+ seen.push(tag);
+ }
+ }
+ }
+ }
+
+ return freezeArray(seen);
+}
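
The result is canonicalized, de-duplicated, and frozen, with a single string accepted as shorthand (canonical forms below assume ICU's tag canonicalization):

    initializeLocaleList('DE-de');       // -> ['de-DE']
    initializeLocaleList(['en', 'EN']);  // -> ['en']; duplicates collapse
    initializeLocaleList();              // -> []
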
+
+
+/**
+ * Validates the language tag. Section 2.2.9 of the bcp47 spec
+ * defines a valid tag.
+ *
+ * ICU is too permissive and lets invalid tags, like
+ * hant-cmn-cn, through.
+ *
+ * Returns false if the language tag is invalid.
+ */
+function isValidLanguageTag(locale) {
+  // Check if it's well-formed, including grandfathered tags.
+ if (GetLanguageTagRE().test(locale) === false) {
+ return false;
+ }
+
+ // Just return if it's a x- form. It's all private.
+ if (locale.indexOf('x-') === 0) {
+ return true;
+ }
+
+ // Check if there are any duplicate variants or singletons (extensions).
+
+ // Remove private use section.
+ locale = locale.split(/-x-/)[0];
+
+ // Skip language since it can match variant regex, so we start from 1.
+ // We are matching i-klingon here, but that's ok, since i-klingon-klingon
+ // is not valid and would fail LANGUAGE_TAG_RE test.
+ var variants = [];
+ var extensions = [];
+ var parts = locale.split(/-/);
+ for (var i = 1; i < parts.length; i++) {
+ var value = parts[i];
+ if (GetLanguageVariantRE().test(value) === true && extensions.length === 0) {
+ if (variants.indexOf(value) === -1) {
+ variants.push(value);
+ } else {
+ return false;
+ }
+ }
+
+ if (GetLanguageSingletonRE().test(value) === true) {
+ if (extensions.indexOf(value) === -1) {
+ extensions.push(value);
+ } else {
+ return false;
+ }
+ }
+ }
+
+ return true;
+}
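
The duplicate checks reject tags that are well-formed but still invalid:

    isValidLanguageTag('de-DE-1996-1996');            // false: duplicate variant
    isValidLanguageTag('en-u-nu-latn-u-ca-gregory');  // false: duplicate singleton
    isValidLanguageTag('x-whatever');                 // true: private use only
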
+
+
+/**
+ * Builds a regular expression that validates the language tag
+ * against bcp47 spec.
+ * Uses http://tools.ietf.org/html/bcp47, section 2.1, ABNF.
+ * Runs on load and initializes the global REs.
+ */
+function BuildLanguageTagREs() {
+ var alpha = '[a-zA-Z]';
+ var digit = '[0-9]';
+ var alphanum = '(' + alpha + '|' + digit + ')';
+ var regular = '(art-lojban|cel-gaulish|no-bok|no-nyn|zh-guoyu|zh-hakka|' +
+ 'zh-min|zh-min-nan|zh-xiang)';
+ var irregular = '(en-GB-oed|i-ami|i-bnn|i-default|i-enochian|i-hak|' +
+ 'i-klingon|i-lux|i-mingo|i-navajo|i-pwn|i-tao|i-tay|' +
+ 'i-tsu|sgn-BE-FR|sgn-BE-NL|sgn-CH-DE)';
+ var grandfathered = '(' + irregular + '|' + regular + ')';
+ var privateUse = '(x(-' + alphanum + '{1,8})+)';
+
+ var singleton = '(' + digit + '|[A-WY-Za-wy-z])';
+ LANGUAGE_SINGLETON_RE = new $RegExp('^' + singleton + '$', 'i');
+
+ var extension = '(' + singleton + '(-' + alphanum + '{2,8})+)';
+
+ var variant = '(' + alphanum + '{5,8}|(' + digit + alphanum + '{3}))';
+ LANGUAGE_VARIANT_RE = new $RegExp('^' + variant + '$', 'i');
+
+ var region = '(' + alpha + '{2}|' + digit + '{3})';
+ var script = '(' + alpha + '{4})';
+ var extLang = '(' + alpha + '{3}(-' + alpha + '{3}){0,2})';
+ var language = '(' + alpha + '{2,3}(-' + extLang + ')?|' + alpha + '{4}|' +
+ alpha + '{5,8})';
+ var langTag = language + '(-' + script + ')?(-' + region + ')?(-' +
+ variant + ')*(-' + extension + ')*(-' + privateUse + ')?';
+
+ var languageTag =
+ '^(' + langTag + '|' + privateUse + '|' + grandfathered + ')$';
+ LANGUAGE_TAG_RE = new $RegExp(languageTag, 'i');
+}
+
+/**
+ * Initializes the given object so it's a valid Collator instance.
+ * Useful for subclassing.
+ */
+function initializeCollator(collator, locales, options) {
+ if (collator.hasOwnProperty('__initializedIntlObject')) {
+ throw new $TypeError('Trying to re-initialize Collator object.');
+ }
+
+ if (options === undefined) {
+ options = {};
+ }
+
+ var getOption = getGetOption(options, 'collator');
+
+ var internalOptions = {};
+
+ defineWEProperty(internalOptions, 'usage', getOption(
+ 'usage', 'string', ['sort', 'search'], 'sort'));
+
+ var sensitivity = getOption('sensitivity', 'string',
+ ['base', 'accent', 'case', 'variant']);
+ if (sensitivity === undefined && internalOptions.usage === 'sort') {
+ sensitivity = 'variant';
+ }
+ defineWEProperty(internalOptions, 'sensitivity', sensitivity);
+
+ defineWEProperty(internalOptions, 'ignorePunctuation', getOption(
+ 'ignorePunctuation', 'boolean', undefined, false));
+
+ var locale = resolveLocale('collator', locales, options);
+
+ // ICU can't take kb, kc... parameters through localeID, so we need to pass
+ // them as options.
+ // One exception is -co- which has to be part of the extension, but only for
+ // usage: sort, and its value can't be 'standard' or 'search'.
+ var extensionMap = parseExtension(locale.extension);
+ setOptions(
+ options, extensionMap, COLLATOR_KEY_MAP, getOption, internalOptions);
+
+ var collation = 'default';
+ var extension = '';
+ if (extensionMap.hasOwnProperty('co') && internalOptions.usage === 'sort') {
+ if (ALLOWED_CO_VALUES.indexOf(extensionMap.co) !== -1) {
+ extension = '-u-co-' + extensionMap.co;
+ // ICU can't tell us what the collation is, so save user's input.
+ collation = extensionMap.co;
+ }
+ } else if (internalOptions.usage === 'search') {
+ extension = '-u-co-search';
+ }
+ defineWEProperty(internalOptions, 'collation', collation);
+
+ var requestedLocale = locale.locale + extension;
+
+ // We define all properties C++ code may produce, to prevent security
+ // problems. If a malicious user decides to redefine
+ // Object.prototype.locale, we can't just use plain x.locale = 'us' or,
+ // in C++, Set("locale", "us"). Object.defineProperties will either
+ // succeed in defining the property or throw an error.
+ var resolved = $Object.defineProperties({}, {
+ caseFirst: {writable: true},
+ collation: {value: internalOptions.collation, writable: true},
+ ignorePunctuation: {writable: true},
+ locale: {writable: true},
+ numeric: {writable: true},
+ requestedLocale: {value: requestedLocale, writable: true},
+ sensitivity: {writable: true},
+ strength: {writable: true},
+ usage: {value: internalOptions.usage, writable: true}
+ });
+
+ var internalCollator = %CreateCollator(requestedLocale,
+ internalOptions,
+ resolved);
+
+ // Writable, configurable and enumerable are set to false by default.
+ $Object.defineProperty(collator, 'collator', {value: internalCollator});
+ $Object.defineProperty(collator, '__initializedIntlObject',
+ {value: 'collator'});
+ $Object.defineProperty(collator, 'resolved', {value: resolved});
+
+ return collator;
+}
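+
+// Usage sketch; assumes 'phonebk' is in ALLOWED_CO_VALUES and that the ICU
+// build carries the matching collation data:
+//
+//   var c = new Intl.Collator('de-u-co-phonebk', {usage: 'sort'});
+//   c.resolvedOptions().collation;  // 'phonebk', the user's -u-co- value,
+//                                   // since ICU can't report it back
+//   new Intl.Collator('en', {usage: 'search'}).resolvedOptions().collation;
+//                                   // 'default'; -u-co-search is added
+//                                   // internally instead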
+
+
+/**
+ * Constructs Intl.Collator object given optional locales and options
+ * parameters.
+ *
+ * @constructor
+ */
+%SetProperty(Intl, 'Collator', function() {
+ var locales = arguments[0];
+ var options = arguments[1];
+
+ if (!this || this === Intl) {
+ // Constructor is called as a function.
+ return new Intl.Collator(locales, options);
+ }
+
+ return initializeCollator(toObject(this), locales, options);
+ },
+ DONT_ENUM
+);
+
+
+/**
+ * Collator resolvedOptions method.
+ */
+%SetProperty(Intl.Collator.prototype, 'resolvedOptions', function() {
+ if (%_IsConstructCall()) {
+ throw new $TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
+ }
+
+ if (!this || typeof this !== 'object' ||
+ this.__initializedIntlObject !== 'collator') {
+ throw new $TypeError('resolvedOptions method called on a non-object ' +
+ 'or on an object that is not Intl.Collator.');
+ }
+
+ var coll = this;
+ var locale = getOptimalLanguageTag(coll.resolved.requestedLocale,
+ coll.resolved.locale);
+
+ return {
+ locale: locale,
+ usage: coll.resolved.usage,
+ sensitivity: coll.resolved.sensitivity,
+ ignorePunctuation: coll.resolved.ignorePunctuation,
+ numeric: coll.resolved.numeric,
+ caseFirst: coll.resolved.caseFirst,
+ collation: coll.resolved.collation
+ };
+ },
+ DONT_ENUM
+);
+%FunctionSetName(Intl.Collator.prototype.resolvedOptions, 'resolvedOptions');
+%FunctionRemovePrototype(Intl.Collator.prototype.resolvedOptions);
+%SetNativeFlag(Intl.Collator.prototype.resolvedOptions);
+
+
+/**
+ * Returns the subset of the given locale list for which this service
+ * has a matching (possibly fallback) locale. Locales appear in the same
+ * order in the returned list as in the input list.
+ * The options argument is optional.
+ */
+%SetProperty(Intl.Collator, 'supportedLocalesOf', function(locales) {
+ if (%_IsConstructCall()) {
+ throw new $TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
+ }
+
+ return supportedLocalesOf('collator', locales, arguments[1]);
+ },
+ DONT_ENUM
+);
+%FunctionSetName(Intl.Collator.supportedLocalesOf, 'supportedLocalesOf');
+%FunctionRemovePrototype(Intl.Collator.supportedLocalesOf);
+%SetNativeFlag(Intl.Collator.supportedLocalesOf);
+
+
+/**
+ * When the compare method is called with two arguments x and y, it returns a
+ * Number other than NaN that represents the result of a locale-sensitive
+ * String comparison of x with y.
+ * The result is intended to order String values in the sort order specified
+ * by the effective locale and collation options computed during construction
+ * of this Collator object, and will be negative, zero, or positive, depending
+ * on whether x comes before y in the sort order, the Strings are equal under
+ * the sort order, or x comes after y in the sort order, respectively.
+ */
+function compare(collator, x, y) {
+ return %InternalCompare(collator.collator, $String(x), $String(y));
+}
+
+
+addBoundMethod(Intl.Collator, 'compare', compare, 2);
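+
+// Usage sketch: the bound compare method plugs straight into Array sort.
+//
+//   var coll = new Intl.Collator('de');
+//   ['Z', 'a', '\u00E4'].sort(coll.compare);
+//   // locale-aware order, e.g. ['a', '\u00E4', 'Z'] for German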
+
+/**
+ * Verifies that the input is a well-formed ISO 4217 currency code.
+ * Don't uppercase before testing; that could convert an invalid code into
+ * a valid one. For example, \u00DFP (Eszett+P) becomes SSP.
+ */
+function isWellFormedCurrencyCode(currency) {
+ return typeof currency == "string" &&
+ currency.length == 3 &&
+ currency.match(/[^A-Za-z]/) == null;
+}
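+
+// Examples of the check above (note the comment about uppercasing):
+//
+//   isWellFormedCurrencyCode('USD');      // true
+//   isWellFormedCurrencyCode('usd');      // true, either case is fine
+//   isWellFormedCurrencyCode('US1');      // false, non-letter character
+//   isWellFormedCurrencyCode('\u00DFP');  // false; uppercasing first would
+//                                         // yield 'SSP' and wrongly pass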
+
+
+/**
+ * Returns the valid digit count for a property, or throws a RangeError
+ * if the value is out of range.
+ */
+function getNumberOption(options, property, min, max, fallback) {
+ var value = options[property];
+ if (value !== undefined) {
+ value = $Number(value);
+ if ($isNaN(value) || value < min || value > max) {
+ throw new $RangeError(property + ' value is out of range.');
+ }
+ return $floor(value);
+ }
+
+ return fallback;
+}
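+
+// Examples of the flooring, fallback and range check:
+//
+//   getNumberOption({maximumFractionDigits: 2.9},
+//                   'maximumFractionDigits', 0, 20, 3);  // 2, floored
+//   getNumberOption({}, 'maximumFractionDigits', 0, 20, 3);  // 3, fallback
+//   getNumberOption({maximumFractionDigits: 25},
+//                   'maximumFractionDigits', 0, 20, 3);  // throws RangeError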
+
+
+/**
+ * Initializes the given object so it's a valid NumberFormat instance.
+ * Useful for subclassing.
+ */
+function initializeNumberFormat(numberFormat, locales, options) {
+ if (numberFormat.hasOwnProperty('__initializedIntlObject')) {
+ throw new $TypeError('Trying to re-initialize NumberFormat object.');
+ }
+
+ if (options === undefined) {
+ options = {};
+ }
+
+ var getOption = getGetOption(options, 'numberformat');
+
+ var locale = resolveLocale('numberformat', locales, options);
+
+ var internalOptions = {};
+ defineWEProperty(internalOptions, 'style', getOption(
+ 'style', 'string', ['decimal', 'percent', 'currency'], 'decimal'));
+
+ var currency = getOption('currency', 'string');
+ if (currency !== undefined && !isWellFormedCurrencyCode(currency)) {
+ throw new $RangeError('Invalid currency code: ' + currency);
+ }
+
+ if (internalOptions.style === 'currency' && currency === undefined) {
+ throw new $TypeError('Currency code is required with currency style.');
+ }
+
+ var currencyDisplay = getOption(
+ 'currencyDisplay', 'string', ['code', 'symbol', 'name'], 'symbol');
+ if (internalOptions.style === 'currency') {
+ defineWEProperty(internalOptions, 'currency', currency.toUpperCase());
+ defineWEProperty(internalOptions, 'currencyDisplay', currencyDisplay);
+ }
+
+ // Digit ranges.
+ var mnid = getNumberOption(options, 'minimumIntegerDigits', 1, 21, 1);
+ defineWEProperty(internalOptions, 'minimumIntegerDigits', mnid);
+
+ var mnfd = getNumberOption(options, 'minimumFractionDigits', 0, 20, 0);
+ defineWEProperty(internalOptions, 'minimumFractionDigits', mnfd);
+
+ var mxfd = getNumberOption(options, 'maximumFractionDigits', mnfd, 20, 3);
+ defineWEProperty(internalOptions, 'maximumFractionDigits', mxfd);
+
+ var mnsd = options['minimumSignificantDigits'];
+ var mxsd = options['maximumSignificantDigits'];
+ if (mnsd !== undefined || mxsd !== undefined) {
+ mnsd = getNumberOption(options, 'minimumSignificantDigits', 1, 21, 0);
+ defineWEProperty(internalOptions, 'minimumSignificantDigits', mnsd);
+
+ mxsd = getNumberOption(options, 'maximumSignificantDigits', mnsd, 21, 21);
+ defineWEProperty(internalOptions, 'maximumSignificantDigits', mxsd);
+ }
+
+ // Grouping.
+ defineWEProperty(internalOptions, 'useGrouping', getOption(
+ 'useGrouping', 'boolean', undefined, true));
+
+ // ICU prefers options to be passed using -u- extension key/values for
+ // number format, so we need to build that.
+ var extensionMap = parseExtension(locale.extension);
+ var extension = setOptions(options, extensionMap, NUMBER_FORMAT_KEY_MAP,
+ getOption, internalOptions);
+
+ var requestedLocale = locale.locale + extension;
+ var resolved = $Object.defineProperties({}, {
+ currency: {writable: true},
+ currencyDisplay: {writable: true},
+ locale: {writable: true},
+ maximumFractionDigits: {writable: true},
+ minimumFractionDigits: {writable: true},
+ minimumIntegerDigits: {writable: true},
+ numberingSystem: {writable: true},
+ requestedLocale: {value: requestedLocale, writable: true},
+ style: {value: internalOptions.style, writable: true},
+ useGrouping: {writable: true}
+ });
+ if (internalOptions.hasOwnProperty('minimumSignificantDigits')) {
+ defineWEProperty(resolved, 'minimumSignificantDigits', undefined);
+ }
+ if (internalOptions.hasOwnProperty('maximumSignificantDigits')) {
+ defineWEProperty(resolved, 'maximumSignificantDigits', undefined);
+ }
+ var formatter = %CreateNumberFormat(requestedLocale,
+ internalOptions,
+ resolved);
+
+ // We can't get information about number or currency style from ICU, so we
+ // assume the user's request was fulfilled.
+ if (internalOptions.style === 'currency') {
+ $Object.defineProperty(resolved, 'currencyDisplay', {value: currencyDisplay,
+ writable: true});
+ }
+
+ $Object.defineProperty(numberFormat, 'formatter', {value: formatter});
+ $Object.defineProperty(numberFormat, 'resolved', {value: resolved});
+ $Object.defineProperty(numberFormat, '__initializedIntlObject',
+ {value: 'numberformat'});
+
+ return numberFormat;
+}
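+
+// Usage sketch of the currency rules above:
+//
+//   new Intl.NumberFormat('en', {style: 'currency'});
+//   // throws TypeError, a currency code is required with currency style
+//   var nf = new Intl.NumberFormat('en',
+//                                  {style: 'currency', currency: 'usd'});
+//   nf.resolvedOptions().currency;  // 'USD', uppercased at init time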
+
+
+/**
+ * Constructs Intl.NumberFormat object given optional locales and options
+ * parameters.
+ *
+ * @constructor
+ */
+%SetProperty(Intl, 'NumberFormat', function() {
+ var locales = arguments[0];
+ var options = arguments[1];
+
+ if (!this || this === Intl) {
+ // Constructor is called as a function.
+ return new Intl.NumberFormat(locales, options);
+ }
+
+ return initializeNumberFormat(toObject(this), locales, options);
+ },
+ DONT_ENUM
+);
+
+
+/**
+ * NumberFormat resolvedOptions method.
+ */
+%SetProperty(Intl.NumberFormat.prototype, 'resolvedOptions', function() {
+ if (%_IsConstructCall()) {
+ throw new $TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
+ }
+
+ if (!this || typeof this !== 'object' ||
+ this.__initializedIntlObject !== 'numberformat') {
+ throw new $TypeError('resolvedOptions method called on a non-object ' +
+ 'or on an object that is not Intl.NumberFormat.');
+ }
+
+ var format = this;
+ var locale = getOptimalLanguageTag(format.resolved.requestedLocale,
+ format.resolved.locale);
+
+ var result = {
+ locale: locale,
+ numberingSystem: format.resolved.numberingSystem,
+ style: format.resolved.style,
+ useGrouping: format.resolved.useGrouping,
+ minimumIntegerDigits: format.resolved.minimumIntegerDigits,
+ minimumFractionDigits: format.resolved.minimumFractionDigits,
+ maximumFractionDigits: format.resolved.maximumFractionDigits,
+ };
+
+ if (result.style === 'currency') {
+ defineWECProperty(result, 'currency', format.resolved.currency);
+ defineWECProperty(result, 'currencyDisplay',
+ format.resolved.currencyDisplay);
+ }
+
+ if (format.resolved.hasOwnProperty('minimumSignificantDigits')) {
+ defineWECProperty(result, 'minimumSignificantDigits',
+ format.resolved.minimumSignificantDigits);
+ }
+
+ if (format.resolved.hasOwnProperty('maximumSignificantDigits')) {
+ defineWECProperty(result, 'maximumSignificantDigits',
+ format.resolved.maximumSignificantDigits);
+ }
+
+ return result;
+ },
+ DONT_ENUM
+);
+%FunctionSetName(Intl.NumberFormat.prototype.resolvedOptions,
+ 'resolvedOptions');
+%FunctionRemovePrototype(Intl.NumberFormat.prototype.resolvedOptions);
+%SetNativeFlag(Intl.NumberFormat.prototype.resolvedOptions);
+
+
+/**
+ * Returns the subset of the given locale list for which this service
+ * has a matching (possibly fallback) locale. Locales appear in the same
+ * order in the returned list as in the input list.
+ * The options argument is optional.
+ */
+%SetProperty(Intl.NumberFormat, 'supportedLocalesOf', function(locales) {
+ if (%_IsConstructCall()) {
+ throw new $TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
+ }
+
+ return supportedLocalesOf('numberformat', locales, arguments[1]);
+ },
+ DONT_ENUM
+);
+%FunctionSetName(Intl.NumberFormat.supportedLocalesOf, 'supportedLocalesOf');
+%FunctionRemovePrototype(Intl.NumberFormat.supportedLocalesOf);
+%SetNativeFlag(Intl.NumberFormat.supportedLocalesOf);
+
+
+/**
+ * Returns a String value representing the result of calling ToNumber(value)
+ * according to the effective locale and the formatting options of this
+ * NumberFormat.
+ */
+function formatNumber(formatter, value) {
+ // Spec treats -0 and +0 as 0.
+ var number = $Number(value);
+ if (number === -0) {
+ number = 0;
+ }
+
+ return %InternalNumberFormat(formatter.formatter, number);
+}
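+
+// Example: negative zero is normalized before formatting.
+//
+//   var nf = new Intl.NumberFormat('en');
+//   nf.format(-0);  // '0', not '-0'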
+
+
+/**
+ * Returns a Number that represents the string value that was passed in.
+ */
+function parseNumber(formatter, value) {
+ return %InternalNumberParse(formatter.formatter, $String(value));
+}
+
+
+addBoundMethod(Intl.NumberFormat, 'format', formatNumber, 1);
+addBoundMethod(Intl.NumberFormat, 'v8Parse', parseNumber, 1);
+
+/**
+ * Returns a string matching the LDML representation of the options object.
+ */
+function toLDMLString(options) {
+ var getOption = getGetOption(options, 'dateformat');
+
+ var ldmlString = '';
+
+ var option = getOption('weekday', 'string', ['narrow', 'short', 'long']);
+ ldmlString += appendToLDMLString(
+ option, {narrow: 'EEEEE', short: 'EEE', long: 'EEEE'});
+
+ option = getOption('era', 'string', ['narrow', 'short', 'long']);
+ ldmlString += appendToLDMLString(
+ option, {narrow: 'GGGGG', short: 'GGG', long: 'GGGG'});
+
+ option = getOption('year', 'string', ['2-digit', 'numeric']);
+ ldmlString += appendToLDMLString(option, {'2-digit': 'yy', 'numeric': 'y'});
+
+ option = getOption('month', 'string',
+ ['2-digit', 'numeric', 'narrow', 'short', 'long']);
+ ldmlString += appendToLDMLString(option, {'2-digit': 'MM', 'numeric': 'M',
+ 'narrow': 'MMMMM', 'short': 'MMM', 'long': 'MMMM'});
+
+ option = getOption('day', 'string', ['2-digit', 'numeric']);
+ ldmlString += appendToLDMLString(
+ option, {'2-digit': 'dd', 'numeric': 'd'});
+
+ var hr12 = getOption('hour12', 'boolean');
+ option = getOption('hour', 'string', ['2-digit', 'numeric']);
+ if (hr12 === undefined) {
+ ldmlString += appendToLDMLString(option, {'2-digit': 'jj', 'numeric': 'j'});
+ } else if (hr12 === true) {
+ ldmlString += appendToLDMLString(option, {'2-digit': 'hh', 'numeric': 'h'});
+ } else {
+ ldmlString += appendToLDMLString(option, {'2-digit': 'HH', 'numeric': 'H'});
+ }
+
+ option = getOption('minute', 'string', ['2-digit', 'numeric']);
+ ldmlString += appendToLDMLString(option, {'2-digit': 'mm', 'numeric': 'm'});
+
+ option = getOption('second', 'string', ['2-digit', 'numeric']);
+ ldmlString += appendToLDMLString(option, {'2-digit': 'ss', 'numeric': 's'});
+
+ option = getOption('timeZoneName', 'string', ['short', 'long']);
+ ldmlString += appendToLDMLString(option, {short: 'v', long: 'vv'});
+
+ return ldmlString;
+}
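+
+// Example: options map to an LDML skeleton in the fixed field order above
+// (weekday, era, year, month, day, hour, minute, second, timeZoneName):
+//
+//   toLDMLString({year: 'numeric', month: 'short', day: '2-digit',
+//                 hour: 'numeric', minute: '2-digit', hour12: false});
+//   // 'yMMMddHmm'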
+
+
+/**
+ * Returns the LDML equivalent of the given option, or an empty string.
+ */
+function appendToLDMLString(option, pairs) {
+ if (option !== undefined) {
+ return pairs[option];
+ } else {
+ return '';
+ }
+}
+
+
+/**
+ * Returns an options object reconstructed from the LDML format pattern.
+ */
+function fromLDMLString(ldmlString) {
+ // First remove ''-quoted text, so we drop literals such as 'Uhr'.
+ ldmlString = ldmlString.replace(GetQuotedStringRE(), '');
+
+ var options = {};
+ var match = ldmlString.match(/E{3,5}/g);
+ options = appendToDateTimeObject(
+ options, 'weekday', match, {EEEEE: 'narrow', EEE: 'short', EEEE: 'long'});
+
+ match = ldmlString.match(/G{3,5}/g);
+ options = appendToDateTimeObject(
+ options, 'era', match, {GGGGG: 'narrow', GGG: 'short', GGGG: 'long'});
+
+ match = ldmlString.match(/y{1,2}/g);
+ options = appendToDateTimeObject(
+ options, 'year', match, {y: 'numeric', yy: '2-digit'});
+
+ match = ldmlString.match(/M{1,5}/g);
+ options = appendToDateTimeObject(options, 'month', match, {MM: '2-digit',
+ M: 'numeric', MMMMM: 'narrow', MMM: 'short', MMMM: 'long'});
+
+ // Sometimes we get L instead of M for the month (standalone name).
+ match = ldmlString.match(/L{1,5}/g);
+ options = appendToDateTimeObject(options, 'month', match, {LL: '2-digit',
+ L: 'numeric', LLLLL: 'narrow', LLL: 'short', LLLL: 'long'});
+
+ match = ldmlString.match(/d{1,2}/g);
+ options = appendToDateTimeObject(
+ options, 'day', match, {d: 'numeric', dd: '2-digit'});
+
+ match = ldmlString.match(/h{1,2}/g);
+ if (match !== null) {
+ options['hour12'] = true;
+ }
+ options = appendToDateTimeObject(
+ options, 'hour', match, {h: 'numeric', hh: '2-digit'});
+
+ match = ldmlString.match(/H{1,2}/g);
+ if (match !== null) {
+ options['hour12'] = false;
+ }
+ options = appendToDateTimeObject(
+ options, 'hour', match, {H: 'numeric', HH: '2-digit'});
+
+ match = ldmlString.match(/m{1,2}/g);
+ options = appendToDateTimeObject(
+ options, 'minute', match, {m: 'numeric', mm: '2-digit'});
+
+ match = ldmlString.match(/s{1,2}/g);
+ options = appendToDateTimeObject(
+ options, 'second', match, {s: 'numeric', ss: '2-digit'});
+
+ match = ldmlString.match(/v{1,2}/g);
+ options = appendToDateTimeObject(
+ options, 'timeZoneName', match, {v: 'short', vv: 'long'});
+
+ return options;
+}
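+
+// Example: mapping an ICU pattern back to options; the quoted literal is
+// stripped first, as noted above.
+//
+//   fromLDMLString("d. MMMM y 'Uhr'");
+//   // day: 'numeric', month: 'long', year: 'numeric'; other fields are
+//   // undefined, and hour12 is only set when an h/H field is present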
+
+
+function appendToDateTimeObject(options, option, match, pairs) {
+ if (match === null) {
+ if (!options.hasOwnProperty(option)) {
+ defineWEProperty(options, option, undefined);
+ }
+ return options;
+ }
+
+ var property = match[0];
+ defineWEProperty(options, option, pairs[property]);
+
+ return options;
+}
+
+
+/**
+ * Returns options with at least the default values filled in.
+ */
+function toDateTimeOptions(options, required, defaults) {
+ if (options === undefined) {
+ options = null;
+ } else {
+ options = toObject(options);
+ }
+
+ options = $Object.apply(this, [options]);
+
+ var needsDefault = true;
+ if ((required === 'date' || required === 'any') &&
+ (options.weekday !== undefined || options.year !== undefined ||
+ options.month !== undefined || options.day !== undefined)) {
+ needsDefault = false;
+ }
+
+ if ((required === 'time' || required === 'any') &&
+ (options.hour !== undefined || options.minute !== undefined ||
+ options.second !== undefined)) {
+ needsDefault = false;
+ }
+
+ if (needsDefault && (defaults === 'date' || defaults === 'all')) {
+ $Object.defineProperty(options, 'year', {value: 'numeric',
+ writable: true,
+ enumerable: true,
+ configurable: true});
+ $Object.defineProperty(options, 'month', {value: 'numeric',
+ writable: true,
+ enumerable: true,
+ configurable: true});
+ $Object.defineProperty(options, 'day', {value: 'numeric',
+ writable: true,
+ enumerable: true,
+ configurable: true});
+ }
+
+ if (needsDefault && (defaults === 'time' || defaults === 'all')) {
+ $Object.defineProperty(options, 'hour', {value: 'numeric',
+ writable: true,
+ enumerable: true,
+ configurable: true});
+ $Object.defineProperty(options, 'minute', {value: 'numeric',
+ writable: true,
+ enumerable: true,
+ configurable: true});
+ $Object.defineProperty(options, 'second', {value: 'numeric',
+ writable: true,
+ enumerable: true,
+ configurable: true});
+ }
+
+ return options;
+}
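+
+// Example: defaulting kicks in only when no relevant field is given.
+//
+//   toDateTimeOptions(undefined, 'any', 'date');
+//   // {year: 'numeric', month: 'numeric', day: 'numeric'}
+//   toDateTimeOptions({hour: 'numeric'}, 'any', 'date');
+//   // left as-is; an explicit time field suppresses the date defaults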
+
+
+/**
+ * Initializes the given object so it's a valid DateTimeFormat instance.
+ * Useful for subclassing.
+ */
+function initializeDateTimeFormat(dateFormat, locales, options) {
+ if (dateFormat.hasOwnProperty('__initializedIntlObject')) {
+ throw new $TypeError('Trying to re-initialize DateTimeFormat object.');
+ }
+
+ if (options === undefined) {
+ options = {};
+ }
+
+ var locale = resolveLocale('dateformat', locales, options);
+
+ options = toDateTimeOptions(options, 'any', 'date');
+
+ var getOption = getGetOption(options, 'dateformat');
+
+ // We implement only the 'best fit' algorithm, but still need to check
+ // that the formatMatcher values are in range.
+ var matcher = getOption('formatMatcher', 'string',
+ ['basic', 'best fit'], 'best fit');
+
+ // Build LDML string for the skeleton that we pass to the formatter.
+ var ldmlString = toLDMLString(options);
+
+ // Filter out supported extension keys so we know what to put in the
+ // resolved section later on. We need to pass the calendar and numbering
+ // system to the method.
+ var tz = canonicalizeTimeZoneID(options.timeZone);
+
+ // ICU prefers options to be passed using -u- extension key/values, so
+ // we need to build that.
+ var internalOptions = {};
+ var extensionMap = parseExtension(locale.extension);
+ var extension = setOptions(options, extensionMap, DATETIME_FORMAT_KEY_MAP,
+ getOption, internalOptions);
+
+ var requestedLocale = locale.locale + extension;
+ var resolved = $Object.defineProperties({}, {
+ calendar: {writable: true},
+ day: {writable: true},
+ era: {writable: true},
+ hour12: {writable: true},
+ hour: {writable: true},
+ locale: {writable: true},
+ minute: {writable: true},
+ month: {writable: true},
+ numberingSystem: {writable: true},
+ pattern: {writable: true},
+ requestedLocale: {value: requestedLocale, writable: true},
+ second: {writable: true},
+ timeZone: {writable: true},
+ timeZoneName: {writable: true},
+ tz: {value: tz, writable: true},
+ weekday: {writable: true},
+ year: {writable: true}
+ });
+
+ var formatter = %CreateDateTimeFormat(
+ requestedLocale, {skeleton: ldmlString, timeZone: tz}, resolved);
+
+ if (tz !== undefined && tz !== resolved.timeZone) {
+ throw new $RangeError('Unsupported time zone specified ' + tz);
+ }
+
+ $Object.defineProperty(dateFormat, 'formatter', {value: formatter});
+ $Object.defineProperty(dateFormat, 'resolved', {value: resolved});
+ $Object.defineProperty(dateFormat, '__initializedIntlObject',
+ {value: 'dateformat'});
+
+ return dateFormat;
+}
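+
+// Usage sketch (the time zone path goes through canonicalizeTimeZoneID,
+// defined below; the invalid zone name here is illustrative):
+//
+//   var df = new Intl.DateTimeFormat('en',
+//                                    {timeZone: 'UTC', year: 'numeric'});
+//   df.resolvedOptions().timeZone;  // 'UTC'
+//   new Intl.DateTimeFormat('en', {timeZone: 'Nowhere/Atlantis'});
+//   // throws RangeError if ICU doesn't resolve the zone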
+
+
+/**
+ * Constructs Intl.DateTimeFormat object given optional locales and options
+ * parameters.
+ *
+ * @constructor
+ */
+%SetProperty(Intl, 'DateTimeFormat', function() {
+ var locales = arguments[0];
+ var options = arguments[1];
+
+ if (!this || this === Intl) {
+ // Constructor is called as a function.
+ return new Intl.DateTimeFormat(locales, options);
+ }
+
+ return initializeDateTimeFormat(toObject(this), locales, options);
+ },
+ DONT_ENUM
+);
+
+
+/**
+ * DateTimeFormat resolvedOptions method.
+ */
+%SetProperty(Intl.DateTimeFormat.prototype, 'resolvedOptions', function() {
+ if (%_IsConstructCall()) {
+ throw new $TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
+ }
+
+ if (!this || typeof this !== 'object' ||
+ this.__initializedIntlObject !== 'dateformat') {
+ throw new $TypeError('resolvedOptions method called on a non-object or ' +
+ 'on an object that is not Intl.DateTimeFormat.');
+ }
+
+ var format = this;
+ var fromPattern = fromLDMLString(format.resolved.pattern);
+ var userCalendar = ICU_CALENDAR_MAP[format.resolved.calendar];
+ if (userCalendar === undefined) {
+ // Use the ICU name if we don't have a match. It shouldn't happen, but
+ // throwing for this would be too strict.
+ userCalendar = format.resolved.calendar;
+ }
+
+ var locale = getOptimalLanguageTag(format.resolved.requestedLocale,
+ format.resolved.locale);
+
+ var result = {
+ locale: locale,
+ numberingSystem: format.resolved.numberingSystem,
+ calendar: userCalendar,
+ timeZone: format.resolved.timeZone
+ };
+
+ addWECPropertyIfDefined(result, 'timeZoneName', fromPattern.timeZoneName);
+ addWECPropertyIfDefined(result, 'era', fromPattern.era);
+ addWECPropertyIfDefined(result, 'year', fromPattern.year);
+ addWECPropertyIfDefined(result, 'month', fromPattern.month);
+ addWECPropertyIfDefined(result, 'day', fromPattern.day);
+ addWECPropertyIfDefined(result, 'weekday', fromPattern.weekday);
+ addWECPropertyIfDefined(result, 'hour12', fromPattern.hour12);
+ addWECPropertyIfDefined(result, 'hour', fromPattern.hour);
+ addWECPropertyIfDefined(result, 'minute', fromPattern.minute);
+ addWECPropertyIfDefined(result, 'second', fromPattern.second);
+
+ return result;
+ },
+ DONT_ENUM
+);
+%FunctionSetName(Intl.DateTimeFormat.prototype.resolvedOptions,
+ 'resolvedOptions');
+%FunctionRemovePrototype(Intl.DateTimeFormat.prototype.resolvedOptions);
+%SetNativeFlag(Intl.DateTimeFormat.prototype.resolvedOptions);
+
+
+/**
+ * Returns the subset of the given locale list for which this service
+ * has a matching (possibly fallback) locale. Locales appear in the same
+ * order in the returned list as in the input list.
+ * The options argument is optional.
+ */
+%SetProperty(Intl.DateTimeFormat, 'supportedLocalesOf', function(locales) {
+ if (%_IsConstructCall()) {
+ throw new $TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
+ }
+
+ return supportedLocalesOf('dateformat', locales, arguments[1]);
+ },
+ DONT_ENUM
+);
+%FunctionSetName(Intl.DateTimeFormat.supportedLocalesOf, 'supportedLocalesOf');
+%FunctionRemovePrototype(Intl.DateTimeFormat.supportedLocalesOf);
+%SetNativeFlag(Intl.DateTimeFormat.supportedLocalesOf);
+
+
+/**
+ * Returns a String value representing the result of calling ToNumber(date)
+ * according to the effective locale and the formatting options of this
+ * DateTimeFormat.
+ */
+function formatDate(formatter, dateValue) {
+ var dateMs;
+ if (dateValue === undefined) {
+ dateMs = $Date.now();
+ } else {
+ dateMs = $Number(dateValue);
+ }
+
+ if (!$isFinite(dateMs)) {
+ throw new $RangeError('Provided date is not in valid range.');
+ }
+
+ return %InternalDateFormat(formatter.formatter, new $Date(dateMs));
+}
+
+
+/**
+ * Returns a Date object representing the result of calling ToString(value)
+ * according to the effective locale and the formatting options of this
+ * DateTimeFormat.
+ * Returns undefined if the date string cannot be parsed.
+ */
+function parseDate(formatter, value) {
+ return %InternalDateParse(formatter.formatter, $String(value));
+}
+
+
+// 0 because the date argument is optional.
+addBoundMethod(Intl.DateTimeFormat, 'format', formatDate, 0);
+addBoundMethod(Intl.DateTimeFormat, 'v8Parse', parseDate, 1);
+
+
+/**
+ * Returns the canonical Area/Location name, or throws an exception if the
+ * zone name is not a valid IANA name.
+ */
+function canonicalizeTimeZoneID(tzID) {
+ // Skip undefined zones.
+ if (tzID === undefined) {
+ return tzID;
+ }
+
+ // Special case handling (UTC, GMT).
+ var upperID = tzID.toUpperCase();
+ if (upperID === 'UTC' || upperID === 'GMT' ||
+ upperID === 'ETC/UTC' || upperID === 'ETC/GMT') {
+ return 'UTC';
+ }
+
+ // We expect only _ and / besides ASCII letters.
+ // All inputs should conform to Area/Location from now on.
+ var match = GetTimezoneNameCheckRE().exec(tzID);
+ if (match === null) {
+ throw new $RangeError('Expected Area/Location for time zone, got ' + tzID);
+ }
+
+ var result = toTitleCaseWord(match[1]) + '/' + toTitleCaseWord(match[2]);
+ var i = 3;
+ while (match[i] !== undefined && i < match.length) {
+ result = result + '_' + toTitleCaseWord(match[i]);
+ i++;
+ }
+
+ return result;
+}
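+
+// Examples (assuming the Area/Location check RE defined elsewhere in this
+// file):
+//
+//   canonicalizeTimeZoneID('utc');                  // 'UTC'
+//   canonicalizeTimeZoneID('america/los_angeles');  // 'America/Los_Angeles'
+//   canonicalizeTimeZoneID('PST');  // throws RangeError, not Area/Location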
+
+/**
+ * Initializes the given object so it's a valid BreakIterator instance.
+ * Useful for subclassing.
+ */
+function initializeBreakIterator(iterator, locales, options) {
+ if (iterator.hasOwnProperty('__initializedIntlObject')) {
+ throw new $TypeError('Trying to re-initialize v8BreakIterator object.');
+ }
+
+ if (options === undefined) {
+ options = {};
+ }
+
+ var getOption = getGetOption(options, 'breakiterator');
+
+ var internalOptions = {};
+
+ defineWEProperty(internalOptions, 'type', getOption(
+ 'type', 'string', ['character', 'word', 'sentence', 'line'], 'word'));
+
+ var locale = resolveLocale('breakiterator', locales, options);
+ var resolved = $Object.defineProperties({}, {
+ requestedLocale: {value: locale.locale, writable: true},
+ type: {value: internalOptions.type, writable: true},
+ locale: {writable: true}
+ });
+
+ var internalIterator = %CreateBreakIterator(locale.locale,
+ internalOptions,
+ resolved);
+
+ $Object.defineProperty(iterator, 'iterator', {value: internalIterator});
+ $Object.defineProperty(iterator, 'resolved', {value: resolved});
+ $Object.defineProperty(iterator, '__initializedIntlObject',
+ {value: 'breakiterator'});
+
+ return iterator;
+}
+
+
+/**
+ * Constructs Intl.v8BreakIterator object given optional locales and options
+ * parameters.
+ *
+ * @constructor
+ */
+%SetProperty(Intl, 'v8BreakIterator', function() {
+ var locales = arguments[0];
+ var options = arguments[1];
+
+ if (!this || this === Intl) {
+ // Constructor is called as a function.
+ return new Intl.v8BreakIterator(locales, options);
+ }
+
+ return initializeBreakIterator(toObject(this), locales, options);
+ },
+ DONT_ENUM
+);
+
+
+/**
+ * BreakIterator resolvedOptions method.
+ */
+%SetProperty(Intl.v8BreakIterator.prototype, 'resolvedOptions', function() {
+ if (%_IsConstructCall()) {
+ throw new $TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
+ }
+
+ if (!this || typeof this !== 'object' ||
+ this.__initializedIntlObject !== 'breakiterator') {
+ throw new $TypeError('resolvedOptions method called on a non-object or ' +
+ 'on an object that is not Intl.v8BreakIterator.');
+ }
+
+ var segmenter = this;
+ var locale = getOptimalLanguageTag(segmenter.resolved.requestedLocale,
+ segmenter.resolved.locale);
+
+ return {
+ locale: locale,
+ type: segmenter.resolved.type
+ };
+ },
+ DONT_ENUM
+);
+%FunctionSetName(Intl.v8BreakIterator.prototype.resolvedOptions,
+ 'resolvedOptions');
+%FunctionRemovePrototype(Intl.v8BreakIterator.prototype.resolvedOptions);
+%SetNativeFlag(Intl.v8BreakIterator.prototype.resolvedOptions);
+
+
+/**
+ * Returns the subset of the given locale list for which this service
+ * has a matching (possibly fallback) locale. Locales appear in the same
+ * order in the returned list as in the input list.
+ * The options argument is optional.
+ */
+%SetProperty(Intl.v8BreakIterator, 'supportedLocalesOf', function(locales) {
+ if (%_IsConstructCall()) {
+ throw new $TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
+ }
+
+ return supportedLocalesOf('breakiterator', locales, arguments[1]);
+ },
+ DONT_ENUM
+);
+%FunctionSetName(Intl.v8BreakIterator.supportedLocalesOf, 'supportedLocalesOf');
+%FunctionRemovePrototype(Intl.v8BreakIterator.supportedLocalesOf);
+%SetNativeFlag(Intl.v8BreakIterator.supportedLocalesOf);
+
+
+/**
+ * Adopts text to segment using the iterator. Old text, if present,
+ * gets discarded.
+ */
+function adoptText(iterator, text) {
+ %BreakIteratorAdoptText(iterator.iterator, $String(text));
+}
+
+
+/**
+ * Returns the index of the first break in the string and moves the current
+ * pointer.
+ */
+function first(iterator) {
+ return %BreakIteratorFirst(iterator.iterator);
+}
+
+
+/**
+ * Returns the index of the next break and moves the pointer.
+ */
+function next(iterator) {
+ return %BreakIteratorNext(iterator.iterator);
+}
+
+
+/**
+ * Returns the index of the current break.
+ */
+function current(iterator) {
+ return %BreakIteratorCurrent(iterator.iterator);
+}
+
+
+/**
+ * Returns the type of the current break.
+ */
+function breakType(iterator) {
+ return %BreakIteratorBreakType(iterator.iterator);
+}
+
+
+addBoundMethod(Intl.v8BreakIterator, 'adoptText', adoptText, 1);
+addBoundMethod(Intl.v8BreakIterator, 'first', first, 0);
+addBoundMethod(Intl.v8BreakIterator, 'next', next, 0);
+addBoundMethod(Intl.v8BreakIterator, 'current', current, 0);
+addBoundMethod(Intl.v8BreakIterator, 'breakType', breakType, 0);
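+
+// Usage sketch: stepping through word boundaries with the bound methods
+// above (break type strings depend on the ICU mapping in the C++ side):
+//
+//   var it = new Intl.v8BreakIterator('en', {type: 'word'});
+//   it.adoptText('Hello, world!');
+//   it.first();      // 0
+//   it.next();       // 5, the boundary after 'Hello'
+//   it.breakType();  // e.g. 'letter' for a word segment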
+
+// Save references to Intl objects and methods we use, for added security.
+var savedObjects = {
+ 'collator': Intl.Collator,
+ 'numberformat': Intl.NumberFormat,
+ 'dateformatall': Intl.DateTimeFormat,
+ 'dateformatdate': Intl.DateTimeFormat,
+ 'dateformattime': Intl.DateTimeFormat
+};
+
+
+// Default (created with undefined locales and options parameters) collator,
+// number and date format instances. They'll be created as needed.
+var defaultObjects = {
+ 'collator': undefined,
+ 'numberformat': undefined,
+ 'dateformatall': undefined,
+ 'dateformatdate': undefined,
+ 'dateformattime': undefined,
+};
+
+
+/**
+ * Returns a cached or newly created instance of a given service.
+ * We cache only default instances (where no locales or options are provided).
+ */
+function cachedOrNewService(service, locales, options, defaults) {
+ var useOptions = (defaults === undefined) ? options : defaults;
+ if (locales === undefined && options === undefined) {
+ if (defaultObjects[service] === undefined) {
+ defaultObjects[service] = new savedObjects[service](locales, useOptions);
+ }
+ return defaultObjects[service];
+ }
+ return new savedObjects[service](locales, useOptions);
+}
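+
+// Example: only the all-defaults case is cached, so repeated default calls
+// such as 'a'.localeCompare('b') below reuse a single collator.
+//
+//   cachedOrNewService('collator', undefined, undefined) ===
+//       cachedOrNewService('collator', undefined, undefined);  // true
+//   cachedOrNewService('collator', ['de'], undefined);  // always fresh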
+
+
+/**
+ * Compares this and that, and returns a value less than 0, equal to 0, or
+ * greater than 0.
+ * Overrides the built-in method.
+ */
+$Object.defineProperty($String.prototype, 'localeCompare', {
+ value: function(that) {
+ if (%_IsConstructCall()) {
+ throw new $TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
+ }
+
+ if (this === undefined || this === null) {
+ throw new $TypeError('Method invoked on undefined or null value.');
+ }
+
+ var locales = arguments[1];
+ var options = arguments[2];
+ var collator = cachedOrNewService('collator', locales, options);
+ return compare(collator, this, that);
+ },
+ writable: true,
+ configurable: true,
+ enumerable: false
+});
+%FunctionSetName($String.prototype.localeCompare, 'localeCompare');
+%FunctionRemovePrototype($String.prototype.localeCompare);
+%SetNativeFlag($String.prototype.localeCompare);
+
+
+/**
+ * Formats a Number object (this) using locale and options values.
+ * If locale or options are omitted, defaults are used.
+ */
+$Object.defineProperty($Number.prototype, 'toLocaleString', {
+ value: function() {
+ if (%_IsConstructCall()) {
+ throw new $TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
+ }
+
+ if (!(this instanceof $Number) && typeof(this) !== 'number') {
+ throw new $TypeError('Method invoked on an object that is not Number.');
+ }
+
+ var locales = arguments[0];
+ var options = arguments[1];
+ var numberFormat = cachedOrNewService('numberformat', locales, options);
+ return formatNumber(numberFormat, this);
+ },
+ writable: true,
+ configurable: true,
+ enumerable: false
+});
+%FunctionSetName($Number.prototype.toLocaleString, 'toLocaleString');
+%FunctionRemovePrototype($Number.prototype.toLocaleString);
+%SetNativeFlag($Number.prototype.toLocaleString);
+
+
+/**
+ * Returns the formatted date, or fails if the date parameter is invalid.
+ */
+function toLocaleDateTime(date, locales, options, required, defaults, service) {
+ if (!(date instanceof $Date)) {
+ throw new $TypeError('Method invoked on an object that is not Date.');
+ }
+
+ if ($isNaN(date)) {
+ return 'Invalid Date';
+ }
+
+ var internalOptions = toDateTimeOptions(options, required, defaults);
+
+ var dateFormat =
+ cachedOrNewService(service, locales, options, internalOptions);
+
+ return formatDate(dateFormat, date);
+}
+
+
+/**
+ * Formats a Date object (this) using locale and options values.
+ * If locale or options are omitted, defaults are used - both date and time are
+ * present in the output.
+ */
+$Object.defineProperty($Date.prototype, 'toLocaleString', {
+ value: function() {
+ if (%_IsConstructCall()) {
+ throw new $TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
+ }
+
+ var locales = arguments[0];
+ var options = arguments[1];
+ return toLocaleDateTime(
+ this, locales, options, 'any', 'all', 'dateformatall');
+ },
+ writable: true,
+ configurable: true,
+ enumerable: false
+});
+%FunctionSetName($Date.prototype.toLocaleString, 'toLocaleString');
+%FunctionRemovePrototype($Date.prototype.toLocaleString);
+%SetNativeFlag($Date.prototype.toLocaleString);
+
+
+/**
+ * Formats a Date object (this) using locale and options values.
+ * If locale or options are omitted, defaults are used - only date is present
+ * in the output.
+ */
+$Object.defineProperty($Date.prototype, 'toLocaleDateString', {
+ value: function() {
+ if (%_IsConstructCall()) {
+ throw new $TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
+ }
+
+ var locales = arguments[0];
+ var options = arguments[1];
+ return toLocaleDateTime(
+ this, locales, options, 'date', 'date', 'dateformatdate');
+ },
+ writable: true,
+ configurable: true,
+ enumerable: false
+});
+%FunctionSetName($Date.prototype.toLocaleDateString, 'toLocaleDateString');
+%FunctionRemovePrototype($Date.prototype.toLocaleDateString);
+%SetNativeFlag($Date.prototype.toLocaleDateString);
+
+
+/**
+ * Formats a Date object (this) using locale and options values.
+ * If locale or options are omitted, defaults are used - only time is present
+ * in the output.
+ */
+$Object.defineProperty($Date.prototype, 'toLocaleTimeString', {
+ value: function() {
+ if (%_IsConstructCall()) {
+ throw new $TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
+ }
+
+ var locales = arguments[0];
+ var options = arguments[1];
+ return toLocaleDateTime(
+ this, locales, options, 'time', 'time', 'dateformattime');
+ },
+ writable: true,
+ configurable: true,
+ enumerable: false
+});
+%FunctionSetName($Date.prototype.toLocaleTimeString, 'toLocaleTimeString');
+%FunctionRemovePrototype($Date.prototype.toLocaleTimeString);
+%SetNativeFlag($Date.prototype.toLocaleTimeString);
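+
+// Usage sketch of the three Date methods above (exact output is locale-
+// and ICU-dependent; the strings are illustrative for 'en-US'):
+//
+//   var d = new Date(0);
+//   d.toLocaleString('en-US');      // date and time, e.g. '1/1/1970, ...'
+//   d.toLocaleDateString('en-US');  // date only, e.g. '1/1/1970'
+//   d.toLocaleTimeString('en-US');  // time only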
+
+return Intl;
+}())});
diff --git a/deps/v8/src/ia32/assembler-ia32-inl.h b/deps/v8/src/ia32/assembler-ia32-inl.h
index b6ef242a2..5a35b207f 100644
--- a/deps/v8/src/ia32/assembler-ia32-inl.h
+++ b/deps/v8/src/ia32/assembler-ia32-inl.h
@@ -251,7 +251,7 @@ bool RelocInfo::IsPatchedDebugBreakSlotSequence() {
}
-void RelocInfo::Visit(ObjectVisitor* visitor) {
+void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
visitor->VisitEmbeddedPointer(this);
@@ -266,12 +266,11 @@ void RelocInfo::Visit(ObjectVisitor* visitor) {
} else if (RelocInfo::IsCodeAgeSequence(mode)) {
visitor->VisitCodeAgeSequence(this);
#ifdef ENABLE_DEBUGGER_SUPPORT
- // TODO(isolates): Get a cached isolate below.
} else if (((RelocInfo::IsJSReturn(mode) &&
IsPatchedReturnSequence()) ||
(RelocInfo::IsDebugBreakSlot(mode) &&
IsPatchedDebugBreakSlotSequence())) &&
- Isolate::Current()->debug()->has_break_points()) {
+ isolate->debug()->has_break_points()) {
visitor->VisitDebugTarget(this);
#endif
} else if (IsRuntimeEntry(mode)) {
@@ -329,14 +328,11 @@ Immediate::Immediate(Label* internal_offset) {
Immediate::Immediate(Handle<Object> handle) {
-#ifdef DEBUG
- Isolate* isolate = Isolate::Current();
-#endif
AllowDeferredHandleDereference using_raw_address;
// Verify all Objects referred by code are NOT in new space.
Object* obj = *handle;
- ASSERT(!isolate->heap()->InNewSpace(obj));
if (obj->IsHeapObject()) {
+ ASSERT(!HeapObject::cast(obj)->GetHeap()->InNewSpace(obj));
x_ = reinterpret_cast<intptr_t>(handle.location());
rmode_ = RelocInfo::EMBEDDED_OBJECT;
} else {
diff --git a/deps/v8/src/ia32/assembler-ia32.cc b/deps/v8/src/ia32/assembler-ia32.cc
index 7bea37302..e5456da47 100644
--- a/deps/v8/src/ia32/assembler-ia32.cc
+++ b/deps/v8/src/ia32/assembler-ia32.cc
@@ -101,80 +101,28 @@ void CpuFeatures::Probe() {
return; // No features if we might serialize.
}
- const int kBufferSize = 4 * KB;
- VirtualMemory* memory = new VirtualMemory(kBufferSize);
- if (!memory->IsReserved()) {
- delete memory;
- return;
+ uint64_t probed_features = 0;
+ CPU cpu;
+ if (cpu.has_sse41()) {
+ probed_features |= static_cast<uint64_t>(1) << SSE4_1;
}
- ASSERT(memory->size() >= static_cast<size_t>(kBufferSize));
- if (!memory->Commit(memory->address(), kBufferSize, true/*executable*/)) {
- delete memory;
- return;
+ if (cpu.has_sse3()) {
+ probed_features |= static_cast<uint64_t>(1) << SSE3;
}
-
- Assembler assm(NULL, memory->address(), kBufferSize);
- Label cpuid, done;
-#define __ assm.
- // Save old esp, since we are going to modify the stack.
- __ push(ebp);
- __ pushfd();
- __ push(ecx);
- __ push(ebx);
- __ mov(ebp, esp);
-
- // If we can modify bit 21 of the EFLAGS register, then CPUID is supported.
- __ pushfd();
- __ pop(eax);
- __ mov(edx, eax);
- __ xor_(eax, 0x200000); // Flip bit 21.
- __ push(eax);
- __ popfd();
- __ pushfd();
- __ pop(eax);
- __ xor_(eax, edx); // Different if CPUID is supported.
- __ j(not_zero, &cpuid);
-
- // CPUID not supported. Clear the supported features in edx:eax.
- __ xor_(eax, eax);
- __ xor_(edx, edx);
- __ jmp(&done);
-
- // Invoke CPUID with 1 in eax to get feature information in
- // ecx:edx. Temporarily enable CPUID support because we know it's
- // safe here.
- __ bind(&cpuid);
- __ mov(eax, 1);
- supported_ = (1 << CPUID);
- { CpuFeatureScope fscope(&assm, CPUID);
- __ cpuid();
+ if (cpu.has_sse2()) {
+ probed_features |= static_cast<uint64_t>(1) << SSE2;
+ }
+ if (cpu.has_cmov()) {
+ probed_features |= static_cast<uint64_t>(1) << CMOV;
}
- supported_ = 0;
-
- // Move the result from ecx:edx to edx:eax and make sure to mark the
- // CPUID feature as supported.
- __ mov(eax, edx);
- __ or_(eax, 1 << CPUID);
- __ mov(edx, ecx);
-
- // Done.
- __ bind(&done);
- __ mov(esp, ebp);
- __ pop(ebx);
- __ pop(ecx);
- __ popfd();
- __ pop(ebp);
- __ ret(0);
-#undef __
-
- typedef uint64_t (*F0)();
- F0 probe = FUNCTION_CAST<F0>(reinterpret_cast<Address>(memory->address()));
- uint64_t probed_features = probe();
+
+ // SAHF must be available in compat/legacy mode.
+ ASSERT(cpu.has_sahf());
+ probed_features |= static_cast<uint64_t>(1) << SAHF;
+
uint64_t platform_features = OS::CpuFeaturesImpliedByPlatform();
supported_ = probed_features | platform_features;
found_by_runtime_probing_only_ = probed_features & ~platform_features;
-
- delete memory;
}
@@ -474,7 +422,6 @@ void Assembler::CodeTargetAlign() {
void Assembler::cpuid() {
- ASSERT(IsEnabled(CPUID));
EnsureSpace ensure_space(this);
EMIT(0x0F);
EMIT(0xA2);
@@ -1310,14 +1257,6 @@ void Assembler::nop() {
}
-void Assembler::rdtsc() {
- ASSERT(IsEnabled(RDTSC));
- EnsureSpace ensure_space(this);
- EMIT(0x0F);
- EMIT(0x31);
-}
-
-
void Assembler::ret(int imm16) {
EnsureSpace ensure_space(this);
ASSERT(is_uint16(imm16));
@@ -1641,6 +1580,13 @@ void Assembler::fstp_s(const Operand& adr) {
}
+void Assembler::fst_s(const Operand& adr) {
+ EnsureSpace ensure_space(this);
+ EMIT(0xD9);
+ emit_operand(edx, adr);
+}
+
+
void Assembler::fstp_d(const Operand& adr) {
EnsureSpace ensure_space(this);
EMIT(0xDD);
@@ -1775,12 +1721,24 @@ void Assembler::fadd(int i) {
}
+void Assembler::fadd_i(int i) {
+ EnsureSpace ensure_space(this);
+ emit_farith(0xD8, 0xC0, i);
+}
+
+
void Assembler::fsub(int i) {
EnsureSpace ensure_space(this);
emit_farith(0xDC, 0xE8, i);
}
+void Assembler::fsub_i(int i) {
+ EnsureSpace ensure_space(this);
+ emit_farith(0xD8, 0xE0, i);
+}
+
+
void Assembler::fisub_s(const Operand& adr) {
EnsureSpace ensure_space(this);
EMIT(0xDA);
@@ -1806,6 +1764,12 @@ void Assembler::fdiv(int i) {
}
+void Assembler::fdiv_i(int i) {
+ EnsureSpace ensure_space(this);
+ emit_farith(0xD8, 0xF0, i);
+}
+
+
void Assembler::faddp(int i) {
EnsureSpace ensure_space(this);
emit_farith(0xDE, 0xC0, i);
diff --git a/deps/v8/src/ia32/assembler-ia32.h b/deps/v8/src/ia32/assembler-ia32.h
index 8380897f6..55eff9319 100644
--- a/deps/v8/src/ia32/assembler-ia32.h
+++ b/deps/v8/src/ia32/assembler-ia32.h
@@ -183,6 +183,7 @@ const IntelDoubleRegister double_register_4 = { 4 };
const IntelDoubleRegister double_register_5 = { 5 };
const IntelDoubleRegister double_register_6 = { 6 };
const IntelDoubleRegister double_register_7 = { 7 };
+const IntelDoubleRegister no_double_reg = { -1 };
struct XMMRegister : IntelDoubleRegister {
@@ -227,6 +228,7 @@ struct XMMRegister : IntelDoubleRegister {
#define xmm5 (static_cast<const XMMRegister&>(double_register_5))
#define xmm6 (static_cast<const XMMRegister&>(double_register_6))
#define xmm7 (static_cast<const XMMRegister&>(double_register_7))
+#define no_xmm_reg (static_cast<const XMMRegister&>(no_double_reg))
struct X87Register : IntelDoubleRegister {
@@ -537,7 +539,6 @@ class CpuFeatures : public AllStatic {
if (f == SSE3 && !FLAG_enable_sse3) return false;
if (f == SSE4_1 && !FLAG_enable_sse4_1) return false;
if (f == CMOV && !FLAG_enable_cmov) return false;
- if (f == RDTSC && !FLAG_enable_rdtsc) return false;
return (supported_ & (static_cast<uint64_t>(1) << f)) != 0;
}
@@ -870,7 +871,6 @@ class Assembler : public AssemblerBase {
void hlt();
void int3();
void nop();
- void rdtsc();
void ret(int imm16);
// Label operations & relative jumps (PPUM Appendix D)
@@ -929,6 +929,7 @@ class Assembler : public AssemblerBase {
void fld_d(const Operand& adr);
void fstp_s(const Operand& adr);
+ void fst_s(const Operand& adr);
void fstp_d(const Operand& adr);
void fst_d(const Operand& adr);
@@ -955,10 +956,13 @@ class Assembler : public AssemblerBase {
void fninit();
void fadd(int i);
+ void fadd_i(int i);
void fsub(int i);
+ void fsub_i(int i);
void fmul(int i);
void fmul_i(int i);
void fdiv(int i);
+ void fdiv_i(int i);
void fisub_s(const Operand& adr);
diff --git a/deps/v8/src/ia32/builtins-ia32.cc b/deps/v8/src/ia32/builtins-ia32.cc
index 59124eab7..a1597481a 100644
--- a/deps/v8/src/ia32/builtins-ia32.cc
+++ b/deps/v8/src/ia32/builtins-ia32.cc
@@ -74,6 +74,24 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
}
+static void CallRuntimePassFunction(MacroAssembler* masm,
+ Runtime::FunctionId function_id) {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ // Push a copy of the function.
+ __ push(edi);
+ // Push call kind information.
+ __ push(ecx);
+ // Function is also the parameter to the runtime call.
+ __ push(edi);
+
+ __ CallRuntime(function_id, 1);
+ // Restore call kind information.
+ __ pop(ecx);
+ // Restore receiver.
+ __ pop(edi);
+}
+
+
static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
__ mov(eax, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
__ mov(eax, FieldOperand(eax, SharedFunctionInfo::kCodeOffset));
@@ -83,56 +101,29 @@ static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
void Builtins::Generate_InRecompileQueue(MacroAssembler* masm) {
- GenerateTailCallToSharedCode(masm);
-}
-
-
-void Builtins::Generate_InstallRecompiledCode(MacroAssembler* masm) {
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Push a copy of the function.
- __ push(edi);
- // Push call kind information.
- __ push(ecx);
-
- __ push(edi); // Function is also the parameter to the runtime call.
- __ CallRuntime(Runtime::kInstallRecompiledCode, 1);
-
- // Restore call kind information.
- __ pop(ecx);
- // Restore receiver.
- __ pop(edi);
-
- // Tear down internal frame.
- }
-
- // Do a tail-call of the compiled function.
+ // Checking whether the queued function is ready for install is optional,
+ // since we come across interrupts and stack checks elsewhere. However,
+ // not checking may delay installing ready functions, and always checking
+ // would be quite expensive. A good compromise is to first check against
+ // the stack limit as a cue for an interrupt signal.
+ Label ok;
+ ExternalReference stack_limit =
+ ExternalReference::address_of_stack_limit(masm->isolate());
+ __ cmp(esp, Operand::StaticVariable(stack_limit));
+ __ j(above_equal, &ok, Label::kNear);
+
+ CallRuntimePassFunction(masm, Runtime::kTryInstallRecompiledCode);
+ // Tail call to returned code.
__ lea(eax, FieldOperand(eax, Code::kHeaderSize));
__ jmp(eax);
-}
-
-void Builtins::Generate_ParallelRecompile(MacroAssembler* masm) {
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Push a copy of the function onto the stack.
- __ push(edi);
- // Push call kind information.
- __ push(ecx);
-
- __ push(edi); // Function is also the parameter to the runtime call.
- __ CallRuntime(Runtime::kParallelRecompile, 1);
-
- // Restore call kind information.
- __ pop(ecx);
- // Restore receiver.
- __ pop(edi);
+ __ bind(&ok);
+ GenerateTailCallToSharedCode(masm);
+}
- // Tear down internal frame.
- }
+void Builtins::Generate_ConcurrentRecompile(MacroAssembler* masm) {
+ CallRuntimePassFunction(masm, Runtime::kConcurrentRecompile);
GenerateTailCallToSharedCode(masm);
}
@@ -519,25 +510,7 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
void Builtins::Generate_LazyCompile(MacroAssembler* masm) {
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Push a copy of the function.
- __ push(edi);
- // Push call kind information.
- __ push(ecx);
-
- __ push(edi); // Function is also the parameter to the runtime call.
- __ CallRuntime(Runtime::kLazyCompile, 1);
-
- // Restore call kind information.
- __ pop(ecx);
- // Restore receiver.
- __ pop(edi);
-
- // Tear down internal frame.
- }
-
+ CallRuntimePassFunction(masm, Runtime::kLazyCompile);
// Do a tail-call of the compiled function.
__ lea(eax, FieldOperand(eax, Code::kHeaderSize));
__ jmp(eax);
@@ -545,25 +518,7 @@ void Builtins::Generate_LazyCompile(MacroAssembler* masm) {
void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Push a copy of the function onto the stack.
- __ push(edi);
- // Push call kind information.
- __ push(ecx);
-
- __ push(edi); // Function is also the parameter to the runtime call.
- __ CallRuntime(Runtime::kLazyRecompile, 1);
-
- // Restore call kind information.
- __ pop(ecx);
- // Restore receiver.
- __ pop(edi);
-
- // Tear down internal frame.
- }
-
+ CallRuntimePassFunction(masm, Runtime::kLazyRecompile);
// Do a tail-call of the compiled function.
__ lea(eax, FieldOperand(eax, Code::kHeaderSize));
__ jmp(eax);
@@ -1327,32 +1282,47 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
+ // Lookup the function in the JavaScript frame.
__ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
-
- // Pass the function to optimize as the argument to the on-stack
- // replacement runtime function.
{
FrameScope scope(masm, StackFrame::INTERNAL);
+ // Lookup and calculate pc offset.
+ __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerPCOffset));
+ __ mov(ebx, FieldOperand(eax, JSFunction::kSharedFunctionInfoOffset));
+ __ sub(edx, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ sub(edx, FieldOperand(ebx, SharedFunctionInfo::kCodeOffset));
+ __ SmiTag(edx);
+
+ // Pass both function and pc offset as arguments.
__ push(eax);
- __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
+ __ push(edx);
+ __ CallRuntime(Runtime::kCompileForOnStackReplacement, 2);
}
- // If the result was -1 it means that we couldn't optimize the
- // function. Just return and continue in the unoptimized version.
Label skip;
- __ cmp(eax, Immediate(Smi::FromInt(-1)));
+ // If the code object is null, just return to the unoptimized code.
+ __ cmp(eax, Immediate(0));
__ j(not_equal, &skip, Label::kNear);
__ ret(0);
__ bind(&skip);
- // Untag the AST id and push it on the stack.
- __ SmiUntag(eax);
- __ push(eax);
-
- // Generate the code for doing the frame-to-frame translation using
- // the deoptimizer infrastructure.
- Deoptimizer::EntryGenerator generator(masm, Deoptimizer::OSR);
- generator.Generate();
+
+ // Load deoptimization data from the code object.
+ __ mov(ebx, Operand(eax, Code::kDeoptimizationDataOffset - kHeapObjectTag));
+
+ // Load the OSR entrypoint offset from the deoptimization data.
+ __ mov(ebx, Operand(ebx, FixedArray::OffsetOfElementAt(
+ DeoptimizationInputData::kOsrPcOffsetIndex) - kHeapObjectTag));
+ __ SmiUntag(ebx);
+
+ // Compute the target address = code_obj + header_size + osr_offset
+ __ lea(eax, Operand(eax, ebx, times_1, Code::kHeaderSize - kHeapObjectTag));
+
+ // Overwrite the return address on the stack.
+ __ mov(Operand(esp, 0), eax);
+
+ // And "return" to the OSR entry point of the function.
+ __ ret(0);
}
diff --git a/deps/v8/src/ia32/code-stubs-ia32.cc b/deps/v8/src/ia32/code-stubs-ia32.cc
index d62f82ad6..a83c1ae91 100644
--- a/deps/v8/src/ia32/code-stubs-ia32.cc
+++ b/deps/v8/src/ia32/code-stubs-ia32.cc
@@ -43,6 +43,17 @@ namespace v8 {
namespace internal {
+void FastNewClosureStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { ebx };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kNewClosureFromStubFailure)->entry;
+}
+
+
void ToNumberStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
@@ -299,133 +310,6 @@ void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
}
-void FastNewClosureStub::Generate(MacroAssembler* masm) {
- // Create a new closure from the given function info in new
- // space. Set the context to the current context in esi.
- Counters* counters = masm->isolate()->counters();
-
- Label gc;
- __ Allocate(JSFunction::kSize, eax, ebx, ecx, &gc, TAG_OBJECT);
-
- __ IncrementCounter(counters->fast_new_closure_total(), 1);
-
- // Get the function info from the stack.
- __ mov(edx, Operand(esp, 1 * kPointerSize));
-
- int map_index = Context::FunctionMapIndex(language_mode_, is_generator_);
-
- // Compute the function map in the current native context and set that
- // as the map of the allocated object.
- __ mov(ecx, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ mov(ecx, FieldOperand(ecx, GlobalObject::kNativeContextOffset));
- __ mov(ebx, Operand(ecx, Context::SlotOffset(map_index)));
- __ mov(FieldOperand(eax, JSObject::kMapOffset), ebx);
-
- // Initialize the rest of the function. We don't have to update the
- // write barrier because the allocated object is in new space.
- Factory* factory = masm->isolate()->factory();
- __ mov(ebx, Immediate(factory->empty_fixed_array()));
- __ mov(FieldOperand(eax, JSObject::kPropertiesOffset), ebx);
- __ mov(FieldOperand(eax, JSObject::kElementsOffset), ebx);
- __ mov(FieldOperand(eax, JSFunction::kPrototypeOrInitialMapOffset),
- Immediate(factory->the_hole_value()));
- __ mov(FieldOperand(eax, JSFunction::kSharedFunctionInfoOffset), edx);
- __ mov(FieldOperand(eax, JSFunction::kContextOffset), esi);
- __ mov(FieldOperand(eax, JSFunction::kLiteralsOffset), ebx);
-
- // Initialize the code pointer in the function to be the one
- // found in the shared function info object.
- // But first check if there is an optimized version for our context.
- Label check_optimized;
- Label install_unoptimized;
- if (FLAG_cache_optimized_code) {
- __ mov(ebx, FieldOperand(edx, SharedFunctionInfo::kOptimizedCodeMapOffset));
- __ test(ebx, ebx);
- __ j(not_zero, &check_optimized, Label::kNear);
- }
- __ bind(&install_unoptimized);
- __ mov(FieldOperand(eax, JSFunction::kNextFunctionLinkOffset),
- Immediate(factory->undefined_value()));
- __ mov(edx, FieldOperand(edx, SharedFunctionInfo::kCodeOffset));
- __ lea(edx, FieldOperand(edx, Code::kHeaderSize));
- __ mov(FieldOperand(eax, JSFunction::kCodeEntryOffset), edx);
-
- // Return and remove the on-stack parameter.
- __ ret(1 * kPointerSize);
-
- __ bind(&check_optimized);
-
- __ IncrementCounter(counters->fast_new_closure_try_optimized(), 1);
-
- // ecx holds native context, ebx points to fixed array of 3-element entries
- // (native context, optimized code, literals).
- // Map must never be empty, so check the first elements.
- Label install_optimized;
- // Speculatively move code object into edx.
- __ mov(edx, FieldOperand(ebx, SharedFunctionInfo::kFirstCodeSlot));
- __ cmp(ecx, FieldOperand(ebx, SharedFunctionInfo::kFirstContextSlot));
- __ j(equal, &install_optimized);
-
- // Iterate through the rest of map backwards. edx holds an index as a Smi.
- Label loop;
- Label restore;
- __ mov(edx, FieldOperand(ebx, FixedArray::kLengthOffset));
- __ bind(&loop);
- // Do not double check first entry.
- __ cmp(edx, Immediate(Smi::FromInt(SharedFunctionInfo::kSecondEntryIndex)));
- __ j(equal, &restore);
- __ sub(edx, Immediate(Smi::FromInt(SharedFunctionInfo::kEntryLength)));
- __ cmp(ecx, CodeGenerator::FixedArrayElementOperand(ebx, edx, 0));
- __ j(not_equal, &loop, Label::kNear);
- // Hit: fetch the optimized code.
- __ mov(edx, CodeGenerator::FixedArrayElementOperand(ebx, edx, 1));
-
- __ bind(&install_optimized);
- __ IncrementCounter(counters->fast_new_closure_install_optimized(), 1);
-
- // TODO(fschneider): Idea: store proper code pointers in the optimized code
- // map and either unmangle them on marking or do nothing as the whole map is
- // discarded on major GC anyway.
- __ lea(edx, FieldOperand(edx, Code::kHeaderSize));
- __ mov(FieldOperand(eax, JSFunction::kCodeEntryOffset), edx);
-
- // Now link a function into a list of optimized functions.
- __ mov(edx, ContextOperand(ecx, Context::OPTIMIZED_FUNCTIONS_LIST));
-
- __ mov(FieldOperand(eax, JSFunction::kNextFunctionLinkOffset), edx);
- // No need for write barrier as JSFunction (eax) is in the new space.
-
- __ mov(ContextOperand(ecx, Context::OPTIMIZED_FUNCTIONS_LIST), eax);
- // Store JSFunction (eax) into edx before issuing write barrier as
- // it clobbers all the registers passed.
- __ mov(edx, eax);
- __ RecordWriteContextSlot(
- ecx,
- Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST),
- edx,
- ebx,
- kDontSaveFPRegs);
-
- // Return and remove the on-stack parameter.
- __ ret(1 * kPointerSize);
-
- __ bind(&restore);
- // Restore SharedFunctionInfo into edx.
- __ mov(edx, Operand(esp, 1 * kPointerSize));
- __ jmp(&install_unoptimized);
-
- // Create a new closure through the slower runtime call.
- __ bind(&gc);
- __ pop(ecx); // Temporarily remove return address.
- __ pop(edx);
- __ push(esi);
- __ push(edx);
- __ push(Immediate(factory->false_value()));
- __ push(ecx); // Restore return address.
- __ TailCallRuntime(Runtime::kNewClosure, 3, 1);
-}
-
-
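The loop deleted above walks the SharedFunctionInfo's optimized code map backwards, comparing context slots until it finds the entry for the current native context. A rough standalone C++ analogue of that lookup, with hypothetical types standing in for V8's heap objects (the real map is a FixedArray of (context, code, literals) triples):

    #include <cstddef>

    struct Context;  // stand-ins for the real heap objects
    struct Code;

    struct CodeMapEntry { Context* context; Code* code; void* literals; };

    // Search newest-to-oldest, mirroring the deleted assembly loop, which
    // iterates from FixedArray::kLengthOffset back toward the first entry.
    Code* LookupOptimizedCode(const CodeMapEntry* entries, size_t count,
                              Context* native_context) {
      for (size_t i = count; i-- > 0;) {
        if (entries[i].context == native_context) return entries[i].code;
      }
      return nullptr;  // fall back to the unoptimized code
    }
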
void FastNewContextStub::Generate(MacroAssembler* masm) {
// Try to allocate the context in new space.
Label gc;
@@ -613,11 +497,6 @@ class FloatingPointHelper : public AllStatic {
BinaryOpIC::TypeInfo right_type,
Label* operand_conversion_failure);
- // Assumes that operands are smis or heap numbers and loads them
- // into xmm0 and xmm1. Operands are in edx and eax.
- // Leaves operands unchanged.
- static void LoadSSE2Operands(MacroAssembler* masm);
-
// Test if operands are numbers (smi or HeapNumber objects), and load
// them into xmm0 and xmm1 if they are. Jump to label not_numbers if
// either operand is not a number. Operands are in edx and eax.
@@ -779,18 +658,6 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
}
-// Uses SSE2 to convert the heap number in |source| to an integer. Jumps to
-// |conversion_failure| if the heap number did not contain an int32 value.
-// Result is in ecx. Trashes ebx, xmm0, and xmm1.
-static void ConvertHeapNumberToInt32(MacroAssembler* masm,
- Register source,
- Label* conversion_failure) {
- __ movdbl(xmm0, FieldOperand(source, HeapNumber::kValueOffset));
- FloatingPointHelper::CheckSSE2OperandIsInt32(
- masm, conversion_failure, xmm0, ecx, ebx, xmm1);
-}
-
-
void BinaryOpStub::Initialize() {
platform_specific_bit_ = CpuFeatures::IsSupported(SSE3);
}
@@ -2391,16 +2258,7 @@ void FloatingPointHelper::LoadUnknownsAsIntegers(
__ cmp(ebx, factory->heap_number_map());
__ j(not_equal, &check_undefined_arg1);
- // Get the untagged integer version of the edx heap number in ecx.
- if (left_type == BinaryOpIC::INT32 && CpuFeatures::IsSupported(SSE2)) {
- CpuFeatureScope use_sse2(masm, SSE2);
- ConvertHeapNumberToInt32(masm, edx, conversion_failure);
- } else {
- DoubleToIStub stub(edx, ecx, HeapNumber::kValueOffset - kHeapObjectTag,
- true);
- __ call(stub.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
- }
- __ mov(edx, ecx);
+ __ TruncateHeapNumberToI(edx, edx);
// Here edx has the untagged integer, eax has a Smi or a heap number.
__ bind(&load_arg2);
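Both call sites now funnel through the TruncateHeapNumberToI macro instruction instead of open-coding an SSE2 conversion or a DoubleToIStub call. The numeric contract is ECMAScript's ToInt32; a sketch of that truncation in plain C++, assuming the macro preserves those semantics:

    #include <cmath>
    #include <cstdint>

    // ToInt32-style truncation: NaN and infinities map to 0; everything else
    // is truncated toward zero, reduced modulo 2^32, and reinterpreted as a
    // signed 32-bit value.
    int32_t TruncateToInt32(double value) {
      if (std::isnan(value) || std::isinf(value)) return 0;
      double t = std::trunc(value);
      double m = std::fmod(t, 4294967296.0);  // 2^32
      if (m < 0) m += 4294967296.0;
      return static_cast<int32_t>(static_cast<uint32_t>(m));
    }
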
@@ -2429,14 +2287,7 @@ void FloatingPointHelper::LoadUnknownsAsIntegers(
__ j(not_equal, &check_undefined_arg2);
// Get the untagged integer version of the eax heap number in ecx.
- if (right_type == BinaryOpIC::INT32 && CpuFeatures::IsSupported(SSE2)) {
- CpuFeatureScope use_sse2(masm, SSE2);
- ConvertHeapNumberToInt32(masm, eax, conversion_failure);
- } else {
- DoubleToIStub stub(eax, ecx, HeapNumber::kValueOffset - kHeapObjectTag,
- true);
- __ call(stub.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
- }
+ __ TruncateHeapNumberToI(ecx, eax);
__ bind(&done);
__ mov(eax, edx);
@@ -2461,33 +2312,6 @@ void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
}
-void FloatingPointHelper::LoadSSE2Operands(MacroAssembler* masm) {
- Label load_smi_edx, load_eax, load_smi_eax, done;
- // Load operand in edx into xmm0.
- __ JumpIfSmi(edx, &load_smi_edx, Label::kNear);
- __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
-
- __ bind(&load_eax);
- // Load operand in eax into xmm1.
- __ JumpIfSmi(eax, &load_smi_eax, Label::kNear);
- __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
- __ jmp(&done, Label::kNear);
-
- __ bind(&load_smi_edx);
- __ SmiUntag(edx); // Untag smi before converting to float.
- __ cvtsi2sd(xmm0, edx);
- __ SmiTag(edx); // Retag smi for heap number overwriting test.
- __ jmp(&load_eax);
-
- __ bind(&load_smi_eax);
- __ SmiUntag(eax); // Untag smi before converting to float.
- __ cvtsi2sd(xmm1, eax);
- __ SmiTag(eax); // Retag smi for heap number overwriting test.
-
- __ bind(&done);
-}
-
-
void FloatingPointHelper::LoadSSE2Operands(MacroAssembler* masm,
Label* not_numbers) {
Label load_smi_edx, load_eax, load_smi_eax, load_float_eax, done;
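The deleted helper untagged smis with SmiUntag before cvtsi2sd and retagged them afterwards. On ia32 a smi is a 31-bit integer shifted left by one with a zero tag bit, so the tag operations are single shifts; a minimal sketch:

    #include <cstdint>

    // ia32 smi encoding: value << 1, low bit 0. Heap object pointers have the
    // low bit set, which is what JumpIfSmi tests.
    inline bool    IsSmi(int32_t tagged)    { return (tagged & 1) == 0; }
    inline int32_t SmiUntag(int32_t tagged) { return tagged >> 1; }
    inline int32_t SmiTag(int32_t value)    { return value << 1; }
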
@@ -2690,16 +2514,16 @@ void MathPowStub::Generate(MacroAssembler* masm) {
}
if (exponent_type_ != INTEGER) {
- Label fast_power;
- // Detect integer exponents stored as double.
- __ cvttsd2si(exponent, Operand(double_exponent));
+ Label fast_power, try_arithmetic_simplification;
+ __ DoubleToI(exponent, double_exponent, double_scratch,
+ TREAT_MINUS_ZERO_AS_ZERO, &try_arithmetic_simplification);
+ __ jmp(&int_exponent);
+
+ __ bind(&try_arithmetic_simplification);
// Skip to runtime if possibly NaN (indicated by the indefinite integer).
+ __ cvttsd2si(exponent, Operand(double_exponent));
__ cmp(exponent, Immediate(0x80000000u));
__ j(equal, &call_runtime);
- __ cvtsi2sd(double_scratch, exponent);
- // Already ruled out NaNs for exponent.
- __ ucomisd(double_exponent, double_scratch);
- __ j(equal, &int_exponent);
if (exponent_type_ == ON_STACK) {
// Detect square root case. Crankshaft detects constant +/-0.5 at
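Once DoubleToI proves the exponent is an integer, control jumps to the int_exponent path, which computes the power by repeated squaring. A sketch of that fast path in C++, assuming the usual convention that a negative exponent yields the reciprocal of the positive power:

    // Exponentiation by squaring: O(log n) multiplications.
    double PowInteger(double base, int exponent) {
      unsigned long long e = exponent < 0
          ? -static_cast<long long>(exponent)
          : exponent;
      double result = 1.0, b = base;
      while (e != 0) {
        if (e & 1) result *= b;
        b *= b;
        e >>= 1;
      }
      return exponent < 0 ? 1.0 / result : result;
    }
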
@@ -4377,16 +4201,6 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
}
-void StackCheckStub::Generate(MacroAssembler* masm) {
- __ TailCallRuntime(Runtime::kStackGuard, 0, 1);
-}
-
-
-void InterruptStub::Generate(MacroAssembler* masm) {
- __ TailCallRuntime(Runtime::kInterrupt, 0, 1);
-}
-
-
static void GenerateRecordCallTarget(MacroAssembler* masm) {
// Cache the called function in a global property cell. Cache states
// are uninitialized, monomorphic (indicated by a JSFunction), and
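The comment above describes a three-state inline cache. A conceptual model in C++, with sentinel values standing in for V8's uninitialized/megamorphic markers (hypothetical, not the real cell layout):

    #include <cstdint>

    constexpr uintptr_t kUninitialized = 0;  // assumed sentinel values
    constexpr uintptr_t kMegamorphic   = 1;

    // First call caches the callee (monomorphic); a different callee later
    // collapses the cell to megamorphic, and it never recovers.
    void RecordCallTarget(uintptr_t* cell, uintptr_t callee) {
      if (*cell == callee || *cell == kMegamorphic) return;
      *cell = (*cell == kUninitialized) ? callee : kMegamorphic;
    }
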
@@ -4616,8 +4430,8 @@ bool CEntryStub::NeedsImmovableCode() {
}
-bool CEntryStub::IsPregenerated() {
- return (!save_doubles_ || ISOLATE->fp_stubs_generated()) &&
+bool CEntryStub::IsPregenerated(Isolate* isolate) {
+ return (!save_doubles_ || isolate->fp_stubs_generated()) &&
result_size_ == 1;
}
@@ -5701,7 +5515,6 @@ void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
__ j(below, &done);
// Check the number to string cache.
- Label not_cached;
__ bind(&not_string);
// Puts the cached result into scratch1.
NumberToStringStub::GenerateLookupNumberStringCache(masm,
@@ -5709,22 +5522,9 @@ void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
scratch1,
scratch2,
scratch3,
- &not_cached);
+ slow);
__ mov(arg, scratch1);
__ mov(Operand(esp, stack_offset), arg);
- __ jmp(&done);
-
- // Check if the argument is a safe string wrapper.
- __ bind(&not_cached);
- __ JumpIfSmi(arg, slow);
- __ CmpObjectType(arg, JS_VALUE_TYPE, scratch1); // map -> scratch1.
- __ j(not_equal, slow);
- __ test_b(FieldOperand(scratch1, Map::kBitField2Offset),
- 1 << Map::kStringWrapperSafeForDefaultValueOf);
- __ j(zero, slow);
- __ mov(arg, FieldOperand(arg, JSValue::kValueOffset));
- __ mov(Operand(esp, stack_offset), arg);
-
__ bind(&done);
}
@@ -6998,8 +6798,6 @@ static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
// Used in CompileArrayPushCall.
{ REG(ebx), REG(ecx), REG(edx), EMIT_REMEMBERED_SET },
{ REG(ebx), REG(edi), REG(edx), OMIT_REMEMBERED_SET },
- // Used in CompileStoreGlobal and CallFunctionStub.
- { REG(ebx), REG(ecx), REG(edx), OMIT_REMEMBERED_SET },
// Used in StoreStubCompiler::CompileStoreField and
// KeyedStoreStubCompiler::CompileStoreField via GenerateStoreField.
{ REG(edx), REG(ecx), REG(ebx), EMIT_REMEMBERED_SET },
@@ -7033,7 +6831,7 @@ static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
#undef REG
-bool RecordWriteStub::IsPregenerated() {
+bool RecordWriteStub::IsPregenerated(Isolate* isolate) {
for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
!entry->object.is(no_reg);
entry++) {
@@ -7427,96 +7225,128 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
template<class T>
-static void CreateArrayDispatch(MacroAssembler* masm) {
- int last_index = GetSequenceIndexFromFastElementsKind(
- TERMINAL_FAST_ELEMENTS_KIND);
- for (int i = 0; i <= last_index; ++i) {
- Label next;
- ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
- __ cmp(edx, kind);
- __ j(not_equal, &next);
- T stub(kind);
+static void CreateArrayDispatch(MacroAssembler* masm,
+ AllocationSiteOverrideMode mode) {
+ if (mode == DISABLE_ALLOCATION_SITES) {
+ T stub(GetInitialFastElementsKind(),
+ CONTEXT_CHECK_REQUIRED,
+ mode);
__ TailCallStub(&stub);
- __ bind(&next);
- }
+ } else if (mode == DONT_OVERRIDE) {
+ int last_index = GetSequenceIndexFromFastElementsKind(
+ TERMINAL_FAST_ELEMENTS_KIND);
+ for (int i = 0; i <= last_index; ++i) {
+ Label next;
+ ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
+ __ cmp(edx, kind);
+ __ j(not_equal, &next);
+ T stub(kind);
+ __ TailCallStub(&stub);
+ __ bind(&next);
+ }
- // If we reached this point there is a problem.
- __ Abort(kUnexpectedElementsKindInArrayConstructor);
+ // If we reached this point there is a problem.
+ __ Abort(kUnexpectedElementsKindInArrayConstructor);
+ } else {
+ UNREACHABLE();
+ }
}
-static void CreateArrayDispatchOneArgument(MacroAssembler* masm) {
- // ebx - type info cell
- // edx - kind
+static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
+ AllocationSiteOverrideMode mode) {
+ // ebx - type info cell (if mode != DISABLE_ALLOCATION_SITES)
+ // edx - kind (if mode != DISABLE_ALLOCATION_SITES)
// eax - number of arguments
// edi - constructor?
// esp[0] - return address
// esp[4] - last argument
- ASSERT(FAST_SMI_ELEMENTS == 0);
- ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
- ASSERT(FAST_ELEMENTS == 2);
- ASSERT(FAST_HOLEY_ELEMENTS == 3);
- ASSERT(FAST_DOUBLE_ELEMENTS == 4);
- ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
-
- Handle<Object> undefined_sentinel(
- masm->isolate()->heap()->undefined_value(),
- masm->isolate());
-
- // is the low bit set? If so, we are holey and that is good.
- __ test_b(edx, 1);
Label normal_sequence;
- __ j(not_zero, &normal_sequence);
+ if (mode == DONT_OVERRIDE) {
+ ASSERT(FAST_SMI_ELEMENTS == 0);
+ ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+ ASSERT(FAST_ELEMENTS == 2);
+ ASSERT(FAST_HOLEY_ELEMENTS == 3);
+ ASSERT(FAST_DOUBLE_ELEMENTS == 4);
+ ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
+
+    // Is the low bit set? If so, we are holey and that is good.

+ __ test_b(edx, 1);
+ __ j(not_zero, &normal_sequence);
+ }
// look at the first argument
__ mov(ecx, Operand(esp, kPointerSize));
__ test(ecx, ecx);
__ j(zero, &normal_sequence);
- // We are going to create a holey array, but our kind is non-holey.
- // Fix kind and retry (only if we have an allocation site in the cell).
- __ inc(edx);
- __ cmp(ebx, Immediate(undefined_sentinel));
- __ j(equal, &normal_sequence);
- __ mov(ecx, FieldOperand(ebx, Cell::kValueOffset));
- Handle<Map> allocation_site_map(
- masm->isolate()->heap()->allocation_site_map(),
- masm->isolate());
- __ cmp(FieldOperand(ecx, 0), Immediate(allocation_site_map));
- __ j(not_equal, &normal_sequence);
+ if (mode == DISABLE_ALLOCATION_SITES) {
+ ElementsKind initial = GetInitialFastElementsKind();
+ ElementsKind holey_initial = GetHoleyElementsKind(initial);
- // Save the resulting elements kind in type info
- __ SmiTag(edx);
- __ mov(FieldOperand(ecx, AllocationSite::kTransitionInfoOffset), edx);
- __ SmiUntag(edx);
+ ArraySingleArgumentConstructorStub stub_holey(holey_initial,
+ CONTEXT_CHECK_REQUIRED,
+ DISABLE_ALLOCATION_SITES);
+ __ TailCallStub(&stub_holey);
- __ bind(&normal_sequence);
- int last_index = GetSequenceIndexFromFastElementsKind(
- TERMINAL_FAST_ELEMENTS_KIND);
- for (int i = 0; i <= last_index; ++i) {
- Label next;
- ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
- __ cmp(edx, kind);
- __ j(not_equal, &next);
- ArraySingleArgumentConstructorStub stub(kind);
+ __ bind(&normal_sequence);
+ ArraySingleArgumentConstructorStub stub(initial,
+ CONTEXT_CHECK_REQUIRED,
+ DISABLE_ALLOCATION_SITES);
__ TailCallStub(&stub);
- __ bind(&next);
- }
+ } else if (mode == DONT_OVERRIDE) {
+ // We are going to create a holey array, but our kind is non-holey.
+ // Fix kind and retry.
+ __ inc(edx);
+ __ mov(ecx, FieldOperand(ebx, Cell::kValueOffset));
+ if (FLAG_debug_code) {
+ Handle<Map> allocation_site_map(
+ masm->isolate()->heap()->allocation_site_map(),
+ masm->isolate());
+ __ cmp(FieldOperand(ecx, 0), Immediate(allocation_site_map));
+ __ Assert(equal, kExpectedAllocationSiteInCell);
+ }
+
+ // Save the resulting elements kind in type info
+ __ SmiTag(edx);
+ __ mov(FieldOperand(ecx, AllocationSite::kTransitionInfoOffset), edx);
+ __ SmiUntag(edx);
+
+ __ bind(&normal_sequence);
+ int last_index = GetSequenceIndexFromFastElementsKind(
+ TERMINAL_FAST_ELEMENTS_KIND);
+ for (int i = 0; i <= last_index; ++i) {
+ Label next;
+ ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
+ __ cmp(edx, kind);
+ __ j(not_equal, &next);
+ ArraySingleArgumentConstructorStub stub(kind);
+ __ TailCallStub(&stub);
+ __ bind(&next);
+ }
- // If we reached this point there is a problem.
- __ Abort(kUnexpectedElementsKindInArrayConstructor);
+ // If we reached this point there is a problem.
+ __ Abort(kUnexpectedElementsKindInArrayConstructor);
+ } else {
+ UNREACHABLE();
+ }
}
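The low-bit test and the inc(edx) in the DONT_OVERRIDE branch both rely on the fast elements kinds alternating packed/holey, as the ASSERTs spell out. In C++ terms:

    enum ElementsKind {            // numbering asserted above
      FAST_SMI_ELEMENTS = 0,       FAST_HOLEY_SMI_ELEMENTS = 1,
      FAST_ELEMENTS = 2,           FAST_HOLEY_ELEMENTS = 3,
      FAST_DOUBLE_ELEMENTS = 4,    FAST_HOLEY_DOUBLE_ELEMENTS = 5
    };

    // Holey kinds are the odd ones, and each packed kind's holey twin is the
    // next value up -- hence "test_b(edx, 1)" and "inc(edx)" in the stub.
    inline bool IsHoley(ElementsKind kind) { return (kind & 1) != 0; }
    inline ElementsKind ToHoley(ElementsKind kind) {
      return static_cast<ElementsKind>(kind | 1);
    }
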
template<class T>
static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
+ ElementsKind initial_kind = GetInitialFastElementsKind();
+ ElementsKind initial_holey_kind = GetHoleyElementsKind(initial_kind);
+
int to_index = GetSequenceIndexFromFastElementsKind(
TERMINAL_FAST_ELEMENTS_KIND);
for (int i = 0; i <= to_index; ++i) {
ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
T stub(kind);
stub.GetCode(isolate)->set_is_pregenerated(true);
- if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
+ if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE ||
+ (!FLAG_track_allocation_sites &&
+ (kind == initial_kind || kind == initial_holey_kind))) {
T stub1(kind, CONTEXT_CHECK_REQUIRED, DISABLE_ALLOCATION_SITES);
stub1.GetCode(isolate)->set_is_pregenerated(true);
}
@@ -7549,6 +7379,34 @@ void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
}
+void ArrayConstructorStub::GenerateDispatchToArrayStub(
+ MacroAssembler* masm,
+ AllocationSiteOverrideMode mode) {
+ if (argument_count_ == ANY) {
+ Label not_zero_case, not_one_case;
+ __ test(eax, eax);
+ __ j(not_zero, &not_zero_case);
+ CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
+
+ __ bind(&not_zero_case);
+ __ cmp(eax, 1);
+ __ j(greater, &not_one_case);
+ CreateArrayDispatchOneArgument(masm, mode);
+
+ __ bind(&not_one_case);
+ CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
+ } else if (argument_count_ == NONE) {
+ CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
+ } else if (argument_count_ == ONE) {
+ CreateArrayDispatchOneArgument(masm, mode);
+ } else if (argument_count_ == MORE_THAN_ONE) {
+ CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
+ } else {
+ UNREACHABLE();
+ }
+}
+
+
void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : argc (only if argument_count_ == ANY)
@@ -7583,50 +7441,22 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
__ bind(&okay_here);
}
- Label no_info, switch_ready;
- // Get the elements kind and case on that.
+ Label no_info;
+ // If the type cell is undefined, or contains anything other than an
+ // AllocationSite, call an array constructor that doesn't use AllocationSites.
__ cmp(ebx, Immediate(undefined_sentinel));
__ j(equal, &no_info);
__ mov(edx, FieldOperand(ebx, Cell::kValueOffset));
-
- // The type cell may have undefined in its value.
- __ cmp(edx, Immediate(undefined_sentinel));
- __ j(equal, &no_info);
-
- // The type cell has either an AllocationSite or a JSFunction
__ cmp(FieldOperand(edx, 0), Immediate(Handle<Map>(
masm->isolate()->heap()->allocation_site_map())));
__ j(not_equal, &no_info);
__ mov(edx, FieldOperand(edx, AllocationSite::kTransitionInfoOffset));
__ SmiUntag(edx);
- __ jmp(&switch_ready);
- __ bind(&no_info);
- __ mov(edx, Immediate(GetInitialFastElementsKind()));
- __ bind(&switch_ready);
+ GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);
- if (argument_count_ == ANY) {
- Label not_zero_case, not_one_case;
- __ test(eax, eax);
- __ j(not_zero, &not_zero_case);
- CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm);
-
- __ bind(&not_zero_case);
- __ cmp(eax, 1);
- __ j(greater, &not_one_case);
- CreateArrayDispatchOneArgument(masm);
-
- __ bind(&not_one_case);
- CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm);
- } else if (argument_count_ == NONE) {
- CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm);
- } else if (argument_count_ == ONE) {
- CreateArrayDispatchOneArgument(masm);
- } else if (argument_count_ == MORE_THAN_ONE) {
- CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm);
- } else {
- UNREACHABLE();
- }
+ __ bind(&no_info);
+ GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);
}
diff --git a/deps/v8/src/ia32/code-stubs-ia32.h b/deps/v8/src/ia32/code-stubs-ia32.h
index e80acc6cc..5c8eca37b 100644
--- a/deps/v8/src/ia32/code-stubs-ia32.h
+++ b/deps/v8/src/ia32/code-stubs-ia32.h
@@ -74,7 +74,7 @@ class StoreBufferOverflowStub: public PlatformCodeStub {
void Generate(MacroAssembler* masm);
- virtual bool IsPregenerated() { return true; }
+ virtual bool IsPregenerated(Isolate* isolate) V8_OVERRIDE { return true; }
static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
virtual bool SometimesSetsUpAFrame() { return false; }
@@ -327,7 +327,7 @@ class RecordWriteStub: public PlatformCodeStub {
INCREMENTAL_COMPACTION
};
- virtual bool IsPregenerated();
+ virtual bool IsPregenerated(Isolate* isolate) V8_OVERRIDE;
static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
virtual bool SometimesSetsUpAFrame() { return false; }
diff --git a/deps/v8/src/ia32/codegen-ia32.cc b/deps/v8/src/ia32/codegen-ia32.cc
index 28b0f4ad8..84a4d238b 100644
--- a/deps/v8/src/ia32/codegen-ia32.cc
+++ b/deps/v8/src/ia32/codegen-ia32.cc
@@ -177,11 +177,6 @@ UnaryMathFunction CreateSqrtFunction() {
#undef __
#define __ ACCESS_MASM(masm)
-// Keep around global pointers to these objects so that Valgrind won't complain.
-static size_t* medium_handlers = NULL;
-static size_t* small_handlers = NULL;
-
-
enum Direction { FORWARD, BACKWARD };
enum Alignment { MOVE_ALIGNED, MOVE_UNALIGNED };
@@ -253,12 +248,24 @@ void MemMoveEmitPopAndReturn(MacroAssembler* masm) {
#define __ masm.
+class LabelConverter {
+ public:
+ explicit LabelConverter(byte* buffer) : buffer_(buffer) {}
+ int32_t address(Label* l) const {
+ return reinterpret_cast<int32_t>(buffer_) + l->pos();
+ }
+ private:
+ byte* buffer_;
+};
+
+
OS::MemMoveFunction CreateMemMoveFunction() {
size_t actual_size;
// Allocate buffer in executable space.
byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
if (buffer == NULL) return NULL;
MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
+ LabelConverter conv(buffer);
// Generated code is put into a fixed, unmovable buffer, and not into
// the V8 heap. We can't, and don't, refer to any relocatable addresses
@@ -452,7 +459,7 @@ OS::MemMoveFunction CreateMemMoveFunction() {
// Special handlers for 9 <= copy_size < 64. No assumptions about
// alignment or move distance, so all reads must be unaligned and
// must happen before any writes.
- Label f9_16, f17_32, f33_48, f49_63;
+ Label medium_handlers, f9_16, f17_32, f33_48, f49_63;
__ bind(&f9_16);
__ movdbl(xmm0, Operand(src, 0));
@@ -488,11 +495,11 @@ OS::MemMoveFunction CreateMemMoveFunction() {
__ movdqu(Operand(dst, count, times_1, -0x10), xmm3);
MemMoveEmitPopAndReturn(&masm);
- medium_handlers = new size_t[4];
- medium_handlers[0] = reinterpret_cast<intptr_t>(buffer) + f9_16.pos();
- medium_handlers[1] = reinterpret_cast<intptr_t>(buffer) + f17_32.pos();
- medium_handlers[2] = reinterpret_cast<intptr_t>(buffer) + f33_48.pos();
- medium_handlers[3] = reinterpret_cast<intptr_t>(buffer) + f49_63.pos();
+ __ bind(&medium_handlers);
+ __ dd(conv.address(&f9_16));
+ __ dd(conv.address(&f17_32));
+ __ dd(conv.address(&f33_48));
+ __ dd(conv.address(&f49_63));
__ bind(&medium_size); // Entry point into this block.
__ mov(eax, count);
@@ -505,13 +512,12 @@ OS::MemMoveFunction CreateMemMoveFunction() {
__ int3();
__ bind(&ok);
}
- __ mov(eax, Operand(eax, times_4,
- reinterpret_cast<intptr_t>(medium_handlers)));
+ __ mov(eax, Operand(eax, times_4, conv.address(&medium_handlers)));
__ jmp(eax);
}
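With LabelConverter, the handler tables are emitted straight into the code buffer via dd() and indexed with an Operand, rather than living in intentionally leaked heap arrays. The dispatch shape, approximated in portable C++ with function pointers (the bucket formula is inferred from the handler ranges, not shown in this hunk):

    #include <cstddef>
    #include <cstring>

    typedef void (*Handler)(void* dst, const void* src, size_t count);

    void Copy9To16(void* d, const void* s, size_t n)  { std::memmove(d, s, n); }
    void Copy17To32(void* d, const void* s, size_t n) { std::memmove(d, s, n); }
    void Copy33To48(void* d, const void* s, size_t n) { std::memmove(d, s, n); }
    void Copy49To63(void* d, const void* s, size_t n) { std::memmove(d, s, n); }

    const Handler kMediumHandlers[4] = {
      Copy9To16, Copy17To32, Copy33To48, Copy49To63
    };

    void MediumCopy(void* dst, const void* src, size_t count) {
      // count in [9, 63]; (count - 1) >> 4 picks the matching bucket.
      kMediumHandlers[(count - 1) >> 4](dst, src, count);
    }
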
{
// Specialized copiers for copy_size <= 8 bytes.
- Label f0, f1, f2, f3, f4, f5_8;
+ Label small_handlers, f0, f1, f2, f3, f4, f5_8;
__ bind(&f0);
MemMoveEmitPopAndReturn(&masm);
@@ -544,16 +550,16 @@ OS::MemMoveFunction CreateMemMoveFunction() {
__ mov(Operand(dst, count, times_1, -4), edx);
MemMoveEmitPopAndReturn(&masm);
- small_handlers = new size_t[9];
- small_handlers[0] = reinterpret_cast<intptr_t>(buffer) + f0.pos();
- small_handlers[1] = reinterpret_cast<intptr_t>(buffer) + f1.pos();
- small_handlers[2] = reinterpret_cast<intptr_t>(buffer) + f2.pos();
- small_handlers[3] = reinterpret_cast<intptr_t>(buffer) + f3.pos();
- small_handlers[4] = reinterpret_cast<intptr_t>(buffer) + f4.pos();
- small_handlers[5] = reinterpret_cast<intptr_t>(buffer) + f5_8.pos();
- small_handlers[6] = reinterpret_cast<intptr_t>(buffer) + f5_8.pos();
- small_handlers[7] = reinterpret_cast<intptr_t>(buffer) + f5_8.pos();
- small_handlers[8] = reinterpret_cast<intptr_t>(buffer) + f5_8.pos();
+ __ bind(&small_handlers);
+ __ dd(conv.address(&f0));
+ __ dd(conv.address(&f1));
+ __ dd(conv.address(&f2));
+ __ dd(conv.address(&f3));
+ __ dd(conv.address(&f4));
+ __ dd(conv.address(&f5_8));
+ __ dd(conv.address(&f5_8));
+ __ dd(conv.address(&f5_8));
+ __ dd(conv.address(&f5_8));
__ bind(&small_size); // Entry point into this block.
if (FLAG_debug_code) {
@@ -563,8 +569,7 @@ OS::MemMoveFunction CreateMemMoveFunction() {
__ int3();
__ bind(&ok);
}
- __ mov(eax, Operand(count, times_4,
- reinterpret_cast<intptr_t>(small_handlers)));
+ __ mov(eax, Operand(count, times_4, conv.address(&small_handlers)));
__ jmp(eax);
}
} else {
diff --git a/deps/v8/src/ia32/codegen-ia32.h b/deps/v8/src/ia32/codegen-ia32.h
index 6db381e47..6a207ca9b 100644
--- a/deps/v8/src/ia32/codegen-ia32.h
+++ b/deps/v8/src/ia32/codegen-ia32.h
@@ -53,7 +53,7 @@ class CodeGenerator {
// Print the code after compiling it.
static void PrintCode(Handle<Code> code, CompilationInfo* info);
- static bool ShouldGenerateLog(Expression* type);
+ static bool ShouldGenerateLog(Isolate* isolate, Expression* type);
static bool RecordPositions(MacroAssembler* masm,
int pos,
diff --git a/deps/v8/src/ia32/cpu-ia32.cc b/deps/v8/src/ia32/cpu-ia32.cc
index 77ff169b5..5fb04fc72 100644
--- a/deps/v8/src/ia32/cpu-ia32.cc
+++ b/deps/v8/src/ia32/cpu-ia32.cc
@@ -72,20 +72,6 @@ void CPU::FlushICache(void* start, size_t size) {
#endif
}
-
-void CPU::DebugBreak() {
-#ifdef _MSC_VER
- // To avoid Visual Studio runtime support the following code can be used
- // instead
- // __asm { int 3 }
- __debugbreak();
-#elif defined(__native_client__)
- asm("hlt");
-#else
- asm("int $3");
-#endif
-}
-
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_IA32
diff --git a/deps/v8/src/ia32/debug-ia32.cc b/deps/v8/src/ia32/debug-ia32.cc
index fd703dcc0..76a7003bf 100644
--- a/deps/v8/src/ia32/debug-ia32.cc
+++ b/deps/v8/src/ia32/debug-ia32.cc
@@ -49,8 +49,8 @@ bool BreakLocationIterator::IsDebugBreakAtReturn() {
void BreakLocationIterator::SetDebugBreakAtReturn() {
ASSERT(Assembler::kJSReturnSequenceLength >=
Assembler::kCallInstructionLength);
- Isolate* isolate = Isolate::Current();
- rinfo()->PatchCodeWithCall(isolate->debug()->debug_break_return()->entry(),
+ rinfo()->PatchCodeWithCall(
+ debug_info_->GetIsolate()->debug()->debug_break_return()->entry(),
Assembler::kJSReturnSequenceLength - Assembler::kCallInstructionLength);
}
@@ -79,7 +79,7 @@ bool BreakLocationIterator::IsDebugBreakAtSlot() {
void BreakLocationIterator::SetDebugBreakAtSlot() {
ASSERT(IsDebugBreakSlot());
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = debug_info_->GetIsolate();
rinfo()->PatchCodeWithCall(
isolate->debug()->debug_break_slot()->entry(),
Assembler::kDebugBreakSlotLength - Assembler::kCallInstructionLength);
diff --git a/deps/v8/src/ia32/deoptimizer-ia32.cc b/deps/v8/src/ia32/deoptimizer-ia32.cc
index a9bd8c50b..13a70afe5 100644
--- a/deps/v8/src/ia32/deoptimizer-ia32.cc
+++ b/deps/v8/src/ia32/deoptimizer-ia32.cc
@@ -200,12 +200,7 @@ static const byte kNopByteTwo = 0x90;
void Deoptimizer::PatchInterruptCodeAt(Code* unoptimized_code,
Address pc_after,
- Code* interrupt_code,
Code* replacement_code) {
- ASSERT(!InterruptCodeIsPatched(unoptimized_code,
- pc_after,
- interrupt_code,
- replacement_code));
// Turn the jump into nops.
Address call_target_address = pc_after - kIntSize;
*(call_target_address - 3) = kNopByteOne;
@@ -221,12 +216,7 @@ void Deoptimizer::PatchInterruptCodeAt(Code* unoptimized_code,
void Deoptimizer::RevertInterruptCodeAt(Code* unoptimized_code,
Address pc_after,
- Code* interrupt_code,
- Code* replacement_code) {
- ASSERT(InterruptCodeIsPatched(unoptimized_code,
- pc_after,
- interrupt_code,
- replacement_code));
+ Code* interrupt_code) {
// Restore the original jump.
Address call_target_address = pc_after - kIntSize;
*(call_target_address - 3) = kJnsInstruction;
@@ -241,214 +231,33 @@ void Deoptimizer::RevertInterruptCodeAt(Code* unoptimized_code,
#ifdef DEBUG
-bool Deoptimizer::InterruptCodeIsPatched(Code* unoptimized_code,
- Address pc_after,
- Code* interrupt_code,
- Code* replacement_code) {
+Deoptimizer::InterruptPatchState Deoptimizer::GetInterruptPatchState(
+ Isolate* isolate,
+ Code* unoptimized_code,
+ Address pc_after) {
Address call_target_address = pc_after - kIntSize;
ASSERT_EQ(kCallInstruction, *(call_target_address - 1));
if (*(call_target_address - 3) == kNopByteOne) {
- ASSERT_EQ(replacement_code->entry(),
- Assembler::target_address_at(call_target_address));
ASSERT_EQ(kNopByteTwo, *(call_target_address - 2));
- return true;
+ Code* osr_builtin =
+ isolate->builtins()->builtin(Builtins::kOnStackReplacement);
+ ASSERT_EQ(osr_builtin->entry(),
+ Assembler::target_address_at(call_target_address));
+ return PATCHED_FOR_OSR;
} else {
- ASSERT_EQ(interrupt_code->entry(),
+ // Get the interrupt stub code object to match against from cache.
+ Code* interrupt_builtin =
+ isolate->builtins()->builtin(Builtins::kInterruptCheck);
+ ASSERT_EQ(interrupt_builtin->entry(),
Assembler::target_address_at(call_target_address));
ASSERT_EQ(kJnsInstruction, *(call_target_address - 3));
ASSERT_EQ(kJnsOffset, *(call_target_address - 2));
- return false;
+ return NOT_PATCHED;
}
}
#endif // DEBUG
-static int LookupBailoutId(DeoptimizationInputData* data, BailoutId ast_id) {
- ByteArray* translations = data->TranslationByteArray();
- int length = data->DeoptCount();
- for (int i = 0; i < length; i++) {
- if (data->AstId(i) == ast_id) {
- TranslationIterator it(translations, data->TranslationIndex(i)->value());
- int value = it.Next();
- ASSERT(Translation::BEGIN == static_cast<Translation::Opcode>(value));
- // Read the number of frames.
- value = it.Next();
- if (value == 1) return i;
- }
- }
- UNREACHABLE();
- return -1;
-}
-
-
-void Deoptimizer::DoComputeOsrOutputFrame() {
- DeoptimizationInputData* data = DeoptimizationInputData::cast(
- compiled_code_->deoptimization_data());
- unsigned ast_id = data->OsrAstId()->value();
- // TODO(kasperl): This should not be the bailout_id_. It should be
- // the ast id. Confusing.
- ASSERT(bailout_id_ == ast_id);
-
- int bailout_id = LookupBailoutId(data, BailoutId(ast_id));
- unsigned translation_index = data->TranslationIndex(bailout_id)->value();
- ByteArray* translations = data->TranslationByteArray();
-
- TranslationIterator iterator(translations, translation_index);
- Translation::Opcode opcode =
- static_cast<Translation::Opcode>(iterator.Next());
- ASSERT(Translation::BEGIN == opcode);
- USE(opcode);
- int count = iterator.Next();
- iterator.Next(); // Drop JS frames count.
- ASSERT(count == 1);
- USE(count);
-
- opcode = static_cast<Translation::Opcode>(iterator.Next());
- USE(opcode);
- ASSERT(Translation::JS_FRAME == opcode);
- unsigned node_id = iterator.Next();
- USE(node_id);
- ASSERT(node_id == ast_id);
- int closure_id = iterator.Next();
- USE(closure_id);
- ASSERT_EQ(Translation::kSelfLiteralId, closure_id);
- unsigned height = iterator.Next();
- unsigned height_in_bytes = height * kPointerSize;
- USE(height_in_bytes);
-
- unsigned fixed_size = ComputeFixedSize(function_);
- unsigned input_frame_size = input_->GetFrameSize();
- ASSERT(fixed_size + height_in_bytes == input_frame_size);
-
- unsigned stack_slot_size = compiled_code_->stack_slots() * kPointerSize;
- unsigned outgoing_height = data->ArgumentsStackHeight(bailout_id)->value();
- unsigned outgoing_size = outgoing_height * kPointerSize;
- unsigned output_frame_size = fixed_size + stack_slot_size + outgoing_size;
- ASSERT(outgoing_size == 0); // OSR does not happen in the middle of a call.
-
- if (FLAG_trace_osr) {
- PrintF("[on-stack replacement: begin 0x%08" V8PRIxPTR " ",
- reinterpret_cast<intptr_t>(function_));
- PrintFunctionName();
- PrintF(" => node=%u, frame=%d->%d, ebp:esp=0x%08x:0x%08x]\n",
- ast_id,
- input_frame_size,
- output_frame_size,
- input_->GetRegister(ebp.code()),
- input_->GetRegister(esp.code()));
- }
-
- // There's only one output frame in the OSR case.
- output_count_ = 1;
- output_ = new FrameDescription*[1];
- output_[0] = new(output_frame_size) FrameDescription(
- output_frame_size, function_);
- output_[0]->SetFrameType(StackFrame::JAVA_SCRIPT);
-
- // Clear the incoming parameters in the optimized frame to avoid
- // confusing the garbage collector.
- unsigned output_offset = output_frame_size - kPointerSize;
- int parameter_count = function_->shared()->formal_parameter_count() + 1;
- for (int i = 0; i < parameter_count; ++i) {
- output_[0]->SetFrameSlot(output_offset, 0);
- output_offset -= kPointerSize;
- }
-
- // Translate the incoming parameters. This may overwrite some of the
- // incoming argument slots we've just cleared.
- int input_offset = input_frame_size - kPointerSize;
- bool ok = true;
- int limit = input_offset - (parameter_count * kPointerSize);
- while (ok && input_offset > limit) {
- ok = DoOsrTranslateCommand(&iterator, &input_offset);
- }
-
- // There are no translation commands for the caller's pc and fp, the
- // context, and the function. Set them up explicitly.
- for (int i = StandardFrameConstants::kCallerPCOffset;
- ok && i >= StandardFrameConstants::kMarkerOffset;
- i -= kPointerSize) {
- uint32_t input_value = input_->GetFrameSlot(input_offset);
- if (FLAG_trace_osr) {
- const char* name = "UNKNOWN";
- switch (i) {
- case StandardFrameConstants::kCallerPCOffset:
- name = "caller's pc";
- break;
- case StandardFrameConstants::kCallerFPOffset:
- name = "fp";
- break;
- case StandardFrameConstants::kContextOffset:
- name = "context";
- break;
- case StandardFrameConstants::kMarkerOffset:
- name = "function";
- break;
- }
- PrintF(" [sp + %d] <- 0x%08x ; [sp + %d] (fixed part - %s)\n",
- output_offset,
- input_value,
- input_offset,
- name);
- }
- output_[0]->SetFrameSlot(output_offset, input_->GetFrameSlot(input_offset));
- input_offset -= kPointerSize;
- output_offset -= kPointerSize;
- }
-
- // All OSR stack frames are dynamically aligned to an 8-byte boundary.
- int frame_pointer = input_->GetRegister(ebp.code());
- if ((frame_pointer & kPointerSize) != 0) {
- frame_pointer -= kPointerSize;
- has_alignment_padding_ = 1;
- }
-
- int32_t alignment_state = (has_alignment_padding_ == 1) ?
- kAlignmentPaddingPushed :
- kNoAlignmentPadding;
- if (FLAG_trace_osr) {
- PrintF(" [sp + %d] <- 0x%08x ; (alignment state)\n",
- output_offset,
- alignment_state);
- }
- output_[0]->SetFrameSlot(output_offset, alignment_state);
- output_offset -= kPointerSize;
-
- // Translate the rest of the frame.
- while (ok && input_offset >= 0) {
- ok = DoOsrTranslateCommand(&iterator, &input_offset);
- }
-
- // If translation of any command failed, continue using the input frame.
- if (!ok) {
- delete output_[0];
- output_[0] = input_;
- output_[0]->SetPc(reinterpret_cast<uint32_t>(from_));
- } else {
- // Set up the frame pointer and the context pointer.
- output_[0]->SetRegister(ebp.code(), frame_pointer);
- output_[0]->SetRegister(esi.code(), input_->GetRegister(esi.code()));
-
- unsigned pc_offset = data->OsrPcOffset()->value();
- uint32_t pc = reinterpret_cast<uint32_t>(
- compiled_code_->entry() + pc_offset);
- output_[0]->SetPc(pc);
- }
- Code* continuation =
- function_->GetIsolate()->builtins()->builtin(Builtins::kNotifyOSR);
- output_[0]->SetContinuation(
- reinterpret_cast<uint32_t>(continuation->entry()));
-
- if (FLAG_trace_osr) {
- PrintF("[on-stack replacement translation %s: 0x%08" V8PRIxPTR " ",
- ok ? "finished" : "aborted",
- reinterpret_cast<intptr_t>(function_));
- PrintFunctionName();
- PrintF(" => pc=0x%0x]\n", output_[0]->GetPc());
- }
-}
-
-
void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
// Set the register values. The values are not important as there are no
// callee saved registers in JavaScript frames, so all registers are
@@ -616,27 +425,17 @@ void Deoptimizer::EntryGenerator::Generate() {
}
__ pop(eax);
- if (type() != OSR) {
- // If frame was dynamically aligned, pop padding.
- Label no_padding;
- __ cmp(Operand(eax, Deoptimizer::has_alignment_padding_offset()),
- Immediate(0));
- __ j(equal, &no_padding);
- __ pop(ecx);
- if (FLAG_debug_code) {
- __ cmp(ecx, Immediate(kAlignmentZapValue));
- __ Assert(equal, kAlignmentMarkerExpected);
- }
- __ bind(&no_padding);
- } else {
- // If frame needs dynamic alignment push padding.
- Label no_padding;
- __ cmp(Operand(eax, Deoptimizer::has_alignment_padding_offset()),
- Immediate(0));
- __ j(equal, &no_padding);
- __ push(Immediate(kAlignmentZapValue));
- __ bind(&no_padding);
+ // If frame was dynamically aligned, pop padding.
+ Label no_padding;
+ __ cmp(Operand(eax, Deoptimizer::has_alignment_padding_offset()),
+ Immediate(0));
+ __ j(equal, &no_padding);
+ __ pop(ecx);
+ if (FLAG_debug_code) {
+ __ cmp(ecx, Immediate(kAlignmentZapValue));
+ __ Assert(equal, kAlignmentMarkerExpected);
}
+ __ bind(&no_padding);
// Replace the current frame with the output frames.
Label outer_push_loop, inner_push_loop,
@@ -663,7 +462,7 @@ void Deoptimizer::EntryGenerator::Generate() {
__ cmp(eax, edx);
__ j(below, &outer_push_loop);
- // In case of OSR or a failed STUB, we have to restore the XMM registers.
+ // In case of a failed STUB, we have to restore the XMM registers.
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope scope(masm(), SSE2);
for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
@@ -674,9 +473,7 @@ void Deoptimizer::EntryGenerator::Generate() {
}
// Push state, pc, and continuation from the last output frame.
- if (type() != OSR) {
- __ push(Operand(ebx, FrameDescription::state_offset()));
- }
+ __ push(Operand(ebx, FrameDescription::state_offset()));
__ push(Operand(ebx, FrameDescription::pc_offset()));
__ push(Operand(ebx, FrameDescription::continuation_offset()));
diff --git a/deps/v8/src/ia32/disasm-ia32.cc b/deps/v8/src/ia32/disasm-ia32.cc
index c43f11c00..01fa99964 100644
--- a/deps/v8/src/ia32/disasm-ia32.cc
+++ b/deps/v8/src/ia32/disasm-ia32.cc
@@ -606,7 +606,7 @@ int DisassemblerIA32::D1D3C1Instruction(byte* data) {
}
ASSERT_NE(NULL, mnem);
AppendToBuffer("%s %s,", mnem, NameOfCPURegister(rm));
- if (imm8 > 0) {
+ if (imm8 >= 0) {
AppendToBuffer("%d", imm8);
} else {
AppendToBuffer("cl");
@@ -698,6 +698,7 @@ int DisassemblerIA32::MemoryFPUInstruction(int escape_opcode,
switch (escape_opcode) {
case 0xD9: switch (regop) {
case 0: mnem = "fld_s"; break;
+ case 2: mnem = "fst_s"; break;
case 3: mnem = "fstp_s"; break;
case 7: mnem = "fstcw"; break;
default: UnimplementedInstruction();
@@ -743,7 +744,14 @@ int DisassemblerIA32::RegisterFPUInstruction(int escape_opcode,
switch (escape_opcode) {
case 0xD8:
- UnimplementedInstruction();
+ has_register = true;
+ switch (modrm_byte & 0xF8) {
+ case 0xC0: mnem = "fadd_i"; break;
+ case 0xE0: mnem = "fsub_i"; break;
+ case 0xC8: mnem = "fmul_i"; break;
+ case 0xF0: mnem = "fdiv_i"; break;
+ default: UnimplementedInstruction();
+ }
break;
case 0xD9:
@@ -767,6 +775,7 @@ int DisassemblerIA32::RegisterFPUInstruction(int escape_opcode,
case 0xEE: mnem = "fldz"; break;
case 0xF0: mnem = "f2xm1"; break;
case 0xF1: mnem = "fyl2x"; break;
+ case 0xF4: mnem = "fxtract"; break;
case 0xF5: mnem = "fprem1"; break;
case 0xF7: mnem = "fincstp"; break;
case 0xF8: mnem = "fprem"; break;
@@ -815,6 +824,7 @@ int DisassemblerIA32::RegisterFPUInstruction(int escape_opcode,
has_register = true;
switch (modrm_byte & 0xF8) {
case 0xC0: mnem = "ffree"; break;
+ case 0xD0: mnem = "fst"; break;
case 0xD8: mnem = "fstp"; break;
default: UnimplementedInstruction();
}
@@ -862,7 +872,6 @@ static const char* F0Mnem(byte f0byte) {
switch (f0byte) {
case 0x18: return "prefetch";
case 0xA2: return "cpuid";
- case 0x31: return "rdtsc";
case 0xBE: return "movsx_b";
case 0xBF: return "movsx_w";
case 0xB6: return "movzx_b";
@@ -1449,6 +1458,7 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
data += D1D3C1Instruction(data);
break;
+ case 0xD8: // fall through
case 0xD9: // fall through
case 0xDA: // fall through
case 0xDB: // fall through
diff --git a/deps/v8/src/ia32/full-codegen-ia32.cc b/deps/v8/src/ia32/full-codegen-ia32.cc
index f08a269e8..6d39cc1e6 100644
--- a/deps/v8/src/ia32/full-codegen-ia32.cc
+++ b/deps/v8/src/ia32/full-codegen-ia32.cc
@@ -288,8 +288,7 @@ void FullCodeGenerator::Generate() {
ExternalReference::address_of_stack_limit(isolate());
__ cmp(esp, Operand::StaticVariable(stack_limit));
__ j(above_equal, &ok, Label::kNear);
- StackCheckStub stub;
- __ CallStub(&stub);
+ __ call(isolate()->builtins()->StackCheck(), RelocInfo::CODE_TARGET);
__ bind(&ok);
}
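The generated prologue now calls the StackCheck builtin directly instead of going through a platform stub, but the guard itself is unchanged: one compare of esp against a per-isolate limit. A conceptual C++ rendering, where g_stack_limit and the slow-path call are stand-ins, and the address of a local approximates the stack pointer:

    #include <cstdint>

    extern uintptr_t g_stack_limit;   // assumed per-isolate limit; lowering it
                                      // is how interrupts are requested
    void HandleStackCheck();          // hypothetical slow path (the builtin)

    inline void StackGuard() {
      int anchor;
      uintptr_t sp = reinterpret_cast<uintptr_t>(&anchor);  // ~current esp
      if (sp >= g_stack_limit) return;  // fast path: a single compare
      HandleStackCheck();
    }
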
@@ -347,8 +346,7 @@ void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
}
EmitProfilingCounterDecrement(weight);
__ j(positive, &ok, Label::kNear);
- InterruptStub stub;
- __ CallStub(&stub);
+ __ call(isolate()->builtins()->InterruptCheck(), RelocInfo::CODE_TARGET);
// Record a mapping of this PC offset to the OSR id. This is used to find
// the AST id from the unoptimized code in order to use it as a key into
@@ -395,8 +393,8 @@ void FullCodeGenerator::EmitReturnSequence() {
__ push(Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
__ CallRuntime(Runtime::kOptimizeFunctionOnNextCall, 1);
} else {
- InterruptStub stub;
- __ CallStub(&stub);
+ __ call(isolate()->builtins()->InterruptCheck(),
+ RelocInfo::CODE_TARGET);
}
__ pop(eax);
EmitProfilingCounterReset();
@@ -1268,7 +1266,7 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
scope()->is_function_scope() &&
info->num_literals() == 0) {
FastNewClosureStub stub(info->language_mode(), info->is_generator());
- __ push(Immediate(info));
+ __ mov(ebx, Immediate(info));
__ CallStub(&stub);
} else {
__ push(esi);
@@ -2958,7 +2956,7 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
VisitForAccumulatorValue(args->at(0));
- Label materialize_true, materialize_false;
+ Label materialize_true, materialize_false, skip_lookup;
Label* if_true = NULL;
Label* if_false = NULL;
Label* fall_through = NULL;
@@ -2972,7 +2970,7 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
__ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
__ test_b(FieldOperand(ebx, Map::kBitField2Offset),
1 << Map::kStringWrapperSafeForDefaultValueOf);
- __ j(not_zero, if_true);
+ __ j(not_zero, &skip_lookup);
// Check for fast case object. Return false for slow case objects.
__ mov(ecx, FieldOperand(eax, JSObject::kPropertiesOffset));
@@ -3018,6 +3016,12 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
// Reload map as register ebx was used as temporary above.
__ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
+ // Set the bit in the map to indicate that there is no local valueOf field.
+ __ or_(FieldOperand(ebx, Map::kBitField2Offset),
+ Immediate(1 << Map::kStringWrapperSafeForDefaultValueOf));
+
+ __ bind(&skip_lookup);
+
// If a valueOf property is not found on the object check that its
// prototype is the un-modified String prototype. If not result is false.
__ mov(ecx, FieldOperand(ebx, Map::kPrototypeOffset));
@@ -3029,14 +3033,9 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
__ cmp(ecx,
ContextOperand(edx,
Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX));
- __ j(not_equal, if_false);
- // Set the bit in the map to indicate that it has been checked safe for
- // default valueOf and set true result.
- __ or_(FieldOperand(ebx, Map::kBitField2Offset),
- Immediate(1 << Map::kStringWrapperSafeForDefaultValueOf));
- __ jmp(if_true);
-
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(equal, if_true, if_false, fall_through);
+
context()->Plug(if_true, if_false);
}
@@ -3272,7 +3271,7 @@ void FullCodeGenerator::EmitLog(CallRuntime* expr) {
// 2 (array): Arguments to the format string.
ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(args->length(), 3);
- if (CodeGenerator::ShouldGenerateLog(args->at(0))) {
+ if (CodeGenerator::ShouldGenerateLog(isolate(), args->at(0))) {
VisitForStackValue(args->at(1));
VisitForStackValue(args->at(2));
__ CallRuntime(Runtime::kLog, 2);
diff --git a/deps/v8/src/ia32/ic-ia32.cc b/deps/v8/src/ia32/ic-ia32.cc
index 1e0f14e76..327ac5762 100644
--- a/deps/v8/src/ia32/ic-ia32.cc
+++ b/deps/v8/src/ia32/ic-ia32.cc
@@ -1306,7 +1306,7 @@ void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
Code::Flags flags = Code::ComputeFlags(
Code::STUB, MONOMORPHIC, Code::kNoExtraICState,
Code::NORMAL, Code::LOAD_IC);
- Isolate::Current()->stub_cache()->GenerateProbe(
+ masm->isolate()->stub_cache()->GenerateProbe(
masm, flags, edx, ecx, ebx, eax);
// Cache miss: Jump to runtime.
@@ -1425,8 +1425,8 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
Code::Flags flags = Code::ComputeFlags(
Code::STUB, MONOMORPHIC, strict_mode,
Code::NORMAL, Code::STORE_IC);
- Isolate::Current()->stub_cache()->GenerateProbe(masm, flags, edx, ecx, ebx,
- no_reg);
+ masm->isolate()->stub_cache()->GenerateProbe(
+ masm, flags, edx, ecx, ebx, no_reg);
// Cache miss: Jump to runtime.
GenerateMiss(masm);
diff --git a/deps/v8/src/ia32/lithium-codegen-ia32.cc b/deps/v8/src/ia32/lithium-codegen-ia32.cc
index a6c1f5a7e..d50b780d7 100644
--- a/deps/v8/src/ia32/lithium-codegen-ia32.cc
+++ b/deps/v8/src/ia32/lithium-codegen-ia32.cc
@@ -35,6 +35,7 @@
#include "deoptimizer.h"
#include "stub-cache.h"
#include "codegen.h"
+#include "hydrogen-osr.h"
namespace v8 {
namespace internal {
@@ -48,7 +49,7 @@ static SaveFPRegsMode GetSaveFPRegsMode() {
// When invoking builtins, we need to record the safepoint in the middle of
// the invoke instruction sequence generated by the macro assembler.
-class SafepointGenerator : public CallWrapper {
+class SafepointGenerator V8_FINAL : public CallWrapper {
public:
SafepointGenerator(LCodeGen* codegen,
LPointerMap* pointers,
@@ -56,11 +57,11 @@ class SafepointGenerator : public CallWrapper {
: codegen_(codegen),
pointers_(pointers),
deopt_mode_(mode) {}
- virtual ~SafepointGenerator() { }
+ virtual ~SafepointGenerator() {}
- virtual void BeforeCall(int call_size) const {}
+ virtual void BeforeCall(int call_size) const V8_OVERRIDE {}
- virtual void AfterCall() const {
+ virtual void AfterCall() const V8_OVERRIDE {
codegen_->RecordSafepoint(pointers_, deopt_mode_);
}
@@ -332,6 +333,28 @@ bool LCodeGen::GeneratePrologue() {
}
+void LCodeGen::GenerateOsrPrologue() {
+ // Generate the OSR entry prologue at the first unknown OSR value, or if there
+ // are none, at the OSR entrypoint instruction.
+ if (osr_pc_offset_ >= 0) return;
+
+ osr_pc_offset_ = masm()->pc_offset();
+
+ // Save the first local, which is overwritten by the alignment state.
+ Operand alignment_loc = MemOperand(ebp, -3 * kPointerSize);
+ __ push(alignment_loc);
+
+ // Set the dynamic frame alignment state to "not aligned".
+ __ mov(alignment_loc, Immediate(kNoAlignmentPadding));
+
+ // Adjust the frame size, subsuming the unoptimized frame into the
+ // optimized frame.
+ int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
+ ASSERT(slots >= 1);
+ __ sub(esp, Immediate((slots - 1) * kPointerSize));
+}
+
+
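The sub(esp, ...) above subsumes the unoptimized frame into the optimized one. The arithmetic, isolated below: kPointerSize is 4 on ia32, and the minus one accounts for the slot the push above already created.

    #include <cassert>

    int OsrStackAdjustment(int optimized_slots, int unoptimized_frame_slots) {
      int slots = optimized_slots - unoptimized_frame_slots;
      assert(slots >= 1);           // mirrors the ASSERT in the prologue
      return (slots - 1) * 4;       // bytes to subtract from esp
    }
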
bool LCodeGen::GenerateBody() {
ASSERT(is_generating());
bool emit_instructions = true;
@@ -360,8 +383,11 @@ bool LCodeGen::GenerateBody() {
instr->CompileToNative(this);
if (!CpuFeatures::IsSupported(SSE2)) {
- if (FLAG_debug_code && FLAG_enable_slow_asserts) {
- __ VerifyX87StackDepth(x87_stack_depth_);
+ if (instr->IsGoto()) {
+ x87_stack_.LeavingBlock(current_block_, LGoto::cast(instr));
+ } else if (FLAG_debug_code && FLAG_enable_slow_asserts &&
+ !instr->IsGap() && !instr->IsReturn()) {
+ __ VerifyX87StackDepth(x87_stack_.depth());
}
}
}
@@ -424,6 +450,8 @@ bool LCodeGen::GenerateDeferredCode() {
if (deferred_.length() > 0) {
for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
LDeferredCode* code = deferred_[i];
+ X87Stack copy(code->x87_stack());
+ x87_stack_ = copy;
int pos = instructions_->at(code->instruction_index())->position();
RecordAndUpdatePosition(pos);
@@ -448,6 +476,7 @@ bool LCodeGen::GenerateDeferredCode() {
}
code->Generate();
if (NeedsDeferredFrame()) {
+ __ bind(code->done());
Comment(";;; Destroy frame");
ASSERT(frame_is_built_);
frame_is_built_ = false;
@@ -497,21 +526,22 @@ XMMRegister LCodeGen::ToDoubleRegister(int index) const {
void LCodeGen::X87LoadForUsage(X87Register reg) {
- ASSERT(X87StackContains(reg));
- X87Fxch(reg);
- x87_stack_depth_--;
+ ASSERT(x87_stack_.Contains(reg));
+ x87_stack_.Fxch(reg);
+ x87_stack_.pop();
}
-void LCodeGen::X87Fxch(X87Register reg, int other_slot) {
- ASSERT(X87StackContains(reg) && x87_stack_depth_ > other_slot);
- int i = X87ArrayIndex(reg);
- int st = x87_st2idx(i);
+void LCodeGen::X87Stack::Fxch(X87Register reg, int other_slot) {
+ ASSERT(is_mutable_);
+ ASSERT(Contains(reg) && stack_depth_ > other_slot);
+ int i = ArrayIndex(reg);
+ int st = st2idx(i);
if (st != other_slot) {
- int other_i = x87_st2idx(other_slot);
- X87Register other = x87_stack_[other_i];
- x87_stack_[other_i] = reg;
- x87_stack_[i] = other;
+ int other_i = st2idx(other_slot);
+ X87Register other = stack_[other_i];
+ stack_[other_i] = reg;
+ stack_[i] = other;
if (st == 0) {
__ fxch(other_slot);
} else if (other_slot == 0) {
@@ -525,88 +555,104 @@ void LCodeGen::X87Fxch(X87Register reg, int other_slot) {
}
-int LCodeGen::x87_st2idx(int pos) {
- return x87_stack_depth_ - pos - 1;
+int LCodeGen::X87Stack::st2idx(int pos) {
+ return stack_depth_ - pos - 1;
}
-int LCodeGen::X87ArrayIndex(X87Register reg) {
- for (int i = 0; i < x87_stack_depth_; i++) {
- if (x87_stack_[i].is(reg)) return i;
+int LCodeGen::X87Stack::ArrayIndex(X87Register reg) {
+ for (int i = 0; i < stack_depth_; i++) {
+ if (stack_[i].is(reg)) return i;
}
UNREACHABLE();
return -1;
}
-bool LCodeGen::X87StackContains(X87Register reg) {
- for (int i = 0; i < x87_stack_depth_; i++) {
- if (x87_stack_[i].is(reg)) return true;
+bool LCodeGen::X87Stack::Contains(X87Register reg) {
+ for (int i = 0; i < stack_depth_; i++) {
+ if (stack_[i].is(reg)) return true;
}
return false;
}
-void LCodeGen::X87Free(X87Register reg) {
- ASSERT(X87StackContains(reg));
- int i = X87ArrayIndex(reg);
- int st = x87_st2idx(i);
+void LCodeGen::X87Stack::Free(X87Register reg) {
+ ASSERT(is_mutable_);
+ ASSERT(Contains(reg));
+ int i = ArrayIndex(reg);
+ int st = st2idx(i);
if (st > 0) {
// keep track of how fstp(i) changes the order of elements
- int tos_i = x87_st2idx(0);
- x87_stack_[i] = x87_stack_[tos_i];
+ int tos_i = st2idx(0);
+ stack_[i] = stack_[tos_i];
}
- x87_stack_depth_--;
+ pop();
__ fstp(st);
}
void LCodeGen::X87Mov(X87Register dst, Operand src, X87OperandType opts) {
- if (X87StackContains(dst)) {
- X87Fxch(dst);
+ if (x87_stack_.Contains(dst)) {
+ x87_stack_.Fxch(dst);
__ fstp(0);
} else {
- ASSERT(x87_stack_depth_ < X87Register::kNumAllocatableRegisters);
- x87_stack_[x87_stack_depth_] = dst;
- x87_stack_depth_++;
+ x87_stack_.push(dst);
}
X87Fld(src, opts);
}
void LCodeGen::X87Fld(Operand src, X87OperandType opts) {
- if (opts == kX87DoubleOperand) {
- __ fld_d(src);
- } else if (opts == kX87FloatOperand) {
- __ fld_s(src);
- } else if (opts == kX87IntOperand) {
- __ fild_s(src);
- } else {
- UNREACHABLE();
+ ASSERT(!src.is_reg_only());
+ switch (opts) {
+ case kX87DoubleOperand:
+ __ fld_d(src);
+ break;
+ case kX87FloatOperand:
+ __ fld_s(src);
+ break;
+ case kX87IntOperand:
+ __ fild_s(src);
+ break;
+ default:
+ UNREACHABLE();
}
}
-void LCodeGen::X87Mov(Operand dst, X87Register src) {
- X87Fxch(src);
- __ fst_d(dst);
+void LCodeGen::X87Mov(Operand dst, X87Register src, X87OperandType opts) {
+ ASSERT(!dst.is_reg_only());
+ x87_stack_.Fxch(src);
+ switch (opts) {
+ case kX87DoubleOperand:
+ __ fst_d(dst);
+ break;
+ case kX87IntOperand:
+ __ fist_s(dst);
+ break;
+ default:
+ UNREACHABLE();
+ }
}
-void LCodeGen::X87PrepareToWrite(X87Register reg) {
- if (X87StackContains(reg)) {
- X87Free(reg);
+void LCodeGen::X87Stack::PrepareToWrite(X87Register reg) {
+ ASSERT(is_mutable_);
+ if (Contains(reg)) {
+ Free(reg);
}
// Mark this register as the next register to write to
- x87_stack_[x87_stack_depth_] = reg;
+ stack_[stack_depth_] = reg;
}
-void LCodeGen::X87CommitWrite(X87Register reg) {
+void LCodeGen::X87Stack::CommitWrite(X87Register reg) {
+ ASSERT(is_mutable_);
// Assert the reg is prepared to write, but not on the virtual stack yet
- ASSERT(!X87StackContains(reg) && x87_stack_[x87_stack_depth_].is(reg) &&
- x87_stack_depth_ < X87Register::kNumAllocatableRegisters);
- x87_stack_depth_++;
+ ASSERT(!Contains(reg) && stack_[stack_depth_].is(reg) &&
+ stack_depth_ < X87Register::kNumAllocatableRegisters);
+ stack_depth_++;
}
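The refactor moves the bookkeeping into an X87Stack value object so deferred code can snapshot it. A toy model of the core invariant, that array slot i corresponds to physical x87 stack position depth - i - 1 (simplified, registers as ints):

    #include <cassert>

    class VirtualX87Stack {
     public:
      int st2idx(int pos) const { return depth_ - pos - 1; }
      bool Contains(int reg) const {
        for (int i = 0; i < depth_; i++) if (stack_[i] == reg) return true;
        return false;
      }
      void push(int reg) { assert(depth_ < 8); stack_[depth_++] = reg; }
      void pop()         { assert(depth_ > 0); depth_--; }
      int depth() const  { return depth_; }
     private:
      int stack_[8];
      int depth_ = 0;
    };
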
@@ -614,38 +660,62 @@ void LCodeGen::X87PrepareBinaryOp(
X87Register left, X87Register right, X87Register result) {
// You need to use DefineSameAsFirst for x87 instructions
ASSERT(result.is(left));
- X87Fxch(right, 1);
- X87Fxch(left);
+ x87_stack_.Fxch(right, 1);
+ x87_stack_.Fxch(left);
}
-void LCodeGen::FlushX87StackIfNecessary(LInstruction* instr) {
- if (x87_stack_depth_ > 0 && instr->ClobbersDoubleRegisters()) {
+void LCodeGen::X87Stack::FlushIfNecessary(LInstruction* instr, LCodeGen* cgen) {
+ if (stack_depth_ > 0 && instr->ClobbersDoubleRegisters()) {
bool double_inputs = instr->HasDoubleRegisterInput();
// Flush stack from tos down, since FreeX87() will mess with tos
- for (int i = x87_stack_depth_-1; i >= 0; i--) {
- X87Register reg = x87_stack_[i];
+ for (int i = stack_depth_-1; i >= 0; i--) {
+ X87Register reg = stack_[i];
// Skip registers which contain the inputs for the next instruction
// when flushing the stack
- if (double_inputs && instr->IsDoubleInput(reg, this)) {
+ if (double_inputs && instr->IsDoubleInput(reg, cgen)) {
continue;
}
- X87Free(reg);
- if (i < x87_stack_depth_-1) i++;
+ Free(reg);
+ if (i < stack_depth_-1) i++;
}
}
if (instr->IsReturn()) {
- while (x87_stack_depth_ > 0) {
+ while (stack_depth_ > 0) {
__ fstp(0);
- x87_stack_depth_--;
+ stack_depth_--;
}
+ if (FLAG_debug_code && FLAG_enable_slow_asserts) __ VerifyX87StackDepth(0);
+ }
+}
+
+
+void LCodeGen::X87Stack::LeavingBlock(int current_block_id, LGoto* goto_instr) {
+ ASSERT(stack_depth_ <= 1);
+  // If this is ever used for new stubs that produce two pairs of doubles
+  // joined into two phis, the assert above will hit. That situation is not
+  // handled, since the two stacks might have st0 and st1 swapped.
+ if (current_block_id + 1 != goto_instr->block_id()) {
+ // If we have a value on the x87 stack on leaving a block, it must be a
+ // phi input. If the next block we compile is not the join block, we have
+ // to discard the stack state.
+ stack_depth_ = 0;
}
}
void LCodeGen::EmitFlushX87ForDeopt() {
- for (int i = 0; i < x87_stack_depth_; i++) __ fstp(0);
+  // The deoptimizer does not support X87 registers. But as long as we only
+  // deopt from a stub it's not a problem, since we will re-materialize the
+  // original stub inputs, which can't be double registers.
+ ASSERT(info()->IsStub());
+ if (FLAG_debug_code && FLAG_enable_slow_asserts) {
+ __ pushfd();
+ __ VerifyX87StackDepth(x87_stack_.depth());
+ __ popfd();
+ }
+ for (int i = 0; i < x87_stack_.depth(); i++) __ fstp(0);
}
@@ -685,7 +755,7 @@ int32_t LCodeGen::ToRepresentation(LConstantOperand* op,
Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
HConstant* constant = chunk_->LookupConstant(op);
ASSERT(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
- return constant->handle();
+ return constant->handle(isolate());
}
@@ -848,7 +918,7 @@ void LCodeGen::AddToTranslation(LEnvironment* environment,
translation->StoreDoubleRegister(reg);
} else if (op->IsConstantOperand()) {
HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
- int src_index = DefineDeoptimizationLiteral(constant->handle());
+ int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
translation->StoreLiteral(src_index);
} else {
UNREACHABLE();
@@ -908,7 +978,7 @@ void LCodeGen::LoadContextFromDeferred(LOperand* context) {
} else if (context->IsConstantOperand()) {
HConstant* constant =
chunk_->LookupConstant(LConstantOperand::cast(context));
- __ LoadObject(esi, Handle<Object>::cast(constant->handle()));
+ __ LoadObject(esi, Handle<Object>::cast(constant->handle(isolate())));
} else {
UNREACHABLE();
}
@@ -1003,7 +1073,7 @@ void LCodeGen::DeoptimizeIf(Condition cc,
// we can have inputs or outputs of the current instruction on the stack,
// thus we need to flush them here from the physical stack to leave it in a
// consistent state.
- if (x87_stack_depth_ > 0) {
+ if (x87_stack_.depth() > 0) {
Label done;
if (cc != no_condition) __ j(NegateCondition(cc), &done, Label::kNear);
EmitFlushX87ForDeopt();
@@ -1289,8 +1359,7 @@ void LCodeGen::DoCallStub(LCallStub* instr) {
void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
- // Record the address of the first unknown OSR value as the place to enter.
- if (osr_pc_offset_ == -1) osr_pc_offset_ = masm()->pc_offset();
+ GenerateOsrPrologue();
}
@@ -1766,8 +1835,8 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
case Token::ROR:
__ ror_cl(ToRegister(left));
if (instr->can_deopt()) {
- __ test(ToRegister(left), Immediate(0x80000000));
- DeoptimizeIf(not_zero, instr->environment());
+ __ test(ToRegister(left), ToRegister(left));
+ DeoptimizeIf(sign, instr->environment());
}
break;
case Token::SAR:
@@ -1776,8 +1845,8 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
case Token::SHR:
__ shr_cl(ToRegister(left));
if (instr->can_deopt()) {
- __ test(ToRegister(left), Immediate(0x80000000));
- DeoptimizeIf(not_zero, instr->environment());
+ __ test(ToRegister(left), ToRegister(left));
+ DeoptimizeIf(sign, instr->environment());
}
break;
case Token::SHL:
@@ -1793,8 +1862,8 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
switch (instr->op()) {
case Token::ROR:
if (shift_count == 0 && instr->can_deopt()) {
- __ test(ToRegister(left), Immediate(0x80000000));
- DeoptimizeIf(not_zero, instr->environment());
+ __ test(ToRegister(left), ToRegister(left));
+ DeoptimizeIf(sign, instr->environment());
} else {
__ ror(ToRegister(left), shift_count);
}
@@ -1806,8 +1875,8 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
break;
case Token::SHR:
if (shift_count == 0 && instr->can_deopt()) {
- __ test(ToRegister(left), Immediate(0x80000000));
- DeoptimizeIf(not_zero, instr->environment());
+ __ test(ToRegister(left), ToRegister(left));
+ DeoptimizeIf(sign, instr->environment());
} else {
__ shr(ToRegister(left), shift_count);
}
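Each of these hunks replaces test(reg, Immediate(0x80000000)) / j(not_zero) with test(reg, reg) / j(sign). Both check the same condition, the sign bit of the shift result, but the register-register test avoids a four-byte immediate. Their equivalence in C++:

    #include <cstdint>

    // Old form: mask out everything but the sign bit.
    inline bool DeoptOld(uint32_t result) {
      return (result & 0x80000000u) != 0;
    }

    // New form: test reg,reg sets SF from the value itself; j(sign) then
    // takes the branch exactly when the sign bit is set.
    inline bool DeoptNew(uint32_t result) {
      return static_cast<int32_t>(result) < 0;
    }
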
@@ -1866,15 +1935,16 @@ void LCodeGen::DoConstantD(LConstantD* instr) {
uint64_t int_val = BitCast<uint64_t, double>(v);
int32_t lower = static_cast<int32_t>(int_val);
int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt));
+ ASSERT(instr->result()->IsDoubleRegister());
if (!CpuFeatures::IsSafeForSnapshot(SSE2)) {
__ push(Immediate(upper));
__ push(Immediate(lower));
- X87Mov(ToX87Register(instr->result()), Operand(esp, 0));
+ X87Register reg = ToX87Register(instr->result());
+ X87Mov(reg, Operand(esp, 0));
__ add(Operand(esp), Immediate(kDoubleSize));
} else {
CpuFeatureScope scope1(masm(), SSE2);
- ASSERT(instr->result()->IsDoubleRegister());
XMMRegister res = ToDoubleRegister(instr->result());
if (int_val == 0) {
__ xorps(res, res);
@@ -1914,7 +1984,7 @@ void LCodeGen::DoConstantE(LConstantE* instr) {
void LCodeGen::DoConstantT(LConstantT* instr) {
Register reg = ToRegister(instr->result());
- Handle<Object> handle = instr->value();
+ Handle<Object> handle = instr->value(isolate());
AllowDeferredHandleDereference smi_check;
__ LoadObject(reg, handle);
}
@@ -2181,11 +2251,36 @@ void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
X87Register left = ToX87Register(instr->left());
X87Register right = ToX87Register(instr->right());
X87Register result = ToX87Register(instr->result());
- X87PrepareBinaryOp(left, right, result);
+ if (instr->op() != Token::MOD) {
+ X87PrepareBinaryOp(left, right, result);
+ }
switch (instr->op()) {
+ case Token::ADD:
+ __ fadd_i(1);
+ break;
+ case Token::SUB:
+ __ fsub_i(1);
+ break;
case Token::MUL:
__ fmul_i(1);
break;
+ case Token::DIV:
+ __ fdiv_i(1);
+ break;
+ case Token::MOD: {
+ // Pass two doubles as arguments on the stack.
+ __ PrepareCallCFunction(4, eax);
+ X87Mov(Operand(esp, 1 * kDoubleSize), right);
+ X87Mov(Operand(esp, 0), left);
+ X87PrepareToWrite(result);
+ __ CallCFunction(
+ ExternalReference::double_fp_operation(Token::MOD, isolate()),
+ 4);
+
+ // Return value is in st(0) on ia32.
+ X87CommitWrite(result);
+ break;
+ }
default:
UNREACHABLE();
break;
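
The new Token::MOD case cannot be expressed as a single in-stack x87 instruction, so both operands are spilled to the stack and a C helper is called, with the result coming back in st(0). A hedged sketch of what that helper computes (the name mod_two_doubles is hypothetical; the real target is resolved through ExternalReference::double_fp_operation):

    #include <cmath>

    // Hypothetical stand-in for the C helper the MOD case calls out to.
    extern "C" double mod_two_doubles(double left, double right) {
      return std::fmod(left, right);  // matches JS '%' semantics on finite numbers
    }
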
@@ -2267,7 +2362,6 @@ void LCodeGen::DoIsNumberAndBranch(LIsNumberAndBranch* instr) {
void LCodeGen::DoBranch(LBranch* instr) {
Representation r = instr->hydrogen()->value()->representation();
if (r.IsSmiOrInteger32()) {
- ASSERT(!info()->IsStub());
Register reg = ToRegister(instr->value());
__ test(reg, Operand(reg));
EmitBranch(instr, not_zero);
@@ -2411,6 +2505,10 @@ void LCodeGen::EmitGoto(int block) {
}
+void LCodeGen::DoClobberDoubles(LClobberDoubles* instr) {
+}
+
+
void LCodeGen::DoGoto(LGoto* instr) {
EmitGoto(instr->block_id());
}
@@ -2817,15 +2915,16 @@ void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
- class DeferredInstanceOfKnownGlobal: public LDeferredCode {
+ class DeferredInstanceOfKnownGlobal V8_FINAL : public LDeferredCode {
public:
DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
- LInstanceOfKnownGlobal* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() {
+ LInstanceOfKnownGlobal* instr,
+ const X87Stack& x87_stack)
+ : LDeferredCode(codegen, x87_stack), instr_(instr) { }
+ virtual void Generate() V8_OVERRIDE {
codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
}
- virtual LInstruction* instr() { return instr_; }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
Label* map_check() { return &map_check_; }
private:
LInstanceOfKnownGlobal* instr_;
@@ -2833,7 +2932,7 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
};
DeferredInstanceOfKnownGlobal* deferred;
- deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr);
+ deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr, x87_stack_);
Label done, false_result;
Register object = ToRegister(instr->value());
@@ -3785,14 +3884,16 @@ void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
void LCodeGen::DoMathAbs(LMathAbs* instr) {
// Class for deferred case.
- class DeferredMathAbsTaggedHeapNumber: public LDeferredCode {
+ class DeferredMathAbsTaggedHeapNumber V8_FINAL : public LDeferredCode {
public:
- DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() {
+ DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen,
+ LMathAbs* instr,
+ const X87Stack& x87_stack)
+ : LDeferredCode(codegen, x87_stack), instr_(instr) { }
+ virtual void Generate() V8_OVERRIDE {
codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
}
- virtual LInstruction* instr() { return instr_; }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
LMathAbs* instr_;
};
@@ -3811,7 +3912,7 @@ void LCodeGen::DoMathAbs(LMathAbs* instr) {
EmitIntegerMathAbs(instr);
} else { // Tagged case.
DeferredMathAbsTaggedHeapNumber* deferred =
- new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
+ new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr, x87_stack_);
Register input_reg = ToRegister(instr->value());
// Smi check.
__ JumpIfNotSmi(input_reg, deferred->entry());
@@ -4025,82 +4126,66 @@ void LCodeGen::DoPower(LPower* instr) {
void LCodeGen::DoRandom(LRandom* instr) {
- class DeferredDoRandom: public LDeferredCode {
- public:
- DeferredDoRandom(LCodeGen* codegen, LRandom* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredRandom(instr_); }
- virtual LInstruction* instr() { return instr_; }
- private:
- LRandom* instr_;
- };
-
- DeferredDoRandom* deferred = new(zone()) DeferredDoRandom(this, instr);
-
CpuFeatureScope scope(masm(), SSE2);
- // Having marked this instruction as a call we can use any
- // registers.
- ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
- ASSERT(ToRegister(instr->global_object()).is(eax));
+
// Assert that the register size is indeed the size of each seed.
static const int kSeedSize = sizeof(uint32_t);
STATIC_ASSERT(kPointerSize == kSeedSize);
- __ mov(eax, FieldOperand(eax, GlobalObject::kNativeContextOffset));
+ // Load native context
+ Register global_object = ToRegister(instr->global_object());
+ Register native_context = global_object;
+ __ mov(native_context, FieldOperand(
+ global_object, GlobalObject::kNativeContextOffset));
+
+ // Load state (FixedArray of the native context's random seeds)
static const int kRandomSeedOffset =
FixedArray::kHeaderSize + Context::RANDOM_SEED_INDEX * kPointerSize;
- __ mov(ebx, FieldOperand(eax, kRandomSeedOffset));
- // ebx: FixedArray of the native context's random seeds
+ Register state = native_context;
+ __ mov(state, FieldOperand(native_context, kRandomSeedOffset));
// Load state[0].
- __ mov(ecx, FieldOperand(ebx, ByteArray::kHeaderSize));
- // If state[0] == 0, call runtime to initialize seeds.
- __ test(ecx, ecx);
- __ j(zero, deferred->entry());
+ Register state0 = ToRegister(instr->scratch());
+ __ mov(state0, FieldOperand(state, ByteArray::kHeaderSize));
// Load state[1].
- __ mov(eax, FieldOperand(ebx, ByteArray::kHeaderSize + kSeedSize));
- // ecx: state[0]
- // eax: state[1]
+ Register state1 = ToRegister(instr->scratch2());
+ __ mov(state1, FieldOperand(state, ByteArray::kHeaderSize + kSeedSize));
// state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16)
- __ movzx_w(edx, ecx);
- __ imul(edx, edx, 18273);
- __ shr(ecx, 16);
- __ add(ecx, edx);
+ Register scratch3 = ToRegister(instr->scratch3());
+ __ movzx_w(scratch3, state0);
+ __ imul(scratch3, scratch3, 18273);
+ __ shr(state0, 16);
+ __ add(state0, scratch3);
// Save state[0].
- __ mov(FieldOperand(ebx, ByteArray::kHeaderSize), ecx);
+ __ mov(FieldOperand(state, ByteArray::kHeaderSize), state0);
// state[1] = 36969 * (state[1] & 0xFFFF) + (state[1] >> 16)
- __ movzx_w(edx, eax);
- __ imul(edx, edx, 36969);
- __ shr(eax, 16);
- __ add(eax, edx);
+ __ movzx_w(scratch3, state1);
+ __ imul(scratch3, scratch3, 36969);
+ __ shr(state1, 16);
+ __ add(state1, scratch3);
// Save state[1].
- __ mov(FieldOperand(ebx, ByteArray::kHeaderSize + kSeedSize), eax);
+ __ mov(FieldOperand(state, ByteArray::kHeaderSize + kSeedSize), state1);
// Random bit pattern = (state[0] << 14) + (state[1] & 0x3FFFF)
- __ shl(ecx, 14);
- __ and_(eax, Immediate(0x3FFFF));
- __ add(eax, ecx);
+ Register random = state0;
+ __ shl(random, 14);
+ __ and_(state1, Immediate(0x3FFFF));
+ __ add(random, state1);
- __ bind(deferred->exit());
- // Convert 32 random bits in eax to 0.(32 random bits) in a double
+ // Convert 32 random bits in random to 0.(32 random bits) in a double
// by computing:
// ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20).
- __ mov(ebx, Immediate(0x49800000)); // 1.0 x 2^20 as single.
- __ movd(xmm2, ebx);
- __ movd(xmm1, eax);
- __ cvtss2sd(xmm2, xmm2);
- __ xorps(xmm1, xmm2);
- __ subsd(xmm1, xmm2);
-}
-
-
-void LCodeGen::DoDeferredRandom(LRandom* instr) {
- __ PrepareCallCFunction(1, ebx);
- __ mov(Operand(esp, 0), eax);
- __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
- // Return value is in eax.
+ XMMRegister result = ToDoubleRegister(instr->result());
+ // We use xmm0 as a fixed scratch register here.
+ XMMRegister scratch4 = xmm0;
+ __ mov(scratch3, Immediate(0x49800000)); // 1.0 x 2^20 as single.
+ __ movd(scratch4, scratch3);
+ __ movd(result, random);
+ __ cvtss2sd(scratch4, scratch4);
+ __ xorps(result, scratch4);
+ __ subsd(result, scratch4);
}
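
With the deferred seed-initialization path gone, DoRandom now inlines the whole generator: two 16-bit multiply-with-carry steps are combined into 32 random bits, which become a double in [0, 1) by splicing them into the low mantissa word of 2^20 and subtracting. A hedged C++ sketch of the same logic (the seeds here are placeholders; the real ones live in the native context's random-seed array):

    #include <cstdint>
    #include <cstring>

    static uint32_t state0 = 0x12345678;  // placeholder seeds
    static uint32_t state1 = 0x9ABCDEF0;

    double RandomDouble() {
      // Two multiply-with-carry steps, as in the generated code above.
      state0 = 18273 * (state0 & 0xFFFF) + (state0 >> 16);
      state1 = 36969 * (state1 & 0xFFFF) + (state1 >> 16);
      uint32_t random_bits = (state0 << 14) + (state1 & 0x3FFFF);

      // 0x4130000000000000 is 2^20 as a double with an all-zero low mantissa
      // word; OR-ing in the random bits (the generated code XORs, which is
      // equivalent here) yields 2^20 + r * 2^-32.
      uint64_t bits = (static_cast<uint64_t>(0x41300000) << 32) | random_bits;
      double d;
      std::memcpy(&d, &bits, sizeof(d));
      return d - 1048576.0;  // subtract 2^20, leaving 0.(32 random bits)
    }
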
@@ -4328,6 +4413,14 @@ void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
}
+void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
+ Register function = ToRegister(instr->function());
+ Register code_object = ToRegister(instr->code_object());
+ __ lea(code_object, FieldOperand(code_object, Code::kHeaderSize));
+ __ mov(FieldOperand(function, JSFunction::kCodeEntryOffset), code_object);
+}
+
+
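
DoStoreCodeEntry stores a raw instruction-start pointer: the lea skips the Code object header before the address is written into the function. A hedged sketch of the address arithmetic (the layout and constant are illustrative, and heap-object tagging is omitted):

    #include <cstddef>

    struct JSFunction { void* code_entry; };   // illustrative layout
    const std::size_t kCodeHeaderSize = 64;    // stand-in for Code::kHeaderSize

    void StoreCodeEntry(JSFunction* function, void* code_object) {
      // lea code_object, [code_object + header size]; then store the entry.
      function->code_entry = static_cast<char*>(code_object) + kCodeHeaderSize;
    }
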
void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
Register result = ToRegister(instr->result());
Register base = ToRegister(instr->base_object());
@@ -4760,18 +4853,22 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
- class DeferredStringCharCodeAt: public LDeferredCode {
+ class DeferredStringCharCodeAt V8_FINAL : public LDeferredCode {
public:
- DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
- virtual LInstruction* instr() { return instr_; }
+ DeferredStringCharCodeAt(LCodeGen* codegen,
+ LStringCharCodeAt* instr,
+ const X87Stack& x87_stack)
+ : LDeferredCode(codegen, x87_stack), instr_(instr) { }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredStringCharCodeAt(instr_);
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
LStringCharCodeAt* instr_;
};
DeferredStringCharCodeAt* deferred =
- new(zone()) DeferredStringCharCodeAt(this, instr);
+ new(zone()) DeferredStringCharCodeAt(this, instr, x87_stack_);
StringCharLoadGenerator::Generate(masm(),
factory(),
@@ -4815,18 +4912,22 @@ void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
- class DeferredStringCharFromCode: public LDeferredCode {
+ class DeferredStringCharFromCode V8_FINAL : public LDeferredCode {
public:
- DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); }
- virtual LInstruction* instr() { return instr_; }
+ DeferredStringCharFromCode(LCodeGen* codegen,
+ LStringCharFromCode* instr,
+ const X87Stack& x87_stack)
+ : LDeferredCode(codegen, x87_stack), instr_(instr) { }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredStringCharFromCode(instr_);
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
LStringCharFromCode* instr_;
};
DeferredStringCharFromCode* deferred =
- new(zone()) DeferredStringCharFromCode(this, instr);
+ new(zone()) DeferredStringCharFromCode(this, instr, x87_stack_);
ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
Register char_code = ToRegister(instr->char_code());
@@ -4871,15 +4972,20 @@ void LCodeGen::DoStringAdd(LStringAdd* instr) {
void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
+ LOperand* input = instr->value();
+ LOperand* output = instr->result();
+ ASSERT(input->IsRegister() || input->IsStackSlot());
+ ASSERT(output->IsDoubleRegister());
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope scope(masm(), SSE2);
- LOperand* input = instr->value();
- ASSERT(input->IsRegister() || input->IsStackSlot());
- LOperand* output = instr->result();
- ASSERT(output->IsDoubleRegister());
__ cvtsi2sd(ToDoubleRegister(output), ToOperand(input));
+ } else if (input->IsRegister()) {
+ Register input_reg = ToRegister(input);
+ __ push(input_reg);
+ X87Mov(ToX87Register(output), Operand(esp, 0), kX87IntOperand);
+ __ pop(input_reg);
} else {
- UNREACHABLE();
+ X87Mov(ToX87Register(output), ToOperand(input), kX87IntOperand);
}
}
@@ -4907,14 +5013,16 @@ void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
- class DeferredNumberTagI: public LDeferredCode {
+ class DeferredNumberTagI V8_FINAL : public LDeferredCode {
public:
- DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() {
+ DeferredNumberTagI(LCodeGen* codegen,
+ LNumberTagI* instr,
+ const X87Stack& x87_stack)
+ : LDeferredCode(codegen, x87_stack), instr_(instr) { }
+ virtual void Generate() V8_OVERRIDE {
codegen()->DoDeferredNumberTagI(instr_, instr_->value(), SIGNED_INT32);
}
- virtual LInstruction* instr() { return instr_; }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
LNumberTagI* instr_;
};
@@ -4923,7 +5031,8 @@ void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
ASSERT(input->IsRegister() && input->Equals(instr->result()));
Register reg = ToRegister(input);
- DeferredNumberTagI* deferred = new(zone()) DeferredNumberTagI(this, instr);
+ DeferredNumberTagI* deferred =
+ new(zone()) DeferredNumberTagI(this, instr, x87_stack_);
__ SmiTag(reg);
__ j(overflow, deferred->entry());
__ bind(deferred->exit());
@@ -4931,14 +5040,16 @@ void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
- class DeferredNumberTagU: public LDeferredCode {
+ class DeferredNumberTagU V8_FINAL : public LDeferredCode {
public:
- DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() {
+ DeferredNumberTagU(LCodeGen* codegen,
+ LNumberTagU* instr,
+ const X87Stack& x87_stack)
+ : LDeferredCode(codegen, x87_stack), instr_(instr) { }
+ virtual void Generate() V8_OVERRIDE {
codegen()->DoDeferredNumberTagI(instr_, instr_->value(), UNSIGNED_INT32);
}
- virtual LInstruction* instr() { return instr_; }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
LNumberTagU* instr_;
};
@@ -4947,7 +5058,8 @@ void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
ASSERT(input->IsRegister() && input->Equals(instr->result()));
Register reg = ToRegister(input);
- DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
+ DeferredNumberTagU* deferred =
+ new(zone()) DeferredNumberTagU(this, instr, x87_stack_);
__ cmp(reg, Immediate(Smi::kMaxValue));
__ j(above, deferred->entry());
__ SmiTag(reg);
@@ -5034,12 +5146,16 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
- class DeferredNumberTagD: public LDeferredCode {
+ class DeferredNumberTagD V8_FINAL : public LDeferredCode {
public:
- DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
- virtual LInstruction* instr() { return instr_; }
+ DeferredNumberTagD(LCodeGen* codegen,
+ LNumberTagD* instr,
+ const X87Stack& x87_stack)
+ : LDeferredCode(codegen, x87_stack), instr_(instr) { }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredNumberTagD(instr_);
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
LNumberTagD* instr_;
};
@@ -5053,7 +5169,8 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
X87LoadForUsage(src);
}
- DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
+ DeferredNumberTagD* deferred =
+ new(zone()) DeferredNumberTagD(this, instr, x87_stack_);
if (FLAG_inline_new) {
Register tmp = ToRegister(instr->temp());
__ AllocateHeapNumber(reg, tmp, no_reg, deferred->entry());
@@ -5172,11 +5289,13 @@ void LCodeGen::EmitNumberUntagDNoSSE2(Register input_reg,
}
__ bind(&load_smi);
- __ SmiUntag(input_reg); // Untag smi before converting to float.
- __ push(input_reg);
+ // Clobbering a temp is faster than re-tagging the
+ // input register since we avoid dependencies.
+ __ mov(temp_reg, input_reg);
+ __ SmiUntag(temp_reg); // Untag smi before converting to float.
+ __ push(temp_reg);
__ fild_s(Operand(esp, 0));
- __ pop(input_reg);
- __ SmiTag(input_reg); // Retag smi.
+ __ add(esp, Immediate(kPointerSize));
__ bind(&done);
X87CommitWrite(res_reg);
}
@@ -5232,113 +5351,63 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
}
- // Smi to XMM conversion
__ bind(&load_smi);
- __ SmiUntag(input_reg); // Untag smi before converting to float.
- __ cvtsi2sd(result_reg, Operand(input_reg));
- __ SmiTag(input_reg); // Retag smi.
+ // Smi to XMM conversion. Clobbering a temp is faster than re-tagging the
+ // input register since we avoid dependencies.
+ __ mov(temp_reg, input_reg);
+ __ SmiUntag(temp_reg); // Untag smi before converting to float.
+ __ cvtsi2sd(result_reg, Operand(temp_reg));
__ bind(&done);
}
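
Both smi-to-double paths above now copy the input into a temp before untagging instead of untagging and re-tagging in place. On ia32 a smi is the 31-bit integer shifted left by one, so tag and untag are single shifts; the win is not instruction count but keeping the input register intact and breaking the dependency chain. A minimal sketch of the tagging scheme this relies on:

    #include <cstdint>

    // ia32 smis: 31-bit payload, low bit 0 as the tag.
    inline int32_t SmiTag(int32_t value) { return value << 1; }
    inline int32_t SmiUntag(int32_t smi) { return smi >> 1; }  // arithmetic shift
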
-void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
- Label done, heap_number;
+void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, Label* done) {
Register input_reg = ToRegister(instr->value());
- // Heap number map check.
- __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
- factory()->heap_number_map());
if (instr->truncating()) {
+ Label heap_number, slow_case;
+
+ // Heap number map check.
+ __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
+ factory()->heap_number_map());
__ j(equal, &heap_number, Label::kNear);
+
// Check for undefined. Undefined is converted to zero for truncating
// conversions.
__ cmp(input_reg, factory()->undefined_value());
__ RecordComment("Deferred TaggedToI: cannot truncate");
DeoptimizeIf(not_equal, instr->environment());
__ mov(input_reg, 0);
- __ jmp(&done, Label::kNear);
+ __ jmp(done);
__ bind(&heap_number);
- if (CpuFeatures::IsSupported(SSE3)) {
- CpuFeatureScope scope(masm(), SSE3);
- Label convert;
- // Use more powerful conversion when sse3 is available.
- // Load x87 register with heap number.
- __ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset));
- // Get exponent alone and check for too-big exponent.
- __ mov(input_reg, FieldOperand(input_reg, HeapNumber::kExponentOffset));
- __ and_(input_reg, HeapNumber::kExponentMask);
- const uint32_t kTooBigExponent =
- (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
- __ cmp(Operand(input_reg), Immediate(kTooBigExponent));
- __ j(less, &convert, Label::kNear);
- // Pop FPU stack before deoptimizing.
- __ fstp(0);
- __ RecordComment("Deferred TaggedToI: exponent too big");
- DeoptimizeIf(no_condition, instr->environment());
-
- // Reserve space for 64 bit answer.
- __ bind(&convert);
- __ sub(Operand(esp), Immediate(kDoubleSize));
- // Do conversion, which cannot fail because we checked the exponent.
- __ fisttp_d(Operand(esp, 0));
- __ mov(input_reg, Operand(esp, 0)); // Low word of answer is the result.
- __ add(Operand(esp), Immediate(kDoubleSize));
- } else if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatureScope scope(masm(), SSE2);
- XMMRegister xmm_temp = ToDoubleRegister(instr->temp());
- __ movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
- __ cvttsd2si(input_reg, Operand(xmm0));
- __ cmp(input_reg, 0x80000000u);
- __ j(not_equal, &done);
- // Check if the input was 0x80000000 (kMinInt).
- // If not, then we got an overflow and we deoptimize.
- ExternalReference min_int = ExternalReference::address_of_min_int();
- __ movdbl(xmm_temp, Operand::StaticVariable(min_int));
- __ ucomisd(xmm_temp, xmm0);
- DeoptimizeIf(not_equal, instr->environment());
- DeoptimizeIf(parity_even, instr->environment()); // NaN.
- } else {
- UNREACHABLE();
- }
- } else if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatureScope scope(masm(), SSE2);
- // Deoptimize if we don't have a heap number.
- __ RecordComment("Deferred TaggedToI: not a heap number");
- DeoptimizeIf(not_equal, instr->environment());
-
- XMMRegister xmm_temp = ToDoubleRegister(instr->temp());
- __ movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
- __ cvttsd2si(input_reg, Operand(xmm0));
- __ cvtsi2sd(xmm_temp, Operand(input_reg));
- __ ucomisd(xmm0, xmm_temp);
- __ RecordComment("Deferred TaggedToI: lost precision");
- DeoptimizeIf(not_equal, instr->environment());
- __ RecordComment("Deferred TaggedToI: NaN");
- DeoptimizeIf(parity_even, instr->environment()); // NaN.
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ test(input_reg, Operand(input_reg));
- __ j(not_zero, &done);
- __ movmskpd(input_reg, xmm0);
- __ and_(input_reg, 1);
- __ RecordComment("Deferred TaggedToI: minus zero");
- DeoptimizeIf(not_zero, instr->environment());
- }
+ __ TruncateHeapNumberToI(input_reg, input_reg);
} else {
- UNREACHABLE();
+ Label bailout;
+ XMMRegister scratch = (instr->temp() != NULL)
+ ? ToDoubleRegister(instr->temp())
+ : no_xmm_reg;
+ __ TaggedToI(input_reg, input_reg, scratch,
+ instr->hydrogen()->GetMinusZeroMode(), &bailout);
+ __ jmp(done);
+ __ bind(&bailout);
+ DeoptimizeIf(no_condition, instr->environment());
}
- __ bind(&done);
}
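
The truncating branch now delegates to TruncateHeapNumberToI, but the conversion it must implement is unchanged: the ECMAScript ToInt32 used by the bitwise operators, where the value is truncated toward zero and reduced modulo 2^32. NaN and the infinities map to zero, and the code above already routes undefined to zero as well. A hedged C++ sketch of those semantics (the helper name is illustrative):

    #include <cmath>
    #include <cstdint>

    int32_t TruncateToInt32(double d) {
      if (!std::isfinite(d)) return 0;        // NaN and +/-infinity become 0
      double t = std::trunc(d);               // round toward zero
      double m = std::fmod(t, 4294967296.0);  // reduce modulo 2^32
      if (m < 0) m += 4294967296.0;
      return static_cast<int32_t>(static_cast<uint32_t>(m));
    }
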
void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
- class DeferredTaggedToI: public LDeferredCode {
+ class DeferredTaggedToI V8_FINAL : public LDeferredCode {
public:
- DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
- virtual LInstruction* instr() { return instr_; }
+ DeferredTaggedToI(LCodeGen* codegen,
+ LTaggedToI* instr,
+ const X87Stack& x87_stack)
+ : LDeferredCode(codegen, x87_stack), instr_(instr) { }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredTaggedToI(instr_, done());
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
LTaggedToI* instr_;
};
@@ -5348,7 +5417,8 @@ void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
Register input_reg = ToRegister(input);
ASSERT(input_reg.is(ToRegister(instr->result())));
- DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
+ DeferredTaggedToI* deferred =
+ new(zone()) DeferredTaggedToI(this, instr, x87_stack_);
__ JumpIfNotSmi(input_reg, deferred->entry());
__ SmiUntag(input_reg);
@@ -5356,179 +5426,18 @@ void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
}
-void LCodeGen::DoDeferredTaggedToINoSSE2(LTaggedToINoSSE2* instr) {
- Label done, heap_number;
- Register result_reg = ToRegister(instr->result());
- Register input_reg = ToRegister(instr->value());
-
- // Heap number map check.
- __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
- factory()->heap_number_map());
- if (instr->truncating()) {
- __ j(equal, &heap_number, Label::kNear);
- // Check for undefined. Undefined is converted to zero for truncating
- // conversions.
- __ cmp(input_reg, factory()->undefined_value());
- __ RecordComment("Deferred TaggedToI: cannot truncate");
- DeoptimizeIf(not_equal, instr->environment());
- __ xor_(result_reg, result_reg);
- __ jmp(&done, Label::kFar);
- __ bind(&heap_number);
- } else {
- // Deoptimize if we don't have a heap number.
- DeoptimizeIf(not_equal, instr->environment());
- }
-
- // Surprisingly, all of this crazy bit manipulation is considerably
- // faster than using the built-in x86 CPU conversion functions (about 6x).
- Label right_exponent, adjust_bias, zero_result;
- Register scratch = ToRegister(instr->scratch());
- Register scratch2 = ToRegister(instr->scratch2());
- // Get exponent word.
- __ mov(scratch, FieldOperand(input_reg, HeapNumber::kExponentOffset));
- // Get exponent alone in scratch2.
- __ mov(scratch2, scratch);
- __ and_(scratch2, HeapNumber::kExponentMask);
- __ shr(scratch2, HeapNumber::kExponentShift);
- if (instr->truncating()) {
- __ j(zero, &zero_result);
- } else {
- __ j(not_zero, &adjust_bias);
- __ test(scratch, Immediate(HeapNumber::kMantissaMask));
- DeoptimizeIf(not_zero, instr->environment());
- __ cmp(FieldOperand(input_reg, HeapNumber::kMantissaOffset), Immediate(0));
- DeoptimizeIf(not_equal, instr->environment());
- __ bind(&adjust_bias);
- }
- __ sub(scratch2, Immediate(HeapNumber::kExponentBias));
- if (!instr->truncating()) {
- DeoptimizeIf(negative, instr->environment());
- } else {
- __ j(negative, &zero_result);
- }
-
- // Get the second half of the double. For some exponents we don't
- // actually need this because the bits get shifted out again, but
- // it's probably slower to test than just to do it.
- Register scratch3 = ToRegister(instr->scratch3());
- __ mov(scratch3, FieldOperand(input_reg, HeapNumber::kMantissaOffset));
- __ xor_(result_reg, result_reg);
-
- const uint32_t non_int32_exponent = 31;
- __ cmp(scratch2, Immediate(non_int32_exponent));
- // If we have a match of the int32 exponent then skip some logic.
- __ j(equal, &right_exponent, Label::kNear);
- // If the number doesn't fit in an int32, deopt.
- DeoptimizeIf(greater, instr->environment());
-
- // Exponent word in scratch, exponent in scratch2. We know that 0 <= exponent
- // < 31.
- __ mov(result_reg, Immediate(31));
- __ sub(result_reg, scratch2);
-
- __ bind(&right_exponent);
-
- // Save off exponent for negative check later.
- __ mov(scratch2, scratch);
-
- // Here result_reg is the shift, scratch is the exponent word.
- // Get the top bits of the mantissa.
- __ and_(scratch, HeapNumber::kMantissaMask);
- // Put back the implicit 1.
- __ or_(scratch, 1 << HeapNumber::kExponentShift);
- // Shift up the mantissa bits to take up the space the exponent used to
- // take. We have kExponentShift + 1 significant bits in the low end of the
- // word. Shift them to the top bits.
- const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 1;
- __ shl(scratch, shift_distance);
- if (!instr->truncating()) {
- // If not truncating, a non-zero value in the bottom 22 bits means a
- // non-integral value --> trigger a deopt.
- __ test(scratch3, Immediate((1 << (32 - shift_distance)) - 1));
- DeoptimizeIf(not_equal, instr->environment());
- }
- // Shift down 22 bits to get the most significant 10 bits of the low
- // mantissa word.
- __ shr(scratch3, 32 - shift_distance);
- __ or_(scratch3, scratch);
- if (!instr->truncating()) {
- // If not truncating, a non-zero value in the bits that will be shifted
- // away when adjusting the exponent means rounding --> deopt.
- __ mov(scratch, 0x1);
- ASSERT(result_reg.is(ecx));
- __ shl_cl(scratch);
- __ dec(scratch);
- __ test(scratch3, scratch);
- DeoptimizeIf(not_equal, instr->environment());
- }
- // Move down according to the exponent.
- ASSERT(result_reg.is(ecx));
- __ shr_cl(scratch3);
- // Now the unsigned 32-bit answer is in scratch3. We need to move it to
- // result_reg and we may need to fix the sign.
- Label negative_result;
- __ xor_(result_reg, result_reg);
- __ cmp(scratch2, result_reg);
- __ j(less, &negative_result, Label::kNear);
- __ cmp(scratch3, result_reg);
- __ mov(result_reg, scratch3);
- // If the result is > MAX_INT, result doesn't fit in signed 32-bit --> deopt.
- DeoptimizeIf(less, instr->environment());
- __ jmp(&done, Label::kNear);
- __ bind(&zero_result);
- __ xor_(result_reg, result_reg);
- __ jmp(&done, Label::kNear);
- __ bind(&negative_result);
- __ sub(result_reg, scratch3);
- if (!instr->truncating()) {
- // -0.0 triggers a deopt.
- DeoptimizeIf(zero, instr->environment());
- }
- // If the negative subtraction overflows into a positive number, there was an
- // overflow --> deopt.
- DeoptimizeIf(positive, instr->environment());
- __ bind(&done);
-}
-
-
-void LCodeGen::DoTaggedToINoSSE2(LTaggedToINoSSE2* instr) {
- class DeferredTaggedToINoSSE2: public LDeferredCode {
- public:
- DeferredTaggedToINoSSE2(LCodeGen* codegen, LTaggedToINoSSE2* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredTaggedToINoSSE2(instr_); }
- virtual LInstruction* instr() { return instr_; }
- private:
- LTaggedToINoSSE2* instr_;
- };
-
- LOperand* input = instr->value();
- ASSERT(input->IsRegister());
- Register input_reg = ToRegister(input);
- ASSERT(input_reg.is(ToRegister(instr->result())));
-
- DeferredTaggedToINoSSE2* deferred =
- new(zone()) DeferredTaggedToINoSSE2(this, instr);
-
- // Smi check.
- __ JumpIfNotSmi(input_reg, deferred->entry());
- __ SmiUntag(input_reg); // Untag smi.
- __ bind(deferred->exit());
-}
-
-
void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
LOperand* input = instr->value();
ASSERT(input->IsRegister());
LOperand* temp = instr->temp();
- ASSERT(temp == NULL || temp->IsRegister());
+ ASSERT(temp->IsRegister());
LOperand* result = instr->result();
ASSERT(result->IsDoubleRegister());
Register input_reg = ToRegister(input);
bool deoptimize_on_minus_zero =
instr->hydrogen()->deoptimize_on_minus_zero();
- Register temp_reg = deoptimize_on_minus_zero ? ToRegister(temp) : no_reg;
+ Register temp_reg = ToRegister(temp);
HValue* value = instr->hydrogen()->value();
NumberUntagDMode mode = value->representation().IsSmi()
@@ -5561,43 +5470,34 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
ASSERT(input->IsDoubleRegister());
LOperand* result = instr->result();
ASSERT(result->IsRegister());
- CpuFeatureScope scope(masm(), SSE2);
-
- XMMRegister input_reg = ToDoubleRegister(input);
Register result_reg = ToRegister(result);
- __ cvttsd2si(result_reg, Operand(input_reg));
-
if (instr->truncating()) {
- // Performs a truncating conversion of a floating point number as used by
- // the JS bitwise operations.
- Label fast_case_succeeded;
- __ cmp(result_reg, 0x80000000u);
- __ j(not_equal, &fast_case_succeeded);
- __ sub(esp, Immediate(kDoubleSize));
- __ movdbl(MemOperand(esp, 0), input_reg);
- DoubleToIStub stub(esp, result_reg, 0, true);
- __ call(stub.GetCode(isolate()), RelocInfo::CODE_TARGET);
- __ add(esp, Immediate(kDoubleSize));
- __ bind(&fast_case_succeeded);
+ if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
+ CpuFeatureScope scope(masm(), SSE2);
+ XMMRegister input_reg = ToDoubleRegister(input);
+ __ TruncateDoubleToI(result_reg, input_reg);
+ } else {
+ X87Register input_reg = ToX87Register(input);
+ X87Fxch(input_reg);
+ __ TruncateX87TOSToI(result_reg);
+ }
} else {
- Label done;
- __ cvtsi2sd(xmm0, Operand(result_reg));
- __ ucomisd(xmm0, input_reg);
- DeoptimizeIf(not_equal, instr->environment());
- DeoptimizeIf(parity_even, instr->environment()); // NaN.
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- // The integer converted back is equal to the original. We
- // only have to test if we got -0 as an input.
- __ test(result_reg, Operand(result_reg));
- __ j(not_zero, &done, Label::kNear);
- __ movmskpd(result_reg, input_reg);
- // Bit 0 contains the sign of the double in input_reg.
- // If input was positive, we are ok and return 0, otherwise
- // deoptimize.
- __ and_(result_reg, 1);
- DeoptimizeIf(not_zero, instr->environment());
+ Label bailout, done;
+ if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
+ CpuFeatureScope scope(masm(), SSE2);
+ XMMRegister input_reg = ToDoubleRegister(input);
+ __ DoubleToI(result_reg, input_reg, xmm0,
+ instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear);
+ } else {
+ X87Register input_reg = ToX87Register(input);
+ X87Fxch(input_reg);
+ __ X87TOSToI(result_reg, instr->hydrogen()->GetMinusZeroMode(),
+ &bailout, Label::kNear);
}
+ __ jmp(&done, Label::kNear);
+ __ bind(&bailout);
+ DeoptimizeIf(no_condition, instr->environment());
__ bind(&done);
}
}
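
In the non-truncating branch, DoubleToI and X87TOSToI must hit the bailout whenever the conversion is lossy, including a -0.0 input that an integer round-trip alone cannot distinguish from 0.0. A hedged C++ sketch of the check (the sign-bit probe is what movmskpd bit 0 read in the removed code; whether -0.0 actually bails out depends on the instruction's minus-zero mode):

    #include <cstdint>
    #include <cstring>

    bool DoubleToInt32Exact(double input, int32_t* result) {
      if (!(input >= -2147483648.0 && input <= 2147483647.0)) {
        return false;  // NaN or out of int32 range
      }
      int32_t truncated = static_cast<int32_t>(input);
      if (static_cast<double>(truncated) != input) return false;  // lost precision
      if (truncated == 0) {
        uint64_t bits;
        std::memcpy(&bits, &input, sizeof(bits));
        if (bits >> 63) return false;  // -0.0: sign bit set, take the bailout
      }
      *result = truncated;
      return true;
    }
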
@@ -5608,31 +5508,25 @@ void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
ASSERT(input->IsDoubleRegister());
LOperand* result = instr->result();
ASSERT(result->IsRegister());
- CpuFeatureScope scope(masm(), SSE2);
-
- XMMRegister input_reg = ToDoubleRegister(input);
Register result_reg = ToRegister(result);
- Label done;
- __ cvttsd2si(result_reg, Operand(input_reg));
- __ cvtsi2sd(xmm0, Operand(result_reg));
- __ ucomisd(xmm0, input_reg);
- DeoptimizeIf(not_equal, instr->environment());
- DeoptimizeIf(parity_even, instr->environment()); // NaN.
-
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- // The integer converted back is equal to the original. We
- // only have to test if we got -0 as an input.
- __ test(result_reg, Operand(result_reg));
- __ j(not_zero, &done, Label::kNear);
- __ movmskpd(result_reg, input_reg);
- // Bit 0 contains the sign of the double in input_reg.
- // If input was positive, we are ok and return 0, otherwise
- // deoptimize.
- __ and_(result_reg, 1);
- DeoptimizeIf(not_zero, instr->environment());
- __ bind(&done);
+ Label bailout, done;
+ if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
+ CpuFeatureScope scope(masm(), SSE2);
+ XMMRegister input_reg = ToDoubleRegister(input);
+ __ DoubleToI(result_reg, input_reg, xmm0,
+ instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear);
+ } else {
+ X87Register input_reg = ToX87Register(input);
+ X87Fxch(input_reg);
+ __ X87TOSToI(result_reg, instr->hydrogen()->GetMinusZeroMode(),
+ &bailout, Label::kNear);
}
+ __ jmp(&done, Label::kNear);
+ __ bind(&bailout);
+ DeoptimizeIf(no_condition, instr->environment());
+ __ bind(&done);
+
__ SmiTag(result_reg);
DeoptimizeIf(overflow, instr->environment());
}
@@ -5699,15 +5593,15 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
}
-void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
- Handle<JSFunction> target = instr->hydrogen()->target();
- if (instr->hydrogen()->target_in_new_space()) {
+void LCodeGen::DoCheckValue(LCheckValue* instr) {
+ Handle<HeapObject> object = instr->hydrogen()->object();
+ if (instr->hydrogen()->object_in_new_space()) {
Register reg = ToRegister(instr->value());
- Handle<Cell> cell = isolate()->factory()->NewCell(target);
+ Handle<Cell> cell = isolate()->factory()->NewCell(object);
__ cmp(reg, Operand::ForCell(cell));
} else {
Operand operand = ToOperand(instr->value());
- __ cmp(operand, target);
+ __ cmp(operand, object);
}
DeoptimizeIf(not_equal, instr->environment());
}
@@ -5729,17 +5623,20 @@ void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
- class DeferredCheckMaps: public LDeferredCode {
+ class DeferredCheckMaps V8_FINAL : public LDeferredCode {
public:
- DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
- : LDeferredCode(codegen), instr_(instr), object_(object) {
+ DeferredCheckMaps(LCodeGen* codegen,
+ LCheckMaps* instr,
+ Register object,
+ const X87Stack& x87_stack)
+ : LDeferredCode(codegen, x87_stack), instr_(instr), object_(object) {
SetExit(check_maps());
}
- virtual void Generate() {
+ virtual void Generate() V8_OVERRIDE {
codegen()->DoDeferredInstanceMigration(instr_, object_);
}
Label* check_maps() { return &check_maps_; }
- virtual LInstruction* instr() { return instr_; }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
LCheckMaps* instr_;
Label check_maps_;
@@ -5756,7 +5653,7 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
DeferredCheckMaps* deferred = NULL;
if (instr->hydrogen()->has_migration_target()) {
- deferred = new(zone()) DeferredCheckMaps(this, instr, reg);
+ deferred = new(zone()) DeferredCheckMaps(this, instr, reg, x87_stack_);
__ bind(deferred->check_maps());
}
@@ -5952,18 +5849,22 @@ void LCodeGen::DoClampTToUint8NoSSE2(LClampTToUint8NoSSE2* instr) {
void LCodeGen::DoAllocate(LAllocate* instr) {
- class DeferredAllocate: public LDeferredCode {
+ class DeferredAllocate V8_FINAL : public LDeferredCode {
public:
- DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredAllocate(instr_); }
- virtual LInstruction* instr() { return instr_; }
+ DeferredAllocate(LCodeGen* codegen,
+ LAllocate* instr,
+ const X87Stack& x87_stack)
+ : LDeferredCode(codegen, x87_stack), instr_(instr) { }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredAllocate(instr_);
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
LAllocate* instr_;
};
DeferredAllocate* deferred =
- new(zone()) DeferredAllocate(this, instr);
+ new(zone()) DeferredAllocate(this, instr, x87_stack_);
Register result = ToRegister(instr->result());
Register temp = ToRegister(instr->temp());
@@ -6114,7 +6015,7 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
if (!pretenure && instr->hydrogen()->has_no_literals()) {
FastNewClosureStub stub(instr->hydrogen()->language_mode(),
instr->hydrogen()->is_generator());
- __ push(Immediate(instr->hydrogen()->shared_info()));
+ __ mov(ebx, Immediate(instr->hydrogen()->shared_info()));
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
} else {
__ push(esi);
@@ -6301,12 +6202,16 @@ void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
void LCodeGen::DoStackCheck(LStackCheck* instr) {
- class DeferredStackCheck: public LDeferredCode {
+ class DeferredStackCheck V8_FINAL : public LDeferredCode {
public:
- DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
- virtual LInstruction* instr() { return instr_; }
+ DeferredStackCheck(LCodeGen* codegen,
+ LStackCheck* instr,
+ const X87Stack& x87_stack)
+ : LDeferredCode(codegen, x87_stack), instr_(instr) { }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredStackCheck(instr_);
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
LStackCheck* instr_;
};
@@ -6325,8 +6230,9 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
ASSERT(instr->context()->IsRegister());
ASSERT(ToRegister(instr->context()).is(esi));
- StackCheckStub stub;
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+ CallCode(isolate()->builtins()->StackCheck(),
+ RelocInfo::CODE_TARGET,
+ instr);
EnsureSpaceForLazyDeopt();
__ bind(&done);
RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
@@ -6335,7 +6241,7 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
ASSERT(instr->hydrogen()->is_backwards_branch());
// Perform stack overflow check if this goto needs it before jumping.
DeferredStackCheck* deferred_stack_check =
- new(zone()) DeferredStackCheck(this, instr);
+ new(zone()) DeferredStackCheck(this, instr, x87_stack_);
ExternalReference stack_limit =
ExternalReference::address_of_stack_limit(isolate());
__ cmp(esp, Operand::StaticVariable(stack_limit));
@@ -6362,9 +6268,7 @@ void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
ASSERT(!environment->HasBeenRegistered());
RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
- // Normally we record the first unknown OSR value as the entrypoint to the OSR
- // code, but if there were none, record the entrypoint here.
- if (osr_pc_offset_ == -1) osr_pc_offset_ = masm()->pc_offset();
+ GenerateOsrPrologue();
}
diff --git a/deps/v8/src/ia32/lithium-codegen-ia32.h b/deps/v8/src/ia32/lithium-codegen-ia32.h
index aa8f6c248..769917f7e 100644
--- a/deps/v8/src/ia32/lithium-codegen-ia32.h
+++ b/deps/v8/src/ia32/lithium-codegen-ia32.h
@@ -45,7 +45,7 @@ class LDeferredCode;
class LGapNode;
class SafepointGenerator;
-class LCodeGen BASE_EMBEDDED {
+class LCodeGen V8_FINAL BASE_EMBEDDED {
public:
LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
: zone_(info->zone()),
@@ -68,7 +68,7 @@ class LCodeGen BASE_EMBEDDED {
osr_pc_offset_(-1),
last_lazy_deopt_pc_(0),
frame_is_built_(false),
- x87_stack_depth_(0),
+ x87_stack_(assembler),
safepoints_(info->zone()),
resolver_(this),
expected_safepoint_kind_(Safepoint::kSimple),
@@ -122,14 +122,23 @@ class LCodeGen BASE_EMBEDDED {
void X87Mov(X87Register reg, Operand src,
X87OperandType operand = kX87DoubleOperand);
- void X87Mov(Operand src, X87Register reg);
+ void X87Mov(Operand src, X87Register reg,
+ X87OperandType operand = kX87DoubleOperand);
void X87PrepareBinaryOp(
X87Register left, X87Register right, X87Register result);
void X87LoadForUsage(X87Register reg);
- void X87PrepareToWrite(X87Register reg);
- void X87CommitWrite(X87Register reg);
+ void X87PrepareToWrite(X87Register reg) { x87_stack_.PrepareToWrite(reg); }
+ void X87CommitWrite(X87Register reg) { x87_stack_.CommitWrite(reg); }
+
+ void X87Fxch(X87Register reg, int other_slot = 0) {
+ x87_stack_.Fxch(reg, other_slot);
+ }
+
+ bool X87StackEmpty() {
+ return x87_stack_.depth() == 0;
+ }
Handle<Object> ToHandle(LConstantOperand* op) const;
@@ -154,11 +163,9 @@ class LCodeGen BASE_EMBEDDED {
LOperand* value,
IntegerSignedness signedness);
- void DoDeferredTaggedToI(LTaggedToI* instr);
- void DoDeferredTaggedToINoSSE2(LTaggedToINoSSE2* instr);
+ void DoDeferredTaggedToI(LTaggedToI* instr, Label* done);
void DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr);
void DoDeferredStackCheck(LStackCheck* instr);
- void DoDeferredRandom(LRandom* instr);
void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
void DoDeferredAllocate(LAllocate* instr);
@@ -225,6 +232,9 @@ class LCodeGen BASE_EMBEDDED {
bool GenerateJumpTable();
bool GenerateSafepointTable();
+ // Generates the custom OSR entrypoint and sets the osr_pc_offset.
+ void GenerateOsrPrologue();
+
enum SafepointMode {
RECORD_SIMPLE_SAFEPOINT,
RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS
@@ -399,15 +409,13 @@ class LCodeGen BASE_EMBEDDED {
// register, or a stack slot operand.
void EmitPushTaggedOperand(LOperand* operand);
- void X87Fxch(X87Register reg, int other_slot = 0);
void X87Fld(Operand src, X87OperandType opts);
- void X87Free(X87Register reg);
- void FlushX87StackIfNecessary(LInstruction* instr);
void EmitFlushX87ForDeopt();
- bool X87StackContains(X87Register reg);
- int X87ArrayIndex(X87Register reg);
- int x87_st2idx(int pos);
+ void FlushX87StackIfNecessary(LInstruction* instr) {
+ x87_stack_.FlushIfNecessary(instr, this);
+ }
+ friend class LGapResolver;
#ifdef _MSC_VER
// On windows, you may not access the stack more than one page below
@@ -438,8 +446,55 @@ class LCodeGen BASE_EMBEDDED {
int osr_pc_offset_;
int last_lazy_deopt_pc_;
bool frame_is_built_;
- X87Register x87_stack_[X87Register::kNumAllocatableRegisters];
- int x87_stack_depth_;
+
+ class X87Stack {
+ public:
+ explicit X87Stack(MacroAssembler* masm)
+ : stack_depth_(0), is_mutable_(true), masm_(masm) { }
+ explicit X87Stack(const X87Stack& other)
+ : stack_depth_(other.stack_depth_), is_mutable_(false), masm_(other.masm()) {
+ for (int i = 0; i < stack_depth_; i++) {
+ stack_[i] = other.stack_[i];
+ }
+ }
+ bool operator==(const X87Stack& other) const {
+ if (stack_depth_ != other.stack_depth_) return false;
+ for (int i = 0; i < stack_depth_; i++) {
+ if (!stack_[i].is(other.stack_[i])) return false;
+ }
+ return true;
+ }
+ bool Contains(X87Register reg);
+ void Fxch(X87Register reg, int other_slot = 0);
+ void Free(X87Register reg);
+ void PrepareToWrite(X87Register reg);
+ void CommitWrite(X87Register reg);
+ void FlushIfNecessary(LInstruction* instr, LCodeGen* cgen);
+ void LeavingBlock(int current_block_id, LGoto* goto_instr);
+ int depth() const { return stack_depth_; }
+ void pop() {
+ ASSERT(is_mutable_);
+ stack_depth_--;
+ }
+ void push(X87Register reg) {
+ ASSERT(is_mutable_);
+ ASSERT(stack_depth_ < X87Register::kNumAllocatableRegisters);
+ stack_[stack_depth_] = reg;
+ stack_depth_++;
+ }
+
+ MacroAssembler* masm() const { return masm_; }
+
+ private:
+ int ArrayIndex(X87Register reg);
+ int st2idx(int pos);
+
+ X87Register stack_[X87Register::kNumAllocatableRegisters];
+ int stack_depth_;
+ bool is_mutable_;
+ MacroAssembler* masm_;
+ };
+ X87Stack x87_stack_;
// Builder that keeps track of safepoints in the code. The table
// itself is emitted at the end of the generated code.
@@ -452,7 +507,7 @@ class LCodeGen BASE_EMBEDDED {
int old_position_;
- class PushSafepointRegistersScope BASE_EMBEDDED {
+ class PushSafepointRegistersScope V8_FINAL BASE_EMBEDDED {
public:
explicit PushSafepointRegistersScope(LCodeGen* codegen)
: codegen_(codegen) {
@@ -479,23 +534,26 @@ class LCodeGen BASE_EMBEDDED {
};
-class LDeferredCode: public ZoneObject {
+class LDeferredCode : public ZoneObject {
public:
- explicit LDeferredCode(LCodeGen* codegen)
+ explicit LDeferredCode(LCodeGen* codegen, const LCodeGen::X87Stack& x87_stack)
: codegen_(codegen),
external_exit_(NULL),
- instruction_index_(codegen->current_instruction_) {
+ instruction_index_(codegen->current_instruction_),
+ x87_stack_(x87_stack) {
codegen->AddDeferredCode(this);
}
- virtual ~LDeferredCode() { }
+ virtual ~LDeferredCode() {}
virtual void Generate() = 0;
virtual LInstruction* instr() = 0;
void SetExit(Label* exit) { external_exit_ = exit; }
Label* entry() { return &entry_; }
Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }
+ Label* done() { return codegen_->NeedsDeferredFrame() ? &done_ : exit(); }
int instruction_index() const { return instruction_index_; }
+ const LCodeGen::X87Stack& x87_stack() const { return x87_stack_; }
protected:
LCodeGen* codegen() const { return codegen_; }
@@ -506,7 +564,9 @@ class LDeferredCode: public ZoneObject {
Label entry_;
Label exit_;
Label* external_exit_;
+ Label done_;
int instruction_index_;
+ LCodeGen::X87Stack x87_stack_;
};
} } // namespace v8::internal
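
A note on the X87Stack plumbing above: every LDeferredCode now captures a copy of the x87 stack, and the copy constructor freezes that copy (is_mutable_ = false), so a deferred block records the stack layout at its creation point and can only read or compare it when its code is emitted later. A stripped-down sketch of the snapshot pattern:

    #include <cassert>

    class StackSnapshot {
     public:
      StackSnapshot() : depth_(0), is_mutable_(true) {}
      StackSnapshot(const StackSnapshot& other)   // copies are frozen
          : depth_(other.depth_), is_mutable_(false) {}
      void push() { assert(is_mutable_); ++depth_; }  // mutate only the live stack
      int depth() const { return depth_; }
     private:
      int depth_;
      bool is_mutable_;
    };
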
diff --git a/deps/v8/src/ia32/lithium-gap-resolver-ia32.h b/deps/v8/src/ia32/lithium-gap-resolver-ia32.h
index 3a58f585c..4aff241f4 100644
--- a/deps/v8/src/ia32/lithium-gap-resolver-ia32.h
+++ b/deps/v8/src/ia32/lithium-gap-resolver-ia32.h
@@ -38,7 +38,7 @@ namespace internal {
class LCodeGen;
class LGapResolver;
-class LGapResolver BASE_EMBEDDED {
+class LGapResolver V8_FINAL BASE_EMBEDDED {
public:
explicit LGapResolver(LCodeGen* owner);
diff --git a/deps/v8/src/ia32/lithium-ia32.cc b/deps/v8/src/ia32/lithium-ia32.cc
index c93c3cfe2..ca1e60d64 100644
--- a/deps/v8/src/ia32/lithium-ia32.cc
+++ b/deps/v8/src/ia32/lithium-ia32.cc
@@ -32,6 +32,7 @@
#include "lithium-allocator-inl.h"
#include "ia32/lithium-ia32.h"
#include "ia32/lithium-codegen-ia32.h"
+#include "hydrogen-osr.h"
namespace v8 {
namespace internal {
@@ -290,6 +291,14 @@ void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
}
+void LStoreCodeEntry::PrintDataTo(StringStream* stream) {
+ stream->Add(" = ");
+ function()->PrintTo(stream);
+ stream->Add(".code_entry = ");
+ code_object()->PrintTo(stream);
+}
+
+
void LInnerAllocatedObject::PrintDataTo(StringStream* stream) {
stream->Add(" = ");
base_object()->PrintTo(stream);
@@ -475,6 +484,14 @@ LPlatformChunk* LChunkBuilder::Build() {
USE(alignment_state_index);
}
+ // If compiling for OSR, reserve space for the unoptimized frame,
+ // which will be subsumed into this frame.
+ if (graph()->has_osr()) {
+ for (int i = graph()->osr()->UnoptimizedFrameSlots(); i > 0; i--) {
+ chunk_->GetNextSpillIndex(false);
+ }
+ }
+
const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
for (int i = 0; i < blocks->length(); i++) {
HBasicBlock* next = NULL;
@@ -772,12 +789,7 @@ LInstruction* LChunkBuilder::DoShift(Token::Value op,
// Left shifts can deoptimize if we shift by > 0 and the result cannot be
// truncated to smi.
if (instr->representation().IsSmi() && constant_value > 0) {
- for (HUseIterator it(instr->uses()); !it.Done(); it.Advance()) {
- if (!it.value()->CheckFlag(HValue::kTruncatingToSmi)) {
- does_deopt = true;
- break;
- }
- }
+ does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToSmi);
}
} else {
right = UseFixed(right_value, ecx);
@@ -789,12 +801,7 @@ LInstruction* LChunkBuilder::DoShift(Token::Value op,
if (FLAG_opt_safe_uint32_operations) {
does_deopt = !instr->CheckFlag(HInstruction::kUint32);
} else {
- for (HUseIterator it(instr->uses()); !it.Done(); it.Advance()) {
- if (!it.value()->CheckFlag(HValue::kTruncatingToInt32)) {
- does_deopt = true;
- break;
- }
- }
+ does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToInt32);
}
}
@@ -947,6 +954,16 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) {
if (FLAG_stress_environments && !instr->HasEnvironment()) {
instr = AssignEnvironment(instr);
}
+ if (!CpuFeatures::IsSafeForSnapshot(SSE2) && instr->IsGoto() &&
+ LGoto::cast(instr)->jumps_to_join()) {
+ // TODO(olivf) Since phis of spilled values are joined as registers
+ // (not in the stack slot), we need to allow the goto gaps to keep one
+ // x87 register alive. To ensure all other values are still spilled, we
+ // insert a fpu register barrier right before.
+ LClobberDoubles* clobber = new(zone()) LClobberDoubles();
+ clobber->set_hydrogen_value(current);
+ chunk_->AddInstruction(clobber, current_block_);
+ }
instr->set_hydrogen_value(current);
chunk_->AddInstruction(instr, current_block_);
}
@@ -1039,7 +1056,7 @@ LEnvironment* LChunkBuilder::CreateEnvironment(
LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
- return new(zone()) LGoto(instr->FirstSuccessor()->block_id());
+ return new(zone()) LGoto(instr->FirstSuccessor());
}
@@ -1051,7 +1068,7 @@ LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
HBasicBlock* successor = HConstant::cast(value)->BooleanValue()
? instr->FirstSuccessor()
: instr->SecondSuccessor();
- return new(zone()) LGoto(successor->block_id());
+ return new(zone()) LGoto(successor);
}
ToBooleanStub::Types expected = instr->expected_input_types();
@@ -1160,6 +1177,14 @@ LInstruction* LChunkBuilder::DoPushArgument(HPushArgument* instr) {
}
+LInstruction* LChunkBuilder::DoStoreCodeEntry(
+ HStoreCodeEntry* store_code_entry) {
+ LOperand* function = UseRegister(store_code_entry->function());
+ LOperand* code_object = UseTempRegister(store_code_entry->code_object());
+ return new(zone()) LStoreCodeEntry(function, code_object);
+}
+
+
LInstruction* LChunkBuilder::DoInnerAllocatedObject(
HInnerAllocatedObject* inner_object) {
LOperand* base_object = UseRegisterAtStart(inner_object->base_object());
@@ -1690,10 +1715,14 @@ LInstruction* LChunkBuilder::DoPower(HPower* instr) {
LInstruction* LChunkBuilder::DoRandom(HRandom* instr) {
ASSERT(instr->representation().IsDouble());
- ASSERT(instr->global_object()->representation().IsSmiOrTagged());
- LOperand* global_object = UseFixed(instr->global_object(), eax);
- LRandom* result = new(zone()) LRandom(global_object);
- return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
+ ASSERT(instr->global_object()->representation().IsTagged());
+ LOperand* global_object = UseTempRegister(instr->global_object());
+ LOperand* scratch = TempRegister();
+ LOperand* scratch2 = TempRegister();
+ LOperand* scratch3 = TempRegister();
+ LRandom* result = new(zone()) LRandom(
+ global_object, scratch, scratch2, scratch3);
+ return DefineFixedDouble(result, xmm1);
}
@@ -1918,9 +1947,7 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
info()->MarkAsDeferredCalling();
LOperand* value = UseRegister(instr->value());
// Temp register is used for the untagged smi copy and the minus zero check.
- LOperand* temp = instr->deoptimize_on_minus_zero()
- ? TempRegister()
- : NULL;
+ LOperand* temp = TempRegister();
LNumberUntagD* res = new(zone()) LNumberUntagD(value, temp);
return AssignEnvironment(DefineAsRegister(res));
} else if (to.IsSmi()) {
@@ -1932,26 +1959,17 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
return AssignEnvironment(DefineSameAsFirst(new(zone()) LCheckSmi(value)));
} else {
ASSERT(to.IsInteger32());
- if (instr->value()->type().IsSmi()) {
- LOperand* value = UseRegister(instr->value());
+ HValue* val = instr->value();
+ if (val->type().IsSmi() || val->representation().IsSmi()) {
+ LOperand* value = UseRegister(val);
return DefineSameAsFirst(new(zone()) LSmiUntag(value, false));
} else {
bool truncating = instr->CanTruncateToInt32();
- if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
- LOperand* value = UseRegister(instr->value());
- LOperand* xmm_temp =
- (truncating && CpuFeatures::IsSupported(SSE3))
- ? NULL
- : FixedTemp(xmm1);
- LTaggedToI* res = new(zone()) LTaggedToI(value, xmm_temp);
- return AssignEnvironment(DefineSameAsFirst(res));
- } else {
- LOperand* value = UseFixed(instr->value(), ecx);
- LTaggedToINoSSE2* res =
- new(zone()) LTaggedToINoSSE2(value, TempRegister(),
- TempRegister(), TempRegister());
- return AssignEnvironment(DefineFixed(res, ecx));
- }
+ LOperand* xmm_temp =
+ (CpuFeatures::IsSafeForSnapshot(SSE2) && !truncating)
+ ? FixedTemp(xmm1) : NULL;
+ LTaggedToI* res = new(zone()) LTaggedToI(UseRegister(val), xmm_temp);
+ return AssignEnvironment(DefineSameAsFirst(res));
}
}
} else if (from.IsDouble()) {
@@ -1971,7 +1989,7 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
} else {
ASSERT(to.IsInteger32());
bool truncating = instr->CanTruncateToInt32();
- bool needs_temp = truncating && !CpuFeatures::IsSupported(SSE3);
+ bool needs_temp = CpuFeatures::IsSafeForSnapshot(SSE2) && !truncating;
LOperand* value = needs_temp ?
UseTempRegister(instr->value()) : UseRegister(instr->value());
LOperand* temp = needs_temp ? TempRegister() : NULL;
@@ -2046,14 +2064,14 @@ LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
}
-LInstruction* LChunkBuilder::DoCheckFunction(HCheckFunction* instr) {
- // If the target is in new space, we'll emit a global cell compare and so
- // want the value in a register. If the target gets promoted before we
+LInstruction* LChunkBuilder::DoCheckValue(HCheckValue* instr) {
+ // If the object is in new space, we'll emit a global cell compare and so
+ // want the value in a register. If the object gets promoted before we
// emit code, we will still get the register but will do an immediate
// compare instead of the cell compare. This is safe.
- LOperand* value = instr->target_in_new_space()
+ LOperand* value = instr->object_in_new_space()
? UseRegisterAtStart(instr->value()) : UseAtStart(instr->value());
- return AssignEnvironment(new(zone()) LCheckFunction(value));
+ return AssignEnvironment(new(zone()) LCheckValue(value));
}
@@ -2540,10 +2558,23 @@ LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
- int spill_index = chunk()->GetNextSpillIndex(false); // Not double-width.
- if (spill_index > LUnallocated::kMaxFixedSlotIndex) {
- Abort(kTooManySpillSlotsNeededForOSR);
- spill_index = 0;
+ // Use an index that corresponds to the location in the unoptimized frame,
+ // which the optimized frame will subsume.
+ int env_index = instr->index();
+ int spill_index = 0;
+ if (instr->environment()->is_parameter_index(env_index)) {
+ spill_index = chunk()->GetParameterStackSlot(env_index);
+ } else {
+ spill_index = env_index - instr->environment()->first_local_index();
+ if (spill_index > LUnallocated::kMaxFixedSlotIndex) {
+ Abort(kNotEnoughSpillSlotsForOsr);
+ spill_index = 0;
+ }
+ if (spill_index == 0) {
+ // The dynamic frame alignment state overwrites the first local.
+ // The first local is saved at the end of the unoptimized frame.
+ spill_index = graph()->osr()->UnoptimizedFrameSlots();
+ }
}
return DefineAsSpilled(new(zone()) LUnknownOSRValue, spill_index);
}
@@ -2567,6 +2598,8 @@ LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) {
LInstruction* LChunkBuilder::DoCapturedObject(HCapturedObject* instr) {
+ instr->ReplayEnvironment(current_block_->last_environment());
+
// There are no real uses of a captured object.
return NULL;
}
@@ -2615,20 +2648,7 @@ LInstruction* LChunkBuilder::DoIsConstructCallAndBranch(
LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
- HEnvironment* env = current_block_->last_environment();
- ASSERT(env != NULL);
-
- env->set_ast_id(instr->ast_id());
-
- env->Drop(instr->pop_count());
- for (int i = instr->values()->length() - 1; i >= 0; --i) {
- HValue* value = instr->values()->at(i);
- if (instr->HasAssignedIndexAt(i)) {
- env->Bind(instr->GetAssignedIndexAt(i), value);
- } else {
- env->Push(value);
- }
- }
+ instr->ReplayEnvironment(current_block_->last_environment());
// If there is an instruction pending deoptimization environment create a
// lazy bailout instruction to capture the environment.
diff --git a/deps/v8/src/ia32/lithium-ia32.h b/deps/v8/src/ia32/lithium-ia32.h
index 7ae87a08c..3a609c991 100644
--- a/deps/v8/src/ia32/lithium-ia32.h
+++ b/deps/v8/src/ia32/lithium-ia32.h
@@ -62,17 +62,18 @@ class LCodeGen;
V(CallNewArray) \
V(CallRuntime) \
V(CallStub) \
- V(CheckFunction) \
V(CheckInstanceType) \
V(CheckMaps) \
V(CheckMapValue) \
V(CheckNonSmi) \
V(CheckSmi) \
+ V(CheckValue) \
V(ClampDToUint8) \
V(ClampIToUint8) \
V(ClampTToUint8) \
V(ClampTToUint8NoSSE2) \
V(ClassOfTestAndBranch) \
+ V(ClobberDoubles) \
V(CompareNumericAndBranch) \
V(CmpObjectEqAndBranch) \
V(CmpHoleAndBranch) \
@@ -161,6 +162,7 @@ class LCodeGen;
V(SmiTag) \
V(SmiUntag) \
V(StackCheck) \
+ V(StoreCodeEntry) \
V(StoreContextSlot) \
V(StoreGlobalCell) \
V(StoreGlobalGeneric) \
@@ -174,7 +176,6 @@ class LCodeGen;
V(StringCompareAndBranch) \
V(SubI) \
V(TaggedToI) \
- V(TaggedToINoSSE2) \
V(ThisFunction) \
V(Throw) \
V(ToFastProperties) \
@@ -188,13 +189,17 @@ class LCodeGen;
V(WrapReceiver)
-#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
- virtual Opcode opcode() const { return LInstruction::k##type; } \
- virtual void CompileToNative(LCodeGen* generator); \
- virtual const char* Mnemonic() const { return mnemonic; } \
- static L##type* cast(LInstruction* instr) { \
- ASSERT(instr->Is##type()); \
- return reinterpret_cast<L##type*>(instr); \
+#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
+ virtual Opcode opcode() const V8_FINAL V8_OVERRIDE { \
+ return LInstruction::k##type; \
+ } \
+ virtual void CompileToNative(LCodeGen* generator) V8_FINAL V8_OVERRIDE; \
+ virtual const char* Mnemonic() const V8_FINAL V8_OVERRIDE { \
+ return mnemonic; \
+ } \
+ static L##type* cast(LInstruction* instr) { \
+ ASSERT(instr->Is##type()); \
+ return reinterpret_cast<L##type*>(instr); \
}
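The V8_FINAL and V8_OVERRIDE tokens that this hunk threads through the macro are compatibility shims: on C++11 compilers they expand to the `final` and `override` specifiers, and to nothing on older toolchains. A minimal sketch of the pattern follows (the real definitions live in V8's configuration headers; this reconstruction is an assumption, not a quote of them):

    #if __cplusplus >= 201103L
    #define V8_FINAL final
    #define V8_OVERRIDE override
    #else
    #define V8_FINAL
    #define V8_OVERRIDE
    #endif

    struct Base {
      virtual ~Base() {}
      virtual int Tag() const = 0;
    };

    struct Leaf V8_FINAL : public Base {
      // On a C++11 compiler, a signature mismatch here is a compile error.
      virtual int Tag() const V8_OVERRIDE { return 1; }
    };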
@@ -204,7 +209,7 @@ class LCodeGen;
}
-class LInstruction: public ZoneObject {
+class LInstruction : public ZoneObject {
public:
LInstruction()
: environment_(NULL),
@@ -213,7 +218,7 @@ class LInstruction: public ZoneObject {
set_position(RelocInfo::kNoPosition);
}
- virtual ~LInstruction() { }
+ virtual ~LInstruction() {}
virtual void CompileToNative(LCodeGen* generator) = 0;
virtual const char* Mnemonic() const = 0;
@@ -273,10 +278,9 @@ class LInstruction: public ZoneObject {
bool ClobbersRegisters() const { return IsCall(); }
virtual bool ClobbersDoubleRegisters() const {
return IsCall() ||
- (!CpuFeatures::IsSupported(SSE2) &&
- // We only have rudimentary X87Stack tracking, thus in general
- // cannot handle deoptimization nor phi-nodes.
- (HasEnvironment() || IsControl()));
+ // We only have rudimentary X87Stack tracking, thus in general
+ // cannot handle phi-nodes.
+ (!CpuFeatures::IsSafeForSnapshot(SSE2) && IsControl());
}
virtual bool HasResult() const = 0;
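A condensed model of the revised ClobbersDoubleRegisters predicate above (illustrative only; ToyInstr is invented and this is not the V8 API). The X87 register stack is only loosely tracked without SSE2, so control instructions, which may carry phi nodes, must still be treated as clobbering every double register; instructions that merely hold a deoptimization environment no longer are, since the commit adds an explicit LClobberDoubles instruction (see the class introduced later in this header) to mark the remaining clobber points.

    struct ToyInstr {
      bool is_call;
      bool is_control;
    };

    bool ClobbersDoubleRegisters(const ToyInstr& instr,
                                 bool sse2_safe_for_snapshot) {
      // Calls always clobber; otherwise only control flow does, and only
      // when SSE2 cannot be relied upon.
      return instr.is_call || (!sse2_safe_for_snapshot && instr.is_control);
    }

    int main() {
      return ClobbersDoubleRegisters(ToyInstr{false, true}, false) ? 0 : 1;
    }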
@@ -319,11 +323,13 @@ class LInstruction: public ZoneObject {
// I = number of input operands.
// T = number of temporary operands.
template<int R, int I, int T>
-class LTemplateInstruction: public LInstruction {
+class LTemplateInstruction : public LInstruction {
public:
// Allow 0 or 1 output operands.
STATIC_ASSERT(R == 0 || R == 1);
- virtual bool HasResult() const { return R != 0 && result() != NULL; }
+ virtual bool HasResult() const V8_FINAL V8_OVERRIDE {
+ return R != 0 && result() != NULL;
+ }
void set_result(LOperand* operand) { results_[0] = operand; }
LOperand* result() const { return results_[0]; }
@@ -334,15 +340,15 @@ class LTemplateInstruction: public LInstruction {
private:
// Iterator support.
- virtual int InputCount() { return I; }
- virtual LOperand* InputAt(int i) { return inputs_[i]; }
+ virtual int InputCount() V8_FINAL V8_OVERRIDE { return I; }
+ virtual LOperand* InputAt(int i) V8_FINAL V8_OVERRIDE { return inputs_[i]; }
- virtual int TempCount() { return T; }
- virtual LOperand* TempAt(int i) { return temps_[i]; }
+ virtual int TempCount() V8_FINAL V8_OVERRIDE { return T; }
+ virtual LOperand* TempAt(int i) V8_FINAL V8_OVERRIDE { return temps_[i]; }
};
-class LGap: public LTemplateInstruction<0, 0, 0> {
+class LGap : public LTemplateInstruction<0, 0, 0> {
public:
explicit LGap(HBasicBlock* block) : block_(block) {
parallel_moves_[BEFORE] = NULL;
@@ -352,8 +358,8 @@ class LGap: public LTemplateInstruction<0, 0, 0> {
}
// Can't use the DECLARE-macro here because of sub-classes.
- virtual bool IsGap() const { return true; }
- virtual void PrintDataTo(StringStream* stream);
+ virtual bool IsGap() const V8_FINAL V8_OVERRIDE { return true; }
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
static LGap* cast(LInstruction* instr) {
ASSERT(instr->IsGap());
return reinterpret_cast<LGap*>(instr);
@@ -389,11 +395,11 @@ class LGap: public LTemplateInstruction<0, 0, 0> {
};
-class LInstructionGap: public LGap {
+class LInstructionGap V8_FINAL : public LGap {
public:
explicit LInstructionGap(HBasicBlock* block) : LGap(block) { }
- virtual bool HasInterestingComment(LCodeGen* gen) const {
+ virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
return !IsRedundant();
}
@@ -401,29 +407,42 @@ class LInstructionGap: public LGap {
};
-class LGoto: public LTemplateInstruction<0, 0, 0> {
+class LClobberDoubles V8_FINAL : public LTemplateInstruction<0, 0, 0> {
public:
- explicit LGoto(int block_id) : block_id_(block_id) { }
+ LClobberDoubles() { ASSERT(!CpuFeatures::IsSafeForSnapshot(SSE2)); }
+
+ virtual bool ClobbersDoubleRegisters() const V8_OVERRIDE { return true; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ClobberDoubles, "clobber-d")
+};
+
- virtual bool HasInterestingComment(LCodeGen* gen) const;
+class LGoto V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+ public:
+ explicit LGoto(HBasicBlock* block) : block_(block) { }
+
+ virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
- virtual void PrintDataTo(StringStream* stream);
- virtual bool IsControl() const { return true; }
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual bool IsControl() const V8_OVERRIDE { return true; }
+
+ int block_id() const { return block_->block_id(); }
+ virtual bool ClobbersDoubleRegisters() const V8_OVERRIDE { return false; }
- int block_id() const { return block_id_; }
+ bool jumps_to_join() const { return block_->predecessors()->length() > 1; }
private:
- int block_id_;
+ HBasicBlock* block_;
};
-class LLazyBailout: public LTemplateInstruction<0, 0, 0> {
+class LLazyBailout V8_FINAL : public LTemplateInstruction<0, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(LazyBailout, "lazy-bailout")
};
-class LDummyUse: public LTemplateInstruction<1, 1, 0> {
+class LDummyUse V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LDummyUse(LOperand* value) {
inputs_[0] = value;
@@ -432,22 +451,24 @@ class LDummyUse: public LTemplateInstruction<1, 1, 0> {
};
-class LDeoptimize: public LTemplateInstruction<0, 0, 0> {
+class LDeoptimize V8_FINAL : public LTemplateInstruction<0, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
DECLARE_HYDROGEN_ACCESSOR(Deoptimize)
};
-class LLabel: public LGap {
+class LLabel V8_FINAL : public LGap {
public:
explicit LLabel(HBasicBlock* block)
: LGap(block), replacement_(NULL) { }
- virtual bool HasInterestingComment(LCodeGen* gen) const { return false; }
+ virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+ return false;
+ }
DECLARE_CONCRETE_INSTRUCTION(Label, "label")
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
int block_id() const { return block()->block_id(); }
bool is_loop_header() const { return block()->IsLoopHeader(); }
@@ -463,14 +484,16 @@ class LLabel: public LGap {
};
-class LParameter: public LTemplateInstruction<1, 0, 0> {
+class LParameter V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
- virtual bool HasInterestingComment(LCodeGen* gen) const { return false; }
+ virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+ return false;
+ }
DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter")
};
-class LCallStub: public LTemplateInstruction<1, 1, 0> {
+class LCallStub V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LCallStub(LOperand* context) {
inputs_[0] = context;
@@ -487,9 +510,11 @@ class LCallStub: public LTemplateInstruction<1, 1, 0> {
};
-class LUnknownOSRValue: public LTemplateInstruction<1, 0, 0> {
+class LUnknownOSRValue V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
- virtual bool HasInterestingComment(LCodeGen* gen) const { return false; }
+ virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+ return false;
+ }
DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown-osr-value")
};
@@ -499,7 +524,7 @@ class LControlInstruction: public LTemplateInstruction<0, I, T> {
public:
LControlInstruction() : false_label_(NULL), true_label_(NULL) { }
- virtual bool IsControl() const { return true; }
+ virtual bool IsControl() const V8_FINAL V8_OVERRIDE { return true; }
int SuccessorCount() { return hydrogen()->SuccessorCount(); }
HBasicBlock* SuccessorAt(int i) { return hydrogen()->SuccessorAt(i); }
@@ -538,7 +563,7 @@ class LControlInstruction: public LTemplateInstruction<0, I, T> {
};
-class LWrapReceiver: public LTemplateInstruction<1, 2, 1> {
+class LWrapReceiver V8_FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LWrapReceiver(LOperand* receiver,
LOperand* function,
@@ -556,7 +581,7 @@ class LWrapReceiver: public LTemplateInstruction<1, 2, 1> {
};
-class LApplyArguments: public LTemplateInstruction<1, 4, 0> {
+class LApplyArguments V8_FINAL : public LTemplateInstruction<1, 4, 0> {
public:
LApplyArguments(LOperand* function,
LOperand* receiver,
@@ -577,7 +602,7 @@ class LApplyArguments: public LTemplateInstruction<1, 4, 0> {
};
-class LAccessArgumentsAt: public LTemplateInstruction<1, 3, 0> {
+class LAccessArgumentsAt V8_FINAL : public LTemplateInstruction<1, 3, 0> {
public:
LAccessArgumentsAt(LOperand* arguments, LOperand* length, LOperand* index) {
inputs_[0] = arguments;
@@ -591,11 +616,11 @@ class LAccessArgumentsAt: public LTemplateInstruction<1, 3, 0> {
DECLARE_CONCRETE_INSTRUCTION(AccessArgumentsAt, "access-arguments-at")
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LArgumentsLength: public LTemplateInstruction<1, 1, 0> {
+class LArgumentsLength V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LArgumentsLength(LOperand* elements) {
inputs_[0] = elements;
@@ -607,20 +632,20 @@ class LArgumentsLength: public LTemplateInstruction<1, 1, 0> {
};
-class LArgumentsElements: public LTemplateInstruction<1, 0, 0> {
+class LArgumentsElements V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments-elements")
DECLARE_HYDROGEN_ACCESSOR(ArgumentsElements)
};
-class LDebugBreak: public LTemplateInstruction<0, 0, 0> {
+class LDebugBreak V8_FINAL : public LTemplateInstruction<0, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(DebugBreak, "break")
};
-class LModI: public LTemplateInstruction<1, 2, 1> {
+class LModI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LModI(LOperand* left, LOperand* right, LOperand* temp) {
inputs_[0] = left;
@@ -637,7 +662,7 @@ class LModI: public LTemplateInstruction<1, 2, 1> {
};
-class LDivI: public LTemplateInstruction<1, 2, 1> {
+class LDivI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LDivI(LOperand* left, LOperand* right, LOperand* temp) {
inputs_[0] = left;
@@ -655,7 +680,7 @@ class LDivI: public LTemplateInstruction<1, 2, 1> {
};
-class LMathFloorOfDiv: public LTemplateInstruction<1, 2, 1> {
+class LMathFloorOfDiv V8_FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LMathFloorOfDiv(LOperand* left,
LOperand* right,
@@ -674,7 +699,7 @@ class LMathFloorOfDiv: public LTemplateInstruction<1, 2, 1> {
};
-class LMulI: public LTemplateInstruction<1, 2, 1> {
+class LMulI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LMulI(LOperand* left, LOperand* right, LOperand* temp) {
inputs_[0] = left;
@@ -691,7 +716,7 @@ class LMulI: public LTemplateInstruction<1, 2, 1> {
};
-class LCompareNumericAndBranch: public LControlInstruction<2, 0> {
+class LCompareNumericAndBranch V8_FINAL : public LControlInstruction<2, 0> {
public:
LCompareNumericAndBranch(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -714,7 +739,7 @@ class LCompareNumericAndBranch: public LControlInstruction<2, 0> {
};
-class LMathFloor: public LTemplateInstruction<1, 1, 0> {
+class LMathFloor V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LMathFloor(LOperand* value) {
inputs_[0] = value;
@@ -727,7 +752,7 @@ class LMathFloor: public LTemplateInstruction<1, 1, 0> {
};
-class LMathRound: public LTemplateInstruction<1, 2, 1> {
+class LMathRound V8_FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LMathRound(LOperand* context, LOperand* value, LOperand* temp) {
inputs_[1] = context;
@@ -744,7 +769,7 @@ class LMathRound: public LTemplateInstruction<1, 2, 1> {
};
-class LMathAbs: public LTemplateInstruction<1, 2, 0> {
+class LMathAbs V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LMathAbs(LOperand* context, LOperand* value) {
inputs_[1] = context;
@@ -759,7 +784,7 @@ class LMathAbs: public LTemplateInstruction<1, 2, 0> {
};
-class LMathLog: public LTemplateInstruction<1, 1, 0> {
+class LMathLog V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LMathLog(LOperand* value) {
inputs_[0] = value;
@@ -771,7 +796,7 @@ class LMathLog: public LTemplateInstruction<1, 1, 0> {
};
-class LMathSin: public LTemplateInstruction<1, 1, 0> {
+class LMathSin V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LMathSin(LOperand* value) {
inputs_[0] = value;
@@ -783,7 +808,7 @@ class LMathSin: public LTemplateInstruction<1, 1, 0> {
};
-class LMathCos: public LTemplateInstruction<1, 1, 0> {
+class LMathCos V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LMathCos(LOperand* value) {
inputs_[0] = value;
@@ -795,7 +820,7 @@ class LMathCos: public LTemplateInstruction<1, 1, 0> {
};
-class LMathTan: public LTemplateInstruction<1, 1, 0> {
+class LMathTan V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LMathTan(LOperand* value) {
inputs_[0] = value;
@@ -807,7 +832,7 @@ class LMathTan: public LTemplateInstruction<1, 1, 0> {
};
-class LMathExp: public LTemplateInstruction<1, 1, 2> {
+class LMathExp V8_FINAL : public LTemplateInstruction<1, 1, 2> {
public:
LMathExp(LOperand* value,
LOperand* temp1,
@@ -826,7 +851,7 @@ class LMathExp: public LTemplateInstruction<1, 1, 2> {
};
-class LMathSqrt: public LTemplateInstruction<1, 1, 0> {
+class LMathSqrt V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LMathSqrt(LOperand* value) {
inputs_[0] = value;
@@ -838,7 +863,7 @@ class LMathSqrt: public LTemplateInstruction<1, 1, 0> {
};
-class LMathPowHalf: public LTemplateInstruction<1, 2, 1> {
+class LMathPowHalf V8_FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LMathPowHalf(LOperand* context, LOperand* value, LOperand* temp) {
inputs_[1] = context;
@@ -854,7 +879,7 @@ class LMathPowHalf: public LTemplateInstruction<1, 2, 1> {
};
-class LCmpObjectEqAndBranch: public LControlInstruction<2, 0> {
+class LCmpObjectEqAndBranch V8_FINAL : public LControlInstruction<2, 0> {
public:
LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -868,7 +893,7 @@ class LCmpObjectEqAndBranch: public LControlInstruction<2, 0> {
};
-class LCmpHoleAndBranch: public LControlInstruction<1, 0> {
+class LCmpHoleAndBranch V8_FINAL : public LControlInstruction<1, 0> {
public:
explicit LCmpHoleAndBranch(LOperand* object) {
inputs_[0] = object;
@@ -881,7 +906,7 @@ class LCmpHoleAndBranch: public LControlInstruction<1, 0> {
};
-class LIsObjectAndBranch: public LControlInstruction<1, 1> {
+class LIsObjectAndBranch V8_FINAL : public LControlInstruction<1, 1> {
public:
LIsObjectAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -893,11 +918,11 @@ class LIsObjectAndBranch: public LControlInstruction<1, 1> {
DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch")
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LIsNumberAndBranch: public LControlInstruction<1, 0> {
+class LIsNumberAndBranch V8_FINAL : public LControlInstruction<1, 0> {
public:
explicit LIsNumberAndBranch(LOperand* value) {
inputs_[0] = value;
@@ -910,7 +935,7 @@ class LIsNumberAndBranch: public LControlInstruction<1, 0> {
};
-class LIsStringAndBranch: public LControlInstruction<1, 1> {
+class LIsStringAndBranch V8_FINAL : public LControlInstruction<1, 1> {
public:
LIsStringAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -923,11 +948,11 @@ class LIsStringAndBranch: public LControlInstruction<1, 1> {
DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch, "is-string-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsStringAndBranch)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LIsSmiAndBranch: public LControlInstruction<1, 0> {
+class LIsSmiAndBranch V8_FINAL : public LControlInstruction<1, 0> {
public:
explicit LIsSmiAndBranch(LOperand* value) {
inputs_[0] = value;
@@ -938,11 +963,11 @@ class LIsSmiAndBranch: public LControlInstruction<1, 0> {
DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsSmiAndBranch)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LIsUndetectableAndBranch: public LControlInstruction<1, 1> {
+class LIsUndetectableAndBranch V8_FINAL : public LControlInstruction<1, 1> {
public:
LIsUndetectableAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -956,11 +981,11 @@ class LIsUndetectableAndBranch: public LControlInstruction<1, 1> {
"is-undetectable-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsUndetectableAndBranch)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LStringCompareAndBranch: public LControlInstruction<3, 0> {
+class LStringCompareAndBranch V8_FINAL : public LControlInstruction<3, 0> {
public:
LStringCompareAndBranch(LOperand* context, LOperand* left, LOperand* right) {
inputs_[0] = context;
@@ -975,13 +1000,13 @@ class LStringCompareAndBranch: public LControlInstruction<3, 0> {
"string-compare-and-branch")
DECLARE_HYDROGEN_ACCESSOR(StringCompareAndBranch)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
Token::Value op() const { return hydrogen()->token(); }
};
-class LHasInstanceTypeAndBranch: public LControlInstruction<1, 1> {
+class LHasInstanceTypeAndBranch V8_FINAL : public LControlInstruction<1, 1> {
public:
LHasInstanceTypeAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -995,11 +1020,11 @@ class LHasInstanceTypeAndBranch: public LControlInstruction<1, 1> {
"has-instance-type-and-branch")
DECLARE_HYDROGEN_ACCESSOR(HasInstanceTypeAndBranch)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LGetCachedArrayIndex: public LTemplateInstruction<1, 1, 0> {
+class LGetCachedArrayIndex V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LGetCachedArrayIndex(LOperand* value) {
inputs_[0] = value;
@@ -1012,7 +1037,8 @@ class LGetCachedArrayIndex: public LTemplateInstruction<1, 1, 0> {
};
-class LHasCachedArrayIndexAndBranch: public LControlInstruction<1, 0> {
+class LHasCachedArrayIndexAndBranch V8_FINAL
+ : public LControlInstruction<1, 0> {
public:
explicit LHasCachedArrayIndexAndBranch(LOperand* value) {
inputs_[0] = value;
@@ -1023,11 +1049,11 @@ class LHasCachedArrayIndexAndBranch: public LControlInstruction<1, 0> {
DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndexAndBranch,
"has-cached-array-index-and-branch")
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LIsConstructCallAndBranch: public LControlInstruction<0, 1> {
+class LIsConstructCallAndBranch V8_FINAL : public LControlInstruction<0, 1> {
public:
explicit LIsConstructCallAndBranch(LOperand* temp) {
temps_[0] = temp;
@@ -1040,7 +1066,7 @@ class LIsConstructCallAndBranch: public LControlInstruction<0, 1> {
};
-class LClassOfTestAndBranch: public LControlInstruction<1, 2> {
+class LClassOfTestAndBranch V8_FINAL : public LControlInstruction<1, 2> {
public:
LClassOfTestAndBranch(LOperand* value, LOperand* temp, LOperand* temp2) {
inputs_[0] = value;
@@ -1056,11 +1082,11 @@ class LClassOfTestAndBranch: public LControlInstruction<1, 2> {
"class-of-test-and-branch")
DECLARE_HYDROGEN_ACCESSOR(ClassOfTestAndBranch)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LCmpT: public LTemplateInstruction<1, 3, 0> {
+class LCmpT V8_FINAL : public LTemplateInstruction<1, 3, 0> {
public:
LCmpT(LOperand* context, LOperand* left, LOperand* right) {
inputs_[0] = context;
@@ -1075,7 +1101,7 @@ class LCmpT: public LTemplateInstruction<1, 3, 0> {
};
-class LInstanceOf: public LTemplateInstruction<1, 3, 0> {
+class LInstanceOf V8_FINAL : public LTemplateInstruction<1, 3, 0> {
public:
LInstanceOf(LOperand* context, LOperand* left, LOperand* right) {
inputs_[0] = context;
@@ -1089,7 +1115,7 @@ class LInstanceOf: public LTemplateInstruction<1, 3, 0> {
};
-class LInstanceOfKnownGlobal: public LTemplateInstruction<1, 2, 1> {
+class LInstanceOfKnownGlobal V8_FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LInstanceOfKnownGlobal(LOperand* context, LOperand* value, LOperand* temp) {
inputs_[0] = context;
@@ -1108,7 +1134,8 @@ class LInstanceOfKnownGlobal: public LTemplateInstruction<1, 2, 1> {
LEnvironment* GetDeferredLazyDeoptimizationEnvironment() {
return lazy_deopt_env_;
}
- virtual void SetDeferredLazyDeoptimizationEnvironment(LEnvironment* env) {
+ virtual void SetDeferredLazyDeoptimizationEnvironment(
+ LEnvironment* env) V8_OVERRIDE {
lazy_deopt_env_ = env;
}
@@ -1117,7 +1144,7 @@ class LInstanceOfKnownGlobal: public LTemplateInstruction<1, 2, 1> {
};
-class LInstanceSize: public LTemplateInstruction<1, 1, 0> {
+class LInstanceSize V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LInstanceSize(LOperand* object) {
inputs_[0] = object;
@@ -1130,7 +1157,7 @@ class LInstanceSize: public LTemplateInstruction<1, 1, 0> {
};
-class LBoundsCheck: public LTemplateInstruction<0, 2, 0> {
+class LBoundsCheck V8_FINAL : public LTemplateInstruction<0, 2, 0> {
public:
LBoundsCheck(LOperand* index, LOperand* length) {
inputs_[0] = index;
@@ -1145,7 +1172,7 @@ class LBoundsCheck: public LTemplateInstruction<0, 2, 0> {
};
-class LBitI: public LTemplateInstruction<1, 2, 0> {
+class LBitI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LBitI(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -1162,7 +1189,7 @@ class LBitI: public LTemplateInstruction<1, 2, 0> {
};
-class LShiftI: public LTemplateInstruction<1, 2, 0> {
+class LShiftI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LShiftI(Token::Value op, LOperand* left, LOperand* right, bool can_deopt)
: op_(op), can_deopt_(can_deopt) {
@@ -1184,7 +1211,7 @@ class LShiftI: public LTemplateInstruction<1, 2, 0> {
};
-class LSubI: public LTemplateInstruction<1, 2, 0> {
+class LSubI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LSubI(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -1199,7 +1226,7 @@ class LSubI: public LTemplateInstruction<1, 2, 0> {
};
-class LConstantI: public LTemplateInstruction<1, 0, 0> {
+class LConstantI V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ConstantI, "constant-i")
DECLARE_HYDROGEN_ACCESSOR(Constant)
@@ -1208,7 +1235,7 @@ class LConstantI: public LTemplateInstruction<1, 0, 0> {
};
-class LConstantS: public LTemplateInstruction<1, 0, 0> {
+class LConstantS V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ConstantS, "constant-s")
DECLARE_HYDROGEN_ACCESSOR(Constant)
@@ -1217,7 +1244,7 @@ class LConstantS: public LTemplateInstruction<1, 0, 0> {
};
-class LConstantD: public LTemplateInstruction<1, 0, 1> {
+class LConstantD V8_FINAL : public LTemplateInstruction<1, 0, 1> {
public:
explicit LConstantD(LOperand* temp) {
temps_[0] = temp;
@@ -1232,7 +1259,7 @@ class LConstantD: public LTemplateInstruction<1, 0, 1> {
};
-class LConstantE: public LTemplateInstruction<1, 0, 0> {
+class LConstantE V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ConstantE, "constant-e")
DECLARE_HYDROGEN_ACCESSOR(Constant)
@@ -1243,16 +1270,18 @@ class LConstantE: public LTemplateInstruction<1, 0, 0> {
};
-class LConstantT: public LTemplateInstruction<1, 0, 0> {
+class LConstantT V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ConstantT, "constant-t")
DECLARE_HYDROGEN_ACCESSOR(Constant)
- Handle<Object> value() const { return hydrogen()->handle(); }
+ Handle<Object> value(Isolate* isolate) const {
+ return hydrogen()->handle(isolate);
+ }
};
-class LBranch: public LControlInstruction<1, 1> {
+class LBranch V8_FINAL : public LControlInstruction<1, 1> {
public:
LBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -1265,11 +1294,11 @@ class LBranch: public LControlInstruction<1, 1> {
DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
DECLARE_HYDROGEN_ACCESSOR(Branch)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LCmpMapAndBranch: public LControlInstruction<1, 0> {
+class LCmpMapAndBranch V8_FINAL : public LControlInstruction<1, 0> {
public:
explicit LCmpMapAndBranch(LOperand* value) {
inputs_[0] = value;
@@ -1284,7 +1313,7 @@ class LCmpMapAndBranch: public LControlInstruction<1, 0> {
};
-class LMapEnumLength: public LTemplateInstruction<1, 1, 0> {
+class LMapEnumLength V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LMapEnumLength(LOperand* value) {
inputs_[0] = value;
@@ -1296,7 +1325,7 @@ class LMapEnumLength: public LTemplateInstruction<1, 1, 0> {
};
-class LElementsKind: public LTemplateInstruction<1, 1, 0> {
+class LElementsKind V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LElementsKind(LOperand* value) {
inputs_[0] = value;
@@ -1309,7 +1338,7 @@ class LElementsKind: public LTemplateInstruction<1, 1, 0> {
};
-class LValueOf: public LTemplateInstruction<1, 1, 1> {
+class LValueOf V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LValueOf(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -1324,7 +1353,7 @@ class LValueOf: public LTemplateInstruction<1, 1, 1> {
};
-class LDateField: public LTemplateInstruction<1, 1, 1> {
+class LDateField V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LDateField(LOperand* date, LOperand* temp, Smi* index)
: index_(index) {
@@ -1345,7 +1374,7 @@ class LDateField: public LTemplateInstruction<1, 1, 1> {
};
-class LSeqStringSetChar: public LTemplateInstruction<1, 3, 0> {
+class LSeqStringSetChar V8_FINAL : public LTemplateInstruction<1, 3, 0> {
public:
LSeqStringSetChar(String::Encoding encoding,
LOperand* string,
@@ -1369,7 +1398,7 @@ class LSeqStringSetChar: public LTemplateInstruction<1, 3, 0> {
};
-class LThrow: public LTemplateInstruction<0, 2, 0> {
+class LThrow V8_FINAL : public LTemplateInstruction<0, 2, 0> {
public:
LThrow(LOperand* context, LOperand* value) {
inputs_[0] = context;
@@ -1383,7 +1412,7 @@ class LThrow: public LTemplateInstruction<0, 2, 0> {
};
-class LAddI: public LTemplateInstruction<1, 2, 0> {
+class LAddI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LAddI(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -1403,7 +1432,7 @@ class LAddI: public LTemplateInstruction<1, 2, 0> {
};
-class LMathMinMax: public LTemplateInstruction<1, 2, 0> {
+class LMathMinMax V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LMathMinMax(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -1418,7 +1447,7 @@ class LMathMinMax: public LTemplateInstruction<1, 2, 0> {
};
-class LPower: public LTemplateInstruction<1, 2, 0> {
+class LPower V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LPower(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -1433,20 +1462,29 @@ class LPower: public LTemplateInstruction<1, 2, 0> {
};
-class LRandom: public LTemplateInstruction<1, 1, 0> {
+class LRandom V8_FINAL : public LTemplateInstruction<1, 1, 3> {
public:
- explicit LRandom(LOperand* global_object) {
+ LRandom(LOperand* global_object,
+ LOperand* scratch,
+ LOperand* scratch2,
+ LOperand* scratch3) {
inputs_[0] = global_object;
+ temps_[0] = scratch;
+ temps_[1] = scratch2;
+ temps_[2] = scratch3;
}
- LOperand* global_object() { return inputs_[0]; }
+ LOperand* global_object() const { return inputs_[0]; }
+ LOperand* scratch() const { return temps_[0]; }
+ LOperand* scratch2() const { return temps_[1]; }
+ LOperand* scratch3() const { return temps_[2]; }
DECLARE_CONCRETE_INSTRUCTION(Random, "random")
DECLARE_HYDROGEN_ACCESSOR(Random)
};
-class LArithmeticD: public LTemplateInstruction<1, 2, 0> {
+class LArithmeticD V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LArithmeticD(Token::Value op, LOperand* left, LOperand* right)
: op_(op) {
@@ -1459,16 +1497,18 @@ class LArithmeticD: public LTemplateInstruction<1, 2, 0> {
Token::Value op() const { return op_; }
- virtual Opcode opcode() const { return LInstruction::kArithmeticD; }
- virtual void CompileToNative(LCodeGen* generator);
- virtual const char* Mnemonic() const;
+ virtual Opcode opcode() const V8_OVERRIDE {
+ return LInstruction::kArithmeticD;
+ }
+ virtual void CompileToNative(LCodeGen* generator) V8_OVERRIDE;
+ virtual const char* Mnemonic() const V8_OVERRIDE;
private:
Token::Value op_;
};
-class LArithmeticT: public LTemplateInstruction<1, 3, 0> {
+class LArithmeticT V8_FINAL : public LTemplateInstruction<1, 3, 0> {
public:
LArithmeticT(Token::Value op,
LOperand* context,
@@ -1484,9 +1524,11 @@ class LArithmeticT: public LTemplateInstruction<1, 3, 0> {
LOperand* left() { return inputs_[1]; }
LOperand* right() { return inputs_[2]; }
- virtual Opcode opcode() const { return LInstruction::kArithmeticT; }
- virtual void CompileToNative(LCodeGen* generator);
- virtual const char* Mnemonic() const;
+ virtual Opcode opcode() const V8_OVERRIDE {
+ return LInstruction::kArithmeticT;
+ }
+ virtual void CompileToNative(LCodeGen* generator) V8_OVERRIDE;
+ virtual const char* Mnemonic() const V8_OVERRIDE;
Token::Value op() const { return op_; }
@@ -1495,7 +1537,7 @@ class LArithmeticT: public LTemplateInstruction<1, 3, 0> {
};
-class LReturn: public LTemplateInstruction<0, 3, 0> {
+class LReturn V8_FINAL : public LTemplateInstruction<0, 3, 0> {
public:
explicit LReturn(LOperand* value, LOperand* context,
LOperand* parameter_count) {
@@ -1518,17 +1560,12 @@ class LReturn: public LTemplateInstruction<0, 3, 0> {
};
-class LLoadNamedField: public LTemplateInstruction<1, 1, 0> {
+class LLoadNamedField V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadNamedField(LOperand* object) {
inputs_[0] = object;
}
- virtual bool ClobbersDoubleRegisters() const {
- return !CpuFeatures::IsSupported(SSE2) &&
- !hydrogen()->representation().IsDouble();
- }
-
LOperand* object() { return inputs_[0]; }
DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field")
@@ -1536,7 +1573,7 @@ class LLoadNamedField: public LTemplateInstruction<1, 1, 0> {
};
-class LLoadNamedGeneric: public LTemplateInstruction<1, 2, 0> {
+class LLoadNamedGeneric V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LLoadNamedGeneric(LOperand* context, LOperand* object) {
inputs_[0] = context;
@@ -1553,7 +1590,7 @@ class LLoadNamedGeneric: public LTemplateInstruction<1, 2, 0> {
};
-class LLoadFunctionPrototype: public LTemplateInstruction<1, 1, 1> {
+class LLoadFunctionPrototype V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LLoadFunctionPrototype(LOperand* function, LOperand* temp) {
inputs_[0] = function;
@@ -1568,7 +1605,8 @@ class LLoadFunctionPrototype: public LTemplateInstruction<1, 1, 1> {
};
-class LLoadExternalArrayPointer: public LTemplateInstruction<1, 1, 0> {
+class LLoadExternalArrayPointer V8_FINAL
+ : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadExternalArrayPointer(LOperand* object) {
inputs_[0] = object;
@@ -1581,7 +1619,7 @@ class LLoadExternalArrayPointer: public LTemplateInstruction<1, 1, 0> {
};
-class LLoadKeyed: public LTemplateInstruction<1, 2, 0> {
+class LLoadKeyed V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LLoadKeyed(LOperand* elements, LOperand* key) {
inputs_[0] = elements;
@@ -1596,7 +1634,7 @@ class LLoadKeyed: public LTemplateInstruction<1, 2, 0> {
return hydrogen()->is_external();
}
- virtual bool ClobbersDoubleRegisters() const {
+ virtual bool ClobbersDoubleRegisters() const V8_OVERRIDE {
return !CpuFeatures::IsSupported(SSE2) &&
!IsDoubleOrFloatElementsKind(hydrogen()->elements_kind());
}
@@ -1604,7 +1642,7 @@ class LLoadKeyed: public LTemplateInstruction<1, 2, 0> {
DECLARE_CONCRETE_INSTRUCTION(LoadKeyed, "load-keyed")
DECLARE_HYDROGEN_ACCESSOR(LoadKeyed)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
uint32_t additional_index() const { return hydrogen()->index_offset(); }
bool key_is_smi() {
return hydrogen()->key()->representation().IsTagged();
@@ -1625,7 +1663,7 @@ inline static bool ExternalArrayOpRequiresTemp(
}
-class LLoadKeyedGeneric: public LTemplateInstruction<1, 3, 0> {
+class LLoadKeyedGeneric V8_FINAL : public LTemplateInstruction<1, 3, 0> {
public:
LLoadKeyedGeneric(LOperand* context, LOperand* obj, LOperand* key) {
inputs_[0] = context;
@@ -1641,14 +1679,14 @@ class LLoadKeyedGeneric: public LTemplateInstruction<1, 3, 0> {
};
-class LLoadGlobalCell: public LTemplateInstruction<1, 0, 0> {
+class LLoadGlobalCell V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(LoadGlobalCell, "load-global-cell")
DECLARE_HYDROGEN_ACCESSOR(LoadGlobalCell)
};
-class LLoadGlobalGeneric: public LTemplateInstruction<1, 2, 0> {
+class LLoadGlobalGeneric V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LLoadGlobalGeneric(LOperand* context, LOperand* global_object) {
inputs_[0] = context;
@@ -1666,7 +1704,7 @@ class LLoadGlobalGeneric: public LTemplateInstruction<1, 2, 0> {
};
-class LStoreGlobalCell: public LTemplateInstruction<0, 1, 0> {
+class LStoreGlobalCell V8_FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LStoreGlobalCell(LOperand* value) {
inputs_[0] = value;
@@ -1679,7 +1717,7 @@ class LStoreGlobalCell: public LTemplateInstruction<0, 1, 0> {
};
-class LStoreGlobalGeneric: public LTemplateInstruction<0, 3, 0> {
+class LStoreGlobalGeneric V8_FINAL : public LTemplateInstruction<0, 3, 0> {
public:
LStoreGlobalGeneric(LOperand* context,
LOperand* global_object,
@@ -1701,7 +1739,7 @@ class LStoreGlobalGeneric: public LTemplateInstruction<0, 3, 0> {
};
-class LLoadContextSlot: public LTemplateInstruction<1, 1, 0> {
+class LLoadContextSlot V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadContextSlot(LOperand* context) {
inputs_[0] = context;
@@ -1714,11 +1752,11 @@ class LLoadContextSlot: public LTemplateInstruction<1, 1, 0> {
int slot_index() { return hydrogen()->slot_index(); }
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LStoreContextSlot: public LTemplateInstruction<0, 2, 1> {
+class LStoreContextSlot V8_FINAL : public LTemplateInstruction<0, 2, 1> {
public:
LStoreContextSlot(LOperand* context, LOperand* value, LOperand* temp) {
inputs_[0] = context;
@@ -1735,11 +1773,11 @@ class LStoreContextSlot: public LTemplateInstruction<0, 2, 1> {
int slot_index() { return hydrogen()->slot_index(); }
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LPushArgument: public LTemplateInstruction<0, 1, 0> {
+class LPushArgument V8_FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LPushArgument(LOperand* value) {
inputs_[0] = value;
@@ -1751,7 +1789,7 @@ class LPushArgument: public LTemplateInstruction<0, 1, 0> {
};
-class LDrop: public LTemplateInstruction<0, 0, 0> {
+class LDrop V8_FINAL : public LTemplateInstruction<0, 0, 0> {
public:
explicit LDrop(int count) : count_(count) { }
@@ -1764,7 +1802,24 @@ class LDrop: public LTemplateInstruction<0, 0, 0> {
};
-class LInnerAllocatedObject: public LTemplateInstruction<1, 1, 0> {
+class LStoreCodeEntry V8_FINAL : public LTemplateInstruction<0, 1, 1> {
+ public:
+ LStoreCodeEntry(LOperand* function, LOperand* code_object) {
+ inputs_[0] = function;
+ temps_[0] = code_object;
+ }
+
+ LOperand* function() { return inputs_[0]; }
+ LOperand* code_object() { return temps_[0]; }
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreCodeEntry, "store-code-entry")
+ DECLARE_HYDROGEN_ACCESSOR(StoreCodeEntry)
+};
+
+
+class LInnerAllocatedObject V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LInnerAllocatedObject(LOperand* base_object) {
inputs_[0] = base_object;
@@ -1780,21 +1835,21 @@ class LInnerAllocatedObject: public LTemplateInstruction<1, 1, 0> {
};
-class LThisFunction: public LTemplateInstruction<1, 0, 0> {
+class LThisFunction V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function")
DECLARE_HYDROGEN_ACCESSOR(ThisFunction)
};
-class LContext: public LTemplateInstruction<1, 0, 0> {
+class LContext V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(Context, "context")
DECLARE_HYDROGEN_ACCESSOR(Context)
};
-class LOuterContext: public LTemplateInstruction<1, 1, 0> {
+class LOuterContext V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LOuterContext(LOperand* context) {
inputs_[0] = context;
@@ -1806,7 +1861,7 @@ class LOuterContext: public LTemplateInstruction<1, 1, 0> {
};
-class LDeclareGlobals: public LTemplateInstruction<0, 1, 0> {
+class LDeclareGlobals V8_FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LDeclareGlobals(LOperand* context) {
inputs_[0] = context;
@@ -1819,7 +1874,7 @@ class LDeclareGlobals: public LTemplateInstruction<0, 1, 0> {
};
-class LGlobalObject: public LTemplateInstruction<1, 1, 0> {
+class LGlobalObject V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LGlobalObject(LOperand* context) {
inputs_[0] = context;
@@ -1831,7 +1886,7 @@ class LGlobalObject: public LTemplateInstruction<1, 1, 0> {
};
-class LGlobalReceiver: public LTemplateInstruction<1, 1, 0> {
+class LGlobalReceiver V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LGlobalReceiver(LOperand* global_object) {
inputs_[0] = global_object;
@@ -1843,19 +1898,19 @@ class LGlobalReceiver: public LTemplateInstruction<1, 1, 0> {
};
-class LCallConstantFunction: public LTemplateInstruction<1, 0, 0> {
+class LCallConstantFunction V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(CallConstantFunction, "call-constant-function")
DECLARE_HYDROGEN_ACCESSOR(CallConstantFunction)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
Handle<JSFunction> function() { return hydrogen()->function(); }
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LInvokeFunction: public LTemplateInstruction<1, 2, 0> {
+class LInvokeFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LInvokeFunction(LOperand* context, LOperand* function) {
inputs_[0] = context;
@@ -1868,13 +1923,13 @@ class LInvokeFunction: public LTemplateInstruction<1, 2, 0> {
DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallKeyed: public LTemplateInstruction<1, 2, 0> {
+class LCallKeyed V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LCallKeyed(LOperand* context, LOperand* key) {
inputs_[0] = context;
@@ -1887,13 +1942,13 @@ class LCallKeyed: public LTemplateInstruction<1, 2, 0> {
DECLARE_CONCRETE_INSTRUCTION(CallKeyed, "call-keyed")
DECLARE_HYDROGEN_ACCESSOR(CallKeyed)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallNamed: public LTemplateInstruction<1, 1, 0> {
+class LCallNamed V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LCallNamed(LOperand* context) {
inputs_[0] = context;
@@ -1904,14 +1959,14 @@ class LCallNamed: public LTemplateInstruction<1, 1, 0> {
DECLARE_CONCRETE_INSTRUCTION(CallNamed, "call-named")
DECLARE_HYDROGEN_ACCESSOR(CallNamed)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
Handle<String> name() const { return hydrogen()->name(); }
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallFunction: public LTemplateInstruction<1, 2, 0> {
+class LCallFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
explicit LCallFunction(LOperand* context, LOperand* function) {
inputs_[0] = context;
@@ -1928,7 +1983,7 @@ class LCallFunction: public LTemplateInstruction<1, 2, 0> {
};
-class LCallGlobal: public LTemplateInstruction<1, 1, 0> {
+class LCallGlobal V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LCallGlobal(LOperand* context) {
inputs_[0] = context;
@@ -1939,25 +1994,25 @@ class LCallGlobal: public LTemplateInstruction<1, 1, 0> {
DECLARE_CONCRETE_INSTRUCTION(CallGlobal, "call-global")
DECLARE_HYDROGEN_ACCESSOR(CallGlobal)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
Handle<String> name() const {return hydrogen()->name(); }
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallKnownGlobal: public LTemplateInstruction<1, 0, 0> {
+class LCallKnownGlobal V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(CallKnownGlobal, "call-known-global")
DECLARE_HYDROGEN_ACCESSOR(CallKnownGlobal)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallNew: public LTemplateInstruction<1, 2, 0> {
+class LCallNew V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LCallNew(LOperand* context, LOperand* constructor) {
inputs_[0] = context;
@@ -1970,13 +2025,13 @@ class LCallNew: public LTemplateInstruction<1, 2, 0> {
DECLARE_CONCRETE_INSTRUCTION(CallNew, "call-new")
DECLARE_HYDROGEN_ACCESSOR(CallNew)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallNewArray: public LTemplateInstruction<1, 2, 0> {
+class LCallNewArray V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LCallNewArray(LOperand* context, LOperand* constructor) {
inputs_[0] = context;
@@ -1989,13 +2044,13 @@ class LCallNewArray: public LTemplateInstruction<1, 2, 0> {
DECLARE_CONCRETE_INSTRUCTION(CallNewArray, "call-new-array")
DECLARE_HYDROGEN_ACCESSOR(CallNewArray)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallRuntime: public LTemplateInstruction<1, 1, 0> {
+class LCallRuntime V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LCallRuntime(LOperand* context) {
inputs_[0] = context;
@@ -2011,7 +2066,7 @@ class LCallRuntime: public LTemplateInstruction<1, 1, 0> {
};
-class LInteger32ToDouble: public LTemplateInstruction<1, 1, 0> {
+class LInteger32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LInteger32ToDouble(LOperand* value) {
inputs_[0] = value;
@@ -2023,7 +2078,7 @@ class LInteger32ToDouble: public LTemplateInstruction<1, 1, 0> {
};
-class LInteger32ToSmi: public LTemplateInstruction<1, 1, 0> {
+class LInteger32ToSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LInteger32ToSmi(LOperand* value) {
inputs_[0] = value;
@@ -2036,7 +2091,7 @@ class LInteger32ToSmi: public LTemplateInstruction<1, 1, 0> {
};
-class LUint32ToDouble: public LTemplateInstruction<1, 1, 1> {
+class LUint32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
explicit LUint32ToDouble(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -2050,7 +2105,7 @@ class LUint32ToDouble: public LTemplateInstruction<1, 1, 1> {
};
-class LNumberTagI: public LTemplateInstruction<1, 1, 0> {
+class LNumberTagI V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LNumberTagI(LOperand* value) {
inputs_[0] = value;
@@ -2062,7 +2117,7 @@ class LNumberTagI: public LTemplateInstruction<1, 1, 0> {
};
-class LNumberTagU: public LTemplateInstruction<1, 1, 1> {
+class LNumberTagU V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LNumberTagU(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -2076,7 +2131,7 @@ class LNumberTagU: public LTemplateInstruction<1, 1, 1> {
};
-class LNumberTagD: public LTemplateInstruction<1, 1, 1> {
+class LNumberTagD V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LNumberTagD(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -2092,7 +2147,7 @@ class LNumberTagD: public LTemplateInstruction<1, 1, 1> {
// Sometimes truncating conversion from a tagged value to an int32.
-class LDoubleToI: public LTemplateInstruction<1, 1, 1> {
+class LDoubleToI V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LDoubleToI(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -2109,7 +2164,7 @@ class LDoubleToI: public LTemplateInstruction<1, 1, 1> {
};
-class LDoubleToSmi: public LTemplateInstruction<1, 1, 0> {
+class LDoubleToSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LDoubleToSmi(LOperand* value) {
inputs_[0] = value;
@@ -2123,7 +2178,7 @@ class LDoubleToSmi: public LTemplateInstruction<1, 1, 0> {
// Truncating conversion from a tagged value to an int32.
-class LTaggedToI: public LTemplateInstruction<1, 1, 1> {
+class LTaggedToI V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LTaggedToI(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -2140,32 +2195,7 @@ class LTaggedToI: public LTemplateInstruction<1, 1, 1> {
};
-// Truncating conversion from a tagged value to an int32.
-class LTaggedToINoSSE2: public LTemplateInstruction<1, 1, 3> {
- public:
- LTaggedToINoSSE2(LOperand* value,
- LOperand* temp1,
- LOperand* temp2,
- LOperand* temp3) {
- inputs_[0] = value;
- temps_[0] = temp1;
- temps_[1] = temp2;
- temps_[2] = temp3;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* scratch() { return temps_[0]; }
- LOperand* scratch2() { return temps_[1]; }
- LOperand* scratch3() { return temps_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(TaggedToINoSSE2, "tagged-to-i-nosse2")
- DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
-
- bool truncating() { return hydrogen()->CanTruncateToInt32(); }
-};
-
-
-class LSmiTag: public LTemplateInstruction<1, 1, 0> {
+class LSmiTag V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LSmiTag(LOperand* value) {
inputs_[0] = value;
@@ -2177,7 +2207,7 @@ class LSmiTag: public LTemplateInstruction<1, 1, 0> {
};
-class LNumberUntagD: public LTemplateInstruction<1, 1, 1> {
+class LNumberUntagD V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
explicit LNumberUntagD(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -2187,14 +2217,12 @@ class LNumberUntagD: public LTemplateInstruction<1, 1, 1> {
LOperand* value() { return inputs_[0]; }
LOperand* temp() { return temps_[0]; }
- virtual bool ClobbersDoubleRegisters() const { return false; }
-
DECLARE_CONCRETE_INSTRUCTION(NumberUntagD, "double-untag")
DECLARE_HYDROGEN_ACCESSOR(Change);
};
-class LSmiUntag: public LTemplateInstruction<1, 1, 0> {
+class LSmiUntag V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
LSmiUntag(LOperand* value, bool needs_check)
: needs_check_(needs_check) {
@@ -2212,7 +2240,7 @@ class LSmiUntag: public LTemplateInstruction<1, 1, 0> {
};
-class LStoreNamedField: public LTemplateInstruction<0, 2, 2> {
+class LStoreNamedField V8_FINAL : public LTemplateInstruction<0, 2, 2> {
public:
LStoreNamedField(LOperand* obj,
LOperand* val,
@@ -2232,7 +2260,7 @@ class LStoreNamedField: public LTemplateInstruction<0, 2, 2> {
DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
Handle<Map> transition() const { return hydrogen()->transition_map(); }
Representation representation() const {
@@ -2241,7 +2269,7 @@ class LStoreNamedField: public LTemplateInstruction<0, 2, 2> {
};
-class LStoreNamedGeneric: public LTemplateInstruction<0, 3, 0> {
+class LStoreNamedGeneric V8_FINAL : public LTemplateInstruction<0, 3, 0> {
public:
LStoreNamedGeneric(LOperand* context, LOperand* object, LOperand* value) {
inputs_[0] = context;
@@ -2256,13 +2284,13 @@ class LStoreNamedGeneric: public LTemplateInstruction<0, 3, 0> {
DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
Handle<Object> name() const { return hydrogen()->name(); }
StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
};
-class LStoreKeyed: public LTemplateInstruction<0, 3, 0> {
+class LStoreKeyed V8_FINAL : public LTemplateInstruction<0, 3, 0> {
public:
LStoreKeyed(LOperand* obj, LOperand* key, LOperand* val) {
inputs_[0] = obj;
@@ -2281,13 +2309,13 @@ class LStoreKeyed: public LTemplateInstruction<0, 3, 0> {
DECLARE_CONCRETE_INSTRUCTION(StoreKeyed, "store-keyed")
DECLARE_HYDROGEN_ACCESSOR(StoreKeyed)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
uint32_t additional_index() const { return hydrogen()->index_offset(); }
bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); }
};
-class LStoreKeyedGeneric: public LTemplateInstruction<0, 4, 0> {
+class LStoreKeyedGeneric V8_FINAL : public LTemplateInstruction<0, 4, 0> {
public:
LStoreKeyedGeneric(LOperand* context,
LOperand* object,
@@ -2307,13 +2335,13 @@ class LStoreKeyedGeneric: public LTemplateInstruction<0, 4, 0> {
DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
};
-class LTransitionElementsKind: public LTemplateInstruction<0, 2, 2> {
+class LTransitionElementsKind V8_FINAL : public LTemplateInstruction<0, 2, 2> {
public:
LTransitionElementsKind(LOperand* object,
LOperand* context,
@@ -2334,7 +2362,7 @@ class LTransitionElementsKind: public LTemplateInstruction<0, 2, 2> {
"transition-elements-kind")
DECLARE_HYDROGEN_ACCESSOR(TransitionElementsKind)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
Handle<Map> original_map() { return hydrogen()->original_map(); }
Handle<Map> transitioned_map() { return hydrogen()->transitioned_map(); }
@@ -2343,7 +2371,7 @@ class LTransitionElementsKind: public LTemplateInstruction<0, 2, 2> {
};
-class LTrapAllocationMemento : public LTemplateInstruction<0, 1, 1> {
+class LTrapAllocationMemento V8_FINAL : public LTemplateInstruction<0, 1, 1> {
public:
LTrapAllocationMemento(LOperand* object,
LOperand* temp) {
@@ -2359,7 +2387,7 @@ class LTrapAllocationMemento : public LTemplateInstruction<0, 1, 1> {
};
-class LStringAdd: public LTemplateInstruction<1, 3, 0> {
+class LStringAdd V8_FINAL : public LTemplateInstruction<1, 3, 0> {
public:
LStringAdd(LOperand* context, LOperand* left, LOperand* right) {
inputs_[0] = context;
@@ -2376,7 +2404,7 @@ class LStringAdd: public LTemplateInstruction<1, 3, 0> {
};
-class LStringCharCodeAt: public LTemplateInstruction<1, 3, 0> {
+class LStringCharCodeAt V8_FINAL : public LTemplateInstruction<1, 3, 0> {
public:
LStringCharCodeAt(LOperand* context, LOperand* string, LOperand* index) {
inputs_[0] = context;
@@ -2393,7 +2421,7 @@ class LStringCharCodeAt: public LTemplateInstruction<1, 3, 0> {
};
-class LStringCharFromCode: public LTemplateInstruction<1, 2, 0> {
+class LStringCharFromCode V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LStringCharFromCode(LOperand* context, LOperand* char_code) {
inputs_[0] = context;
@@ -2408,20 +2436,20 @@ class LStringCharFromCode: public LTemplateInstruction<1, 2, 0> {
};
-class LCheckFunction: public LTemplateInstruction<0, 1, 0> {
+class LCheckValue V8_FINAL : public LTemplateInstruction<0, 1, 0> {
public:
- explicit LCheckFunction(LOperand* value) {
+ explicit LCheckValue(LOperand* value) {
inputs_[0] = value;
}
LOperand* value() { return inputs_[0]; }
- DECLARE_CONCRETE_INSTRUCTION(CheckFunction, "check-function")
- DECLARE_HYDROGEN_ACCESSOR(CheckFunction)
+ DECLARE_CONCRETE_INSTRUCTION(CheckValue, "check-value")
+ DECLARE_HYDROGEN_ACCESSOR(CheckValue)
};
-class LCheckInstanceType: public LTemplateInstruction<0, 1, 1> {
+class LCheckInstanceType V8_FINAL : public LTemplateInstruction<0, 1, 1> {
public:
LCheckInstanceType(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -2436,7 +2464,7 @@ class LCheckInstanceType: public LTemplateInstruction<0, 1, 1> {
};
-class LCheckMaps: public LTemplateInstruction<0, 1, 0> {
+class LCheckMaps V8_FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LCheckMaps(LOperand* value) {
inputs_[0] = value;
@@ -2449,7 +2477,7 @@ class LCheckMaps: public LTemplateInstruction<0, 1, 0> {
};
-class LCheckSmi: public LTemplateInstruction<1, 1, 0> {
+class LCheckSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LCheckSmi(LOperand* value) {
inputs_[0] = value;
@@ -2461,7 +2489,7 @@ class LCheckSmi: public LTemplateInstruction<1, 1, 0> {
};
-class LClampDToUint8: public LTemplateInstruction<1, 1, 0> {
+class LClampDToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LClampDToUint8(LOperand* value) {
inputs_[0] = value;
@@ -2473,7 +2501,7 @@ class LClampDToUint8: public LTemplateInstruction<1, 1, 0> {
};
-class LClampIToUint8: public LTemplateInstruction<1, 1, 0> {
+class LClampIToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LClampIToUint8(LOperand* value) {
inputs_[0] = value;
@@ -2485,7 +2513,7 @@ class LClampIToUint8: public LTemplateInstruction<1, 1, 0> {
};
-class LClampTToUint8: public LTemplateInstruction<1, 1, 1> {
+class LClampTToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LClampTToUint8(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -2499,7 +2527,7 @@ class LClampTToUint8: public LTemplateInstruction<1, 1, 1> {
// Truncating conversion from a tagged value to an int32.
-class LClampTToUint8NoSSE2: public LTemplateInstruction<1, 1, 3> {
+class LClampTToUint8NoSSE2 V8_FINAL : public LTemplateInstruction<1, 1, 3> {
public:
LClampTToUint8NoSSE2(LOperand* unclamped,
LOperand* temp1,
@@ -2522,7 +2550,7 @@ class LClampTToUint8NoSSE2: public LTemplateInstruction<1, 1, 3> {
};
-class LCheckNonSmi: public LTemplateInstruction<0, 1, 0> {
+class LCheckNonSmi V8_FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LCheckNonSmi(LOperand* value) {
inputs_[0] = value;
@@ -2535,7 +2563,7 @@ class LCheckNonSmi: public LTemplateInstruction<0, 1, 0> {
};
-class LAllocate: public LTemplateInstruction<1, 2, 1> {
+class LAllocate V8_FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LAllocate(LOperand* context, LOperand* size, LOperand* temp) {
inputs_[0] = context;
@@ -2552,7 +2580,7 @@ class LAllocate: public LTemplateInstruction<1, 2, 1> {
};
-class LRegExpLiteral: public LTemplateInstruction<1, 1, 0> {
+class LRegExpLiteral V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LRegExpLiteral(LOperand* context) {
inputs_[0] = context;
@@ -2565,7 +2593,7 @@ class LRegExpLiteral: public LTemplateInstruction<1, 1, 0> {
};
-class LFunctionLiteral: public LTemplateInstruction<1, 1, 0> {
+class LFunctionLiteral V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LFunctionLiteral(LOperand* context) {
inputs_[0] = context;
@@ -2578,7 +2606,7 @@ class LFunctionLiteral: public LTemplateInstruction<1, 1, 0> {
};
-class LToFastProperties: public LTemplateInstruction<1, 1, 0> {
+class LToFastProperties V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LToFastProperties(LOperand* value) {
inputs_[0] = value;
@@ -2591,7 +2619,7 @@ class LToFastProperties: public LTemplateInstruction<1, 1, 0> {
};
-class LTypeof: public LTemplateInstruction<1, 2, 0> {
+class LTypeof V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LTypeof(LOperand* context, LOperand* value) {
inputs_[0] = context;
@@ -2605,7 +2633,7 @@ class LTypeof: public LTemplateInstruction<1, 2, 0> {
};
-class LTypeofIsAndBranch: public LControlInstruction<1, 0> {
+class LTypeofIsAndBranch V8_FINAL : public LControlInstruction<1, 0> {
public:
explicit LTypeofIsAndBranch(LOperand* value) {
inputs_[0] = value;
@@ -2618,20 +2646,20 @@ class LTypeofIsAndBranch: public LControlInstruction<1, 0> {
Handle<String> type_literal() { return hydrogen()->type_literal(); }
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LOsrEntry: public LTemplateInstruction<0, 0, 0> {
+class LOsrEntry V8_FINAL : public LTemplateInstruction<0, 0, 0> {
public:
- LOsrEntry() {}
-
- virtual bool HasInterestingComment(LCodeGen* gen) const { return false; }
+ virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+ return false;
+ }
DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry")
};
-class LStackCheck: public LTemplateInstruction<0, 1, 0> {
+class LStackCheck V8_FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LStackCheck(LOperand* context) {
inputs_[0] = context;
@@ -2649,7 +2677,7 @@ class LStackCheck: public LTemplateInstruction<0, 1, 0> {
};
-class LForInPrepareMap: public LTemplateInstruction<1, 2, 0> {
+class LForInPrepareMap V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LForInPrepareMap(LOperand* context, LOperand* object) {
inputs_[0] = context;
@@ -2663,7 +2691,7 @@ class LForInPrepareMap: public LTemplateInstruction<1, 2, 0> {
};
-class LForInCacheArray: public LTemplateInstruction<1, 1, 0> {
+class LForInCacheArray V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LForInCacheArray(LOperand* map) {
inputs_[0] = map;
@@ -2679,7 +2707,7 @@ class LForInCacheArray: public LTemplateInstruction<1, 1, 0> {
};
-class LCheckMapValue: public LTemplateInstruction<0, 2, 0> {
+class LCheckMapValue V8_FINAL : public LTemplateInstruction<0, 2, 0> {
public:
LCheckMapValue(LOperand* value, LOperand* map) {
inputs_[0] = value;
@@ -2693,7 +2721,7 @@ class LCheckMapValue: public LTemplateInstruction<0, 2, 0> {
};
-class LLoadFieldByIndex: public LTemplateInstruction<1, 2, 0> {
+class LLoadFieldByIndex V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LLoadFieldByIndex(LOperand* object, LOperand* index) {
inputs_[0] = object;
@@ -2708,7 +2736,7 @@ class LLoadFieldByIndex: public LTemplateInstruction<1, 2, 0> {
class LChunkBuilder;
-class LPlatformChunk: public LChunk {
+class LPlatformChunk V8_FINAL : public LChunk {
public:
LPlatformChunk(CompilationInfo* info, HGraph* graph)
: LChunk(info, graph),
@@ -2724,7 +2752,7 @@ class LPlatformChunk: public LChunk {
};
-class LChunkBuilder BASE_EMBEDDED {
+class LChunkBuilder V8_FINAL BASE_EMBEDDED {
public:
LChunkBuilder(CompilationInfo* info, HGraph* graph, LAllocator* allocator)
: chunk_(NULL),
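The V8_FINAL and V8_OVERRIDE annotations added throughout this header are compatibility macros that expand to the C++11 `final` and `override` keywords where the compiler supports them and to nothing otherwise. A minimal sketch of how such macros are typically defined, assuming plain __cplusplus detection (the real definitions live in V8's configuration headers and also probe compiler-specific extensions):

    // Hypothetical fallback definitions, for illustration only.
    #if __cplusplus >= 201103L
    # define V8_FINAL final        // class or method cannot be overridden
    # define V8_OVERRIDE override  // method must override a virtual base
    #else
    # define V8_FINAL
    # define V8_OVERRIDE
    #endif
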
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.cc b/deps/v8/src/ia32/macro-assembler-ia32.cc
index f86820cba..b65d32843 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.cc
+++ b/deps/v8/src/ia32/macro-assembler-ia32.cc
@@ -48,6 +48,7 @@ MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
allow_stub_calls_(true),
has_frame_(false) {
if (isolate() != NULL) {
+ // TODO(titzer): should we just use a null handle here instead?
code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
isolate());
}
@@ -215,6 +216,236 @@ void MacroAssembler::ClampUint8(Register reg) {
}
+void MacroAssembler::SlowTruncateToI(Register result_reg,
+ Register input_reg,
+ int offset) {
+ DoubleToIStub stub(input_reg, result_reg, offset, true);
+ call(stub.GetCode(isolate()), RelocInfo::CODE_TARGET);
+}
+
+
+void MacroAssembler::TruncateDoubleToI(Register result_reg,
+ XMMRegister input_reg) {
+ Label done;
+ cvttsd2si(result_reg, Operand(input_reg));
+ cmp(result_reg, 0x80000000u);
+ j(not_equal, &done, Label::kNear);
+
+ sub(esp, Immediate(kDoubleSize));
+ movdbl(MemOperand(esp, 0), input_reg);
+ SlowTruncateToI(result_reg, esp, 0);
+ add(esp, Immediate(kDoubleSize));
+ bind(&done);
+}
+
+
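TruncateDoubleToI leans on an x86 detail: cvttsd2si produces the sentinel 0x80000000 ("integer indefinite") whenever the double cannot be represented as an int32, i.e. on overflow or NaN. The check is deliberately conservative: an input that is genuinely kMinInt also matches the sentinel and simply re-derives the same value through DoubleToIStub. A host-side sketch of the equivalent logic (an illustration, not V8 code; the slow path mirrors the ECMAScript ToInt32 wrap-around):

    #include <cmath>
    #include <cstdint>

    int32_t TruncateDoubleToInt32(double d) {
      // Fast path: the value fits in int32, so plain truncation is exact.
      if (d >= -2147483648.0 && d < 2147483648.0)
        return static_cast<int32_t>(d);
      // Slow path, as in DoubleToIStub: ToInt32 maps NaN/Inf to 0 and
      // wraps everything else modulo 2^32.
      if (std::isnan(d) || std::isinf(d)) return 0;
      double m = std::fmod(std::trunc(d), 4294967296.0);
      if (m < 0) m += 4294967296.0;
      return static_cast<int32_t>(static_cast<uint32_t>(m));
    }
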
+void MacroAssembler::TruncateX87TOSToI(Register result_reg) {
+ sub(esp, Immediate(kDoubleSize));
+ fst_d(MemOperand(esp, 0));
+ SlowTruncateToI(result_reg, esp, 0);
+ add(esp, Immediate(kDoubleSize));
+}
+
+
+void MacroAssembler::X87TOSToI(Register result_reg,
+ MinusZeroMode minus_zero_mode,
+ Label* conversion_failed,
+ Label::Distance dst) {
+ Label done;
+ sub(esp, Immediate(kPointerSize));
+ fist_s(MemOperand(esp, 0));
+ fld(0);
+ fild_s(MemOperand(esp, 0));
+ pop(result_reg);
+ FCmp();
+ j(not_equal, conversion_failed, dst);
+ j(parity_even, conversion_failed, dst);
+ if (minus_zero_mode == FAIL_ON_MINUS_ZERO) {
+ test(result_reg, Operand(result_reg));
+ j(not_zero, &done, Label::kNear);
+    // To check for minus zero, store the value as a 32-bit float and
+    // test its raw bits: only -0.0 has the sign bit set, so its bits
+    // are non-zero while +0.0 is all zeros.
+ sub(esp, Immediate(kPointerSize));
+ fst_s(MemOperand(esp, 0));
+ pop(result_reg);
+ test(result_reg, Operand(result_reg));
+ j(not_zero, conversion_failed, dst);
+ }
+ bind(&done);
+}
+
+
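The minus-zero probe in X87TOSToI above (and again in TaggedToI below) works on raw bits: an ordinary float compare cannot distinguish -0.0 from +0.0, but the single-precision encoding can, since only -0.0 sets the sign bit. A standalone sketch of the test:

    #include <cstdint>
    #include <cstring>

    // Equivalent of storing the x87 top-of-stack as a 32-bit float and
    // testing the popped integer bits: non-zero bits for a zero value
    // can only mean the sign bit, i.e. -0.0f.
    bool IsMinusZero(float f) {
      uint32_t bits;
      std::memcpy(&bits, &f, sizeof(bits));
      return bits == 0x80000000u;
    }
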
+void MacroAssembler::DoubleToI(Register result_reg,
+ XMMRegister input_reg,
+ XMMRegister scratch,
+ MinusZeroMode minus_zero_mode,
+ Label* conversion_failed,
+ Label::Distance dst) {
+ ASSERT(!input_reg.is(scratch));
+ cvttsd2si(result_reg, Operand(input_reg));
+ cvtsi2sd(scratch, Operand(result_reg));
+ ucomisd(scratch, input_reg);
+ j(not_equal, conversion_failed, dst);
+ j(parity_even, conversion_failed, dst); // NaN.
+ if (minus_zero_mode == FAIL_ON_MINUS_ZERO) {
+ Label done;
+    // The integer converted back is equal to the original, so the
+    // only remaining failure case is a -0 input.
+ test(result_reg, Operand(result_reg));
+ j(not_zero, &done, Label::kNear);
+ movmskpd(result_reg, input_reg);
+ // Bit 0 contains the sign of the double in input_reg.
+ // If input was positive, we are ok and return 0, otherwise
+ // jump to conversion_failed.
+ and_(result_reg, 1);
+ j(not_zero, conversion_failed, dst);
+ bind(&done);
+ }
+}
+
+
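DoubleToI accepts an input only when truncation is lossless: the truncated integer is converted back to a double and compared against the original with ucomisd, the parity flag catches NaN, and movmskpd exposes the sign bit for the optional -0 rejection. A host-side sketch of the same contract (illustrative only):

    #include <cmath>
    #include <cstdint>

    bool DoubleToInt32Exact(double in, bool fail_on_minus_zero,
                            int32_t* out) {
      if (std::isnan(in)) return false;                  // parity_even
      if (in < -2147483648.0 || in > 2147483647.0) return false;
      int32_t i = static_cast<int32_t>(in);              // cvttsd2si
      if (static_cast<double>(i) != in) return false;    // lost precision
      if (fail_on_minus_zero && i == 0 && std::signbit(in)) return false;
      *out = i;
      return true;
    }
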
+void MacroAssembler::TruncateHeapNumberToI(Register result_reg,
+ Register input_reg) {
+ Label done, slow_case;
+
+ if (CpuFeatures::IsSupported(SSE3)) {
+ CpuFeatureScope scope(this, SSE3);
+ Label convert;
+    // Use the more powerful fisttp conversion when SSE3 is available.
+ // Load x87 register with heap number.
+ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset));
+ // Get exponent alone and check for too-big exponent.
+ mov(result_reg, FieldOperand(input_reg, HeapNumber::kExponentOffset));
+ and_(result_reg, HeapNumber::kExponentMask);
+ const uint32_t kTooBigExponent =
+ (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
+ cmp(Operand(result_reg), Immediate(kTooBigExponent));
+ j(greater_equal, &slow_case, Label::kNear);
+
+    // Reserve space for the 64-bit answer.
+ sub(Operand(esp), Immediate(kDoubleSize));
+ // Do conversion, which cannot fail because we checked the exponent.
+ fisttp_d(Operand(esp, 0));
+ mov(result_reg, Operand(esp, 0)); // Low word of answer is the result.
+ add(Operand(esp), Immediate(kDoubleSize));
+ jmp(&done, Label::kNear);
+
+ // Slow case.
+ bind(&slow_case);
+ if (input_reg.is(result_reg)) {
+      // Input is clobbered. Restore the number from the FPU stack.
+ sub(Operand(esp), Immediate(kDoubleSize));
+ fstp_d(Operand(esp, 0));
+ SlowTruncateToI(result_reg, esp, 0);
+ add(esp, Immediate(kDoubleSize));
+ } else {
+ fstp(0);
+ SlowTruncateToI(result_reg, input_reg);
+ }
+ } else if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatureScope scope(this, SSE2);
+ movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
+ cvttsd2si(result_reg, Operand(xmm0));
+ cmp(result_reg, 0x80000000u);
+ j(not_equal, &done, Label::kNear);
+    // Check if the input was 0x80000000 (kMinInt).
+    // If not (or if it was NaN), the truncation overflowed and we
+    // take the slow case.
+ ExternalReference min_int = ExternalReference::address_of_min_int();
+ ucomisd(xmm0, Operand::StaticVariable(min_int));
+ j(not_equal, &slow_case, Label::kNear);
+ j(parity_even, &slow_case, Label::kNear); // NaN.
+ jmp(&done, Label::kNear);
+
+ // Slow case.
+ bind(&slow_case);
+ if (input_reg.is(result_reg)) {
+ // Input is clobbered. Restore number from double scratch.
+ sub(esp, Immediate(kDoubleSize));
+ movdbl(MemOperand(esp, 0), xmm0);
+ SlowTruncateToI(result_reg, esp, 0);
+ add(esp, Immediate(kDoubleSize));
+ } else {
+ SlowTruncateToI(result_reg, input_reg);
+ }
+ } else {
+ SlowTruncateToI(result_reg, input_reg);
+ }
+ bind(&done);
+}
+
+
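The kTooBigExponent guard in TruncateHeapNumberToI encodes the range check directly against the IEEE-754 exponent field: fisttp_d truncates faithfully for any magnitude below 2^63, so it suffices to reject biased exponents of kExponentBias + 63 and above, which also covers NaN and infinity (exponent field all ones). A sketch of the predicate, assuming the usual bias of 1023:

    #include <cstdint>
    #include <cstring>

    bool SafeForFisttp64(double d) {
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof(bits));
      int biased_exponent = static_cast<int>((bits >> 52) & 0x7FF);
      return biased_exponent < 1023 + 63;  // kExponentBias + 63
    }
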
+void MacroAssembler::TaggedToI(Register result_reg,
+ Register input_reg,
+ XMMRegister temp,
+ MinusZeroMode minus_zero_mode,
+ Label* lost_precision) {
+ Label done;
+ ASSERT(!temp.is(xmm0));
+
+ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
+ isolate()->factory()->heap_number_map());
+ j(not_equal, lost_precision, Label::kNear);
+
+ if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
+ ASSERT(!temp.is(no_xmm_reg));
+ CpuFeatureScope scope(this, SSE2);
+
+ movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
+ cvttsd2si(result_reg, Operand(xmm0));
+ cvtsi2sd(temp, Operand(result_reg));
+ ucomisd(xmm0, temp);
+ RecordComment("Deferred TaggedToI: lost precision");
+ j(not_equal, lost_precision, Label::kNear);
+ RecordComment("Deferred TaggedToI: NaN");
+ j(parity_even, lost_precision, Label::kNear);
+ if (minus_zero_mode == FAIL_ON_MINUS_ZERO) {
+ test(result_reg, Operand(result_reg));
+ j(not_zero, &done, Label::kNear);
+ movmskpd(result_reg, xmm0);
+ and_(result_reg, 1);
+ RecordComment("Deferred TaggedToI: minus zero");
+ j(not_zero, lost_precision, Label::kNear);
+ }
+ } else {
+    // TODO(olivf): Converting a number on the FPU is actually quite
+    // slow. We should first try a fast conversion and then bail out
+    // to this slow case.
+ Label lost_precision_pop, zero_check;
+ Label* lost_precision_int = (minus_zero_mode == FAIL_ON_MINUS_ZERO)
+ ? &lost_precision_pop : lost_precision;
+ sub(esp, Immediate(kPointerSize));
+ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset));
+ if (minus_zero_mode == FAIL_ON_MINUS_ZERO) fld(0);
+ fist_s(MemOperand(esp, 0));
+ fild_s(MemOperand(esp, 0));
+ FCmp();
+ pop(result_reg);
+ j(not_equal, lost_precision_int, Label::kNear);
+ j(parity_even, lost_precision_int, Label::kNear); // NaN.
+ if (minus_zero_mode == FAIL_ON_MINUS_ZERO) {
+ test(result_reg, Operand(result_reg));
+ j(zero, &zero_check, Label::kNear);
+ fstp(0);
+ jmp(&done, Label::kNear);
+ bind(&zero_check);
+      // To check for minus zero, store the value as a 32-bit float and
+      // test its raw bits: only -0.0 has the sign bit set.
+ sub(esp, Immediate(kPointerSize));
+ fstp_s(Operand(esp, 0));
+ pop(result_reg);
+ test(result_reg, Operand(result_reg));
+ j(zero, &done, Label::kNear);
+ jmp(lost_precision, Label::kNear);
+
+ bind(&lost_precision_pop);
+ fstp(0);
+ jmp(lost_precision, Label::kNear);
+ }
+ }
+ bind(&done);
+}
+
+
+
static double kUint32Bias =
static_cast<double>(static_cast<uint32_t>(0xFFFFFFFF)) + 1;
@@ -1977,50 +2208,15 @@ void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
}
-// If true, a Handle<T> returned by value from a function with cdecl calling
-// convention will be returned directly as a value of location_ field in a
-// register eax.
-// If false, it is returned as a pointer to a preallocated by caller memory
-// region. Pointer to this region should be passed to a function as an
-// implicit first argument.
-#if defined(USING_BSD_ABI) || defined(__MINGW32__) || defined(__CYGWIN__)
-static const bool kReturnHandlesDirectly = true;
-#else
-static const bool kReturnHandlesDirectly = false;
-#endif
-
-
-Operand ApiParameterOperand(int index, bool returns_handle) {
- int offset = (index +(kReturnHandlesDirectly || !returns_handle ? 0 : 1));
- return Operand(esp, offset * kPointerSize);
+Operand ApiParameterOperand(int index) {
+ return Operand(esp, index * kPointerSize);
}
-void MacroAssembler::PrepareCallApiFunction(int argc, bool returns_handle) {
- if (kReturnHandlesDirectly || !returns_handle) {
- EnterApiExitFrame(argc);
- // When handles are returned directly we don't have to allocate extra
- // space for and pass an out parameter.
- if (emit_debug_code()) {
- mov(esi, Immediate(BitCast<int32_t>(kZapValue)));
- }
- } else {
- // We allocate two additional slots: return value and pointer to it.
- EnterApiExitFrame(argc + 2);
-
- // The argument slots are filled as follows:
- //
- // n + 1: output slot
- // n: arg n
- // ...
- // 1: arg1
- // 0: pointer to the output slot
-
- lea(esi, Operand(esp, (argc + 1) * kPointerSize));
- mov(Operand(esp, 0 * kPointerSize), esi);
- if (emit_debug_code()) {
- mov(Operand(esi, 0), Immediate(0));
- }
+void MacroAssembler::PrepareCallApiFunction(int argc) {
+ EnterApiExitFrame(argc);
+ if (emit_debug_code()) {
+ mov(esi, Immediate(BitCast<int32_t>(kZapValue)));
}
}
@@ -2029,7 +2225,6 @@ void MacroAssembler::CallApiFunctionAndReturn(Address function_address,
Address thunk_address,
Operand thunk_last_arg,
int stack_space,
- bool returns_handle,
int return_value_offset) {
ExternalReference next_address =
ExternalReference::handle_scope_next_address(isolate());
@@ -2085,21 +2280,6 @@ void MacroAssembler::CallApiFunctionAndReturn(Address function_address,
}
Label prologue;
- if (returns_handle) {
- if (!kReturnHandlesDirectly) {
- // PrepareCallApiFunction saved pointer to the output slot into
- // callee-save register esi.
- mov(eax, Operand(esi, 0));
- }
- Label empty_handle;
- // Check if the result handle holds 0.
- test(eax, eax);
- j(zero, &empty_handle);
- // It was non-zero. Dereference to get the result value.
- mov(eax, Operand(eax, 0));
- jmp(&prologue);
- bind(&empty_handle);
- }
// Load the value from ReturnValue
mov(eax, Operand(ebp, return_value_offset * kPointerSize));
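With the handle-returning convention gone, every API callback reports its result through the ReturnValue slot, which is exactly what the stub loads from the frame above. A minimal sketch of the callback shape this assumes, written against the v8 3.21-era public API (names are illustrative):

    #include "v8.h"

    // Void-returning accessor: the result goes through GetReturnValue()
    // rather than a by-value Handle, so the stub needs no ABI-specific
    // handling of the return register.
    static void LengthGetter(v8::Local<v8::String> property,
                             const v8::PropertyCallbackInfo<v8::Value>& info) {
      info.GetReturnValue().Set(v8::Integer::New(42));
    }
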
@@ -2618,6 +2798,8 @@ void MacroAssembler::Ret(int bytes_dropped, Register scratch) {
void MacroAssembler::VerifyX87StackDepth(uint32_t depth) {
// Make sure the floating point stack is either empty or has depth items.
ASSERT(depth <= 7);
+  // Verifying the X87 stack is very expensive, so this helper must
+  // only be called when slow asserts are enabled.
+  ASSERT(FLAG_debug_code && FLAG_enable_slow_asserts);
// The top-of-stack (tos) is 7 if there is one item pushed.
int tos = (8 - depth) % 8;
@@ -2774,6 +2956,11 @@ void MacroAssembler::Abort(BailoutReason reason) {
RecordComment("Abort message: ");
RecordComment(msg);
}
+
+ if (FLAG_trap_on_abort) {
+ int3();
+ return;
+ }
#endif
push(eax);
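The new --trap-on-abort path swaps the Abort runtime call for the one-byte x86 breakpoint, so a native debugger halts at the exact failing site instead of deep inside the abort machinery. The generated instruction is the ordinary software breakpoint, equivalent to this GCC/Clang inline-asm sketch:

    // What the masm int3() call emits, expressed as host inline assembly.
    static inline void Trap() { __asm__ volatile("int3"); }
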
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.h b/deps/v8/src/ia32/macro-assembler-ia32.h
index d537b0b2c..e4e4533bf 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.h
+++ b/deps/v8/src/ia32/macro-assembler-ia32.h
@@ -474,6 +474,21 @@ class MacroAssembler: public Assembler {
XMMRegister scratch_reg,
Register result_reg);
+ void SlowTruncateToI(Register result_reg, Register input_reg,
+ int offset = HeapNumber::kValueOffset - kHeapObjectTag);
+
+ void TruncateHeapNumberToI(Register result_reg, Register input_reg);
+ void TruncateDoubleToI(Register result_reg, XMMRegister input_reg);
+ void TruncateX87TOSToI(Register result_reg);
+
+ void DoubleToI(Register result_reg, XMMRegister input_reg,
+ XMMRegister scratch, MinusZeroMode minus_zero_mode,
+ Label* conversion_failed, Label::Distance dst = Label::kFar);
+ void X87TOSToI(Register result_reg, MinusZeroMode minus_zero_mode,
+ Label* conversion_failed, Label::Distance dst = Label::kFar);
+
+ void TaggedToI(Register result_reg, Register input_reg, XMMRegister temp,
+ MinusZeroMode minus_zero_mode, Label* lost_precision);
// Smi tagging support.
void SmiTag(Register reg) {
@@ -782,7 +797,7 @@ class MacroAssembler: public Assembler {
// Arguments must be stored in ApiParameterOperand(0), ApiParameterOperand(1)
// etc. Saves context (esi). If space was reserved for return value then
// stores the pointer to the reserved slot into esi.
- void PrepareCallApiFunction(int argc, bool returns_handle);
+ void PrepareCallApiFunction(int argc);
// Calls an API function. Allocates HandleScope, extracts returned value
// from handle and propagates exceptions. Clobbers ebx, edi and
@@ -792,7 +807,6 @@ class MacroAssembler: public Assembler {
Address thunk_address,
Operand thunk_last_arg,
int stack_space,
- bool returns_handle,
int return_value_offset_from_ebp);
// Jump to a runtime routine.
@@ -1039,7 +1053,7 @@ inline Operand GlobalObjectOperand() {
// Generates an Operand for saving parameters after PrepareCallApiFunction.
-Operand ApiParameterOperand(int index, bool returns_handle);
+Operand ApiParameterOperand(int index);
#ifdef GENERATED_CODE_COVERAGE
diff --git a/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc b/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc
index dfcc86956..d371c456c 100644
--- a/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc
+++ b/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc
@@ -711,7 +711,7 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
// position registers.
__ mov(Operand(ebp, kInputStartMinusOne), eax);
-#ifdef WIN32
+#if V8_OS_WIN
// Ensure that we write to each stack page, in order. Skipping a page
// on Windows can cause segmentation faults. Assuming page size is 4k.
const int kPageSize = 4096;
@@ -721,7 +721,7 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
i += kRegistersPerPage) {
__ mov(register_location(i), eax); // One write every page.
}
-#endif // WIN32
+#endif // V8_OS_WIN
Label load_char_start_regexp, start_regexp;
// Load newline if index is at start, previous character otherwise.
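The page-touching loop this hunk re-guards exists because Windows commits stack memory one guard page at a time; a frame that skips a page can fault beyond the guard. A hypothetical illustration of the rule the loop follows (4 KiB pages assumed, as in the stub):

    #include <cstddef>

    void TouchStackPages(volatile char* frame_top, size_t frame_size) {
      const size_t kPageSize = 4096;
      // One write per page, in order, so each guard page is hit in turn.
      for (size_t off = 0; off < frame_size; off += kPageSize)
        frame_top[-static_cast<ptrdiff_t>(off)] = 0;
    }
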
@@ -1099,7 +1099,6 @@ int RegExpMacroAssemblerIA32::CheckStackGuardState(Address* return_address,
Code* re_code,
Address re_frame) {
Isolate* isolate = frame_entry<Isolate*>(re_frame, kIsolate);
- ASSERT(isolate == Isolate::Current());
if (isolate->stack_guard()->IsStackOverflow()) {
isolate->StackOverflow();
return EXCEPTION;
diff --git a/deps/v8/src/ia32/stub-cache-ia32.cc b/deps/v8/src/ia32/stub-cache-ia32.cc
index df7ad4467..354c2fdcb 100644
--- a/deps/v8/src/ia32/stub-cache-ia32.cc
+++ b/deps/v8/src/ia32/stub-cache-ia32.cc
@@ -392,6 +392,11 @@ static void PushInterceptorArguments(MacroAssembler* masm,
Register holder,
Register name,
Handle<JSObject> holder_obj) {
+ STATIC_ASSERT(StubCache::kInterceptorArgsNameIndex == 0);
+ STATIC_ASSERT(StubCache::kInterceptorArgsInfoIndex == 1);
+ STATIC_ASSERT(StubCache::kInterceptorArgsThisIndex == 2);
+ STATIC_ASSERT(StubCache::kInterceptorArgsHolderIndex == 3);
+ STATIC_ASSERT(StubCache::kInterceptorArgsLength == 4);
__ push(name);
Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor());
ASSERT(!masm->isolate()->heap()->InNewSpace(*interceptor));
@@ -400,8 +405,6 @@ static void PushInterceptorArguments(MacroAssembler* masm,
__ push(scratch);
__ push(receiver);
__ push(holder);
- __ push(FieldOperand(scratch, InterceptorInfo::kDataOffset));
- __ push(Immediate(reinterpret_cast<int>(masm->isolate())));
}
@@ -415,7 +418,7 @@ static void CompileCallLoadPropertyWithInterceptor(
__ CallExternalReference(
ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorOnly),
masm->isolate()),
- 6);
+ StubCache::kInterceptorArgsLength);
}
@@ -516,38 +519,65 @@ static void GenerateFastApiCall(MacroAssembler* masm,
// Function address is a foreign pointer outside V8's heap.
Address function_address = v8::ToCData<Address>(api_call_info->callback());
- bool returns_handle =
- !CallbackTable::ReturnsVoid(masm->isolate(),
- reinterpret_cast<void*>(function_address));
- __ PrepareCallApiFunction(kApiArgc + kApiStackSpace, returns_handle);
+ __ PrepareCallApiFunction(kApiArgc + kApiStackSpace);
// v8::Arguments::implicit_args_.
- __ mov(ApiParameterOperand(2, returns_handle), eax);
+ __ mov(ApiParameterOperand(2), eax);
__ add(eax, Immediate(argc * kPointerSize));
// v8::Arguments::values_.
- __ mov(ApiParameterOperand(3, returns_handle), eax);
+ __ mov(ApiParameterOperand(3), eax);
// v8::Arguments::length_.
- __ Set(ApiParameterOperand(4, returns_handle), Immediate(argc));
+ __ Set(ApiParameterOperand(4), Immediate(argc));
// v8::Arguments::is_construct_call_.
- __ Set(ApiParameterOperand(5, returns_handle), Immediate(0));
+ __ Set(ApiParameterOperand(5), Immediate(0));
// v8::InvocationCallback's argument.
- __ lea(eax, ApiParameterOperand(2, returns_handle));
- __ mov(ApiParameterOperand(0, returns_handle), eax);
+ __ lea(eax, ApiParameterOperand(2));
+ __ mov(ApiParameterOperand(0), eax);
- Address thunk_address = returns_handle
- ? FUNCTION_ADDR(&InvokeInvocationCallback)
- : FUNCTION_ADDR(&InvokeFunctionCallback);
+ Address thunk_address = FUNCTION_ADDR(&InvokeFunctionCallback);
__ CallApiFunctionAndReturn(function_address,
thunk_address,
- ApiParameterOperand(1, returns_handle),
+ ApiParameterOperand(1),
argc + kFastApiCallArguments + 1,
- returns_handle,
kFastApiCallArguments + 1);
}
+// Generate a call to an API function.
+static void GenerateFastApiCall(MacroAssembler* masm,
+ const CallOptimization& optimization,
+ Register receiver,
+ Register scratch,
+ int argc,
+ Register* values) {
+ ASSERT(optimization.is_simple_api_call());
+ ASSERT(!receiver.is(scratch));
+
+ const int stack_space = kFastApiCallArguments + argc + 1;
+  // Save the return address.
+  __ mov(scratch, Operand(esp, 0));
+  // Allocate stack space for the call arguments.
+  __ sub(esp, Immediate(stack_space * kPointerSize));
+ // Move the return address on top of the stack.
+ __ mov(Operand(esp, 0), scratch);
+  // Write holder to the stack frame (the receiver doubles as the
+  // holder for simple API calls).
+  __ mov(Operand(esp, 1 * kPointerSize), receiver);
+ // Write receiver to stack frame.
+ int index = stack_space;
+ __ mov(Operand(esp, index-- * kPointerSize), receiver);
+ // Write the arguments to stack frame.
+ for (int i = 0; i < argc; i++) {
+ ASSERT(!receiver.is(values[i]));
+ ASSERT(!scratch.is(values[i]));
+ __ mov(Operand(esp, index-- * kPointerSize), values[i]);
+ }
+
+ GenerateFastApiCall(masm, optimization, argc);
+}
+
+
class CallInterceptorCompiler BASE_EMBEDDED {
public:
CallInterceptorCompiler(StubCompiler* stub_compiler,
@@ -706,7 +736,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
__ CallExternalReference(
ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForCall),
masm->isolate()),
- 6);
+ StubCache::kInterceptorArgsLength);
// Restore the name_ register.
__ pop(name_);
@@ -1267,7 +1297,7 @@ Register BaseLoadStubCompiler::CallbackHandlerFrontend(
Handle<JSObject> holder,
Handle<Name> name,
Label* success,
- Handle<ExecutableAccessorInfo> callback) {
+ Handle<Object> callback) {
Label miss;
Register reg = HandlerFrontendHeader(object, object_reg, holder, name, &miss);
@@ -1361,20 +1391,33 @@ void BaseLoadStubCompiler::GenerateLoadField(Register reg,
void BaseLoadStubCompiler::GenerateLoadCallback(
+ const CallOptimization& call_optimization) {
+ GenerateFastApiCall(
+ masm(), call_optimization, receiver(), scratch3(), 0, NULL);
+}
+
+
+void BaseLoadStubCompiler::GenerateLoadCallback(
Register reg,
Handle<ExecutableAccessorInfo> callback) {
// Insert additional parameters into the stack frame above return address.
ASSERT(!scratch3().is(reg));
__ pop(scratch3()); // Get return address to place it below.
+ STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 0);
+ STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == -1);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == -2);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == -3);
+ STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == -4);
+ STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == -5);
__ push(receiver()); // receiver
__ mov(scratch2(), esp);
ASSERT(!scratch2().is(reg));
- __ push(reg); // holder
// Push data from ExecutableAccessorInfo.
if (isolate()->heap()->InNewSpace(callback->data())) {
- __ mov(scratch1(), Immediate(callback));
- __ push(FieldOperand(scratch1(), ExecutableAccessorInfo::kDataOffset));
+ Register scratch = reg.is(scratch1()) ? receiver() : scratch1();
+ __ mov(scratch, Immediate(callback));
+ __ push(FieldOperand(scratch, ExecutableAccessorInfo::kDataOffset));
} else {
__ push(Immediate(Handle<Object>(callback->data(), isolate())));
}
@@ -1382,6 +1425,7 @@ void BaseLoadStubCompiler::GenerateLoadCallback(
// ReturnValue default value
__ push(Immediate(isolate()->factory()->undefined_value()));
__ push(Immediate(reinterpret_cast<int>(isolate())));
+ __ push(reg); // holder
// Save a pointer to where we pushed the arguments pointer. This will be
// passed as the const ExecutableAccessorInfo& to the C++ callback.
@@ -1400,29 +1444,23 @@ void BaseLoadStubCompiler::GenerateLoadCallback(
const int kApiArgc = 2 + 1;
Address getter_address = v8::ToCData<Address>(callback->getter());
- bool returns_handle =
- !CallbackTable::ReturnsVoid(isolate(),
- reinterpret_cast<void*>(getter_address));
- __ PrepareCallApiFunction(kApiArgc, returns_handle);
- __ mov(ApiParameterOperand(0, returns_handle), ebx); // name.
+ __ PrepareCallApiFunction(kApiArgc);
+ __ mov(ApiParameterOperand(0), ebx); // name.
__ add(ebx, Immediate(kPointerSize));
- __ mov(ApiParameterOperand(1, returns_handle), ebx); // arguments pointer.
+ __ mov(ApiParameterOperand(1), ebx); // arguments pointer.
// Emitting a stub call may try to allocate (if the code is not
// already generated). Do not allow the assembler to perform a
// garbage collection but instead return the allocation failure
// object.
- Address thunk_address = returns_handle
- ? FUNCTION_ADDR(&InvokeAccessorGetter)
- : FUNCTION_ADDR(&InvokeAccessorGetterCallback);
+ Address thunk_address = FUNCTION_ADDR(&InvokeAccessorGetterCallback);
__ CallApiFunctionAndReturn(getter_address,
thunk_address,
- ApiParameterOperand(2, returns_handle),
+ ApiParameterOperand(2),
kStackSpace,
- returns_handle,
- 6);
+ 7);
}
@@ -1529,7 +1567,7 @@ void BaseLoadStubCompiler::GenerateLoadInterceptor(
ExternalReference ref =
ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForLoad),
isolate());
- __ TailCallExternalReference(ref, 6, 1);
+ __ TailCallExternalReference(ref, StubCache::kInterceptorArgsLength, 1);
}
}
@@ -2891,6 +2929,24 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback(
}
+Handle<Code> StoreStubCompiler::CompileStoreCallback(
+ Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<Name> name,
+ const CallOptimization& call_optimization) {
+ Label success;
+ HandlerFrontend(object, receiver(), holder, name, &success);
+ __ bind(&success);
+
+ Register values[] = { value() };
+ GenerateFastApiCall(
+ masm(), call_optimization, receiver(), scratch1(), 1, values);
+
+ // Return the generated code.
+ return GetCode(kind(), Code::CALLBACKS, name);
+}
+
+
#undef __
#define __ ACCESS_MASM(masm)
@@ -2958,48 +3014,6 @@ Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
}
-Handle<Code> StoreStubCompiler::CompileStoreGlobal(
- Handle<GlobalObject> object,
- Handle<PropertyCell> cell,
- Handle<Name> name) {
- Label miss;
-
- // Check that the map of the global has not changed.
- __ cmp(FieldOperand(receiver(), HeapObject::kMapOffset),
- Immediate(Handle<Map>(object->map())));
- __ j(not_equal, &miss);
-
- // Compute the cell operand to use.
- __ mov(scratch1(), Immediate(cell));
- Operand cell_operand =
- FieldOperand(scratch1(), PropertyCell::kValueOffset);
-
- // Check that the value in the cell is not the hole. If it is, this
- // cell could have been deleted and reintroducing the global needs
- // to update the property details in the property dictionary of the
- // global object. We bail out to the runtime system to do that.
- __ cmp(cell_operand, factory()->the_hole_value());
- __ j(equal, &miss);
-
- // Store the value in the cell.
- __ mov(cell_operand, value());
- // No write barrier here, because cells are always rescanned.
-
- // Return the value (register eax).
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->named_store_global_inline(), 1);
- __ ret(0);
-
- // Handle store cache miss.
- __ bind(&miss);
- __ IncrementCounter(counters->named_store_global_inline_miss(), 1);
- TailCallBuiltin(masm(), MissBuiltin(kind()));
-
- // Return the generated code.
- return GetICCode(kind(), Code::NORMAL, name);
-}
-
-
Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic(
MapHandleList* receiver_maps,
CodeHandleList* handler_stubs,
@@ -3256,520 +3270,6 @@ void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
}
-static void GenerateSmiKeyCheck(MacroAssembler* masm,
- Register key,
- Register scratch,
- XMMRegister xmm_scratch0,
- XMMRegister xmm_scratch1,
- Label* fail) {
- // Check that key is a smi and if SSE2 is available a heap number
- // containing a smi and branch if the check fails.
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatureScope use_sse2(masm, SSE2);
- Label key_ok;
- __ JumpIfSmi(key, &key_ok);
- __ cmp(FieldOperand(key, HeapObject::kMapOffset),
- Immediate(Handle<Map>(masm->isolate()->heap()->heap_number_map())));
- __ j(not_equal, fail);
- __ movdbl(xmm_scratch0, FieldOperand(key, HeapNumber::kValueOffset));
- __ cvttsd2si(scratch, Operand(xmm_scratch0));
- __ cvtsi2sd(xmm_scratch1, scratch);
- __ ucomisd(xmm_scratch1, xmm_scratch0);
- __ j(not_equal, fail);
- __ j(parity_even, fail); // NaN.
- // Check if the key fits in the smi range.
- __ cmp(scratch, 0xc0000000);
- __ j(sign, fail);
- __ SmiTag(scratch);
- __ mov(key, scratch);
- __ bind(&key_ok);
- } else {
- __ JumpIfNotSmi(key, fail);
- }
-}
-
-
-void KeyedStoreStubCompiler::GenerateStoreExternalArray(
- MacroAssembler* masm,
- ElementsKind elements_kind) {
- // ----------- S t a t e -------------
- // -- eax : value
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label miss_force_generic, slow, check_heap_number;
-
- // This stub is meant to be tail-jumped to, the receiver must already
- // have been verified by the caller to not be a smi.
-
- // Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, ecx, ebx, xmm0, xmm1, &miss_force_generic);
-
- // Check that the index is in range.
- __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
- __ cmp(ecx, FieldOperand(edi, ExternalArray::kLengthOffset));
- // Unsigned comparison catches both negative and too-large values.
- __ j(above_equal, &slow);
-
- // Handle both smis and HeapNumbers in the fast path. Go to the
- // runtime for all other kinds of values.
- // eax: value
- // edx: receiver
- // ecx: key
- // edi: elements array
- if (elements_kind == EXTERNAL_PIXEL_ELEMENTS) {
- __ JumpIfNotSmi(eax, &slow);
- } else {
- __ JumpIfNotSmi(eax, &check_heap_number);
- }
-
- // smi case
- __ mov(ebx, eax); // Preserve the value in eax as the return value.
- __ SmiUntag(ebx);
- __ mov(edi, FieldOperand(edi, ExternalArray::kExternalPointerOffset));
- // edi: base pointer of external storage
- switch (elements_kind) {
- case EXTERNAL_PIXEL_ELEMENTS:
- __ ClampUint8(ebx);
- __ SmiUntag(ecx);
- __ mov_b(Operand(edi, ecx, times_1, 0), ebx);
- break;
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- __ SmiUntag(ecx);
- __ mov_b(Operand(edi, ecx, times_1, 0), ebx);
- break;
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- __ mov_w(Operand(edi, ecx, times_1, 0), ebx);
- break;
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- __ mov(Operand(edi, ecx, times_2, 0), ebx);
- break;
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
- // Need to perform int-to-float conversion.
- __ push(ebx);
- __ fild_s(Operand(esp, 0));
- __ pop(ebx);
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
- __ fstp_s(Operand(edi, ecx, times_2, 0));
- } else { // elements_kind == EXTERNAL_DOUBLE_ELEMENTS.
- __ fstp_d(Operand(edi, ecx, times_4, 0));
- }
- break;
- default:
- UNREACHABLE();
- break;
- }
- __ ret(0); // Return the original value.
-
- // TODO(danno): handle heap number -> pixel array conversion
- if (elements_kind != EXTERNAL_PIXEL_ELEMENTS) {
- __ bind(&check_heap_number);
- // eax: value
- // edx: receiver
- // ecx: key
- // edi: elements array
- __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
- Immediate(masm->isolate()->factory()->heap_number_map()));
- __ j(not_equal, &slow);
-
- // The WebGL specification leaves the behavior of storing NaN and
- // +/-Infinity into integer arrays basically undefined. For more
- // reproducible behavior, convert these to zero.
- __ mov(edi, FieldOperand(edi, ExternalArray::kExternalPointerOffset));
- // edi: base pointer of external storage
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
- __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
- __ fstp_s(Operand(edi, ecx, times_2, 0));
- __ ret(0);
- } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
- __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
- __ fstp_d(Operand(edi, ecx, times_4, 0));
- __ ret(0);
- } else {
- // Perform float-to-int conversion with truncation (round-to-zero)
- // behavior.
-
- // For the moment we make the slow call to the runtime on
- // processors that don't support SSE2. The code in IntegerConvert
- // (code-stubs-ia32.cc) is roughly what is needed here though the
- // conversion failure case does not need to be handled.
- if (CpuFeatures::IsSupported(SSE2)) {
- if ((elements_kind == EXTERNAL_INT_ELEMENTS ||
- elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) &&
- CpuFeatures::IsSupported(SSE3)) {
- CpuFeatureScope scope(masm, SSE3);
- // fisttp stores values as signed integers. To represent the
- // entire range of int and unsigned int arrays, store as a
- // 64-bit int and discard the high 32 bits.
- __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
- __ sub(esp, Immediate(2 * kPointerSize));
- __ fisttp_d(Operand(esp, 0));
-
- // If conversion failed (NaN, infinity, or a number outside
- // signed int64 range), the result is 0x8000000000000000, and
- // we must handle this case in the runtime.
- Label ok;
- __ cmp(Operand(esp, kPointerSize), Immediate(0x80000000u));
- __ j(not_equal, &ok);
- __ cmp(Operand(esp, 0), Immediate(0));
- __ j(not_equal, &ok);
- __ add(esp, Immediate(2 * kPointerSize)); // Restore the stack.
- __ jmp(&slow);
-
- __ bind(&ok);
- __ pop(ebx);
- __ add(esp, Immediate(kPointerSize));
- __ mov(Operand(edi, ecx, times_2, 0), ebx);
- } else {
- ASSERT(CpuFeatures::IsSupported(SSE2));
- CpuFeatureScope scope(masm, SSE2);
- __ cvttsd2si(ebx, FieldOperand(eax, HeapNumber::kValueOffset));
- __ cmp(ebx, 0x80000000u);
- __ j(equal, &slow);
- // ebx: untagged integer value
- switch (elements_kind) {
- case EXTERNAL_PIXEL_ELEMENTS:
- __ ClampUint8(ebx);
- // Fall through.
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- __ SmiUntag(ecx);
- __ mov_b(Operand(edi, ecx, times_1, 0), ebx);
- break;
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- __ mov_w(Operand(edi, ecx, times_1, 0), ebx);
- break;
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- __ mov(Operand(edi, ecx, times_2, 0), ebx);
- break;
- default:
- UNREACHABLE();
- break;
- }
- }
- __ ret(0); // Return original value.
- }
- }
- }
-
- // Slow case: call runtime.
- __ bind(&slow);
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->keyed_store_external_array_slow(), 1);
-
- // ----------- S t a t e -------------
- // -- eax : value
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- TailCallBuiltin(masm, Builtins::kKeyedStoreIC_Slow);
-
- // ----------- S t a t e -------------
- // -- eax : value
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
-
- __ bind(&miss_force_generic);
- TailCallBuiltin(masm, Builtins::kKeyedStoreIC_MissForceGeneric);
-}
-
-
-void KeyedStoreStubCompiler::GenerateStoreFastElement(
- MacroAssembler* masm,
- bool is_js_array,
- ElementsKind elements_kind,
- KeyedAccessStoreMode store_mode) {
- // ----------- S t a t e -------------
- // -- eax : value
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label miss_force_generic, grow, slow, transition_elements_kind;
- Label check_capacity, prepare_slow, finish_store, commit_backing_store;
-
- // This stub is meant to be tail-jumped to, the receiver must already
- // have been verified by the caller to not be a smi.
-
- // Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, ecx, ebx, xmm0, xmm1, &miss_force_generic);
-
- if (IsFastSmiElementsKind(elements_kind)) {
- __ JumpIfNotSmi(eax, &transition_elements_kind);
- }
-
- // Get the elements array and make sure it is a fast element array, not 'cow'.
- __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
- if (is_js_array) {
- // Check that the key is within bounds.
- __ cmp(ecx, FieldOperand(edx, JSArray::kLengthOffset)); // smis.
- if (IsGrowStoreMode(store_mode)) {
- __ j(above_equal, &grow);
- } else {
- __ j(above_equal, &miss_force_generic);
- }
- } else {
- // Check that the key is within bounds.
- __ cmp(ecx, FieldOperand(edi, FixedArray::kLengthOffset)); // smis.
- __ j(above_equal, &miss_force_generic);
- }
-
- __ cmp(FieldOperand(edi, HeapObject::kMapOffset),
- Immediate(masm->isolate()->factory()->fixed_array_map()));
- __ j(not_equal, &miss_force_generic);
-
- __ bind(&finish_store);
- if (IsFastSmiElementsKind(elements_kind)) {
- // ecx is a smi, use times_half_pointer_size instead of
- // times_pointer_size
- __ mov(FieldOperand(edi,
- ecx,
- times_half_pointer_size,
- FixedArray::kHeaderSize), eax);
- } else {
- ASSERT(IsFastObjectElementsKind(elements_kind));
- // Do the store and update the write barrier.
- // ecx is a smi, use times_half_pointer_size instead of
- // times_pointer_size
- __ lea(ecx, FieldOperand(edi,
- ecx,
- times_half_pointer_size,
- FixedArray::kHeaderSize));
- __ mov(Operand(ecx, 0), eax);
- // Make sure to preserve the value in register eax.
- __ mov(ebx, eax);
- __ RecordWrite(edi, ecx, ebx, kDontSaveFPRegs);
- }
-
- // Done.
- __ ret(0);
-
- // Handle store cache miss, replacing the ic with the generic stub.
- __ bind(&miss_force_generic);
- TailCallBuiltin(masm, Builtins::kKeyedStoreIC_MissForceGeneric);
-
- // Handle transition to other elements kinds without using the generic stub.
- __ bind(&transition_elements_kind);
- TailCallBuiltin(masm, Builtins::kKeyedStoreIC_Miss);
-
- if (is_js_array && IsGrowStoreMode(store_mode)) {
- // Handle transition requiring the array to grow.
- __ bind(&grow);
-
- // Make sure the array is only growing by a single element, anything else
- // must be handled by the runtime. Flags are already set by previous
- // compare.
- __ j(not_equal, &miss_force_generic);
-
- // Check for the empty array, and preallocate a small backing store if
- // possible.
- __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
- __ cmp(edi, Immediate(masm->isolate()->factory()->empty_fixed_array()));
- __ j(not_equal, &check_capacity);
-
- int size = FixedArray::SizeFor(JSArray::kPreallocatedArrayElements);
- __ Allocate(size, edi, ebx, ecx, &prepare_slow, TAG_OBJECT);
- // Restore the key, which is known to be the array length.
-
- // eax: value
- // ecx: key
- // edx: receiver
- // edi: elements
- // Make sure that the backing store can hold additional elements.
- __ mov(FieldOperand(edi, JSObject::kMapOffset),
- Immediate(masm->isolate()->factory()->fixed_array_map()));
- __ mov(FieldOperand(edi, FixedArray::kLengthOffset),
- Immediate(Smi::FromInt(JSArray::kPreallocatedArrayElements)));
- __ mov(ebx, Immediate(masm->isolate()->factory()->the_hole_value()));
- for (int i = 1; i < JSArray::kPreallocatedArrayElements; ++i) {
- __ mov(FieldOperand(edi, FixedArray::SizeFor(i)), ebx);
- }
-
- // Store the element at index zero.
- __ mov(FieldOperand(edi, FixedArray::SizeFor(0)), eax);
-
- // Install the new backing store in the JSArray.
- __ mov(FieldOperand(edx, JSObject::kElementsOffset), edi);
- __ RecordWriteField(edx, JSObject::kElementsOffset, edi, ebx,
- kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-
- // Increment the length of the array.
- __ mov(FieldOperand(edx, JSArray::kLengthOffset),
- Immediate(Smi::FromInt(1)));
- __ ret(0);
-
- __ bind(&check_capacity);
- __ cmp(FieldOperand(edi, HeapObject::kMapOffset),
- Immediate(masm->isolate()->factory()->fixed_cow_array_map()));
- __ j(equal, &miss_force_generic);
-
- // eax: value
- // ecx: key
- // edx: receiver
- // edi: elements
- // Make sure that the backing store can hold additional elements.
- __ cmp(ecx, FieldOperand(edi, FixedArray::kLengthOffset));
- __ j(above_equal, &slow);
-
- // Grow the array and finish the store.
- __ add(FieldOperand(edx, JSArray::kLengthOffset),
- Immediate(Smi::FromInt(1)));
- __ jmp(&finish_store);
-
- __ bind(&prepare_slow);
- // Restore the key, which is known to be the array length.
- __ mov(ecx, Immediate(0));
-
- __ bind(&slow);
- TailCallBuiltin(masm, Builtins::kKeyedStoreIC_Slow);
- }
-}
-
-
-void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
- MacroAssembler* masm,
- bool is_js_array,
- KeyedAccessStoreMode store_mode) {
- // ----------- S t a t e -------------
- // -- eax : value
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label miss_force_generic, transition_elements_kind, grow, slow;
- Label check_capacity, prepare_slow, finish_store, commit_backing_store;
-
- // This stub is meant to be tail-jumped to, the receiver must already
- // have been verified by the caller to not be a smi.
-
- // Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, ecx, ebx, xmm0, xmm1, &miss_force_generic);
-
- // Get the elements array.
- __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
- __ AssertFastElements(edi);
-
- if (is_js_array) {
- // Check that the key is within bounds.
- __ cmp(ecx, FieldOperand(edx, JSArray::kLengthOffset)); // smis.
- if (IsGrowStoreMode(store_mode)) {
- __ j(above_equal, &grow);
- } else {
- __ j(above_equal, &miss_force_generic);
- }
- } else {
- // Check that the key is within bounds.
- __ cmp(ecx, FieldOperand(edi, FixedArray::kLengthOffset)); // smis.
- __ j(above_equal, &miss_force_generic);
- }
-
- __ bind(&finish_store);
- __ StoreNumberToDoubleElements(eax, edi, ecx, edx, xmm0,
- &transition_elements_kind, true);
- __ ret(0);
-
- // Handle store cache miss, replacing the ic with the generic stub.
- __ bind(&miss_force_generic);
- TailCallBuiltin(masm, Builtins::kKeyedStoreIC_MissForceGeneric);
-
- // Handle transition to other elements kinds without using the generic stub.
- __ bind(&transition_elements_kind);
- TailCallBuiltin(masm, Builtins::kKeyedStoreIC_Miss);
-
- if (is_js_array && IsGrowStoreMode(store_mode)) {
- // Handle transition requiring the array to grow.
- __ bind(&grow);
-
- // Make sure the array is only growing by a single element, anything else
- // must be handled by the runtime. Flags are already set by previous
- // compare.
- __ j(not_equal, &miss_force_generic);
-
- // Transition on values that can't be stored in a FixedDoubleArray.
- Label value_is_smi;
- __ JumpIfSmi(eax, &value_is_smi);
- __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
- Immediate(Handle<Map>(masm->isolate()->heap()->heap_number_map())));
- __ j(not_equal, &transition_elements_kind);
- __ bind(&value_is_smi);
-
- // Check for the empty array, and preallocate a small backing store if
- // possible.
- __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
- __ cmp(edi, Immediate(masm->isolate()->factory()->empty_fixed_array()));
- __ j(not_equal, &check_capacity);
-
- int size = FixedDoubleArray::SizeFor(JSArray::kPreallocatedArrayElements);
- __ Allocate(size, edi, ebx, ecx, &prepare_slow, TAG_OBJECT);
-
- // Restore the key, which is known to be the array length.
- __ mov(ecx, Immediate(0));
-
- // eax: value
- // ecx: key
- // edx: receiver
- // edi: elements
- // Initialize the new FixedDoubleArray.
- __ mov(FieldOperand(edi, JSObject::kMapOffset),
- Immediate(masm->isolate()->factory()->fixed_double_array_map()));
- __ mov(FieldOperand(edi, FixedDoubleArray::kLengthOffset),
- Immediate(Smi::FromInt(JSArray::kPreallocatedArrayElements)));
-
- __ StoreNumberToDoubleElements(eax, edi, ecx, ebx, xmm0,
- &transition_elements_kind, true);
-
- for (int i = 1; i < JSArray::kPreallocatedArrayElements; i++) {
- int offset = FixedDoubleArray::OffsetOfElementAt(i);
- __ mov(FieldOperand(edi, offset), Immediate(kHoleNanLower32));
- __ mov(FieldOperand(edi, offset + kPointerSize),
- Immediate(kHoleNanUpper32));
- }
-
- // Install the new backing store in the JSArray.
- __ mov(FieldOperand(edx, JSObject::kElementsOffset), edi);
- __ RecordWriteField(edx, JSObject::kElementsOffset, edi, ebx,
- kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-
- // Increment the length of the array.
- __ add(FieldOperand(edx, JSArray::kLengthOffset),
- Immediate(Smi::FromInt(1)));
- __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
- __ ret(0);
-
- __ bind(&check_capacity);
- // eax: value
- // ecx: key
- // edx: receiver
- // edi: elements
- // Make sure that the backing store can hold additional elements.
- __ cmp(ecx, FieldOperand(edi, FixedDoubleArray::kLengthOffset));
- __ j(above_equal, &slow);
-
- // Grow the array and finish the store.
- __ add(FieldOperand(edx, JSArray::kLengthOffset),
- Immediate(Smi::FromInt(1)));
- __ jmp(&finish_store);
-
- __ bind(&prepare_slow);
- // Restore the key, which is known to be the array length.
- __ mov(ecx, Immediate(0));
-
- __ bind(&slow);
- TailCallBuiltin(masm, Builtins::kKeyedStoreIC_Slow);
- }
-}
-
-
#undef __
} } // namespace v8::internal
diff --git a/deps/v8/src/ic-inl.h b/deps/v8/src/ic-inl.h
index ca02183db..e6ff2daa6 100644
--- a/deps/v8/src/ic-inl.h
+++ b/deps/v8/src/ic-inl.h
@@ -43,7 +43,6 @@ Address IC::address() const {
Address result = Assembler::target_address_from_return_address(pc());
#ifdef ENABLE_DEBUGGER_SUPPORT
- ASSERT(Isolate::Current() == isolate());
Debug* debug = isolate()->debug();
// First check if any break points are active if not just return the address
// of the call.
diff --git a/deps/v8/src/ic.cc b/deps/v8/src/ic.cc
index 3fa81f893..55187514f 100644
--- a/deps/v8/src/ic.cc
+++ b/deps/v8/src/ic.cc
@@ -375,20 +375,22 @@ void IC::PostPatching(Address address, Code* target, Code* old_target) {
}
-void IC::Clear(Address address) {
+void IC::Clear(Isolate* isolate, Address address) {
Code* target = GetTargetAtAddress(address);
// Don't clear debug break inline cache as it will remove the break point.
if (target->is_debug_stub()) return;
switch (target->kind()) {
- case Code::LOAD_IC: return LoadIC::Clear(address, target);
- case Code::KEYED_LOAD_IC: return KeyedLoadIC::Clear(address, target);
- case Code::STORE_IC: return StoreIC::Clear(address, target);
- case Code::KEYED_STORE_IC: return KeyedStoreIC::Clear(address, target);
+ case Code::LOAD_IC: return LoadIC::Clear(isolate, address, target);
+ case Code::KEYED_LOAD_IC:
+ return KeyedLoadIC::Clear(isolate, address, target);
+ case Code::STORE_IC: return StoreIC::Clear(isolate, address, target);
+ case Code::KEYED_STORE_IC:
+ return KeyedStoreIC::Clear(isolate, address, target);
case Code::CALL_IC: return CallIC::Clear(address, target);
case Code::KEYED_CALL_IC: return KeyedCallIC::Clear(address, target);
- case Code::COMPARE_IC: return CompareIC::Clear(address, target);
+ case Code::COMPARE_IC: return CompareIC::Clear(isolate, address, target);
case Code::COMPARE_NIL_IC: return CompareNilIC::Clear(address, target);
case Code::BINARY_OP_IC:
case Code::TO_BOOLEAN_IC:
@@ -404,7 +406,7 @@ void CallICBase::Clear(Address address, Code* target) {
if (target->ic_state() == UNINITIALIZED) return;
bool contextual = CallICBase::Contextual::decode(target->extra_ic_state());
Code* code =
- Isolate::Current()->stub_cache()->FindCallInitialize(
+ target->GetIsolate()->stub_cache()->FindCallInitialize(
target->arguments_count(),
contextual ? RelocInfo::CODE_TARGET_CONTEXT : RelocInfo::CODE_TARGET,
target->kind());
@@ -412,40 +414,40 @@ void CallICBase::Clear(Address address, Code* target) {
}
-void KeyedLoadIC::Clear(Address address, Code* target) {
+void KeyedLoadIC::Clear(Isolate* isolate, Address address, Code* target) {
if (target->ic_state() == UNINITIALIZED) return;
// Make sure to also clear the map used in inline fast cases. If we
// do not clear these maps, cached code can keep objects alive
// through the embedded maps.
- SetTargetAtAddress(address, *initialize_stub());
+ SetTargetAtAddress(address, *initialize_stub(isolate));
}
-void LoadIC::Clear(Address address, Code* target) {
+void LoadIC::Clear(Isolate* isolate, Address address, Code* target) {
if (target->ic_state() == UNINITIALIZED) return;
- SetTargetAtAddress(address, *initialize_stub());
+ SetTargetAtAddress(address, *initialize_stub(isolate));
}
-void StoreIC::Clear(Address address, Code* target) {
+void StoreIC::Clear(Isolate* isolate, Address address, Code* target) {
if (target->ic_state() == UNINITIALIZED) return;
SetTargetAtAddress(address,
(Code::GetStrictMode(target->extra_ic_state()) == kStrictMode)
- ? *initialize_stub_strict()
- : *initialize_stub());
+ ? *initialize_stub_strict(isolate)
+ : *initialize_stub(isolate));
}
-void KeyedStoreIC::Clear(Address address, Code* target) {
+void KeyedStoreIC::Clear(Isolate* isolate, Address address, Code* target) {
if (target->ic_state() == UNINITIALIZED) return;
SetTargetAtAddress(address,
(Code::GetStrictMode(target->extra_ic_state()) == kStrictMode)
- ? *initialize_stub_strict()
- : *initialize_stub());
+ ? *initialize_stub_strict(isolate)
+ : *initialize_stub(isolate));
}
-void CompareIC::Clear(Address address, Code* target) {
+void CompareIC::Clear(Isolate* isolate, Address address, Code* target) {
ASSERT(target->major_key() == CodeStub::CompareIC);
CompareIC::State handler_state;
Token::Value op;
@@ -453,7 +455,7 @@ void CompareIC::Clear(Address address, Code* target) {
&handler_state, &op);
// Only clear CompareICs that can retain objects.
if (handler_state != KNOWN_OBJECT) return;
- SetTargetAtAddress(address, GetRawUninitialized(op));
+ SetTargetAtAddress(address, GetRawUninitialized(isolate, op));
PatchInlinedSmiCode(address, DISABLE_INLINED_SMI_CHECK);
}
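The pattern running through these ic.cc hunks is mechanical but deliberate: Clear and its siblings now receive the Isolate* explicitly instead of fetching it from thread-local storage via Isolate::Current(). That removes a TLS lookup from hot paths and makes the functions callable on threads that have no current isolate. A toy illustration of the two styles (names hypothetical):

    struct Isolate { int id; };
    thread_local Isolate* current_isolate = nullptr;

    // Old style: implicit context via TLS, a hidden lookup per call.
    int OldStyle() { return current_isolate->id; }

    // New style: the caller threads the context through explicitly.
    int NewStyle(Isolate* isolate) { return isolate->id; }
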
@@ -500,7 +502,7 @@ static void LookupForRead(Handle<Object> object,
Handle<Object> CallICBase::TryCallAsFunction(Handle<Object> object) {
- Handle<Object> delegate = Execution::GetFunctionDelegate(object);
+ Handle<Object> delegate = Execution::GetFunctionDelegate(isolate(), object);
if (delegate->IsJSFunction() && !object->IsJSFunctionProxy()) {
// Patch the receiver and use the delegate as the function to
@@ -564,7 +566,7 @@ MaybeObject* CallICBase::LoadFunction(State state,
// the element if so.
uint32_t index;
if (name->AsArrayIndex(&index)) {
- Handle<Object> result = Object::GetElement(object, index);
+ Handle<Object> result = Object::GetElement(isolate(), object, index);
RETURN_IF_EMPTY_HANDLE(isolate(), result);
if (result->IsJSFunction()) return *result;
@@ -925,7 +927,7 @@ MaybeObject* LoadIC::Load(State state,
if (FLAG_trace_ic) PrintF("[LoadIC : +#prototype /function]\n");
#endif
}
- return *Accessors::FunctionGetPrototype(object);
+ return *Accessors::FunctionGetPrototype(Handle<JSFunction>::cast(object));
}
}
@@ -994,7 +996,7 @@ static bool AddOneReceiverMapIfMissing(MapHandleList* receiver_maps,
bool IC::UpdatePolymorphicIC(State state,
- Handle<JSObject> receiver,
+ Handle<HeapObject> receiver,
Handle<String> name,
Handle<Code> code,
StrictModeFlag strict_mode) {
@@ -1075,44 +1077,44 @@ Handle<Code> StoreIC::ComputePolymorphicIC(MapHandleList* receiver_maps,
}
-void LoadIC::UpdateMonomorphicIC(Handle<JSObject> receiver,
+void LoadIC::UpdateMonomorphicIC(Handle<HeapObject> receiver,
Handle<Code> handler,
Handle<String> name,
StrictModeFlag strict_mode) {
- if (handler->type() == Code::NORMAL) return set_target(*handler);
+ if (handler->is_load_stub()) return set_target(*handler);
Handle<Code> ic = isolate()->stub_cache()->ComputeMonomorphicLoadIC(
receiver, handler, name);
set_target(*ic);
}
-void KeyedLoadIC::UpdateMonomorphicIC(Handle<JSObject> receiver,
+void KeyedLoadIC::UpdateMonomorphicIC(Handle<HeapObject> receiver,
Handle<Code> handler,
Handle<String> name,
StrictModeFlag strict_mode) {
- if (handler->type() == Code::NORMAL) return set_target(*handler);
+ if (handler->is_keyed_load_stub()) return set_target(*handler);
Handle<Code> ic = isolate()->stub_cache()->ComputeMonomorphicKeyedLoadIC(
receiver, handler, name);
set_target(*ic);
}
-void StoreIC::UpdateMonomorphicIC(Handle<JSObject> receiver,
+void StoreIC::UpdateMonomorphicIC(Handle<HeapObject> receiver,
Handle<Code> handler,
Handle<String> name,
StrictModeFlag strict_mode) {
- if (handler->type() == Code::NORMAL) return set_target(*handler);
+ if (handler->is_store_stub()) return set_target(*handler);
Handle<Code> ic = isolate()->stub_cache()->ComputeMonomorphicStoreIC(
receiver, handler, name, strict_mode);
set_target(*ic);
}
-void KeyedStoreIC::UpdateMonomorphicIC(Handle<JSObject> receiver,
+void KeyedStoreIC::UpdateMonomorphicIC(Handle<HeapObject> receiver,
Handle<Code> handler,
Handle<String> name,
StrictModeFlag strict_mode) {
- if (handler->type() == Code::NORMAL) return set_target(*handler);
+ if (handler->is_keyed_store_stub()) return set_target(*handler);
Handle<Code> ic = isolate()->stub_cache()->ComputeMonomorphicKeyedStoreIC(
receiver, handler, name, strict_mode);
set_target(*ic);
@@ -1153,7 +1155,7 @@ bool IC::IsTransitionedMapOfMonomorphicTarget(Map* receiver_map) {
// not necessarily equal to target()->state().
void IC::PatchCache(State state,
StrictModeFlag strict_mode,
- Handle<JSObject> receiver,
+ Handle<HeapObject> receiver,
Handle<String> name,
Handle<Code> code) {
switch (state) {
@@ -1263,32 +1265,26 @@ void LoadIC::UpdateCaches(LookupResult* lookup,
State state,
Handle<Object> object,
Handle<String> name) {
- // Bail out if the result is not cacheable.
- if (!lookup->IsCacheable()) {
- set_target(*generic_stub());
- return;
- }
+ if (!object->IsHeapObject()) return;
- // TODO(jkummerow): It would be nice to support non-JSObjects in
- // UpdateCaches, then we wouldn't need to go generic here.
- if (!object->IsJSObject()) {
- set_target(*generic_stub());
- return;
- }
+ Handle<HeapObject> receiver = Handle<HeapObject>::cast(object);
- Handle<JSObject> receiver = Handle<JSObject>::cast(object);
Handle<Code> code;
if (state == UNINITIALIZED) {
// This is the first time we execute this inline cache.
// Set the target to the pre monomorphic stub to delay
// setting the monomorphic state.
code = pre_monomorphic_stub();
+ } else if (!lookup->IsCacheable()) {
+ // Bail out if the result is not cacheable.
+ code = slow_stub();
+ } else if (!object->IsJSObject()) {
+ // TODO(jkummerow): It would be nice to support non-JSObjects in
+ // ComputeLoadHandler, then we wouldn't need to go generic here.
+ code = slow_stub();
} else {
- code = ComputeLoadHandler(lookup, receiver, name);
- if (code.is_null()) {
- set_target(*generic_stub());
- return;
- }
+ code = ComputeLoadHandler(lookup, Handle<JSObject>::cast(receiver), name);
+ if (code.is_null()) code = slow_stub();
}
PatchCache(state, kNonStrictMode, receiver, name, code);
@@ -1355,8 +1351,16 @@ Handle<Code> LoadIC::ComputeLoadHandler(LookupResult* lookup,
if (!getter->IsJSFunction()) break;
if (holder->IsGlobalObject()) break;
if (!holder->HasFastProperties()) break;
+ Handle<JSFunction> function = Handle<JSFunction>::cast(getter);
+ CallOptimization call_optimization(function);
+ if (call_optimization.is_simple_api_call() &&
+ call_optimization.IsCompatibleReceiver(*receiver) &&
+ FLAG_js_accessor_ics) {
+ return isolate()->stub_cache()->ComputeLoadCallback(
+ name, receiver, holder, call_optimization);
+ }
return isolate()->stub_cache()->ComputeLoadViaGetter(
- name, receiver, holder, Handle<JSFunction>::cast(getter));
+ name, receiver, holder, function);
} else if (receiver->IsJSArray() &&
name->Equals(isolate()->heap()->length_string())) {
PropertyIndex lengthIndex =
@@ -1542,13 +1546,30 @@ Handle<Code> KeyedLoadIC::ComputeLoadHandler(LookupResult* lookup,
case CALLBACKS: {
Handle<Object> callback_object(lookup->GetCallbackObject(), isolate());
// TODO(dcarney): Handle DeclaredAccessorInfo correctly.
- if (!callback_object->IsExecutableAccessorInfo()) break;
- Handle<ExecutableAccessorInfo> callback =
- Handle<ExecutableAccessorInfo>::cast(callback_object);
- if (v8::ToCData<Address>(callback->getter()) == 0) break;
- if (!callback->IsCompatibleReceiver(*receiver)) break;
- return isolate()->stub_cache()->ComputeKeyedLoadCallback(
- name, receiver, holder, callback);
+ if (callback_object->IsExecutableAccessorInfo()) {
+ Handle<ExecutableAccessorInfo> callback =
+ Handle<ExecutableAccessorInfo>::cast(callback_object);
+ if (v8::ToCData<Address>(callback->getter()) == 0) break;
+ if (!callback->IsCompatibleReceiver(*receiver)) break;
+ return isolate()->stub_cache()->ComputeKeyedLoadCallback(
+ name, receiver, holder, callback);
+ } else if (callback_object->IsAccessorPair()) {
+ Handle<Object> getter(
+ Handle<AccessorPair>::cast(callback_object)->getter(),
+ isolate());
+ if (!getter->IsJSFunction()) break;
+ if (holder->IsGlobalObject()) break;
+ if (!holder->HasFastProperties()) break;
+ Handle<JSFunction> function = Handle<JSFunction>::cast(getter);
+ CallOptimization call_optimization(function);
+ if (call_optimization.is_simple_api_call() &&
+ call_optimization.IsCompatibleReceiver(*receiver) &&
+ FLAG_js_accessor_ics) {
+ return isolate()->stub_cache()->ComputeKeyedLoadCallback(
+ name, receiver, holder, call_optimization);
+ }
+ }
+ break;
}
case INTERCEPTOR:
ASSERT(HasInterceptorGetter(lookup->holder()));
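
The CALLBACKS case above now splits into two shapes: an ExecutableAccessorInfo with a native getter, or an AccessorPair whose JavaScript getter qualifies as a simple API call. A sketch of the second eligibility test, reduced to plain booleans (the field names are illustrative, not V8's):

    // Conditions the AccessorPair branch checks before asking the stub cache
    // for a callback-based keyed load stub; any failure falls through to the
    // generic path (the "break" in the diff).
    struct AccessorPairCandidate {
      bool getter_is_js_function;
      bool holder_is_global;
      bool holder_has_fast_properties;
      bool is_simple_api_call;       // CallOptimization::is_simple_api_call()
      bool receiver_compatible;      // CallOptimization::IsCompatibleReceiver()
      bool js_accessor_ics_enabled;  // FLAG_js_accessor_ics
    };

    bool CanCompileCallbackStub(const AccessorPairCandidate& c) {
      return c.getter_is_js_function &&
             !c.holder_is_global &&
             c.holder_has_fast_properties &&
             c.is_simple_api_call &&
             c.receiver_compatible &&
             c.js_accessor_ics_enabled;
    }
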
@@ -1615,7 +1636,8 @@ static bool LookupForWrite(Handle<JSObject> receiver,
if (!value->FitsRepresentation(target_details.representation())) {
Handle<Map> target(lookup->GetTransitionMapFromMap(receiver->map()));
Map::GeneralizeRepresentation(
- target, target->LastAdded(), value->OptimalRepresentation());
+ target, target->LastAdded(),
+ value->OptimalRepresentation(), FORCE_FIELD);
// Lookup the transition again since the transition tree may have changed
// entirely by the migration above.
receiver->map()->LookupTransition(*holder, *name, lookup);
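
For context on the extra FORCE_FIELD argument: when a stored value does not fit the representation recorded on the transition target, the target map's field is generalized before the transition is looked up again. A simplified stand-alone model of the widening lattice (V8 additionally tracks None/Integer32 states and map ownership):

    enum Representation { kSmi, kDouble, kTagged };

    // Join two representations: identical stays as-is, smi/double unify to
    // an unboxed double field, anything else decays to a general tagged field.
    Representation Generalize(Representation field, Representation value) {
      if (field == value) return field;
      if ((field == kSmi && value == kDouble) ||
          (field == kDouble && value == kSmi)) {
        return kDouble;
      }
      return kTagged;
    }
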
@@ -1708,21 +1730,30 @@ MaybeObject* StoreIC::Store(State state,
}
LookupResult lookup(isolate());
- if (LookupForWrite(receiver, name, value, &lookup, &state)) {
- if (FLAG_use_ic) {
- UpdateCaches(&lookup, state, strict_mode, receiver, name, value);
- }
- } else if (strict_mode == kStrictMode &&
- !(lookup.IsProperty() && lookup.IsReadOnly()) &&
- IsUndeclaredGlobal(object)) {
+ bool can_store = LookupForWrite(receiver, name, value, &lookup, &state);
+ if (!can_store &&
+ strict_mode == kStrictMode &&
+ !(lookup.IsProperty() && lookup.IsReadOnly()) &&
+ IsUndeclaredGlobal(object)) {
// Strict mode doesn't allow setting non-existent global property.
return ReferenceError("not_defined", name);
- } else if (FLAG_use_ic &&
- (lookup.IsNormal() ||
- (lookup.IsField() && lookup.CanHoldValue(value)))) {
- Handle<Code> stub = strict_mode == kStrictMode
- ? generic_stub_strict() : generic_stub();
- set_target(*stub);
+ }
+ if (FLAG_use_ic) {
+ if (state == UNINITIALIZED) {
+ Handle<Code> stub = (strict_mode == kStrictMode)
+ ? pre_monomorphic_stub_strict()
+ : pre_monomorphic_stub();
+ set_target(*stub);
+ TRACE_IC("StoreIC", name, state, *stub);
+ } else if (can_store) {
+ UpdateCaches(&lookup, state, strict_mode, receiver, name, value);
+ } else if (!name->IsCacheable(isolate()) ||
+ lookup.IsNormal() ||
+ (lookup.IsField() && lookup.CanHoldValue(value))) {
+ Handle<Code> stub = (strict_mode == kStrictMode) ? generic_stub_strict()
+ : generic_stub();
+ set_target(*stub);
+ }
}
// Set the property.
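
StoreIC::Store now mirrors the load path: the first hit installs a strict-mode-aware pre-monomorphic stub instead of specializing immediately. The condensed decision order, under the assumption that the strict-mode ReferenceError check has already passed (stand-in enum, not the builtin code handles):

    enum StoreStub {
      PRE_MONOMORPHIC, PRE_MONOMORPHIC_STRICT,
      SPECIALIZED,                  // the UpdateCaches() path
      GENERIC, GENERIC_STRICT,
      UNCHANGED                     // leave the current target in place
    };

    StoreStub SelectStoreStub(bool uninitialized, bool strict_mode,
                              bool can_store, bool needs_generic) {
      if (uninitialized) {
        return strict_mode ? PRE_MONOMORPHIC_STRICT : PRE_MONOMORPHIC;
      }
      if (can_store) return SPECIALIZED;
      // needs_generic stands for: name not cacheable, a normal (dictionary)
      // property, or a field that can hold the value without transitioning.
      if (needs_generic) return strict_mode ? GENERIC_STRICT : GENERIC;
      return UNCHANGED;
    }
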
@@ -1796,6 +1827,14 @@ Handle<Code> StoreIC::ComputeStoreMonomorphic(LookupResult* lookup,
if (!setter->IsJSFunction()) break;
if (holder->IsGlobalObject()) break;
if (!holder->HasFastProperties()) break;
+ Handle<JSFunction> function = Handle<JSFunction>::cast(setter);
+ CallOptimization call_optimization(function);
+ if (call_optimization.is_simple_api_call() &&
+ call_optimization.IsCompatibleReceiver(*receiver) &&
+ FLAG_js_accessor_ics) {
+ return isolate()->stub_cache()->ComputeStoreCallback(
+ name, receiver, holder, call_optimization, strict_mode);
+ }
return isolate()->stub_cache()->ComputeStoreViaSetter(
name, receiver, holder, Handle<JSFunction>::cast(setter),
strict_mode);
@@ -1847,18 +1886,6 @@ Handle<Code> KeyedStoreIC::StoreElementStub(Handle<JSObject> receiver,
return strict_mode == kStrictMode ? generic_stub_strict() : generic_stub();
}
- if (!FLAG_compiled_keyed_stores &&
- (store_mode == STORE_NO_TRANSITION_HANDLE_COW ||
- store_mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS)) {
- // TODO(danno): We'll soon handle MONOMORPHIC ICs that also support
- // copying COW arrays and silently ignoring some OOB stores into external
- // arrays, but for now use the generic.
- TRACE_GENERIC_IC(isolate(), "KeyedIC", "COW/OOB external array");
- return strict_mode == kStrictMode
- ? generic_stub_strict()
- : generic_stub();
- }
-
State ic_state = target()->ic_state();
Handle<Map> receiver_map(receiver->map(), isolate());
if (ic_state == UNINITIALIZED || ic_state == PREMONOMORPHIC) {
@@ -2139,8 +2166,7 @@ MaybeObject* KeyedStoreIC::Store(State state,
if (receiver->map()->is_deprecated()) {
JSObject::MigrateInstance(receiver);
}
- bool key_is_smi_like = key->IsSmi() ||
- (FLAG_compiled_keyed_stores && !key->ToSmi()->IsFailure());
+ bool key_is_smi_like = key->IsSmi() || !key->ToSmi()->IsFailure();
if (receiver->elements()->map() ==
isolate()->heap()->non_strict_arguments_elements_map()) {
stub = non_strict_arguments_stub();
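
With FLAG_compiled_keyed_stores gone, a key counts as smi-like whenever ToSmi succeeds, i.e. it is already a smi or converts losslessly to one. A rough stand-alone approximation of that predicate for double-typed keys (a 31-bit smi payload is assumed; on 64-bit V8 the range is wider, and V8's ToSmi returns a failure sentinel rather than a bool):

    #include <cmath>
    #include <cstdint>

    const int64_t kSmiMax = (INT64_C(1) << 30) - 1;   // 2^30 - 1
    const int64_t kSmiMin = -(INT64_C(1) << 30);      // -2^30

    // Models key->IsSmi() || !key->ToSmi()->IsFailure(): integral values
    // inside the smi payload range convert, everything else fails.
    bool IsSmiLikeKey(double key) {
      if (std::floor(key) != key) return false;       // fractional or NaN
      return key >= static_cast<double>(kSmiMin) &&
             key <= static_cast<double>(kSmiMax);
    }
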
@@ -2593,7 +2619,7 @@ static BinaryOpIC::TypeInfo TypeInfoFromValue(Handle<Object> value,
v8::internal::TypeInfo type = v8::internal::TypeInfo::FromValue(value);
if (type.IsSmi()) return BinaryOpIC::SMI;
if (type.IsInteger32()) {
- if (kSmiValueSize == 32) return BinaryOpIC::SMI;
+ if (SmiValuesAre32Bits()) return BinaryOpIC::SMI;
return BinaryOpIC::INT32;
}
if (type.IsNumber()) return BinaryOpIC::NUMBER;
@@ -2605,7 +2631,7 @@ static BinaryOpIC::TypeInfo TypeInfoFromValue(Handle<Object> value,
op == Token::SAR ||
op == Token::SHL ||
op == Token::SHR) {
- if (kSmiValueSize == 32) return BinaryOpIC::SMI;
+ if (SmiValuesAre32Bits()) return BinaryOpIC::SMI;
return BinaryOpIC::INT32;
}
return BinaryOpIC::ODDBALL;
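
The kSmiValueSize == 32 comparisons become a SmiValuesAre32Bits() predicate; the point of the check is that with a 32-bit smi payload every int32 result still fits in a smi, so the SMI state never has to widen to INT32. A stand-alone illustration (payload widths assumed: 31 bits on 32-bit targets, 32 bits on 64-bit targets):

    #include <cstdint>
    #include <cstdio>

    const int kSmiValueSize = sizeof(void*) == 8 ? 32 : 31;

    bool SmiValuesAre32Bits() { return kSmiValueSize == 32; }

    bool FitsSmi(int64_t v) {
      const int64_t kMax = (INT64_C(1) << (kSmiValueSize - 1)) - 1;
      const int64_t kMin = -(INT64_C(1) << (kSmiValueSize - 1));
      return v >= kMin && v <= kMax;
    }

    int main() {
      // On a 64-bit build INT32_MAX is a smi, so BinaryOpIC::SMI suffices.
      std::printf("%d %d\n", SmiValuesAre32Bits(), FitsSmi(INT32_MAX));
      return 0;
    }
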
@@ -2683,7 +2709,7 @@ RUNTIME_FUNCTION(MaybeObject*, BinaryOp_Patch) {
if (op == Token::DIV ||
op == Token::MUL ||
op == Token::SHR ||
- kSmiValueSize == 32) {
+ SmiValuesAre32Bits()) {
// Arithmetic on two Smi inputs has yielded a heap number.
// That is the only way to get here from the Smi stub.
// With 32-bit Smis, all overflows give heap numbers, but with
@@ -2770,7 +2796,8 @@ RUNTIME_FUNCTION(MaybeObject*, BinaryOp_Patch) {
bool caught_exception;
Handle<Object> builtin_args[] = { right };
- Handle<Object> result = Execution::Call(builtin_function,
+ Handle<Object> result = Execution::Call(isolate,
+ builtin_function,
left,
ARRAY_SIZE(builtin_args),
builtin_args,
@@ -2782,10 +2809,10 @@ RUNTIME_FUNCTION(MaybeObject*, BinaryOp_Patch) {
}
-Code* CompareIC::GetRawUninitialized(Token::Value op) {
+Code* CompareIC::GetRawUninitialized(Isolate* isolate, Token::Value op) {
ICCompareStub stub(op, UNINITIALIZED, UNINITIALIZED, UNINITIALIZED);
Code* code = NULL;
- CHECK(stub.FindCodeInCache(&code, Isolate::Current()));
+ CHECK(stub.FindCodeInCache(&code, isolate));
return code;
}
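
GetRawUninitialized is one instance of the diff's broadest theme: Isolate::Current(), a thread-local lookup, is replaced by an Isolate* parameter threaded in by the caller. The before/after shape with a stand-in class (the V8 version also saves a TLS read per call and makes the dependency testable):

    struct IsolateLike {
      int compare_ic_count;
      static IsolateLike* Current();   // hidden thread-local dependency
    };

    static thread_local IsolateLike g_current = {0};
    IsolateLike* IsolateLike::Current() { return &g_current; }

    // Before: the callee silently reaches for global state.
    int BumpImplicit() { return ++IsolateLike::Current()->compare_ic_count; }

    // After: the same operation, with the dependency visible in the signature.
    int BumpExplicit(IsolateLike* isolate) {
      return ++isolate->compare_ic_count;
    }
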
diff --git a/deps/v8/src/ic.h b/deps/v8/src/ic.h
index fcf0de58f..8f09e1d0a 100644
--- a/deps/v8/src/ic.h
+++ b/deps/v8/src/ic.h
@@ -102,7 +102,7 @@ class IC {
static State StateFrom(Code* target, Object* receiver, Object* name);
// Clear the inline cache to initial state.
- static void Clear(Address address);
+ static void Clear(Isolate* isolate, Address address);
// Computes the reloc info for this IC. This is a fairly expensive
// operation as it has to search through the heap to find the code
@@ -167,14 +167,14 @@ class IC {
static inline void SetTargetAtAddress(Address address, Code* target);
static void PostPatching(Address address, Code* target, Code* old_target);
- virtual void UpdateMonomorphicIC(Handle<JSObject> receiver,
+ virtual void UpdateMonomorphicIC(Handle<HeapObject> receiver,
Handle<Code> handler,
Handle<String> name,
StrictModeFlag strict_mode) {
set_target(*handler);
}
bool UpdatePolymorphicIC(State state,
- Handle<JSObject> receiver,
+ Handle<HeapObject> receiver,
Handle<String> name,
Handle<Code> code,
StrictModeFlag strict_mode);
@@ -192,7 +192,7 @@ class IC {
bool IsTransitionedMapOfMonomorphicTarget(Map* receiver_map);
void PatchCache(State state,
StrictModeFlag strict_mode,
- Handle<JSObject> receiver,
+ Handle<HeapObject> receiver,
Handle<String> name,
Handle<Code> code);
virtual void UpdateMegamorphicCache(Map* map, Name* name, Code* code);
@@ -388,7 +388,7 @@ class LoadIC: public IC {
protected:
virtual Code::Kind kind() const { return Code::LOAD_IC; }
- virtual Handle<Code> generic_stub() const {
+ virtual Handle<Code> slow_stub() const {
return isolate()->builtins()->LoadIC_Slow();
}
@@ -403,7 +403,7 @@ class LoadIC: public IC {
Handle<Object> object,
Handle<String> name);
- virtual void UpdateMonomorphicIC(Handle<JSObject> receiver,
+ virtual void UpdateMonomorphicIC(Handle<HeapObject> receiver,
Handle<Code> handler,
Handle<String> name,
StrictModeFlag strict_mode);
@@ -420,14 +420,14 @@ class LoadIC: public IC {
private:
// Stub accessors.
- static Handle<Code> initialize_stub() {
- return Isolate::Current()->builtins()->LoadIC_Initialize();
+ static Handle<Code> initialize_stub(Isolate* isolate) {
+ return isolate->builtins()->LoadIC_Initialize();
}
virtual Handle<Code> pre_monomorphic_stub() {
return isolate()->builtins()->LoadIC_PreMonomorphic();
}
- static void Clear(Address address, Code* target);
+ static void Clear(Isolate* isolate, Address address, Code* target);
friend class IC;
};
@@ -483,9 +483,12 @@ class KeyedLoadIC: public LoadIC {
virtual Handle<Code> generic_stub() const {
return isolate()->builtins()->KeyedLoadIC_Generic();
}
+ virtual Handle<Code> slow_stub() const {
+ return isolate()->builtins()->KeyedLoadIC_Slow();
+ }
// Update the inline cache.
- virtual void UpdateMonomorphicIC(Handle<JSObject> receiver,
+ virtual void UpdateMonomorphicIC(Handle<HeapObject> receiver,
Handle<Code> handler,
Handle<String> name,
StrictModeFlag strict_mode);
@@ -496,8 +499,8 @@ class KeyedLoadIC: public LoadIC {
private:
// Stub accessors.
- static Handle<Code> initialize_stub() {
- return Isolate::Current()->builtins()->KeyedLoadIC_Initialize();
+ static Handle<Code> initialize_stub(Isolate* isolate) {
+ return isolate->builtins()->KeyedLoadIC_Initialize();
}
virtual Handle<Code> pre_monomorphic_stub() {
return isolate()->builtins()->KeyedLoadIC_PreMonomorphic();
@@ -512,7 +515,7 @@ class KeyedLoadIC: public LoadIC {
return isolate()->builtins()->KeyedLoadIC_String();
}
- static void Clear(Address address, Code* target);
+ static void Clear(Isolate* isolate, Address address, Code* target);
friend class IC;
};
@@ -527,6 +530,9 @@ class StoreIC: public IC {
// Code generators for stub routines. Only called once at startup.
static void GenerateSlow(MacroAssembler* masm);
static void GenerateInitialize(MacroAssembler* masm) { GenerateMiss(masm); }
+ static void GeneratePreMonomorphic(MacroAssembler* masm) {
+ GenerateMiss(masm);
+ }
static void GenerateMiss(MacroAssembler* masm);
static void GenerateMegamorphic(MacroAssembler* masm,
StrictModeFlag strict_mode);
@@ -558,6 +564,12 @@ class StoreIC: public IC {
virtual Handle<Code> generic_stub_strict() const {
return isolate()->builtins()->StoreIC_Generic_Strict();
}
+ virtual Handle<Code> pre_monomorphic_stub() const {
+ return isolate()->builtins()->StoreIC_PreMonomorphic();
+ }
+ virtual Handle<Code> pre_monomorphic_stub_strict() const {
+ return isolate()->builtins()->StoreIC_PreMonomorphic_Strict();
+ }
virtual Handle<Code> global_proxy_stub() {
return isolate()->builtins()->StoreIC_GlobalProxy();
}
@@ -565,7 +577,7 @@ class StoreIC: public IC {
return isolate()->builtins()->StoreIC_GlobalProxy_Strict();
}
- virtual void UpdateMonomorphicIC(Handle<JSObject> receiver,
+ virtual void UpdateMonomorphicIC(Handle<HeapObject> receiver,
Handle<Code> handler,
Handle<String> name,
StrictModeFlag strict_mode);
@@ -601,13 +613,13 @@ class StoreIC: public IC {
IC::set_target(code);
}
- static Handle<Code> initialize_stub() {
- return Isolate::Current()->builtins()->StoreIC_Initialize();
+ static Handle<Code> initialize_stub(Isolate* isolate) {
+ return isolate->builtins()->StoreIC_Initialize();
}
- static Handle<Code> initialize_stub_strict() {
- return Isolate::Current()->builtins()->StoreIC_Initialize_Strict();
+ static Handle<Code> initialize_stub_strict(Isolate* isolate) {
+ return isolate->builtins()->StoreIC_Initialize_Strict();
}
- static void Clear(Address address, Code* target);
+ static void Clear(Isolate* isolate, Address address, Code* target);
friend class IC;
};
@@ -643,6 +655,9 @@ class KeyedStoreIC: public StoreIC {
static void GenerateInitialize(MacroAssembler* masm) {
GenerateMiss(masm, MISS);
}
+ static void GeneratePreMonomorphic(MacroAssembler* masm) {
+ GenerateMiss(masm, MISS);
+ }
static void GenerateMiss(MacroAssembler* masm, ICMissMode force_generic);
static void GenerateSlow(MacroAssembler* masm);
static void GenerateRuntimeSetProperty(MacroAssembler* masm,
@@ -660,6 +675,12 @@ class KeyedStoreIC: public StoreIC {
Handle<Object> value);
virtual void UpdateMegamorphicCache(Map* map, Name* name, Code* code) { }
+ virtual Handle<Code> pre_monomorphic_stub() const {
+ return isolate()->builtins()->KeyedStoreIC_PreMonomorphic();
+ }
+ virtual Handle<Code> pre_monomorphic_stub_strict() const {
+ return isolate()->builtins()->KeyedStoreIC_PreMonomorphic_Strict();
+ }
virtual Handle<Code> megamorphic_stub() {
return isolate()->builtins()->KeyedStoreIC_Generic();
}
@@ -671,7 +692,7 @@ class KeyedStoreIC: public StoreIC {
KeyedAccessStoreMode store_mode,
StrictModeFlag strict_mode);
- virtual void UpdateMonomorphicIC(Handle<JSObject> receiver,
+ virtual void UpdateMonomorphicIC(Handle<HeapObject> receiver,
Handle<Code> handler,
Handle<String> name,
StrictModeFlag strict_mode);
@@ -685,11 +706,11 @@ class KeyedStoreIC: public StoreIC {
}
// Stub accessors.
- static Handle<Code> initialize_stub() {
- return Isolate::Current()->builtins()->KeyedStoreIC_Initialize();
+ static Handle<Code> initialize_stub(Isolate* isolate) {
+ return isolate->builtins()->KeyedStoreIC_Initialize();
}
- static Handle<Code> initialize_stub_strict() {
- return Isolate::Current()->builtins()->KeyedStoreIC_Initialize_Strict();
+ static Handle<Code> initialize_stub_strict(Isolate* isolate) {
+ return isolate->builtins()->KeyedStoreIC_Initialize_Strict();
}
Handle<Code> generic_stub() const {
return isolate()->builtins()->KeyedStoreIC_Generic();
@@ -701,7 +722,7 @@ class KeyedStoreIC: public StoreIC {
return isolate()->builtins()->KeyedStoreIC_NonStrictArguments();
}
- static void Clear(Address address, Code* target);
+ static void Clear(Isolate* isolate, Address address, Code* target);
KeyedAccessStoreMode GetStoreMode(Handle<JSObject> receiver,
Handle<Object> key,
@@ -807,9 +828,9 @@ class CompareIC: public IC {
bool strict() const { return op_ == Token::EQ_STRICT; }
Condition GetCondition() const { return ComputeCondition(op_); }
- static Code* GetRawUninitialized(Token::Value op);
+ static Code* GetRawUninitialized(Isolate* isolate, Token::Value op);
- static void Clear(Address address, Code* target);
+ static void Clear(Isolate* isolate, Address address, Code* target);
Token::Value op_;
diff --git a/deps/v8/src/isolate-inl.h b/deps/v8/src/isolate-inl.h
index 9fb16fbe9..45076f565 100644
--- a/deps/v8/src/isolate-inl.h
+++ b/deps/v8/src/isolate-inl.h
@@ -28,20 +28,19 @@
#ifndef V8_ISOLATE_INL_H_
#define V8_ISOLATE_INL_H_
-#include "isolate.h"
-
#include "debug.h"
+#include "isolate.h"
+#include "utils/random-number-generator.h"
namespace v8 {
namespace internal {
-SaveContext::SaveContext(Isolate* isolate) : prev_(isolate->save_context()) {
+SaveContext::SaveContext(Isolate* isolate)
+ : isolate_(isolate),
+ prev_(isolate->save_context()) {
if (isolate->context() != NULL) {
context_ = Handle<Context>(isolate->context());
-#if __GNUC_VERSION__ >= 40100 && __GNUC_VERSION__ < 40300
- dummy_ = Handle<Context>(isolate->context());
-#endif
}
isolate->set_save_context(this);
@@ -68,6 +67,13 @@ bool Isolate::DebuggerHasBreakPoints() {
}
+RandomNumberGenerator* Isolate::random_number_generator() {
+ if (random_number_generator_ == NULL) {
+ random_number_generator_ = new RandomNumberGenerator;
+ }
+ return random_number_generator_;
+}
+
} } // namespace v8::internal
#endif // V8_ISOLATE_INL_H_
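
The new random_number_generator() accessor allocates on first use with no locking, which is sound under the assumption that a single thread drives a given Isolate at a time. The same create-on-demand shape in isolation (stand-in types, not the V8 classes):

    class RandomNumberGeneratorLike {
     public:
      RandomNumberGeneratorLike() {}
    };

    class Owner {
     public:
      Owner() : rng_(nullptr) {}
      ~Owner() { delete rng_; }   // the diff frees it in ~Isolate() likewise
      RandomNumberGeneratorLike* random_number_generator() {
        if (rng_ == nullptr) rng_ = new RandomNumberGeneratorLike;
        return rng_;
      }
     private:
      RandomNumberGeneratorLike* rng_;
    };
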
diff --git a/deps/v8/src/isolate.cc b/deps/v8/src/isolate.cc
index 7b77d893f..6fa496a90 100644
--- a/deps/v8/src/isolate.cc
+++ b/deps/v8/src/isolate.cc
@@ -39,7 +39,7 @@
#include "deoptimizer.h"
#include "heap-profiler.h"
#include "hydrogen.h"
-#include "isolate.h"
+#include "isolate-inl.h"
#include "lithium-allocator.h"
#include "log.h"
#include "marking-thread.h"
@@ -54,6 +54,7 @@
#include "spaces.h"
#include "stub-cache.h"
#include "sweeper-thread.h"
+#include "utils/random-number-generator.h"
#include "version.h"
#include "vm-state-inl.h"
@@ -137,7 +138,7 @@ v8::TryCatch* ThreadLocalTop::TryCatchHandler() {
int SystemThreadManager::NumberOfParallelSystemThreads(
ParallelSystemComponent type) {
- int number_of_threads = Min(OS::NumberOfCores(), kMaxThreads);
+ int number_of_threads = Min(CPU::NumberOfProcessorsOnline(), kMaxThreads);
ASSERT(number_of_threads > 0);
if (number_of_threads == 1) {
return 0;
@@ -226,8 +227,8 @@ class PreallocatedMemoryThread: public Thread {
PreallocatedMemoryThread()
: Thread("v8:PreallocMem"),
keep_running_(true),
- wait_for_ever_semaphore_(OS::CreateSemaphore(0)),
- data_ready_semaphore_(OS::CreateSemaphore(0)),
+ wait_for_ever_semaphore_(new Semaphore(0)),
+ data_ready_semaphore_(new Semaphore(0)),
data_(NULL),
length_(0) {
}
@@ -343,35 +344,23 @@ Thread::LocalStorageKey Isolate::per_isolate_thread_data_key_;
#ifdef DEBUG
Thread::LocalStorageKey PerThreadAssertScopeBase::thread_local_key;
#endif // DEBUG
-Mutex* Isolate::process_wide_mutex_ = OS::CreateMutex();
+Mutex Isolate::process_wide_mutex_;
Isolate::ThreadDataTable* Isolate::thread_data_table_ = NULL;
Atomic32 Isolate::isolate_counter_ = 0;
-Isolate::PerIsolateThreadData* Isolate::AllocatePerIsolateThreadData(
- ThreadId thread_id) {
- ASSERT(!thread_id.Equals(ThreadId::Invalid()));
- PerIsolateThreadData* per_thread = new PerIsolateThreadData(this, thread_id);
- {
- ScopedLock lock(process_wide_mutex_);
- ASSERT(thread_data_table_->Lookup(this, thread_id) == NULL);
- thread_data_table_->Insert(per_thread);
- ASSERT(thread_data_table_->Lookup(this, thread_id) == per_thread);
- }
- return per_thread;
-}
-
-
Isolate::PerIsolateThreadData*
Isolate::FindOrAllocatePerThreadDataForThisThread() {
ThreadId thread_id = ThreadId::Current();
PerIsolateThreadData* per_thread = NULL;
{
- ScopedLock lock(process_wide_mutex_);
+ LockGuard<Mutex> lock_guard(&process_wide_mutex_);
per_thread = thread_data_table_->Lookup(this, thread_id);
if (per_thread == NULL) {
- per_thread = AllocatePerIsolateThreadData(thread_id);
+ per_thread = new PerIsolateThreadData(this, thread_id);
+ thread_data_table_->Insert(per_thread);
}
}
+ ASSERT(thread_data_table_->Lookup(this, thread_id) == per_thread);
return per_thread;
}
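
Two mechanical changes meet in this hunk: the heap-allocated OS::CreateMutex() mutex becomes a value-type static, ScopedLock becomes LockGuard<Mutex>, and the separate AllocatePerIsolateThreadData helper is inlined under the same lock. The equivalent shape using the standard library as a stand-in for V8's Mutex/LockGuard:

    #include <map>
    #include <mutex>

    struct PerThreadData { int thread_id; };

    class ThreadDataTable {
     public:
      PerThreadData* FindOrAllocate(int thread_id) {
        std::lock_guard<std::mutex> guard(mutex_);   // LockGuard<Mutex> in V8
        PerThreadData*& slot = table_[thread_id];
        if (slot == nullptr) slot = new PerThreadData{thread_id};
        return slot;   // lookup and insert now share a single critical section
      }
     private:
      std::mutex mutex_;   // value type: no OS::CreateMutex()/delete pairing
      std::map<int, PerThreadData*> table_;
    };
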
@@ -386,7 +375,7 @@ Isolate::PerIsolateThreadData* Isolate::FindPerThreadDataForThread(
ThreadId thread_id) {
PerIsolateThreadData* per_thread = NULL;
{
- ScopedLock lock(process_wide_mutex_);
+ LockGuard<Mutex> lock_guard(&process_wide_mutex_);
per_thread = thread_data_table_->Lookup(this, thread_id);
}
return per_thread;
@@ -394,7 +383,7 @@ Isolate::PerIsolateThreadData* Isolate::FindPerThreadDataForThread(
void Isolate::EnsureDefaultIsolate() {
- ScopedLock lock(process_wide_mutex_);
+ LockGuard<Mutex> lock_guard(&process_wide_mutex_);
if (default_isolate_ == NULL) {
isolate_key_ = Thread::CreateThreadLocalKey();
thread_id_key_ = Thread::CreateThreadLocalKey();
@@ -522,7 +511,7 @@ void Isolate::IterateDeferredHandles(ObjectVisitor* visitor) {
#ifdef DEBUG
bool Isolate::IsDeferredHandle(Object** handle) {
// Each DeferredHandles instance keeps the handles to one job in the
- // parallel recompilation queue, containing a list of blocks. Each block
+ // concurrent recompilation queue, containing a list of blocks. Each block
// contains kHandleBlockSize handles except for the first block, which may
// not be fully filled.
// We iterate through all the blocks to see whether the argument handle
@@ -567,11 +556,11 @@ Handle<String> Isolate::StackTraceString() {
if (stack_trace_nesting_level_ == 0) {
stack_trace_nesting_level_++;
HeapStringAllocator allocator;
- StringStream::ClearMentionedObjectCache();
+ StringStream::ClearMentionedObjectCache(this);
StringStream accumulator(&allocator);
incomplete_message_ = &accumulator;
PrintStack(&accumulator);
- Handle<String> stack_trace = accumulator.ToString();
+ Handle<String> stack_trace = accumulator.ToString(this);
incomplete_message_ = NULL;
stack_trace_nesting_level_ = 0;
return stack_trace;
@@ -734,7 +723,9 @@ Handle<JSArray> Isolate::CaptureCurrentStackTrace(
factory()->InternalizeOneByteString(STATIC_ASCII_VECTOR("column"));
Handle<String> line_key =
factory()->InternalizeOneByteString(STATIC_ASCII_VECTOR("lineNumber"));
- Handle<String> script_key =
+ Handle<String> script_id_key =
+ factory()->InternalizeOneByteString(STATIC_ASCII_VECTOR("scriptId"));
+ Handle<String> script_name_key =
factory()->InternalizeOneByteString(STATIC_ASCII_VECTOR("scriptName"));
Handle<String> script_name_or_source_url_key =
factory()->InternalizeOneByteString(
@@ -790,11 +781,20 @@ Handle<JSArray> Isolate::CaptureCurrentStackTrace(
Handle<Smi>(Smi::FromInt(line_number + 1), this), NONE));
}
+ if (options & StackTrace::kScriptId) {
+ Handle<Smi> script_id(script->id(), this);
+ CHECK_NOT_EMPTY_HANDLE(this,
+ JSObject::SetLocalPropertyIgnoreAttributes(
+ stack_frame, script_id_key, script_id,
+ NONE));
+ }
+
if (options & StackTrace::kScriptName) {
Handle<Object> script_name(script->name(), this);
CHECK_NOT_EMPTY_HANDLE(this,
JSObject::SetLocalPropertyIgnoreAttributes(
- stack_frame, script_key, script_name, NONE));
+ stack_frame, script_name_key, script_name,
+ NONE));
}
if (options & StackTrace::kScriptNameOrSourceURL) {
@@ -860,13 +860,13 @@ void Isolate::PrintStack(FILE* out) {
allocator = preallocated_message_space_;
}
- StringStream::ClearMentionedObjectCache();
+ StringStream::ClearMentionedObjectCache(this);
StringStream accumulator(allocator);
incomplete_message_ = &accumulator;
PrintStack(&accumulator);
accumulator.OutputToFile(out);
InitializeLoggingAndCounters();
- accumulator.Log();
+ accumulator.Log(this);
incomplete_message_ = NULL;
stack_trace_nesting_level_ = 0;
if (preallocated_message_space_ == NULL) {
@@ -904,7 +904,7 @@ void Isolate::PrintStack(StringStream* accumulator) {
}
// The MentionedObjectCache is not GC-proof at the moment.
DisallowHeapAllocation no_gc;
- ASSERT(StringStream::IsMentionedObjectCacheClear());
+ ASSERT(StringStream::IsMentionedObjectCacheClear(this));
// Avoid printing anything if there are no frames.
if (c_entry_fp(thread_local_top()) == 0) return;
@@ -917,7 +917,7 @@ void Isolate::PrintStack(StringStream* accumulator) {
"\n==== Details ================================================\n\n");
PrintFrames(this, accumulator, StackFrame::DETAILS);
- accumulator->PrintMentionedObjectCache();
+ accumulator->PrintMentionedObjectCache(this);
accumulator->Add("=====================\n\n");
}
@@ -1358,7 +1358,8 @@ void Isolate::DoThrow(Object* exception, MessageLocation* location) {
// exception object to be set later must not be turned into a string.
if (exception_arg->IsJSObject() && !IsErrorObject(exception_arg)) {
bool failed = false;
- exception_arg = Execution::ToDetailString(exception_arg, &failed);
+ exception_arg =
+ Execution::ToDetailString(this, exception_arg, &failed);
if (failed) {
exception_arg = factory()->InternalizeOneByteString(
STATIC_ASCII_VECTOR("exception"));
@@ -1400,17 +1401,19 @@ void Isolate::DoThrow(Object* exception, MessageLocation* location) {
// to the console for easier debugging.
int line_number = GetScriptLineNumberSafe(location->script(),
location->start_pos());
- if (exception->IsString()) {
+ if (exception->IsString() && location->script()->name()->IsString()) {
OS::PrintError(
"Extension or internal compilation error: %s in %s at line %d.\n",
*String::cast(exception)->ToCString(),
*String::cast(location->script()->name())->ToCString(),
line_number + 1);
- } else {
+ } else if (location->script()->name()->IsString()) {
OS::PrintError(
"Extension or internal compilation error in %s at line %d.\n",
*String::cast(location->script()->name())->ToCString(),
line_number + 1);
+ } else {
+ OS::PrintError("Extension or internal compilation error.\n");
}
}
}
@@ -1703,15 +1706,6 @@ void Isolate::ThreadDataTable::Remove(PerIsolateThreadData* data) {
}
-void Isolate::ThreadDataTable::Remove(Isolate* isolate,
- ThreadId thread_id) {
- PerIsolateThreadData* data = Lookup(isolate, thread_id);
- if (data != NULL) {
- Remove(data);
- }
-}
-
-
void Isolate::ThreadDataTable::RemoveAllThreads(Isolate* isolate) {
PerIsolateThreadData* data = list_;
while (data != NULL) {
@@ -1748,11 +1742,7 @@ Isolate::Isolate()
compilation_cache_(NULL),
counters_(NULL),
code_range_(NULL),
- // Must be initialized early to allow v8::SetResourceConstraints calls.
- break_access_(OS::CreateMutex()),
debugger_initialized_(false),
- // Must be initialized early to allow v8::Debug calls.
- debugger_access_(OS::CreateMutex()),
logger_(NULL),
stats_table_(NULL),
stub_cache_(NULL),
@@ -1783,6 +1773,12 @@ Isolate::Isolate()
regexp_stack_(NULL),
date_cache_(NULL),
code_stub_interface_descriptors_(NULL),
+ // TODO(bmeurer): Initialized lazily because it depends on flags; can

+ // be fixed once the default isolate cleanup is done.
+ random_number_generator_(NULL),
+ is_memory_constrained_(false),
+ has_fatal_error_(false),
+ use_crankshaft_(true),
initialized_from_snapshot_(false),
cpu_profiler_(NULL),
heap_profiler_(NULL),
@@ -1791,7 +1787,6 @@ Isolate::Isolate()
optimizing_compiler_thread_(this),
marking_thread_(NULL),
sweeper_thread_(NULL),
- callback_table_(NULL),
stress_deopt_count_(0) {
id_ = NoBarrier_AtomicIncrement(&isolate_counter_, 1);
TRACE_ISOLATE(constructor);
@@ -1853,7 +1848,7 @@ void Isolate::TearDown() {
Deinit();
- { ScopedLock lock(process_wide_mutex_);
+ { LockGuard<Mutex> lock_guard(&process_wide_mutex_);
thread_data_table_->RemoveAllThreads(this);
}
@@ -1884,7 +1879,7 @@ void Isolate::Deinit() {
debugger()->UnloadDebugger();
#endif
- if (FLAG_parallel_recompilation) optimizing_compiler_thread_.Stop();
+ if (FLAG_concurrent_recompilation) optimizing_compiler_thread_.Stop();
if (FLAG_sweeper_threads > 0) {
for (int i = 0; i < FLAG_sweeper_threads; i++) {
@@ -2024,10 +2019,6 @@ Isolate::~Isolate() {
delete handle_scope_implementer_;
handle_scope_implementer_ = NULL;
- delete break_access_;
- break_access_ = NULL;
- delete debugger_access_;
- debugger_access_ = NULL;
delete compilation_cache_;
compilation_cache_ = NULL;
@@ -2061,8 +2052,8 @@ Isolate::~Isolate() {
delete external_reference_table_;
external_reference_table_ = NULL;
- delete callback_table_;
- callback_table_ = NULL;
+ delete random_number_generator_;
+ random_number_generator_ = NULL;
#ifdef ENABLE_DEBUGGER_SUPPORT
delete debugger_;
@@ -2129,7 +2120,7 @@ void Isolate::InitializeLoggingAndCounters() {
void Isolate::InitializeDebugger() {
#ifdef ENABLE_DEBUGGER_SUPPORT
- ScopedLock lock(debugger_access_);
+ LockGuard<RecursiveMutex> lock_guard(debugger_access());
if (NoBarrier_Load(&debugger_initialized_)) return;
InitializeLoggingAndCounters();
debug_ = new Debug(this);
@@ -2141,11 +2132,16 @@ void Isolate::InitializeDebugger() {
bool Isolate::Init(Deserializer* des) {
ASSERT(state_ != INITIALIZED);
- ASSERT(Isolate::Current() == this);
TRACE_ISOLATE(init);
stress_deopt_count_ = FLAG_deopt_every_n_times;
+ has_fatal_error_ = false;
+
+ use_crankshaft_ = FLAG_crankshaft
+ && !Serializer::enabled()
+ && CPU::SupportsCrankshaft();
+
if (function_entry_hook() != NULL) {
// When function entry hooking is in effect, we have to create the code
// stubs from scratch to get entry hooks, rather than loading the previously
@@ -2164,8 +2160,7 @@ bool Isolate::Init(Deserializer* des) {
memory_allocator_ = new MemoryAllocator(this);
code_range_ = new CodeRange(this);
- // Safe after setting Heap::isolate_, initializing StackGuard and
- // ensuring that Isolate::Current() == this.
+ // Safe after setting Heap::isolate_ and initializing StackGuard.
heap_.SetStackLimits();
#define ASSIGN_ELEMENT(CamelName, hacker_name) \
@@ -2177,7 +2172,7 @@ bool Isolate::Init(Deserializer* des) {
string_tracker_ = new StringTracker();
string_tracker_->isolate_ = this;
compilation_cache_ = new CompilationCache(this);
- transcendental_cache_ = new TranscendentalCache();
+ transcendental_cache_ = new TranscendentalCache(this);
keyed_lookup_cache_ = new KeyedLookupCache();
context_slot_cache_ = new ContextSlotCache();
descriptor_lookup_cache_ = new DescriptorLookupCache();
@@ -2238,7 +2233,7 @@ bool Isolate::Init(Deserializer* des) {
InitializeThreadLocal();
bootstrapper_->Initialize(create_heap_objects);
- builtins_.SetUp(create_heap_objects);
+ builtins_.SetUp(this, create_heap_objects);
// Only preallocate on the first initialization.
if (FLAG_preallocate_message_memory && preallocated_message_space_ == NULL) {
@@ -2262,7 +2257,7 @@ bool Isolate::Init(Deserializer* des) {
// If we are deserializing, read the state into the now-empty heap.
if (!create_heap_objects) {
- des->Deserialize();
+ des->Deserialize(this);
}
stub_cache_->Initialize();
@@ -2327,9 +2322,10 @@ bool Isolate::Init(Deserializer* des) {
ToBooleanStub::InitializeForIsolate(this);
ArrayConstructorStubBase::InstallDescriptors(this);
InternalArrayConstructorStubBase::InstallDescriptors(this);
+ FastNewClosureStub::InstallDescriptors(this);
}
- if (FLAG_parallel_recompilation) optimizing_compiler_thread_.Start();
+ if (FLAG_concurrent_recompilation) optimizing_compiler_thread_.Start();
if (FLAG_marking_threads > 0) {
marking_thread_ = new MarkingThread*[FLAG_marking_threads];
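
Worth noting in Init(): optimizer availability is now computed once per isolate and cached in use_crankshaft_ rather than re-derived at each query. The check, reduced to a free function (the three inputs stand for FLAG_crankshaft, Serializer::enabled() and CPU::SupportsCrankshaft()):

    // Cached at Isolate::Init() time; later callers ask
    // isolate->use_crankshaft() instead of repeating these probes.
    bool ComputeUseCrankshaft(bool flag_crankshaft,
                              bool serializer_enabled,
                              bool cpu_supports_crankshaft) {
      return flag_crankshaft && !serializer_enabled && cpu_supports_crankshaft;
    }
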
diff --git a/deps/v8/src/isolate.h b/deps/v8/src/isolate.h
index 401505afd..b826ec596 100644
--- a/deps/v8/src/isolate.h
+++ b/deps/v8/src/isolate.h
@@ -52,7 +52,6 @@ namespace v8 {
namespace internal {
class Bootstrapper;
-class CallbackTable;
class CodeGenerator;
class CodeRange;
struct CodeStubInterfaceDescriptor;
@@ -78,6 +77,7 @@ class NoAllocationStringAllocator;
class InnerPointerToCodeCache;
class MarkingThread;
class PreallocatedMemoryThread;
+class RandomNumberGenerator;
class RegExpStack;
class SaveContext;
class UnicodeCache;
@@ -321,7 +321,6 @@ class SystemThreadManager {
#ifdef ENABLE_DEBUGGER_SUPPORT
#define ISOLATE_DEBUGGER_INIT_LIST(V) \
- V(v8::Debug::EventCallback, debug_event_callback, NULL) \
V(DebuggerAgent*, debugger_agent_instance, NULL)
#else
@@ -361,7 +360,6 @@ typedef List<HeapObject*, PreallocatedStorageAllocationPolicy> DebugObjectCache;
V(byte*, assembler_spare_buffer, NULL) \
V(FatalErrorCallback, exception_behavior, NULL) \
V(AllowCodeGenerationFromStringsCallback, allow_code_gen_callback, NULL) \
- V(v8::Debug::MessageHandler, message_handler, NULL) \
/* To distinguish the function templates, so that we can find them in the */ \
/* function cache of the native context. */ \
V(int, next_serial_number, 0) \
@@ -544,10 +542,10 @@ class Isolate {
static void EnterDefaultIsolate();
// Mutex for serializing access to break control structures.
- Mutex* break_access() { return break_access_; }
+ RecursiveMutex* break_access() { return &break_access_; }
// Mutex for serializing access to debugger.
- Mutex* debugger_access() { return debugger_access_; }
+ RecursiveMutex* debugger_access() { return &debugger_access_; }
Address get_address_from_id(AddressId id);
@@ -1062,6 +1060,11 @@ class Isolate {
thread_local_top_.top_lookup_result_ = top;
}
+ bool IsDead() { return has_fatal_error_; }
+ void SignalFatalError() { has_fatal_error_ = true; }
+
+ bool use_crankshaft() const { return use_crankshaft_; }
+
bool initialized_from_snapshot() { return initialized_from_snapshot_; }
double time_millis_since_init() {
@@ -1111,13 +1114,6 @@ class Isolate {
return sweeper_thread_;
}
- CallbackTable* callback_table() {
- return callback_table_;
- }
- void set_callback_table(CallbackTable* callback_table) {
- callback_table_ = callback_table;
- }
-
int id() const { return static_cast<int>(id_); }
HStatistics* GetHStatistics();
@@ -1130,9 +1126,18 @@ class Isolate {
void* stress_deopt_count_address() { return &stress_deopt_count_; }
+ inline RandomNumberGenerator* random_number_generator();
+
// Given an address occupied by a live code object, return that object.
Object* FindCodeObject(Address a);
+ bool is_memory_constrained() const {
+ return is_memory_constrained_;
+ }
+ void set_is_memory_constrained(bool value) {
+ is_memory_constrained_ = value;
+ }
+
private:
Isolate();
@@ -1160,7 +1165,6 @@ class Isolate {
PerIsolateThreadData* Lookup(Isolate* isolate, ThreadId thread_id);
void Insert(PerIsolateThreadData* data);
- void Remove(Isolate* isolate, ThreadId thread_id);
void Remove(PerIsolateThreadData* data);
void RemoveAllThreads(Isolate* isolate);
@@ -1195,7 +1199,7 @@ class Isolate {
// This mutex protects highest_thread_id_, thread_data_table_ and
// default_isolate_.
- static Mutex* process_wide_mutex_;
+ static Mutex process_wide_mutex_;
static Thread::LocalStorageKey per_isolate_thread_data_key_;
static Thread::LocalStorageKey isolate_key_;
@@ -1211,10 +1215,6 @@ class Isolate {
static void SetIsolateThreadLocals(Isolate* isolate,
PerIsolateThreadData* data);
- // Allocate and insert PerIsolateThreadData into the ThreadDataTable
- // (regardless of whether such data already exists).
- PerIsolateThreadData* AllocatePerIsolateThreadData(ThreadId thread_id);
-
// Find the PerThread for this particular (isolate, thread) combination.
// If one does not yet exist, allocate a new one.
PerIsolateThreadData* FindOrAllocatePerThreadDataForThisThread();
@@ -1263,9 +1263,9 @@ class Isolate {
CompilationCache* compilation_cache_;
Counters* counters_;
CodeRange* code_range_;
- Mutex* break_access_;
+ RecursiveMutex break_access_;
Atomic32 debugger_initialized_;
- Mutex* debugger_access_;
+ RecursiveMutex debugger_access_;
Logger* logger_;
StackGuard stack_guard_;
StatsTable* stats_table_;
@@ -1309,6 +1309,14 @@ class Isolate {
DateCache* date_cache_;
unibrow::Mapping<unibrow::Ecma262Canonicalize> interp_canonicalize_mapping_;
CodeStubInterfaceDescriptor* code_stub_interface_descriptors_;
+ RandomNumberGenerator* random_number_generator_;
+ bool is_memory_constrained_;
+
+ // True if fatal error has been signaled for this isolate.
+ bool has_fatal_error_;
+
+ // True if we are using the Crankshaft optimizing compiler.
+ bool use_crankshaft_;
// True if this isolate was initialized from a snapshot.
bool initialized_from_snapshot_;
@@ -1363,7 +1371,6 @@ class Isolate {
OptimizingCompilerThread optimizing_compiler_thread_;
MarkingThread** marking_thread_;
SweeperThread** sweeper_thread_;
- CallbackTable* callback_table_;
// Counts deopt points if deopt_every_n_times is enabled.
unsigned int stress_deopt_count_;
@@ -1396,15 +1403,8 @@ class SaveContext BASE_EMBEDDED {
inline explicit SaveContext(Isolate* isolate);
~SaveContext() {
- if (context_.is_null()) {
- Isolate* isolate = Isolate::Current();
- isolate->set_context(NULL);
- isolate->set_save_context(prev_);
- } else {
- Isolate* isolate = context_->GetIsolate();
- isolate->set_context(*context_);
- isolate->set_save_context(prev_);
- }
+ isolate_->set_context(context_.is_null() ? NULL : *context_);
+ isolate_->set_save_context(prev_);
}
Handle<Context> context() { return context_; }
@@ -1416,10 +1416,8 @@ class SaveContext BASE_EMBEDDED {
}
private:
+ Isolate* isolate_;
Handle<Context> context_;
-#if __GNUC_VERSION__ >= 40100 && __GNUC_VERSION__ < 40300
- Handle<Context> dummy_;
-#endif
SaveContext* prev_;
Address c_entry_fp_;
};
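
SaveContext now captures its isolate at construction, so the destructor restores through the stored pointer instead of re-deriving it from Isolate::Current() or from the saved context, collapsing the old two-branch destructor. The RAII shape in miniature (raw pointers standing in for handles):

    struct ContextLike {};
    struct IsolateState { ContextLike* context; };

    class ScopedSaveContext {
     public:
      explicit ScopedSaveContext(IsolateState* isolate)
          : isolate_(isolate), saved_(isolate->context) {}
      // Restore via the stored isolate_: no TLS lookup, and a null saved
      // context simply restores null.
      ~ScopedSaveContext() { isolate_->context = saved_; }
     private:
      IsolateState* isolate_;
      ContextLike* saved_;
    };
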
@@ -1428,21 +1426,44 @@ class SaveContext BASE_EMBEDDED {
class AssertNoContextChange BASE_EMBEDDED {
#ifdef DEBUG
public:
- AssertNoContextChange() :
- scope_(Isolate::Current()),
- context_(Isolate::Current()->context(), Isolate::Current()) {
+ AssertNoContextChange()
+ : isolate_(Isolate::Current()),
+ context_(isolate_->context()) { }
+ ~AssertNoContextChange() {
+ ASSERT(isolate_->context() == *context_);
}
- ~AssertNoContextChange() {
- ASSERT(Isolate::Current()->context() == *context_);
+ private:
+ Isolate* isolate_;
+ Handle<Context> context_;
+#else
+ public:
+ AssertNoContextChange() { }
+#endif
+};
+
+
+// TODO(mstarzinger): Deprecate as soon as everything is handlified.
+class AssertNoContextChangeWithHandleScope BASE_EMBEDDED {
+#ifdef DEBUG
+ public:
+ AssertNoContextChangeWithHandleScope() :
+ isolate_(Isolate::Current()),
+ scope_(isolate_),
+ context_(isolate_->context(), isolate_) {
+ }
+
+ ~AssertNoContextChangeWithHandleScope() {
+ ASSERT(isolate_->context() == *context_);
}
private:
+ Isolate* isolate_;
HandleScope scope_;
Handle<Context> context_;
#else
public:
- AssertNoContextChange() { }
+ AssertNoContextChangeWithHandleScope() { }
#endif
};
@@ -1454,11 +1475,11 @@ class ExecutionAccess BASE_EMBEDDED {
}
~ExecutionAccess() { Unlock(isolate_); }
- static void Lock(Isolate* isolate) { isolate->break_access_->Lock(); }
- static void Unlock(Isolate* isolate) { isolate->break_access_->Unlock(); }
+ static void Lock(Isolate* isolate) { isolate->break_access()->Lock(); }
+ static void Unlock(Isolate* isolate) { isolate->break_access()->Unlock(); }
static bool TryLock(Isolate* isolate) {
- return isolate->break_access_->TryLock();
+ return isolate->break_access()->TryLock();
}
private:
@@ -1502,12 +1523,6 @@ class PostponeInterruptsScope BASE_EMBEDDED {
};
-// Temporary macros for accessing current isolate and its subobjects.
-// They provide better readability, especially when used a lot in the code.
-#define HEAP (v8::internal::Isolate::Current()->heap())
-#define ISOLATE (v8::internal::Isolate::Current())
-
-
// Tells whether the native context is marked with out of memory.
inline bool Context::has_out_of_memory() {
return native_context()->out_of_memory()->IsTrue();
@@ -1516,7 +1531,7 @@ inline bool Context::has_out_of_memory() {
// Mark the native context with out of memory.
inline void Context::mark_out_of_memory() {
- native_context()->set_out_of_memory(HEAP->true_value());
+ native_context()->set_out_of_memory(GetIsolate()->heap()->true_value());
}
diff --git a/deps/v8/src/json-stringifier.h b/deps/v8/src/json-stringifier.h
index ebfaf9928..0d17b356a 100644
--- a/deps/v8/src/json-stringifier.h
+++ b/deps/v8/src/json-stringifier.h
@@ -367,7 +367,7 @@ Handle<Object> BasicJsonStringifier::ApplyToJsonFunction(
Handle<Object> argv[] = { key };
bool has_exception = false;
HandleScope scope(isolate_);
- object = Execution::Call(fun, object, 1, argv, &has_exception);
+ object = Execution::Call(isolate_, fun, object, 1, argv, &has_exception);
// Return empty handle to signal an exception.
if (has_exception) return Handle<Object>::null();
return scope.CloseAndEscape(object);
@@ -470,7 +470,7 @@ BasicJsonStringifier::Result BasicJsonStringifier::SerializeGeneric(
Handle<Object> argv[] = { key, object };
bool has_exception = false;
Handle<Object> result =
- Execution::Call(builtin, object, 2, argv, &has_exception);
+ Execution::Call(isolate_, builtin, object, 2, argv, &has_exception);
if (has_exception) return EXCEPTION;
if (result->IsUndefined()) return UNCHANGED;
if (deferred_key) {
@@ -495,11 +495,13 @@ BasicJsonStringifier::Result BasicJsonStringifier::SerializeJSValue(
bool has_exception = false;
String* class_name = object->class_name();
if (class_name == isolate_->heap()->String_string()) {
- Handle<Object> value = Execution::ToString(object, &has_exception);
+ Handle<Object> value =
+ Execution::ToString(isolate_, object, &has_exception);
if (has_exception) return EXCEPTION;
SerializeString(Handle<String>::cast(value));
} else if (class_name == isolate_->heap()->Number_string()) {
- Handle<Object> value = Execution::ToNumber(object, &has_exception);
+ Handle<Object> value =
+ Execution::ToNumber(isolate_, object, &has_exception);
if (has_exception) return EXCEPTION;
if (value->IsSmi()) return SerializeSmi(Smi::cast(*value));
SerializeHeapNumber(Handle<HeapNumber>::cast(value));
@@ -600,12 +602,12 @@ BasicJsonStringifier::Result BasicJsonStringifier::SerializeJSArraySlow(
Handle<JSArray> object, int length) {
for (int i = 0; i < length; i++) {
if (i > 0) Append(',');
- Handle<Object> element = Object::GetElement(object, i);
+ Handle<Object> element = Object::GetElement(isolate_, object, i);
RETURN_IF_EMPTY_HANDLE_VALUE(isolate_, element, EXCEPTION);
if (element->IsUndefined()) {
AppendAscii("null");
} else {
- Result result = SerializeElement(object->GetIsolate(), element, i);
+ Result result = SerializeElement(isolate_, element, i);
if (result == SUCCESS) continue;
if (result == UNCHANGED) {
AppendAscii("null");
@@ -676,9 +678,10 @@ BasicJsonStringifier::Result BasicJsonStringifier::SerializeJSObject(
key_handle = factory_->NumberToString(Handle<Object>(key, isolate_));
uint32_t index;
if (key->IsSmi()) {
- property = Object::GetElement(object, Smi::cast(key)->value());
+ property = Object::GetElement(
+ isolate_, object, Smi::cast(key)->value());
} else if (key_handle->AsArrayIndex(&index)) {
- property = Object::GetElement(object, index);
+ property = Object::GetElement(isolate_, object, index);
} else {
property = GetProperty(isolate_, object, key_handle);
}
diff --git a/deps/v8/src/jsregexp.cc b/deps/v8/src/jsregexp.cc
index 666866ed3..3a3d91599 100644
--- a/deps/v8/src/jsregexp.cc
+++ b/deps/v8/src/jsregexp.cc
@@ -933,27 +933,25 @@ void RegExpText::AppendToText(RegExpText* text, Zone* zone) {
TextElement TextElement::Atom(RegExpAtom* atom) {
- TextElement result = TextElement(ATOM);
- result.data.u_atom = atom;
- return result;
+ return TextElement(ATOM, atom);
}
-TextElement TextElement::CharClass(
- RegExpCharacterClass* char_class) {
- TextElement result = TextElement(CHAR_CLASS);
- result.data.u_char_class = char_class;
- return result;
+TextElement TextElement::CharClass(RegExpCharacterClass* char_class) {
+ return TextElement(CHAR_CLASS, char_class);
}
-int TextElement::length() {
- if (text_type == ATOM) {
- return data.u_atom->length();
- } else {
- ASSERT(text_type == CHAR_CLASS);
- return 1;
+int TextElement::length() const {
+ switch (text_type()) {
+ case ATOM:
+ return atom()->length();
+
+ case CHAR_CLASS:
+ return 1;
}
+ UNREACHABLE();
+ return 0;
}
@@ -1087,8 +1085,8 @@ class RecursionCheck {
};
-static RegExpEngine::CompilationResult IrregexpRegExpTooBig() {
- return RegExpEngine::CompilationResult("RegExp too big");
+static RegExpEngine::CompilationResult IrregexpRegExpTooBig(Isolate* isolate) {
+ return RegExpEngine::CompilationResult(isolate, "RegExp too big");
}
@@ -1145,7 +1143,7 @@ RegExpEngine::CompilationResult RegExpCompiler::Assemble(
while (!work_list.is_empty()) {
work_list.RemoveLast()->Emit(this, &new_trace);
}
- if (reg_exp_too_big_) return IrregexpRegExpTooBig();
+ if (reg_exp_too_big_) return IrregexpRegExpTooBig(zone_->isolate());
Handle<HeapObject> code = macro_assembler_->GetCode(pattern);
heap->IncreaseTotalRegexpCodeGenerated(code->Size());
@@ -1871,7 +1869,7 @@ static void EmitUseLookupTable(
for (int i = j; i < kSize; i++) {
templ[i] = bit;
}
- Factory* factory = Isolate::Current()->factory();
+ Factory* factory = masm->zone()->isolate()->factory();
// TODO(erikcorry): Cache these.
Handle<ByteArray> ba = factory->NewByteArray(kSize, TENURED);
for (int i = 0; i < kSize; i++) {
@@ -2550,7 +2548,7 @@ void TextNode::GetQuickCheckDetails(QuickCheckDetails* details,
RegExpCompiler* compiler,
int characters_filled_in,
bool not_at_start) {
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = compiler->macro_assembler()->zone()->isolate();
ASSERT(characters_filled_in < details->characters());
int characters = details->characters();
int char_mask;
@@ -2561,8 +2559,8 @@ void TextNode::GetQuickCheckDetails(QuickCheckDetails* details,
}
for (int k = 0; k < elms_->length(); k++) {
TextElement elm = elms_->at(k);
- if (elm.text_type == TextElement::ATOM) {
- Vector<const uc16> quarks = elm.data.u_atom->data();
+ if (elm.text_type() == TextElement::ATOM) {
+ Vector<const uc16> quarks = elm.atom()->data();
for (int i = 0; i < characters && i < quarks.length(); i++) {
QuickCheckDetails::Position* pos =
details->positions(characters_filled_in);
@@ -2624,7 +2622,7 @@ void TextNode::GetQuickCheckDetails(QuickCheckDetails* details,
} else {
QuickCheckDetails::Position* pos =
details->positions(characters_filled_in);
- RegExpCharacterClass* tree = elm.data.u_char_class;
+ RegExpCharacterClass* tree = elm.char_class();
ZoneList<CharacterRange>* ranges = tree->ranges(zone());
if (tree->is_negated()) {
// A quick check uses multi-character mask and compare. There is no
@@ -2814,8 +2812,8 @@ RegExpNode* TextNode::FilterASCII(int depth, bool ignore_case) {
int element_count = elms_->length();
for (int i = 0; i < element_count; i++) {
TextElement elm = elms_->at(i);
- if (elm.text_type == TextElement::ATOM) {
- Vector<const uc16> quarks = elm.data.u_atom->data();
+ if (elm.text_type() == TextElement::ATOM) {
+ Vector<const uc16> quarks = elm.atom()->data();
for (int j = 0; j < quarks.length(); j++) {
uint16_t c = quarks[j];
if (c <= String::kMaxOneByteCharCode) continue;
@@ -2830,8 +2828,8 @@ RegExpNode* TextNode::FilterASCII(int depth, bool ignore_case) {
copy[j] = converted;
}
} else {
- ASSERT(elm.text_type == TextElement::CHAR_CLASS);
- RegExpCharacterClass* cc = elm.data.u_char_class;
+ ASSERT(elm.text_type() == TextElement::CHAR_CLASS);
+ RegExpCharacterClass* cc = elm.char_class();
ZoneList<CharacterRange>* ranges = cc->ranges(zone());
if (!CharacterRange::IsCanonical(ranges)) {
CharacterRange::Canonicalize(ranges);
@@ -3248,20 +3246,20 @@ void TextNode::TextEmitPass(RegExpCompiler* compiler,
Trace* trace,
bool first_element_checked,
int* checked_up_to) {
- Isolate* isolate = Isolate::Current();
RegExpMacroAssembler* assembler = compiler->macro_assembler();
+ Isolate* isolate = assembler->zone()->isolate();
bool ascii = compiler->ascii();
Label* backtrack = trace->backtrack();
QuickCheckDetails* quick_check = trace->quick_check_performed();
int element_count = elms_->length();
for (int i = preloaded ? 0 : element_count - 1; i >= 0; i--) {
TextElement elm = elms_->at(i);
- int cp_offset = trace->cp_offset() + elm.cp_offset;
- if (elm.text_type == TextElement::ATOM) {
- Vector<const uc16> quarks = elm.data.u_atom->data();
+ int cp_offset = trace->cp_offset() + elm.cp_offset();
+ if (elm.text_type() == TextElement::ATOM) {
+ Vector<const uc16> quarks = elm.atom()->data();
for (int j = preloaded ? 0 : quarks.length() - 1; j >= 0; j--) {
if (first_element_checked && i == 0 && j == 0) continue;
- if (DeterminedAlready(quick_check, elm.cp_offset + j)) continue;
+ if (DeterminedAlready(quick_check, elm.cp_offset() + j)) continue;
EmitCharacterFunction* emit_function = NULL;
switch (pass) {
case NON_ASCII_MATCH:
@@ -3295,11 +3293,11 @@ void TextNode::TextEmitPass(RegExpCompiler* compiler,
}
}
} else {
- ASSERT_EQ(elm.text_type, TextElement::CHAR_CLASS);
+ ASSERT_EQ(TextElement::CHAR_CLASS, elm.text_type());
if (pass == CHARACTER_CLASS_MATCH) {
if (first_element_checked && i == 0) continue;
- if (DeterminedAlready(quick_check, elm.cp_offset)) continue;
- RegExpCharacterClass* cc = elm.data.u_char_class;
+ if (DeterminedAlready(quick_check, elm.cp_offset())) continue;
+ RegExpCharacterClass* cc = elm.char_class();
EmitCharClass(assembler,
cc,
ascii,
@@ -3317,12 +3315,8 @@ void TextNode::TextEmitPass(RegExpCompiler* compiler,
int TextNode::Length() {
TextElement elm = elms_->last();
- ASSERT(elm.cp_offset >= 0);
- if (elm.text_type == TextElement::ATOM) {
- return elm.cp_offset + elm.data.u_atom->data().length();
- } else {
- return elm.cp_offset + 1;
- }
+ ASSERT(elm.cp_offset() >= 0);
+ return elm.cp_offset() + elm.length();
}
@@ -3424,8 +3418,8 @@ void TextNode::MakeCaseIndependent(bool is_ascii) {
int element_count = elms_->length();
for (int i = 0; i < element_count; i++) {
TextElement elm = elms_->at(i);
- if (elm.text_type == TextElement::CHAR_CLASS) {
- RegExpCharacterClass* cc = elm.data.u_char_class;
+ if (elm.text_type() == TextElement::CHAR_CLASS) {
+ RegExpCharacterClass* cc = elm.char_class();
// None of the standard character classes is different in the case
// independent case and it slows us down if we don't know that.
if (cc->is_standard(zone())) continue;
@@ -3441,11 +3435,7 @@ void TextNode::MakeCaseIndependent(bool is_ascii) {
int TextNode::GreedyLoopTextLength() {
TextElement elm = elms_->at(elms_->length() - 1);
- if (elm.text_type == TextElement::CHAR_CLASS) {
- return elm.cp_offset + 1;
- } else {
- return elm.cp_offset + elm.data.u_atom->data().length();
- }
+ return elm.cp_offset() + elm.length();
}
@@ -3453,8 +3443,8 @@ RegExpNode* TextNode::GetSuccessorOfOmnivorousTextNode(
RegExpCompiler* compiler) {
if (elms_->length() != 1) return NULL;
TextElement elm = elms_->at(0);
- if (elm.text_type != TextElement::CHAR_CLASS) return NULL;
- RegExpCharacterClass* node = elm.data.u_char_class;
+ if (elm.text_type() != TextElement::CHAR_CLASS) return NULL;
+ RegExpCharacterClass* node = elm.char_class();
ZoneList<CharacterRange>* ranges = node->ranges(zone());
if (!CharacterRange::IsCanonical(ranges)) {
CharacterRange::Canonicalize(ranges);
@@ -3830,7 +3820,7 @@ bool BoyerMooreLookahead::EmitSkipInstructions(RegExpMacroAssembler* masm) {
return true;
}
- Factory* factory = Isolate::Current()->factory();
+ Factory* factory = masm->zone()->isolate()->factory();
Handle<ByteArray> boolean_skip_table = factory->NewByteArray(kSize, TENURED);
int skip_distance = GetSkipTable(
min_lookahead, max_lookahead, boolean_skip_table);
@@ -4528,13 +4518,13 @@ void DotPrinter::VisitText(TextNode* that) {
for (int i = 0; i < that->elements()->length(); i++) {
if (i > 0) stream()->Add(" ");
TextElement elm = that->elements()->at(i);
- switch (elm.text_type) {
+ switch (elm.text_type()) {
case TextElement::ATOM: {
- stream()->Add("'%w'", elm.data.u_atom->data());
+ stream()->Add("'%w'", elm.atom()->data());
break;
}
case TextElement::CHAR_CLASS: {
- RegExpCharacterClass* node = elm.data.u_char_class;
+ RegExpCharacterClass* node = elm.char_class();
stream()->Add("[");
if (node->is_negated())
stream()->Add("^");
@@ -5302,7 +5292,7 @@ void CharacterRange::Split(ZoneList<CharacterRange>* base,
void CharacterRange::AddCaseEquivalents(ZoneList<CharacterRange>* ranges,
bool is_ascii,
Zone* zone) {
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = zone->isolate();
uc16 bottom = from();
uc16 top = to();
if (is_ascii && !RangeContainsLatin1Equivalents(*this)) {
@@ -5690,7 +5680,7 @@ OutSet* DispatchTable::Get(uc16 value) {
void Analysis::EnsureAnalyzed(RegExpNode* that) {
- StackLimitCheck check(Isolate::Current());
+ StackLimitCheck check(that->zone()->isolate());
if (check.HasOverflowed()) {
fail("Stack overflow");
return;
@@ -5716,12 +5706,8 @@ void TextNode::CalculateOffsets() {
int cp_offset = 0;
for (int i = 0; i < element_count; i++) {
TextElement& elm = elements()->at(i);
- elm.cp_offset = cp_offset;
- if (elm.text_type == TextElement::ATOM) {
- cp_offset += elm.data.u_atom->data().length();
- } else {
- cp_offset++;
- }
+ elm.set_cp_offset(cp_offset);
+ cp_offset += elm.length();
}
}
@@ -5837,8 +5823,8 @@ void TextNode::FillInBMInfo(int initial_offset,
return;
}
TextElement text = elements()->at(i);
- if (text.text_type == TextElement::ATOM) {
- RegExpAtom* atom = text.data.u_atom;
+ if (text.text_type() == TextElement::ATOM) {
+ RegExpAtom* atom = text.atom();
for (int j = 0; j < atom->length(); j++, offset++) {
if (offset >= bm->length()) {
if (initial_offset == 0) set_bm_info(not_at_start, bm);
@@ -5848,7 +5834,7 @@ void TextNode::FillInBMInfo(int initial_offset,
if (bm->compiler()->ignore_case()) {
unibrow::uchar chars[unibrow::Ecma262UnCanonicalize::kMaxWidth];
int length = GetCaseIndependentLetters(
- ISOLATE,
+ Isolate::Current(),
character,
bm->max_char() == String::kMaxOneByteCharCode,
chars);
@@ -5860,8 +5846,8 @@ void TextNode::FillInBMInfo(int initial_offset,
}
}
} else {
- ASSERT(text.text_type == TextElement::CHAR_CLASS);
- RegExpCharacterClass* char_class = text.data.u_char_class;
+ ASSERT_EQ(TextElement::CHAR_CLASS, text.text_type());
+ RegExpCharacterClass* char_class = text.char_class();
ZoneList<CharacterRange>* ranges = char_class->ranges(zone());
if (char_class->is_negated()) {
bm->SetAll(offset);
@@ -5973,14 +5959,14 @@ void DispatchTableConstructor::AddInverse(ZoneList<CharacterRange>* ranges) {
void DispatchTableConstructor::VisitText(TextNode* that) {
TextElement elm = that->elements()->at(0);
- switch (elm.text_type) {
+ switch (elm.text_type()) {
case TextElement::ATOM: {
- uc16 c = elm.data.u_atom->data()[0];
+ uc16 c = elm.atom()->data()[0];
AddRange(CharacterRange(c, c));
break;
}
case TextElement::CHAR_CLASS: {
- RegExpCharacterClass* tree = elm.data.u_char_class;
+ RegExpCharacterClass* tree = elm.char_class();
ZoneList<CharacterRange>* ranges = tree->ranges(that->zone());
if (tree->is_negated()) {
AddInverse(ranges);
@@ -6013,7 +5999,7 @@ RegExpEngine::CompilationResult RegExpEngine::Compile(
bool is_ascii,
Zone* zone) {
if ((data->capture_count + 1) * 2 - 1 > RegExpMacroAssembler::kMaxRegister) {
- return IrregexpRegExpTooBig();
+ return IrregexpRegExpTooBig(zone->isolate());
}
RegExpCompiler compiler(data->capture_count, ignore_case, is_ascii, zone);
@@ -6077,7 +6063,7 @@ RegExpEngine::CompilationResult RegExpEngine::Compile(
analysis.EnsureAnalyzed(node);
if (analysis.has_failed()) {
const char* error_message = analysis.error_message();
- return CompilationResult(error_message);
+ return CompilationResult(zone->isolate(), error_message);
}
// Create the correct assembler for the architecture.
diff --git a/deps/v8/src/jsregexp.h b/deps/v8/src/jsregexp.h
index 20c0ac416..dfd415d5a 100644
--- a/deps/v8/src/jsregexp.h
+++ b/deps/v8/src/jsregexp.h
@@ -426,20 +426,41 @@ FOR_EACH_REG_EXP_TREE_TYPE(FORWARD_DECLARE)
#undef FORWARD_DECLARE
-class TextElement {
+class TextElement V8_FINAL BASE_EMBEDDED {
public:
- enum TextType {UNINITIALIZED, ATOM, CHAR_CLASS};
- TextElement() : text_type(UNINITIALIZED) { }
- explicit TextElement(TextType t) : text_type(t), cp_offset(-1) { }
+ enum TextType {
+ ATOM,
+ CHAR_CLASS
+ };
+
static TextElement Atom(RegExpAtom* atom);
static TextElement CharClass(RegExpCharacterClass* char_class);
- int length();
- TextType text_type;
- union {
- RegExpAtom* u_atom;
- RegExpCharacterClass* u_char_class;
- } data;
- int cp_offset;
+
+ int cp_offset() const { return cp_offset_; }
+ void set_cp_offset(int cp_offset) { cp_offset_ = cp_offset; }
+ int length() const;
+
+ TextType text_type() const { return text_type_; }
+
+ RegExpTree* tree() const { return tree_; }
+
+ RegExpAtom* atom() const {
+ ASSERT(text_type() == ATOM);
+ return reinterpret_cast<RegExpAtom*>(tree());
+ }
+
+ RegExpCharacterClass* char_class() const {
+ ASSERT(text_type() == CHAR_CLASS);
+ return reinterpret_cast<RegExpCharacterClass*>(tree());
+ }
+
+ private:
+ TextElement(TextType text_type, RegExpTree* tree)
+ : cp_offset_(-1), text_type_(text_type), tree_(tree) {}
+
+ int cp_offset_;
+ TextType text_type_;
+ RegExpTree* tree_;
};
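
The TextElement rewrite is a standard hardening move: a public tag-plus-union becomes a private tag plus a common base pointer with assert-checked downcast accessors, and the UNINITIALIZED state disappears because the private constructor always supplies both fields. The pattern in isolation (using real inheritance and static_cast where V8 keeps reinterpret_cast):

    #include <cassert>

    struct Tree {};
    struct Atom : Tree { int length; };
    struct CharClass : Tree {};

    class Element {
     public:
      enum Type { ATOM, CHAR_CLASS };
      static Element MakeAtom(Atom* a) { return Element(ATOM, a); }
      static Element MakeCharClass(CharClass* c) {
        return Element(CHAR_CLASS, c);
      }
      Type type() const { return type_; }
      Atom* atom() const {
        assert(type_ == ATOM);              // ASSERT in the V8 version
        return static_cast<Atom*>(tree_);
      }
      CharClass* char_class() const {
        assert(type_ == CHAR_CLASS);
        return static_cast<CharClass*>(tree_);
      }
     private:
      Element(Type type, Tree* tree) : type_(type), tree_(tree) {}
      Type type_;
      Tree* tree_;
    };
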
@@ -1594,9 +1615,9 @@ struct RegExpCompileData {
class RegExpEngine: public AllStatic {
public:
struct CompilationResult {
- explicit CompilationResult(const char* error_message)
+ CompilationResult(Isolate* isolate, const char* error_message)
: error_message(error_message),
- code(HEAP->the_hole_value()),
+ code(isolate->heap()->the_hole_value()),
num_registers(0) {}
CompilationResult(Object* code, int registers)
: error_message(NULL),
diff --git a/deps/v8/src/lazy-instance.h b/deps/v8/src/lazy-instance.h
index 9d68b8cac..fc03f4d12 100644
--- a/deps/v8/src/lazy-instance.h
+++ b/deps/v8/src/lazy-instance.h
@@ -91,12 +91,13 @@
#ifndef V8_LAZY_INSTANCE_H_
#define V8_LAZY_INSTANCE_H_
+#include "checks.h"
#include "once.h"
namespace v8 {
namespace internal {
-#define LAZY_STATIC_INSTANCE_INITIALIZER { V8_ONCE_INIT, {} }
+#define LAZY_STATIC_INSTANCE_INITIALIZER { V8_ONCE_INIT, { {} } }
#define LAZY_DYNAMIC_INSTANCE_INITIALIZER { V8_ONCE_INIT, 0 }
// Default to static mode.
@@ -111,17 +112,15 @@ struct LeakyInstanceTrait {
// Traits that define how an instance is allocated and accessed.
-// TODO(kalmard): __alignof__ is only defined for GCC > 4.2. Fix alignment issue
-// on MIPS with other compilers.
-#if defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ > 2))
-#define LAZY_ALIGN(x) __attribute__((aligned(__alignof__(x))))
-#else
-#define LAZY_ALIGN(x)
-#endif
template <typename T>
struct StaticallyAllocatedInstanceTrait {
- typedef char StorageType[sizeof(T)] LAZY_ALIGN(T);
+ // 16-byte alignment fallback to be on the safe side here.
+ struct V8_ALIGNAS(T, 16) StorageType {
+ char x[sizeof(T)];
+ };
+
+ STATIC_ASSERT(V8_ALIGNOF(StorageType) >= V8_ALIGNOF(T));
static T* MutableInstance(StorageType* storage) {
return reinterpret_cast<T*>(storage);
@@ -133,8 +132,6 @@ struct StaticallyAllocatedInstanceTrait {
}
};
-#undef LAZY_ALIGN
-
template <typename T>
struct DynamicallyAllocatedInstanceTrait {
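
The V8_ALIGNAS change above guarantees the raw byte buffer is at least as aligned as T (with a 16-byte fallback for compilers lacking alignment intrinsics). A compilable C++11 analogue using the standard alignas keyword, with hypothetical names; without the alignment annotation, a plain char array is only 1-byte aligned, and constructing an over-aligned T into it would be undefined behavior:

    #include <new>

    template <typename T>
    struct StaticStorage {
      alignas(T) unsigned char bytes[sizeof(T)];
      T* MutableInstance() { return reinterpret_cast<T*>(bytes); }
      T* Construct() { return new (bytes) T(); }  // placement-new into storage
    };
    static_assert(alignof(StaticStorage<double>) >= alignof(double),
                  "storage must be at least as aligned as T");
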
diff --git a/deps/v8/src/lithium-allocator.cc b/deps/v8/src/lithium-allocator.cc
index 2e2f80255..3c5abd198 100644
--- a/deps/v8/src/lithium-allocator.cc
+++ b/deps/v8/src/lithium-allocator.cc
@@ -2189,7 +2189,7 @@ LAllocatorPhase::~LAllocatorPhase() {
if (FLAG_hydrogen_stats) {
unsigned size = allocator_->zone()->allocation_size() -
allocator_zone_start_allocation_size_;
- isolate()->GetHStatistics()->SaveTiming(name(), 0, size);
+ isolate()->GetHStatistics()->SaveTiming(name(), TimeDelta(), size);
}
if (ShouldProduceTraceOutput()) {
diff --git a/deps/v8/src/lithium.cc b/deps/v8/src/lithium.cc
index 790a2182b..fa837c7ed 100644
--- a/deps/v8/src/lithium.cc
+++ b/deps/v8/src/lithium.cc
@@ -461,12 +461,10 @@ Handle<Code> LChunk::Codegen() {
CodeGenerator::MakeCodeEpilogue(&assembler, flags, info());
generator.FinishCode(code);
code->set_is_crankshafted(true);
- if (!code.is_null()) {
- void* jit_handler_data =
- assembler.positions_recorder()->DetachJITHandlerData();
- LOG_CODE_EVENT(info()->isolate(),
- CodeEndLinePosInfoRecordEvent(*code, jit_handler_data));
- }
+ void* jit_handler_data =
+ assembler.positions_recorder()->DetachJITHandlerData();
+ LOG_CODE_EVENT(info()->isolate(),
+ CodeEndLinePosInfoRecordEvent(*code, jit_handler_data));
CodeGenerator::PrintCode(code, info());
return code;
diff --git a/deps/v8/src/lithium.h b/deps/v8/src/lithium.h
index f77391648..fd50ee8f8 100644
--- a/deps/v8/src/lithium.h
+++ b/deps/v8/src/lithium.h
@@ -43,7 +43,7 @@ namespace internal {
V(DoubleRegister, DOUBLE_REGISTER)
-class LOperand: public ZoneObject {
+class LOperand : public ZoneObject {
public:
enum Kind {
INVALID,
@@ -90,7 +90,7 @@ class LOperand: public ZoneObject {
};
-class LUnallocated: public LOperand {
+class LUnallocated : public LOperand {
public:
enum BasicPolicy {
FIXED_SLOT,
@@ -271,7 +271,7 @@ class LUnallocated: public LOperand {
};
-class LMoveOperands BASE_EMBEDDED {
+class LMoveOperands V8_FINAL BASE_EMBEDDED {
public:
LMoveOperands(LOperand* source, LOperand* destination)
: source_(source), destination_(destination) {
@@ -317,7 +317,7 @@ class LMoveOperands BASE_EMBEDDED {
};
-class LConstantOperand: public LOperand {
+class LConstantOperand V8_FINAL : public LOperand {
public:
static LConstantOperand* Create(int index, Zone* zone) {
ASSERT(index >= 0);
@@ -342,7 +342,7 @@ class LConstantOperand: public LOperand {
};
-class LArgument: public LOperand {
+class LArgument V8_FINAL : public LOperand {
public:
explicit LArgument(int index) : LOperand(ARGUMENT, index) { }
@@ -353,7 +353,7 @@ class LArgument: public LOperand {
};
-class LStackSlot: public LOperand {
+class LStackSlot V8_FINAL : public LOperand {
public:
static LStackSlot* Create(int index, Zone* zone) {
ASSERT(index >= 0);
@@ -378,7 +378,7 @@ class LStackSlot: public LOperand {
};
-class LDoubleStackSlot: public LOperand {
+class LDoubleStackSlot V8_FINAL : public LOperand {
public:
static LDoubleStackSlot* Create(int index, Zone* zone) {
ASSERT(index >= 0);
@@ -403,7 +403,7 @@ class LDoubleStackSlot: public LOperand {
};
-class LRegister: public LOperand {
+class LRegister V8_FINAL : public LOperand {
public:
static LRegister* Create(int index, Zone* zone) {
ASSERT(index >= 0);
@@ -428,7 +428,7 @@ class LRegister: public LOperand {
};
-class LDoubleRegister: public LOperand {
+class LDoubleRegister V8_FINAL : public LOperand {
public:
static LDoubleRegister* Create(int index, Zone* zone) {
ASSERT(index >= 0);
@@ -453,7 +453,7 @@ class LDoubleRegister: public LOperand {
};
-class LParallelMove : public ZoneObject {
+class LParallelMove V8_FINAL : public ZoneObject {
public:
explicit LParallelMove(Zone* zone) : move_operands_(4, zone) { }
@@ -474,7 +474,7 @@ class LParallelMove : public ZoneObject {
};
-class LPointerMap: public ZoneObject {
+class LPointerMap V8_FINAL : public ZoneObject {
public:
explicit LPointerMap(int position, Zone* zone)
: pointer_operands_(8, zone),
@@ -510,7 +510,7 @@ class LPointerMap: public ZoneObject {
};
-class LEnvironment: public ZoneObject {
+class LEnvironment V8_FINAL : public ZoneObject {
public:
LEnvironment(Handle<JSFunction> closure,
FrameType frame_type,
@@ -655,7 +655,7 @@ class LEnvironment: public ZoneObject {
// Iterates over the non-null, non-constant operands in an environment.
-class ShallowIterator BASE_EMBEDDED {
+class ShallowIterator V8_FINAL BASE_EMBEDDED {
public:
explicit ShallowIterator(LEnvironment* env)
: env_(env),
@@ -699,7 +699,7 @@ class ShallowIterator BASE_EMBEDDED {
// Iterator for non-null, non-constant operands incl. outer environments.
-class DeepIterator BASE_EMBEDDED {
+class DeepIterator V8_FINAL BASE_EMBEDDED {
public:
explicit DeepIterator(LEnvironment* env)
: current_iterator_(env) {
@@ -736,7 +736,7 @@ class LLabel;
// Superclass providing data and behavior common to all the
// arch-specific LPlatformChunk classes.
-class LChunk: public ZoneObject {
+class LChunk : public ZoneObject {
public:
static LChunk* NewChunk(HGraph* graph);
diff --git a/deps/v8/src/liveedit.cc b/deps/v8/src/liveedit.cc
index b260c81f8..feaafd471 100644
--- a/deps/v8/src/liveedit.cc
+++ b/deps/v8/src/liveedit.cc
@@ -631,8 +631,8 @@ static Handle<Object> UnwrapJSValue(Handle<JSValue> jsValue) {
// Wraps any object into an OpaqueReference that will hide the object
// from JavaScript.
-static Handle<JSValue> WrapInJSValue(Handle<Object> object) {
- Isolate* isolate = Isolate::Current();
+static Handle<JSValue> WrapInJSValue(Handle<HeapObject> object) {
+ Isolate* isolate = object->GetIsolate();
Handle<JSFunction> constructor = isolate->opaque_reference_function();
Handle<JSValue> result =
Handle<JSValue>::cast(isolate->factory()->NewJSObject(constructor));
@@ -662,8 +662,8 @@ static int GetArrayLength(Handle<JSArray> array) {
template<typename S>
class JSArrayBasedStruct {
public:
- static S Create() {
- Factory* factory = Isolate::Current()->factory();
+ static S Create(Isolate* isolate) {
+ Factory* factory = isolate->factory();
Handle<JSArray> array = factory->NewJSArray(S::kSize_);
return S(array);
}
@@ -691,7 +691,7 @@ class JSArrayBasedStruct {
Handle<Smi>(Smi::FromInt(value), isolate()));
}
Object* GetField(int field_position) {
- return array_->GetElementNoExceptionThrown(field_position);
+ return array_->GetElementNoExceptionThrown(isolate(), field_position);
}
int GetSmiValueField(int field_position) {
Object* res = GetField(field_position);
@@ -724,7 +724,7 @@ class FunctionInfoWrapper : public JSArrayBasedStruct<FunctionInfoWrapper> {
this->SetSmiValueField(kParentIndexOffset_, parent_index);
}
void SetFunctionCode(Handle<Code> function_code,
- Handle<Object> code_scope_info) {
+ Handle<HeapObject> code_scope_info) {
Handle<JSValue> code_wrapper = WrapInJSValue(function_code);
this->SetField(kCodeOffset_, code_wrapper);
@@ -788,7 +788,8 @@ class SharedInfoWrapper : public JSArrayBasedStruct<SharedInfoWrapper> {
public:
static bool IsInstance(Handle<JSArray> array) {
return array->length() == Smi::FromInt(kSize_) &&
- array->GetElementNoExceptionThrown(kSharedInfoOffset_)->IsJSValue();
+ array->GetElementNoExceptionThrown(
+ array->GetIsolate(), kSharedInfoOffset_)->IsJSValue();
}
explicit SharedInfoWrapper(Handle<JSArray> array)
@@ -832,7 +833,7 @@ class FunctionInfoListener {
void FunctionStarted(FunctionLiteral* fun) {
HandleScope scope(isolate());
- FunctionInfoWrapper info = FunctionInfoWrapper::Create();
+ FunctionInfoWrapper info = FunctionInfoWrapper::Create(isolate());
info.SetInitialProperties(fun->name(), fun->start_position(),
fun->end_position(), fun->parameter_count(),
fun->materialized_literal_count(),
@@ -846,7 +847,8 @@ class FunctionInfoListener {
HandleScope scope(isolate());
FunctionInfoWrapper info =
FunctionInfoWrapper::cast(
- result_->GetElementNoExceptionThrown(current_parent_index_));
+ result_->GetElementNoExceptionThrown(
+ isolate(), current_parent_index_));
current_parent_index_ = info.GetParentIndex();
}
@@ -855,10 +857,10 @@ class FunctionInfoListener {
void FunctionCode(Handle<Code> function_code) {
FunctionInfoWrapper info =
FunctionInfoWrapper::cast(
- result_->GetElementNoExceptionThrown(current_parent_index_));
+ result_->GetElementNoExceptionThrown(
+ isolate(), current_parent_index_));
info.SetFunctionCode(function_code,
- Handle<Object>(isolate()->heap()->null_value(),
- isolate()));
+ Handle<HeapObject>(isolate()->heap()->null_value()));
}
// Saves full information about a function: its code, its scope info
@@ -870,9 +872,10 @@ class FunctionInfoListener {
}
FunctionInfoWrapper info =
FunctionInfoWrapper::cast(
- result_->GetElementNoExceptionThrown(current_parent_index_));
+ result_->GetElementNoExceptionThrown(
+ isolate(), current_parent_index_));
info.SetFunctionCode(Handle<Code>(shared->code()),
- Handle<Object>(shared->scope_info(), isolate()));
+ Handle<HeapObject>(shared->scope_info()));
info.SetSharedFunctionInfo(shared);
Handle<Object> scope_info_list(SerializeFunctionScope(scope, zone),
@@ -935,7 +938,7 @@ class FunctionInfoListener {
JSArray* LiveEdit::GatherCompileInfo(Handle<Script> script,
Handle<String> source) {
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = script->GetIsolate();
FunctionInfoListener listener(isolate);
Handle<Object> original_source =
@@ -1001,12 +1004,14 @@ JSArray* LiveEdit::GatherCompileInfo(Handle<Script> script,
void LiveEdit::WrapSharedFunctionInfos(Handle<JSArray> array) {
- HandleScope scope(array->GetIsolate());
+ Isolate* isolate = array->GetIsolate();
+ HandleScope scope(isolate);
int len = GetArrayLength(array);
for (int i = 0; i < len; i++) {
Handle<SharedFunctionInfo> info(
- SharedFunctionInfo::cast(array->GetElementNoExceptionThrown(i)));
- SharedInfoWrapper info_wrapper = SharedInfoWrapper::Create();
+ SharedFunctionInfo::cast(
+ array->GetElementNoExceptionThrown(isolate, i)));
+ SharedInfoWrapper info_wrapper = SharedInfoWrapper::Create(isolate);
Handle<String> name_handle(String::cast(info->name()));
info_wrapper.SetProperties(name_handle, info->start_position(),
info->end_position(), info);
@@ -1228,7 +1233,9 @@ static bool IsInlined(JSFunction* function, SharedFunctionInfo* candidate) {
DeoptimizationInputData* data =
DeoptimizationInputData::cast(function->code()->deoptimization_data());
- if (data == HEAP->empty_fixed_array()) return false;
+ if (data == function->GetIsolate()->heap()->empty_fixed_array()) {
+ return false;
+ }
FixedArray* literals = data->LiteralArray();
@@ -1242,34 +1249,48 @@ static bool IsInlined(JSFunction* function, SharedFunctionInfo* candidate) {
}
-class DependentFunctionFilter : public OptimizedFunctionFilter {
+// Marks code that shares the same shared function info or has inlined
+// code that shares the same function info.
+class DependentFunctionMarker: public OptimizedFunctionVisitor {
public:
- explicit DependentFunctionFilter(
- SharedFunctionInfo* function_info)
- : function_info_(function_info) {}
-
- virtual bool TakeFunction(JSFunction* function) {
- return (function->shared() == function_info_ ||
- IsInlined(function, function_info_));
+ SharedFunctionInfo* shared_info_;
+ bool found_;
+
+ explicit DependentFunctionMarker(SharedFunctionInfo* shared_info)
+ : shared_info_(shared_info), found_(false) { }
+
+ virtual void EnterContext(Context* context) { } // Don't care.
+ virtual void LeaveContext(Context* context) { } // Don't care.
+ virtual void VisitFunction(JSFunction* function) {
+ // It should be guaranteed by the iterator that everything is optimized.
+ ASSERT(function->code()->kind() == Code::OPTIMIZED_FUNCTION);
+ if (shared_info_ == function->shared() ||
+ IsInlined(function, shared_info_)) {
+ // Mark the code for deoptimization.
+ function->code()->set_marked_for_deoptimization(true);
+ found_ = true;
+ }
}
-
- private:
- SharedFunctionInfo* function_info_;
};
static void DeoptimizeDependentFunctions(SharedFunctionInfo* function_info) {
DisallowHeapAllocation no_allocation;
+ DependentFunctionMarker marker(function_info);
+ // TODO(titzer): need to traverse all optimized code to find OSR code here.
+ Deoptimizer::VisitAllOptimizedFunctions(function_info->GetIsolate(), &marker);
- DependentFunctionFilter filter(function_info);
- Deoptimizer::DeoptimizeAllFunctionsWith(function_info->GetIsolate(), &filter);
+ if (marker.found_) {
+ // Only go through with the deoptimization if something was found.
+ Deoptimizer::DeoptimizeMarkedCode(function_info->GetIsolate());
+ }
}
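
The new DependentFunctionMarker switches from a filter that selects functions for immediate deoptimization to a visitor that only marks code, so the expensive deoptimization pass runs only if something was actually found. A standalone sketch of that mark-then-sweep protocol, with simplified types standing in for the V8 API:

    #include <vector>

    struct Code { bool marked_for_deoptimization = false; };
    struct Function { Code* code; int shared_id; };

    // Pass 1: mark everything depending on shared_id; report whether any hit.
    bool MarkDependents(std::vector<Function>& fns, int shared_id) {
      bool found = false;
      for (Function& f : fns) {
        if (f.shared_id == shared_id) {
          f.code->marked_for_deoptimization = true;
          found = true;
        }
      }
      return found;
    }

    // Pass 2 runs only when pass 1 found something, mirroring marker.found_.
    void DeoptimizeDependents(std::vector<Function>& fns, int shared_id) {
      if (!MarkDependents(fns, shared_id)) return;  // nothing to do
      for (Function& f : fns) {
        if (f.code->marked_for_deoptimization) { /* deoptimize f.code */ }
      }
    }
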
MaybeObject* LiveEdit::ReplaceFunctionCode(
Handle<JSArray> new_compile_info_array,
Handle<JSArray> shared_info_array) {
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = new_compile_info_array->GetIsolate();
HandleScope scope(isolate);
if (!SharedInfoWrapper::IsInstance(shared_info_array)) {
@@ -1343,7 +1364,7 @@ void LiveEdit::SetFunctionScript(Handle<JSValue> function_wrapper,
CHECK(script_handle->IsScript() || script_handle->IsUndefined());
shared_info->set_script(*script_handle);
- Isolate::Current()->compilation_cache()->Remove(shared_info);
+ function_wrapper->GetIsolate()->compilation_cache()->Remove(shared_info);
}
@@ -1360,20 +1381,24 @@ static int TranslatePosition(int original_position,
Handle<JSArray> position_change_array) {
int position_diff = 0;
int array_len = GetArrayLength(position_change_array);
+ Isolate* isolate = position_change_array->GetIsolate();
// TODO(635): binary search may be used here
for (int i = 0; i < array_len; i += 3) {
- Object* element = position_change_array->GetElementNoExceptionThrown(i);
+ Object* element =
+ position_change_array->GetElementNoExceptionThrown(isolate, i);
CHECK(element->IsSmi());
int chunk_start = Smi::cast(element)->value();
if (original_position < chunk_start) {
break;
}
- element = position_change_array->GetElementNoExceptionThrown(i + 1);
+ element = position_change_array->GetElementNoExceptionThrown(isolate,
+ i + 1);
CHECK(element->IsSmi());
int chunk_end = Smi::cast(element)->value();
// Position mustn't be inside a chunk.
ASSERT(original_position >= chunk_end);
- element = position_change_array->GetElementNoExceptionThrown(i + 2);
+ element = position_change_array->GetElementNoExceptionThrown(isolate,
+ i + 2);
CHECK(element->IsSmi());
int chunk_changed_end = Smi::cast(element)->value();
position_diff = chunk_changed_end - chunk_end;
@@ -1508,7 +1533,7 @@ static Handle<Code> PatchPositionsInCode(
MaybeObject* LiveEdit::PatchFunctionPositions(
Handle<JSArray> shared_info_array, Handle<JSArray> position_change_array) {
if (!SharedInfoWrapper::IsInstance(shared_info_array)) {
- return Isolate::Current()->ThrowIllegalOperation();
+ return shared_info_array->GetIsolate()->ThrowIllegalOperation();
}
SharedInfoWrapper shared_info_wrapper(shared_info_array);
@@ -1526,7 +1551,7 @@ MaybeObject* LiveEdit::PatchFunctionPositions(
info->set_end_position(new_function_end);
info->set_function_token_position(new_function_token_pos);
- HEAP->EnsureHeapIsIterable();
+ info->GetIsolate()->heap()->EnsureHeapIsIterable();
if (IsJSFunctionCode(info->code())) {
// Patch relocation info section of the code.
@@ -1542,7 +1567,7 @@ MaybeObject* LiveEdit::PatchFunctionPositions(
}
}
- return HEAP->undefined_value();
+ return info->GetIsolate()->heap()->undefined_value();
}
@@ -1588,7 +1613,7 @@ Object* LiveEdit::ChangeScriptSource(Handle<Script> original_script,
original_script->set_source(*new_source);
// Drop line ends so that they will be recalculated.
- original_script->set_line_ends(HEAP->undefined_value());
+ original_script->set_line_ends(isolate->heap()->undefined_value());
return *old_script_object;
}
@@ -1630,7 +1655,8 @@ static bool CheckActivation(Handle<JSArray> shared_info_array,
Isolate* isolate = shared_info_array->GetIsolate();
int len = GetArrayLength(shared_info_array);
for (int i = 0; i < len; i++) {
- Object* element = shared_info_array->GetElementNoExceptionThrown(i);
+ Object* element =
+ shared_info_array->GetElementNoExceptionThrown(isolate, i);
CHECK(element->IsJSValue());
Handle<JSValue> jsvalue(JSValue::cast(element));
Handle<SharedFunctionInfo> shared =
@@ -1651,7 +1677,7 @@ static bool CheckActivation(Handle<JSArray> shared_info_array,
static bool FixTryCatchHandler(StackFrame* top_frame,
StackFrame* bottom_frame) {
Address* pointer_address =
- &Memory::Address_at(Isolate::Current()->get_address_from_id(
+ &Memory::Address_at(top_frame->isolate()->get_address_from_id(
Isolate::kHandlerAddress));
while (*pointer_address < top_frame->sp()) {
@@ -1687,7 +1713,7 @@ static const char* DropFrames(Vector<StackFrame*> frames,
ASSERT(bottom_js_frame->is_java_script());
// Check the nature of the top frame.
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = bottom_js_frame->isolate();
Code* pre_top_frame_code = pre_top_frame->LookupCode();
bool frame_has_padding;
if (pre_top_frame_code->is_inline_cache_stub() &&
@@ -1790,7 +1816,7 @@ static const char* DropFrames(Vector<StackFrame*> frames,
// Make sure FixTryCatchHandler is idempotent.
ASSERT(!FixTryCatchHandler(pre_top_frame, bottom_js_frame));
- Handle<Code> code = Isolate::Current()->builtins()->FrameDropper_LiveEdit();
+ Handle<Code> code = isolate->builtins()->FrameDropper_LiveEdit();
*top_frame_pc_address = code->entry();
pre_top_frame->SetCallerFp(bottom_js_frame->fp());
@@ -1838,8 +1864,7 @@ class MultipleFunctionTarget {
// Drops all call frame matched by target and all frames above them.
template<typename TARGET>
static const char* DropActivationsInActiveThreadImpl(
- TARGET& target, bool do_drop) {
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate, TARGET& target, bool do_drop) {
Debug* debug = isolate->debug();
Zone zone(isolate);
Vector<StackFrame*> frames = CreateStackMap(isolate, &zone);
@@ -1938,8 +1963,8 @@ static const char* DropActivationsInActiveThread(
Handle<JSArray> shared_info_array, Handle<JSArray> result, bool do_drop) {
MultipleFunctionTarget target(shared_info_array, result);
- const char* message =
- DropActivationsInActiveThreadImpl(target, do_drop);
+ const char* message = DropActivationsInActiveThreadImpl(
+ shared_info_array->GetIsolate(), target, do_drop);
if (message) {
return message;
}
@@ -1949,7 +1974,7 @@ static const char* DropActivationsInActiveThread(
// Replace "blocked on active" with "replaced on active" status.
for (int i = 0; i < array_len; i++) {
- if (result->GetElement(i) ==
+ if (result->GetElement(result->GetIsolate(), i) ==
Smi::FromInt(LiveEdit::FUNCTION_BLOCKED_ON_ACTIVE_STACK)) {
Handle<Object> replaced(
Smi::FromInt(LiveEdit::FUNCTION_REPLACED_ON_ACTIVE_STACK), isolate);
@@ -2004,7 +2029,7 @@ Handle<JSArray> LiveEdit::CheckAndDropActivations(
// First check inactive threads. Fail if some functions are blocked there.
InactiveThreadActivationsChecker inactive_threads_checker(shared_info_array,
result);
- Isolate::Current()->thread_manager()->IterateArchivedThreads(
+ isolate->thread_manager()->IterateArchivedThreads(
&inactive_threads_checker);
if (inactive_threads_checker.HasBlockedFunctions()) {
return result;
@@ -2056,7 +2081,8 @@ class SingleFrameTarget {
const char* LiveEdit::RestartFrame(JavaScriptFrame* frame) {
SingleFrameTarget target(frame);
- const char* result = DropActivationsInActiveThreadImpl(target, true);
+ const char* result = DropActivationsInActiveThreadImpl(
+ frame->isolate(), target, true);
if (result != NULL) {
return result;
}
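
Nearly every hunk in liveedit.cc follows the same recipe: stop consulting the process-global Isolate::Current() and instead derive the isolate from an argument via GetIsolate(), or accept it as an explicit parameter. A compilable toy version of the pattern, with all names hypothetical:

    struct Heap { /* ... */ };
    struct Isolate {
      Heap* heap() { return &heap_; }
      Heap heap_;
    };
    struct HeapObject {
      Isolate* isolate_;
      Isolate* GetIsolate() const { return isolate_; }
    };

    // Before: Heap* h = Isolate::Current()->heap();  // hidden global lookup
    // After: the isolate is recovered from the argument itself.
    Heap* HeapOf(const HeapObject* obj) {
      return obj->GetIsolate()->heap();
    }
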
diff --git a/deps/v8/src/log-utils.cc b/deps/v8/src/log-utils.cc
index 6bba8823e..909d4a513 100644
--- a/deps/v8/src/log-utils.cc
+++ b/deps/v8/src/log-utils.cc
@@ -41,14 +41,12 @@ const char* const Log::kLogToConsole = "-";
Log::Log(Logger* logger)
: is_stopped_(false),
output_handle_(NULL),
- mutex_(NULL),
message_buffer_(NULL),
logger_(logger) {
}
void Log::Initialize(const char* log_file_name) {
- mutex_ = OS::CreateMutex();
message_buffer_ = NewArray<char>(kMessageBufferSize);
// --log-all enables all the log flags.
@@ -66,11 +64,6 @@ void Log::Initialize(const char* log_file_name) {
// --prof implies --log-code.
if (FLAG_prof) FLAG_log_code = true;
- // --prof_lazy controls --log-code.
- if (FLAG_prof_lazy) {
- FLAG_log_code = false;
- }
-
// If we're logging anything, we need to open the log file.
if (Log::InitLogAtStart()) {
if (strcmp(log_file_name, kLogToConsole) == 0) {
@@ -116,9 +109,6 @@ FILE* Log::Close() {
DeleteArray(message_buffer_);
message_buffer_ = NULL;
- delete mutex_;
- mutex_ = NULL;
-
is_stopped_ = false;
return result;
}
@@ -126,7 +116,7 @@ FILE* Log::Close() {
Log::MessageBuilder::MessageBuilder(Log* log)
: log_(log),
- sl(log_->mutex_),
+ lock_guard_(&log_->mutex_),
pos_(0) {
ASSERT(log_->message_buffer_ != NULL);
}
diff --git a/deps/v8/src/log-utils.h b/deps/v8/src/log-utils.h
index 861a8263b..ec8415e4b 100644
--- a/deps/v8/src/log-utils.h
+++ b/deps/v8/src/log-utils.h
@@ -107,7 +107,7 @@ class Log {
private:
Log* log_;
- ScopedLock sl;
+ LockGuard<Mutex> lock_guard_;
int pos_;
};
@@ -142,7 +142,7 @@ class Log {
// mutex_ is a Mutex used for enforcing exclusive
// access to the formatting buffer and the log file or log memory buffer.
- Mutex* mutex_;
+ Mutex mutex_;
// Buffer used for formatting log messages. This is a singleton buffer and
// mutex_ should be acquired before using it.
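
Log now owns its mutex by value and MessageBuilder holds a scoped guard, removing the manual OS::CreateMutex()/delete pairing. The same idiom with the standard library; std::mutex and std::lock_guard stand in for V8's Mutex and LockGuard<Mutex>:

    #include <mutex>

    class Log {
     public:
      class MessageBuilder {
       public:
        explicit MessageBuilder(Log* log) : lock_guard_(log->mutex_) {}
        // ... append into the shared message buffer under the lock ...
       private:
        std::lock_guard<std::mutex> lock_guard_;  // RAII; no manual unlock
      };

     private:
      std::mutex mutex_;  // by value: constructed and destroyed with Log itself
    };
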
diff --git a/deps/v8/src/log.cc b/deps/v8/src/log.cc
index a1e5a6752..0f0ad4039 100644
--- a/deps/v8/src/log.cc
+++ b/deps/v8/src/log.cc
@@ -556,13 +556,20 @@ class Profiler: public Thread {
} else {
buffer_[head_] = *sample;
head_ = Succ(head_);
- buffer_semaphore_->Signal(); // Tell we have an element.
+ buffer_semaphore_.Signal(); // Signal that we have an element.
}
}
+ virtual void Run();
+
+ // Pause and Resume TickSample data collection.
+ void pause() { paused_ = true; }
+ void resume() { paused_ = false; }
+
+ private:
// Waits for a signal and removes profiling data.
bool Remove(TickSample* sample) {
- buffer_semaphore_->Wait(); // Wait for an element.
+ buffer_semaphore_.Wait(); // Wait for an element.
*sample = buffer_[tail_];
bool result = overflow_;
tail_ = Succ(tail_);
@@ -570,14 +577,6 @@ class Profiler: public Thread {
return result;
}
- void Run();
-
- // Pause and Resume TickSample data collection.
- bool paused() const { return paused_; }
- void pause() { paused_ = true; }
- void resume() { paused_ = false; }
-
- private:
// Returns the next index in the cyclic buffer.
int Succ(int index) { return (index + 1) % kBufferSize; }
@@ -589,7 +588,8 @@ class Profiler: public Thread {
int head_; // Index to the buffer head.
int tail_; // Index to the buffer tail.
bool overflow_; // Tells whether a buffer overflow has occurred.
- Semaphore* buffer_semaphore_; // Sempahore used for buffer synchronization.
+ // Semaphore used for buffer synchronization.
+ Semaphore buffer_semaphore_;
// Tells whether the profiler is engaged, that is, the processing thread is started.
bool engaged_;
@@ -622,13 +622,13 @@ class Ticker: public Sampler {
ASSERT(profiler_ == NULL);
profiler_ = profiler;
IncreaseProfilingDepth();
- if (!FLAG_prof_lazy && !IsActive()) Start();
+ if (!IsActive()) Start();
}
void ClearProfiler() {
- DecreaseProfilingDepth();
profiler_ = NULL;
if (IsActive()) Stop();
+ DecreaseProfilingDepth();
}
private:
@@ -645,7 +645,7 @@ Profiler::Profiler(Isolate* isolate)
head_(0),
tail_(0),
overflow_(false),
- buffer_semaphore_(OS::CreateSemaphore(0)),
+ buffer_semaphore_(0),
engaged_(false),
running_(false),
paused_(false) {
@@ -656,7 +656,7 @@ void Profiler::Engage() {
if (engaged_) return;
engaged_ = true;
- OS::LogSharedLibraryAddresses();
+ OS::LogSharedLibraryAddresses(isolate_);
// Start thread processing the profiler buffer.
running_ = true;
@@ -686,7 +686,7 @@ void Profiler::Disengage() {
Insert(&sample);
Join();
- LOG(ISOLATE, UncheckedStringEvent("profiler", "end"));
+ LOG(isolate_, UncheckedStringEvent("profiler", "end"));
}
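
The profiler's Semaphore* likewise becomes a plain member: Insert() signals it from the sampler and Remove() waits on it from the processing thread. C++11 has no std::counting_semaphore, so a minimal Wait/Signal equivalent looks like the following sketch (not V8's platform code, which provides the same interface natively):

    #include <condition_variable>
    #include <mutex>

    class Semaphore {
     public:
      explicit Semaphore(int count) : count_(count) {}
      void Signal() {
        std::lock_guard<std::mutex> lock(mutex_);
        ++count_;
        cv_.notify_one();  // wake one waiting consumer
      }
      void Wait() {
        std::unique_lock<std::mutex> lock(mutex_);
        cv_.wait(lock, [this] { return count_ > 0; });
        --count_;
      }
     private:
      std::mutex mutex_;
      std::condition_variable cv_;
      int count_;
    };
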
@@ -709,14 +709,12 @@ Logger::Logger(Isolate* isolate)
ticker_(NULL),
profiler_(NULL),
log_events_(NULL),
- logging_nesting_(0),
- cpu_profiler_nesting_(0),
+ is_logging_(false),
log_(new Log(this)),
ll_logger_(NULL),
jit_logger_(NULL),
listeners_(5),
- is_initialized_(false),
- epoch_(0) {
+ is_initialized_(false) {
}
@@ -867,7 +865,7 @@ void Logger::CodeDeoptEvent(Code* code) {
if (!log_->IsEnabled()) return;
ASSERT(FLAG_log_internal_timer_events);
Log::MessageBuilder msg(log_);
- int since_epoch = static_cast<int>(OS::Ticks() - epoch_);
+ int since_epoch = static_cast<int>(timer_.Elapsed().InMicroseconds());
msg.Append("code-deopt,%ld,%d\n", since_epoch, code->CodeSize());
msg.WriteToLogFile();
}
@@ -877,7 +875,7 @@ void Logger::TimerEvent(StartEnd se, const char* name) {
if (!log_->IsEnabled()) return;
ASSERT(FLAG_log_internal_timer_events);
Log::MessageBuilder msg(log_);
- int since_epoch = static_cast<int>(OS::Ticks() - epoch_);
+ int since_epoch = static_cast<int>(timer_.Elapsed().InMicroseconds());
const char* format = (se == START) ? "timer-event-start,\"%s\",%ld\n"
: "timer-event-end,\"%s\",%ld\n";
msg.Append(format, name, since_epoch);
@@ -906,8 +904,8 @@ void Logger::TimerEventScope::LogTimerEvent(StartEnd se) {
const char* Logger::TimerEventScope::v8_recompile_synchronous =
"V8.RecompileSynchronous";
-const char* Logger::TimerEventScope::v8_recompile_parallel =
- "V8.RecompileParallel";
+const char* Logger::TimerEventScope::v8_recompile_concurrent =
+ "V8.RecompileConcurrent";
const char* Logger::TimerEventScope::v8_compile_full_code =
"V8.CompileFullCode";
const char* Logger::TimerEventScope::v8_execute = "V8.Execute";
@@ -976,7 +974,7 @@ void Logger::LogRuntime(Vector<const char> format,
if (c == '%' && i <= format.length() - 2) {
i++;
ASSERT('0' <= format[i] && format[i] <= '9');
- MaybeObject* maybe = args->GetElement(format[i] - '0');
+ MaybeObject* maybe = args->GetElement(isolate_, format[i] - '0');
Object* obj;
if (!maybe->ToObject(&obj)) {
msg.Append("<exception>");
@@ -1233,7 +1231,7 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag,
Code* code,
SharedFunctionInfo* shared,
CompilationInfo* info,
- Name* source, int line) {
+ Name* source, int line, int column) {
PROFILER_LOG(CodeCreateEvent(tag, code, shared, info, source, line));
if (!is_logging_code_events()) return;
@@ -1252,7 +1250,7 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag,
} else {
msg.AppendSymbolName(Symbol::cast(source));
}
- msg.Append(":%d\",", line);
+ msg.Append(":%d:%d\",", line, column);
msg.AppendAddress(shared->address());
msg.Append(",%s", ComputeMarker(code));
msg.Append('\n');
@@ -1500,7 +1498,7 @@ void Logger::TickEvent(TickSample* sample, bool overflow) {
Log::MessageBuilder msg(log_);
msg.Append("%s,", kLogEventsNames[TICK_EVENT]);
msg.AppendAddress(sample->pc);
- msg.Append(",%ld", static_cast<int>(OS::Ticks() - epoch_));
+ msg.Append(",%ld", static_cast<int>(timer_.Elapsed().InMicroseconds()));
if (sample->has_external_callback) {
msg.Append(",1,");
msg.AppendAddress(sample->external_callback);
@@ -1521,43 +1519,11 @@ void Logger::TickEvent(TickSample* sample, bool overflow) {
}
-bool Logger::IsProfilerPaused() {
- return profiler_ == NULL || profiler_->paused();
-}
-
-
-void Logger::PauseProfiler() {
+void Logger::StopProfiler() {
if (!log_->IsEnabled()) return;
if (profiler_ != NULL) {
- // It is OK to have negative nesting.
- if (--cpu_profiler_nesting_ == 0) {
- profiler_->pause();
- if (FLAG_prof_lazy) {
- ticker_->Stop();
- FLAG_log_code = false;
- LOG(ISOLATE, UncheckedStringEvent("profiler", "pause"));
- }
- --logging_nesting_;
- }
- }
-}
-
-
-void Logger::ResumeProfiler() {
- if (!log_->IsEnabled()) return;
- if (profiler_ != NULL) {
- if (cpu_profiler_nesting_++ == 0) {
- ++logging_nesting_;
- if (FLAG_prof_lazy) {
- profiler_->Engage();
- LOG(ISOLATE, UncheckedStringEvent("profiler", "resume"));
- FLAG_log_code = true;
- LogCompiledFunctions();
- LogAccessorCallbacks();
- if (!ticker_->IsActive()) ticker_->Start();
- }
- profiler_->resume();
- }
+ profiler_->pause();
+ is_logging_ = false;
}
}
@@ -1565,7 +1531,7 @@ void Logger::ResumeProfiler() {
// This function can be called when Log's mutex is acquired,
// either from main or Profiler's thread.
void Logger::LogFailure() {
- PauseProfiler();
+ StopProfiler();
}
@@ -1712,6 +1678,8 @@ void Logger::LogExistingFunction(Handle<SharedFunctionInfo> shared,
if (shared->script()->IsScript()) {
Handle<Script> script(Script::cast(shared->script()));
int line_num = GetScriptLineNumber(script, shared->start_position()) + 1;
+ int column_num =
+ GetScriptColumnNumber(script, shared->start_position()) + 1;
if (script->name()->IsString()) {
Handle<String> script_name(String::cast(script->name()));
if (line_num > 0) {
@@ -1719,7 +1687,7 @@ void Logger::LogExistingFunction(Handle<SharedFunctionInfo> shared,
CodeCreateEvent(
Logger::ToNativeByScript(Logger::LAZY_COMPILE_TAG, *script),
*code, *shared, NULL,
- *script_name, line_num));
+ *script_name, line_num, column_num));
} else {
// Can't distinguish eval and script here, so always use Script.
PROFILE(isolate_,
@@ -1732,7 +1700,7 @@ void Logger::LogExistingFunction(Handle<SharedFunctionInfo> shared,
CodeCreateEvent(
Logger::ToNativeByScript(Logger::LAZY_COMPILE_TAG, *script),
*code, *shared, NULL,
- isolate_->heap()->empty_string(), line_num));
+ isolate_->heap()->empty_string(), line_num, column_num));
}
} else if (shared->IsApiFunction()) {
// API function.
@@ -1796,21 +1764,21 @@ void Logger::LogAccessorCallbacks() {
}
-static void AddIsolateIdIfNeeded(StringStream* stream) {
- Isolate* isolate = Isolate::Current();
+static void AddIsolateIdIfNeeded(Isolate* isolate, StringStream* stream) {
if (isolate->IsDefaultIsolate()) return;
stream->Add("isolate-%p-", isolate);
}
-static SmartArrayPointer<const char> PrepareLogFileName(const char* file_name) {
+static SmartArrayPointer<const char> PrepareLogFileName(
+ Isolate* isolate, const char* file_name) {
if (strchr(file_name, '%') != NULL ||
- !Isolate::Current()->IsDefaultIsolate()) {
+ !isolate->IsDefaultIsolate()) {
// If there's a '%' in the log file name we have to expand
// placeholders.
HeapStringAllocator allocator;
StringStream stream(&allocator);
- AddIsolateIdIfNeeded(&stream);
+ AddIsolateIdIfNeeded(isolate, &stream);
for (const char* p = file_name; *p; p++) {
if (*p == '%') {
p++;
@@ -1863,13 +1831,8 @@ bool Logger::SetUp(Isolate* isolate) {
FLAG_log_snapshot_positions = true;
}
- // --prof_lazy controls --log-code.
- if (FLAG_prof_lazy) {
- FLAG_log_code = false;
- }
-
SmartArrayPointer<const char> log_file_name =
- PrepareLogFileName(FLAG_logfile);
+ PrepareLogFileName(isolate, FLAG_logfile);
log_->Initialize(*log_file_name);
if (FLAG_ll_prof) {
@@ -1880,20 +1843,16 @@ bool Logger::SetUp(Isolate* isolate) {
ticker_ = new Ticker(isolate, kSamplingIntervalMs);
if (Log::InitLogAtStart()) {
- logging_nesting_ = 1;
+ is_logging_ = true;
}
if (FLAG_prof) {
profiler_ = new Profiler(isolate);
- if (FLAG_prof_lazy) {
- profiler_->pause();
- } else {
- logging_nesting_ = 1;
- profiler_->Engage();
- }
+ is_logging_ = true;
+ profiler_->Engage();
}
- if (FLAG_log_internal_timer_events || FLAG_prof) epoch_ = OS::Ticks();
+ if (FLAG_log_internal_timer_events || FLAG_prof) timer_.Start();
return true;
}
diff --git a/deps/v8/src/log.h b/deps/v8/src/log.h
index 24d83ef12..81d45e507 100644
--- a/deps/v8/src/log.h
+++ b/deps/v8/src/log.h
@@ -31,6 +31,7 @@
#include "allocation.h"
#include "objects.h"
#include "platform.h"
+#include "platform/elapsed-timer.h"
namespace v8 {
namespace internal {
@@ -248,7 +249,7 @@ class Logger {
Code* code,
SharedFunctionInfo* shared,
CompilationInfo* info,
- Name* source, int line);
+ Name* source, int line, int column);
void CodeCreateEvent(LogEventsAndTags tag, Code* code, int args_count);
void CodeMovingGCEvent();
// Emits a code create event for a RegExp.
@@ -321,7 +322,7 @@ class Logger {
void LogTimerEvent(StartEnd se);
static const char* v8_recompile_synchronous;
- static const char* v8_recompile_parallel;
+ static const char* v8_recompile_concurrent;
static const char* v8_compile_full_code;
static const char* v8_execute;
static const char* v8_external;
@@ -340,19 +341,16 @@ class Logger {
void LogRuntime(Vector<const char> format, JSArray* args);
bool is_logging() {
- return logging_nesting_ > 0;
+ return is_logging_;
}
bool is_logging_code_events() {
return is_logging() || jit_logger_ != NULL;
}
- // Pause/Resume collection of profiling data.
- // When data collection is paused, CPU Tick events are discarded until
- // data collection is Resumed.
- void PauseProfiler();
- void ResumeProfiler();
- bool IsProfilerPaused();
+ // Stop collection of profiling data.
+ // Once data collection is stopped, CPU Tick events are discarded.
+ void StopProfiler();
void LogExistingFunction(Handle<SharedFunctionInfo> shared,
Handle<Code> code);
@@ -434,13 +432,9 @@ class Logger {
friend class TimeLog;
friend class Profiler;
template <StateTag Tag> friend class VMState;
-
friend class LoggerTestHelper;
-
- int logging_nesting_;
- int cpu_profiler_nesting_;
-
+ bool is_logging_;
Log* log_;
LowLevelLogger* ll_logger_;
JitLogger* jit_logger_;
@@ -450,7 +444,7 @@ class Logger {
// 'true' between SetUp() and TearDown().
bool is_initialized_;
- int64_t epoch_;
+ ElapsedTimer timer_;
friend class CpuProfiler;
};
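
The int64_t epoch_ captured from OS::Ticks() gives way to an ElapsedTimer that is Start()ed once and then queried for elapsed time at each log site. A std::chrono analogue of that interface (the assumed shape; per the hunks above, the real class lives in platform/elapsed-timer.h and returns a TimeDelta from Elapsed()):

    #include <chrono>
    #include <cstdint>

    class ElapsedTimer {
     public:
      void Start() { start_ = std::chrono::steady_clock::now(); }
      int64_t ElapsedMicroseconds() const {
        return std::chrono::duration_cast<std::chrono::microseconds>(
            std::chrono::steady_clock::now() - start_).count();
      }
     private:
      std::chrono::steady_clock::time_point start_;
    };
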
diff --git a/deps/v8/src/macros.py b/deps/v8/src/macros.py
index d50231dce..d699c1462 100644
--- a/deps/v8/src/macros.py
+++ b/deps/v8/src/macros.py
@@ -42,8 +42,8 @@ const SETTER = 1;
# These definitions must match the index of the properties in objects.h.
const kApiTagOffset = 0;
const kApiPropertyListOffset = 1;
-const kApiSerialNumberOffset = 2;
-const kApiConstructorOffset = 2;
+const kApiSerialNumberOffset = 3;
+const kApiConstructorOffset = 3;
const kApiPrototypeTemplateOffset = 5;
const kApiParentTemplateOffset = 6;
const kApiFlagOffset = 14;
@@ -67,7 +67,9 @@ const msPerMonth = 2592000000;
# For apinatives.js
const kUninitialized = -1;
-const kReadOnlyPrototypeBit = 3; # For FunctionTemplateInfo, matches objects.h
+const kReadOnlyPrototypeBit = 3;
+const kRemovePrototypeBit = 4; # For FunctionTemplateInfo, matches objects.h
+const kDoNotCacheBit = 5; # For FunctionTemplateInfo, matches objects.h
# Note: kDayZeroInJulianDay = ToJulianDay(1970, 0, 1).
const kInvalidDate = 'Invalid Date';
diff --git a/deps/v8/src/mark-compact-inl.h b/deps/v8/src/mark-compact-inl.h
index 10773e720..321309c60 100644
--- a/deps/v8/src/mark-compact-inl.h
+++ b/deps/v8/src/mark-compact-inl.h
@@ -58,7 +58,7 @@ void MarkCompactCollector::MarkObject(HeapObject* obj, MarkBit mark_bit) {
mark_bit.Set();
MemoryChunk::IncrementLiveBytesFromGC(obj->address(), obj->Size());
ASSERT(IsMarked(obj));
- ASSERT(HEAP->Contains(obj));
+ ASSERT(obj->GetIsolate()->heap()->Contains(obj));
marking_deque_.PushBlack(obj);
}
}
diff --git a/deps/v8/src/mark-compact.cc b/deps/v8/src/mark-compact.cc
index f065da1f9..263de4878 100644
--- a/deps/v8/src/mark-compact.cc
+++ b/deps/v8/src/mark-compact.cc
@@ -74,16 +74,18 @@ MarkCompactCollector::MarkCompactCollector() : // NOLINT
heap_(NULL),
code_flusher_(NULL),
encountered_weak_collections_(NULL),
- code_to_deoptimize_(NULL) { }
+ have_code_to_deoptimize_(false) { }
#ifdef VERIFY_HEAP
class VerifyMarkingVisitor: public ObjectVisitor {
public:
+ explicit VerifyMarkingVisitor(Heap* heap) : heap_(heap) {}
+
void VisitPointers(Object** start, Object** end) {
for (Object** current = start; current < end; current++) {
if ((*current)->IsHeapObject()) {
HeapObject* object = HeapObject::cast(*current);
- CHECK(HEAP->mark_compact_collector()->IsMarked(object));
+ CHECK(heap_->mark_compact_collector()->IsMarked(object));
}
}
}
@@ -97,11 +99,14 @@ class VerifyMarkingVisitor: public ObjectVisitor {
VisitPointer(rinfo->target_object_address());
}
}
+
+ private:
+ Heap* heap_;
};
-static void VerifyMarking(Address bottom, Address top) {
- VerifyMarkingVisitor visitor;
+static void VerifyMarking(Heap* heap, Address bottom, Address top) {
+ VerifyMarkingVisitor visitor(heap);
HeapObject* object;
Address next_object_must_be_here_or_later = bottom;
@@ -129,7 +134,7 @@ static void VerifyMarking(NewSpace* space) {
NewSpacePage* page = it.next();
Address limit = it.has_next() ? page->area_end() : end;
CHECK(limit == end || !page->Contains(end));
- VerifyMarking(page->area_start(), limit);
+ VerifyMarking(space->heap(), page->area_start(), limit);
}
}
@@ -139,7 +144,7 @@ static void VerifyMarking(PagedSpace* space) {
while (it.has_next()) {
Page* p = it.next();
- VerifyMarking(p->area_start(), p->area_end());
+ VerifyMarking(space->heap(), p->area_start(), p->area_end());
}
}
@@ -153,7 +158,7 @@ static void VerifyMarking(Heap* heap) {
VerifyMarking(heap->map_space());
VerifyMarking(heap->new_space());
- VerifyMarkingVisitor visitor;
+ VerifyMarkingVisitor visitor(heap);
LargeObjectIterator it(heap->lo_space());
for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
@@ -961,22 +966,10 @@ void MarkCompactCollector::Finish() {
// objects (empty string, illegal builtin).
isolate()->stub_cache()->Clear();
- if (code_to_deoptimize_ != Smi::FromInt(0)) {
- // Convert the linked list of Code objects into a ZoneList.
- Zone zone(isolate());
- ZoneList<Code*> codes(4, &zone);
-
- Object *list = code_to_deoptimize_;
- while (list->IsCode()) {
- Code *code = Code::cast(list);
- list = code->code_to_deoptimize_link();
- codes.Add(code, &zone);
- // Destroy the link and don't ever try to deoptimize this code again.
- code->set_code_to_deoptimize_link(Smi::FromInt(0));
- }
- code_to_deoptimize_ = Smi::FromInt(0);
-
- Deoptimizer::DeoptimizeCodeList(isolate(), &codes);
+ if (have_code_to_deoptimize_) {
+ // Some code objects were marked for deoptimization during the GC.
+ Deoptimizer::DeoptimizeMarkedCode(isolate());
+ have_code_to_deoptimize_ = false;
}
}
@@ -1420,8 +1413,8 @@ class MarkCompactMarkingVisitor
INLINE(static void VisitUnmarkedObject(MarkCompactCollector* collector,
HeapObject* obj)) {
#ifdef DEBUG
- ASSERT(Isolate::Current()->heap()->Contains(obj));
- ASSERT(!HEAP->mark_compact_collector()->IsMarked(obj));
+ ASSERT(collector->heap()->Contains(obj));
+ ASSERT(!collector->heap()->mark_compact_collector()->IsMarked(obj));
#endif
Map* map = obj->map();
Heap* heap = obj->GetHeap();
@@ -1795,8 +1788,6 @@ void MarkCompactCollector::PrepareThreadForCodeFlushing(Isolate* isolate,
void MarkCompactCollector::PrepareForCodeFlushing() {
- ASSERT(heap() == Isolate::Current()->heap());
-
// Enable code flushing for non-incremental cycles.
if (FLAG_flush_code && !FLAG_flush_code_incrementally) {
EnableCodeFlushing(!was_marked_incrementally_);
@@ -2590,7 +2581,7 @@ void MarkCompactCollector::ClearNonLivePrototypeTransitions(Map* map) {
for (int i = new_number_of_transitions * step;
i < number_of_transitions * step;
i++) {
- prototype_transitions->set_undefined(heap_, header + i);
+ prototype_transitions->set_undefined(header + i);
}
}
@@ -2623,16 +2614,9 @@ void MarkCompactCollector::ClearAndDeoptimizeDependentCode(Map* map) {
ASSERT(entries->is_code_at(i));
Code* code = entries->code_at(i);
- if (IsMarked(code) && !WillBeDeoptimized(code)) {
- // Insert the code into the code_to_deoptimize linked list.
- Object* next = code_to_deoptimize_;
- if (next != Smi::FromInt(0)) {
- // Record the slot so that it is updated.
- Object** slot = code->code_to_deoptimize_link_slot();
- RecordSlot(slot, slot, next);
- }
- code->set_code_to_deoptimize_link(next);
- code_to_deoptimize_ = code;
+ if (IsMarked(code) && !code->marked_for_deoptimization()) {
+ code->set_marked_for_deoptimization(true);
+ have_code_to_deoptimize_ = true;
}
entries->clear_at(i);
}
@@ -3065,13 +3049,14 @@ class EvacuationWeakObjectRetainer : public WeakObjectRetainer {
};
-static inline void UpdateSlot(ObjectVisitor* v,
+static inline void UpdateSlot(Isolate* isolate,
+ ObjectVisitor* v,
SlotsBuffer::SlotType slot_type,
Address addr) {
switch (slot_type) {
case SlotsBuffer::CODE_TARGET_SLOT: {
RelocInfo rinfo(addr, RelocInfo::CODE_TARGET, 0, NULL);
- rinfo.Visit(v);
+ rinfo.Visit(isolate, v);
break;
}
case SlotsBuffer::CODE_ENTRY_SLOT: {
@@ -3085,17 +3070,17 @@ static inline void UpdateSlot(ObjectVisitor* v,
}
case SlotsBuffer::DEBUG_TARGET_SLOT: {
RelocInfo rinfo(addr, RelocInfo::DEBUG_BREAK_SLOT, 0, NULL);
- if (rinfo.IsPatchedDebugBreakSlotSequence()) rinfo.Visit(v);
+ if (rinfo.IsPatchedDebugBreakSlotSequence()) rinfo.Visit(isolate, v);
break;
}
case SlotsBuffer::JS_RETURN_SLOT: {
RelocInfo rinfo(addr, RelocInfo::JS_RETURN, 0, NULL);
- if (rinfo.IsPatchedReturnSequence()) rinfo.Visit(v);
+ if (rinfo.IsPatchedReturnSequence()) rinfo.Visit(isolate, v);
break;
}
case SlotsBuffer::EMBEDDED_OBJECT_SLOT: {
RelocInfo rinfo(addr, RelocInfo::EMBEDDED_OBJECT, 0, NULL);
- rinfo.Visit(v);
+ rinfo.Visit(isolate, v);
break;
}
default:
@@ -3283,11 +3268,7 @@ void MarkCompactCollector::InvalidateCode(Code* code) {
// Return true if the given code is deoptimized or will be deoptimized.
bool MarkCompactCollector::WillBeDeoptimized(Code* code) {
- // We assume the code_to_deoptimize_link is initialized to undefined.
- // If it is 0, or refers to another Code object, then this code
- // is already linked, or was already linked into the list.
- return code->code_to_deoptimize_link() != heap()->undefined_value()
- || code->marked_for_deoptimization();
+ return code->marked_for_deoptimization();
}
@@ -3474,9 +3455,8 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
}
}
- // Update the heads of the native contexts list the code to deoptimize list.
+ // Update the head of the native contexts list in the heap.
updating_visitor.VisitPointer(heap_->native_contexts_list_address());
- updating_visitor.VisitPointer(&code_to_deoptimize_);
heap_->string_table()->Iterate(&updating_visitor);
@@ -4287,7 +4267,8 @@ void SlotsBuffer::UpdateSlots(Heap* heap) {
} else {
++slot_idx;
ASSERT(slot_idx < idx_);
- UpdateSlot(&v,
+ UpdateSlot(heap->isolate(),
+ &v,
DecodeSlotType(slot),
reinterpret_cast<Address>(slots_[slot_idx]));
}
@@ -4309,7 +4290,8 @@ void SlotsBuffer::UpdateSlotsWithFilter(Heap* heap) {
ASSERT(slot_idx < idx_);
Address pc = reinterpret_cast<Address>(slots_[slot_idx]);
if (!IsOnInvalidatedCodeObject(pc)) {
- UpdateSlot(&v,
+ UpdateSlot(heap->isolate(),
+ &v,
DecodeSlotType(slot),
reinterpret_cast<Address>(slots_[slot_idx]));
}
diff --git a/deps/v8/src/mark-compact.h b/deps/v8/src/mark-compact.h
index ee845a083..df2f78211 100644
--- a/deps/v8/src/mark-compact.h
+++ b/deps/v8/src/mark-compact.h
@@ -949,7 +949,7 @@ class MarkCompactCollector {
MarkingDeque marking_deque_;
CodeFlusher* code_flusher_;
Object* encountered_weak_collections_;
- Object* code_to_deoptimize_;
+ bool have_code_to_deoptimize_;
List<Page*> evacuation_candidates_;
List<Code*> invalidated_code_;
diff --git a/deps/v8/src/marking-thread.cc b/deps/v8/src/marking-thread.cc
index ac9f944fe..58bca3662 100644
--- a/deps/v8/src/marking-thread.cc
+++ b/deps/v8/src/marking-thread.cc
@@ -39,9 +39,9 @@ MarkingThread::MarkingThread(Isolate* isolate)
: Thread("MarkingThread"),
isolate_(isolate),
heap_(isolate->heap()),
- start_marking_semaphore_(OS::CreateSemaphore(0)),
- end_marking_semaphore_(OS::CreateSemaphore(0)),
- stop_semaphore_(OS::CreateSemaphore(0)) {
+ start_marking_semaphore_(0),
+ end_marking_semaphore_(0),
+ stop_semaphore_(0) {
NoBarrier_Store(&stop_thread_, static_cast<AtomicWord>(false));
id_ = NoBarrier_AtomicIncrement(&id_counter_, 1);
}
@@ -57,33 +57,33 @@ void MarkingThread::Run() {
DisallowHandleDereference no_deref;
while (true) {
- start_marking_semaphore_->Wait();
+ start_marking_semaphore_.Wait();
if (Acquire_Load(&stop_thread_)) {
- stop_semaphore_->Signal();
+ stop_semaphore_.Signal();
return;
}
- end_marking_semaphore_->Signal();
+ end_marking_semaphore_.Signal();
}
}
void MarkingThread::Stop() {
Release_Store(&stop_thread_, static_cast<AtomicWord>(true));
- start_marking_semaphore_->Signal();
- stop_semaphore_->Wait();
+ start_marking_semaphore_.Signal();
+ stop_semaphore_.Wait();
Join();
}
void MarkingThread::StartMarking() {
- start_marking_semaphore_->Signal();
+ start_marking_semaphore_.Signal();
}
void MarkingThread::WaitForMarkingThread() {
- end_marking_semaphore_->Wait();
+ end_marking_semaphore_.Wait();
}
} } // namespace v8::internal
diff --git a/deps/v8/src/marking-thread.h b/deps/v8/src/marking-thread.h
index 9efa3af13..021cd5b48 100644
--- a/deps/v8/src/marking-thread.h
+++ b/deps/v8/src/marking-thread.h
@@ -43,24 +43,19 @@ namespace internal {
class MarkingThread : public Thread {
public:
explicit MarkingThread(Isolate* isolate);
+ ~MarkingThread() {}
void Run();
void Stop();
void StartMarking();
void WaitForMarkingThread();
- ~MarkingThread() {
- delete start_marking_semaphore_;
- delete end_marking_semaphore_;
- delete stop_semaphore_;
- }
-
private:
Isolate* isolate_;
Heap* heap_;
- Semaphore* start_marking_semaphore_;
- Semaphore* end_marking_semaphore_;
- Semaphore* stop_semaphore_;
+ Semaphore start_marking_semaphore_;
+ Semaphore end_marking_semaphore_;
+ Semaphore stop_semaphore_;
volatile AtomicWord stop_thread_;
int id_;
static Atomic32 id_counter_;
diff --git a/deps/v8/src/mips/assembler-mips-inl.h b/deps/v8/src/mips/assembler-mips-inl.h
index 2ca00831c..2fa6804d1 100644
--- a/deps/v8/src/mips/assembler-mips-inl.h
+++ b/deps/v8/src/mips/assembler-mips-inl.h
@@ -338,7 +338,7 @@ bool RelocInfo::IsPatchedDebugBreakSlotSequence() {
}
-void RelocInfo::Visit(ObjectVisitor* visitor) {
+void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
visitor->VisitEmbeddedPointer(this);
@@ -351,12 +351,11 @@ void RelocInfo::Visit(ObjectVisitor* visitor) {
} else if (RelocInfo::IsCodeAgeSequence(mode)) {
visitor->VisitCodeAgeSequence(this);
#ifdef ENABLE_DEBUGGER_SUPPORT
- // TODO(isolates): Get a cached isolate below.
} else if (((RelocInfo::IsJSReturn(mode) &&
IsPatchedReturnSequence()) ||
(RelocInfo::IsDebugBreakSlot(mode) &&
IsPatchedDebugBreakSlotSequence())) &&
- Isolate::Current()->debug()->has_break_points()) {
+ isolate->debug()->has_break_points()) {
visitor->VisitDebugTarget(this);
#endif
} else if (RelocInfo::IsRuntimeEntry(mode)) {
diff --git a/deps/v8/src/mips/assembler-mips.cc b/deps/v8/src/mips/assembler-mips.cc
index fcf49f110..345b64245 100644
--- a/deps/v8/src/mips/assembler-mips.cc
+++ b/deps/v8/src/mips/assembler-mips.cc
@@ -126,7 +126,8 @@ void CpuFeatures::Probe() {
supported_ |= static_cast<uint64_t>(1) << FPU;
#else
// Probe for additional features not already known to be available.
- if (OS::MipsCpuHasFeature(FPU)) {
+ CPU cpu;
+ if (cpu.has_fpu()) {
// This implementation also sets the FPU flags if
// runtime detection of FPU returns true.
supported_ |= static_cast<uint64_t>(1) << FPU;
@@ -237,15 +238,12 @@ void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
// See assembler-mips-inl.h for inlined constructors.
Operand::Operand(Handle<Object> handle) {
-#ifdef DEBUG
- Isolate* isolate = Isolate::Current();
-#endif
AllowDeferredHandleDereference using_raw_address;
rm_ = no_reg;
// Verify all Objects referred by code are NOT in new space.
Object* obj = *handle;
- ASSERT(!isolate->heap()->InNewSpace(obj));
if (obj->IsHeapObject()) {
+ ASSERT(!HeapObject::cast(obj)->GetHeap()->InNewSpace(obj));
imm32_ = reinterpret_cast<intptr_t>(handle.location());
rmode_ = RelocInfo::EMBEDDED_OBJECT;
} else {
@@ -2203,8 +2201,7 @@ void Assembler::set_target_address_at(Address pc, Address target) {
Instr instr3 = instr_at(pc + 2 * kInstrSize);
uint32_t ipc = reinterpret_cast<uint32_t>(pc + 3 * kInstrSize);
- bool in_range = (ipc ^ static_cast<uint32_t>(itarget) >>
- (kImm26Bits + kImmFieldShift)) == 0;
+ bool in_range = ((ipc ^ itarget) >> (kImm26Bits + kImmFieldShift)) == 0;
uint32_t target_field =
static_cast<uint32_t>(itarget & kJumpAddrMask) >> kImmFieldShift;
bool patched_jump = false;
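
The in_range rewrite in set_target_address_at fixes an operator-precedence bug: >> binds tighter than ^, so the old expression XORed ipc with the already-shifted target instead of comparing the high-order region bits of the two addresses. Demonstrated in isolation:

    #include <cstdint>

    bool SameJumpRegion(uint32_t ipc, uint32_t itarget, int shift) {
      // Old form parses as ipc ^ (itarget >> shift): almost never zero.
      bool buggy = (ipc ^ itarget >> shift) == 0;
      (void)buggy;
      // Fixed form: high-order bits equal means both addresses sit in the
      // same directly jumpable region.
      return ((ipc ^ itarget) >> shift) == 0;
    }
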
diff --git a/deps/v8/src/mips/builtins-mips.cc b/deps/v8/src/mips/builtins-mips.cc
index d424cbc72..3aabd97b9 100644
--- a/deps/v8/src/mips/builtins-mips.cc
+++ b/deps/v8/src/mips/builtins-mips.cc
@@ -299,6 +299,24 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
}
+static void CallRuntimePassFunction(MacroAssembler* masm,
+ Runtime::FunctionId function_id) {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ // Push a copy of the function onto the stack.
+ __ push(a1);
+ // Push call kind information.
+ __ push(t1);
+ // Function is also the parameter to the runtime call.
+ __ push(a1);
+
+ __ CallRuntime(function_id, 1);
+ // Restore call kind information.
+ __ pop(t1);
+ // Restore receiver.
+ __ pop(a1);
+}
+
+
static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
__ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
__ lw(a2, FieldMemOperand(a2, SharedFunctionInfo::kCodeOffset));
@@ -308,59 +326,27 @@ static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
void Builtins::Generate_InRecompileQueue(MacroAssembler* masm) {
- GenerateTailCallToSharedCode(masm);
-}
-
-
-void Builtins::Generate_InstallRecompiledCode(MacroAssembler* masm) {
- // Enter an internal frame.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Preserve the function.
- __ push(a1);
- // Push call kind information.
- __ push(t1);
-
- // Push the function on the stack as the argument to the runtime function.
- __ push(a1);
- __ CallRuntime(Runtime::kInstallRecompiledCode, 1);
- // Calculate the entry point.
- __ Addu(t9, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
-
- // Restore call kind information.
- __ pop(t1);
- // Restore saved function.
- __ pop(a1);
-
- // Tear down temporary frame.
- }
+ // Checking whether the queued function is ready for install is optional,
+ // since we come across interrupts and stack checks elsewhere. However,
+ // not checking may delay installing ready functions, and always checking
+ // would be quite expensive. A good compromise is to first check against
+ // the stack limit as a cue for an interrupt signal.
+ Label ok;
+ __ LoadRoot(t0, Heap::kStackLimitRootIndex);
+ __ Branch(&ok, hs, sp, Operand(t0));
+
+ CallRuntimePassFunction(masm, Runtime::kTryInstallRecompiledCode);
+ // Tail call to returned code.
+ __ Addu(at, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(at);
- // Do a tail-call of the compiled function.
- __ Jump(t9);
+ __ bind(&ok);
+ GenerateTailCallToSharedCode(masm);
}
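
In C-like pseudostructure, the new InRecompileQueue prologue reads as follows. TryInstallRecompiledCode and TailCallToSharedCode are stand-ins for the runtime call and the shared-code tail call; they are declared but deliberately left undefined to keep the sketch compilable:

    #include <cstdint>

    typedef void (*Code)();
    Code TryInstallRecompiledCode();   // assumed: runtime call, returns code
    void TailCallToSharedCode();       // assumed: jump to unoptimized code

    void InRecompileQueue(uintptr_t sp, uintptr_t stack_limit) {
      if (sp < stack_limit) {          // below the limit: interrupt pending
        Code code = TryInstallRecompiledCode();
        code();                        // "tail call" the returned code object
      } else {
        TailCallToSharedCode();        // cheap common path
      }
    }
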
-void Builtins::Generate_ParallelRecompile(MacroAssembler* masm) {
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Push a copy of the function onto the stack.
- __ push(a1);
- // Push call kind information.
- __ push(t1);
-
- __ push(a1); // Function is also the parameter to the runtime call.
- __ CallRuntime(Runtime::kParallelRecompile, 1);
-
- // Restore call kind information.
- __ pop(t1);
- // Restore receiver.
- __ pop(a1);
-
- // Tear down internal frame.
- }
-
+void Builtins::Generate_ConcurrentRecompile(MacroAssembler* masm) {
+ CallRuntimePassFunction(masm, Runtime::kConcurrentRecompile);
GenerateTailCallToSharedCode(masm);
}
@@ -815,60 +801,17 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
void Builtins::Generate_LazyCompile(MacroAssembler* masm) {
- // Enter an internal frame.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Preserve the function.
- __ push(a1);
- // Push call kind information.
- __ push(t1);
-
- // Push the function on the stack as the argument to the runtime function.
- __ push(a1);
- // Call the runtime function.
- __ CallRuntime(Runtime::kLazyCompile, 1);
- // Calculate the entry point.
- __ addiu(t9, v0, Code::kHeaderSize - kHeapObjectTag);
-
- // Restore call kind information.
- __ pop(t1);
- // Restore saved function.
- __ pop(a1);
-
- // Tear down temporary frame.
- }
-
+ CallRuntimePassFunction(masm, Runtime::kLazyCompile);
// Do a tail-call of the compiled function.
+ __ Addu(t9, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Jump(t9);
}
void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
- // Enter an internal frame.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Preserve the function.
- __ push(a1);
- // Push call kind information.
- __ push(t1);
-
- // Push the function on the stack as the argument to the runtime function.
- __ push(a1);
- __ CallRuntime(Runtime::kLazyRecompile, 1);
- // Calculate the entry point.
- __ Addu(t9, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
-
- // Restore call kind information.
- __ pop(t1);
- // Restore saved function.
- __ pop(a1);
-
- // Tear down temporary frame.
- }
-
+ CallRuntimePassFunction(masm, Runtime::kLazyRecompile);
// Do a tail-call of the compiled function.
+ __ Addu(t9, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Jump(t9);
}
@@ -1000,27 +943,44 @@ void Builtins::Generate_NotifyOSR(MacroAssembler* masm) {
void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
- // Lookup the function in the JavaScript frame and push it as an
- // argument to the on-stack replacement function.
+ // Look up the function in the JavaScript frame.
__ lw(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
{
FrameScope scope(masm, StackFrame::INTERNAL);
+ // Look up and calculate the pc offset.
+ __ lw(a1, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
+ __ lw(a2, FieldMemOperand(a0, JSFunction::kSharedFunctionInfoOffset));
+ __ lw(a2, FieldMemOperand(a2, SharedFunctionInfo::kCodeOffset));
+ __ Subu(a1, a1, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Subu(a1, a1, a2);
+ __ SmiTag(a1);
+
+ // Pass both function and pc offset as arguments.
__ push(a0);
- __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
+ __ push(a1);
+ __ CallRuntime(Runtime::kCompileForOnStackReplacement, 2);
}
- // If the result was -1 it means that we couldn't optimize the
- // function. Just return and continue in the unoptimized version.
- __ Ret(eq, v0, Operand(Smi::FromInt(-1)));
+ // If the code object is null, just return to the unoptimized code.
+ __ Ret(eq, v0, Operand(Smi::FromInt(0)));
- // Untag the AST id and push it on the stack.
- __ SmiUntag(v0);
- __ push(v0);
+ // Load deoptimization data from the code object.
+ // <deopt_data> = <code>[#deoptimization_data_offset]
+ __ lw(a1, MemOperand(v0, Code::kDeoptimizationDataOffset - kHeapObjectTag));
- // Generate the code for doing the frame-to-frame translation using
- // the deoptimizer infrastructure.
- Deoptimizer::EntryGenerator generator(masm, Deoptimizer::OSR);
- generator.Generate();
+ // Load the OSR entrypoint offset from the deoptimization data.
+ // <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
+ __ lw(a1, MemOperand(a1, FixedArray::OffsetOfElementAt(
+ DeoptimizationInputData::kOsrPcOffsetIndex) - kHeapObjectTag));
+ __ SmiUntag(a1);
+
+ // Compute the target address = code_obj + header_size + osr_offset
+ // <entry_addr> = <code_obj> + #header_size + <osr_offset>
+ __ addu(v0, v0, a1);
+ __ addiu(ra, v0, Code::kHeaderSize - kHeapObjectTag);
+
+ // And "return" to the OSR entry point of the function.
+ __ Ret();
}
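
The replacement OSR sequence no longer round-trips through the deoptimizer: it computes the entry address directly from the returned code object and its deoptimization data. The address arithmetic the stub performs, extracted into plain C++ (the parameters are placeholders for Code::kHeaderSize, kHeapObjectTag, and the osr_pc_offset slot):

    #include <cstdint>

    // entry = (tagged code pointer - heap tag) + header size + osr pc offset
    uintptr_t OsrEntry(uintptr_t tagged_code, uintptr_t header_size,
                       uintptr_t heap_object_tag, uintptr_t osr_pc_offset) {
      uintptr_t code_start = tagged_code - heap_object_tag;  // untag
      return code_start + header_size + osr_pc_offset;       // skip Code header
    }
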
diff --git a/deps/v8/src/mips/code-stubs-mips.cc b/deps/v8/src/mips/code-stubs-mips.cc
index 481fe7c24..0589bf016 100644
--- a/deps/v8/src/mips/code-stubs-mips.cc
+++ b/deps/v8/src/mips/code-stubs-mips.cc
@@ -39,6 +39,17 @@ namespace v8 {
namespace internal {
+void FastNewClosureStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { a2 };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kNewClosureFromStubFailure)->entry;
+}
+
+
void ToNumberStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
@@ -310,134 +321,6 @@ void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
}
-void FastNewClosureStub::Generate(MacroAssembler* masm) {
- // Create a new closure from the given function info in new
- // space. Set the context to the current context in cp.
- Counters* counters = masm->isolate()->counters();
-
- Label gc;
-
- // Pop the function info from the stack.
- __ pop(a3);
-
- // Attempt to allocate new JSFunction in new space.
- __ Allocate(JSFunction::kSize, v0, a1, a2, &gc, TAG_OBJECT);
-
- __ IncrementCounter(counters->fast_new_closure_total(), 1, t2, t3);
-
- int map_index = Context::FunctionMapIndex(language_mode_, is_generator_);
-
- // Compute the function map in the current native context and set that
- // as the map of the allocated object.
- __ lw(a2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ lw(a2, FieldMemOperand(a2, GlobalObject::kNativeContextOffset));
- __ lw(t1, MemOperand(a2, Context::SlotOffset(map_index)));
- __ sw(t1, FieldMemOperand(v0, HeapObject::kMapOffset));
-
- // Initialize the rest of the function. We don't have to update the
- // write barrier because the allocated object is in new space.
- __ LoadRoot(a1, Heap::kEmptyFixedArrayRootIndex);
- __ LoadRoot(t1, Heap::kTheHoleValueRootIndex);
- __ sw(a1, FieldMemOperand(v0, JSObject::kPropertiesOffset));
- __ sw(a1, FieldMemOperand(v0, JSObject::kElementsOffset));
- __ sw(t1, FieldMemOperand(v0, JSFunction::kPrototypeOrInitialMapOffset));
- __ sw(a3, FieldMemOperand(v0, JSFunction::kSharedFunctionInfoOffset));
- __ sw(cp, FieldMemOperand(v0, JSFunction::kContextOffset));
- __ sw(a1, FieldMemOperand(v0, JSFunction::kLiteralsOffset));
-
- // Initialize the code pointer in the function to be the one
- // found in the shared function info object.
- // But first check if there is an optimized version for our context.
- Label check_optimized;
- Label install_unoptimized;
- if (FLAG_cache_optimized_code) {
- __ lw(a1,
- FieldMemOperand(a3, SharedFunctionInfo::kOptimizedCodeMapOffset));
- __ And(at, a1, a1);
- __ Branch(&check_optimized, ne, at, Operand(zero_reg));
- }
- __ bind(&install_unoptimized);
- __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
- __ sw(t0, FieldMemOperand(v0, JSFunction::kNextFunctionLinkOffset));
- __ lw(a3, FieldMemOperand(a3, SharedFunctionInfo::kCodeOffset));
- __ Addu(a3, a3, Operand(Code::kHeaderSize - kHeapObjectTag));
-
- // Return result. The argument function info has been popped already.
- __ Ret(USE_DELAY_SLOT);
- __ sw(a3, FieldMemOperand(v0, JSFunction::kCodeEntryOffset));
-
- __ bind(&check_optimized);
-
- __ IncrementCounter(counters->fast_new_closure_try_optimized(), 1, t2, t3);
-
- // a2 holds native context, a1 points to fixed array of 3-element entries
- // (native context, optimized code, literals).
- // The optimized code map must never be empty, so check the first elements.
- Label install_optimized;
- // Speculatively move code object into t0.
- __ lw(t0, FieldMemOperand(a1, SharedFunctionInfo::kFirstCodeSlot));
- __ lw(t1, FieldMemOperand(a1, SharedFunctionInfo::kFirstContextSlot));
- __ Branch(&install_optimized, eq, a2, Operand(t1));
-
-  // Iterate through the rest of the map backwards. t0 holds an index as a Smi.
- Label loop;
- __ lw(t0, FieldMemOperand(a1, FixedArray::kLengthOffset));
- __ bind(&loop);
- // Do not double check first entry.
- __ Branch(&install_unoptimized, eq, t0,
- Operand(Smi::FromInt(SharedFunctionInfo::kSecondEntryIndex)));
- __ Subu(t0, t0, Operand(Smi::FromInt(SharedFunctionInfo::kEntryLength)));
- __ Addu(t1, a1, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ sll(at, t0, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(t1, t1, Operand(at));
- __ lw(t1, MemOperand(t1));
- __ Branch(&loop, ne, a2, Operand(t1));
- // Hit: fetch the optimized code.
- __ Addu(t1, a1, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ sll(at, t0, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(t1, t1, Operand(at));
- __ Addu(t1, t1, Operand(kPointerSize));
- __ lw(t0, MemOperand(t1));
-
- __ bind(&install_optimized);
- __ IncrementCounter(counters->fast_new_closure_install_optimized(),
- 1, t2, t3);
-
- // TODO(fschneider): Idea: store proper code pointers in the map and either
- // unmangle them on marking or do nothing as the whole map is discarded on
- // major GC anyway.
- __ Addu(t0, t0, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ sw(t0, FieldMemOperand(v0, JSFunction::kCodeEntryOffset));
-
- // Now link a function into a list of optimized functions.
- __ lw(t0, ContextOperand(a2, Context::OPTIMIZED_FUNCTIONS_LIST));
-
- __ sw(t0, FieldMemOperand(v0, JSFunction::kNextFunctionLinkOffset));
-  // No need for write barrier as JSFunction (v0) is in new space.
-
- __ sw(v0, ContextOperand(a2, Context::OPTIMIZED_FUNCTIONS_LIST));
-  // Store JSFunction (v0) into t0 before issuing the write barrier as
-  // it clobbers all the registers passed.
- __ mov(t0, v0);
- __ RecordWriteContextSlot(
- a2,
- Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST),
- t0,
- a1,
- kRAHasNotBeenSaved,
- kDontSaveFPRegs);
-
- // Return result. The argument function info has been popped already.
- __ Ret();
-
- // Create a new closure through the slower runtime call.
- __ bind(&gc);
- __ LoadRoot(t0, Heap::kFalseValueRootIndex);
- __ Push(cp, a3, t0);
- __ TailCallRuntime(Runtime::kNewClosure, 3, 1);
-}
-
-
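
The fast path deleted above probed the SharedFunctionInfo's optimized code map, a FixedArray of (native context, optimized code, literals) triples scanned backwards. A simplified C++ rendering of that probe, with illustrative types; the real loop also special-cases the first entry and stops at kSecondEntryIndex:

  #include <cstddef>

  // Illustrative types; the real map is a V8 FixedArray.
  struct CodeMapEntry {
    const void* native_context;
    const void* optimized_code;
    const void* literals;
  };

  const void* LookupOptimizedCode(const CodeMapEntry* entries, size_t count,
                                  const void* native_context) {
    for (size_t i = count; i-- > 0;) {  // scan backwards, as the stub did
      if (entries[i].native_context == native_context) {
        return entries[i].optimized_code;
      }
    }
    return nullptr;  // caller installs the unoptimized code instead
  }
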
void FastNewContextStub::Generate(MacroAssembler* masm) {
// Try to allocate the context in new space.
Label gc;
@@ -638,291 +521,135 @@ void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
}
-void FloatingPointHelper::LoadSmis(MacroAssembler* masm,
- FloatingPointHelper::Destination destination,
- Register scratch1,
- Register scratch2) {
- __ sra(scratch1, a0, kSmiTagSize);
- __ mtc1(scratch1, f14);
- __ cvt_d_w(f14, f14);
- __ sra(scratch1, a1, kSmiTagSize);
- __ mtc1(scratch1, f12);
- __ cvt_d_w(f12, f12);
- if (destination == kCoreRegisters) {
- __ Move(a2, a3, f14);
- __ Move(a0, a1, f12);
- }
-}
-
-
-void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
- Destination destination,
- Register object,
- FPURegister dst,
- Register dst1,
- Register dst2,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Label* not_number) {
- __ AssertRootValue(heap_number_map,
- Heap::kHeapNumberMapRootIndex,
- kHeapNumberMapRegisterClobbered);
-
- Label is_smi, done;
-
- // Smi-check
- __ UntagAndJumpIfSmi(scratch1, object, &is_smi);
- // Heap number check
- __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);
-
- // Handle loading a double from a heap number.
- if (destination == kFPURegisters) {
- // Load the double from tagged HeapNumber to double register.
-
- // ARM uses a workaround here because of the unaligned HeapNumber
- // kValueOffset. On MIPS this workaround is built into ldc1 so there's no
- // point in generating even more instructions.
- __ ldc1(dst, FieldMemOperand(object, HeapNumber::kValueOffset));
- } else {
- ASSERT(destination == kCoreRegisters);
- // Load the double from heap number to dst1 and dst2 in double format.
- __ lw(dst1, FieldMemOperand(object, HeapNumber::kValueOffset));
- __ lw(dst2, FieldMemOperand(object,
- HeapNumber::kValueOffset + kPointerSize));
- }
- __ Branch(&done);
-
- // Handle loading a double from a smi.
- __ bind(&is_smi);
- // Convert smi to double using FPU instructions.
- __ mtc1(scratch1, dst);
- __ cvt_d_w(dst, dst);
- if (destination == kCoreRegisters) {
- // Load the converted smi to dst1 and dst2 in double format.
- __ Move(dst1, dst2, dst);
- }
- __ bind(&done);
-}
-
-
-void FloatingPointHelper::ConvertNumberToInt32(MacroAssembler* masm,
- Register object,
- Register dst,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- FPURegister double_scratch,
- Label* not_number) {
- __ AssertRootValue(heap_number_map,
- Heap::kHeapNumberMapRootIndex,
- kHeapNumberMapRegisterClobbered);
- Label done;
- Label not_in_int32_range;
-
- __ UntagAndJumpIfSmi(dst, object, &done);
- __ lw(scratch1, FieldMemOperand(object, HeapNumber::kMapOffset));
- __ Branch(not_number, ne, scratch1, Operand(heap_number_map));
- __ ConvertToInt32(object,
- dst,
- scratch1,
- scratch2,
- double_scratch,
- &not_in_int32_range);
- __ jmp(&done);
-
- __ bind(&not_in_int32_range);
- __ lw(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
- __ lw(scratch2, FieldMemOperand(object, HeapNumber::kMantissaOffset));
-
- __ EmitOutOfInt32RangeTruncate(dst,
- scratch1,
- scratch2,
- scratch3);
-
- __ bind(&done);
-}
-
-
-void FloatingPointHelper::ConvertIntToDouble(MacroAssembler* masm,
- Register int_scratch,
- Destination destination,
- FPURegister double_dst,
- Register dst_mantissa,
- Register dst_exponent,
- Register scratch2,
- FPURegister single_scratch) {
- ASSERT(!int_scratch.is(scratch2));
- ASSERT(!int_scratch.is(dst_mantissa));
- ASSERT(!int_scratch.is(dst_exponent));
-
- __ mtc1(int_scratch, single_scratch);
- __ cvt_d_w(double_dst, single_scratch);
- if (destination == kCoreRegisters) {
- __ Move(dst_mantissa, dst_exponent, double_dst);
- }
-}
-
-
-void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm,
- Register object,
- Destination destination,
- DoubleRegister double_dst,
- DoubleRegister double_scratch,
- Register dst_mantissa,
- Register dst_exponent,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- FPURegister single_scratch,
- Label* not_int32) {
- ASSERT(!scratch1.is(object) && !scratch2.is(object));
- ASSERT(!scratch1.is(scratch2));
- ASSERT(!heap_number_map.is(object) &&
- !heap_number_map.is(scratch1) &&
- !heap_number_map.is(scratch2));
-
- Label done, obj_is_not_smi;
-
- __ JumpIfNotSmi(object, &obj_is_not_smi);
- __ SmiUntag(scratch1, object);
- ConvertIntToDouble(masm, scratch1, destination, double_dst, dst_mantissa,
- dst_exponent, scratch2, single_scratch);
+void DoubleToIStub::Generate(MacroAssembler* masm) {
+ Label out_of_range, only_low, negate, done;
+ Register input_reg = source();
+ Register result_reg = destination();
+
+ int double_offset = offset();
+ // Account for saved regs if input is sp.
+ if (input_reg.is(sp)) double_offset += 3 * kPointerSize;
+
+ Register scratch =
+ GetRegisterThatIsNotOneOf(input_reg, result_reg);
+ Register scratch2 =
+ GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch);
+ Register scratch3 =
+ GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch, scratch2);
+ DoubleRegister double_scratch = kLithiumScratchDouble.low();
+ DoubleRegister double_input = f12;
+
+ __ Push(scratch, scratch2, scratch3);
+
+ __ ldc1(double_input, MemOperand(input_reg, double_offset));
+
+ if (!skip_fastpath()) {
+ // Clear cumulative exception flags and save the FCSR.
+ __ cfc1(scratch2, FCSR);
+ __ ctc1(zero_reg, FCSR);
+ // Try a conversion to a signed integer.
+ __ trunc_w_d(double_scratch, double_input);
+ __ mfc1(result_reg, double_scratch);
+ // Retrieve and restore the FCSR.
+ __ cfc1(scratch, FCSR);
+ __ ctc1(scratch2, FCSR);
+ // Check for overflow and NaNs.
+ __ And(
+ scratch, scratch,
+ kFCSROverflowFlagMask | kFCSRUnderflowFlagMask
+ | kFCSRInvalidOpFlagMask);
+ // If we had no exceptions we are done.
+ __ Branch(&done, eq, scratch, Operand(zero_reg));
+ }
+
+ // Load the double value and perform a manual truncation.
+ Register input_high = scratch2;
+ Register input_low = scratch3;
+ __ Move(input_low, input_high, double_input);
+
+ Label normal_exponent, restore_sign;
+ // Extract the biased exponent in result.
+ __ Ext(result_reg,
+ input_high,
+ HeapNumber::kExponentShift,
+ HeapNumber::kExponentBits);
+
+ // Check for Infinity and NaNs, which should return 0.
+ __ Subu(scratch, result_reg, HeapNumber::kExponentMask);
+ __ Movz(result_reg, zero_reg, scratch);
+ __ Branch(&done, eq, scratch, Operand(zero_reg));
+
+ // Express exponent as delta to (number of mantissa bits + 31).
+ __ Subu(result_reg,
+ result_reg,
+ Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 31));
+
+ // If the delta is strictly positive, all bits would be shifted away,
+ // which means that we can return 0.
+ __ Branch(&normal_exponent, le, result_reg, Operand(zero_reg));
+ __ mov(result_reg, zero_reg);
__ Branch(&done);
- __ bind(&obj_is_not_smi);
- __ AssertRootValue(heap_number_map,
- Heap::kHeapNumberMapRootIndex,
- kHeapNumberMapRegisterClobbered);
- __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);
-
- // Load the number.
- // Load the double value.
- __ ldc1(double_dst, FieldMemOperand(object, HeapNumber::kValueOffset));
-
- Register except_flag = scratch2;
- __ EmitFPUTruncate(kRoundToZero,
- scratch1,
- double_dst,
- at,
- double_scratch,
- except_flag,
- kCheckForInexactConversion);
-
- // Jump to not_int32 if the operation did not succeed.
- __ Branch(not_int32, ne, except_flag, Operand(zero_reg));
- if (destination == kCoreRegisters) {
- __ Move(dst_mantissa, dst_exponent, double_dst);
- }
- __ bind(&done);
-}
-
+ __ bind(&normal_exponent);
+ const int kShiftBase = HeapNumber::kNonMantissaBitsInTopWord - 1;
+ // Calculate shift.
+ __ Addu(scratch, result_reg, Operand(kShiftBase + HeapNumber::kMantissaBits));
+
+ // Save the sign.
+ Register sign = result_reg;
+ result_reg = no_reg;
+ __ And(sign, input_high, Operand(HeapNumber::kSignMask));
+
+ // On ARM shifts > 31 bits are valid and will result in zero. On MIPS we need
+ // to check for this specific case.
+ Label high_shift_needed, high_shift_done;
+ __ Branch(&high_shift_needed, lt, scratch, Operand(32));
+ __ mov(input_high, zero_reg);
+ __ Branch(&high_shift_done);
+ __ bind(&high_shift_needed);
+
+ // Set the implicit 1 before the mantissa part in input_high.
+ __ Or(input_high,
+ input_high,
+ Operand(1 << HeapNumber::kMantissaBitsInTopWord));
+ // Shift the mantissa bits to the correct position.
+ // We don't need to clear non-mantissa bits as they will be shifted away.
+  // If they weren't, it would mean that the answer is in the 32-bit range.
+ __ sllv(input_high, input_high, scratch);
+
+ __ bind(&high_shift_done);
+
+ // Replace the shifted bits with bits from the lower mantissa word.
+ Label pos_shift, shift_done;
+ __ li(at, 32);
+ __ subu(scratch, at, scratch);
+ __ Branch(&pos_shift, ge, scratch, Operand(zero_reg));
+
+ // Negate scratch.
+ __ Subu(scratch, zero_reg, scratch);
+ __ sllv(input_low, input_low, scratch);
+ __ Branch(&shift_done);
-void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm,
- Register object,
- Register dst,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- DoubleRegister double_scratch0,
- DoubleRegister double_scratch1,
- Label* not_int32) {
- ASSERT(!dst.is(object));
- ASSERT(!scratch1.is(object) && !scratch2.is(object) && !scratch3.is(object));
- ASSERT(!scratch1.is(scratch2) &&
- !scratch1.is(scratch3) &&
- !scratch2.is(scratch3));
-
- Label done, maybe_undefined;
-
- __ UntagAndJumpIfSmi(dst, object, &done);
-
- __ AssertRootValue(heap_number_map,
- Heap::kHeapNumberMapRootIndex,
- kHeapNumberMapRegisterClobbered);
-
- __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, &maybe_undefined);
-
- // Object is a heap number.
- // Convert the floating point value to a 32-bit integer.
- // Load the double value.
- __ ldc1(double_scratch0, FieldMemOperand(object, HeapNumber::kValueOffset));
-
- Register except_flag = scratch2;
- __ EmitFPUTruncate(kRoundToZero,
- dst,
- double_scratch0,
- scratch1,
- double_scratch1,
- except_flag,
- kCheckForInexactConversion);
-
- // Jump to not_int32 if the operation did not succeed.
- __ Branch(not_int32, ne, except_flag, Operand(zero_reg));
- __ Branch(&done);
+ __ bind(&pos_shift);
+ __ srlv(input_low, input_low, scratch);
- __ bind(&maybe_undefined);
- __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- __ Branch(not_int32, ne, object, Operand(at));
- // |undefined| is truncated to 0.
- __ li(dst, Operand(Smi::FromInt(0)));
- // Fall through.
+ __ bind(&shift_done);
+ __ Or(input_high, input_high, Operand(input_low));
+ // Restore sign if necessary.
+ __ mov(scratch, sign);
+ result_reg = sign;
+ sign = no_reg;
+ __ Subu(result_reg, zero_reg, input_high);
+ __ Movz(result_reg, input_high, scratch);
__ bind(&done);
-}
-
-void FloatingPointHelper::CallCCodeForDoubleOperation(
- MacroAssembler* masm,
- Token::Value op,
- Register heap_number_result,
- Register scratch) {
- // Using core registers:
- // a0: Left value (least significant part of mantissa).
- // a1: Left value (sign, exponent, top of mantissa).
- // a2: Right value (least significant part of mantissa).
- // a3: Right value (sign, exponent, top of mantissa).
-
- // Assert that heap_number_result is saved.
- // We currently always use s0 to pass it.
- ASSERT(heap_number_result.is(s0));
-
- // Push the current return address before the C call.
- __ push(ra);
- __ PrepareCallCFunction(4, scratch); // Two doubles are 4 arguments.
- if (!IsMipsSoftFloatABI) {
- // We are not using MIPS FPU instructions, and parameters for the runtime
-    // function call are prepared in a0-a3 registers, but the function we are
-    // calling is compiled with the hard-float flag and expects the hard-float
-    // ABI (parameters in f12/f14 registers). We need to copy parameters from
-    // a0-a3 registers to f12/f14 register pairs.
- __ Move(f12, a0, a1);
- __ Move(f14, a2, a3);
- }
- {
- AllowExternalCallThatCantCauseGC scope(masm);
- __ CallCFunction(
- ExternalReference::double_fp_operation(op, masm->isolate()), 0, 2);
- }
- // Store answer in the overwritable heap number.
- if (!IsMipsSoftFloatABI) {
- // Double returned in register f0.
- __ sdc1(f0, FieldMemOperand(heap_number_result, HeapNumber::kValueOffset));
- } else {
- // Double returned in registers v0 and v1.
- __ sw(v1, FieldMemOperand(heap_number_result, HeapNumber::kExponentOffset));
- __ sw(v0, FieldMemOperand(heap_number_result, HeapNumber::kMantissaOffset));
- }
- // Place heap_number_result in v0 and return to the pushed return address.
- __ pop(ra);
- __ Ret(USE_DELAY_SLOT);
- __ mov(v0, heap_number_result);
+ __ Pop(scratch, scratch2, scratch3);
+ __ Ret();
}
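
DoubleToIStub first attempts trunc_w_d and checks the FCSR overflow, underflow, and invalid-operation bits; only when that fast path fails does the manual bit-twiddling above run. Either way the intended result is ECMAScript ToInt32: truncate toward zero, keep the low 32 bits of the magnitude, and reapply the sign. A hedged host-side equivalent, not V8 code:

  #include <cmath>
  #include <cstdint>
  #include <cstring>

  int32_t DoubleToInt32(double value) {
    if (!std::isfinite(value)) return 0;  // NaN and infinities -> 0

    uint64_t bits;
    std::memcpy(&bits, &value, sizeof(bits));
    int biased = static_cast<int>((bits >> 52) & 0x7FF);
    int exponent = biased - 1075;  // value = +/- mantissa * 2^exponent
    uint64_t mantissa = bits & ((uint64_t{1} << 52) - 1);
    if (biased != 0) mantissa |= uint64_t{1} << 52;  // implicit leading 1

    uint32_t low;
    if (exponent >= 32 || exponent <= -53) {
      low = 0;  // every integer bit lands outside the low 32 bits
    } else if (exponent < 0) {
      low = static_cast<uint32_t>(mantissa >> -exponent);  // truncate
    } else {
      low = static_cast<uint32_t>(mantissa << exponent);   // modulo 2^32
    }
    // Negate in unsigned arithmetic to avoid overflow on INT32_MIN.
    uint32_t magnitude = (bits >> 63) ? 0u - low : low;
    return static_cast<int32_t>(magnitude);
  }
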
-bool WriteInt32ToHeapNumberStub::IsPregenerated() {
+bool WriteInt32ToHeapNumberStub::IsPregenerated(Isolate* isolate) {
// These variants are compiled ahead of time. See next method.
if (the_int_.is(a1) &&
the_heap_number_.is(v0) &&
@@ -1589,6 +1316,42 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
}
+// Generates code to call a C function to do a double operation.
+// This code never falls through, but returns with a heap number containing
+// the result in v0.
+// Register heap_number_result must be a heap number in which the
+// result of the operation will be stored.
+// Requires the following layout on entry:
+// a0: Left value (least significant part of mantissa).
+// a1: Left value (sign, exponent, top of mantissa).
+// a2: Right value (least significant part of mantissa).
+// a3: Right value (sign, exponent, top of mantissa).
+static void CallCCodeForDoubleOperation(MacroAssembler* masm,
+ Token::Value op,
+ Register heap_number_result,
+ Register scratch) {
+ // Assert that heap_number_result is saved.
+ // We currently always use s0 to pass it.
+ ASSERT(heap_number_result.is(s0));
+
+ // Push the current return address before the C call.
+ __ push(ra);
+ __ PrepareCallCFunction(4, scratch); // Two doubles are 4 arguments.
+ {
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ CallCFunction(
+ ExternalReference::double_fp_operation(op, masm->isolate()), 0, 2);
+ }
+ // Store answer in the overwritable heap number.
+ // Double returned in register f0.
+ __ sdc1(f0, FieldMemOperand(heap_number_result, HeapNumber::kValueOffset));
+ // Place heap_number_result in v0 and return to the pushed return address.
+ __ pop(ra);
+ __ Ret(USE_DELAY_SLOT);
+ __ mov(v0, heap_number_result);
+}
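
Both remaining call sites below guard this helper with op != Token::MOD, so it is now reached only for modulus: MIPS32 has no FPU remainder instruction, and the stub calls out through ExternalReference::double_fp_operation instead. A minimal sketch, assuming that C target behaves like fmod:

  #include <cmath>

  // Assumed shape of the C fallback for Token::MOD.
  double DoubleMod(double left, double right) {
    return std::fmod(left, right);  // result carries the sign of |left|
  }
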
+
+
void BinaryOpStub::Initialize() {
platform_specific_bit_ = true; // FPU is a base requirement for V8.
}
@@ -1793,7 +1556,6 @@ void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm,
Register right = a0;
Register scratch1 = t3;
Register scratch2 = t5;
- Register scratch3 = t0;
ASSERT(smi_operands || (not_numbers != NULL));
if (smi_operands) {
@@ -1816,49 +1578,41 @@ void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm,
case Token::MUL:
case Token::DIV:
case Token::MOD: {
- // Load left and right operands into f12 and f14 or a0/a1 and a2/a3
- // depending on operation.
- FloatingPointHelper::Destination destination =
- op != Token::MOD ?
- FloatingPointHelper::kFPURegisters :
- FloatingPointHelper::kCoreRegisters;
-
// Allocate new heap number for result.
Register result = s0;
BinaryOpStub_GenerateHeapResultAllocation(
masm, result, heap_number_map, scratch1, scratch2, gc_required, mode);
- // Load the operands.
+ // Load left and right operands into f12 and f14.
if (smi_operands) {
- FloatingPointHelper::LoadSmis(masm, destination, scratch1, scratch2);
+ __ SmiUntag(scratch1, a0);
+ __ mtc1(scratch1, f14);
+ __ cvt_d_w(f14, f14);
+ __ SmiUntag(scratch1, a1);
+ __ mtc1(scratch1, f12);
+ __ cvt_d_w(f12, f12);
} else {
- // Load right operand to f14 or a2/a3.
+ // Load right operand to f14.
if (right_type == BinaryOpIC::INT32) {
- FloatingPointHelper::LoadNumberAsInt32Double(
- masm, right, destination, f14, f16, a2, a3, heap_number_map,
- scratch1, scratch2, f2, miss);
+ __ LoadNumberAsInt32Double(
+ right, f14, heap_number_map, scratch1, scratch2, f2, miss);
} else {
Label* fail = (right_type == BinaryOpIC::NUMBER) ? miss : not_numbers;
- FloatingPointHelper::LoadNumber(
- masm, destination, right, f14, a2, a3, heap_number_map,
- scratch1, scratch2, fail);
+ __ LoadNumber(right, f14, heap_number_map, scratch1, fail);
}
// Load left operand to f12 or a0/a1. This keeps a0/a1 intact if it
// jumps to |miss|.
if (left_type == BinaryOpIC::INT32) {
- FloatingPointHelper::LoadNumberAsInt32Double(
- masm, left, destination, f12, f16, a0, a1, heap_number_map,
- scratch1, scratch2, f2, miss);
+ __ LoadNumberAsInt32Double(
+ left, f12, heap_number_map, scratch1, scratch2, f2, miss);
} else {
Label* fail = (left_type == BinaryOpIC::NUMBER) ? miss : not_numbers;
- FloatingPointHelper::LoadNumber(
- masm, destination, left, f12, a0, a1, heap_number_map,
- scratch1, scratch2, fail);
+ __ LoadNumber(left, f12, heap_number_map, scratch1, fail);
}
}
// Calculate the result.
- if (destination == FloatingPointHelper::kFPURegisters) {
+ if (op != Token::MOD) {
// Using FPU registers:
// f12: Left value.
// f14: Right value.
@@ -1887,10 +1641,7 @@ void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm,
__ mov(v0, result);
} else {
// Call the C function to handle the double operation.
- FloatingPointHelper::CallCCodeForDoubleOperation(masm,
- op,
- result,
- scratch1);
+ CallCCodeForDoubleOperation(masm, op, result, scratch1);
if (FLAG_debug_code) {
__ stop("Unreachable code.");
}
@@ -1908,24 +1659,8 @@ void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm,
__ SmiUntag(a2, right);
} else {
// Convert operands to 32-bit integers. Right in a2 and left in a3.
- FloatingPointHelper::ConvertNumberToInt32(masm,
- left,
- a3,
- heap_number_map,
- scratch1,
- scratch2,
- scratch3,
- f0,
- not_numbers);
- FloatingPointHelper::ConvertNumberToInt32(masm,
- right,
- a2,
- heap_number_map,
- scratch1,
- scratch2,
- scratch3,
- f0,
- not_numbers);
+ __ TruncateNumberToI(left, a3, heap_number_map, scratch1, not_numbers);
+ __ TruncateNumberToI(right, a2, heap_number_map, scratch1, not_numbers);
}
Label result_not_a_smi;
switch (op) {
@@ -2159,36 +1894,13 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
// Load both operands and check that they are 32-bit integer.
// Jump to type transition if they are not. The registers a0 and a1 (right
// and left) are preserved for the runtime call.
- FloatingPointHelper::Destination destination = (op_ != Token::MOD)
- ? FloatingPointHelper::kFPURegisters
- : FloatingPointHelper::kCoreRegisters;
-
- FloatingPointHelper::LoadNumberAsInt32Double(masm,
- right,
- destination,
- f14,
- f16,
- a2,
- a3,
- heap_number_map,
- scratch1,
- scratch2,
- f2,
- &transition);
- FloatingPointHelper::LoadNumberAsInt32Double(masm,
- left,
- destination,
- f12,
- f16,
- t0,
- t1,
- heap_number_map,
- scratch1,
- scratch2,
- f2,
- &transition);
-
- if (destination == FloatingPointHelper::kFPURegisters) {
+
+ __ LoadNumberAsInt32Double(
+ right, f14, heap_number_map, scratch1, scratch2, f2, &transition);
+ __ LoadNumberAsInt32Double(
+ left, f12, heap_number_map, scratch1, scratch2, f2, &transition);
+
+ if (op_ != Token::MOD) {
Label return_heap_number;
switch (op_) {
case Token::ADD:
@@ -2265,10 +1977,6 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
__ BranchF(&transition, NULL, ne, f14, f16);
}
- // We preserved a0 and a1 to be able to call runtime.
- // Save the left value on the stack.
- __ Push(t1, t0);
-
Label pop_and_call_runtime;
// Allocate a heap number to store the result.
@@ -2281,12 +1989,8 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
&pop_and_call_runtime,
mode_);
- // Load the left value from the value saved on the stack.
- __ Pop(a1, a0);
-
// Call the C function to handle the double operation.
- FloatingPointHelper::CallCCodeForDoubleOperation(
- masm, op_, heap_number_result, scratch1);
+ CallCCodeForDoubleOperation(masm, op_, heap_number_result, scratch1);
if (FLAG_debug_code) {
__ stop("Unreachable code.");
}
@@ -2306,30 +2010,13 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
case Token::SHR:
case Token::SHL: {
Label return_heap_number;
- Register scratch3 = t1;
// Convert operands to 32-bit integers. Right in a2 and left in a3. The
// registers a0 and a1 (right and left) are preserved for the runtime
// call.
- FloatingPointHelper::LoadNumberAsInt32(masm,
- left,
- a3,
- heap_number_map,
- scratch1,
- scratch2,
- scratch3,
- f0,
- f2,
- &transition);
- FloatingPointHelper::LoadNumberAsInt32(masm,
- right,
- a2,
- heap_number_map,
- scratch1,
- scratch2,
- scratch3,
- f0,
- f2,
- &transition);
+ __ LoadNumberAsInt32(
+ left, a3, heap_number_map, scratch1, scratch2, f0, f2, &transition);
+ __ LoadNumberAsInt32(
+ right, a2, heap_number_map, scratch1, scratch2, f0, f2, &transition);
// The ECMA-262 standard specifies that, for shift operations, only the
// 5 least significant bits of the shift value should be used.
@@ -2816,16 +2503,6 @@ Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
}
-void StackCheckStub::Generate(MacroAssembler* masm) {
- __ TailCallRuntime(Runtime::kStackGuard, 0, 1);
-}
-
-
-void InterruptStub::Generate(MacroAssembler* masm) {
- __ TailCallRuntime(Runtime::kInterrupt, 0, 1);
-}
-
-
void MathPowStub::Generate(MacroAssembler* masm) {
const Register base = a1;
const Register exponent = a2;
@@ -3046,8 +2723,8 @@ bool CEntryStub::NeedsImmovableCode() {
}
-bool CEntryStub::IsPregenerated() {
- return (!save_doubles_ || ISOLATE->fp_stubs_generated()) &&
+bool CEntryStub::IsPregenerated(Isolate* isolate) {
+ return (!save_doubles_ || isolate->fp_stubs_generated()) &&
result_size_ == 1;
}
@@ -6184,7 +5861,6 @@ void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
__ Branch(&done, lt, scratch1, Operand(FIRST_NONSTRING_TYPE));
// Check the number to string cache.
- Label not_cached;
__ bind(&not_string);
// Puts the cached result into scratch1.
NumberToStringStub::GenerateLookupNumberStringCache(masm,
@@ -6193,23 +5869,9 @@ void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
scratch2,
scratch3,
scratch4,
- &not_cached);
+ slow);
__ mov(arg, scratch1);
__ sw(arg, MemOperand(sp, stack_offset));
- __ jmp(&done);
-
- // Check if the argument is a safe string wrapper.
- __ bind(&not_cached);
- __ JumpIfSmi(arg, slow);
- __ GetObjectType(arg, scratch1, scratch2); // map -> scratch1.
- __ Branch(slow, ne, scratch2, Operand(JS_VALUE_TYPE));
- __ lbu(scratch2, FieldMemOperand(scratch1, Map::kBitField2Offset));
- __ li(scratch4, 1 << Map::kStringWrapperSafeForDefaultValueOf);
- __ And(scratch2, scratch2, scratch4);
- __ Branch(slow, ne, scratch2, Operand(scratch4));
- __ lw(arg, FieldMemOperand(arg, JSValue::kValueOffset));
- __ sw(arg, MemOperand(sp, stack_offset));
-
__ bind(&done);
}
@@ -6877,8 +6539,6 @@ static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
// Also used in StoreIC::GenerateNormal via GenerateDictionaryStore.
// Also used in KeyedStoreIC::GenerateGeneric.
{ REG(a3), REG(t0), REG(t1), EMIT_REMEMBERED_SET },
- // Used in CompileStoreGlobal.
- { REG(t0), REG(a1), REG(a2), OMIT_REMEMBERED_SET },
// Used in StoreStubCompiler::CompileStoreField via GenerateStoreField.
{ REG(a1), REG(a2), REG(a3), EMIT_REMEMBERED_SET },
{ REG(a3), REG(a2), REG(a1), EMIT_REMEMBERED_SET },
@@ -6910,7 +6570,7 @@ static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
#undef REG
-bool RecordWriteStub::IsPregenerated() {
+bool RecordWriteStub::IsPregenerated(Isolate* isolate) {
for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
!entry->object.is(no_reg);
entry++) {
@@ -7214,10 +6874,7 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
// Array literal has ElementsKind of FAST_*_DOUBLE_ELEMENTS.
__ bind(&double_elements);
__ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset));
- __ StoreNumberToDoubleElements(a0, a3,
- // Overwrites all regs after this.
- t1, t2, t3, t5, a2,
- &slow_elements);
+ __ StoreNumberToDoubleElements(a0, a3, t1, t3, t5, a2, &slow_elements);
__ Ret(USE_DELAY_SLOT);
__ mov(v0, a0);
}
@@ -7289,6 +6946,9 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
#else
// Under the simulator we need to indirect the entry hook through a
// trampoline function at a known address.
+ // It additionally takes an isolate as a third parameter.
+ __ li(a2, Operand(ExternalReference::isolate_address(masm->isolate())));
+
ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline));
__ li(at, Operand(ExternalReference(&dispatcher,
ExternalReference::BUILTIN_CALL,
@@ -7308,87 +6968,125 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
template<class T>
-static void CreateArrayDispatch(MacroAssembler* masm) {
- int last_index = GetSequenceIndexFromFastElementsKind(
- TERMINAL_FAST_ELEMENTS_KIND);
- for (int i = 0; i <= last_index; ++i) {
- Label next;
- ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
- __ Branch(&next, ne, a3, Operand(kind));
- T stub(kind);
+static void CreateArrayDispatch(MacroAssembler* masm,
+ AllocationSiteOverrideMode mode) {
+ if (mode == DISABLE_ALLOCATION_SITES) {
+ T stub(GetInitialFastElementsKind(),
+ CONTEXT_CHECK_REQUIRED,
+ mode);
__ TailCallStub(&stub);
- __ bind(&next);
- }
+ } else if (mode == DONT_OVERRIDE) {
+ int last_index = GetSequenceIndexFromFastElementsKind(
+ TERMINAL_FAST_ELEMENTS_KIND);
+ for (int i = 0; i <= last_index; ++i) {
+ Label next;
+ ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
+ __ Branch(&next, ne, a3, Operand(kind));
+ T stub(kind);
+ __ TailCallStub(&stub);
+ __ bind(&next);
+ }
- // If we reached this point there is a problem.
- __ Abort(kUnexpectedElementsKindInArrayConstructor);
+ // If we reached this point there is a problem.
+ __ Abort(kUnexpectedElementsKindInArrayConstructor);
+ } else {
+ UNREACHABLE();
+ }
}
-static void CreateArrayDispatchOneArgument(MacroAssembler* masm) {
- // a2 - type info cell
- // a3 - kind
+static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
+ AllocationSiteOverrideMode mode) {
+ // a2 - type info cell (if mode != DISABLE_ALLOCATION_SITES)
+ // a3 - kind (if mode != DISABLE_ALLOCATION_SITES)
// a0 - number of arguments
// a1 - constructor?
// sp[0] - last argument
- ASSERT(FAST_SMI_ELEMENTS == 0);
- ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
- ASSERT(FAST_ELEMENTS == 2);
- ASSERT(FAST_HOLEY_ELEMENTS == 3);
- ASSERT(FAST_DOUBLE_ELEMENTS == 4);
- ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
-
- // is the low bit set? If so, we are holey and that is good.
Label normal_sequence;
- __ And(at, a3, Operand(1));
- __ Branch(&normal_sequence, ne, at, Operand(zero_reg));
+ if (mode == DONT_OVERRIDE) {
+ ASSERT(FAST_SMI_ELEMENTS == 0);
+ ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+ ASSERT(FAST_ELEMENTS == 2);
+ ASSERT(FAST_HOLEY_ELEMENTS == 3);
+ ASSERT(FAST_DOUBLE_ELEMENTS == 4);
+ ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
+
+    // Is the low bit set? If so, we are holey and that is good.
+ __ And(at, a3, Operand(1));
+ __ Branch(&normal_sequence, ne, at, Operand(zero_reg));
+ }
// look at the first argument
__ lw(t1, MemOperand(sp, 0));
__ Branch(&normal_sequence, eq, t1, Operand(zero_reg));
- // We are going to create a holey array, but our kind is non-holey.
- // Fix kind and retry (only if we have an allocation site in the cell).
- __ Addu(a3, a3, Operand(1));
- __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- __ Branch(&normal_sequence, eq, a2, Operand(at));
- __ lw(t1, FieldMemOperand(a2, Cell::kValueOffset));
- __ lw(t1, FieldMemOperand(t1, 0));
- __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
- __ Branch(&normal_sequence, ne, t1, Operand(at));
+ if (mode == DISABLE_ALLOCATION_SITES) {
+ ElementsKind initial = GetInitialFastElementsKind();
+ ElementsKind holey_initial = GetHoleyElementsKind(initial);
- // Save the resulting elements kind in type info
- __ SmiTag(a3);
- __ lw(t1, FieldMemOperand(a2, Cell::kValueOffset));
- __ sw(a3, FieldMemOperand(t1, AllocationSite::kTransitionInfoOffset));
- __ SmiUntag(a3);
+ ArraySingleArgumentConstructorStub stub_holey(holey_initial,
+ CONTEXT_CHECK_REQUIRED,
+ DISABLE_ALLOCATION_SITES);
+ __ TailCallStub(&stub_holey);
- __ bind(&normal_sequence);
- int last_index = GetSequenceIndexFromFastElementsKind(
- TERMINAL_FAST_ELEMENTS_KIND);
- for (int i = 0; i <= last_index; ++i) {
- Label next;
- ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
- __ Branch(&next, ne, a3, Operand(kind));
- ArraySingleArgumentConstructorStub stub(kind);
+ __ bind(&normal_sequence);
+ ArraySingleArgumentConstructorStub stub(initial,
+ CONTEXT_CHECK_REQUIRED,
+ DISABLE_ALLOCATION_SITES);
__ TailCallStub(&stub);
- __ bind(&next);
- }
+ } else if (mode == DONT_OVERRIDE) {
+ // We are going to create a holey array, but our kind is non-holey.
+ // Fix kind and retry (only if we have an allocation site in the cell).
+ __ Addu(a3, a3, Operand(1));
+ __ lw(t1, FieldMemOperand(a2, Cell::kValueOffset));
- // If we reached this point there is a problem.
- __ Abort(kUnexpectedElementsKindInArrayConstructor);
+ if (FLAG_debug_code) {
+ __ lw(t1, FieldMemOperand(t1, 0));
+ __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
+ __ Assert(eq, kExpectedAllocationSiteInCell, t1, Operand(at));
+ __ lw(t1, FieldMemOperand(a2, Cell::kValueOffset));
+ }
+
+ // Save the resulting elements kind in type info
+ __ SmiTag(a3);
+ __ lw(t1, FieldMemOperand(a2, Cell::kValueOffset));
+ __ sw(a3, FieldMemOperand(t1, AllocationSite::kTransitionInfoOffset));
+ __ SmiUntag(a3);
+
+ __ bind(&normal_sequence);
+ int last_index = GetSequenceIndexFromFastElementsKind(
+ TERMINAL_FAST_ELEMENTS_KIND);
+ for (int i = 0; i <= last_index; ++i) {
+ Label next;
+ ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
+ __ Branch(&next, ne, a3, Operand(kind));
+ ArraySingleArgumentConstructorStub stub(kind);
+ __ TailCallStub(&stub);
+ __ bind(&next);
+ }
+
+ // If we reached this point there is a problem.
+ __ Abort(kUnexpectedElementsKindInArrayConstructor);
+ } else {
+ UNREACHABLE();
+ }
}
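
The kind fix-up above relies on the ElementsKind numbering pinned down by the ASSERTs: packed kinds are even and the matching holey kind is the next odd value, so holeyness is just the low bit. A standalone C++ illustration:

  enum ElementsKind {
    FAST_SMI_ELEMENTS = 0,
    FAST_HOLEY_SMI_ELEMENTS = 1,
    FAST_ELEMENTS = 2,
    FAST_HOLEY_ELEMENTS = 3,
    FAST_DOUBLE_ELEMENTS = 4,
    FAST_HOLEY_DOUBLE_ELEMENTS = 5
  };

  inline bool IsHoley(ElementsKind kind) {
    return (kind & 1) != 0;  // the "__ And(at, a3, Operand(1))" test
  }

  inline ElementsKind GetHoleyKind(ElementsKind kind) {
    // The "__ Addu(a3, a3, Operand(1))" fix-up; valid only for packed kinds.
    return static_cast<ElementsKind>(kind | 1);
  }
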
template<class T>
static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
+ ElementsKind initial_kind = GetInitialFastElementsKind();
+ ElementsKind initial_holey_kind = GetHoleyElementsKind(initial_kind);
+
int to_index = GetSequenceIndexFromFastElementsKind(
TERMINAL_FAST_ELEMENTS_KIND);
for (int i = 0; i <= to_index; ++i) {
ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
T stub(kind);
stub.GetCode(isolate)->set_is_pregenerated(true);
- if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
+ if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE ||
+ (!FLAG_track_allocation_sites &&
+ (kind == initial_kind || kind == initial_holey_kind))) {
T stub1(kind, CONTEXT_CHECK_REQUIRED, DISABLE_ALLOCATION_SITES);
stub1.GetCode(isolate)->set_is_pregenerated(true);
}
@@ -7421,6 +7119,33 @@ void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
}
+void ArrayConstructorStub::GenerateDispatchToArrayStub(
+ MacroAssembler* masm,
+ AllocationSiteOverrideMode mode) {
+ if (argument_count_ == ANY) {
+ Label not_zero_case, not_one_case;
+ __ And(at, a0, a0);
+ __ Branch(&not_zero_case, ne, at, Operand(zero_reg));
+ CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
+
+ __ bind(&not_zero_case);
+ __ Branch(&not_one_case, gt, a0, Operand(1));
+ CreateArrayDispatchOneArgument(masm, mode);
+
+ __ bind(&not_one_case);
+ CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
+ } else if (argument_count_ == NONE) {
+ CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
+ } else if (argument_count_ == ONE) {
+ CreateArrayDispatchOneArgument(masm, mode);
+ } else if (argument_count_ == MORE_THAN_ONE) {
+ CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
+ } else {
+ UNREACHABLE();
+ }
+}
+
+
void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : argc (only if argument_count_ == ANY)
@@ -7454,49 +7179,24 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
__ bind(&okay_here);
}
- Label no_info, switch_ready;
+ Label no_info;
// Get the elements kind and case on that.
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
__ Branch(&no_info, eq, a2, Operand(at));
__ lw(a3, FieldMemOperand(a2, Cell::kValueOffset));
- // The type cell may have undefined in its value.
- __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- __ Branch(&no_info, eq, a3, Operand(at));
-
- // The type cell has either an AllocationSite or a JSFunction.
+ // If the type cell is undefined, or contains anything other than an
+ // AllocationSite, call an array constructor that doesn't use AllocationSites.
__ lw(t0, FieldMemOperand(a3, 0));
__ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
__ Branch(&no_info, ne, t0, Operand(at));
__ lw(a3, FieldMemOperand(a3, AllocationSite::kTransitionInfoOffset));
__ SmiUntag(a3);
- __ jmp(&switch_ready);
- __ bind(&no_info);
- __ li(a3, Operand(GetInitialFastElementsKind()));
- __ bind(&switch_ready);
+ GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);
- if (argument_count_ == ANY) {
- Label not_zero_case, not_one_case;
- __ And(at, a0, a0);
- __ Branch(&not_zero_case, ne, at, Operand(zero_reg));
- CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm);
-
- __ bind(&not_zero_case);
- __ Branch(&not_one_case, gt, a0, Operand(1));
- CreateArrayDispatchOneArgument(masm);
-
- __ bind(&not_one_case);
- CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm);
- } else if (argument_count_ == NONE) {
- CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm);
- } else if (argument_count_ == ONE) {
- CreateArrayDispatchOneArgument(masm);
- } else if (argument_count_ == MORE_THAN_ONE) {
- CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm);
- } else {
- UNREACHABLE();
- }
+ __ bind(&no_info);
+ GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);
}
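
The rewritten constructor now makes a single decision: if the type cell holds an AllocationSite, it dispatches on the site's transition info with DONT_OVERRIDE; anything else, including undefined, falls through to DISABLE_ALLOCATION_SITES. A hedged restatement with illustrative types:

  enum AllocationSiteOverrideMode { DONT_OVERRIDE, DISABLE_ALLOCATION_SITES };

  struct TypeCell {          // illustrative, not V8's Cell
    bool is_undefined;
    bool holds_allocation_site;
    int transition_kind;     // SmiUntag'd transition info
  };

  AllocationSiteOverrideMode ChooseMode(const TypeCell& cell, int* kind) {
    if (cell.is_undefined || !cell.holds_allocation_site) {
      return DISABLE_ALLOCATION_SITES;  // the &no_info path above
    }
    *kind = cell.transition_kind;
    return DONT_OVERRIDE;
  }
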
diff --git a/deps/v8/src/mips/code-stubs-mips.h b/deps/v8/src/mips/code-stubs-mips.h
index 1ae1d3454..8c9d22ae5 100644
--- a/deps/v8/src/mips/code-stubs-mips.h
+++ b/deps/v8/src/mips/code-stubs-mips.h
@@ -69,7 +69,7 @@ class StoreBufferOverflowStub: public PlatformCodeStub {
void Generate(MacroAssembler* masm);
- virtual bool IsPregenerated() { return true; }
+ virtual bool IsPregenerated(Isolate* isolate) V8_OVERRIDE { return true; }
static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
virtual bool SometimesSetsUpAFrame() { return false; }
@@ -240,7 +240,7 @@ class WriteInt32ToHeapNumberStub : public PlatformCodeStub {
ASSERT(SignRegisterBits::is_valid(sign_.code()));
}
- bool IsPregenerated();
+ virtual bool IsPregenerated(Isolate* isolate) V8_OVERRIDE;
static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
private:
@@ -316,7 +316,7 @@ class RecordWriteStub: public PlatformCodeStub {
INCREMENTAL_COMPACTION
};
- virtual bool IsPregenerated();
+ virtual bool IsPregenerated(Isolate* isolate) V8_OVERRIDE;
static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
virtual bool SometimesSetsUpAFrame() { return false; }
@@ -391,7 +391,7 @@ class RecordWriteStub: public PlatformCodeStub {
address_(address),
scratch0_(scratch0) {
ASSERT(!AreAliased(scratch0, object, address, no_reg));
- scratch1_ = GetRegThatIsNotOneOf(object_, address_, scratch0_);
+ scratch1_ = GetRegisterThatIsNotOneOf(object_, address_, scratch0_);
}
void Save(MacroAssembler* masm) {
@@ -434,19 +434,6 @@ class RecordWriteStub: public PlatformCodeStub {
Register scratch0_;
Register scratch1_;
- Register GetRegThatIsNotOneOf(Register r1,
- Register r2,
- Register r3) {
- for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
- Register candidate = Register::FromAllocationIndex(i);
- if (candidate.is(r1)) continue;
- if (candidate.is(r2)) continue;
- if (candidate.is(r3)) continue;
- return candidate;
- }
- UNREACHABLE();
- return no_reg;
- }
friend class RecordWriteStub;
};
@@ -527,119 +514,6 @@ class DirectCEntryStub: public PlatformCodeStub {
bool NeedsImmovableCode() { return true; }
};
-class FloatingPointHelper : public AllStatic {
- public:
- enum Destination {
- kFPURegisters,
- kCoreRegisters
- };
-
-
- // Loads smis from a0 and a1 (right and left in binary operations) into
- // floating point registers. Depending on the destination the values ends up
-  // floating point registers. Depending on the destination the values end up
-  // either in f14 and f12 or in a2/a3 and a0/a1 respectively. If the destination
-  // is floating point registers, FPU must be supported. If core registers are
-  // requested when FPU is supported, f12 and f14 will be scratched.
- Destination destination,
- Register scratch1,
- Register scratch2);
-
- // Convert the smi or heap number in object to an int32 using the rules
- // for ToInt32 as described in ECMAScript 9.5.: the value is truncated
- // and brought into the range -2^31 .. +2^31 - 1.
- static void ConvertNumberToInt32(MacroAssembler* masm,
- Register object,
- Register dst,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- FPURegister double_scratch,
- Label* not_int32);
-
- // Converts the integer (untagged smi) in |int_scratch| to a double, storing
- // the result either in |double_dst| or |dst2:dst1|, depending on
- // |destination|.
- // Warning: The value in |int_scratch| will be changed in the process!
- static void ConvertIntToDouble(MacroAssembler* masm,
- Register int_scratch,
- Destination destination,
- FPURegister double_dst,
- Register dst1,
- Register dst2,
- Register scratch2,
- FPURegister single_scratch);
-
- // Load the number from object into double_dst in the double format.
- // Control will jump to not_int32 if the value cannot be exactly represented
- // by a 32-bit integer.
-  // Floating point values in the 32-bit integer range that are not exact
-  // integers won't be loaded.
- static void LoadNumberAsInt32Double(MacroAssembler* masm,
- Register object,
- Destination destination,
- FPURegister double_dst,
- FPURegister double_scratch,
- Register dst1,
- Register dst2,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- FPURegister single_scratch,
- Label* not_int32);
-
- // Loads the number from object into dst as a 32-bit integer.
- // Control will jump to not_int32 if the object cannot be exactly represented
- // by a 32-bit integer.
-  // Floating point values in the 32-bit integer range that are not exact
-  // integers won't be converted.
- // scratch3 is not used when FPU is supported.
- static void LoadNumberAsInt32(MacroAssembler* masm,
- Register object,
- Register dst,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- FPURegister double_scratch0,
- FPURegister double_scratch1,
- Label* not_int32);
-
- // Generates code to call a C function to do a double operation using core
- // registers. (Used when FPU is not supported.)
- // This code never falls through, but returns with a heap number containing
- // the result in v0.
-  // Register heap_number_result must be a heap number in which the
- // result of the operation will be stored.
- // Requires the following layout on entry:
- // a0: Left value (least significant part of mantissa).
- // a1: Left value (sign, exponent, top of mantissa).
- // a2: Right value (least significant part of mantissa).
- // a3: Right value (sign, exponent, top of mantissa).
- static void CallCCodeForDoubleOperation(MacroAssembler* masm,
- Token::Value op,
- Register heap_number_result,
- Register scratch);
-
-  // Loads the number from |object| into floating point registers.
- // Depending on |destination| the value ends up either in |dst| or
- // in |dst1|/|dst2|. If |destination| is kFPURegisters, then FPU
- // must be supported. If kCoreRegisters are requested and FPU is
- // supported, |dst| will be scratched. If |object| is neither smi nor
- // heap number, |not_number| is jumped to with |object| still intact.
- static void LoadNumber(MacroAssembler* masm,
- FloatingPointHelper::Destination destination,
- Register object,
- FPURegister dst,
- Register dst1,
- Register dst2,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Label* not_number);
-};
-
class NameDictionaryLookupStub: public PlatformCodeStub {
public:
diff --git a/deps/v8/src/mips/codegen-mips.h b/deps/v8/src/mips/codegen-mips.h
index 240b02ce4..32d7d0d65 100644
--- a/deps/v8/src/mips/codegen-mips.h
+++ b/deps/v8/src/mips/codegen-mips.h
@@ -46,8 +46,8 @@ enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
class CodeGenerator: public AstVisitor {
public:
- CodeGenerator() {
- InitializeAstVisitor();
+ explicit CodeGenerator(Isolate* isolate) {
+ InitializeAstVisitor(isolate);
}
static bool MakeCode(CompilationInfo* info);
@@ -63,7 +63,7 @@ class CodeGenerator: public AstVisitor {
// Print the code after compiling it.
static void PrintCode(Handle<Code> code, CompilationInfo* info);
- static bool ShouldGenerateLog(Expression* type);
+ static bool ShouldGenerateLog(Isolate* isolate, Expression* type);
static void SetFunctionInfo(Handle<JSFunction> fun,
FunctionLiteral* lit,
diff --git a/deps/v8/src/mips/cpu-mips.cc b/deps/v8/src/mips/cpu-mips.cc
index d13b23330..49d0b377e 100644
--- a/deps/v8/src/mips/cpu-mips.cc
+++ b/deps/v8/src/mips/cpu-mips.cc
@@ -87,14 +87,6 @@ void CPU::FlushICache(void* start, size_t size) {
#endif // USE_SIMULATOR.
}
-
-void CPU::DebugBreak() {
-#ifdef __mips
- asm volatile("break");
-#endif // #ifdef __mips
-}
-
-
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_MIPS
diff --git a/deps/v8/src/mips/debug-mips.cc b/deps/v8/src/mips/debug-mips.cc
index 020228fc6..1535231dd 100644
--- a/deps/v8/src/mips/debug-mips.cc
+++ b/deps/v8/src/mips/debug-mips.cc
@@ -60,7 +60,7 @@ void BreakLocationIterator::SetDebugBreakAtReturn() {
// li and Call pseudo-instructions emit two instructions each.
patcher.masm()->li(v8::internal::t9,
Operand(reinterpret_cast<int32_t>(
- Isolate::Current()->debug()->debug_break_return()->entry())));
+ debug_info_->GetIsolate()->debug()->debug_break_return()->entry())));
patcher.masm()->Call(v8::internal::t9);
patcher.masm()->nop();
patcher.masm()->nop();
@@ -105,7 +105,7 @@ void BreakLocationIterator::SetDebugBreakAtSlot() {
// call t9 (jalr t9 / nop instruction pair)
CodePatcher patcher(rinfo()->pc(), Assembler::kDebugBreakSlotInstructions);
patcher.masm()->li(v8::internal::t9, Operand(reinterpret_cast<int32_t>(
- Isolate::Current()->debug()->debug_break_slot()->entry())));
+ debug_info_->GetIsolate()->debug()->debug_break_slot()->entry())));
patcher.masm()->Call(v8::internal::t9);
}
diff --git a/deps/v8/src/mips/deoptimizer-mips.cc b/deps/v8/src/mips/deoptimizer-mips.cc
index 57d3880ed..16f75b863 100644
--- a/deps/v8/src/mips/deoptimizer-mips.cc
+++ b/deps/v8/src/mips/deoptimizer-mips.cc
@@ -101,12 +101,7 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
void Deoptimizer::PatchInterruptCodeAt(Code* unoptimized_code,
Address pc_after,
- Code* interrupt_code,
Code* replacement_code) {
- ASSERT(!InterruptCodeIsPatched(unoptimized_code,
- pc_after,
- interrupt_code,
- replacement_code));
static const int kInstrSize = Assembler::kInstrSize;
// Replace the sltu instruction with load-imm 1 to at, so beq is not taken.
CodePatcher patcher(pc_after - 6 * kInstrSize, 1);
@@ -123,12 +118,7 @@ void Deoptimizer::PatchInterruptCodeAt(Code* unoptimized_code,
void Deoptimizer::RevertInterruptCodeAt(Code* unoptimized_code,
Address pc_after,
- Code* interrupt_code,
- Code* replacement_code) {
- ASSERT(InterruptCodeIsPatched(unoptimized_code,
- pc_after,
- interrupt_code,
- replacement_code));
+ Code* interrupt_code) {
static const int kInstrSize = Assembler::kInstrSize;
// Restore the sltu instruction so beq can be taken again.
CodePatcher patcher(pc_after - 6 * kInstrSize, 1);
@@ -143,191 +133,33 @@ void Deoptimizer::RevertInterruptCodeAt(Code* unoptimized_code,
#ifdef DEBUG
-bool Deoptimizer::InterruptCodeIsPatched(Code* unoptimized_code,
- Address pc_after,
- Code* interrupt_code,
- Code* replacement_code) {
+Deoptimizer::InterruptPatchState Deoptimizer::GetInterruptPatchState(
+ Isolate* isolate,
+ Code* unoptimized_code,
+ Address pc_after) {
static const int kInstrSize = Assembler::kInstrSize;
ASSERT(Assembler::IsBeq(Assembler::instr_at(pc_after - 5 * kInstrSize)));
if (Assembler::IsAddImmediate(
Assembler::instr_at(pc_after - 6 * kInstrSize))) {
+ Code* osr_builtin =
+ isolate->builtins()->builtin(Builtins::kOnStackReplacement);
ASSERT(reinterpret_cast<uint32_t>(
Assembler::target_address_at(pc_after - 4 * kInstrSize)) ==
- reinterpret_cast<uint32_t>(replacement_code->entry()));
- return true;
+ reinterpret_cast<uint32_t>(osr_builtin->entry()));
+ return PATCHED_FOR_OSR;
} else {
+ // Get the interrupt stub code object to match against from cache.
+ Code* interrupt_builtin =
+ isolate->builtins()->builtin(Builtins::kInterruptCheck);
ASSERT(reinterpret_cast<uint32_t>(
Assembler::target_address_at(pc_after - 4 * kInstrSize)) ==
- reinterpret_cast<uint32_t>(interrupt_code->entry()));
- return false;
+ reinterpret_cast<uint32_t>(interrupt_builtin->entry()));
+ return NOT_PATCHED;
}
}
#endif // DEBUG
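
The DEBUG-only classifier keys off a single instruction: once patched for OSR, the slot six instructions before pc_after holds the li at, 1 (emitted as an addiu) instead of the original sltu stack check. A hedged decoder sketch; the opcode-field test is an assumption about what Assembler::IsAddImmediate checks:

  #include <cstdint>

  enum InterruptPatchState { NOT_PATCHED, PATCHED_FOR_OSR };

  inline bool IsAddImmediate(uint32_t instr) {
    return (instr >> 26) == 0x9;  // MIPS32 ADDIU opcode field (assumption)
  }

  InterruptPatchState GetInterruptPatchState(const uint32_t* pc_after) {
    // Six 4-byte instruction slots back, matching pc_after - 6 * kInstrSize.
    return IsAddImmediate(pc_after[-6]) ? PATCHED_FOR_OSR : NOT_PATCHED;
  }
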
-static int LookupBailoutId(DeoptimizationInputData* data, BailoutId ast_id) {
- ByteArray* translations = data->TranslationByteArray();
- int length = data->DeoptCount();
- for (int i = 0; i < length; i++) {
- if (data->AstId(i) == ast_id) {
- TranslationIterator it(translations, data->TranslationIndex(i)->value());
- int value = it.Next();
- ASSERT(Translation::BEGIN == static_cast<Translation::Opcode>(value));
- // Read the number of frames.
- value = it.Next();
- if (value == 1) return i;
- }
- }
- UNREACHABLE();
- return -1;
-}
-
-
-void Deoptimizer::DoComputeOsrOutputFrame() {
- DeoptimizationInputData* data = DeoptimizationInputData::cast(
- compiled_code_->deoptimization_data());
- unsigned ast_id = data->OsrAstId()->value();
-
- int bailout_id = LookupBailoutId(data, BailoutId(ast_id));
- unsigned translation_index = data->TranslationIndex(bailout_id)->value();
- ByteArray* translations = data->TranslationByteArray();
-
- TranslationIterator iterator(translations, translation_index);
- Translation::Opcode opcode =
- static_cast<Translation::Opcode>(iterator.Next());
- ASSERT(Translation::BEGIN == opcode);
- USE(opcode);
- int count = iterator.Next();
- iterator.Skip(1); // Drop JS frame count.
- ASSERT(count == 1);
- USE(count);
-
- opcode = static_cast<Translation::Opcode>(iterator.Next());
- USE(opcode);
- ASSERT(Translation::JS_FRAME == opcode);
- unsigned node_id = iterator.Next();
- USE(node_id);
- ASSERT(node_id == ast_id);
- int closure_id = iterator.Next();
- USE(closure_id);
- ASSERT_EQ(Translation::kSelfLiteralId, closure_id);
- unsigned height = iterator.Next();
- unsigned height_in_bytes = height * kPointerSize;
- USE(height_in_bytes);
-
- unsigned fixed_size = ComputeFixedSize(function_);
- unsigned input_frame_size = input_->GetFrameSize();
- ASSERT(fixed_size + height_in_bytes == input_frame_size);
-
- unsigned stack_slot_size = compiled_code_->stack_slots() * kPointerSize;
- unsigned outgoing_height = data->ArgumentsStackHeight(bailout_id)->value();
- unsigned outgoing_size = outgoing_height * kPointerSize;
- unsigned output_frame_size = fixed_size + stack_slot_size + outgoing_size;
- ASSERT(outgoing_size == 0); // OSR does not happen in the middle of a call.
-
- if (FLAG_trace_osr) {
- PrintF("[on-stack replacement: begin 0x%08" V8PRIxPTR " ",
- reinterpret_cast<intptr_t>(function_));
- PrintFunctionName();
- PrintF(" => node=%u, frame=%d->%d]\n",
- ast_id,
- input_frame_size,
- output_frame_size);
- }
-
- // There's only one output frame in the OSR case.
- output_count_ = 1;
- output_ = new FrameDescription*[1];
- output_[0] = new(output_frame_size) FrameDescription(
- output_frame_size, function_);
- output_[0]->SetFrameType(StackFrame::JAVA_SCRIPT);
-
- // Clear the incoming parameters in the optimized frame to avoid
- // confusing the garbage collector.
- unsigned output_offset = output_frame_size - kPointerSize;
- int parameter_count = function_->shared()->formal_parameter_count() + 1;
- for (int i = 0; i < parameter_count; ++i) {
- output_[0]->SetFrameSlot(output_offset, 0);
- output_offset -= kPointerSize;
- }
-
- // Translate the incoming parameters. This may overwrite some of the
- // incoming argument slots we've just cleared.
- int input_offset = input_frame_size - kPointerSize;
- bool ok = true;
- int limit = input_offset - (parameter_count * kPointerSize);
- while (ok && input_offset > limit) {
- ok = DoOsrTranslateCommand(&iterator, &input_offset);
- }
-
- // There are no translation commands for the caller's pc and fp, the
- // context, and the function. Set them up explicitly.
- for (int i = StandardFrameConstants::kCallerPCOffset;
- ok && i >= StandardFrameConstants::kMarkerOffset;
- i -= kPointerSize) {
- uint32_t input_value = input_->GetFrameSlot(input_offset);
- if (FLAG_trace_osr) {
- const char* name = "UNKNOWN";
- switch (i) {
- case StandardFrameConstants::kCallerPCOffset:
- name = "caller's pc";
- break;
- case StandardFrameConstants::kCallerFPOffset:
- name = "fp";
- break;
- case StandardFrameConstants::kContextOffset:
- name = "context";
- break;
- case StandardFrameConstants::kMarkerOffset:
- name = "function";
- break;
- }
- PrintF(" [sp + %d] <- 0x%08x ; [sp + %d] (fixed part - %s)\n",
- output_offset,
- input_value,
- input_offset,
- name);
- }
-
- output_[0]->SetFrameSlot(output_offset, input_->GetFrameSlot(input_offset));
- input_offset -= kPointerSize;
- output_offset -= kPointerSize;
- }
-
- // Translate the rest of the frame.
- while (ok && input_offset >= 0) {
- ok = DoOsrTranslateCommand(&iterator, &input_offset);
- }
-
- // If translation of any command failed, continue using the input frame.
- if (!ok) {
- delete output_[0];
- output_[0] = input_;
- output_[0]->SetPc(reinterpret_cast<uint32_t>(from_));
- } else {
- // Set up the frame pointer and the context pointer.
- output_[0]->SetRegister(fp.code(), input_->GetRegister(fp.code()));
- output_[0]->SetRegister(cp.code(), input_->GetRegister(cp.code()));
-
- unsigned pc_offset = data->OsrPcOffset()->value();
- uint32_t pc = reinterpret_cast<uint32_t>(
- compiled_code_->entry() + pc_offset);
- output_[0]->SetPc(pc);
- }
- Code* continuation = isolate_->builtins()->builtin(Builtins::kNotifyOSR);
- output_[0]->SetContinuation(
- reinterpret_cast<uint32_t>(continuation->entry()));
-
- if (FLAG_trace_osr) {
- PrintF("[on-stack replacement translation %s: 0x%08" V8PRIxPTR " ",
- ok ? "finished" : "aborted",
- reinterpret_cast<intptr_t>(function_));
- PrintFunctionName();
- PrintF(" => pc=0x%0x]\n", output_[0]->GetPc());
- }
-}
-
-
void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
// Set the register values. The values are not important as there are no
// callee saved registers in JavaScript frames, so all registers are
@@ -542,10 +374,8 @@ void Deoptimizer::EntryGenerator::Generate() {
}
// Push state, pc, and continuation from the last output frame.
- if (type() != OSR) {
- __ lw(t2, MemOperand(a2, FrameDescription::state_offset()));
- __ push(t2);
- }
+ __ lw(t2, MemOperand(a2, FrameDescription::state_offset()));
+ __ push(t2);
__ lw(t2, MemOperand(a2, FrameDescription::pc_offset()));
__ push(t2);
diff --git a/deps/v8/src/mips/disasm-mips.cc b/deps/v8/src/mips/disasm-mips.cc
index 708df39d2..691df940f 100644
--- a/deps/v8/src/mips/disasm-mips.cc
+++ b/deps/v8/src/mips/disasm-mips.cc
@@ -50,9 +50,6 @@
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
-#ifndef WIN32
-#include <stdint.h>
-#endif
#include "v8.h"
diff --git a/deps/v8/src/mips/full-codegen-mips.cc b/deps/v8/src/mips/full-codegen-mips.cc
index b60502c9a..df3f4170b 100644
--- a/deps/v8/src/mips/full-codegen-mips.cc
+++ b/deps/v8/src/mips/full-codegen-mips.cc
@@ -298,8 +298,7 @@ void FullCodeGenerator::Generate() {
Label ok;
__ LoadRoot(t0, Heap::kStackLimitRootIndex);
__ Branch(&ok, hs, sp, Operand(t0));
- StackCheckStub stub;
- __ CallStub(&stub);
+ __ Call(isolate()->builtins()->StackCheck(), RelocInfo::CODE_TARGET);
__ bind(&ok);
}
@@ -369,9 +368,8 @@ void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
EmitProfilingCounterDecrement(weight);
__ slt(at, a3, zero_reg);
__ beq(at, zero_reg, &ok);
- // CallStub will emit a li t9 first, so it is safe to use the delay slot.
- InterruptStub stub;
- __ CallStub(&stub);
+ // Call will emit a li t9 first, so it is safe to use the delay slot.
+ __ Call(isolate()->builtins()->InterruptCheck(), RelocInfo::CODE_TARGET);
// Record a mapping of this PC offset to the OSR id. This is used to find
// the AST id from the unoptimized code in order to use it as a key into
// the deoptimization input data found in the optimized code.
@@ -418,8 +416,8 @@ void FullCodeGenerator::EmitReturnSequence() {
__ push(a2);
__ CallRuntime(Runtime::kOptimizeFunctionOnNextCall, 1);
} else {
- InterruptStub stub;
- __ CallStub(&stub);
+ __ Call(isolate()->builtins()->InterruptCheck(),
+ RelocInfo::CODE_TARGET);
}
__ pop(v0);
EmitProfilingCounterReset();
@@ -1333,8 +1331,7 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
scope()->is_function_scope() &&
info->num_literals() == 0) {
FastNewClosureStub stub(info->language_mode(), info->is_generator());
- __ li(a0, Operand(info));
- __ push(a0);
+ __ li(a2, Operand(info));
__ CallStub(&stub);
} else {
__ li(a0, Operand(info));
@@ -3031,7 +3028,7 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
VisitForAccumulatorValue(args->at(0));
- Label materialize_true, materialize_false;
+ Label materialize_true, materialize_false, skip_lookup;
Label* if_true = NULL;
Label* if_false = NULL;
Label* fall_through = NULL;
@@ -3043,7 +3040,7 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
__ lw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
__ lbu(t0, FieldMemOperand(a1, Map::kBitField2Offset));
__ And(t0, t0, 1 << Map::kStringWrapperSafeForDefaultValueOf);
- __ Branch(if_true, ne, t0, Operand(zero_reg));
+ __ Branch(&skip_lookup, ne, t0, Operand(zero_reg));
// Check for fast case object. Generate false result for slow case object.
__ lw(a2, FieldMemOperand(v0, JSObject::kPropertiesOffset));
@@ -3089,6 +3086,14 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
__ Branch(&loop, ne, t0, Operand(a2));
__ bind(&done);
+
+ // Set the bit in the map to indicate that there is no local valueOf field.
+ __ lbu(a2, FieldMemOperand(a1, Map::kBitField2Offset));
+ __ Or(a2, a2, Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
+ __ sb(a2, FieldMemOperand(a1, Map::kBitField2Offset));
+
+ __ bind(&skip_lookup);
+
// If a valueOf property is not found on the object, check that its
// prototype is the unmodified String prototype. If not, the result is false.
__ lw(a2, FieldMemOperand(a1, Map::kPrototypeOffset));
@@ -3097,16 +3102,9 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
__ lw(a3, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
__ lw(a3, FieldMemOperand(a3, GlobalObject::kNativeContextOffset));
__ lw(a3, ContextOperand(a3, Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX));
- __ Branch(if_false, ne, a2, Operand(a3));
-
- // Set the bit in the map to indicate that it has been checked safe for
- // default valueOf and set true result.
- __ lbu(a2, FieldMemOperand(a1, Map::kBitField2Offset));
- __ Or(a2, a2, Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
- __ sb(a2, FieldMemOperand(a1, Map::kBitField2Offset));
- __ jmp(if_true);
-
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(eq, a2, Operand(a3), if_true, if_false, fall_through);
+
context()->Plug(if_true, if_false);
}
@@ -3339,7 +3337,7 @@ void FullCodeGenerator::EmitLog(CallRuntime* expr) {
// 2 (array): Arguments to the format string.
ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(args->length(), 3);
- if (CodeGenerator::ShouldGenerateLog(args->at(0))) {
+ if (CodeGenerator::ShouldGenerateLog(isolate(), args->at(0))) {
VisitForStackValue(args->at(1));
VisitForStackValue(args->at(2));
__ CallRuntime(Runtime::kLog, 2);
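
Two things change in this file: the StackCheckStub/InterruptStub call sites now go through builtins, and the safe-for-default-valueOf check is reordered so the map bit is written as soon as the local lookup succeeds, before the prototype comparison, with the final answer coming from the Split() on the prototype check. A toy C++ model of the reordered control flow (stand-in types for illustration, not V8's real Map API):

// Toy stand-ins for illustration only.
struct Map {
  bool safe_for_default_value_of;  // Map::kStringWrapperSafeForDefaultValueOf
  const void* prototype_map;
};

// Placeholder for the descriptor scan the generated code performs.
inline bool HasLocalValueOfField(const Map&) { return false; }

// After this patch the bit caches only the (expensive) local lookup,
// while the prototype check runs every time and decides the result.
bool StringWrapperSafeForDefaultValueOf(Map* map,
                                        const void* string_prototype_map) {
  if (!map->safe_for_default_value_of) {           // else branch to skip_lookup
    if (HasLocalValueOfField(*map)) return false;  // slow cases elided
    map->safe_for_default_value_of = true;         // set the bit in the map
  }
  // skip_lookup:
  return map->prototype_map == string_prototype_map;  // Split(eq, a2, a3, ...)
}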
diff --git a/deps/v8/src/mips/ic-mips.cc b/deps/v8/src/mips/ic-mips.cc
index eb730bb38..e250e0ee4 100644
--- a/deps/v8/src/mips/ic-mips.cc
+++ b/deps/v8/src/mips/ic-mips.cc
@@ -359,7 +359,7 @@ void CallICBase::GenerateMonomorphicCacheProbe(MacroAssembler* masm,
extra_state,
Code::NORMAL,
argc);
- Isolate::Current()->stub_cache()->GenerateProbe(
+ masm->isolate()->stub_cache()->GenerateProbe(
masm, flags, a1, a2, a3, t0, t1, t2);
// If the stub cache probing failed, the receiver might be a value.
@@ -395,7 +395,7 @@ void CallICBase::GenerateMonomorphicCacheProbe(MacroAssembler* masm,
// Probe the stub cache for the value object.
__ bind(&probe);
- Isolate::Current()->stub_cache()->GenerateProbe(
+ masm->isolate()->stub_cache()->GenerateProbe(
masm, flags, a1, a2, a3, t0, t1, t2);
__ bind(&miss);
@@ -658,7 +658,7 @@ void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
Code::Flags flags = Code::ComputeFlags(
Code::STUB, MONOMORPHIC, Code::kNoExtraICState,
Code::NORMAL, Code::LOAD_IC);
- Isolate::Current()->stub_cache()->GenerateProbe(
+ masm->isolate()->stub_cache()->GenerateProbe(
masm, flags, a0, a2, a3, t0, t1, t2);
// Cache miss: Jump to runtime.
@@ -1238,7 +1238,6 @@ static void KeyedStoreGenerateGenericHelper(
a3, // Scratch regs...
t0,
t1,
- t2,
&transition_double_elements);
if (increment_length == kIncrementLength) {
// Add 1 to receiver->length.
@@ -1499,7 +1498,7 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
Code::Flags flags = Code::ComputeFlags(
Code::STUB, MONOMORPHIC, strict_mode,
Code::NORMAL, Code::STORE_IC);
- Isolate::Current()->stub_cache()->GenerateProbe(
+ masm->isolate()->stub_cache()->GenerateProbe(
masm, flags, a1, a2, a3, t0, t1, t2);
// Cache miss: Jump to runtime.
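
Every hunk in this file makes the same substitution: the thread-local Isolate::Current() lookup is replaced with the isolate the assembler already carries. A minimal sketch of the difference (stand-in types; only the masm->isolate() accessor itself mirrors the real API):

// Stand-in types for illustration.
class Isolate {
 public:
  static Isolate* Current() {          // a TLS lookup in real V8
    static Isolate process_isolate;
    return &process_isolate;
  }
};

class MacroAssembler {
 public:
  explicit MacroAssembler(Isolate* isolate) : isolate_(isolate) {}
  Isolate* isolate() const { return isolate_; }  // explicit, no TLS read
 private:
  Isolate* isolate_;
};

void GenerateProbe(MacroAssembler* masm) {
  Isolate* before = Isolate::Current();  // hidden global dependency
  Isolate* after = masm->isolate();      // dependency threaded through masm
  (void)before;
  (void)after;
}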
diff --git a/deps/v8/src/mips/lithium-codegen-mips.cc b/deps/v8/src/mips/lithium-codegen-mips.cc
index 3a290117d..b37c7e041 100644
--- a/deps/v8/src/mips/lithium-codegen-mips.cc
+++ b/deps/v8/src/mips/lithium-codegen-mips.cc
@@ -31,12 +31,13 @@
#include "mips/lithium-gap-resolver-mips.h"
#include "code-stubs.h"
#include "stub-cache.h"
+#include "hydrogen-osr.h"
namespace v8 {
namespace internal {
-class SafepointGenerator : public CallWrapper {
+class SafepointGenerator V8_FINAL : public CallWrapper {
public:
SafepointGenerator(LCodeGen* codegen,
LPointerMap* pointers,
@@ -44,11 +45,11 @@ class SafepointGenerator : public CallWrapper {
: codegen_(codegen),
pointers_(pointers),
deopt_mode_(mode) { }
- virtual ~SafepointGenerator() { }
+ virtual ~SafepointGenerator() {}
- virtual void BeforeCall(int call_size) const { }
+ virtual void BeforeCall(int call_size) const V8_OVERRIDE {}
- virtual void AfterCall() const {
+ virtual void AfterCall() const V8_OVERRIDE {
codegen_->RecordSafepoint(pointers_, deopt_mode_);
}
@@ -247,6 +248,21 @@ bool LCodeGen::GeneratePrologue() {
}
+void LCodeGen::GenerateOsrPrologue() {
+ // Generate the OSR entry prologue at the first unknown OSR value, or if there
+ // are none, at the OSR entrypoint instruction.
+ if (osr_pc_offset_ >= 0) return;
+
+ osr_pc_offset_ = masm()->pc_offset();
+
+ // Adjust the frame size, subsuming the unoptimized frame into the
+ // optimized frame.
+ int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
+ ASSERT(slots >= 0);
+ __ Subu(sp, sp, Operand(slots * kPointerSize));
+}
+
+
bool LCodeGen::GenerateBody() {
ASSERT(is_generating());
bool emit_instructions = true;
@@ -323,16 +339,6 @@ bool LCodeGen::GenerateDeferredCode() {
bool LCodeGen::GenerateDeoptJumpTable() {
- // Check that the jump table is accessible from everywhere in the function
- // code, i.e. that offsets to the table can be encoded in the 16bit signed
- // immediate of a branch instruction.
- // To simplify we consider the code size from the first instruction to the
- // end of the jump table.
- if (!is_int16((masm()->pc_offset() / Assembler::kInstrSize) +
- deopt_jump_table_.length() * 12)) {
- Abort(kGeneratedCodeIsTooLarge);
- }
-
if (deopt_jump_table_.length() > 0) {
Comment(";;; -------------------- Jump table --------------------");
}
@@ -408,7 +414,7 @@ Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
} else if (op->IsConstantOperand()) {
LConstantOperand* const_op = LConstantOperand::cast(op);
HConstant* constant = chunk_->LookupConstant(const_op);
- Handle<Object> literal = constant->handle();
+ Handle<Object> literal = constant->handle(isolate());
Representation r = chunk_->LookupLiteralRepresentation(const_op);
if (r.IsInteger32()) {
ASSERT(literal->IsNumber());
@@ -419,7 +425,7 @@ Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
} else if (r.IsDouble()) {
Abort(kEmitLoadRegisterUnsupportedDoubleImmediate);
} else {
- ASSERT(r.IsTagged());
+ ASSERT(r.IsSmiOrTagged());
__ LoadObject(scratch, literal);
}
return scratch;
@@ -446,7 +452,7 @@ DoubleRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
} else if (op->IsConstantOperand()) {
LConstantOperand* const_op = LConstantOperand::cast(op);
HConstant* constant = chunk_->LookupConstant(const_op);
- Handle<Object> literal = constant->handle();
+ Handle<Object> literal = constant->handle(isolate());
Representation r = chunk_->LookupLiteralRepresentation(const_op);
if (r.IsInteger32()) {
ASSERT(literal->IsNumber());
@@ -472,7 +478,7 @@ DoubleRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
HConstant* constant = chunk_->LookupConstant(op);
ASSERT(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
- return constant->handle();
+ return constant->handle(isolate());
}
@@ -529,7 +535,7 @@ Operand LCodeGen::ToOperand(LOperand* op) {
Abort(kToOperandUnsupportedDoubleImmediate);
}
ASSERT(r.IsTagged());
- return Operand(constant->handle());
+ return Operand(constant->handle(isolate()));
} else if (op->IsRegister()) {
return Operand(ToRegister(op));
} else if (op->IsDoubleRegister()) {
@@ -676,7 +682,7 @@ void LCodeGen::AddToTranslation(LEnvironment* environment,
translation->StoreDoubleRegister(reg);
} else if (op->IsConstantOperand()) {
HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
- int src_index = DefineDeoptimizationLiteral(constant->handle());
+ int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
translation->StoreLiteral(src_index);
} else {
UNREACHABLE();
@@ -1081,8 +1087,7 @@ void LCodeGen::DoCallStub(LCallStub* instr) {
void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
- // Record the address of the first unknown OSR value as the place to enter.
- if (osr_pc_offset_ == -1) osr_pc_offset_ = masm()->pc_offset();
+ GenerateOsrPrologue();
}
@@ -1408,10 +1413,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
if (right_op->IsConstantOperand() && !can_overflow) {
- // Use optimized code for specific constants.
- int32_t constant = ToRepresentation(
- LConstantOperand::cast(right_op),
- instr->hydrogen()->right()->representation());
+ int32_t constant = ToInteger32(LConstantOperand::cast(right_op));
if (bailout_on_minus_zero && (constant < 0)) {
// The case of a null constant will be handled separately.
@@ -1699,7 +1701,7 @@ void LCodeGen::DoConstantE(LConstantE* instr) {
void LCodeGen::DoConstantT(LConstantT* instr) {
- Handle<Object> value = instr->value();
+ Handle<Object> value = instr->value(isolate());
AllowDeferredHandleDereference smi_check;
__ LoadObject(ToRegister(instr->result()), value);
}
@@ -2615,15 +2617,15 @@ void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
- class DeferredInstanceOfKnownGlobal: public LDeferredCode {
+ class DeferredInstanceOfKnownGlobal V8_FINAL : public LDeferredCode {
public:
DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
LInstanceOfKnownGlobal* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() {
+ virtual void Generate() V8_OVERRIDE {
codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
}
- virtual LInstruction* instr() { return instr_; }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
Label* map_check() { return &map_check_; }
private:
@@ -3609,14 +3611,14 @@ void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
void LCodeGen::DoMathAbs(LMathAbs* instr) {
// Class for deferred case.
- class DeferredMathAbsTaggedHeapNumber: public LDeferredCode {
+ class DeferredMathAbsTaggedHeapNumber V8_FINAL : public LDeferredCode {
public:
DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() {
+ virtual void Generate() V8_OVERRIDE {
codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
}
- virtual LInstruction* instr() { return instr_; }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
LMathAbs* instr_;
};
@@ -3811,79 +3813,64 @@ void LCodeGen::DoPower(LPower* instr) {
void LCodeGen::DoRandom(LRandom* instr) {
- class DeferredDoRandom: public LDeferredCode {
- public:
- DeferredDoRandom(LCodeGen* codegen, LRandom* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredRandom(instr_); }
- virtual LInstruction* instr() { return instr_; }
- private:
- LRandom* instr_;
- };
-
- DeferredDoRandom* deferred = new(zone()) DeferredDoRandom(this, instr);
- // Having marked this instruction as a call we can use any
- // registers.
- ASSERT(ToDoubleRegister(instr->result()).is(f0));
- ASSERT(ToRegister(instr->global_object()).is(a0));
-
+ // Assert that the register size is indeed the size of each seed.
static const int kSeedSize = sizeof(uint32_t);
STATIC_ASSERT(kPointerSize == kSeedSize);
- __ lw(a0, FieldMemOperand(a0, GlobalObject::kNativeContextOffset));
+ // Load native context.
+ Register global_object = ToRegister(instr->global_object());
+ Register native_context = global_object;
+ __ lw(native_context, FieldMemOperand(
+ global_object, GlobalObject::kNativeContextOffset));
+
+ // Load state (FixedArray of the native context's random seeds).
static const int kRandomSeedOffset =
FixedArray::kHeaderSize + Context::RANDOM_SEED_INDEX * kPointerSize;
- __ lw(a2, FieldMemOperand(a0, kRandomSeedOffset));
- // a2: FixedArray of the native context's random seeds
+ Register state = native_context;
+ __ lw(state, FieldMemOperand(native_context, kRandomSeedOffset));
// Load state[0].
- __ lw(a1, FieldMemOperand(a2, ByteArray::kHeaderSize));
- __ Branch(deferred->entry(), eq, a1, Operand(zero_reg));
+ Register state0 = ToRegister(instr->scratch());
+ __ lw(state0, FieldMemOperand(state, ByteArray::kHeaderSize));
// Load state[1].
- __ lw(a0, FieldMemOperand(a2, ByteArray::kHeaderSize + kSeedSize));
- // a1: state[0].
- // a0: state[1].
+ Register state1 = ToRegister(instr->scratch2());
+ __ lw(state1, FieldMemOperand(state, ByteArray::kHeaderSize + kSeedSize));
// state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16)
- __ And(a3, a1, Operand(0xFFFF));
- __ li(t0, Operand(18273));
- __ Mul(a3, a3, t0);
- __ srl(a1, a1, 16);
- __ Addu(a1, a3, a1);
+ Register scratch3 = ToRegister(instr->scratch3());
+ Register scratch4 = scratch0();
+ __ And(scratch3, state0, Operand(0xFFFF));
+ __ li(scratch4, Operand(18273));
+ __ Mul(scratch3, scratch3, scratch4);
+ __ srl(state0, state0, 16);
+ __ Addu(state0, scratch3, state0);
// Save state[0].
- __ sw(a1, FieldMemOperand(a2, ByteArray::kHeaderSize));
+ __ sw(state0, FieldMemOperand(state, ByteArray::kHeaderSize));
// state[1] = 36969 * (state[1] & 0xFFFF) + (state[1] >> 16)
- __ And(a3, a0, Operand(0xFFFF));
- __ li(t0, Operand(36969));
- __ Mul(a3, a3, t0);
- __ srl(a0, a0, 16),
- __ Addu(a0, a3, a0);
+ __ And(scratch3, state1, Operand(0xFFFF));
+ __ li(scratch4, Operand(36969));
+ __ Mul(scratch3, scratch3, scratch4);
+ __ srl(state1, state1, 16);
+ __ Addu(state1, scratch3, state1);
// Save state[1].
- __ sw(a0, FieldMemOperand(a2, ByteArray::kHeaderSize + kSeedSize));
+ __ sw(state1, FieldMemOperand(state, ByteArray::kHeaderSize + kSeedSize));
// Random bit pattern = (state[0] << 14) + (state[1] & 0x3FFFF)
- __ And(a0, a0, Operand(0x3FFFF));
- __ sll(a1, a1, 14);
- __ Addu(v0, a0, a1);
-
- __ bind(deferred->exit());
+ Register random = scratch4;
+ __ And(random, state1, Operand(0x3FFFF));
+ __ sll(state0, state0, 14);
+ __ Addu(random, random, state0);
// 0x41300000 is the top half of 1.0 x 2^20 as a double.
- __ li(a2, Operand(0x41300000));
+ __ li(scratch3, Operand(0x41300000));
// Move 0x41300000xxxxxxxx (x = random bits) to FPU.
- __ Move(f12, v0, a2);
+ DoubleRegister result = ToDoubleRegister(instr->result());
+ __ Move(result, random, scratch3);
// Move 0x4130000000000000 to FPU.
- __ Move(f14, zero_reg, a2);
- // Subtract to get the result.
- __ sub_d(f0, f12, f14);
-}
-
-
-void LCodeGen::DoDeferredRandom(LRandom* instr) {
- __ PrepareCallCFunction(1, scratch0());
- __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
- // Return value is in v0.
+ DoubleRegister scratch5 = double_scratch0();
+ __ Move(scratch5, zero_reg, scratch3);
+ __ sub_d(result, result, scratch5);
}
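
The rewrite above also drops the deferred C-function fallback (DoDeferredRandom) and the fixed a0/f0 register constraints, so DoRandom no longer has to be marked as a call; the register allocator hands it ordinary temps instead. For reference, the arithmetic the new code emits is the classic two-seed multiply-with-carry generator; a standalone C++ model of the same steps (not V8's helpers):

#include <cstdint>
#include <cstring>

// Two 16-bit multiply-with-carry streams combined into 32 random bits,
// then turned into a double in [0, 1) via the 0x41300000 exponent trick.
double NextRandom(uint32_t* state0, uint32_t* state1) {
  // state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16)
  *state0 = 18273u * (*state0 & 0xFFFF) + (*state0 >> 16);
  // state[1] = 36969 * (state[1] & 0xFFFF) + (state[1] >> 16)
  *state1 = 36969u * (*state1 & 0xFFFF) + (*state1 >> 16);
  // Random bit pattern = (state[0] << 14) + (state[1] & 0x3FFFF)
  uint32_t random = (*state0 << 14) + (*state1 & 0x3FFFF);
  // 0x41300000xxxxxxxx encodes 2^20 + random * 2^-32 as a double, so
  // subtracting 1.0 * 2^20 (0x4130000000000000) leaves random * 2^-32.
  uint64_t bits = (uint64_t{0x41300000} << 32) | random;
  double result;
  std::memcpy(&result, &bits, sizeof result);
  return result - 1048576.0;  // 1.0 * 2^20
}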
@@ -4077,6 +4064,16 @@ void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
}
+void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
+ Register function = ToRegister(instr->function());
+ Register code_object = ToRegister(instr->code_object());
+ __ Addu(code_object, code_object,
+ Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ sw(code_object,
+ FieldMemOperand(function, JSFunction::kCodeEntryOffset));
+}
+
+
void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
Register result = ToRegister(instr->result());
Register base = ToRegister(instr->base_object());
@@ -4471,12 +4468,14 @@ void LCodeGen::DoStringAdd(LStringAdd* instr) {
void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
- class DeferredStringCharCodeAt: public LDeferredCode {
+ class DeferredStringCharCodeAt V8_FINAL : public LDeferredCode {
public:
DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
- virtual LInstruction* instr() { return instr_; }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredStringCharCodeAt(instr_);
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
LStringCharCodeAt* instr_;
};
@@ -4523,12 +4522,14 @@ void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
- class DeferredStringCharFromCode: public LDeferredCode {
+ class DeferredStringCharFromCode V8_FINAL : public LDeferredCode {
public:
DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); }
- virtual LInstruction* instr() { return instr_; }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredStringCharFromCode(instr_);
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
LStringCharFromCode* instr_;
};
@@ -4614,16 +4615,16 @@ void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
- class DeferredNumberTagI: public LDeferredCode {
+ class DeferredNumberTagI V8_FINAL : public LDeferredCode {
public:
DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() {
+ virtual void Generate() V8_OVERRIDE {
codegen()->DoDeferredNumberTagI(instr_,
instr_->value(),
SIGNED_INT32);
}
- virtual LInstruction* instr() { return instr_; }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
LNumberTagI* instr_;
};
@@ -4640,16 +4641,16 @@ void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
- class DeferredNumberTagU: public LDeferredCode {
+ class DeferredNumberTagU V8_FINAL : public LDeferredCode {
public:
DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() {
+ virtual void Generate() V8_OVERRIDE {
codegen()->DoDeferredNumberTagI(instr_,
instr_->value(),
UNSIGNED_INT32);
}
- virtual LInstruction* instr() { return instr_; }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
LNumberTagU* instr_;
};
@@ -4720,12 +4721,14 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
- class DeferredNumberTagD: public LDeferredCode {
+ class DeferredNumberTagD V8_FINAL : public LDeferredCode {
public:
DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
- virtual LInstruction* instr() { return instr_; }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredNumberTagD(instr_);
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
LNumberTagD* instr_;
};
@@ -4850,7 +4853,7 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
Register scratch1 = scratch0();
Register scratch2 = ToRegister(instr->temp());
DoubleRegister double_scratch = double_scratch0();
- DoubleRegister double_scratch2 = ToDoubleRegister(instr->temp3());
+ DoubleRegister double_scratch2 = ToDoubleRegister(instr->temp2());
ASSERT(!scratch1.is(input_reg) && !scratch1.is(scratch2));
ASSERT(!scratch2.is(input_reg) && !scratch2.is(scratch1));
@@ -4865,11 +4868,6 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
// of the if.
if (instr->truncating()) {
- Register scratch3 = ToRegister(instr->temp2());
- FPURegister single_scratch = double_scratch.low();
- ASSERT(!scratch3.is(input_reg) &&
- !scratch3.is(scratch1) &&
- !scratch3.is(scratch2));
// Performs a truncating conversion of a floating point number as used by
// the JS bitwise operations.
Label heap_number;
@@ -4883,14 +4881,8 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
__ Branch(&done);
__ bind(&heap_number);
- __ ldc1(double_scratch2,
- FieldMemOperand(input_reg, HeapNumber::kValueOffset));
- __ EmitECMATruncate(input_reg,
- double_scratch2,
- single_scratch,
- scratch1,
- scratch2,
- scratch3);
+ __ mov(scratch2, input_reg);
+ __ TruncateHeapNumberToI(input_reg, scratch2);
} else {
// Deoptimize if we don't have a heap number.
DeoptimizeIf(ne, instr->environment(), scratch1, Operand(at));
@@ -4924,12 +4916,14 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
- class DeferredTaggedToI: public LDeferredCode {
+ class DeferredTaggedToI V8_FINAL : public LDeferredCode {
public:
DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
- virtual LInstruction* instr() { return instr_; }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredTaggedToI(instr_);
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
LTaggedToI* instr_;
};
@@ -4975,20 +4969,12 @@ void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
Register result_reg = ToRegister(instr->result());
Register scratch1 = scratch0();
- Register scratch2 = ToRegister(instr->temp());
DoubleRegister double_input = ToDoubleRegister(instr->value());
if (instr->truncating()) {
- Register scratch3 = ToRegister(instr->temp2());
- FPURegister single_scratch = double_scratch0().low();
- __ EmitECMATruncate(result_reg,
- double_input,
- single_scratch,
- scratch1,
- scratch2,
- scratch3);
+ __ TruncateDoubleToI(result_reg, double_input);
} else {
- Register except_flag = scratch2;
+ Register except_flag = LCodeGen::scratch1();
__ EmitFPUTruncate(kRoundToMinusInf,
result_reg,
@@ -5015,21 +5001,13 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
Register result_reg = ToRegister(instr->result());
- Register scratch1 = scratch0();
- Register scratch2 = ToRegister(instr->temp());
+ Register scratch1 = LCodeGen::scratch0();
DoubleRegister double_input = ToDoubleRegister(instr->value());
if (instr->truncating()) {
- Register scratch3 = ToRegister(instr->temp2());
- FPURegister single_scratch = double_scratch0().low();
- __ EmitECMATruncate(result_reg,
- double_input,
- single_scratch,
- scratch1,
- scratch2,
- scratch3);
+ __ TruncateDoubleToI(result_reg, double_input);
} else {
- Register except_flag = scratch2;
+ Register except_flag = LCodeGen::scratch1();
__ EmitFPUTruncate(kRoundToMinusInf,
result_reg,
@@ -5111,20 +5089,20 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
}
-void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
+void LCodeGen::DoCheckValue(LCheckValue* instr) {
Register reg = ToRegister(instr->value());
- Handle<JSFunction> target = instr->hydrogen()->target();
+ Handle<HeapObject> object = instr->hydrogen()->object();
AllowDeferredHandleDereference smi_check;
- if (isolate()->heap()->InNewSpace(*target)) {
+ if (isolate()->heap()->InNewSpace(*object)) {
Register reg = ToRegister(instr->value());
- Handle<Cell> cell = isolate()->factory()->NewCell(target);
+ Handle<Cell> cell = isolate()->factory()->NewCell(object);
__ li(at, Operand(Handle<Object>(cell)));
__ lw(at, FieldMemOperand(at, Cell::kValueOffset));
DeoptimizeIf(ne, instr->environment(), reg,
Operand(at));
} else {
DeoptimizeIf(ne, instr->environment(), reg,
- Operand(target));
+ Operand(object));
}
}
@@ -5142,17 +5120,17 @@ void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
- class DeferredCheckMaps: public LDeferredCode {
+ class DeferredCheckMaps V8_FINAL : public LDeferredCode {
public:
DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
: LDeferredCode(codegen), instr_(instr), object_(object) {
SetExit(check_maps());
}
- virtual void Generate() {
+ virtual void Generate() V8_OVERRIDE {
codegen()->DoDeferredInstanceMigration(instr_, object_);
}
Label* check_maps() { return &check_maps_; }
- virtual LInstruction* instr() { return instr_; }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
LCheckMaps* instr_;
Label check_maps_;
@@ -5241,12 +5219,14 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
void LCodeGen::DoAllocate(LAllocate* instr) {
- class DeferredAllocate: public LDeferredCode {
+ class DeferredAllocate V8_FINAL : public LDeferredCode {
public:
DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredAllocate(instr_); }
- virtual LInstruction* instr() { return instr_; }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredAllocate(instr_);
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
LAllocate* instr_;
};
@@ -5407,8 +5387,7 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
if (!pretenure && instr->hydrogen()->has_no_literals()) {
FastNewClosureStub stub(instr->hydrogen()->language_mode(),
instr->hydrogen()->is_generator());
- __ li(a1, Operand(instr->hydrogen()->shared_info()));
- __ push(a1);
+ __ li(a2, Operand(instr->hydrogen()->shared_info()));
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
} else {
__ li(a2, Operand(instr->hydrogen()->shared_info()));
@@ -5643,12 +5622,14 @@ void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
void LCodeGen::DoStackCheck(LStackCheck* instr) {
- class DeferredStackCheck: public LDeferredCode {
+ class DeferredStackCheck V8_FINAL : public LDeferredCode {
public:
DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
- virtual LInstruction* instr() { return instr_; }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredStackCheck(instr_);
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
LStackCheck* instr_;
};
@@ -5662,8 +5643,9 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
Label done;
__ LoadRoot(at, Heap::kStackLimitRootIndex);
__ Branch(&done, hs, sp, Operand(at));
- StackCheckStub stub;
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+ CallCode(isolate()->builtins()->StackCheck(),
+ RelocInfo::CODE_TARGET,
+ instr);
EnsureSpaceForLazyDeopt();
last_lazy_deopt_pc_ = masm()->pc_offset();
__ bind(&done);
@@ -5699,9 +5681,7 @@ void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
ASSERT(!environment->HasBeenRegistered());
RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
- // Normally we record the first unknown OSR value as the entrypoint to the OSR
- // code, but if there were none, record the entrypoint here.
- if (osr_pc_offset_ == -1) osr_pc_offset_ = masm()->pc_offset();
+ GenerateOsrPrologue();
}
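
GenerateOsrPrologue, added near the top of this file, replaces both of the old "record osr_pc_offset_ at the first unknown OSR value" sites and does the frame surgery in one place. Its arithmetic, spelled out under assumed numbers (the real counts come from the graph and the chunk):

// Assumed for illustration: the unoptimized frame holds 5 slots and the
// optimized code wants 12 spill slots in total. The chunk builder already
// reserved the 5 unoptimized slots (see the lithium-mips.cc hunk below),
// so the prologue only extends the frame by the difference.
int OsrFrameExtensionBytes() {
  const int kPointerSize = 4;        // 32-bit MIPS
  int stack_slot_count = 12;         // GetStackSlotCount(), assumed
  int unoptimized_frame_slots = 5;   // osr()->UnoptimizedFrameSlots(), assumed
  int slots = stack_slot_count - unoptimized_frame_slots;
  // ASSERT(slots >= 0);  __ Subu(sp, sp, Operand(slots * kPointerSize));
  return slots * kPointerSize;       // 7 * 4 = 28 bytes
}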
diff --git a/deps/v8/src/mips/lithium-codegen-mips.h b/deps/v8/src/mips/lithium-codegen-mips.h
index f0f44e7d5..84105cae3 100644
--- a/deps/v8/src/mips/lithium-codegen-mips.h
+++ b/deps/v8/src/mips/lithium-codegen-mips.h
@@ -42,7 +42,7 @@ namespace internal {
class LDeferredCode;
class SafepointGenerator;
-class LCodeGen BASE_EMBEDDED {
+class LCodeGen V8_FINAL BASE_EMBEDDED {
public:
LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
: zone_(info->zone()),
@@ -147,7 +147,6 @@ class LCodeGen BASE_EMBEDDED {
void DoDeferredTaggedToI(LTaggedToI* instr);
void DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr);
void DoDeferredStackCheck(LStackCheck* instr);
- void DoDeferredRandom(LRandom* instr);
void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
void DoDeferredAllocate(LAllocate* instr);
@@ -227,6 +226,9 @@ class LCodeGen BASE_EMBEDDED {
bool GenerateDeoptJumpTable();
bool GenerateSafepointTable();
+ // Generates the custom OSR entrypoint and sets the osr_pc_offset.
+ void GenerateOsrPrologue();
+
enum SafepointMode {
RECORD_SIMPLE_SAFEPOINT,
RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS
@@ -441,7 +443,7 @@ class LCodeGen BASE_EMBEDDED {
int old_position_;
- class PushSafepointRegistersScope BASE_EMBEDDED {
+ class PushSafepointRegistersScope V8_FINAL BASE_EMBEDDED {
public:
PushSafepointRegistersScope(LCodeGen* codegen,
Safepoint::Kind kind)
@@ -489,7 +491,7 @@ class LCodeGen BASE_EMBEDDED {
};
-class LDeferredCode: public ZoneObject {
+class LDeferredCode : public ZoneObject {
public:
explicit LDeferredCode(LCodeGen* codegen)
: codegen_(codegen),
@@ -498,7 +500,7 @@ class LDeferredCode: public ZoneObject {
codegen->AddDeferredCode(this);
}
- virtual ~LDeferredCode() { }
+ virtual ~LDeferredCode() {}
virtual void Generate() = 0;
virtual LInstruction* instr() = 0;
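
The V8_FINAL / V8_OVERRIDE annotations added throughout these headers are portability macros rather than keywords. A plausible minimal definition, assuming a v8config-style feature test (the real macros also handle pre-C++11 compiler-specific spellings), and what they buy:

#if defined(__cplusplus) && __cplusplus >= 201103L
# define V8_FINAL final
# define V8_OVERRIDE override
#else
# define V8_FINAL
# define V8_OVERRIDE
#endif

class LDeferredCode {
 public:
  virtual ~LDeferredCode() {}
  virtual void Generate() = 0;
};

// With C++11 available, a signature mismatch now fails at compile time
// instead of silently introducing a new virtual, and nothing can derive
// further from a V8_FINAL class.
class DeferredStackCheck V8_FINAL : public LDeferredCode {
 public:
  virtual void Generate() V8_OVERRIDE {}  // checked against the base class
};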
diff --git a/deps/v8/src/mips/lithium-gap-resolver-mips.h b/deps/v8/src/mips/lithium-gap-resolver-mips.h
index 2506e38c3..ea1ea3cbb 100644
--- a/deps/v8/src/mips/lithium-gap-resolver-mips.h
+++ b/deps/v8/src/mips/lithium-gap-resolver-mips.h
@@ -38,7 +38,7 @@ namespace internal {
class LCodeGen;
class LGapResolver;
-class LGapResolver BASE_EMBEDDED {
+class LGapResolver V8_FINAL BASE_EMBEDDED {
public:
explicit LGapResolver(LCodeGen* owner);
diff --git a/deps/v8/src/mips/lithium-mips.cc b/deps/v8/src/mips/lithium-mips.cc
index a99793d24..4dc80226f 100644
--- a/deps/v8/src/mips/lithium-mips.cc
+++ b/deps/v8/src/mips/lithium-mips.cc
@@ -30,6 +30,7 @@
#include "lithium-allocator-inl.h"
#include "mips/lithium-mips.h"
#include "mips/lithium-codegen-mips.h"
+#include "hydrogen-osr.h"
namespace v8 {
namespace internal {
@@ -265,6 +266,14 @@ void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
}
+void LStoreCodeEntry::PrintDataTo(StringStream* stream) {
+ stream->Add(" = ");
+ function()->PrintTo(stream);
+ stream->Add(".code_entry = ");
+ code_object()->PrintTo(stream);
+}
+
+
void LInnerAllocatedObject::PrintDataTo(StringStream* stream) {
stream->Add(" = ");
base_object()->PrintTo(stream);
@@ -430,6 +439,15 @@ LPlatformChunk* LChunkBuilder::Build() {
chunk_ = new(zone()) LPlatformChunk(info(), graph());
LPhase phase("L_Building chunk", chunk_);
status_ = BUILDING;
+
+ // If compiling for OSR, reserve space for the unoptimized frame,
+ // which will be subsumed into this frame.
+ if (graph()->has_osr()) {
+ for (int i = graph()->osr()->UnoptimizedFrameSlots(); i > 0; i--) {
+ chunk_->GetNextSpillIndex(false);
+ }
+ }
+
const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
for (int i = 0; i < blocks->length(); i++) {
HBasicBlock* next = NULL;
@@ -723,12 +741,7 @@ LInstruction* LChunkBuilder::DoShift(Token::Value op,
// Left shifts can deoptimize if we shift by > 0 and the result cannot be
// truncated to smi.
if (instr->representation().IsSmi() && constant_value > 0) {
- for (HUseIterator it(instr->uses()); !it.Done(); it.Advance()) {
- if (!it.value()->CheckFlag(HValue::kTruncatingToSmi)) {
- does_deopt = true;
- break;
- }
- }
+ does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToSmi);
}
} else {
right = UseRegisterAtStart(right_value);
@@ -740,12 +753,7 @@ LInstruction* LChunkBuilder::DoShift(Token::Value op,
if (FLAG_opt_safe_uint32_operations) {
does_deopt = !instr->CheckFlag(HInstruction::kUint32);
} else {
- for (HUseIterator it(instr->uses()); !it.Done(); it.Advance()) {
- if (!it.value()->CheckFlag(HValue::kTruncatingToInt32)) {
- does_deopt = true;
- break;
- }
- }
+ does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToInt32);
}
}
@@ -1089,6 +1097,14 @@ LInstruction* LChunkBuilder::DoPushArgument(HPushArgument* instr) {
}
+LInstruction* LChunkBuilder::DoStoreCodeEntry(
+ HStoreCodeEntry* store_code_entry) {
+ LOperand* function = UseRegister(store_code_entry->function());
+ LOperand* code_object = UseTempRegister(store_code_entry->code_object());
+ return new(zone()) LStoreCodeEntry(function, code_object);
+}
+
+
LInstruction* LChunkBuilder::DoInnerAllocatedObject(
HInnerAllocatedObject* inner_object) {
LOperand* base_object = UseRegisterAtStart(inner_object->base_object());
@@ -1608,9 +1624,13 @@ LInstruction* LChunkBuilder::DoPower(HPower* instr) {
LInstruction* LChunkBuilder::DoRandom(HRandom* instr) {
ASSERT(instr->representation().IsDouble());
ASSERT(instr->global_object()->representation().IsTagged());
- LOperand* global_object = UseFixed(instr->global_object(), a0);
- LRandom* result = new(zone()) LRandom(global_object);
- return MarkAsCall(DefineFixedDouble(result, f0), instr);
+ LOperand* global_object = UseTempRegister(instr->global_object());
+ LOperand* scratch = TempRegister();
+ LOperand* scratch2 = TempRegister();
+ LOperand* scratch3 = TempRegister();
+ LRandom* result = new(zone()) LRandom(
+ global_object, scratch, scratch2, scratch3);
+ return DefineFixedDouble(result, f0);
}
@@ -1831,19 +1851,17 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
ASSERT(to.IsInteger32());
LOperand* value = NULL;
LInstruction* res = NULL;
- if (instr->value()->type().IsSmi()) {
- value = UseRegisterAtStart(instr->value());
+ HValue* val = instr->value();
+ if (val->type().IsSmi() || val->representation().IsSmi()) {
+ value = UseRegisterAtStart(val);
res = DefineAsRegister(new(zone()) LSmiUntag(value, false));
} else {
- value = UseRegister(instr->value());
+ value = UseRegister(val);
LOperand* temp1 = TempRegister();
- LOperand* temp2 = instr->CanTruncateToInt32() ? TempRegister()
- : NULL;
- LOperand* temp3 = FixedTemp(f22);
+ LOperand* temp2 = FixedTemp(f22);
res = DefineSameAsFirst(new(zone()) LTaggedToI(value,
temp1,
- temp2,
- temp3));
+ temp2));
res = AssignEnvironment(res);
}
return res;
@@ -1863,14 +1881,12 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
return AssignPointerMap(result);
} else if (to.IsSmi()) {
LOperand* value = UseRegister(instr->value());
- return AssignEnvironment(DefineAsRegister(new(zone()) LDoubleToSmi(value,
- TempRegister(), TempRegister())));
+ return AssignEnvironment(
+ DefineAsRegister(new(zone()) LDoubleToSmi(value)));
} else {
ASSERT(to.IsInteger32());
LOperand* value = UseRegister(instr->value());
- LOperand* temp1 = TempRegister();
- LOperand* temp2 = instr->CanTruncateToInt32() ? TempRegister() : NULL;
- LDoubleToI* res = new(zone()) LDoubleToI(value, temp1, temp2);
+ LDoubleToI* res = new(zone()) LDoubleToI(value);
return AssignEnvironment(DefineAsRegister(res));
}
} else if (from.IsInteger32()) {
@@ -1937,9 +1953,9 @@ LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
}
-LInstruction* LChunkBuilder::DoCheckFunction(HCheckFunction* instr) {
+LInstruction* LChunkBuilder::DoCheckValue(HCheckValue* instr) {
LOperand* value = UseRegisterAtStart(instr->value());
- return AssignEnvironment(new(zone()) LCheckFunction(value));
+ return AssignEnvironment(new(zone()) LCheckValue(value));
}
@@ -2339,10 +2355,18 @@ LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
- int spill_index = chunk()->GetNextSpillIndex(false); // Not double-width.
- if (spill_index > LUnallocated::kMaxFixedSlotIndex) {
- Abort(kTooManySpillSlotsNeededForOSR);
- spill_index = 0;
+ // Use an index that corresponds to the location in the unoptimized frame,
+ // which the optimized frame will subsume.
+ int env_index = instr->index();
+ int spill_index = 0;
+ if (instr->environment()->is_parameter_index(env_index)) {
+ spill_index = chunk()->GetParameterStackSlot(env_index);
+ } else {
+ spill_index = env_index - instr->environment()->first_local_index();
+ if (spill_index > LUnallocated::kMaxFixedSlotIndex) {
+ Abort(kTooManySpillSlotsNeededForOSR);
+ spill_index = 0;
+ }
}
return DefineAsSpilled(new(zone()) LUnknownOSRValue, spill_index);
}
@@ -2364,6 +2388,8 @@ LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) {
LInstruction* LChunkBuilder::DoCapturedObject(HCapturedObject* instr) {
+ instr->ReplayEnvironment(current_block_->last_environment());
+
// There are no real uses of a captured object.
return NULL;
}
@@ -2410,20 +2436,7 @@ LInstruction* LChunkBuilder::DoIsConstructCallAndBranch(
LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
- HEnvironment* env = current_block_->last_environment();
- ASSERT(env != NULL);
-
- env->set_ast_id(instr->ast_id());
-
- env->Drop(instr->pop_count());
- for (int i = instr->values()->length() - 1; i >= 0; --i) {
- HValue* value = instr->values()->at(i);
- if (instr->HasAssignedIndexAt(i)) {
- env->Bind(instr->GetAssignedIndexAt(i), value);
- } else {
- env->Push(value);
- }
- }
+ instr->ReplayEnvironment(current_block_->last_environment());
// If there is an instruction with a pending deoptimization environment,
// create a lazy bailout instruction to capture the environment.
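
The DoUnknownOSRValue rewrite above pins each OSR value to the slot it already occupies in the unoptimized frame instead of handing out fresh spill slots, which is what lets the optimized frame subsume the unoptimized one. The index computation, under assumed numbers:

// Assumed for illustration: parameters map through GetParameterStackSlot(),
// and locals start at environment index 3 (first_local_index()).
int SpillIndexForOsrValue(int env_index, bool is_parameter,
                          int first_local_index, int parameter_stack_slot) {
  if (is_parameter) return parameter_stack_slot;  // GetParameterStackSlot()
  return env_index - first_local_index;           // local #k keeps spill slot k
}
// e.g. env_index 5 with first_local_index 3 lands in spill slot 2, the same
// slot LChunkBuilder::Build() pre-reserved for the unoptimized frame.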
diff --git a/deps/v8/src/mips/lithium-mips.h b/deps/v8/src/mips/lithium-mips.h
index 4bf904970..29a8eac63 100644
--- a/deps/v8/src/mips/lithium-mips.h
+++ b/deps/v8/src/mips/lithium-mips.h
@@ -62,12 +62,12 @@ class LCodeGen;
V(CallNewArray) \
V(CallRuntime) \
V(CallStub) \
- V(CheckFunction) \
V(CheckInstanceType) \
V(CheckMaps) \
V(CheckMapValue) \
V(CheckNonSmi) \
V(CheckSmi) \
+ V(CheckValue) \
V(ClampDToUint8) \
V(ClampIToUint8) \
V(ClampTToUint8) \
@@ -161,6 +161,7 @@ class LCodeGen;
V(SmiTag) \
V(SmiUntag) \
V(StackCheck) \
+ V(StoreCodeEntry) \
V(StoreContextSlot) \
V(StoreGlobalCell) \
V(StoreGlobalGeneric) \
@@ -186,13 +187,17 @@ class LCodeGen;
V(ValueOf) \
V(WrapReceiver)
-#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
- virtual Opcode opcode() const { return LInstruction::k##type; } \
- virtual void CompileToNative(LCodeGen* generator); \
- virtual const char* Mnemonic() const { return mnemonic; } \
- static L##type* cast(LInstruction* instr) { \
- ASSERT(instr->Is##type()); \
- return reinterpret_cast<L##type*>(instr); \
+#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
+ virtual Opcode opcode() const V8_FINAL V8_OVERRIDE { \
+ return LInstruction::k##type; \
+ } \
+ virtual void CompileToNative(LCodeGen* generator) V8_FINAL V8_OVERRIDE; \
+ virtual const char* Mnemonic() const V8_FINAL V8_OVERRIDE { \
+ return mnemonic; \
+ } \
+ static L##type* cast(LInstruction* instr) { \
+ ASSERT(instr->Is##type()); \
+ return reinterpret_cast<L##type*>(instr); \
}
@@ -202,7 +207,7 @@ class LCodeGen;
}
-class LInstruction: public ZoneObject {
+class LInstruction : public ZoneObject {
public:
LInstruction()
: environment_(NULL),
@@ -211,7 +216,7 @@ class LInstruction: public ZoneObject {
set_position(RelocInfo::kNoPosition);
}
- virtual ~LInstruction() { }
+ virtual ~LInstruction() {}
virtual void CompileToNative(LCodeGen* generator) = 0;
virtual const char* Mnemonic() const = 0;
@@ -310,11 +315,13 @@ class LInstruction: public ZoneObject {
// I = number of input operands.
// T = number of temporary operands.
template<int R, int I, int T>
-class LTemplateInstruction: public LInstruction {
+class LTemplateInstruction : public LInstruction {
public:
// Allow 0 or 1 output operands.
STATIC_ASSERT(R == 0 || R == 1);
- virtual bool HasResult() const { return R != 0 && result() != NULL; }
+ virtual bool HasResult() const V8_FINAL V8_OVERRIDE {
+ return R != 0 && result() != NULL;
+ }
void set_result(LOperand* operand) { results_[0] = operand; }
LOperand* result() const { return results_[0]; }
@@ -324,15 +331,15 @@ class LTemplateInstruction: public LInstruction {
EmbeddedContainer<LOperand*, T> temps_;
private:
- virtual int InputCount() { return I; }
- virtual LOperand* InputAt(int i) { return inputs_[i]; }
+ virtual int InputCount() V8_FINAL V8_OVERRIDE { return I; }
+ virtual LOperand* InputAt(int i) V8_FINAL V8_OVERRIDE { return inputs_[i]; }
- virtual int TempCount() { return T; }
- virtual LOperand* TempAt(int i) { return temps_[i]; }
+ virtual int TempCount() V8_FINAL V8_OVERRIDE { return T; }
+ virtual LOperand* TempAt(int i) V8_FINAL V8_OVERRIDE { return temps_[i]; }
};
-class LGap: public LTemplateInstruction<0, 0, 0> {
+class LGap : public LTemplateInstruction<0, 0, 0> {
public:
explicit LGap(HBasicBlock* block)
: block_(block) {
@@ -343,8 +350,8 @@ class LGap: public LTemplateInstruction<0, 0, 0> {
}
// Can't use the DECLARE-macro here because of sub-classes.
- virtual bool IsGap() const { return true; }
- virtual void PrintDataTo(StringStream* stream);
+ virtual bool IsGap() const V8_FINAL V8_OVERRIDE { return true; }
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
static LGap* cast(LInstruction* instr) {
ASSERT(instr->IsGap());
return reinterpret_cast<LGap*>(instr);
@@ -380,11 +387,11 @@ class LGap: public LTemplateInstruction<0, 0, 0> {
};
-class LInstructionGap: public LGap {
+class LInstructionGap V8_FINAL : public LGap {
public:
explicit LInstructionGap(HBasicBlock* block) : LGap(block) { }
- virtual bool HasInterestingComment(LCodeGen* gen) const {
+ virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
return !IsRedundant();
}
@@ -392,14 +399,14 @@ class LInstructionGap: public LGap {
};
-class LGoto: public LTemplateInstruction<0, 0, 0> {
+class LGoto V8_FINAL : public LTemplateInstruction<0, 0, 0> {
public:
explicit LGoto(int block_id) : block_id_(block_id) { }
- virtual bool HasInterestingComment(LCodeGen* gen) const;
+ virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
- virtual void PrintDataTo(StringStream* stream);
- virtual bool IsControl() const { return true; }
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual bool IsControl() const V8_OVERRIDE { return true; }
int block_id() const { return block_id_; }
@@ -408,7 +415,7 @@ class LGoto: public LTemplateInstruction<0, 0, 0> {
};
-class LLazyBailout: public LTemplateInstruction<0, 0, 0> {
+class LLazyBailout V8_FINAL : public LTemplateInstruction<0, 0, 0> {
public:
LLazyBailout() : gap_instructions_size_(0) { }
@@ -424,7 +431,7 @@ class LLazyBailout: public LTemplateInstruction<0, 0, 0> {
};
-class LDummyUse: public LTemplateInstruction<1, 1, 0> {
+class LDummyUse V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LDummyUse(LOperand* value) {
inputs_[0] = value;
@@ -433,22 +440,24 @@ class LDummyUse: public LTemplateInstruction<1, 1, 0> {
};
-class LDeoptimize: public LTemplateInstruction<0, 0, 0> {
+class LDeoptimize V8_FINAL : public LTemplateInstruction<0, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
DECLARE_HYDROGEN_ACCESSOR(Deoptimize)
};
-class LLabel: public LGap {
+class LLabel V8_FINAL : public LGap {
public:
explicit LLabel(HBasicBlock* block)
: LGap(block), replacement_(NULL) { }
- virtual bool HasInterestingComment(LCodeGen* gen) const { return false; }
+ virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+ return false;
+ }
DECLARE_CONCRETE_INSTRUCTION(Label, "label")
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
int block_id() const { return block()->block_id(); }
bool is_loop_header() const { return block()->IsLoopHeader(); }
@@ -464,14 +473,16 @@ class LLabel: public LGap {
};
-class LParameter: public LTemplateInstruction<1, 0, 0> {
+class LParameter V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
- virtual bool HasInterestingComment(LCodeGen* gen) const { return false; }
+ virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+ return false;
+ }
DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter")
};
-class LCallStub: public LTemplateInstruction<1, 0, 0> {
+class LCallStub V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(CallStub, "call-stub")
DECLARE_HYDROGEN_ACCESSOR(CallStub)
@@ -482,19 +493,21 @@ class LCallStub: public LTemplateInstruction<1, 0, 0> {
};
-class LUnknownOSRValue: public LTemplateInstruction<1, 0, 0> {
+class LUnknownOSRValue V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
- virtual bool HasInterestingComment(LCodeGen* gen) const { return false; }
+ virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+ return false;
+ }
DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown-osr-value")
};
template<int I, int T>
-class LControlInstruction: public LTemplateInstruction<0, I, T> {
+class LControlInstruction : public LTemplateInstruction<0, I, T> {
public:
LControlInstruction() : false_label_(NULL), true_label_(NULL) { }
- virtual bool IsControl() const { return true; }
+ virtual bool IsControl() const V8_FINAL V8_OVERRIDE { return true; }
int SuccessorCount() { return hydrogen()->SuccessorCount(); }
HBasicBlock* SuccessorAt(int i) { return hydrogen()->SuccessorAt(i); }
@@ -533,7 +546,7 @@ class LControlInstruction: public LTemplateInstruction<0, I, T> {
};
-class LWrapReceiver: public LTemplateInstruction<1, 2, 0> {
+class LWrapReceiver V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LWrapReceiver(LOperand* receiver, LOperand* function) {
inputs_[0] = receiver;
@@ -547,7 +560,7 @@ class LWrapReceiver: public LTemplateInstruction<1, 2, 0> {
};
-class LApplyArguments: public LTemplateInstruction<1, 4, 0> {
+class LApplyArguments V8_FINAL : public LTemplateInstruction<1, 4, 0> {
public:
LApplyArguments(LOperand* function,
LOperand* receiver,
@@ -568,7 +581,7 @@ class LApplyArguments: public LTemplateInstruction<1, 4, 0> {
};
-class LAccessArgumentsAt: public LTemplateInstruction<1, 3, 0> {
+class LAccessArgumentsAt V8_FINAL : public LTemplateInstruction<1, 3, 0> {
public:
LAccessArgumentsAt(LOperand* arguments, LOperand* length, LOperand* index) {
inputs_[0] = arguments;
@@ -582,11 +595,11 @@ class LAccessArgumentsAt: public LTemplateInstruction<1, 3, 0> {
LOperand* length() { return inputs_[1]; }
LOperand* index() { return inputs_[2]; }
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LArgumentsLength: public LTemplateInstruction<1, 1, 0> {
+class LArgumentsLength V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LArgumentsLength(LOperand* elements) {
inputs_[0] = elements;
@@ -598,14 +611,14 @@ class LArgumentsLength: public LTemplateInstruction<1, 1, 0> {
};
-class LArgumentsElements: public LTemplateInstruction<1, 0, 0> {
+class LArgumentsElements V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments-elements")
DECLARE_HYDROGEN_ACCESSOR(ArgumentsElements)
};
-class LModI: public LTemplateInstruction<1, 2, 3> {
+class LModI V8_FINAL : public LTemplateInstruction<1, 2, 3> {
public:
// Used when the right hand is a constant power of 2.
LModI(LOperand* left,
@@ -641,7 +654,7 @@ class LModI: public LTemplateInstruction<1, 2, 3> {
};
-class LDivI: public LTemplateInstruction<1, 2, 0> {
+class LDivI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LDivI(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -656,7 +669,7 @@ class LDivI: public LTemplateInstruction<1, 2, 0> {
};
-class LMathFloorOfDiv: public LTemplateInstruction<1, 2, 1> {
+class LMathFloorOfDiv V8_FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LMathFloorOfDiv(LOperand* left,
LOperand* right,
@@ -675,7 +688,7 @@ class LMathFloorOfDiv: public LTemplateInstruction<1, 2, 1> {
};
-class LMulI: public LTemplateInstruction<1, 2, 1> {
+class LMulI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LMulI(LOperand* left, LOperand* right, LOperand* temp) {
inputs_[0] = left;
@@ -693,7 +706,7 @@ class LMulI: public LTemplateInstruction<1, 2, 1> {
// Instruction for computing multiplier * multiplicand + addend.
-class LMultiplyAddD: public LTemplateInstruction<1, 3, 0> {
+class LMultiplyAddD V8_FINAL : public LTemplateInstruction<1, 3, 0> {
public:
LMultiplyAddD(LOperand* addend, LOperand* multiplier,
LOperand* multiplicand) {
@@ -710,13 +723,13 @@ class LMultiplyAddD: public LTemplateInstruction<1, 3, 0> {
};
-class LDebugBreak: public LTemplateInstruction<0, 0, 0> {
+class LDebugBreak V8_FINAL : public LTemplateInstruction<0, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(DebugBreak, "break")
};
-class LCompareNumericAndBranch: public LControlInstruction<2, 0> {
+class LCompareNumericAndBranch V8_FINAL : public LControlInstruction<2, 0> {
public:
LCompareNumericAndBranch(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -735,11 +748,11 @@ class LCompareNumericAndBranch: public LControlInstruction<2, 0> {
return hydrogen()->representation().IsDouble();
}
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LMathFloor: public LTemplateInstruction<1, 1, 1> {
+class LMathFloor V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LMathFloor(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -754,7 +767,7 @@ class LMathFloor: public LTemplateInstruction<1, 1, 1> {
};
-class LMathRound: public LTemplateInstruction<1, 1, 1> {
+class LMathRound V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LMathRound(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -769,7 +782,7 @@ class LMathRound: public LTemplateInstruction<1, 1, 1> {
};
-class LMathAbs: public LTemplateInstruction<1, 1, 0> {
+class LMathAbs V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LMathAbs(LOperand* value) {
inputs_[0] = value;
@@ -782,7 +795,7 @@ class LMathAbs: public LTemplateInstruction<1, 1, 0> {
};
-class LMathLog: public LTemplateInstruction<1, 1, 0> {
+class LMathLog V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LMathLog(LOperand* value) {
inputs_[0] = value;
@@ -794,7 +807,7 @@ class LMathLog: public LTemplateInstruction<1, 1, 0> {
};
-class LMathSin: public LTemplateInstruction<1, 1, 0> {
+class LMathSin V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LMathSin(LOperand* value) {
inputs_[0] = value;
@@ -806,7 +819,7 @@ class LMathSin: public LTemplateInstruction<1, 1, 0> {
};
-class LMathCos: public LTemplateInstruction<1, 1, 0> {
+class LMathCos V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LMathCos(LOperand* value) {
inputs_[0] = value;
@@ -818,7 +831,7 @@ class LMathCos: public LTemplateInstruction<1, 1, 0> {
};
-class LMathTan: public LTemplateInstruction<1, 1, 0> {
+class LMathTan V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LMathTan(LOperand* value) {
inputs_[0] = value;
@@ -830,7 +843,7 @@ class LMathTan: public LTemplateInstruction<1, 1, 0> {
};
-class LMathExp: public LTemplateInstruction<1, 1, 3> {
+class LMathExp V8_FINAL : public LTemplateInstruction<1, 1, 3> {
public:
LMathExp(LOperand* value,
LOperand* double_temp,
@@ -852,7 +865,7 @@ class LMathExp: public LTemplateInstruction<1, 1, 3> {
};
-class LMathSqrt: public LTemplateInstruction<1, 1, 0> {
+class LMathSqrt V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LMathSqrt(LOperand* value) {
inputs_[0] = value;
@@ -864,7 +877,7 @@ class LMathSqrt: public LTemplateInstruction<1, 1, 0> {
};
-class LMathPowHalf: public LTemplateInstruction<1, 1, 1> {
+class LMathPowHalf V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LMathPowHalf(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -878,7 +891,7 @@ class LMathPowHalf: public LTemplateInstruction<1, 1, 1> {
};
-class LCmpObjectEqAndBranch: public LControlInstruction<2, 0> {
+class LCmpObjectEqAndBranch V8_FINAL : public LControlInstruction<2, 0> {
public:
LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -893,7 +906,7 @@ class LCmpObjectEqAndBranch: public LControlInstruction<2, 0> {
};
-class LCmpHoleAndBranch: public LControlInstruction<1, 0> {
+class LCmpHoleAndBranch V8_FINAL : public LControlInstruction<1, 0> {
public:
explicit LCmpHoleAndBranch(LOperand* object) {
inputs_[0] = object;
@@ -906,7 +919,7 @@ class LCmpHoleAndBranch: public LControlInstruction<1, 0> {
};
-class LIsObjectAndBranch: public LControlInstruction<1, 1> {
+class LIsObjectAndBranch V8_FINAL : public LControlInstruction<1, 1> {
public:
LIsObjectAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -923,7 +936,7 @@ class LIsObjectAndBranch: public LControlInstruction<1, 1> {
};
-class LIsNumberAndBranch: public LControlInstruction<1, 0> {
+class LIsNumberAndBranch V8_FINAL : public LControlInstruction<1, 0> {
public:
explicit LIsNumberAndBranch(LOperand* value) {
inputs_[0] = value;
@@ -936,7 +949,7 @@ class LIsNumberAndBranch: public LControlInstruction<1, 0> {
};
-class LIsStringAndBranch: public LControlInstruction<1, 1> {
+class LIsStringAndBranch V8_FINAL : public LControlInstruction<1, 1> {
public:
LIsStringAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -949,11 +962,11 @@ class LIsStringAndBranch: public LControlInstruction<1, 1> {
DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch, "is-string-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsStringAndBranch)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LIsSmiAndBranch: public LControlInstruction<1, 0> {
+class LIsSmiAndBranch V8_FINAL : public LControlInstruction<1, 0> {
public:
explicit LIsSmiAndBranch(LOperand* value) {
inputs_[0] = value;
@@ -964,11 +977,11 @@ class LIsSmiAndBranch: public LControlInstruction<1, 0> {
DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsSmiAndBranch)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LIsUndetectableAndBranch: public LControlInstruction<1, 1> {
+class LIsUndetectableAndBranch V8_FINAL : public LControlInstruction<1, 1> {
public:
explicit LIsUndetectableAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -982,11 +995,11 @@ class LIsUndetectableAndBranch: public LControlInstruction<1, 1> {
"is-undetectable-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsUndetectableAndBranch)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LStringCompareAndBranch: public LControlInstruction<2, 0> {
+class LStringCompareAndBranch V8_FINAL : public LControlInstruction<2, 0> {
public:
LStringCompareAndBranch(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -1002,11 +1015,11 @@ class LStringCompareAndBranch: public LControlInstruction<2, 0> {
Token::Value op() const { return hydrogen()->token(); }
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LHasInstanceTypeAndBranch: public LControlInstruction<1, 0> {
+class LHasInstanceTypeAndBranch V8_FINAL : public LControlInstruction<1, 0> {
public:
explicit LHasInstanceTypeAndBranch(LOperand* value) {
inputs_[0] = value;
@@ -1018,11 +1031,11 @@ class LHasInstanceTypeAndBranch: public LControlInstruction<1, 0> {
"has-instance-type-and-branch")
DECLARE_HYDROGEN_ACCESSOR(HasInstanceTypeAndBranch)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LGetCachedArrayIndex: public LTemplateInstruction<1, 1, 0> {
+class LGetCachedArrayIndex V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LGetCachedArrayIndex(LOperand* value) {
inputs_[0] = value;
@@ -1035,7 +1048,8 @@ class LGetCachedArrayIndex: public LTemplateInstruction<1, 1, 0> {
};
-class LHasCachedArrayIndexAndBranch: public LControlInstruction<1, 0> {
+class LHasCachedArrayIndexAndBranch V8_FINAL
+ : public LControlInstruction<1, 0> {
public:
explicit LHasCachedArrayIndexAndBranch(LOperand* value) {
inputs_[0] = value;
@@ -1047,11 +1061,11 @@ class LHasCachedArrayIndexAndBranch: public LControlInstruction<1, 0> {
"has-cached-array-index-and-branch")
DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndexAndBranch)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LClassOfTestAndBranch: public LControlInstruction<1, 1> {
+class LClassOfTestAndBranch V8_FINAL : public LControlInstruction<1, 1> {
public:
LClassOfTestAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -1065,11 +1079,11 @@ class LClassOfTestAndBranch: public LControlInstruction<1, 1> {
"class-of-test-and-branch")
DECLARE_HYDROGEN_ACCESSOR(ClassOfTestAndBranch)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LCmpT: public LTemplateInstruction<1, 2, 0> {
+class LCmpT V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LCmpT(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -1086,7 +1100,7 @@ class LCmpT: public LTemplateInstruction<1, 2, 0> {
};
-class LInstanceOf: public LTemplateInstruction<1, 2, 0> {
+class LInstanceOf V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LInstanceOf(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -1100,7 +1114,7 @@ class LInstanceOf: public LTemplateInstruction<1, 2, 0> {
};
-class LInstanceOfKnownGlobal: public LTemplateInstruction<1, 1, 1> {
+class LInstanceOfKnownGlobal V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LInstanceOfKnownGlobal(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -1118,7 +1132,8 @@ class LInstanceOfKnownGlobal: public LTemplateInstruction<1, 1, 1> {
LEnvironment* GetDeferredLazyDeoptimizationEnvironment() {
return lazy_deopt_env_;
}
- virtual void SetDeferredLazyDeoptimizationEnvironment(LEnvironment* env) {
+ virtual void SetDeferredLazyDeoptimizationEnvironment(
+ LEnvironment* env) V8_OVERRIDE {
lazy_deopt_env_ = env;
}
@@ -1127,7 +1142,7 @@ class LInstanceOfKnownGlobal: public LTemplateInstruction<1, 1, 1> {
};
-class LInstanceSize: public LTemplateInstruction<1, 1, 0> {
+class LInstanceSize V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LInstanceSize(LOperand* object) {
inputs_[0] = object;
@@ -1140,7 +1155,7 @@ class LInstanceSize: public LTemplateInstruction<1, 1, 0> {
};
-class LBoundsCheck: public LTemplateInstruction<0, 2, 0> {
+class LBoundsCheck V8_FINAL : public LTemplateInstruction<0, 2, 0> {
public:
LBoundsCheck(LOperand* index, LOperand* length) {
inputs_[0] = index;
@@ -1155,7 +1170,7 @@ class LBoundsCheck: public LTemplateInstruction<0, 2, 0> {
};
-class LBitI: public LTemplateInstruction<1, 2, 0> {
+class LBitI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LBitI(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -1172,7 +1187,7 @@ class LBitI: public LTemplateInstruction<1, 2, 0> {
};
-class LShiftI: public LTemplateInstruction<1, 2, 0> {
+class LShiftI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LShiftI(Token::Value op, LOperand* left, LOperand* right, bool can_deopt)
: op_(op), can_deopt_(can_deopt) {
@@ -1193,7 +1208,7 @@ class LShiftI: public LTemplateInstruction<1, 2, 0> {
};
-class LSubI: public LTemplateInstruction<1, 2, 0> {
+class LSubI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LSubI(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -1208,7 +1223,7 @@ class LSubI: public LTemplateInstruction<1, 2, 0> {
};
-class LConstantI: public LTemplateInstruction<1, 0, 0> {
+class LConstantI V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ConstantI, "constant-i")
DECLARE_HYDROGEN_ACCESSOR(Constant)
@@ -1217,7 +1232,7 @@ class LConstantI: public LTemplateInstruction<1, 0, 0> {
};
-class LConstantS: public LTemplateInstruction<1, 0, 0> {
+class LConstantS V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ConstantS, "constant-s")
DECLARE_HYDROGEN_ACCESSOR(Constant)
@@ -1226,7 +1241,7 @@ class LConstantS: public LTemplateInstruction<1, 0, 0> {
};
-class LConstantD: public LTemplateInstruction<1, 0, 0> {
+class LConstantD V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ConstantD, "constant-d")
DECLARE_HYDROGEN_ACCESSOR(Constant)
@@ -1235,7 +1250,7 @@ class LConstantD: public LTemplateInstruction<1, 0, 0> {
};
-class LConstantE: public LTemplateInstruction<1, 0, 0> {
+class LConstantE V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ConstantE, "constant-e")
DECLARE_HYDROGEN_ACCESSOR(Constant)
@@ -1246,16 +1261,18 @@ class LConstantE: public LTemplateInstruction<1, 0, 0> {
};
-class LConstantT: public LTemplateInstruction<1, 0, 0> {
+class LConstantT V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ConstantT, "constant-t")
DECLARE_HYDROGEN_ACCESSOR(Constant)
- Handle<Object> value() const { return hydrogen()->handle(); }
+ Handle<Object> value(Isolate* isolate) const {
+ return hydrogen()->handle(isolate);
+ }
};
-class LBranch: public LControlInstruction<1, 0> {
+class LBranch V8_FINAL : public LControlInstruction<1, 0> {
public:
explicit LBranch(LOperand* value) {
inputs_[0] = value;
@@ -1266,11 +1283,11 @@ class LBranch: public LControlInstruction<1, 0> {
DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
DECLARE_HYDROGEN_ACCESSOR(Branch)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LCmpMapAndBranch: public LControlInstruction<1, 1> {
+class LCmpMapAndBranch V8_FINAL : public LControlInstruction<1, 1> {
public:
LCmpMapAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -1287,7 +1304,7 @@ class LCmpMapAndBranch: public LControlInstruction<1, 1> {
};
-class LMapEnumLength: public LTemplateInstruction<1, 1, 0> {
+class LMapEnumLength V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LMapEnumLength(LOperand* value) {
inputs_[0] = value;
@@ -1299,7 +1316,7 @@ class LMapEnumLength: public LTemplateInstruction<1, 1, 0> {
};
-class LElementsKind: public LTemplateInstruction<1, 1, 0> {
+class LElementsKind V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LElementsKind(LOperand* value) {
inputs_[0] = value;
@@ -1312,7 +1329,7 @@ class LElementsKind: public LTemplateInstruction<1, 1, 0> {
};
-class LValueOf: public LTemplateInstruction<1, 1, 1> {
+class LValueOf V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LValueOf(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -1327,7 +1344,7 @@ class LValueOf: public LTemplateInstruction<1, 1, 1> {
};
-class LDateField: public LTemplateInstruction<1, 1, 1> {
+class LDateField V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LDateField(LOperand* date, LOperand* temp, Smi* index) : index_(index) {
inputs_[0] = date;
@@ -1346,7 +1363,7 @@ class LDateField: public LTemplateInstruction<1, 1, 1> {
};
-class LSeqStringSetChar: public LTemplateInstruction<1, 3, 0> {
+class LSeqStringSetChar V8_FINAL : public LTemplateInstruction<1, 3, 0> {
public:
LSeqStringSetChar(String::Encoding encoding,
LOperand* string,
@@ -1370,7 +1387,7 @@ class LSeqStringSetChar: public LTemplateInstruction<1, 3, 0> {
};
-class LThrow: public LTemplateInstruction<0, 1, 0> {
+class LThrow V8_FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LThrow(LOperand* value) {
inputs_[0] = value;
@@ -1382,7 +1399,7 @@ class LThrow: public LTemplateInstruction<0, 1, 0> {
};
-class LAddI: public LTemplateInstruction<1, 2, 0> {
+class LAddI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LAddI(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -1397,7 +1414,7 @@ class LAddI: public LTemplateInstruction<1, 2, 0> {
};
-class LMathMinMax: public LTemplateInstruction<1, 2, 0> {
+class LMathMinMax V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LMathMinMax(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -1412,7 +1429,7 @@ class LMathMinMax: public LTemplateInstruction<1, 2, 0> {
};
-class LPower: public LTemplateInstruction<1, 2, 0> {
+class LPower V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LPower(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -1427,20 +1444,29 @@ class LPower: public LTemplateInstruction<1, 2, 0> {
};
-class LRandom: public LTemplateInstruction<1, 1, 0> {
+class LRandom V8_FINAL : public LTemplateInstruction<1, 1, 3> {
public:
- explicit LRandom(LOperand* global_object) {
+ LRandom(LOperand* global_object,
+ LOperand* scratch,
+ LOperand* scratch2,
+ LOperand* scratch3) {
inputs_[0] = global_object;
+ temps_[0] = scratch;
+ temps_[1] = scratch2;
+ temps_[2] = scratch3;
}
- LOperand* global_object() { return inputs_[0]; }
+ LOperand* global_object() const { return inputs_[0]; }
+ LOperand* scratch() const { return temps_[0]; }
+ LOperand* scratch2() const { return temps_[1]; }
+ LOperand* scratch3() const { return temps_[2]; }
DECLARE_CONCRETE_INSTRUCTION(Random, "random")
DECLARE_HYDROGEN_ACCESSOR(Random)
};
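
The LRandom change is visible in its template arguments: LTemplateInstruction<R, I, T> counts results, inputs, and temps, so moving from <1, 1, 0> to <1, 1, 3> reserves three scratch registers for an inline implementation. A reduced sketch of that fixed-size operand-array pattern (stand-in types, not the real Lithium classes):

    #include <cstdio>

    struct LOperand { int index; };  // Stand-in for V8's LOperand.

    // Arrays sized by the template arguments mirror how
    // LTemplateInstruction<R, I, T> stores results, inputs and temps.
    template <int R, int I, int T>
    struct LTemplateInstructionSketch {
      LOperand* results_[R == 0 ? 1 : R];
      LOperand* inputs_[I == 0 ? 1 : I];
      LOperand* temps_[T == 0 ? 1 : T];
    };

    struct LRandomSketch : LTemplateInstructionSketch<1, 1, 3> {
      LRandomSketch(LOperand* global_object, LOperand* s1, LOperand* s2,
                    LOperand* s3) {
        inputs_[0] = global_object;
        temps_[0] = s1;
        temps_[1] = s2;
        temps_[2] = s3;
      }
    };

    int main() {
      LOperand g = {0}, a = {1}, b = {2}, c = {3};
      LRandomSketch r(&g, &a, &b, &c);
      std::printf("temps: %d %d %d\n", r.temps_[0]->index,
                  r.temps_[1]->index, r.temps_[2]->index);  // temps: 1 2 3
      return 0;
    }
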
-class LArithmeticD: public LTemplateInstruction<1, 2, 0> {
+class LArithmeticD V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LArithmeticD(Token::Value op, LOperand* left, LOperand* right)
: op_(op) {
@@ -1452,16 +1478,18 @@ class LArithmeticD: public LTemplateInstruction<1, 2, 0> {
LOperand* left() { return inputs_[0]; }
LOperand* right() { return inputs_[1]; }
- virtual Opcode opcode() const { return LInstruction::kArithmeticD; }
- virtual void CompileToNative(LCodeGen* generator);
- virtual const char* Mnemonic() const;
+ virtual Opcode opcode() const V8_OVERRIDE {
+ return LInstruction::kArithmeticD;
+ }
+ virtual void CompileToNative(LCodeGen* generator) V8_OVERRIDE;
+ virtual const char* Mnemonic() const V8_OVERRIDE;
private:
Token::Value op_;
};
-class LArithmeticT: public LTemplateInstruction<1, 2, 0> {
+class LArithmeticT V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LArithmeticT(Token::Value op, LOperand* left, LOperand* right)
: op_(op) {
@@ -1473,16 +1501,16 @@ class LArithmeticT: public LTemplateInstruction<1, 2, 0> {
LOperand* right() { return inputs_[1]; }
Token::Value op() const { return op_; }
- virtual Opcode opcode() const { return LInstruction::kArithmeticT; }
- virtual void CompileToNative(LCodeGen* generator);
- virtual const char* Mnemonic() const;
+ virtual Opcode opcode() const V8_OVERRIDE {
+ return LInstruction::kArithmeticT;
+ }
+ virtual void CompileToNative(LCodeGen* generator) V8_OVERRIDE;
+ virtual const char* Mnemonic() const V8_OVERRIDE;
private:
Token::Value op_;
};
-class LReturn: public LTemplateInstruction<0, 2, 0> {
+class LReturn V8_FINAL : public LTemplateInstruction<0, 2, 0> {
public:
explicit LReturn(LOperand* value, LOperand* parameter_count) {
inputs_[0] = value;
@@ -1504,7 +1532,7 @@ class LReturn: public LTemplateInstruction<0, 2, 0> {
};
-class LLoadNamedField: public LTemplateInstruction<1, 1, 0> {
+class LLoadNamedField V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadNamedField(LOperand* object) {
inputs_[0] = object;
@@ -1517,7 +1545,7 @@ class LLoadNamedField: public LTemplateInstruction<1, 1, 0> {
};
-class LLoadNamedGeneric: public LTemplateInstruction<1, 1, 0> {
+class LLoadNamedGeneric V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadNamedGeneric(LOperand* object) {
inputs_[0] = object;
@@ -1532,7 +1560,7 @@ class LLoadNamedGeneric: public LTemplateInstruction<1, 1, 0> {
};
-class LLoadFunctionPrototype: public LTemplateInstruction<1, 1, 0> {
+class LLoadFunctionPrototype V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadFunctionPrototype(LOperand* function) {
inputs_[0] = function;
@@ -1545,7 +1573,8 @@ class LLoadFunctionPrototype: public LTemplateInstruction<1, 1, 0> {
};
-class LLoadExternalArrayPointer: public LTemplateInstruction<1, 1, 0> {
+class LLoadExternalArrayPointer V8_FINAL
+ : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadExternalArrayPointer(LOperand* object) {
inputs_[0] = object;
@@ -1558,7 +1587,7 @@ class LLoadExternalArrayPointer: public LTemplateInstruction<1, 1, 0> {
};
-class LLoadKeyed: public LTemplateInstruction<1, 2, 0> {
+class LLoadKeyed V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LLoadKeyed(LOperand* elements, LOperand* key) {
inputs_[0] = elements;
@@ -1582,7 +1611,7 @@ class LLoadKeyed: public LTemplateInstruction<1, 2, 0> {
};
-class LLoadKeyedGeneric: public LTemplateInstruction<1, 2, 0> {
+class LLoadKeyedGeneric V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LLoadKeyedGeneric(LOperand* object, LOperand* key) {
inputs_[0] = object;
@@ -1596,14 +1625,14 @@ class LLoadKeyedGeneric: public LTemplateInstruction<1, 2, 0> {
};
-class LLoadGlobalCell: public LTemplateInstruction<1, 0, 0> {
+class LLoadGlobalCell V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(LoadGlobalCell, "load-global-cell")
DECLARE_HYDROGEN_ACCESSOR(LoadGlobalCell)
};
-class LLoadGlobalGeneric: public LTemplateInstruction<1, 1, 0> {
+class LLoadGlobalGeneric V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadGlobalGeneric(LOperand* global_object) {
inputs_[0] = global_object;
@@ -1619,7 +1648,7 @@ class LLoadGlobalGeneric: public LTemplateInstruction<1, 1, 0> {
};
-class LStoreGlobalCell: public LTemplateInstruction<0, 1, 1> {
+class LStoreGlobalCell V8_FINAL : public LTemplateInstruction<0, 1, 1> {
public:
LStoreGlobalCell(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -1634,7 +1663,7 @@ class LStoreGlobalCell: public LTemplateInstruction<0, 1, 1> {
};
-class LStoreGlobalGeneric: public LTemplateInstruction<0, 2, 0> {
+class LStoreGlobalGeneric V8_FINAL : public LTemplateInstruction<0, 2, 0> {
public:
explicit LStoreGlobalGeneric(LOperand* global_object,
LOperand* value) {
@@ -1653,7 +1682,7 @@ class LStoreGlobalGeneric: public LTemplateInstruction<0, 2, 0> {
};
-class LLoadContextSlot: public LTemplateInstruction<1, 1, 0> {
+class LLoadContextSlot V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadContextSlot(LOperand* context) {
inputs_[0] = context;
@@ -1670,7 +1699,7 @@ class LLoadContextSlot: public LTemplateInstruction<1, 1, 0> {
};
-class LStoreContextSlot: public LTemplateInstruction<0, 2, 0> {
+class LStoreContextSlot V8_FINAL : public LTemplateInstruction<0, 2, 0> {
public:
LStoreContextSlot(LOperand* context, LOperand* value) {
inputs_[0] = context;
@@ -1685,11 +1714,11 @@ class LStoreContextSlot: public LTemplateInstruction<0, 2, 0> {
int slot_index() { return hydrogen()->slot_index(); }
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LPushArgument: public LTemplateInstruction<0, 1, 0> {
+class LPushArgument V8_FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LPushArgument(LOperand* value) {
inputs_[0] = value;
@@ -1701,7 +1730,7 @@ class LPushArgument: public LTemplateInstruction<0, 1, 0> {
};
-class LDrop: public LTemplateInstruction<0, 0, 0> {
+class LDrop V8_FINAL : public LTemplateInstruction<0, 0, 0> {
public:
explicit LDrop(int count) : count_(count) { }
@@ -1714,7 +1743,24 @@ class LDrop: public LTemplateInstruction<0, 0, 0> {
};
-class LInnerAllocatedObject: public LTemplateInstruction<1, 1, 0> {
+class LStoreCodeEntry V8_FINAL : public LTemplateInstruction<0, 1, 1> {
+ public:
+ LStoreCodeEntry(LOperand* function, LOperand* code_object) {
+ inputs_[0] = function;
+ temps_[0] = code_object;
+ }
+
+ LOperand* function() { return inputs_[0]; }
+ LOperand* code_object() { return temps_[0]; }
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreCodeEntry, "store-code-entry")
+ DECLARE_HYDROGEN_ACCESSOR(StoreCodeEntry)
+};
+
+
+class LInnerAllocatedObject V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LInnerAllocatedObject(LOperand* base_object) {
inputs_[0] = base_object;
@@ -1723,28 +1769,28 @@ class LInnerAllocatedObject: public LTemplateInstruction<1, 1, 0> {
LOperand* base_object() { return inputs_[0]; }
int offset() { return hydrogen()->offset(); }
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(InnerAllocatedObject, "sub-allocated-object")
DECLARE_HYDROGEN_ACCESSOR(InnerAllocatedObject)
};
-class LThisFunction: public LTemplateInstruction<1, 0, 0> {
+class LThisFunction V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function")
DECLARE_HYDROGEN_ACCESSOR(ThisFunction)
};
-class LContext: public LTemplateInstruction<1, 0, 0> {
+class LContext V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(Context, "context")
DECLARE_HYDROGEN_ACCESSOR(Context)
};
-class LOuterContext: public LTemplateInstruction<1, 1, 0> {
+class LOuterContext V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LOuterContext(LOperand* context) {
inputs_[0] = context;
@@ -1756,14 +1802,14 @@ class LOuterContext: public LTemplateInstruction<1, 1, 0> {
};
-class LDeclareGlobals: public LTemplateInstruction<0, 0, 0> {
+class LDeclareGlobals V8_FINAL : public LTemplateInstruction<0, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(DeclareGlobals, "declare-globals")
DECLARE_HYDROGEN_ACCESSOR(DeclareGlobals)
};
-class LGlobalObject: public LTemplateInstruction<1, 1, 0> {
+class LGlobalObject V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LGlobalObject(LOperand* context) {
inputs_[0] = context;
@@ -1775,7 +1821,7 @@ class LGlobalObject: public LTemplateInstruction<1, 1, 0> {
};
-class LGlobalReceiver: public LTemplateInstruction<1, 1, 0> {
+class LGlobalReceiver V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LGlobalReceiver(LOperand* global_object) {
inputs_[0] = global_object;
@@ -1787,19 +1833,19 @@ class LGlobalReceiver: public LTemplateInstruction<1, 1, 0> {
};
-class LCallConstantFunction: public LTemplateInstruction<1, 0, 0> {
+class LCallConstantFunction V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(CallConstantFunction, "call-constant-function")
DECLARE_HYDROGEN_ACCESSOR(CallConstantFunction)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
Handle<JSFunction> function() { return hydrogen()->function(); }
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LInvokeFunction: public LTemplateInstruction<1, 1, 0> {
+class LInvokeFunction V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LInvokeFunction(LOperand* function) {
inputs_[0] = function;
@@ -1810,13 +1856,13 @@ class LInvokeFunction: public LTemplateInstruction<1, 1, 0> {
DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallKeyed: public LTemplateInstruction<1, 1, 0> {
+class LCallKeyed V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LCallKeyed(LOperand* key) {
inputs_[0] = key;
@@ -1827,26 +1873,26 @@ class LCallKeyed: public LTemplateInstruction<1, 1, 0> {
DECLARE_CONCRETE_INSTRUCTION(CallKeyed, "call-keyed")
DECLARE_HYDROGEN_ACCESSOR(CallKeyed)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallNamed: public LTemplateInstruction<1, 0, 0> {
+class LCallNamed V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(CallNamed, "call-named")
DECLARE_HYDROGEN_ACCESSOR(CallNamed)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
Handle<String> name() const { return hydrogen()->name(); }
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallFunction: public LTemplateInstruction<1, 1, 0> {
+class LCallFunction V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LCallFunction(LOperand* function) {
inputs_[0] = function;
@@ -1861,7 +1907,7 @@ class LCallFunction: public LTemplateInstruction<1, 1, 0> {
};
-class LCallGlobal: public LTemplateInstruction<1, 0, 0> {
+class LCallGlobal V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(CallGlobal, "call-global")
DECLARE_HYDROGEN_ACCESSOR(CallGlobal)
@@ -1873,18 +1919,18 @@ class LCallGlobal: public LTemplateInstruction<1, 0, 0> {
};
-class LCallKnownGlobal: public LTemplateInstruction<1, 0, 0> {
+class LCallKnownGlobal V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(CallKnownGlobal, "call-known-global")
DECLARE_HYDROGEN_ACCESSOR(CallKnownGlobal)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallNew: public LTemplateInstruction<1, 1, 0> {
+class LCallNew V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LCallNew(LOperand* constructor) {
inputs_[0] = constructor;
@@ -1895,13 +1941,13 @@ class LCallNew: public LTemplateInstruction<1, 1, 0> {
DECLARE_CONCRETE_INSTRUCTION(CallNew, "call-new")
DECLARE_HYDROGEN_ACCESSOR(CallNew)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallNewArray: public LTemplateInstruction<1, 1, 0> {
+class LCallNewArray V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LCallNewArray(LOperand* constructor) {
inputs_[0] = constructor;
@@ -1912,13 +1958,13 @@ class LCallNewArray: public LTemplateInstruction<1, 1, 0> {
DECLARE_CONCRETE_INSTRUCTION(CallNewArray, "call-new-array")
DECLARE_HYDROGEN_ACCESSOR(CallNewArray)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallRuntime: public LTemplateInstruction<1, 0, 0> {
+class LCallRuntime V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
@@ -1928,7 +1974,7 @@ class LCallRuntime: public LTemplateInstruction<1, 0, 0> {
};
-class LInteger32ToDouble: public LTemplateInstruction<1, 1, 0> {
+class LInteger32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LInteger32ToDouble(LOperand* value) {
inputs_[0] = value;
@@ -1940,7 +1986,7 @@ class LInteger32ToDouble: public LTemplateInstruction<1, 1, 0> {
};
-class LInteger32ToSmi: public LTemplateInstruction<1, 1, 0> {
+class LInteger32ToSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LInteger32ToSmi(LOperand* value) {
inputs_[0] = value;
@@ -1953,7 +1999,7 @@ class LInteger32ToSmi: public LTemplateInstruction<1, 1, 0> {
};
-class LUint32ToDouble: public LTemplateInstruction<1, 1, 0> {
+class LUint32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LUint32ToDouble(LOperand* value) {
inputs_[0] = value;
@@ -1965,7 +2011,7 @@ class LUint32ToDouble: public LTemplateInstruction<1, 1, 0> {
};
-class LNumberTagI: public LTemplateInstruction<1, 1, 0> {
+class LNumberTagI V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LNumberTagI(LOperand* value) {
inputs_[0] = value;
@@ -1977,7 +2023,7 @@ class LNumberTagI: public LTemplateInstruction<1, 1, 0> {
};
-class LNumberTagU: public LTemplateInstruction<1, 1, 0> {
+class LNumberTagU V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LNumberTagU(LOperand* value) {
inputs_[0] = value;
@@ -1989,7 +2035,7 @@ class LNumberTagU: public LTemplateInstruction<1, 1, 0> {
};
-class LNumberTagD: public LTemplateInstruction<1, 1, 2> {
+class LNumberTagD V8_FINAL : public LTemplateInstruction<1, 1, 2> {
public:
LNumberTagD(LOperand* value, LOperand* temp, LOperand* temp2) {
inputs_[0] = value;
@@ -2006,17 +2052,13 @@ class LNumberTagD: public LTemplateInstruction<1, 1, 2> {
};
-class LDoubleToSmi: public LTemplateInstruction<1, 1, 2> {
+class LDoubleToSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
- LDoubleToSmi(LOperand* value, LOperand* temp, LOperand* temp2) {
+ explicit LDoubleToSmi(LOperand* value) {
inputs_[0] = value;
- temps_[0] = temp;
- temps_[1] = temp2;
}
LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
DECLARE_CONCRETE_INSTRUCTION(DoubleToSmi, "double-to-smi")
DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
@@ -2026,17 +2068,13 @@ class LDoubleToSmi: public LTemplateInstruction<1, 1, 2> {
// Sometimes truncating conversion from a tagged value to an int32.
-class LDoubleToI: public LTemplateInstruction<1, 1, 2> {
+class LDoubleToI V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
- LDoubleToI(LOperand* value, LOperand* temp, LOperand* temp2) {
+ explicit LDoubleToI(LOperand* value) {
inputs_[0] = value;
- temps_[0] = temp;
- temps_[1] = temp2;
}
LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
DECLARE_CONCRETE_INSTRUCTION(DoubleToI, "double-to-i")
DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
@@ -2046,22 +2084,19 @@ class LDoubleToI: public LTemplateInstruction<1, 1, 2> {
// Truncating conversion from a tagged value to an int32.
-class LTaggedToI: public LTemplateInstruction<1, 1, 3> {
+class LTaggedToI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
public:
LTaggedToI(LOperand* value,
LOperand* temp,
- LOperand* temp2,
- LOperand* temp3) {
+ LOperand* temp2) {
inputs_[0] = value;
temps_[0] = temp;
temps_[1] = temp2;
- temps_[2] = temp3;
}
LOperand* value() { return inputs_[0]; }
LOperand* temp() { return temps_[0]; }
LOperand* temp2() { return temps_[1]; }
- LOperand* temp3() { return temps_[2]; }
DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i")
DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
@@ -2070,7 +2105,7 @@ class LTaggedToI: public LTemplateInstruction<1, 1, 3> {
};
-class LSmiTag: public LTemplateInstruction<1, 1, 0> {
+class LSmiTag V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LSmiTag(LOperand* value) {
inputs_[0] = value;
@@ -2082,7 +2117,7 @@ class LSmiTag: public LTemplateInstruction<1, 1, 0> {
};
-class LNumberUntagD: public LTemplateInstruction<1, 1, 0> {
+class LNumberUntagD V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LNumberUntagD(LOperand* value) {
inputs_[0] = value;
@@ -2095,7 +2130,7 @@ class LNumberUntagD: public LTemplateInstruction<1, 1, 0> {
};
-class LSmiUntag: public LTemplateInstruction<1, 1, 0> {
+class LSmiUntag V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
LSmiUntag(LOperand* value, bool needs_check)
: needs_check_(needs_check) {
@@ -2112,7 +2147,7 @@ class LSmiUntag: public LTemplateInstruction<1, 1, 0> {
};
-class LStoreNamedField: public LTemplateInstruction<0, 2, 1> {
+class LStoreNamedField V8_FINAL : public LTemplateInstruction<0, 2, 1> {
public:
LStoreNamedField(LOperand* object, LOperand* value, LOperand* temp) {
inputs_[0] = object;
@@ -2127,7 +2162,7 @@ class LStoreNamedField: public LTemplateInstruction<0, 2, 1> {
DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
Handle<Map> transition() const { return hydrogen()->transition_map(); }
Representation representation() const {
@@ -2136,7 +2171,7 @@ class LStoreNamedField: public LTemplateInstruction<0, 2, 1> {
};
-class LStoreNamedGeneric: public LTemplateInstruction<0, 2, 0> {
+class LStoreNamedGeneric V8_FINAL : public LTemplateInstruction<0, 2, 0> {
public:
LStoreNamedGeneric(LOperand* object, LOperand* value) {
inputs_[0] = object;
@@ -2149,14 +2184,14 @@ class LStoreNamedGeneric: public LTemplateInstruction<0, 2, 0> {
DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
Handle<Object> name() const { return hydrogen()->name(); }
StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
};
-class LStoreKeyed: public LTemplateInstruction<0, 3, 0> {
+class LStoreKeyed V8_FINAL : public LTemplateInstruction<0, 3, 0> {
public:
LStoreKeyed(LOperand* object, LOperand* key, LOperand* value) {
inputs_[0] = object;
@@ -2175,13 +2210,13 @@ class LStoreKeyed: public LTemplateInstruction<0, 3, 0> {
DECLARE_CONCRETE_INSTRUCTION(StoreKeyed, "store-keyed")
DECLARE_HYDROGEN_ACCESSOR(StoreKeyed)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); }
uint32_t additional_index() const { return hydrogen()->index_offset(); }
};
-class LStoreKeyedGeneric: public LTemplateInstruction<0, 3, 0> {
+class LStoreKeyedGeneric V8_FINAL : public LTemplateInstruction<0, 3, 0> {
public:
LStoreKeyedGeneric(LOperand* obj, LOperand* key, LOperand* value) {
inputs_[0] = obj;
@@ -2196,13 +2231,13 @@ class LStoreKeyedGeneric: public LTemplateInstruction<0, 3, 0> {
DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
};
-class LTransitionElementsKind: public LTemplateInstruction<0, 1, 1> {
+class LTransitionElementsKind V8_FINAL : public LTemplateInstruction<0, 1, 1> {
public:
LTransitionElementsKind(LOperand* object,
LOperand* new_map_temp) {
@@ -2217,7 +2252,7 @@ class LTransitionElementsKind: public LTemplateInstruction<0, 1, 1> {
"transition-elements-kind")
DECLARE_HYDROGEN_ACCESSOR(TransitionElementsKind)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
Handle<Map> original_map() { return hydrogen()->original_map(); }
Handle<Map> transitioned_map() { return hydrogen()->transitioned_map(); }
@@ -2226,7 +2261,7 @@ class LTransitionElementsKind: public LTemplateInstruction<0, 1, 1> {
};
-class LTrapAllocationMemento : public LTemplateInstruction<0, 1, 1> {
+class LTrapAllocationMemento V8_FINAL : public LTemplateInstruction<0, 1, 1> {
public:
LTrapAllocationMemento(LOperand* object,
LOperand* temp) {
@@ -2242,7 +2277,7 @@ class LTrapAllocationMemento : public LTemplateInstruction<0, 1, 1> {
};
-class LStringAdd: public LTemplateInstruction<1, 2, 0> {
+class LStringAdd V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LStringAdd(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -2258,7 +2293,7 @@ class LStringAdd: public LTemplateInstruction<1, 2, 0> {
-class LStringCharCodeAt: public LTemplateInstruction<1, 2, 0> {
+class LStringCharCodeAt V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LStringCharCodeAt(LOperand* string, LOperand* index) {
inputs_[0] = string;
@@ -2273,7 +2308,7 @@ class LStringCharCodeAt: public LTemplateInstruction<1, 2, 0> {
};
-class LStringCharFromCode: public LTemplateInstruction<1, 1, 0> {
+class LStringCharFromCode V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LStringCharFromCode(LOperand* char_code) {
inputs_[0] = char_code;
@@ -2286,20 +2321,20 @@ class LStringCharFromCode: public LTemplateInstruction<1, 1, 0> {
};
-class LCheckFunction: public LTemplateInstruction<0, 1, 0> {
+class LCheckValue V8_FINAL : public LTemplateInstruction<0, 1, 0> {
public:
- explicit LCheckFunction(LOperand* value) {
+ explicit LCheckValue(LOperand* value) {
inputs_[0] = value;
}
LOperand* value() { return inputs_[0]; }
- DECLARE_CONCRETE_INSTRUCTION(CheckFunction, "check-function")
- DECLARE_HYDROGEN_ACCESSOR(CheckFunction)
+ DECLARE_CONCRETE_INSTRUCTION(CheckValue, "check-value")
+ DECLARE_HYDROGEN_ACCESSOR(CheckValue)
};
-class LCheckInstanceType: public LTemplateInstruction<0, 1, 0> {
+class LCheckInstanceType V8_FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LCheckInstanceType(LOperand* value) {
inputs_[0] = value;
@@ -2312,7 +2347,7 @@ class LCheckInstanceType: public LTemplateInstruction<0, 1, 0> {
};
-class LCheckMaps: public LTemplateInstruction<0, 1, 0> {
+class LCheckMaps V8_FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LCheckMaps(LOperand* value) {
inputs_[0] = value;
@@ -2325,7 +2360,7 @@ class LCheckMaps: public LTemplateInstruction<0, 1, 0> {
};
-class LCheckSmi: public LTemplateInstruction<1, 1, 0> {
+class LCheckSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LCheckSmi(LOperand* value) {
inputs_[0] = value;
@@ -2337,7 +2372,7 @@ class LCheckSmi: public LTemplateInstruction<1, 1, 0> {
};
-class LCheckNonSmi: public LTemplateInstruction<0, 1, 0> {
+class LCheckNonSmi V8_FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LCheckNonSmi(LOperand* value) {
inputs_[0] = value;
@@ -2350,7 +2385,7 @@ class LCheckNonSmi: public LTemplateInstruction<0, 1, 0> {
};
-class LClampDToUint8: public LTemplateInstruction<1, 1, 1> {
+class LClampDToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LClampDToUint8(LOperand* unclamped, LOperand* temp) {
inputs_[0] = unclamped;
@@ -2364,7 +2399,7 @@ class LClampDToUint8: public LTemplateInstruction<1, 1, 1> {
};
-class LClampIToUint8: public LTemplateInstruction<1, 1, 0> {
+class LClampIToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LClampIToUint8(LOperand* unclamped) {
inputs_[0] = unclamped;
@@ -2376,7 +2411,7 @@ class LClampIToUint8: public LTemplateInstruction<1, 1, 0> {
};
-class LClampTToUint8: public LTemplateInstruction<1, 1, 1> {
+class LClampTToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LClampTToUint8(LOperand* unclamped, LOperand* temp) {
inputs_[0] = unclamped;
@@ -2390,7 +2425,7 @@ class LClampTToUint8: public LTemplateInstruction<1, 1, 1> {
};
-class LAllocate: public LTemplateInstruction<1, 2, 2> {
+class LAllocate V8_FINAL : public LTemplateInstruction<1, 2, 2> {
public:
LAllocate(LOperand* size, LOperand* temp1, LOperand* temp2) {
inputs_[1] = size;
@@ -2407,21 +2442,21 @@ class LAllocate: public LTemplateInstruction<1, 2, 2> {
};
-class LRegExpLiteral: public LTemplateInstruction<1, 0, 0> {
+class LRegExpLiteral V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(RegExpLiteral, "regexp-literal")
DECLARE_HYDROGEN_ACCESSOR(RegExpLiteral)
};
-class LFunctionLiteral: public LTemplateInstruction<1, 0, 0> {
+class LFunctionLiteral V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(FunctionLiteral, "function-literal")
DECLARE_HYDROGEN_ACCESSOR(FunctionLiteral)
};
-class LToFastProperties: public LTemplateInstruction<1, 1, 0> {
+class LToFastProperties V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LToFastProperties(LOperand* value) {
inputs_[0] = value;
@@ -2434,7 +2469,7 @@ class LToFastProperties: public LTemplateInstruction<1, 1, 0> {
};
-class LTypeof: public LTemplateInstruction<1, 1, 0> {
+class LTypeof V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LTypeof(LOperand* value) {
inputs_[0] = value;
@@ -2446,7 +2481,7 @@ class LTypeof: public LTemplateInstruction<1, 1, 0> {
};
-class LTypeofIsAndBranch: public LControlInstruction<1, 0> {
+class LTypeofIsAndBranch V8_FINAL : public LControlInstruction<1, 0> {
public:
explicit LTypeofIsAndBranch(LOperand* value) {
inputs_[0] = value;
@@ -2459,11 +2494,11 @@ class LTypeofIsAndBranch: public LControlInstruction<1, 0> {
Handle<String> type_literal() { return hydrogen()->type_literal(); }
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LIsConstructCallAndBranch: public LControlInstruction<0, 1> {
+class LIsConstructCallAndBranch V8_FINAL : public LControlInstruction<0, 1> {
public:
explicit LIsConstructCallAndBranch(LOperand* temp) {
temps_[0] = temp;
@@ -2476,16 +2511,18 @@ class LIsConstructCallAndBranch: public LControlInstruction<0, 1> {
};
-class LOsrEntry: public LTemplateInstruction<0, 0, 0> {
+class LOsrEntry V8_FINAL : public LTemplateInstruction<0, 0, 0> {
public:
LOsrEntry() {}
- virtual bool HasInterestingComment(LCodeGen* gen) const { return false; }
+ virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+ return false;
+ }
DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry")
};
-class LStackCheck: public LTemplateInstruction<0, 0, 0> {
+class LStackCheck V8_FINAL : public LTemplateInstruction<0, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(StackCheck, "stack-check")
DECLARE_HYDROGEN_ACCESSOR(StackCheck)
@@ -2497,7 +2534,7 @@ class LStackCheck: public LTemplateInstruction<0, 0, 0> {
};
-class LForInPrepareMap: public LTemplateInstruction<1, 1, 0> {
+class LForInPrepareMap V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LForInPrepareMap(LOperand* object) {
inputs_[0] = object;
@@ -2509,7 +2546,7 @@ class LForInPrepareMap: public LTemplateInstruction<1, 1, 0> {
};
-class LForInCacheArray: public LTemplateInstruction<1, 1, 0> {
+class LForInCacheArray V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LForInCacheArray(LOperand* map) {
inputs_[0] = map;
@@ -2525,7 +2562,7 @@ class LForInCacheArray: public LTemplateInstruction<1, 1, 0> {
};
-class LCheckMapValue: public LTemplateInstruction<0, 2, 0> {
+class LCheckMapValue V8_FINAL : public LTemplateInstruction<0, 2, 0> {
public:
LCheckMapValue(LOperand* value, LOperand* map) {
inputs_[0] = value;
@@ -2539,7 +2576,7 @@ class LCheckMapValue: public LTemplateInstruction<0, 2, 0> {
};
-class LLoadFieldByIndex: public LTemplateInstruction<1, 2, 0> {
+class LLoadFieldByIndex V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LLoadFieldByIndex(LOperand* object, LOperand* index) {
inputs_[0] = object;
@@ -2554,7 +2591,7 @@ class LLoadFieldByIndex: public LTemplateInstruction<1, 2, 0> {
class LChunkBuilder;
-class LPlatformChunk: public LChunk {
+class LPlatformChunk V8_FINAL : public LChunk {
public:
LPlatformChunk(CompilationInfo* info, HGraph* graph)
: LChunk(info, graph) { }
@@ -2564,7 +2601,7 @@ class LPlatformChunk: public LChunk {
};
-class LChunkBuilder BASE_EMBEDDED {
+class LChunkBuilder V8_FINAL BASE_EMBEDDED {
public:
LChunkBuilder(CompilationInfo* info, HGraph* graph, LAllocator* allocator)
: chunk_(NULL),
diff --git a/deps/v8/src/mips/macro-assembler-mips.cc b/deps/v8/src/mips/macro-assembler-mips.cc
index 5becf7c37..a85b0d803 100644
--- a/deps/v8/src/mips/macro-assembler-mips.cc
+++ b/deps/v8/src/mips/macro-assembler-mips.cc
@@ -1298,60 +1298,6 @@ void MacroAssembler::Clz(Register rd, Register rs) {
}
-// Tries to get a signed int32 out of a double precision floating point heap
-// number. Rounds towards 0. Branch to 'not_int32' if the double is out of the
-// 32bits signed integer range.
-// This method implementation differs from the ARM version for performance
-// reasons.
-void MacroAssembler::ConvertToInt32(Register source,
- Register dest,
- Register scratch,
- Register scratch2,
- FPURegister double_scratch,
- Label *not_int32) {
- Label right_exponent, done;
- // Get exponent word (ENDIAN issues).
- lw(scratch, FieldMemOperand(source, HeapNumber::kExponentOffset));
- // Get exponent alone in scratch2.
- And(scratch2, scratch, Operand(HeapNumber::kExponentMask));
- // Load dest with zero. We use this either for the final shift or
- // for the answer.
- mov(dest, zero_reg);
- // Check whether the exponent matches a 32 bit signed int that is not a Smi.
- // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased). This is
- // the exponent that we are fastest at and also the highest exponent we can
- // handle here.
- const uint32_t non_smi_exponent =
- (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
- // If we have a match of the int32-but-not-Smi exponent then skip some logic.
- Branch(&right_exponent, eq, scratch2, Operand(non_smi_exponent));
- // If the exponent is higher than that then go to not_int32 case. This
- // catches numbers that don't fit in a signed int32, infinities and NaNs.
- Branch(not_int32, gt, scratch2, Operand(non_smi_exponent));
-
- // We know the exponent is smaller than 30 (biased). If it is less than
- // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, i.e.
- // it rounds to zero.
- const uint32_t zero_exponent =
- (HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift;
- Subu(scratch2, scratch2, Operand(zero_exponent));
- // Dest already has a Smi zero.
- Branch(&done, lt, scratch2, Operand(zero_reg));
- bind(&right_exponent);
-
- // MIPS FPU instructions implementing double precision to integer
- // conversion using round to zero. Since the FP value was qualified
- // above, the resulting integer should be a legal int32.
- // The original 'Exponent' word is still in scratch.
- lwc1(double_scratch, FieldMemOperand(source, HeapNumber::kMantissaOffset));
- mtc1(scratch, FPURegister::from_code(double_scratch.code() + 1));
- trunc_w_d(double_scratch, double_scratch);
- mfc1(dest, double_scratch);
-
- bind(&done);
-}
-
-
void MacroAssembler::EmitFPUTruncate(FPURoundingMode rounding_mode,
Register result,
DoubleRegister double_input,
@@ -1416,104 +1362,12 @@ void MacroAssembler::EmitFPUTruncate(FPURoundingMode rounding_mode,
}
-void MacroAssembler::EmitOutOfInt32RangeTruncate(Register result,
- Register input_high,
- Register input_low,
- Register scratch) {
- Label done, normal_exponent, restore_sign;
- // Extract the biased exponent in result.
- Ext(result,
- input_high,
- HeapNumber::kExponentShift,
- HeapNumber::kExponentBits);
-
- // Check for Infinity and NaNs, which should return 0.
- Subu(scratch, result, HeapNumber::kExponentMask);
- Movz(result, zero_reg, scratch);
- Branch(&done, eq, scratch, Operand(zero_reg));
-
- // Express exponent as delta to (number of mantissa bits + 31).
- Subu(result,
- result,
- Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 31));
-
- // If the delta is strictly positive, all bits would be shifted away,
- // which means that we can return 0.
- Branch(&normal_exponent, le, result, Operand(zero_reg));
- mov(result, zero_reg);
- Branch(&done);
-
- bind(&normal_exponent);
- const int kShiftBase = HeapNumber::kNonMantissaBitsInTopWord - 1;
- // Calculate shift.
- Addu(scratch, result, Operand(kShiftBase + HeapNumber::kMantissaBits));
-
- // Save the sign.
- Register sign = result;
- result = no_reg;
- And(sign, input_high, Operand(HeapNumber::kSignMask));
-
- // On ARM shifts > 31 bits are valid and will result in zero. On MIPS we need
- // to check for this specific case.
- Label high_shift_needed, high_shift_done;
- Branch(&high_shift_needed, lt, scratch, Operand(32));
- mov(input_high, zero_reg);
- Branch(&high_shift_done);
- bind(&high_shift_needed);
-
- // Set the implicit 1 before the mantissa part in input_high.
- Or(input_high,
- input_high,
- Operand(1 << HeapNumber::kMantissaBitsInTopWord));
- // Shift the mantissa bits to the correct position.
- // We don't need to clear non-mantissa bits as they will be shifted away.
- // If they weren't, it would mean that the answer is in the 32bit range.
- sllv(input_high, input_high, scratch);
-
- bind(&high_shift_done);
-
- // Replace the shifted bits with bits from the lower mantissa word.
- Label pos_shift, shift_done;
- li(at, 32);
- subu(scratch, at, scratch);
- Branch(&pos_shift, ge, scratch, Operand(zero_reg));
-
- // Negate scratch.
- Subu(scratch, zero_reg, scratch);
- sllv(input_low, input_low, scratch);
- Branch(&shift_done);
-
- bind(&pos_shift);
- srlv(input_low, input_low, scratch);
-
- bind(&shift_done);
- Or(input_high, input_high, Operand(input_low));
- // Restore sign if necessary.
- mov(scratch, sign);
- result = sign;
- sign = no_reg;
- Subu(result, zero_reg, input_high);
- Movz(result, input_high, scratch);
- bind(&done);
-}
-
-
-void MacroAssembler::EmitECMATruncate(Register result,
- FPURegister double_input,
- FPURegister single_scratch,
- Register scratch,
- Register scratch2,
- Register scratch3) {
- ASSERT(!scratch2.is(result));
- ASSERT(!scratch3.is(result));
- ASSERT(!scratch3.is(scratch2));
- ASSERT(!scratch.is(result) &&
- !scratch.is(scratch2) &&
- !scratch.is(scratch3));
- ASSERT(!single_scratch.is(double_input));
-
- Label done;
- Label manual;
+void MacroAssembler::TryInlineTruncateDoubleToI(Register result,
+ DoubleRegister double_input,
+ Label* done) {
+ DoubleRegister single_scratch = kLithiumScratchDouble.low();
+ Register scratch = at;
+ Register scratch2 = t9;
// Clear cumulative exception flags and save the FCSR.
cfc1(scratch2, FCSR);
@@ -1529,16 +1383,66 @@ void MacroAssembler::EmitECMATruncate(Register result,
scratch,
kFCSROverflowFlagMask | kFCSRUnderflowFlagMask | kFCSRInvalidOpFlagMask);
// If we had no exceptions we are done.
- Branch(&done, eq, scratch, Operand(zero_reg));
-
- // Load the double value and perform a manual truncation.
- Register input_high = scratch2;
- Register input_low = scratch3;
- Move(input_low, input_high, double_input);
- EmitOutOfInt32RangeTruncate(result,
- input_high,
- input_low,
- scratch);
+ Branch(done, eq, scratch, Operand(zero_reg));
+}
+
+
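+
TryInlineTruncateDoubleToI is the fast path: truncate on the FPU, then test the cumulative FCSR flags and only fall through to the generic stub when overflow, underflow, or an invalid operation fired. The same truncate-then-test-flags idea in portable C++ with <cfenv> (assuming FE_INVALID plays the role of the FCSR invalid-operation bit; an illustration, not the generated MIPS code):

    #include <cfenv>
    #include <cmath>
    #include <cstdio>

    // Returns false when the value saturates, mirroring the fall-through
    // into DoubleToIStub. (Strict FENV_ACCESS handling is elided.)
    bool TryTruncateToInt(double in, long* out) {
      std::fesetround(FE_TOWARDZERO);     // kRoundToZero equivalent.
      std::feclearexcept(FE_ALL_EXCEPT);  // Clear cumulative flags first.
      long r = std::lrint(in);            // Rounds via the current mode.
      if (std::fetestexcept(FE_INVALID)) return false;  // Out of range.
      *out = r;
      return true;
    }

    int main() {
      long v = 0;
      std::printf("%s\n", TryTruncateToInt(12.9, &v) ? "inline" : "stub");   // inline
      std::printf("%s\n", TryTruncateToInt(1e300, &v) ? "inline" : "stub");  // stub
      return 0;
    }
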
+void MacroAssembler::TruncateDoubleToI(Register result,
+ DoubleRegister double_input) {
+ Label done;
+
+ TryInlineTruncateDoubleToI(result, double_input, &done);
+
+ // If we fell through, the inline version didn't succeed; call the stub.
+ push(ra);
+ Subu(sp, sp, Operand(kDoubleSize)); // Put input on stack.
+ sdc1(double_input, MemOperand(sp, 0));
+
+ DoubleToIStub stub(sp, result, 0, true, true);
+ CallStub(&stub);
+
+ Addu(sp, sp, Operand(kDoubleSize));
+ pop(ra);
+
+ bind(&done);
+}
+
+
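+
When the inline path falls through, DoubleToIStub finishes the ECMA-262 ToInt32 conversion, which truncates toward zero and wraps modulo 2^32 instead of saturating. A compact reference model of those semantics (plain C++ stating the spec behaviour, not the stub's register-level code):

    #include <cmath>
    #include <cstdint>
    #include <cstdio>

    int32_t ToInt32(double d) {
      if (std::isnan(d) || std::isinf(d)) return 0;  // NaN and Inf map to 0.
      double t = std::trunc(d);                      // Round toward zero.
      double m = std::fmod(t, 4294967296.0);         // Wrap modulo 2^32...
      if (m < 0) m += 4294967296.0;                  // ...into [0, 2^32).
      return static_cast<int32_t>(static_cast<uint32_t>(m));
    }

    int main() {
      std::printf("%d\n", ToInt32(4294967301.0));  // 5 (wrapped, not clamped)
      std::printf("%d\n", ToInt32(-1.9));          // -1 (truncated toward 0)
      return 0;
    }
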
+void MacroAssembler::TruncateHeapNumberToI(Register result, Register object) {
+ Label done;
+ DoubleRegister double_scratch = f12;
+ ASSERT(!result.is(object));
+
+ ldc1(double_scratch,
+ MemOperand(object, HeapNumber::kValueOffset - kHeapObjectTag));
+ TryInlineTruncateDoubleToI(result, double_scratch, &done);
+
+ // If we fell through, the inline version didn't succeed; call the stub.
+ push(ra);
+ DoubleToIStub stub(object,
+ result,
+ HeapNumber::kValueOffset - kHeapObjectTag,
+ true,
+ true);
+ CallStub(&stub);
+ pop(ra);
+
+ bind(&done);
+}
+
+
+void MacroAssembler::TruncateNumberToI(Register object,
+ Register result,
+ Register heap_number_map,
+ Register scratch,
+ Label* not_number) {
+ Label done;
+ ASSERT(!result.is(object));
+
+ UntagAndJumpIfSmi(result, object, &done);
+ JumpIfNotHeapNumber(object, heap_number_map, scratch, not_number);
+ TruncateHeapNumberToI(result, object);
+
bind(&done);
}
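
TruncateNumberToI dispatches on the tag first: a smi untags straight into the result, a heap number takes TruncateHeapNumberToI, and anything else branches to not_number. The 32-bit smi scheme behind UntagAndJumpIfSmi keeps the integer doubled with a zero low bit (a simplified model; the remaining pointer-tagging details are assumed away):

    #include <cstdint>
    #include <cstdio>

    bool IsSmi(intptr_t tagged) { return (tagged & 1) == 0; }  // Low bit 0.
    intptr_t SmiTag(int32_t v) {
      return static_cast<intptr_t>(v) * 2;  // Same as << 1, defined for v < 0.
    }
    int32_t SmiUntag(intptr_t tagged) {
      return static_cast<int32_t>(tagged >> 1);  // Arithmetic shift right.
    }

    int main() {
      intptr_t tagged = SmiTag(-7);
      if (IsSmi(tagged)) std::printf("%d\n", SmiUntag(tagged));  // -7
      return 0;
    }
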
@@ -3266,7 +3170,7 @@ void MacroAssembler::AllocateHeapNumber(Register result,
tagging_mode == TAG_RESULT ? TAG_OBJECT : NO_ALLOCATION_FLAGS);
// Store heap number map in the allocated object.
- AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+ AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
if (tagging_mode == TAG_RESULT) {
sw(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
} else {
@@ -3428,7 +3332,6 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
Register scratch1,
Register scratch2,
Register scratch3,
- Register scratch4,
Label* fail,
int elements_offset) {
Label smi_value, maybe_nan, have_double_value, is_nan, done;
@@ -3485,25 +3388,11 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
Addu(scratch1, scratch1, scratch2);
// scratch1 is now effective address of the double element
- FloatingPointHelper::Destination destination;
- destination = FloatingPointHelper::kFPURegisters;
-
Register untagged_value = elements_reg;
SmiUntag(untagged_value, value_reg);
- FloatingPointHelper::ConvertIntToDouble(this,
- untagged_value,
- destination,
- f0,
- mantissa_reg,
- exponent_reg,
- scratch4,
- f2);
- if (destination == FloatingPointHelper::kFPURegisters) {
- sdc1(f0, MemOperand(scratch1, 0));
- } else {
- sw(mantissa_reg, MemOperand(scratch1, 0));
- sw(exponent_reg, MemOperand(scratch1, Register::kSizeInBytes));
- }
+ mtc1(untagged_value, f2);
+ cvt_d_w(f0, f2);
+ sdc1(f0, MemOperand(scratch1, 0));
bind(&done);
}
@@ -3963,7 +3852,6 @@ void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function,
ExternalReference thunk_ref,
Register thunk_last_arg,
int stack_space,
- bool returns_handle,
int return_value_offset_from_fp) {
ExternalReference next_address =
ExternalReference::handle_scope_next_address(isolate());
@@ -3992,14 +3880,6 @@ void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function,
PopSafepointRegisters();
}
- // The O32 ABI requires us to pass a pointer in a0 where the returned struct
- // (4 bytes) will be placed. This is also built into the Simulator.
- // Set up the pointer to the returned value (a0). It was allocated in
- // EnterExitFrame.
- if (returns_handle) {
- addiu(a0, fp, ExitFrameConstants::kStackSpaceOffset);
- }
-
Label profiler_disabled;
Label end_profiler_check;
bool* is_profiling_flag =
@@ -4039,19 +3919,6 @@ void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function,
Label leave_exit_frame;
Label return_value_loaded;
- if (returns_handle) {
- Label load_return_value;
-
- // As mentioned above, on MIPS a pointer is returned - we need to
- // dereference it to get the actual return value (which is also a pointer).
- lw(v0, MemOperand(v0));
-
- Branch(&load_return_value, eq, v0, Operand(zero_reg));
- // Dereference returned value.
- lw(v0, MemOperand(v0));
- Branch(&return_value_loaded);
- bind(&load_return_value);
- }
// Load value from ReturnValue.
lw(v0, MemOperand(fp, return_value_offset_from_fp*kPointerSize));
bind(&return_value_loaded);
@@ -4422,15 +4289,6 @@ void MacroAssembler::Assert(Condition cc, BailoutReason reason,
}
-void MacroAssembler::AssertRegisterIsRoot(Register reg,
- Heap::RootListIndex index) {
- if (emit_debug_code()) {
- LoadRoot(at, index);
- Check(eq, kRegisterDidNotMatchExpectedRoot, reg, Operand(at));
- }
-}
-
-
void MacroAssembler::AssertFastElements(Register elements) {
if (emit_debug_code()) {
ASSERT(!elements.is(at));
@@ -4477,6 +4335,11 @@ void MacroAssembler::Abort(BailoutReason reason) {
RecordComment("Abort message: ");
RecordComment(msg);
}
+
+ if (FLAG_trap_on_abort) {
+ stop(msg);
+ return;
+ }
#endif
li(a0, Operand(p0));
@@ -4618,6 +4481,116 @@ void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
}
+void MacroAssembler::LoadNumber(Register object,
+ FPURegister dst,
+ Register heap_number_map,
+ Register scratch,
+ Label* not_number) {
+ Label is_smi, done;
+
+ UntagAndJumpIfSmi(scratch, object, &is_smi);
+ JumpIfNotHeapNumber(object, heap_number_map, scratch, not_number);
+
+ ldc1(dst, FieldMemOperand(object, HeapNumber::kValueOffset));
+ Branch(&done);
+
+ bind(&is_smi);
+ mtc1(scratch, dst);
+ cvt_d_w(dst, dst);
+
+ bind(&done);
+}
+
+
+void MacroAssembler::LoadNumberAsInt32Double(Register object,
+ DoubleRegister double_dst,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ FPURegister double_scratch,
+ Label* not_int32) {
+ ASSERT(!scratch1.is(object) && !scratch2.is(object));
+ ASSERT(!scratch1.is(scratch2));
+ ASSERT(!heap_number_map.is(object) &&
+ !heap_number_map.is(scratch1) &&
+ !heap_number_map.is(scratch2));
+
+ Label done, obj_is_not_smi;
+
+ UntagAndJumpIfNotSmi(scratch1, object, &obj_is_not_smi);
+ mtc1(scratch1, double_scratch);
+ cvt_d_w(double_dst, double_scratch);
+ Branch(&done);
+
+ bind(&obj_is_not_smi);
+ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);
+
+ // Load the number.
+ // Load the double value.
+ ldc1(double_dst, FieldMemOperand(object, HeapNumber::kValueOffset));
+
+ Register except_flag = scratch2;
+ EmitFPUTruncate(kRoundToZero,
+ scratch1,
+ double_dst,
+ at,
+ double_scratch,
+ except_flag,
+ kCheckForInexactConversion);
+
+ // Jump to not_int32 if the operation did not succeed.
+ Branch(not_int32, ne, except_flag, Operand(zero_reg));
+ bind(&done);
+}
+
+
+void MacroAssembler::LoadNumberAsInt32(Register object,
+ Register dst,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ FPURegister double_scratch0,
+ FPURegister double_scratch1,
+ Label* not_int32) {
+ ASSERT(!dst.is(object));
+ ASSERT(!scratch1.is(object) && !scratch2.is(object));
+ ASSERT(!scratch1.is(scratch2));
+
+ Label done, maybe_undefined;
+
+ UntagAndJumpIfSmi(dst, object, &done);
+
+ JumpIfNotHeapNumber(object, heap_number_map, scratch1, &maybe_undefined);
+
+ // Object is a heap number.
+ // Convert the floating point value to a 32-bit integer.
+ // Load the double value.
+ ldc1(double_scratch0, FieldMemOperand(object, HeapNumber::kValueOffset));
+
+ Register except_flag = scratch2;
+ EmitFPUTruncate(kRoundToZero,
+ dst,
+ double_scratch0,
+ scratch1,
+ double_scratch1,
+ except_flag,
+ kCheckForInexactConversion);
+
+ // Jump to not_int32 if the operation did not succeed.
+ Branch(not_int32, ne, except_flag, Operand(zero_reg));
+ Branch(&done);
+
+ bind(&maybe_undefined);
+ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+ Branch(not_int32, ne, object, Operand(at));
+ // |undefined| is truncated to 0.
+ li(dst, Operand(Smi::FromInt(0)));
+ // Fall through.
+
+ bind(&done);
+}
+
+
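+
LoadNumberAsInt32Double and LoadNumberAsInt32 both reject heap numbers that are not exactly representable as a 32-bit integer: they truncate with kCheckForInexactConversion and branch to not_int32 on any exception flag. A portable model of that exactness test (an illustration, assuming a round-trip comparison captures the same condition as the FPU flags):

    #include <cmath>
    #include <cstdint>
    #include <cstdio>

    // Succeeds only when the double is exactly an int32, as the
    // kCheckForInexactConversion + except_flag pattern requires.
    bool LoadAsInt32(double d, int32_t* out) {
      double t = std::trunc(d);
      if (t != d) return false;  // Inexact (also rejects NaN: NaN != NaN).
      if (t < -2147483648.0 || t > 2147483647.0) return false;  // Range.
      *out = static_cast<int32_t>(t);
      return true;
    }

    int main() {
      int32_t v = 0;
      std::printf("%s\n", LoadAsInt32(3.0, &v) ? "ok" : "not_int32");  // ok
      std::printf("%s\n", LoadAsInt32(3.5, &v) ? "ok" : "not_int32");  // not_int32
      return 0;
    }
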
void MacroAssembler::EnterFrame(StackFrame::Type type) {
addiu(sp, sp, -5 * kPointerSize);
li(t8, Operand(Smi::FromInt(type)));
@@ -4937,13 +4910,11 @@ void MacroAssembler::AssertName(Register object) {
}
-void MacroAssembler::AssertRootValue(Register src,
- Heap::RootListIndex root_value_index,
- BailoutReason reason) {
+void MacroAssembler::AssertIsRoot(Register reg, Heap::RootListIndex index) {
if (emit_debug_code()) {
- ASSERT(!src.is(at));
- LoadRoot(at, root_value_index);
- Check(eq, reason, src, Operand(at));
+ ASSERT(!reg.is(at));
+ LoadRoot(at, index);
+ Check(eq, kHeapNumberMapRegisterClobbered, reg, Operand(at));
}
}
@@ -4953,7 +4924,7 @@ void MacroAssembler::JumpIfNotHeapNumber(Register object,
Register scratch,
Label* on_not_heap_number) {
lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
- AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+ AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
Branch(on_not_heap_number, ne, scratch, Operand(heap_number_map));
}
@@ -5541,6 +5512,30 @@ void MacroAssembler::TestJSArrayForAllocationMemento(
}
+Register GetRegisterThatIsNotOneOf(Register reg1,
+ Register reg2,
+ Register reg3,
+ Register reg4,
+ Register reg5,
+ Register reg6) {
+ RegList regs = 0;
+ if (reg1.is_valid()) regs |= reg1.bit();
+ if (reg2.is_valid()) regs |= reg2.bit();
+ if (reg3.is_valid()) regs |= reg3.bit();
+ if (reg4.is_valid()) regs |= reg4.bit();
+ if (reg5.is_valid()) regs |= reg5.bit();
+ if (reg6.is_valid()) regs |= reg6.bit();
+
+ for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
+ Register candidate = Register::FromAllocationIndex(i);
+ if (regs & candidate.bit()) continue;
+ return candidate;
+ }
+ UNREACHABLE();
+ return no_reg;
+}
+
+
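+
GetRegisterThatIsNotOneOf walks the allocatable set for a register whose bit is not pinned by any of up to six arguments, letting helpers grab an ad-hoc scratch register without threading one through every signature. A reduced model of the bitmask walk (stand-in Register type; the real allocation-index ordering is assumed away):

    #include <cassert>
    #include <cstdint>

    struct Reg {
      int code;  // -1 models V8's no_reg sentinel.
      bool is_valid() const { return code >= 0; }
      uint32_t bit() const { return 1u << code; }
    };

    Reg FirstRegNotOneOf(Reg r1, Reg r2 = {-1}, Reg r3 = {-1}) {
      uint32_t pinned = 0;
      if (r1.is_valid()) pinned |= r1.bit();
      if (r2.is_valid()) pinned |= r2.bit();
      if (r3.is_valid()) pinned |= r3.bit();
      for (int i = 0; i < 32; ++i) {  // Walk the candidates in order.
        if (!(pinned & (1u << i))) return Reg{i};
      }
      return Reg{-1};  // Unreachable when fewer than 32 registers are pinned.
    }

    int main() {
      Reg c = FirstRegNotOneOf(Reg{0}, Reg{1});
      assert(c.code == 2);  // First candidate not pinned by the arguments.
      return 0;
    }
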
bool AreAliased(Register r1, Register r2, Register r3, Register r4) {
if (r1.is(r2)) return true;
if (r1.is(r3)) return true;
diff --git a/deps/v8/src/mips/macro-assembler-mips.h b/deps/v8/src/mips/macro-assembler-mips.h
index 3b3cfdb8a..75ded8849 100644
--- a/deps/v8/src/mips/macro-assembler-mips.h
+++ b/deps/v8/src/mips/macro-assembler-mips.h
@@ -90,6 +90,13 @@ enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
enum RAStatus { kRAHasNotBeenSaved, kRAHasBeenSaved };
+Register GetRegisterThatIsNotOneOf(Register reg1,
+ Register reg2 = no_reg,
+ Register reg3 = no_reg,
+ Register reg4 = no_reg,
+ Register reg5 = no_reg,
+ Register reg6 = no_reg);
+
bool AreAliased(Register r1, Register r2, Register r3, Register r4);
@@ -751,17 +758,6 @@ class MacroAssembler: public Assembler {
BranchF(target, nan, cc, cmp1, cmp2, bd);
};
- // Convert the HeapNumber pointed to by source to a 32bits signed integer
- // dest. If the HeapNumber does not fit into a 32bits signed integer branch
- // to not_int32 label. If FPU is available double_scratch is used but not
- // scratch2.
- void ConvertToInt32(Register source,
- Register dest,
- Register scratch,
- Register scratch2,
- FPURegister double_scratch,
- Label *not_int32);
-
// Truncates a double using a specific rounding mode, and writes the value
// to the result register.
// The except_flag will contain any exceptions caused by the instruction.
@@ -776,26 +772,71 @@ class MacroAssembler: public Assembler {
CheckForInexactConversion check_inexact
= kDontCheckForInexactConversion);
- // Helper for EmitECMATruncate.
- // This will truncate a floating-point value outside of the singed 32bit
- // integer range to a 32bit signed integer.
- // Expects the double value loaded in input_high and input_low.
- // Exits with the answer in 'result'.
- // Note that this code does not work for values in the 32bit range!
- void EmitOutOfInt32RangeTruncate(Register result,
- Register input_high,
- Register input_low,
- Register scratch);
+  // Performs a truncating conversion of a floating point number as used by
+  // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Jumps to 'done' if
+  // it succeeds; otherwise falls through when the result saturates. On return,
+  // 'result' either holds the answer or is clobbered on the fall-through path.
+  //
+  // Only public for the code-stub test code.
+ void TryInlineTruncateDoubleToI(Register result,
+ DoubleRegister input,
+ Label* done);
// Performs a truncating conversion of a floating point number as used by
// the JS bitwise operations. See ECMA-262 9.5: ToInt32.
- // Exits with 'result' holding the answer and all other registers clobbered.
- void EmitECMATruncate(Register result,
- FPURegister double_input,
- FPURegister single_scratch,
- Register scratch,
- Register scratch2,
- Register scratch3);
+ // Exits with 'result' holding the answer.
+ void TruncateDoubleToI(Register result, DoubleRegister double_input);
+
+ // Performs a truncating conversion of a heap number as used by
+  // the JS bitwise operations. See ECMA-262 9.5: ToInt32. 'result' and 'object'
+  // must be different registers. Exits with 'result' holding the answer.
+ void TruncateHeapNumberToI(Register result, Register object);
+
+ // Converts the smi or heap number in object to an int32 using the rules
+ // for ToInt32 as described in ECMAScript 9.5.: the value is truncated
+  // and brought into the range -2^31 .. +2^31 - 1. 'result' and 'object' must be
+ // different registers.
+ void TruncateNumberToI(Register object,
+ Register result,
+ Register heap_number_map,
+ Register scratch,
+ Label* not_int32);
+
+  // Loads the number from object into the dst register.
+  // If |object| is neither a smi nor a heap number, control jumps to
+  // |not_number| with |object| still intact.
+ void LoadNumber(Register object,
+ FPURegister dst,
+ Register heap_number_map,
+ Register scratch,
+ Label* not_number);
+
+ // Loads the number from object into double_dst in the double format.
+ // Control will jump to not_int32 if the value cannot be exactly represented
+ // by a 32-bit integer.
+  // Floating point values in the 32-bit integer range that are not exact
+  // integers won't be loaded.
+ void LoadNumberAsInt32Double(Register object,
+ DoubleRegister double_dst,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ FPURegister double_scratch,
+ Label* not_int32);
+
+ // Loads the number from object into dst as a 32-bit integer.
+ // Control will jump to not_int32 if the object cannot be exactly represented
+ // by a 32-bit integer.
+  // Floating point values in the 32-bit integer range that are not exact
+  // integers won't be converted.
+ void LoadNumberAsInt32(Register object,
+ Register dst,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ FPURegister double_scratch0,
+ FPURegister double_scratch1,
+ Label* not_int32);
// Enter exit frame.
// argc - argument count to be dropped by LeaveExitFrame.
@@ -986,16 +1027,13 @@ class MacroAssembler: public Assembler {
// Check to see if maybe_number can be stored as a double in
// FastDoubleElements. If it can, store it at the index specified by key in
- // the FastDoubleElements array elements. Otherwise jump to fail, in which
- // case scratch2, scratch3 and scratch4 are unmodified.
+ // the FastDoubleElements array elements. Otherwise jump to fail.
void StoreNumberToDoubleElements(Register value_reg,
Register key_reg,
- // All regs below here overwritten.
Register elements_reg,
Register scratch1,
Register scratch2,
Register scratch3,
- Register scratch4,
Label* fail,
int elements_offset = 0);
@@ -1233,7 +1271,6 @@ class MacroAssembler: public Assembler {
ExternalReference thunk_ref,
Register thunk_last_arg,
int stack_space,
- bool returns_handle,
int return_value_offset_from_fp);
// Jump to the builtin routine.
@@ -1281,7 +1318,6 @@ class MacroAssembler: public Assembler {
// Calls Abort(msg) if the condition cc is not satisfied.
// Use --debug_code to enable.
void Assert(Condition cc, BailoutReason reason, Register rs, Operand rt);
- void AssertRegisterIsRoot(Register reg, Heap::RootListIndex index);
void AssertFastElements(Register elements);
// Like Assert(), but always enabled.
@@ -1368,11 +1404,9 @@ class MacroAssembler: public Assembler {
// Abort execution if argument is not a name, enabled via --debug-code.
void AssertName(Register object);
- // Abort execution if argument is not the root value with the given index,
+ // Abort execution if reg is not the root value with the given index,
// enabled via --debug-code.
- void AssertRootValue(Register src,
- Heap::RootListIndex root_value_index,
- BailoutReason reason);
+ void AssertIsRoot(Register reg, Heap::RootListIndex index);
// ---------------------------------------------------------------------------
// HeapNumber utilities.
diff --git a/deps/v8/src/mips/regexp-macro-assembler-mips.cc b/deps/v8/src/mips/regexp-macro-assembler-mips.cc
index 7b67a7b47..1a04fd102 100644
--- a/deps/v8/src/mips/regexp-macro-assembler-mips.cc
+++ b/deps/v8/src/mips/regexp-macro-assembler-mips.cc
@@ -882,7 +882,7 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
masm_->GetCode(&code_desc);
Handle<Code> code = isolate()->factory()->NewCode(
code_desc, Code::ComputeFlags(Code::REGEXP), masm_->CodeObject());
- LOG(Isolate::Current(), RegExpCodeCreateEvent(*code, *source));
+ LOG(masm_->isolate(), RegExpCodeCreateEvent(*code, *source));
return Handle<HeapObject>::cast(code);
}
@@ -1086,7 +1086,6 @@ int RegExpMacroAssemblerMIPS::CheckStackGuardState(Address* return_address,
Code* re_code,
Address re_frame) {
Isolate* isolate = frame_entry<Isolate*>(re_frame, kIsolate);
- ASSERT(isolate == Isolate::Current());
if (isolate->stack_guard()->IsStackOverflow()) {
isolate->StackOverflow();
return EXCEPTION;
diff --git a/deps/v8/src/mips/simulator-mips.cc b/deps/v8/src/mips/simulator-mips.cc
index 914a75866..ea8b65948 100644
--- a/deps/v8/src/mips/simulator-mips.cc
+++ b/deps/v8/src/mips/simulator-mips.cc
@@ -1387,27 +1387,12 @@ typedef double (*SimulatorRuntimeFPIntCall)(double darg0, int32_t arg0);
// This signature supports direct call in to API function native callback
// (refer to InvocationCallback in v8.h).
-// NOTE: the O32 abi requires a0 to hold a special pointer when returning a
-// struct from the function (which is currently the case). This means we pass
-// the first argument in a1 instead of a0.
-typedef v8::Handle<v8::Value> (*SimulatorRuntimeDirectApiCall)(int32_t arg0);
-// Here, we pass the first argument in a0, because this function
-// does not return a struct.
-typedef void (*SimulatorRuntimeDirectApiCallNew)(int32_t arg0);
-typedef v8::Handle<v8::Value> (*SimulatorRuntimeProfilingApiCall)(
- int32_t arg0, int32_t arg1);
-typedef void (*SimulatorRuntimeProfilingApiCallNew)(int32_t arg0, int32_t arg1);
+typedef void (*SimulatorRuntimeDirectApiCall)(int32_t arg0);
+typedef void (*SimulatorRuntimeProfilingApiCall)(int32_t arg0, int32_t arg1);
// This signature supports direct call to accessor getter callback.
-// See comment at SimulatorRuntimeDirectApiCall.
-typedef v8::Handle<v8::Value> (*SimulatorRuntimeDirectGetterCall)(int32_t arg0,
- int32_t arg1);
-// See comment at SimulatorRuntimeDirectApiCallNew.
-typedef void (*SimulatorRuntimeDirectGetterCallNew)(int32_t arg0,
- int32_t arg1);
-typedef v8::Handle<v8::Value> (*SimulatorRuntimeProfilingGetterCall)(
- int32_t arg0, int32_t arg1, int32_t arg2);
-typedef void (*SimulatorRuntimeProfilingGetterCallNew)(
+typedef void (*SimulatorRuntimeDirectGetterCall)(int32_t arg0, int32_t arg1);
+typedef void (*SimulatorRuntimeProfilingGetterCall)(
int32_t arg0, int32_t arg1, int32_t arg2);
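The deleted *_NEW variants mark the completed migration to void API callbacks: a callback now writes its result through the callback-info ReturnValue slot instead of returning a v8::Handle, so the O32 struct-return pointer in a0 is gone and the first argument moves back to a0. A minimal sketch of the new callback shape against the public v8.h of this release:

// New-style API callback: void return; the result is set on the
// ReturnValue slot carried by the callback info.
static void MyCallback(const v8::FunctionCallbackInfo<v8::Value>& info) {
  info.GetReturnValue().Set(v8::Integer::New(42));
}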
// Software interrupt instructions are used by the simulator to call into the
@@ -1553,102 +1538,41 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
break;
}
}
- } else if (
- redirection->type() == ExternalReference::DIRECT_API_CALL ||
- redirection->type() == ExternalReference::DIRECT_API_CALL_NEW) {
- if (redirection->type() == ExternalReference::DIRECT_API_CALL) {
- // See comment at type definition of SimulatorRuntimeDirectApiCall
- // for explanation of register usage.
- if (::v8::internal::FLAG_trace_sim) {
- PrintF("Call to host function at %p args %08x\n",
- reinterpret_cast<void*>(external), arg1);
- }
- SimulatorRuntimeDirectApiCall target =
- reinterpret_cast<SimulatorRuntimeDirectApiCall>(external);
- v8::Handle<v8::Value> result = target(arg1);
- *(reinterpret_cast<int*>(arg0)) = reinterpret_cast<int32_t>(*result);
- set_register(v0, arg0);
- } else {
- if (::v8::internal::FLAG_trace_sim) {
- PrintF("Call to host function at %p args %08x\n",
- reinterpret_cast<void*>(external), arg0);
- }
- SimulatorRuntimeDirectApiCallNew target =
- reinterpret_cast<SimulatorRuntimeDirectApiCallNew>(external);
- target(arg0);
+ } else if (redirection->type() == ExternalReference::DIRECT_API_CALL) {
+ if (::v8::internal::FLAG_trace_sim) {
+ PrintF("Call to host function at %p args %08x\n",
+ reinterpret_cast<void*>(external), arg0);
}
+ SimulatorRuntimeDirectApiCall target =
+ reinterpret_cast<SimulatorRuntimeDirectApiCall>(external);
+ target(arg0);
} else if (
- redirection->type() == ExternalReference::PROFILING_API_CALL ||
- redirection->type() == ExternalReference::PROFILING_API_CALL_NEW) {
- if (redirection->type() == ExternalReference::PROFILING_API_CALL) {
- // See comment at type definition of SimulatorRuntimeDirectApiCall
- // for explanation of register usage.
- if (::v8::internal::FLAG_trace_sim) {
- PrintF("Call to host function at %p args %08x %08x\n",
- reinterpret_cast<void*>(external), arg1, arg2);
- }
- SimulatorRuntimeProfilingApiCall target =
- reinterpret_cast<SimulatorRuntimeProfilingApiCall>(external);
- v8::Handle<v8::Value> result = target(arg1, arg2);
- *(reinterpret_cast<int*>(arg0)) = reinterpret_cast<int32_t>(*result);
- set_register(v0, arg0);
- } else {
- if (::v8::internal::FLAG_trace_sim) {
- PrintF("Call to host function at %p args %08x %08x\n",
- reinterpret_cast<void*>(external), arg0, arg1);
- }
- SimulatorRuntimeProfilingApiCallNew target =
- reinterpret_cast<SimulatorRuntimeProfilingApiCallNew>(external);
- target(arg0, arg1);
+ redirection->type() == ExternalReference::PROFILING_API_CALL) {
+ if (::v8::internal::FLAG_trace_sim) {
+ PrintF("Call to host function at %p args %08x %08x\n",
+ reinterpret_cast<void*>(external), arg0, arg1);
}
+ SimulatorRuntimeProfilingApiCall target =
+ reinterpret_cast<SimulatorRuntimeProfilingApiCall>(external);
+ target(arg0, arg1);
} else if (
- redirection->type() == ExternalReference::DIRECT_GETTER_CALL ||
- redirection->type() == ExternalReference::DIRECT_GETTER_CALL_NEW) {
- if (redirection->type() == ExternalReference::DIRECT_GETTER_CALL) {
- // See comment at type definition of SimulatorRuntimeDirectGetterCall
- // for explanation of register usage.
- if (::v8::internal::FLAG_trace_sim) {
- PrintF("Call to host function at %p args %08x %08x\n",
- reinterpret_cast<void*>(external), arg1, arg2);
- }
- SimulatorRuntimeDirectGetterCall target =
- reinterpret_cast<SimulatorRuntimeDirectGetterCall>(external);
- v8::Handle<v8::Value> result = target(arg1, arg2);
- *(reinterpret_cast<int*>(arg0)) = reinterpret_cast<int32_t>(*result);
- set_register(v0, arg0);
- } else {
- if (::v8::internal::FLAG_trace_sim) {
- PrintF("Call to host function at %p args %08x %08x\n",
- reinterpret_cast<void*>(external), arg0, arg1);
- }
- SimulatorRuntimeDirectGetterCallNew target =
- reinterpret_cast<SimulatorRuntimeDirectGetterCallNew>(external);
- target(arg0, arg1);
+ redirection->type() == ExternalReference::DIRECT_GETTER_CALL) {
+ if (::v8::internal::FLAG_trace_sim) {
+ PrintF("Call to host function at %p args %08x %08x\n",
+ reinterpret_cast<void*>(external), arg0, arg1);
}
+ SimulatorRuntimeDirectGetterCall target =
+ reinterpret_cast<SimulatorRuntimeDirectGetterCall>(external);
+ target(arg0, arg1);
} else if (
- redirection->type() == ExternalReference::PROFILING_GETTER_CALL ||
- redirection->type() == ExternalReference::PROFILING_GETTER_CALL_NEW) {
- if (redirection->type() == ExternalReference::PROFILING_GETTER_CALL) {
- // See comment at type definition of SimulatorRuntimeProfilingGetterCall
- // for explanation of register usage.
- if (::v8::internal::FLAG_trace_sim) {
- PrintF("Call to host function at %p args %08x %08x %08x\n",
- reinterpret_cast<void*>(external), arg1, arg2, arg3);
- }
- SimulatorRuntimeProfilingGetterCall target =
- reinterpret_cast<SimulatorRuntimeProfilingGetterCall>(external);
- v8::Handle<v8::Value> result = target(arg1, arg2, arg3);
- *(reinterpret_cast<int*>(arg0)) = reinterpret_cast<int32_t>(*result);
- set_register(v0, arg0);
- } else {
- if (::v8::internal::FLAG_trace_sim) {
- PrintF("Call to host function at %p args %08x %08x %08x\n",
- reinterpret_cast<void*>(external), arg0, arg1, arg2);
- }
- SimulatorRuntimeProfilingGetterCallNew target =
- reinterpret_cast<SimulatorRuntimeProfilingGetterCallNew>(external);
- target(arg0, arg1, arg2);
+ redirection->type() == ExternalReference::PROFILING_GETTER_CALL) {
+ if (::v8::internal::FLAG_trace_sim) {
+ PrintF("Call to host function at %p args %08x %08x %08x\n",
+ reinterpret_cast<void*>(external), arg0, arg1, arg2);
}
+ SimulatorRuntimeProfilingGetterCall target =
+ reinterpret_cast<SimulatorRuntimeProfilingGetterCall>(external);
+ target(arg0, arg1, arg2);
} else {
SimulatorRuntimeCall target =
reinterpret_cast<SimulatorRuntimeCall>(external);
diff --git a/deps/v8/src/mips/stub-cache-mips.cc b/deps/v8/src/mips/stub-cache-mips.cc
index 299ffd22b..58452cad1 100644
--- a/deps/v8/src/mips/stub-cache-mips.cc
+++ b/deps/v8/src/mips/stub-cache-mips.cc
@@ -777,16 +777,17 @@ static void PushInterceptorArguments(MacroAssembler* masm,
Register holder,
Register name,
Handle<JSObject> holder_obj) {
+ STATIC_ASSERT(StubCache::kInterceptorArgsNameIndex == 0);
+ STATIC_ASSERT(StubCache::kInterceptorArgsInfoIndex == 1);
+ STATIC_ASSERT(StubCache::kInterceptorArgsThisIndex == 2);
+ STATIC_ASSERT(StubCache::kInterceptorArgsHolderIndex == 3);
+ STATIC_ASSERT(StubCache::kInterceptorArgsLength == 4);
__ push(name);
Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor());
ASSERT(!masm->isolate()->heap()->InNewSpace(*interceptor));
Register scratch = name;
__ li(scratch, Operand(interceptor));
__ Push(scratch, receiver, holder);
- __ lw(scratch, FieldMemOperand(scratch, InterceptorInfo::kDataOffset));
- __ push(scratch);
- __ li(scratch, Operand(ExternalReference::isolate_address(masm->isolate())));
- __ push(scratch);
}
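For reference, the argument order those STATIC_ASSERTs pin down (a sketch; name is pushed first and therefore sits at args[0]):

// Interceptor call arguments after PushInterceptorArguments:
//   args[0]  name              (kInterceptorArgsNameIndex   == 0)
//   args[1]  interceptor info  (kInterceptorArgsInfoIndex   == 1)
//   args[2]  receiver          (kInterceptorArgsThisIndex   == 2)
//   args[3]  holder            (kInterceptorArgsHolderIndex == 3)
// kInterceptorArgsLength == 4; the data and isolate pushes removed above
// are no longer part of this block.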
@@ -801,7 +802,7 @@ static void CompileCallLoadPropertyWithInterceptor(
ExternalReference ref =
ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorOnly),
masm->isolate());
- __ PrepareCEntryArgs(6);
+ __ PrepareCEntryArgs(StubCache::kInterceptorArgsLength);
__ PrepareCEntryFunction(ref);
CEntryStub stub(1);
@@ -879,51 +880,31 @@ static void GenerateFastApiDirectCall(MacroAssembler* masm,
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ EnterExitFrame(false, kApiStackSpace);
- // NOTE: the O32 abi requires a0 to hold a special pointer when returning a
- // struct from the function (which is currently the case). This means we pass
- // the first argument in a1 instead of a0, if returns_handle is true.
- // CallApiFunctionAndReturn will set up a0.
-
- Address function_address = v8::ToCData<Address>(api_call_info->callback());
- bool returns_handle =
- !CallbackTable::ReturnsVoid(masm->isolate(), function_address);
-
- Register first_arg = returns_handle ? a1 : a0;
- Register second_arg = returns_handle ? a2 : a1;
-
- // first_arg = v8::Arguments&
+ // a0 = v8::Arguments&
// Arguments is built at sp + 1 (sp is a reserved spot for ra).
- __ Addu(first_arg, sp, kPointerSize);
+ __ Addu(a0, sp, kPointerSize);
// v8::Arguments::implicit_args_
- __ sw(a2, MemOperand(first_arg, 0 * kPointerSize));
+ __ sw(a2, MemOperand(a0, 0 * kPointerSize));
// v8::Arguments::values_
__ Addu(t0, a2, Operand(argc * kPointerSize));
- __ sw(t0, MemOperand(first_arg, 1 * kPointerSize));
+ __ sw(t0, MemOperand(a0, 1 * kPointerSize));
// v8::Arguments::length_ = argc
__ li(t0, Operand(argc));
- __ sw(t0, MemOperand(first_arg, 2 * kPointerSize));
+ __ sw(t0, MemOperand(a0, 2 * kPointerSize));
// v8::Arguments::is_construct_call = 0
- __ sw(zero_reg, MemOperand(first_arg, 3 * kPointerSize));
+ __ sw(zero_reg, MemOperand(a0, 3 * kPointerSize));
const int kStackUnwindSpace = argc + kFastApiCallArguments + 1;
+ Address function_address = v8::ToCData<Address>(api_call_info->callback());
ApiFunction fun(function_address);
- ExternalReference::Type type =
- returns_handle ?
- ExternalReference::DIRECT_API_CALL :
- ExternalReference::DIRECT_API_CALL_NEW;
+ ExternalReference::Type type = ExternalReference::DIRECT_API_CALL;
ExternalReference ref =
ExternalReference(&fun,
type,
masm->isolate());
-
- Address thunk_address = returns_handle
- ? FUNCTION_ADDR(&InvokeInvocationCallback)
- : FUNCTION_ADDR(&InvokeFunctionCallback);
- ExternalReference::Type thunk_type =
- returns_handle ?
- ExternalReference::PROFILING_API_CALL :
- ExternalReference::PROFILING_API_CALL_NEW;
+ Address thunk_address = FUNCTION_ADDR(&InvokeFunctionCallback);
+ ExternalReference::Type thunk_type = ExternalReference::PROFILING_API_CALL;
ApiFunction thunk_fun(thunk_address);
ExternalReference thunk_ref = ExternalReference(&thunk_fun, thunk_type,
masm->isolate());
@@ -932,12 +913,41 @@ static void GenerateFastApiDirectCall(MacroAssembler* masm,
__ CallApiFunctionAndReturn(ref,
function_address,
thunk_ref,
- second_arg,
+ a1,
kStackUnwindSpace,
- returns_handle,
kFastApiCallArguments + 1);
}
+
+// Generate call to api function.
+static void GenerateFastApiCall(MacroAssembler* masm,
+ const CallOptimization& optimization,
+ Register receiver,
+ Register scratch,
+ int argc,
+ Register* values) {
+ ASSERT(optimization.is_simple_api_call());
+ ASSERT(!receiver.is(scratch));
+
+ const int stack_space = kFastApiCallArguments + argc + 1;
+  // Reserve stack space for the call arguments.
+ __ Subu(sp, sp, Operand(stack_space * kPointerSize));
+ // Write holder to stack frame.
+ __ sw(receiver, MemOperand(sp, 0));
+ // Write receiver to stack frame.
+ int index = stack_space - 1;
+ __ sw(receiver, MemOperand(sp, index * kPointerSize));
+  // Write the arguments to the stack frame, below the receiver slot.
+  for (int i = 0; i < argc; i++) {
+    ASSERT(!receiver.is(values[i]));
+    ASSERT(!scratch.is(values[i]));
+    // Store the value, pre-decrementing so the receiver slot is preserved.
+    __ sw(values[i], MemOperand(sp, --index * kPointerSize));
+ }
+
+ GenerateFastApiDirectCall(masm, optimization, argc);
+}
+
+
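A sketch of the frame the new GenerateFastApiCall lays out before delegating (illustrative; stack_space = kFastApiCallArguments + argc + 1):

// [sp + (stack_space - 1) * kPointerSize]         receiver
// [sp + (stack_space - 2) * kPointerSize]         values[0]
//   ...
// [sp + (stack_space - 1 - argc) * kPointerSize]  values[argc - 1]
// [sp + 0]                                        holder (the receiver
//                                                 doubles as holder here)
// The kFastApiCallArguments slots in between are set up elsewhere
// (not shown in this hunk).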
class CallInterceptorCompiler BASE_EMBEDDED {
public:
CallInterceptorCompiler(StubCompiler* stub_compiler,
@@ -1098,7 +1108,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
ExternalReference(
IC_Utility(IC::kLoadPropertyWithInterceptorForCall),
masm->isolate()),
- 6);
+ StubCache::kInterceptorArgsLength);
// Restore the name_ register.
__ pop(name_);
// Leave the internal frame.
@@ -1156,22 +1166,6 @@ static void GenerateCheckPropertyCells(MacroAssembler* masm,
}
-// Convert and store int passed in register ival to IEEE 754 single precision
-// floating point value at memory location (dst + 4 * wordoffset)
-// If FPU is available use it for conversion.
-static void StoreIntAsFloat(MacroAssembler* masm,
- Register dst,
- Register wordoffset,
- Register ival,
- Register scratch1) {
- __ mtc1(ival, f0);
- __ cvt_s_w(f0, f0);
- __ sll(scratch1, wordoffset, 2);
- __ addu(scratch1, dst, scratch1);
- __ swc1(f0, MemOperand(scratch1, 0));
-}
-
-
void StubCompiler::GenerateTailCall(MacroAssembler* masm, Handle<Code> code) {
__ Jump(code, RelocInfo::CODE_TARGET);
}
@@ -1324,7 +1318,7 @@ Register BaseLoadStubCompiler::CallbackHandlerFrontend(
Handle<JSObject> holder,
Handle<Name> name,
Label* success,
- Handle<ExecutableAccessorInfo> callback) {
+ Handle<Object> callback) {
Label miss;
Register reg = HandlerFrontendHeader(object, object_reg, holder, name, &miss);
@@ -1411,10 +1405,26 @@ void BaseLoadStubCompiler::GenerateLoadConstant(Handle<Object> value) {
void BaseLoadStubCompiler::GenerateLoadCallback(
+ const CallOptimization& call_optimization) {
+ GenerateFastApiCall(
+ masm(), call_optimization, receiver(), scratch3(), 0, NULL);
+}
+
+
+void BaseLoadStubCompiler::GenerateLoadCallback(
Register reg,
Handle<ExecutableAccessorInfo> callback) {
// Build AccessorInfo::args_ list on the stack and push property name below
// the exit frame to make GC aware of them and store pointers to them.
+ STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 0);
+ STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == -1);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == -2);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == -3);
+ STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == -4);
+ STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == -5);
+ ASSERT(!scratch2().is(reg));
+ ASSERT(!scratch3().is(reg));
+ ASSERT(!scratch4().is(reg));
__ push(receiver());
__ mov(scratch2(), sp); // scratch2 = AccessorInfo::args_
if (heap()->InNewSpace(callback->data())) {
@@ -1425,31 +1435,18 @@ void BaseLoadStubCompiler::GenerateLoadCallback(
__ li(scratch3(), Handle<Object>(callback->data(), isolate()));
}
__ Subu(sp, sp, 6 * kPointerSize);
- __ sw(reg, MemOperand(sp, 5 * kPointerSize));
- __ sw(scratch3(), MemOperand(sp, 4 * kPointerSize));
+ __ sw(scratch3(), MemOperand(sp, 5 * kPointerSize));
__ LoadRoot(scratch3(), Heap::kUndefinedValueRootIndex);
+ __ sw(scratch3(), MemOperand(sp, 4 * kPointerSize));
__ sw(scratch3(), MemOperand(sp, 3 * kPointerSize));
- __ sw(scratch3(), MemOperand(sp, 2 * kPointerSize));
__ li(scratch4(),
Operand(ExternalReference::isolate_address(isolate())));
- __ sw(scratch4(), MemOperand(sp, 1 * kPointerSize));
+ __ sw(scratch4(), MemOperand(sp, 2 * kPointerSize));
+ __ sw(reg, MemOperand(sp, 1 * kPointerSize));
__ sw(name(), MemOperand(sp, 0 * kPointerSize));
- Address getter_address = v8::ToCData<Address>(callback->getter());
- bool returns_handle =
- !CallbackTable::ReturnsVoid(isolate(), getter_address);
-
- Register first_arg = returns_handle ? a1 : a0;
- Register second_arg = returns_handle ? a2 : a1;
- Register third_arg = returns_handle ? a3 : a2;
-
__ mov(a2, scratch2()); // Saved in case scratch2 == a1.
- __ mov(first_arg, sp); // (first argument - see note below) = Handle<Name>
-
- // NOTE: the O32 abi requires a0 to hold a special pointer when returning a
- // struct from the function (which is currently the case). This means we pass
- // the arguments in a1-a2 instead of a0-a1, if returns_handle is true.
- // CallApiFunctionAndReturn will set up a0.
+ __ mov(a0, sp); // (first argument - a0) = Handle<Name>
const int kApiStackSpace = 1;
FrameScope frame_scope(masm(), StackFrame::MANUAL);
@@ -1458,35 +1455,27 @@ void BaseLoadStubCompiler::GenerateLoadCallback(
// Create AccessorInfo instance on the stack above the exit frame with
// scratch2 (internal::Object** args_) as the data.
__ sw(a2, MemOperand(sp, kPointerSize));
- // (second argument - see note above) = AccessorInfo&
- __ Addu(second_arg, sp, kPointerSize);
+ // (second argument - a1) = AccessorInfo&
+ __ Addu(a1, sp, kPointerSize);
const int kStackUnwindSpace = kFastApiCallArguments + 1;
-
+ Address getter_address = v8::ToCData<Address>(callback->getter());
ApiFunction fun(getter_address);
- ExternalReference::Type type =
- returns_handle ?
- ExternalReference::DIRECT_GETTER_CALL :
- ExternalReference::DIRECT_GETTER_CALL_NEW;
+ ExternalReference::Type type = ExternalReference::DIRECT_GETTER_CALL;
ExternalReference ref = ExternalReference(&fun, type, isolate());
- Address thunk_address = returns_handle
- ? FUNCTION_ADDR(&InvokeAccessorGetter)
- : FUNCTION_ADDR(&InvokeAccessorGetterCallback);
+ Address thunk_address = FUNCTION_ADDR(&InvokeAccessorGetterCallback);
ExternalReference::Type thunk_type =
- returns_handle ?
- ExternalReference::PROFILING_GETTER_CALL :
- ExternalReference::PROFILING_GETTER_CALL_NEW;
+ ExternalReference::PROFILING_GETTER_CALL;
ApiFunction thunk_fun(thunk_address);
ExternalReference thunk_ref = ExternalReference(&thunk_fun, thunk_type,
isolate());
__ CallApiFunctionAndReturn(ref,
getter_address,
thunk_ref,
- third_arg,
+ a2,
kStackUnwindSpace,
- returns_handle,
- 5);
+ 6);
}
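The reordered stores above line up with the PropertyCallbackArguments indices asserted at the head of the function. As a sketch (scratch2 becomes args_, pointing at the receiver slot; negative indices run toward lower addresses):

// args_[ 0]  receiver        (kThisIndex                    ==  0)
// args_[-1]  callback data   (kDataIndex                    == -1)
// args_[-2]  return value    (kReturnValueOffset            == -2)
// args_[-3]  return default  (kReturnValueDefaultValueIndex == -3)
// args_[-4]  isolate         (kIsolateIndex                 == -4)
// args_[-5]  holder (reg)    (kHolderIndex                  == -5)
// Below that sits the property name, whose address is passed to the
// getter as the Handle<Name> first argument in a0.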
@@ -1571,7 +1560,7 @@ void BaseLoadStubCompiler::GenerateLoadInterceptor(
ExternalReference ref = ExternalReference(
IC_Utility(IC::kLoadPropertyWithInterceptorForLoad), isolate());
- __ TailCallExternalReference(ref, 6, 1);
+ __ TailCallExternalReference(ref, StubCache::kInterceptorArgsLength, 1);
}
}
@@ -1819,25 +1808,25 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
&call_builtin,
DONT_DO_SMI_CHECK);
- // Get the array's length into r0 and calculate new length.
- __ lw(a0, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ // Get the array's length into v0 and calculate new length.
+ __ lw(v0, FieldMemOperand(receiver, JSArray::kLengthOffset));
STATIC_ASSERT(kSmiTagSize == 1);
STATIC_ASSERT(kSmiTag == 0);
- __ Addu(a0, a0, Operand(Smi::FromInt(argc)));
+ __ Addu(v0, v0, Operand(Smi::FromInt(argc)));
// Get the elements' length.
__ lw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
// Check if we could survive without allocation.
- __ Branch(&call_builtin, gt, a0, Operand(t0));
+ __ Branch(&call_builtin, gt, v0, Operand(t0));
__ lw(t0, MemOperand(sp, (argc - 1) * kPointerSize));
__ StoreNumberToDoubleElements(
- t0, a0, elements, a3, t1, a2, t5,
+ t0, v0, elements, a3, t1, a2,
&call_builtin, argc * kDoubleSize);
// Save new length.
- __ sw(a0, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ __ sw(v0, FieldMemOperand(receiver, JSArray::kLengthOffset));
// Check for a smi.
__ DropAndRet(argc + 1);
@@ -2854,6 +2843,24 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback(
}
+Handle<Code> StoreStubCompiler::CompileStoreCallback(
+ Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<Name> name,
+ const CallOptimization& call_optimization) {
+ Label success;
+ HandlerFrontend(object, receiver(), holder, name, &success);
+ __ bind(&success);
+
+ Register values[] = { value() };
+ GenerateFastApiCall(
+ masm(), call_optimization, receiver(), scratch3(), 1, values);
+
+ // Return the generated code.
+ return GetCode(kind(), Code::CALLBACKS, name);
+}
+
+
#undef __
#define __ ACCESS_MASM(masm)
@@ -2938,46 +2945,6 @@ Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
}
-Handle<Code> StoreStubCompiler::CompileStoreGlobal(
- Handle<GlobalObject> object,
- Handle<PropertyCell> cell,
- Handle<Name> name) {
- Label miss;
-
- // Check that the map of the global has not changed.
- __ lw(scratch1(), FieldMemOperand(receiver(), HeapObject::kMapOffset));
- __ Branch(&miss, ne, scratch1(), Operand(Handle<Map>(object->map())));
-
- // Check that the value in the cell is not the hole. If it is, this
- // cell could have been deleted and reintroducing the global needs
- // to update the property details in the property dictionary of the
- // global object. We bail out to the runtime system to do that.
- __ li(scratch1(), Operand(cell));
- __ LoadRoot(scratch2(), Heap::kTheHoleValueRootIndex);
- __ lw(scratch3(), FieldMemOperand(scratch1(), Cell::kValueOffset));
- __ Branch(&miss, eq, scratch3(), Operand(scratch2()));
-
- // Store the value in the cell.
- __ sw(value(), FieldMemOperand(scratch1(), Cell::kValueOffset));
- __ mov(v0, a0); // Stored value must be returned in v0.
- // Cells are always rescanned, so no write barrier here.
-
- Counters* counters = isolate()->counters();
- __ IncrementCounter(
- counters->named_store_global_inline(), 1, scratch1(), scratch2());
- __ Ret();
-
- // Handle store cache miss.
- __ bind(&miss);
- __ IncrementCounter(
- counters->named_store_global_inline_miss(), 1, scratch1(), scratch2());
- TailCallBuiltin(masm(), MissBuiltin(kind()));
-
- // Return the generated code.
- return GetICCode(kind(), Code::NORMAL, name);
-}
-
-
Handle<Code> LoadStubCompiler::CompileLoadNonexistent(
Handle<JSObject> object,
Handle<JSObject> last,
@@ -3229,570 +3196,6 @@ void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
}
-static void GenerateSmiKeyCheck(MacroAssembler* masm,
- Register key,
- Register scratch0,
- Register scratch1,
- FPURegister double_scratch0,
- FPURegister double_scratch1,
- Label* fail) {
- Label key_ok;
- // Check for smi or a smi inside a heap number. We convert the heap
- // number and check if the conversion is exact and fits into the smi
- // range.
- __ JumpIfSmi(key, &key_ok);
- __ CheckMap(key,
- scratch0,
- Heap::kHeapNumberMapRootIndex,
- fail,
- DONT_DO_SMI_CHECK);
- __ ldc1(double_scratch0, FieldMemOperand(key, HeapNumber::kValueOffset));
- __ EmitFPUTruncate(kRoundToZero,
- scratch0,
- double_scratch0,
- at,
- double_scratch1,
- scratch1,
- kCheckForInexactConversion);
-
- __ Branch(fail, ne, scratch1, Operand(zero_reg));
-
- __ SmiTagCheckOverflow(key, scratch0, scratch1);
- __ BranchOnOverflow(fail, scratch1);
- __ bind(&key_ok);
-}
-
-
-void KeyedStoreStubCompiler::GenerateStoreExternalArray(
- MacroAssembler* masm,
- ElementsKind elements_kind) {
- // ---------- S t a t e --------------
- // -- a0 : value
- // -- a1 : key
- // -- a2 : receiver
- // -- ra : return address
- // -----------------------------------
-
- Label slow, check_heap_number, miss_force_generic;
-
- // Register usage.
- Register value = a0;
- Register key = a1;
- Register receiver = a2;
- // a3 mostly holds the elements array or the destination external array.
-
- // This stub is meant to be tail-jumped to, the receiver must already
- // have been verified by the caller to not be a smi.
-
- // Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, key, t0, t1, f2, f4, &miss_force_generic);
-
- __ lw(a3, FieldMemOperand(receiver, JSObject::kElementsOffset));
-
- // Check that the index is in range.
- __ lw(t1, FieldMemOperand(a3, ExternalArray::kLengthOffset));
- // Unsigned comparison catches both negative and too-large values.
- __ Branch(&miss_force_generic, Ugreater_equal, key, Operand(t1));
-
- // Handle both smis and HeapNumbers in the fast path. Go to the
- // runtime for all other kinds of values.
- // a3: external array.
-
- if (elements_kind == EXTERNAL_PIXEL_ELEMENTS) {
- // Double to pixel conversion is only implemented in the runtime for now.
- __ JumpIfNotSmi(value, &slow);
- } else {
- __ JumpIfNotSmi(value, &check_heap_number);
- }
- __ SmiUntag(t1, value);
- __ lw(a3, FieldMemOperand(a3, ExternalArray::kExternalPointerOffset));
-
- // a3: base pointer of external storage.
- // t1: value (integer).
-
- switch (elements_kind) {
- case EXTERNAL_PIXEL_ELEMENTS: {
- // Clamp the value to [0..255].
- // v0 is used as a scratch register here.
- Label done;
- __ li(v0, Operand(255));
- // Normal branch: nop in delay slot.
- __ Branch(&done, gt, t1, Operand(v0));
- // Use delay slot in this branch.
- __ Branch(USE_DELAY_SLOT, &done, lt, t1, Operand(zero_reg));
- __ mov(v0, zero_reg); // In delay slot.
- __ mov(v0, t1); // Value is in range 0..255.
- __ bind(&done);
- __ mov(t1, v0);
-
- __ srl(t8, key, 1);
- __ addu(t8, a3, t8);
- __ sb(t1, MemOperand(t8, 0));
- }
- break;
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- __ srl(t8, key, 1);
- __ addu(t8, a3, t8);
- __ sb(t1, MemOperand(t8, 0));
- break;
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- __ addu(t8, a3, key);
- __ sh(t1, MemOperand(t8, 0));
- break;
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- __ sll(t8, key, 1);
- __ addu(t8, a3, t8);
- __ sw(t1, MemOperand(t8, 0));
- break;
- case EXTERNAL_FLOAT_ELEMENTS:
- // Perform int-to-float conversion and store to memory.
- __ SmiUntag(t0, key);
- StoreIntAsFloat(masm, a3, t0, t1, t2);
- break;
- case EXTERNAL_DOUBLE_ELEMENTS:
- __ sll(t8, key, 2);
- __ addu(a3, a3, t8);
- // a3: effective address of the double element
- FloatingPointHelper::Destination destination;
- destination = FloatingPointHelper::kFPURegisters;
- FloatingPointHelper::ConvertIntToDouble(
- masm, t1, destination,
- f0, t2, t3, // These are: double_dst, dst_mantissa, dst_exponent.
- t0, f2); // These are: scratch2, single_scratch.
- __ sdc1(f0, MemOperand(a3, 0));
- break;
- case FAST_ELEMENTS:
- case FAST_SMI_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- UNREACHABLE();
- break;
- }
-
- // Entry registers are intact, a0 holds the value which is the return value.
- __ Ret(USE_DELAY_SLOT);
- __ mov(v0, a0);
-
- if (elements_kind != EXTERNAL_PIXEL_ELEMENTS) {
- // a3: external array.
- __ bind(&check_heap_number);
- __ GetObjectType(value, t1, t2);
- __ Branch(&slow, ne, t2, Operand(HEAP_NUMBER_TYPE));
-
- __ lw(a3, FieldMemOperand(a3, ExternalArray::kExternalPointerOffset));
-
- // a3: base pointer of external storage.
-
- // The WebGL specification leaves the behavior of storing NaN and
- // +/-Infinity into integer arrays basically undefined. For more
- // reproducible behavior, convert these to zero.
-
-
- __ ldc1(f0, FieldMemOperand(a0, HeapNumber::kValueOffset));
-
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
- __ cvt_s_d(f0, f0);
- __ sll(t8, key, 1);
- __ addu(t8, a3, t8);
- __ swc1(f0, MemOperand(t8, 0));
- } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
- __ sll(t8, key, 2);
- __ addu(t8, a3, t8);
- __ sdc1(f0, MemOperand(t8, 0));
- } else {
- __ EmitECMATruncate(t3, f0, f2, t2, t1, t5);
-
- switch (elements_kind) {
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- __ srl(t8, key, 1);
- __ addu(t8, a3, t8);
- __ sb(t3, MemOperand(t8, 0));
- break;
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- __ addu(t8, a3, key);
- __ sh(t3, MemOperand(t8, 0));
- break;
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- __ sll(t8, key, 1);
- __ addu(t8, a3, t8);
- __ sw(t3, MemOperand(t8, 0));
- break;
- case EXTERNAL_PIXEL_ELEMENTS:
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_SMI_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- UNREACHABLE();
- break;
- }
- }
-
- // Entry registers are intact, a0 holds the value
- // which is the return value.
- __ Ret(USE_DELAY_SLOT);
- __ mov(v0, a0);
- }
-
- // Slow case, key and receiver still in a0 and a1.
- __ bind(&slow);
- __ IncrementCounter(
- masm->isolate()->counters()->keyed_load_external_array_slow(),
- 1, a2, a3);
- // Entry registers are intact.
- // ---------- S t a t e --------------
- // -- ra : return address
- // -- a0 : key
- // -- a1 : receiver
- // -----------------------------------
- TailCallBuiltin(masm, Builtins::kKeyedStoreIC_Slow);
-
- // Miss case, call the runtime.
- __ bind(&miss_force_generic);
-
- // ---------- S t a t e --------------
- // -- ra : return address
- // -- a0 : key
- // -- a1 : receiver
- // -----------------------------------
- TailCallBuiltin(masm, Builtins::kKeyedStoreIC_MissForceGeneric);
-}
-
-
-void KeyedStoreStubCompiler::GenerateStoreFastElement(
- MacroAssembler* masm,
- bool is_js_array,
- ElementsKind elements_kind,
- KeyedAccessStoreMode store_mode) {
- // ----------- S t a t e -------------
- // -- a0 : value
- // -- a1 : key
- // -- a2 : receiver
- // -- ra : return address
- // -- a3 : scratch
- // -- a4 : scratch (elements)
- // -----------------------------------
- Label miss_force_generic, transition_elements_kind, grow, slow;
- Label finish_store, check_capacity;
-
- Register value_reg = a0;
- Register key_reg = a1;
- Register receiver_reg = a2;
- Register scratch = t0;
- Register elements_reg = a3;
- Register length_reg = t1;
- Register scratch2 = t2;
-
- // This stub is meant to be tail-jumped to, the receiver must already
- // have been verified by the caller to not be a smi.
-
- // Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, key_reg, t0, t1, f2, f4, &miss_force_generic);
-
- if (IsFastSmiElementsKind(elements_kind)) {
- __ JumpIfNotSmi(value_reg, &transition_elements_kind);
- }
-
- // Check that the key is within bounds.
- __ lw(elements_reg,
- FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
- if (is_js_array) {
- __ lw(scratch, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
- } else {
- __ lw(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
- }
- // Compare smis.
- if (is_js_array && IsGrowStoreMode(store_mode)) {
- __ Branch(&grow, hs, key_reg, Operand(scratch));
- } else {
- __ Branch(&miss_force_generic, hs, key_reg, Operand(scratch));
- }
-
- // Make sure elements is a fast element array, not 'cow'.
- __ CheckMap(elements_reg,
- scratch,
- Heap::kFixedArrayMapRootIndex,
- &miss_force_generic,
- DONT_DO_SMI_CHECK);
-
- __ bind(&finish_store);
-
- if (IsFastSmiElementsKind(elements_kind)) {
- __ Addu(scratch,
- elements_reg,
- Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
- __ sll(scratch2, key_reg, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(scratch, scratch, scratch2);
- __ sw(value_reg, MemOperand(scratch));
- } else {
- ASSERT(IsFastObjectElementsKind(elements_kind));
- __ Addu(scratch,
- elements_reg,
- Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
- __ sll(scratch2, key_reg, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(scratch, scratch, scratch2);
- __ sw(value_reg, MemOperand(scratch));
- __ mov(receiver_reg, value_reg);
- __ RecordWrite(elements_reg, // Object.
- scratch, // Address.
- receiver_reg, // Value.
- kRAHasNotBeenSaved,
- kDontSaveFPRegs);
- }
- // value_reg (a0) is preserved.
- // Done.
- __ Ret();
-
- __ bind(&miss_force_generic);
- TailCallBuiltin(masm, Builtins::kKeyedStoreIC_MissForceGeneric);
-
- __ bind(&transition_elements_kind);
- TailCallBuiltin(masm, Builtins::kKeyedStoreIC_Miss);
-
- if (is_js_array && IsGrowStoreMode(store_mode)) {
- // Grow the array by a single element if possible.
- __ bind(&grow);
-
- // Make sure the array is only growing by a single element, anything else
- // must be handled by the runtime.
- __ Branch(&miss_force_generic, ne, key_reg, Operand(scratch));
-
- // Check for the empty array, and preallocate a small backing store if
- // possible.
- __ lw(length_reg,
- FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
- __ lw(elements_reg,
- FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
- __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
- __ Branch(&check_capacity, ne, elements_reg, Operand(at));
-
- int size = FixedArray::SizeFor(JSArray::kPreallocatedArrayElements);
- __ Allocate(size, elements_reg, scratch, scratch2, &slow, TAG_OBJECT);
-
- __ LoadRoot(scratch, Heap::kFixedArrayMapRootIndex);
- __ sw(scratch, FieldMemOperand(elements_reg, JSObject::kMapOffset));
- __ li(scratch, Operand(Smi::FromInt(JSArray::kPreallocatedArrayElements)));
- __ sw(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
- __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
- for (int i = 1; i < JSArray::kPreallocatedArrayElements; ++i) {
- __ sw(scratch, FieldMemOperand(elements_reg, FixedArray::SizeFor(i)));
- }
-
- // Store the element at index zero.
- __ sw(value_reg, FieldMemOperand(elements_reg, FixedArray::SizeFor(0)));
-
- // Install the new backing store in the JSArray.
- __ sw(elements_reg,
- FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
- __ RecordWriteField(receiver_reg, JSObject::kElementsOffset, elements_reg,
- scratch, kRAHasNotBeenSaved, kDontSaveFPRegs,
- EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-
- // Increment the length of the array.
- __ li(length_reg, Operand(Smi::FromInt(1)));
- __ Ret(USE_DELAY_SLOT);
- __ sw(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
-
- __ bind(&check_capacity);
- // Check for cow elements, in general they are not handled by this stub
- __ CheckMap(elements_reg,
- scratch,
- Heap::kFixedCOWArrayMapRootIndex,
- &miss_force_generic,
- DONT_DO_SMI_CHECK);
-
- __ lw(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
- __ Branch(&slow, hs, length_reg, Operand(scratch));
-
- // Grow the array and finish the store.
- __ Addu(length_reg, length_reg, Operand(Smi::FromInt(1)));
- __ sw(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
- __ jmp(&finish_store);
-
- __ bind(&slow);
- TailCallBuiltin(masm, Builtins::kKeyedStoreIC_Slow);
- }
-}
-
-
-void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
- MacroAssembler* masm,
- bool is_js_array,
- KeyedAccessStoreMode store_mode) {
- // ----------- S t a t e -------------
- // -- a0 : value
- // -- a1 : key
- // -- a2 : receiver
- // -- ra : return address
- // -- a3 : scratch (elements backing store)
- // -- t0 : scratch (elements_reg)
- // -- t1 : scratch (mantissa_reg)
- // -- t2 : scratch (exponent_reg)
- // -- t3 : scratch4
- // -- t4 : scratch
- // -----------------------------------
- Label miss_force_generic, transition_elements_kind, grow, slow;
- Label finish_store, check_capacity;
-
- Register value_reg = a0;
- Register key_reg = a1;
- Register receiver_reg = a2;
- Register elements_reg = a3;
- Register scratch1 = t0;
- Register scratch2 = t1;
- Register scratch3 = t2;
- Register scratch4 = t3;
- Register scratch5 = t4;
- Register length_reg = t3;
-
- // This stub is meant to be tail-jumped to, the receiver must already
- // have been verified by the caller to not be a smi.
-
- // Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, key_reg, t0, t1, f2, f4, &miss_force_generic);
-
- __ lw(elements_reg,
- FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
-
- // Check that the key is within bounds.
- if (is_js_array) {
- __ lw(scratch1, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
- } else {
- __ lw(scratch1,
- FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
- }
- // Compare smis, unsigned compare catches both negative and out-of-bound
- // indexes.
- if (IsGrowStoreMode(store_mode)) {
- __ Branch(&grow, hs, key_reg, Operand(scratch1));
- } else {
- __ Branch(&miss_force_generic, hs, key_reg, Operand(scratch1));
- }
-
- __ bind(&finish_store);
-
- __ StoreNumberToDoubleElements(value_reg,
- key_reg,
- // All registers after this are overwritten.
- elements_reg,
- scratch1,
- scratch2,
- scratch3,
- scratch4,
- &transition_elements_kind);
-
- __ Ret(USE_DELAY_SLOT);
- __ mov(v0, value_reg); // In delay slot.
-
- // Handle store cache miss, replacing the ic with the generic stub.
- __ bind(&miss_force_generic);
- TailCallBuiltin(masm, Builtins::kKeyedStoreIC_MissForceGeneric);
-
- __ bind(&transition_elements_kind);
- TailCallBuiltin(masm, Builtins::kKeyedStoreIC_Miss);
-
- if (is_js_array && IsGrowStoreMode(store_mode)) {
- // Grow the array by a single element if possible.
- __ bind(&grow);
-
- // Make sure the array is only growing by a single element, anything else
- // must be handled by the runtime.
- __ Branch(&miss_force_generic, ne, key_reg, Operand(scratch1));
-
- // Transition on values that can't be stored in a FixedDoubleArray.
- Label value_is_smi;
- __ JumpIfSmi(value_reg, &value_is_smi);
- __ lw(scratch1, FieldMemOperand(value_reg, HeapObject::kMapOffset));
- __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
- __ Branch(&transition_elements_kind, ne, scratch1, Operand(at));
- __ bind(&value_is_smi);
-
- // Check for the empty array, and preallocate a small backing store if
- // possible.
- __ lw(length_reg,
- FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
- __ lw(elements_reg,
- FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
- __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
- __ Branch(&check_capacity, ne, elements_reg, Operand(at));
-
- int size = FixedDoubleArray::SizeFor(JSArray::kPreallocatedArrayElements);
- __ Allocate(size, elements_reg, scratch1, scratch2, &slow, TAG_OBJECT);
-
- // Initialize the new FixedDoubleArray.
- __ LoadRoot(scratch1, Heap::kFixedDoubleArrayMapRootIndex);
- __ sw(scratch1, FieldMemOperand(elements_reg, JSObject::kMapOffset));
- __ li(scratch1, Operand(Smi::FromInt(JSArray::kPreallocatedArrayElements)));
- __ sw(scratch1,
- FieldMemOperand(elements_reg, FixedDoubleArray::kLengthOffset));
-
- __ mov(scratch1, elements_reg);
- __ StoreNumberToDoubleElements(value_reg,
- key_reg,
- // All registers after this are overwritten.
- scratch1,
- scratch2,
- scratch3,
- scratch4,
- scratch5,
- &transition_elements_kind);
-
- __ li(scratch1, Operand(kHoleNanLower32));
- __ li(scratch2, Operand(kHoleNanUpper32));
- for (int i = 1; i < JSArray::kPreallocatedArrayElements; i++) {
- int offset = FixedDoubleArray::OffsetOfElementAt(i);
- __ sw(scratch1, FieldMemOperand(elements_reg, offset));
- __ sw(scratch2, FieldMemOperand(elements_reg, offset + kPointerSize));
- }
-
- // Install the new backing store in the JSArray.
- __ sw(elements_reg,
- FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
- __ RecordWriteField(receiver_reg, JSObject::kElementsOffset, elements_reg,
- scratch1, kRAHasNotBeenSaved, kDontSaveFPRegs,
- EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-
- // Increment the length of the array.
- __ li(length_reg, Operand(Smi::FromInt(1)));
- __ sw(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
- __ Ret(USE_DELAY_SLOT);
- __ lw(elements_reg,
- FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
-
- __ bind(&check_capacity);
- // Make sure that the backing store can hold additional elements.
- __ lw(scratch1,
- FieldMemOperand(elements_reg, FixedDoubleArray::kLengthOffset));
- __ Branch(&slow, hs, length_reg, Operand(scratch1));
-
- // Grow the array and finish the store.
- __ Addu(length_reg, length_reg, Operand(Smi::FromInt(1)));
- __ sw(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
- __ jmp(&finish_store);
-
- __ bind(&slow);
- TailCallBuiltin(masm, Builtins::kKeyedStoreIC_Slow);
- }
-}
-
-
#undef __
} } // namespace v8::internal
diff --git a/deps/v8/src/mksnapshot.cc b/deps/v8/src/mksnapshot.cc
index c1edcb1b3..9cf9e2e8a 100644
--- a/deps/v8/src/mksnapshot.cc
+++ b/deps/v8/src/mksnapshot.cc
@@ -314,9 +314,6 @@ int main(int argc, char** argv) {
// By default, log code create information in the snapshot.
i::FLAG_log_code = true;
- // Disable the i18n extension, as it doesn't support being snapshotted yet.
- i::FLAG_enable_i18n = false;
-
// Print the usage if an error occurs when parsing the command line
// flags or if the help flag is set.
int result = i::FlagList::SetFlagsFromCommandLine(&argc, argv, true);
@@ -333,8 +330,9 @@ int main(int argc, char** argv) {
exit(1);
}
#endif
- i::Serializer::Enable();
Isolate* isolate = Isolate::GetCurrent();
+ i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ i::Serializer::Enable(internal_isolate);
Persistent<Context> context;
{
HandleScope handle_scope(isolate);
@@ -391,21 +389,23 @@ int main(int argc, char** argv) {
// Make sure all builtin scripts are cached.
{ HandleScope scope(isolate);
for (int i = 0; i < i::Natives::GetBuiltinsCount(); i++) {
- i::Isolate::Current()->bootstrapper()->NativesSourceLookup(i);
+ internal_isolate->bootstrapper()->NativesSourceLookup(i);
}
}
// If we don't do this then we end up with a stray root pointing at the
// context even after we have disposed of the context.
- HEAP->CollectAllGarbage(i::Heap::kNoGCFlags, "mksnapshot");
+ internal_isolate->heap()->CollectAllGarbage(
+ i::Heap::kNoGCFlags, "mksnapshot");
i::Object* raw_context = *v8::Utils::OpenPersistent(context);
- context.Dispose(isolate);
+ context.Dispose();
CppByteSink sink(argv[1]);
// This results in a somewhat smaller snapshot, probably because it gets rid
// of some things that are cached between garbage collections.
- i::StartupSerializer ser(&sink);
+ i::StartupSerializer ser(internal_isolate, &sink);
ser.SerializeStrongReferences();
- i::PartialSerializer partial_ser(&ser, sink.partial_sink());
+ i::PartialSerializer partial_ser(
+ internal_isolate, &ser, sink.partial_sink());
partial_ser.Serialize(&raw_context);
ser.SerializeWeakReferences();
diff --git a/deps/v8/src/natives.h b/deps/v8/src/natives.h
index e3f69d1da..5f34420d0 100644
--- a/deps/v8/src/natives.h
+++ b/deps/v8/src/natives.h
@@ -36,7 +36,7 @@ typedef bool (*NativeSourceCallback)(Vector<const char> name,
int index);
enum NativeType {
- CORE, EXPERIMENTAL, D8, TEST, I18N
+ CORE, EXPERIMENTAL, D8, TEST
};
template <NativeType type>
@@ -61,7 +61,6 @@ class NativesCollection {
typedef NativesCollection<CORE> Natives;
typedef NativesCollection<EXPERIMENTAL> ExperimentalNatives;
-typedef NativesCollection<I18N> I18NNatives;
} } // namespace v8::internal
diff --git a/deps/v8/src/object-observe.js b/deps/v8/src/object-observe.js
index f5e0d9d56..1035792e8 100644
--- a/deps/v8/src/object-observe.js
+++ b/deps/v8/src/object-observe.js
@@ -27,12 +27,41 @@
"use strict";
+// Overview:
+//
+// This file contains all of the routing and accounting for Object.observe.
+// User code will interact with these mechanisms via the Object.observe APIs
+// and, as a side effect, by mutating objects which are observed. The V8 runtime
+// (both C++ and JS) will interact with these mechanisms primarily by enqueuing
+// proper change records for objects which were mutated. The Object.observe
+// routing and accounting consists primarily of three participants:
+//
+// 1) ObjectInfo. This represents the observed state of a given object. It
+// records what callbacks are observing the object, with what options, and
+// what "change types" are in progress on the object (i.e. via
+// notifier.performChange).
+//
+// 2) CallbackInfo. This represents a callback used for observation. It holds
+// the records which must be delivered to the callback, as well as the global
+// priority of the callback (which determines delivery order between
+// callbacks).
+//
+// 3) observationState.pendingObservers. This is the set of observers with
+//    change records that must be delivered. During "normal" delivery
+// (i.e. not Object.deliverChangeRecords), this is the mechanism by which
+// callbacks are invoked in the proper order until there are no more
+// change records pending to a callback.
+//
+// Note that in order to reduce allocation and processing costs, the
+// implementation of (1) and (2) have "optimized" states which represent
+// common cases which can be handled more efficiently.
+
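A usage sketch of the user-facing API these participants back (illustrative, written as comments in this file's style; assumes the observation feature is enabled):

// Example: 'logger' gets a CallbackInfo entry; obj gets an ObjectInfo.
//   function logger(records) { /* each record: {object, type, name, ...} */ }
//   Object.observe(obj, logger, ['updated']);  // ObjectInfoAddObserver
//   obj.x = 1;                                 // enqueues an 'updated' record
//   Object.deliverChangeRecords(logger);       // drains logger's records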
var observationState = %GetObservationState();
if (IS_UNDEFINED(observationState.callbackInfoMap)) {
observationState.callbackInfoMap = %ObservationWeakMapCreate();
observationState.objectInfoMap = %ObservationWeakMapCreate();
- observationState.notifierTargetMap = %ObservationWeakMapCreate();
- observationState.pendingObservers = new InternalArray;
+ observationState.notifierObjectInfoMap = %ObservationWeakMapCreate();
+ observationState.pendingObservers = null;
observationState.nextCallbackPriority = 0;
}
@@ -59,126 +88,191 @@ ObservationWeakMap.prototype = {
var callbackInfoMap =
new ObservationWeakMap(observationState.callbackInfoMap);
var objectInfoMap = new ObservationWeakMap(observationState.objectInfoMap);
-var notifierTargetMap =
- new ObservationWeakMap(observationState.notifierTargetMap);
-
-function CreateObjectInfo(object) {
- var info = {
- changeObservers: new InternalArray,
- notifier: null,
- inactiveObservers: new InternalArray,
- performing: { __proto__: null },
- performingCount: 0,
- };
- objectInfoMap.set(object, info);
- return info;
+var notifierObjectInfoMap =
+ new ObservationWeakMap(observationState.notifierObjectInfoMap);
+
+function TypeMapCreate() {
+ return { __proto__: null };
}
-var defaultAcceptTypes = {
- __proto__: null,
- 'new': true,
- 'updated': true,
- 'deleted': true,
- 'prototype': true,
- 'reconfigured': true
-};
+function TypeMapAddType(typeMap, type, ignoreDuplicate) {
+ typeMap[type] = ignoreDuplicate ? 1 : (typeMap[type] || 0) + 1;
+}
-function CreateObserver(callback, accept) {
- var observer = {
+function TypeMapRemoveType(typeMap, type) {
+ typeMap[type]--;
+}
+
+function TypeMapCreateFromList(typeList) {
+ var typeMap = TypeMapCreate();
+ for (var i = 0; i < typeList.length; i++) {
+ TypeMapAddType(typeMap, typeList[i], true);
+ }
+ return typeMap;
+}
+
+function TypeMapHasType(typeMap, type) {
+ return !!typeMap[type];
+}
+
+function TypeMapIsDisjointFrom(typeMap1, typeMap2) {
+ if (!typeMap1 || !typeMap2)
+ return true;
+
+ for (var type in typeMap1) {
+ if (TypeMapHasType(typeMap1, type) && TypeMapHasType(typeMap2, type))
+ return false;
+ }
+
+ return true;
+}
+
+var defaultAcceptTypes = TypeMapCreateFromList([
+ 'new',
+ 'updated',
+ 'deleted',
+ 'prototype',
+ 'reconfigured'
+]);
+
+// An Observer is a registration to observe an object by a callback with
+// a given set of accept types. If the set of accept types is the default
+// set for Object.observe, the observer is represented as a direct reference
+// to the callback. An observer never changes its accept types and thus never
+// needs to "normalize".
+function ObserverCreate(callback, acceptList) {
+ return IS_UNDEFINED(acceptList) ? callback : {
__proto__: null,
callback: callback,
- accept: defaultAcceptTypes
+ accept: TypeMapCreateFromList(acceptList)
};
+}
- if (IS_UNDEFINED(accept))
- return observer;
-
- var acceptMap = { __proto__: null };
- for (var i = 0; i < accept.length; i++)
- acceptMap[accept[i]] = true;
+function ObserverGetCallback(observer) {
+ return IS_SPEC_FUNCTION(observer) ? observer : observer.callback;
+}
- observer.accept = acceptMap;
- return observer;
+function ObserverGetAcceptTypes(observer) {
+ return IS_SPEC_FUNCTION(observer) ? defaultAcceptTypes : observer.accept;
}
function ObserverIsActive(observer, objectInfo) {
- if (objectInfo.performingCount === 0)
- return true;
+ return TypeMapIsDisjointFrom(ObjectInfoGetPerformingTypes(objectInfo),
+ ObserverGetAcceptTypes(observer));
+}
- var performing = objectInfo.performing;
- for (var type in performing) {
- if (performing[type] > 0 && observer.accept[type])
- return false;
+function ObjectInfoGet(object) {
+ var objectInfo = objectInfoMap.get(object);
+ if (IS_UNDEFINED(objectInfo)) {
+ if (!%IsJSProxy(object))
+ %SetIsObserved(object);
+
+ objectInfo = {
+ object: object,
+ changeObservers: null,
+ notifier: null,
+ performing: null,
+ performingCount: 0,
+ };
+ objectInfoMap.set(object, objectInfo);
}
+ return objectInfo;
+}
- return true;
+function ObjectInfoGetFromNotifier(notifier) {
+ return notifierObjectInfoMap.get(notifier);
}
-function ObserverIsInactive(observer, objectInfo) {
- return !ObserverIsActive(observer, objectInfo);
+function ObjectInfoGetNotifier(objectInfo) {
+ if (IS_NULL(objectInfo.notifier)) {
+ objectInfo.notifier = { __proto__: notifierPrototype };
+ notifierObjectInfoMap.set(objectInfo.notifier, objectInfo);
+ }
+
+ return objectInfo.notifier;
}
-function RemoveNullElements(from) {
- var i = 0;
- var j = 0;
- for (; i < from.length; i++) {
- if (from[i] === null)
- continue;
- if (j < i)
- from[j] = from[i];
- j++;
+function ObjectInfoGetObject(objectInfo) {
+ return objectInfo.object;
+}
+
+function ChangeObserversIsOptimized(changeObservers) {
+ return typeof changeObservers === 'function' ||
+ typeof changeObservers.callback === 'function';
+}
+
+// The set of observers on an object is called 'changeObservers'. The first
+// observer is referenced directly via objectInfo.changeObservers. When a second
+// is added, changeObservers "normalizes" to become a mapping of callback
+// priority -> observer and is then stored on objectInfo.changeObservers.
+function ObjectInfoNormalizeChangeObservers(objectInfo) {
+ if (ChangeObserversIsOptimized(objectInfo.changeObservers)) {
+ var observer = objectInfo.changeObservers;
+ var callback = ObserverGetCallback(observer);
+ var callbackInfo = CallbackInfoGet(callback);
+ var priority = CallbackInfoGetPriority(callbackInfo);
+ objectInfo.changeObservers = { __proto__: null };
+ objectInfo.changeObservers[priority] = observer;
}
+}
- if (i !== j)
- from.length = from.length - (i - j);
+function ObjectInfoAddObserver(objectInfo, callback, acceptList) {
+ var callbackInfo = CallbackInfoGetOrCreate(callback);
+ var observer = ObserverCreate(callback, acceptList);
+
+ if (!objectInfo.changeObservers) {
+ objectInfo.changeObservers = observer;
+ return;
+ }
+
+ ObjectInfoNormalizeChangeObservers(objectInfo);
+ var priority = CallbackInfoGetPriority(callbackInfo);
+ objectInfo.changeObservers[priority] = observer;
}
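
A sketch of how objectInfo.changeObservers evolves under the scheme described above, given two hypothetical callbacks cb1 and cb2:

    var objectInfo = ObjectInfoGet(obj);
    ObjectInfoAddObserver(objectInfo, cb1, undefined);
    // Optimized: objectInfo.changeObservers === cb1.
    ObjectInfoAddObserver(objectInfo, cb2, undefined);
    // Normalized: a bare-prototype map from callback priority to observer,
    // e.g. { 0: cb1, 1: cb2 } if the callbacks were assigned priorities 0 and 1.
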
-function RepartitionObservers(conditionFn, from, to, objectInfo) {
- var anyRemoved = false;
- for (var i = 0; i < from.length; i++) {
- var observer = from[i];
- if (conditionFn(observer, objectInfo)) {
- anyRemoved = true;
- from[i] = null;
- to.push(observer);
- }
+function ObjectInfoRemoveObserver(objectInfo, callback) {
+ if (!objectInfo.changeObservers)
+ return;
+
+ if (ChangeObserversIsOptimized(objectInfo.changeObservers)) {
+ if (callback === ObserverGetCallback(objectInfo.changeObservers))
+ objectInfo.changeObservers = null;
+ return;
}
- if (anyRemoved)
- RemoveNullElements(from);
+ var callbackInfo = CallbackInfoGet(callback);
+ var priority = CallbackInfoGetPriority(callbackInfo);
+ delete objectInfo.changeObservers[priority];
}
-function BeginPerformChange(objectInfo, type) {
- objectInfo.performing[type] = (objectInfo.performing[type] || 0) + 1;
+function ObjectInfoHasActiveObservers(objectInfo) {
+ if (IS_UNDEFINED(objectInfo) || !objectInfo.changeObservers)
+ return false;
+
+ if (ChangeObserversIsOptimized(objectInfo.changeObservers))
+ return ObserverIsActive(objectInfo.changeObservers, objectInfo);
+
+ for (var priority in objectInfo.changeObservers) {
+ if (ObserverIsActive(objectInfo.changeObservers[priority], objectInfo))
+ return true;
+ }
+
+ return false;
+}
+
+function ObjectInfoAddPerformingType(objectInfo, type) {
+ objectInfo.performing = objectInfo.performing || TypeMapCreate();
+ TypeMapAddType(objectInfo.performing, type);
objectInfo.performingCount++;
- RepartitionObservers(ObserverIsInactive,
- objectInfo.changeObservers,
- objectInfo.inactiveObservers,
- objectInfo);
}
-function EndPerformChange(objectInfo, type) {
- objectInfo.performing[type]--;
+function ObjectInfoRemovePerformingType(objectInfo, type) {
objectInfo.performingCount--;
- RepartitionObservers(ObserverIsActive,
- objectInfo.inactiveObservers,
- objectInfo.changeObservers,
- objectInfo);
-}
-
-function EnsureObserverRemoved(objectInfo, callback) {
- function remove(observerList) {
- for (var i = 0; i < observerList.length; i++) {
- if (observerList[i].callback === callback) {
- observerList.splice(i, 1);
- return true;
- }
- }
- return false;
- }
+ TypeMapRemoveType(objectInfo.performing, type);
+}
- if (!remove(objectInfo.changeObservers))
- remove(objectInfo.inactiveObservers);
+function ObjectInfoGetPerformingTypes(objectInfo) {
+ return objectInfo.performingCount > 0 ? objectInfo.performing : null;
}
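
Putting the pieces together, performing types gate delivery per observer: while a change type is being "performed", any observer that accepts that type is considered inactive. An illustrative sketch:

    ObjectInfoAddPerformingType(objectInfo, 'splice');
    // ObserverIsActive(o, objectInfo) is now false for any observer o whose
    // accept map contains 'splice', because the performing map and the
    // accept map are no longer disjoint.
    ObjectInfoRemovePerformingType(objectInfo, 'splice');
    // Once performingCount drops back to zero, ObjectInfoGetPerformingTypes
    // returns null and every observer is active again.
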
function AcceptArgIsValid(arg) {
@@ -198,12 +292,31 @@ function AcceptArgIsValid(arg) {
return true;
}
-function EnsureCallbackPriority(callback) {
- if (!callbackInfoMap.has(callback))
- callbackInfoMap.set(callback, observationState.nextCallbackPriority++);
+// CallbackInfo's optimized state is just a number which represents its global
+// priority. When a change record must be enqueued for the callback, it
+// normalizes. When delivery clears any pending change records, it re-optimizes.
+function CallbackInfoGet(callback) {
+ return callbackInfoMap.get(callback);
}
-function NormalizeCallbackInfo(callback) {
+function CallbackInfoGetOrCreate(callback) {
+ var callbackInfo = callbackInfoMap.get(callback);
+ if (!IS_UNDEFINED(callbackInfo))
+ return callbackInfo;
+
+ var priority = observationState.nextCallbackPriority++;
+ callbackInfoMap.set(callback, priority);
+ return priority;
+}
+
+function CallbackInfoGetPriority(callbackInfo) {
+ if (IS_NUMBER(callbackInfo))
+ return callbackInfo;
+ else
+ return callbackInfo.priority;
+}
+
+function CallbackInfoNormalize(callback) {
var callbackInfo = callbackInfoMap.get(callback);
if (IS_NUMBER(callbackInfo)) {
var priority = callbackInfo;
@@ -214,32 +327,18 @@ function NormalizeCallbackInfo(callback) {
return callbackInfo;
}
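
The hunk elides the normalization body, but the shape it produces can be inferred from CallbackDeliverPending below, which reads callbackInfo.priority and drains callbackInfo with %MoveArrayContents. A sketch of the lifecycle (an inference, not verbatim patch code):

    var info = CallbackInfoGetOrCreate(cb); // optimized: a plain number
    info = CallbackInfoNormalize(cb);       // normalized: a pending-record
                                            // array carrying info.priority
    // CallbackInfoGetPriority(info) works on either shape.
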
-function ObjectObserve(object, callback, accept) {
+function ObjectObserve(object, callback, acceptList) {
if (!IS_SPEC_OBJECT(object))
throw MakeTypeError("observe_non_object", ["observe"]);
if (!IS_SPEC_FUNCTION(callback))
throw MakeTypeError("observe_non_function", ["observe"]);
if (ObjectIsFrozen(callback))
throw MakeTypeError("observe_callback_frozen");
- if (!AcceptArgIsValid(accept))
+ if (!AcceptArgIsValid(acceptList))
throw MakeTypeError("observe_accept_invalid");
- EnsureCallbackPriority(callback);
-
- var objectInfo = objectInfoMap.get(object);
- if (IS_UNDEFINED(objectInfo)) {
- objectInfo = CreateObjectInfo(object);
- %SetIsObserved(object);
- }
-
- EnsureObserverRemoved(objectInfo, callback);
-
- var observer = CreateObserver(callback, accept);
- if (ObserverIsActive(observer, objectInfo))
- objectInfo.changeObservers.push(observer);
- else
- objectInfo.inactiveObservers.push(observer);
-
+ var objectInfo = ObjectInfoGet(object);
+ ObjectInfoAddObserver(objectInfo, callback, acceptList);
return object;
}
@@ -253,7 +352,7 @@ function ObjectUnobserve(object, callback) {
if (IS_UNDEFINED(objectInfo))
return object;
- EnsureObserverRemoved(objectInfo, callback);
+ ObjectInfoRemoveObserver(objectInfo, callback);
return object;
}
@@ -268,41 +367,67 @@ function ArrayUnobserve(object, callback) {
return ObjectUnobserve(object, callback);
}
-function EnqueueToCallback(callback, changeRecord) {
- var callbackInfo = NormalizeCallbackInfo(callback);
+function ObserverEnqueueIfActive(observer, objectInfo, changeRecord,
+ needsAccessCheck) {
+ if (!ObserverIsActive(observer, objectInfo) ||
+ !TypeMapHasType(ObserverGetAcceptTypes(observer), changeRecord.type)) {
+ return;
+ }
+
+ var callback = ObserverGetCallback(observer);
+ if (needsAccessCheck &&
+ // Drop all splice records on the floor for access-checked objects
+ (changeRecord.type == 'splice' ||
+ !%IsAccessAllowedForObserver(
+ callback, changeRecord.object, changeRecord.name))) {
+ return;
+ }
+
+ var callbackInfo = CallbackInfoNormalize(callback);
+ if (!observationState.pendingObservers)
+ observationState.pendingObservers = { __proto__: null };
observationState.pendingObservers[callbackInfo.priority] = callback;
callbackInfo.push(changeRecord);
%SetObserverDeliveryPending();
}
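
In short, ObserverEnqueueIfActive applies three gates before queueing a record, summarizing the logic above:

    // 1. The observer must be active: its accept types must be disjoint
    //    from the object's currently performing types.
    // 2. The observer's accept map must contain changeRecord.type.
    // 3. For access-checked objects, splice records are dropped outright
    //    and other records must pass %IsAccessAllowedForObserver.
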
-function EnqueueChangeRecord(changeRecord, observers) {
+function ObjectInfoEnqueueChangeRecord(objectInfo, changeRecord,
+ skipAccessCheck) {
// TODO(rossberg): adjust once there is a story for symbols vs proxies.
if (IS_SYMBOL(changeRecord.name)) return;
- for (var i = 0; i < observers.length; i++) {
- var observer = observers[i];
- if (IS_UNDEFINED(observer.accept[changeRecord.type]))
- continue;
+ var needsAccessCheck = !skipAccessCheck &&
+ %IsAccessCheckNeeded(changeRecord.object);
- EnqueueToCallback(observer.callback, changeRecord);
+ if (ChangeObserversIsOptimized(objectInfo.changeObservers)) {
+ var observer = objectInfo.changeObservers;
+ ObserverEnqueueIfActive(observer, objectInfo, changeRecord,
+ needsAccessCheck);
+ return;
+ }
+
+ for (var priority in objectInfo.changeObservers) {
+ var observer = objectInfo.changeObservers[priority];
+ ObserverEnqueueIfActive(observer, objectInfo, changeRecord,
+ needsAccessCheck);
}
}
function BeginPerformSplice(array) {
var objectInfo = objectInfoMap.get(array);
if (!IS_UNDEFINED(objectInfo))
- BeginPerformChange(objectInfo, 'splice');
+ ObjectInfoAddPerformingType(objectInfo, 'splice');
}
function EndPerformSplice(array) {
var objectInfo = objectInfoMap.get(array);
if (!IS_UNDEFINED(objectInfo))
- EndPerformChange(objectInfo, 'splice');
+ ObjectInfoRemovePerformingType(objectInfo, 'splice');
}
function EnqueueSpliceRecord(array, index, removed, addedCount) {
var objectInfo = objectInfoMap.get(array);
- if (IS_UNDEFINED(objectInfo) || objectInfo.changeObservers.length === 0)
+ if (!ObjectInfoHasActiveObservers(objectInfo))
return;
var changeRecord = {
@@ -315,19 +440,19 @@ function EnqueueSpliceRecord(array, index, removed, addedCount) {
ObjectFreeze(changeRecord);
ObjectFreeze(changeRecord.removed);
- EnqueueChangeRecord(changeRecord, objectInfo.changeObservers);
+ ObjectInfoEnqueueChangeRecord(objectInfo, changeRecord);
}
function NotifyChange(type, object, name, oldValue) {
var objectInfo = objectInfoMap.get(object);
- if (objectInfo.changeObservers.length === 0)
+ if (!ObjectInfoHasActiveObservers(objectInfo))
return;
var changeRecord = (arguments.length < 4) ?
{ type: type, object: object, name: name } :
{ type: type, object: object, name: name, oldValue: oldValue };
ObjectFreeze(changeRecord);
- EnqueueChangeRecord(changeRecord, objectInfo.changeObservers);
+ ObjectInfoEnqueueChangeRecord(objectInfo, changeRecord);
}
var notifierPrototype = {};
@@ -336,17 +461,16 @@ function ObjectNotifierNotify(changeRecord) {
if (!IS_SPEC_OBJECT(this))
throw MakeTypeError("called_on_non_object", ["notify"]);
- var target = notifierTargetMap.get(this);
- if (IS_UNDEFINED(target))
+ var objectInfo = ObjectInfoGetFromNotifier(this);
+ if (IS_UNDEFINED(objectInfo))
throw MakeTypeError("observe_notify_non_notifier");
if (!IS_STRING(changeRecord.type))
throw MakeTypeError("observe_type_non_string");
- var objectInfo = objectInfoMap.get(target);
- if (IS_UNDEFINED(objectInfo) || objectInfo.changeObservers.length === 0)
+ if (!ObjectInfoHasActiveObservers(objectInfo))
return;
- var newRecord = { object: target };
+ var newRecord = { object: ObjectInfoGetObject(objectInfo) };
for (var prop in changeRecord) {
if (prop === 'object') continue;
%DefineOrRedefineDataProperty(newRecord, prop, changeRecord[prop],
@@ -354,36 +478,28 @@ function ObjectNotifierNotify(changeRecord) {
}
ObjectFreeze(newRecord);
- EnqueueChangeRecord(newRecord, objectInfo.changeObservers);
+ ObjectInfoEnqueueChangeRecord(objectInfo, newRecord,
+ true /* skip access check */);
}
-function ObjectNotifierPerformChange(changeType, changeFn, receiver) {
+function ObjectNotifierPerformChange(changeType, changeFn) {
if (!IS_SPEC_OBJECT(this))
throw MakeTypeError("called_on_non_object", ["performChange"]);
- var target = notifierTargetMap.get(this);
- if (IS_UNDEFINED(target))
+ var objectInfo = ObjectInfoGetFromNotifier(this);
+
+ if (IS_UNDEFINED(objectInfo))
throw MakeTypeError("observe_notify_non_notifier");
if (!IS_STRING(changeType))
throw MakeTypeError("observe_perform_non_string");
if (!IS_SPEC_FUNCTION(changeFn))
throw MakeTypeError("observe_perform_non_function");
- if (IS_NULL_OR_UNDEFINED(receiver)) {
- receiver = %GetDefaultReceiver(changeFn) || receiver;
- } else if (!IS_SPEC_OBJECT(receiver) && %IsClassicModeFunction(changeFn)) {
- receiver = ToObject(receiver);
- }
-
- var objectInfo = objectInfoMap.get(target);
- if (IS_UNDEFINED(objectInfo))
- return;
-
- BeginPerformChange(objectInfo, changeType);
+ ObjectInfoAddPerformingType(objectInfo, changeType);
try {
- %_CallFunction(receiver, changeFn);
+ %_CallFunction(void 0, changeFn);
} finally {
- EndPerformChange(objectInfo, changeType);
+ ObjectInfoRemovePerformingType(objectInfo, changeType);
}
}
@@ -393,18 +509,8 @@ function ObjectGetNotifier(object) {
if (ObjectIsFrozen(object)) return null;
- var objectInfo = objectInfoMap.get(object);
- if (IS_UNDEFINED(objectInfo)) {
- objectInfo = CreateObjectInfo(object);
- %SetIsObserved(object);
- }
-
- if (IS_NULL(objectInfo.notifier)) {
- objectInfo.notifier = { __proto__: notifierPrototype };
- notifierTargetMap.set(objectInfo.notifier, object);
- }
-
- return objectInfo.notifier;
+ var objectInfo = ObjectInfoGet(object);
+ return ObjectInfoGetNotifier(objectInfo);
}
function CallbackDeliverPending(callback) {
@@ -417,12 +523,14 @@ function CallbackDeliverPending(callback) {
var priority = callbackInfo.priority;
callbackInfoMap.set(callback, priority);
- delete observationState.pendingObservers[priority];
+ if (observationState.pendingObservers)
+ delete observationState.pendingObservers[priority];
+
var delivered = [];
%MoveArrayContents(callbackInfo, delivered);
try {
- %Call(void 0, delivered, callback);
+ %_CallFunction(void 0, delivered, callback);
} catch (ex) {}
return true;
}
@@ -435,9 +543,9 @@ function ObjectDeliverChangeRecords(callback) {
}
function DeliverChangeRecords() {
- while (observationState.pendingObservers.length) {
+ while (observationState.pendingObservers) {
var pendingObservers = observationState.pendingObservers;
- observationState.pendingObservers = new InternalArray;
+ observationState.pendingObservers = null;
for (var i in pendingObservers) {
CallbackDeliverPending(pendingObservers[i]);
}
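
With pendingObservers now a nullable, lazily created map (see ObserverEnqueueIfActive above), delivery drains by swapping rather than truncating an array. A sketch of the invariant, inferred from the surrounding hunks:

    // null means nothing is queued; enqueueing creates the map on demand:
    //   observationState.pendingObservers = { __proto__: null };
    //   observationState.pendingObservers[priority] = callback;
    // DeliverChangeRecords swaps the map out before draining it, so callbacks
    // that enqueue further records populate a fresh map for the next pass.
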
diff --git a/deps/v8/src/objects-debug.cc b/deps/v8/src/objects-debug.cc
index e0cb8c929..5d9e161a7 100644
--- a/deps/v8/src/objects-debug.cc
+++ b/deps/v8/src/objects-debug.cc
@@ -230,7 +230,8 @@ void HeapObject::HeapObjectVerify() {
void HeapObject::VerifyHeapPointer(Object* p) {
CHECK(p->IsHeapObject());
- CHECK(HEAP->Contains(HeapObject::cast(p)));
+ HeapObject* ho = HeapObject::cast(p);
+ CHECK(ho->GetHeap()->Contains(ho));
}
@@ -328,20 +329,27 @@ void JSObject::JSObjectVerify() {
}
}
}
- CHECK_EQ((map()->has_fast_smi_or_object_elements() ||
- (elements() == GetHeap()->empty_fixed_array())),
- (elements()->map() == GetHeap()->fixed_array_map() ||
- elements()->map() == GetHeap()->fixed_cow_array_map()));
- CHECK(map()->has_fast_object_elements() == HasFastObjectElements());
+
+ // TODO(hpayer): deal gracefully with partially constructed JSObjects, when
+ // allocation folding is turned off.
+ if (reinterpret_cast<Map*>(elements()) !=
+ GetHeap()->one_pointer_filler_map()) {
+ CHECK_EQ((map()->has_fast_smi_or_object_elements() ||
+ (elements() == GetHeap()->empty_fixed_array())),
+ (elements()->map() == GetHeap()->fixed_array_map() ||
+ elements()->map() == GetHeap()->fixed_cow_array_map()));
+ CHECK(map()->has_fast_object_elements() == HasFastObjectElements());
+ }
}
void Map::MapVerify() {
- CHECK(!HEAP->InNewSpace(this));
+ Heap* heap = GetHeap();
+ CHECK(!heap->InNewSpace(this));
CHECK(FIRST_TYPE <= instance_type() && instance_type() <= LAST_TYPE);
CHECK(instance_size() == kVariableSizeSentinel ||
(kPointerSize <= instance_size() &&
- instance_size() < HEAP->Capacity()));
+ instance_size() < heap->Capacity()));
VerifyHeapPointer(prototype());
VerifyHeapPointer(instance_descriptors());
SLOW_ASSERT(instance_descriptors()->IsSortedNoDuplicates());
@@ -501,7 +509,7 @@ void JSDate::JSDateVerify() {
}
if (cache_stamp()->IsSmi()) {
CHECK(Smi::cast(cache_stamp())->value() <=
- Smi::cast(Isolate::Current()->date_cache()->stamp())->value());
+ Smi::cast(GetIsolate()->date_cache()->stamp())->value());
}
}
@@ -523,7 +531,7 @@ void String::StringVerify() {
CHECK(IsString());
CHECK(length() >= 0 && length() <= Smi::kMaxValue);
if (IsInternalizedString()) {
- CHECK(!HEAP->InNewSpace(this));
+ CHECK(!GetHeap()->InNewSpace(this));
}
if (IsConsString()) {
ConsString::cast(this)->ConsStringVerify();
@@ -615,7 +623,7 @@ void Oddball::OddballVerify() {
VerifyHeapPointer(to_string());
Object* number = to_number();
if (number->IsHeapObject()) {
- CHECK(number == HEAP->nan_value());
+ CHECK(number == HeapObject::cast(number)->GetHeap()->nan_value());
} else {
CHECK(number->IsSmi());
int value = Smi::cast(number)->value();
@@ -675,9 +683,14 @@ void Code::VerifyEmbeddedMapsDependency() {
void JSArray::JSArrayVerify() {
JSObjectVerify();
CHECK(length()->IsNumber() || length()->IsUndefined());
- CHECK(elements()->IsUndefined() ||
- elements()->IsFixedArray() ||
- elements()->IsFixedDoubleArray());
+ // TODO(hpayer): deal gracefully with partially constructed JSObjects, when
+ // allocation folding is turned off.
+ if (reinterpret_cast<Map*>(elements()) !=
+ GetHeap()->one_pointer_filler_map()) {
+ CHECK(elements()->IsUndefined() ||
+ elements()->IsFixedArray() ||
+ elements()->IsFixedDoubleArray());
+ }
}
@@ -856,6 +869,7 @@ void AccessorPair::AccessorPairVerify() {
CHECK(IsAccessorPair());
VerifyPointer(getter());
VerifyPointer(setter());
+ VerifySmiField(kAccessFlagsOffset);
}
@@ -888,6 +902,7 @@ void CallHandlerInfo::CallHandlerInfoVerify() {
void TemplateInfo::TemplateInfoVerify() {
VerifyPointer(tag());
VerifyPointer(property_list());
+ VerifyPointer(property_accessors());
}
@@ -896,7 +911,6 @@ void FunctionTemplateInfo::FunctionTemplateInfoVerify() {
TemplateInfoVerify();
VerifyPointer(serial_number());
VerifyPointer(call_code());
- VerifyPointer(property_accessors());
VerifyPointer(prototype_template());
VerifyPointer(parent_template());
VerifyPointer(named_property_handler());
@@ -1042,7 +1056,7 @@ void JSObject::IncrementSpillStatistics(SpillInformation* info) {
int holes = 0;
FixedArray* e = FixedArray::cast(elements());
int len = e->length();
- Heap* heap = HEAP;
+ Heap* heap = GetHeap();
for (int i = 0; i < len; i++) {
if (e->get(i) == heap->the_hole_value()) holes++;
}
diff --git a/deps/v8/src/objects-inl.h b/deps/v8/src/objects-inl.h
index 2841ccc3f..89abe5043 100644
--- a/deps/v8/src/objects-inl.h
+++ b/deps/v8/src/objects-inl.h
@@ -917,17 +917,17 @@ bool Object::HasSpecificClassOf(String* name) {
}
-MaybeObject* Object::GetElement(uint32_t index) {
+MaybeObject* Object::GetElement(Isolate* isolate, uint32_t index) {
// GetElement can trigger a getter which can cause allocation.
// This was not always the case. This ASSERT is here to catch
// leftover incorrect uses.
ASSERT(AllowHeapAllocation::IsAllowed());
- return GetElementWithReceiver(this, index);
+ return GetElementWithReceiver(isolate, this, index);
}
-Object* Object::GetElementNoExceptionThrown(uint32_t index) {
- MaybeObject* maybe = GetElementWithReceiver(this, index);
+Object* Object::GetElementNoExceptionThrown(Isolate* isolate, uint32_t index) {
+ MaybeObject* maybe = GetElementWithReceiver(isolate, this, index);
ASSERT(!maybe->IsFailure());
Object* result = NULL; // Initialization to please compiler.
maybe->ToObject(&result);
@@ -1185,7 +1185,6 @@ Heap* HeapObject::GetHeap() {
Heap* heap =
MemoryChunk::FromAddress(reinterpret_cast<Address>(this))->heap();
ASSERT(heap != NULL);
- ASSERT(heap->isolate() == Isolate::Current());
return heap;
}
@@ -1354,17 +1353,16 @@ inline bool AllocationSite::CanTrack(InstanceType type) {
}
-MaybeObject* JSObject::EnsureCanContainHeapObjectElements() {
- ValidateElements();
- ElementsKind elements_kind = map()->elements_kind();
+void JSObject::EnsureCanContainHeapObjectElements(Handle<JSObject> object) {
+ object->ValidateElements();
+ ElementsKind elements_kind = object->map()->elements_kind();
if (!IsFastObjectElementsKind(elements_kind)) {
if (IsFastHoleyElementsKind(elements_kind)) {
- return TransitionElementsKind(FAST_HOLEY_ELEMENTS);
+ TransitionElementsKind(object, FAST_HOLEY_ELEMENTS);
} else {
- return TransitionElementsKind(FAST_ELEMENTS);
+ TransitionElementsKind(object, FAST_ELEMENTS);
}
}
- return this;
}
@@ -1572,14 +1570,27 @@ MaybeObject* JSObject::MigrateInstance() {
// Converting any field to the most specific type will cause the
// GeneralizeFieldRepresentation algorithm to create the most general existing
// transition that matches the object. This achieves what is needed.
- return GeneralizeFieldRepresentation(0, Representation::None());
+ Map* original_map = map();
+ MaybeObject* maybe_result = GeneralizeFieldRepresentation(
+ 0, Representation::None(), ALLOW_AS_CONSTANT);
+ JSObject* result;
+ if (FLAG_trace_migration && maybe_result->To(&result)) {
+ PrintInstanceMigration(stdout, original_map, result->map());
+ }
+ return maybe_result;
}
MaybeObject* JSObject::TryMigrateInstance() {
Map* new_map = map()->CurrentMapForDeprecated();
if (new_map == NULL) return Smi::FromInt(0);
- return MigrateToMap(new_map);
+ Map* original_map = map();
+ MaybeObject* maybe_result = MigrateToMap(new_map);
+ JSObject* result;
+ if (FLAG_trace_migration && maybe_result->To(&result)) {
+ PrintInstanceMigration(stdout, original_map, result->map());
+ }
+ return maybe_result;
}
@@ -1857,14 +1868,15 @@ bool JSObject::HasFastProperties() {
}
-bool JSObject::TooManyFastProperties(int properties,
- JSObject::StoreFromKeyed store_mode) {
+bool JSObject::TooManyFastProperties(StoreFromKeyed store_mode) {
// Allow extra fast properties if the object has more than
- // kFastPropertiesSoftLimit in-object properties. When this is the case,
- // it is very unlikely that the object is being used as a dictionary
- // and there is a good chance that allowing more map transitions
- // will be worth it.
- int inobject = map()->inobject_properties();
+ // kFastPropertiesSoftLimit in-object properties. When this is the case, it is
+ // very unlikely that the object is being used as a dictionary and there is a
+ // good chance that allowing more map transitions will be worth it.
+ Map* map = this->map();
+ if (map->unused_property_fields() != 0) return false;
+
+ int inobject = map->inobject_properties();
int limit;
if (store_mode == CERTAINLY_NOT_STORE_FROM_KEYED) {
@@ -1872,7 +1884,7 @@ bool JSObject::TooManyFastProperties(int properties,
} else {
limit = Max(inobject, kFastPropertiesSoftLimit);
}
- return properties > limit;
+ return properties()->length() > limit;
}
@@ -1951,7 +1963,7 @@ bool FixedArray::is_the_hole(int index) {
void FixedArray::set(int index, Smi* value) {
- ASSERT(map() != HEAP->fixed_cow_array_map());
+ ASSERT(map() != GetHeap()->fixed_cow_array_map());
ASSERT(index >= 0 && index < this->length());
ASSERT(reinterpret_cast<Object*>(value)->IsSmi());
int offset = kHeaderSize + index * kPointerSize;
@@ -1960,7 +1972,7 @@ void FixedArray::set(int index, Smi* value) {
void FixedArray::set(int index, Object* value) {
- ASSERT(map() != HEAP->fixed_cow_array_map());
+ ASSERT(map() != GetHeap()->fixed_cow_array_map());
ASSERT(index >= 0 && index < this->length());
int offset = kHeaderSize + index * kPointerSize;
WRITE_FIELD(this, offset, value);
@@ -1986,8 +1998,8 @@ inline double FixedDoubleArray::canonical_not_the_hole_nan_as_double() {
double FixedDoubleArray::get_scalar(int index) {
- ASSERT(map() != HEAP->fixed_cow_array_map() &&
- map() != HEAP->fixed_array_map());
+ ASSERT(map() != GetHeap()->fixed_cow_array_map() &&
+ map() != GetHeap()->fixed_array_map());
ASSERT(index >= 0 && index < this->length());
double result = READ_DOUBLE_FIELD(this, kHeaderSize + index * kDoubleSize);
ASSERT(!is_the_hole_nan(result));
@@ -1995,8 +2007,8 @@ double FixedDoubleArray::get_scalar(int index) {
}
int64_t FixedDoubleArray::get_representation(int index) {
- ASSERT(map() != HEAP->fixed_cow_array_map() &&
- map() != HEAP->fixed_array_map());
+ ASSERT(map() != GetHeap()->fixed_cow_array_map() &&
+ map() != GetHeap()->fixed_array_map());
ASSERT(index >= 0 && index < this->length());
return READ_INT64_FIELD(this, kHeaderSize + index * kDoubleSize);
}
@@ -2011,8 +2023,8 @@ MaybeObject* FixedDoubleArray::get(int index) {
void FixedDoubleArray::set(int index, double value) {
- ASSERT(map() != HEAP->fixed_cow_array_map() &&
- map() != HEAP->fixed_array_map());
+ ASSERT(map() != GetHeap()->fixed_cow_array_map() &&
+ map() != GetHeap()->fixed_array_map());
int offset = kHeaderSize + index * kDoubleSize;
if (std::isnan(value)) value = canonical_not_the_hole_nan_as_double();
WRITE_DOUBLE_FIELD(this, offset, value);
@@ -2020,8 +2032,8 @@ void FixedDoubleArray::set(int index, double value) {
void FixedDoubleArray::set_the_hole(int index) {
- ASSERT(map() != HEAP->fixed_cow_array_map() &&
- map() != HEAP->fixed_array_map());
+ ASSERT(map() != GetHeap()->fixed_cow_array_map() &&
+ map() != GetHeap()->fixed_array_map());
int offset = kHeaderSize + index * kDoubleSize;
WRITE_DOUBLE_FIELD(this, offset, hole_nan_as_double());
}
@@ -2045,7 +2057,7 @@ WriteBarrierMode HeapObject::GetWriteBarrierMode(
void FixedArray::set(int index,
Object* value,
WriteBarrierMode mode) {
- ASSERT(map() != HEAP->fixed_cow_array_map());
+ ASSERT(map() != GetHeap()->fixed_cow_array_map());
ASSERT(index >= 0 && index < this->length());
int offset = kHeaderSize + index * kPointerSize;
WRITE_FIELD(this, offset, value);
@@ -2056,7 +2068,7 @@ void FixedArray::set(int index,
void FixedArray::NoIncrementalWriteBarrierSet(FixedArray* array,
int index,
Object* value) {
- ASSERT(array->map() != HEAP->fixed_cow_array_map());
+ ASSERT(array->map() != array->GetHeap()->fixed_cow_array_map());
ASSERT(index >= 0 && index < array->length());
int offset = kHeaderSize + index * kPointerSize;
WRITE_FIELD(array, offset, value);
@@ -2070,43 +2082,36 @@ void FixedArray::NoIncrementalWriteBarrierSet(FixedArray* array,
void FixedArray::NoWriteBarrierSet(FixedArray* array,
int index,
Object* value) {
- ASSERT(array->map() != HEAP->fixed_cow_array_map());
+ ASSERT(array->map() != array->GetHeap()->fixed_cow_array_map());
ASSERT(index >= 0 && index < array->length());
- ASSERT(!HEAP->InNewSpace(value));
+ ASSERT(!array->GetHeap()->InNewSpace(value));
WRITE_FIELD(array, kHeaderSize + index * kPointerSize, value);
}
void FixedArray::set_undefined(int index) {
- ASSERT(map() != HEAP->fixed_cow_array_map());
- set_undefined(GetHeap(), index);
-}
-
-
-void FixedArray::set_undefined(Heap* heap, int index) {
+ ASSERT(map() != GetHeap()->fixed_cow_array_map());
ASSERT(index >= 0 && index < this->length());
- ASSERT(!heap->InNewSpace(heap->undefined_value()));
- WRITE_FIELD(this, kHeaderSize + index * kPointerSize,
- heap->undefined_value());
+ ASSERT(!GetHeap()->InNewSpace(GetHeap()->undefined_value()));
+ WRITE_FIELD(this,
+ kHeaderSize + index * kPointerSize,
+ GetHeap()->undefined_value());
}
void FixedArray::set_null(int index) {
- set_null(GetHeap(), index);
-}
-
-
-void FixedArray::set_null(Heap* heap, int index) {
ASSERT(index >= 0 && index < this->length());
- ASSERT(!heap->InNewSpace(heap->null_value()));
- WRITE_FIELD(this, kHeaderSize + index * kPointerSize, heap->null_value());
+ ASSERT(!GetHeap()->InNewSpace(GetHeap()->null_value()));
+ WRITE_FIELD(this,
+ kHeaderSize + index * kPointerSize,
+ GetHeap()->null_value());
}
void FixedArray::set_the_hole(int index) {
- ASSERT(map() != HEAP->fixed_cow_array_map());
+ ASSERT(map() != GetHeap()->fixed_cow_array_map());
ASSERT(index >= 0 && index < this->length());
- ASSERT(!HEAP->InNewSpace(HEAP->the_hole_value()));
+ ASSERT(!GetHeap()->InNewSpace(GetHeap()->the_hole_value()));
WRITE_FIELD(this,
kHeaderSize + index * kPointerSize,
GetHeap()->the_hole_value());
@@ -2125,7 +2130,7 @@ Object** FixedArray::data_start() {
bool DescriptorArray::IsEmpty() {
ASSERT(length() >= kFirstIndex ||
- this == HEAP->empty_descriptor_array());
+ this == GetHeap()->empty_descriptor_array());
return length() < kFirstIndex;
}
@@ -2353,6 +2358,7 @@ PropertyType DescriptorArray::GetType(int descriptor_number) {
int DescriptorArray::GetFieldIndex(int descriptor_number) {
+ ASSERT(GetDetails(descriptor_number).type() == FIELD);
return GetDetails(descriptor_number).field_index();
}
@@ -4187,7 +4193,7 @@ static MaybeObject* EnsureHasTransitionArray(Map* map) {
TransitionArray* transitions;
MaybeObject* maybe_transitions;
if (!map->HasTransitionArray()) {
- maybe_transitions = TransitionArray::Allocate(0);
+ maybe_transitions = TransitionArray::Allocate(map->GetIsolate(), 0);
if (!maybe_transitions->To(&transitions)) return maybe_transitions;
transitions->set_back_pointer_storage(map->GetBackPointer());
} else if (!map->transitions()->IsFullTransitionArray()) {
@@ -4440,6 +4446,7 @@ ACCESSORS(Box, value, Object, kValueOffset)
ACCESSORS(AccessorPair, getter, Object, kGetterOffset)
ACCESSORS(AccessorPair, setter, Object, kSetterOffset)
+ACCESSORS_TO_SMI(AccessorPair, access_flags, kAccessFlagsOffset)
ACCESSORS(AccessCheckInfo, named_callback, Object, kNamedCallbackOffset)
ACCESSORS(AccessCheckInfo, indexed_callback, Object, kIndexedCallbackOffset)
@@ -4457,11 +4464,10 @@ ACCESSORS(CallHandlerInfo, data, Object, kDataOffset)
ACCESSORS(TemplateInfo, tag, Object, kTagOffset)
ACCESSORS(TemplateInfo, property_list, Object, kPropertyListOffset)
+ACCESSORS(TemplateInfo, property_accessors, Object, kPropertyAccessorsOffset)
ACCESSORS(FunctionTemplateInfo, serial_number, Object, kSerialNumberOffset)
ACCESSORS(FunctionTemplateInfo, call_code, Object, kCallCodeOffset)
-ACCESSORS(FunctionTemplateInfo, property_accessors, Object,
- kPropertyAccessorsOffset)
ACCESSORS(FunctionTemplateInfo, prototype_template, Object,
kPrototypeTemplateOffset)
ACCESSORS(FunctionTemplateInfo, parent_template, Object, kParentTemplateOffset)
@@ -4560,6 +4566,10 @@ BOOL_ACCESSORS(FunctionTemplateInfo, flag, needs_access_check,
kNeedsAccessCheckBit)
BOOL_ACCESSORS(FunctionTemplateInfo, flag, read_only_prototype,
kReadOnlyPrototypeBit)
+BOOL_ACCESSORS(FunctionTemplateInfo, flag, remove_prototype,
+ kRemovePrototypeBit)
+BOOL_ACCESSORS(FunctionTemplateInfo, flag, do_not_cache,
+ kDoNotCacheBit)
BOOL_ACCESSORS(SharedFunctionInfo, start_position_and_type, is_expression,
kIsExpressionBit)
BOOL_ACCESSORS(SharedFunctionInfo, start_position_and_type, is_toplevel,
@@ -4597,7 +4607,8 @@ SMI_ACCESSORS(SharedFunctionInfo, function_token_position,
kFunctionTokenPositionOffset)
SMI_ACCESSORS(SharedFunctionInfo, compiler_hints,
kCompilerHintsOffset)
-SMI_ACCESSORS(SharedFunctionInfo, opt_count, kOptCountOffset)
+SMI_ACCESSORS(SharedFunctionInfo, opt_count_and_bailout_reason,
+ kOptCountAndBailoutReasonOffset)
SMI_ACCESSORS(SharedFunctionInfo, counters, kCountersOffset)
#else
@@ -4646,7 +4657,9 @@ PSEUDO_SMI_ACCESSORS_HI(SharedFunctionInfo,
compiler_hints,
kCompilerHintsOffset)
-PSEUDO_SMI_ACCESSORS_LO(SharedFunctionInfo, opt_count, kOptCountOffset)
+PSEUDO_SMI_ACCESSORS_LO(SharedFunctionInfo,
+ opt_count_and_bailout_reason,
+ kOptCountAndBailoutReasonOffset)
PSEUDO_SMI_ACCESSORS_HI(SharedFunctionInfo, counters, kCountersOffset)
@@ -4829,7 +4842,7 @@ void SharedFunctionInfo::set_scope_info(ScopeInfo* value,
bool SharedFunctionInfo::is_compiled() {
return code() !=
- Isolate::Current()->builtins()->builtin(Builtins::kLazyCompile);
+ GetIsolate()->builtins()->builtin(Builtins::kLazyCompile);
}
@@ -4893,6 +4906,24 @@ void SharedFunctionInfo::set_opt_reenable_tries(int tries) {
}
+int SharedFunctionInfo::opt_count() {
+ return OptCountBits::decode(opt_count_and_bailout_reason());
+}
+
+
+void SharedFunctionInfo::set_opt_count(int opt_count) {
+ set_opt_count_and_bailout_reason(
+ OptCountBits::update(opt_count_and_bailout_reason(), opt_count));
+}
+
+
+BailoutReason SharedFunctionInfo::DisableOptimizationReason() {
+ BailoutReason reason = static_cast<BailoutReason>(
+ DisabledOptimizationReasonBits::decode(opt_count_and_bailout_reason()));
+ return reason;
+}
+
+
bool SharedFunctionInfo::has_deoptimization_support() {
Code* code = this->code();
return code->kind() == Code::FUNCTION && code->has_deoptimization_support();
@@ -4939,15 +4970,9 @@ bool JSFunction::IsMarkedForLazyRecompilation() {
}
-bool JSFunction::IsMarkedForInstallingRecompiledCode() {
- return code() == GetIsolate()->builtins()->builtin(
- Builtins::kInstallRecompiledCode);
-}
-
-
-bool JSFunction::IsMarkedForParallelRecompilation() {
+bool JSFunction::IsMarkedForConcurrentRecompilation() {
return code() == GetIsolate()->builtins()->builtin(
- Builtins::kParallelRecompile);
+ Builtins::kConcurrentRecompile);
}
@@ -4964,7 +4989,7 @@ Code* JSFunction::code() {
void JSFunction::set_code(Code* value) {
- ASSERT(!HEAP->InNewSpace(value));
+ ASSERT(!GetHeap()->InNewSpace(value));
Address entry = value->entry();
WRITE_INTPTR_FIELD(this, kCodeEntryOffset, reinterpret_cast<intptr_t>(entry));
GetHeap()->incremental_marking()->RecordWriteOfCodeEntry(
@@ -4975,7 +5000,7 @@ void JSFunction::set_code(Code* value) {
void JSFunction::set_code_no_write_barrier(Code* value) {
- ASSERT(!HEAP->InNewSpace(value));
+ ASSERT(!GetHeap()->InNewSpace(value));
Address entry = value->entry();
WRITE_INTPTR_FIELD(this, kCodeEntryOffset, reinterpret_cast<intptr_t>(entry));
}
@@ -4993,6 +5018,7 @@ void JSFunction::ReplaceCode(Code* code) {
context()->native_context()->AddOptimizedFunction(this);
}
if (was_optimized && !is_optimized) {
+ // TODO(titzer): linear in the number of optimized functions; fix!
context()->native_context()->RemoveOptimizedFunction(this);
}
}
@@ -5124,7 +5150,7 @@ void JSBuiltinsObject::set_javascript_builtin_code(Builtins::JavaScript id,
Code* value) {
ASSERT(id < kJSBuiltinsCount); // id is unsigned.
WRITE_FIELD(this, OffsetOfCodeWithId(id), value);
- ASSERT(!HEAP->InNewSpace(value));
+ ASSERT(!GetHeap()->InNewSpace(value));
}
@@ -5255,6 +5281,20 @@ void Code::set_type_feedback_info(Object* value, WriteBarrierMode mode) {
}
+Object* Code::next_code_link() {
+ CHECK(kind() == OPTIMIZED_FUNCTION);
+ return Object::cast(READ_FIELD(this, kTypeFeedbackInfoOffset));
+}
+
+
+void Code::set_next_code_link(Object* value, WriteBarrierMode mode) {
+ CHECK(kind() == OPTIMIZED_FUNCTION);
+ WRITE_FIELD(this, kTypeFeedbackInfoOffset, value);
+ CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kTypeFeedbackInfoOffset,
+ value, mode);
+}
+
+
int Code::stub_info() {
ASSERT(kind() == COMPARE_IC || kind() == COMPARE_NIL_IC ||
kind() == BINARY_OP_IC || kind() == LOAD_IC);
@@ -5276,25 +5316,6 @@ void Code::set_stub_info(int value) {
}
-Object* Code::code_to_deoptimize_link() {
- // Optimized code should not have type feedback.
- ASSERT(kind() == OPTIMIZED_FUNCTION);
- return READ_FIELD(this, kTypeFeedbackInfoOffset);
-}
-
-
-void Code::set_code_to_deoptimize_link(Object* value) {
- ASSERT(kind() == OPTIMIZED_FUNCTION);
- WRITE_FIELD(this, kTypeFeedbackInfoOffset, value);
-}
-
-
-Object** Code::code_to_deoptimize_link_slot() {
- ASSERT(kind() == OPTIMIZED_FUNCTION);
- return HeapObject::RawField(this, kTypeFeedbackInfoOffset);
-}
-
-
ACCESSORS(Code, gc_metadata, Object, kGCMetadataOffset)
INT_ACCESSORS(Code, ic_age, kICAgeOffset)
@@ -5836,6 +5857,36 @@ bool AccessorInfo::IsCompatibleReceiver(Object* receiver) {
}
+void AccessorPair::set_access_flags(v8::AccessControl access_control) {
+ int current = access_flags()->value();
+ current = BooleanBit::set(current,
+ kProhibitsOverwritingBit,
+ access_control & PROHIBITS_OVERWRITING);
+ current = BooleanBit::set(current,
+ kAllCanReadBit,
+ access_control & ALL_CAN_READ);
+ current = BooleanBit::set(current,
+ kAllCanWriteBit,
+ access_control & ALL_CAN_WRITE);
+ set_access_flags(Smi::FromInt(current));
+}
+
+
+bool AccessorPair::all_can_read() {
+ return BooleanBit::get(access_flags(), kAllCanReadBit);
+}
+
+
+bool AccessorPair::all_can_write() {
+ return BooleanBit::get(access_flags(), kAllCanWriteBit);
+}
+
+
+bool AccessorPair::prohibits_overwriting() {
+ return BooleanBit::get(access_flags(), kProhibitsOverwritingBit);
+}
+
+
template<typename Shape, typename Key>
void Dictionary<Shape, Key>::SetEntry(int entry,
Object* key,
@@ -6141,7 +6192,6 @@ SMI_ACCESSORS(AliasedArgumentsEntry, aliased_context_slot, kAliasedContextSlot)
Relocatable::Relocatable(Isolate* isolate) {
- ASSERT(isolate == Isolate::Current());
isolate_ = isolate;
prev_ = isolate->relocatable_top();
isolate->set_relocatable_top(this);
@@ -6149,7 +6199,6 @@ Relocatable::Relocatable(Isolate* isolate) {
Relocatable::~Relocatable() {
- ASSERT(isolate_ == Isolate::Current());
ASSERT_EQ(isolate_->relocatable_top(), this);
isolate_->set_relocatable_top(prev_);
}
diff --git a/deps/v8/src/objects-printer.cc b/deps/v8/src/objects-printer.cc
index 7b6f7a478..0b8fdfda0 100644
--- a/deps/v8/src/objects-printer.cc
+++ b/deps/v8/src/objects-printer.cc
@@ -985,6 +985,8 @@ void AccessorPair::AccessorPairPrint(FILE* out) {
getter()->ShortPrint(out);
PrintF(out, "\n - setter: ");
setter()->ShortPrint(out);
+ PrintF(out, "\n - flag: ");
+ access_flags()->ShortPrint(out);
}
@@ -1068,6 +1070,8 @@ void ObjectTemplateInfo::ObjectTemplateInfoPrint(FILE* out) {
tag()->ShortPrint(out);
PrintF(out, "\n - property_list: ");
property_list()->ShortPrint(out);
+ PrintF(out, "\n - property_accessors: ");
+ property_accessors()->ShortPrint(out);
PrintF(out, "\n - constructor: ");
constructor()->ShortPrint(out);
PrintF(out, "\n - internal_field_count: ");
diff --git a/deps/v8/src/objects-visiting-inl.h b/deps/v8/src/objects-visiting-inl.h
index 9398d6dfe..46cc9d798 100644
--- a/deps/v8/src/objects-visiting-inl.h
+++ b/deps/v8/src/objects-visiting-inl.h
@@ -304,7 +304,7 @@ void StaticMarkingVisitor<StaticVisitor>::VisitCodeTarget(
&& (target->ic_state() == MEGAMORPHIC || target->ic_state() == GENERIC ||
target->ic_state() == POLYMORPHIC || heap->flush_monomorphic_ics() ||
Serializer::enabled() || target->ic_age() != heap->global_ic_age())) {
- IC::Clear(rinfo->pc());
+ IC::Clear(target->GetIsolate(), rinfo->pc());
target = Code::GetCodeFromTargetAddress(rinfo->target_address());
}
heap->mark_compact_collector()->RecordRelocSlot(rinfo, target);
@@ -848,8 +848,9 @@ void Code::CodeIterateBody(ObjectVisitor* v) {
IteratePointer(v, kTypeFeedbackInfoOffset);
RelocIterator it(this, mode_mask);
+ Isolate* isolate = this->GetIsolate();
for (; !it.done(); it.next()) {
- it.rinfo()->Visit(v);
+ it.rinfo()->Visit(isolate, v);
}
}
diff --git a/deps/v8/src/objects.cc b/deps/v8/src/objects.cc
index 23eac77f7..35646b8be 100644
--- a/deps/v8/src/objects.cc
+++ b/deps/v8/src/objects.cc
@@ -32,6 +32,7 @@
#include "arguments.h"
#include "bootstrapper.h"
#include "codegen.h"
+#include "cpu-profiler.h"
#include "debug.h"
#include "deoptimizer.h"
#include "date.h"
@@ -40,6 +41,7 @@
#include "full-codegen.h"
#include "hydrogen.h"
#include "isolate-inl.h"
+#include "log.h"
#include "objects-inl.h"
#include "objects-visiting.h"
#include "objects-visiting-inl.h"
@@ -83,23 +85,19 @@ MaybeObject* Object::ToObject(Context* native_context) {
}
-MaybeObject* Object::ToObject() {
+MaybeObject* Object::ToObject(Isolate* isolate) {
if (IsJSReceiver()) {
return this;
} else if (IsNumber()) {
- Isolate* isolate = Isolate::Current();
Context* native_context = isolate->context()->native_context();
return CreateJSValue(native_context->number_function(), this);
} else if (IsBoolean()) {
- Isolate* isolate = HeapObject::cast(this)->GetIsolate();
Context* native_context = isolate->context()->native_context();
return CreateJSValue(native_context->boolean_function(), this);
} else if (IsString()) {
- Isolate* isolate = HeapObject::cast(this)->GetIsolate();
Context* native_context = isolate->context()->native_context();
return CreateJSValue(native_context->string_function(), this);
} else if (IsSymbol()) {
- Isolate* isolate = HeapObject::cast(this)->GetIsolate();
Context* native_context = isolate->context()->native_context();
return CreateJSValue(native_context->symbol_function(), this);
}
@@ -135,7 +133,7 @@ void Object::Lookup(Name* name, LookupResult* result) {
} else if (IsBoolean()) {
holder = native_context->boolean_function()->instance_prototype();
} else {
- Isolate::Current()->PushStackTraceAndDie(
+ result->isolate()->PushStackTraceAndDie(
0xDEAD0000, this, JSReceiver::cast(this)->map(), 0xDEAD0001);
}
}
@@ -341,7 +339,7 @@ MaybeObject* JSObject::GetPropertyWithCallback(Object* receiver,
AccessorDescriptor* callback =
reinterpret_cast<AccessorDescriptor*>(
Foreign::cast(structure)->foreign_address());
- MaybeObject* value = (callback->getter)(receiver, callback->data);
+ MaybeObject* value = (callback->getter)(isolate, receiver, callback->data);
RETURN_IF_SCHEDULED_EXCEPTION(isolate);
return value;
}
@@ -368,7 +366,8 @@ MaybeObject* JSObject::GetPropertyWithCallback(Object* receiver,
}
ExecutableAccessorInfo* data = ExecutableAccessorInfo::cast(structure);
Object* fun_obj = data->getter();
- v8::AccessorGetter call_fun = v8::ToCData<v8::AccessorGetter>(fun_obj);
+ v8::AccessorGetterCallback call_fun =
+ v8::ToCData<v8::AccessorGetterCallback>(fun_obj);
if (call_fun == NULL) return isolate->heap()->undefined_value();
HandleScope scope(isolate);
JSObject* self = JSObject::cast(receiver);
@@ -421,24 +420,22 @@ MaybeObject* JSProxy::GetPropertyWithHandler(Object* receiver_raw,
}
-Handle<Object> Object::GetProperty(Handle<Object> object, Handle<Name> name) {
+Handle<Object> Object::GetProperty(Handle<Object> object,
+ Handle<Name> name) {
// TODO(rossberg): The index test should not be here but in the GetProperty
// method (or somewhere else entirely). Needs more global clean-up.
uint32_t index;
+ Isolate* isolate = name->GetIsolate();
if (name->AsArrayIndex(&index))
- return GetElement(object, index);
- Isolate* isolate = object->IsHeapObject()
- ? Handle<HeapObject>::cast(object)->GetIsolate()
- : Isolate::Current();
+ return GetElement(isolate, object, index);
CALL_HEAP_FUNCTION(isolate, object->GetProperty(*name), Object);
}
-Handle<Object> Object::GetElement(Handle<Object> object, uint32_t index) {
- Isolate* isolate = object->IsHeapObject()
- ? Handle<HeapObject>::cast(object)->GetIsolate()
- : Isolate::Current();
- CALL_HEAP_FUNCTION(isolate, object->GetElement(index), Object);
+Handle<Object> Object::GetElement(Isolate* isolate,
+ Handle<Object> object,
+ uint32_t index) {
+ CALL_HEAP_FUNCTION(isolate, object->GetElement(isolate, index), Object);
}
@@ -451,14 +448,17 @@ MaybeObject* JSProxy::GetElementWithHandler(Object* receiver,
}
-MaybeObject* JSProxy::SetElementWithHandler(JSReceiver* receiver,
- uint32_t index,
- Object* value,
- StrictModeFlag strict_mode) {
- String* name;
- MaybeObject* maybe = GetHeap()->Uint32ToString(index);
- if (!maybe->To<String>(&name)) return maybe;
- return SetPropertyWithHandler(receiver, name, value, NONE, strict_mode);
+Handle<Object> JSProxy::SetElementWithHandler(Handle<JSProxy> proxy,
+ Handle<JSReceiver> receiver,
+ uint32_t index,
+ Handle<Object> value,
+ StrictModeFlag strict_mode) {
+ Isolate* isolate = proxy->GetIsolate();
+ Handle<String> name = isolate->factory()->Uint32ToString(index);
+ CALL_HEAP_FUNCTION(isolate,
+ proxy->SetPropertyWithHandler(
+ *receiver, *name, *value, NONE, strict_mode),
+ Object);
}
@@ -487,8 +487,8 @@ MaybeObject* Object::GetPropertyWithDefinedGetter(Object* receiver,
#endif
bool has_pending_exception;
- Handle<Object> result =
- Execution::Call(fun, self, 0, NULL, &has_pending_exception, true);
+ Handle<Object> result = Execution::Call(
+ isolate, fun, self, 0, NULL, &has_pending_exception, true);
// Check for pending exception and return the result.
if (has_pending_exception) return Failure::Exception();
return *result;
@@ -513,6 +513,12 @@ MaybeObject* JSObject::GetPropertyWithFailedAccessCheck(
return result->holder()->GetPropertyWithCallback(
receiver, result->GetCallbackObject(), name);
}
+ } else if (obj->IsAccessorPair()) {
+ AccessorPair* pair = AccessorPair::cast(obj);
+ if (pair->all_can_read()) {
+ return result->holder()->GetPropertyWithCallback(
+ receiver, result->GetCallbackObject(), name);
+ }
}
break;
}
@@ -573,6 +579,11 @@ PropertyAttributes JSObject::GetPropertyAttributeWithFailedAccessCheck(
if (info->all_can_read()) {
return result->GetAttributes();
}
+ } else if (obj->IsAccessorPair()) {
+ AccessorPair* pair = AccessorPair::cast(obj);
+ if (pair->all_can_read()) {
+ return result->GetAttributes();
+ }
}
break;
}
@@ -786,9 +797,7 @@ Handle<Object> Object::GetProperty(Handle<Object> object,
LookupResult* result,
Handle<Name> key,
PropertyAttributes* attributes) {
- Isolate* isolate = object->IsHeapObject()
- ? Handle<HeapObject>::cast(object)->GetIsolate()
- : Isolate::Current();
+ Isolate* isolate = result->isolate();
CALL_HEAP_FUNCTION(
isolate,
object->GetProperty(*receiver, result, *key, attributes),
@@ -801,9 +810,7 @@ MaybeObject* Object::GetPropertyOrFail(Handle<Object> object,
LookupResult* result,
Handle<Name> key,
PropertyAttributes* attributes) {
- Isolate* isolate = object->IsHeapObject()
- ? Handle<HeapObject>::cast(object)->GetIsolate()
- : Isolate::Current();
+ Isolate* isolate = result->isolate();
CALL_HEAP_FUNCTION_PASS_EXCEPTION(
isolate,
object->GetProperty(*receiver, result, *key, attributes));
@@ -816,7 +823,8 @@ MaybeObject* Object::GetProperty(Object* receiver,
PropertyAttributes* attributes) {
// Make sure that the top context does not change when doing
// callbacks or interceptor calls.
- AssertNoContextChange ncc;
+ AssertNoContextChangeWithHandleScope ncc;
+
Isolate* isolate = name->GetIsolate();
Heap* heap = isolate->heap();
@@ -894,10 +902,9 @@ MaybeObject* Object::GetProperty(Object* receiver,
}
-MaybeObject* Object::GetElementWithReceiver(Object* receiver, uint32_t index) {
- Isolate* isolate = IsSmi()
- ? Isolate::Current()
- : HeapObject::cast(this)->GetIsolate();
+MaybeObject* Object::GetElementWithReceiver(Isolate* isolate,
+ Object* receiver,
+ uint32_t index) {
Heap* heap = isolate->heap();
Object* holder = this;
@@ -1449,6 +1456,66 @@ void JSObject::PrintElementsTransition(
}
+void Map::PrintGeneralization(FILE* file,
+ const char* reason,
+ int modify_index,
+ int split,
+ int descriptors,
+ bool constant_to_field,
+ Representation old_representation,
+ Representation new_representation) {
+ PrintF(file, "[generalizing ");
+ constructor_name()->PrintOn(file);
+ PrintF(file, "] ");
+ String::cast(instance_descriptors()->GetKey(modify_index))->PrintOn(file);
+ if (constant_to_field) {
+ PrintF(file, ":c->f");
+ } else {
+ PrintF(file, ":%s->%s",
+ old_representation.Mnemonic(),
+ new_representation.Mnemonic());
+ }
+ PrintF(file, " (");
+ if (strlen(reason) > 0) {
+ PrintF(file, "%s", reason);
+ } else {
+ PrintF(file, "+%i maps", descriptors - split);
+ }
+ PrintF(file, ") [");
+ JavaScriptFrame::PrintTop(GetIsolate(), file, false, true);
+ PrintF(file, "]\n");
+}
+
+
+void JSObject::PrintInstanceMigration(FILE* file,
+ Map* original_map,
+ Map* new_map) {
+ PrintF(file, "[migrating ");
+ map()->constructor_name()->PrintOn(file);
+ PrintF(file, "] ");
+ DescriptorArray* o = original_map->instance_descriptors();
+ DescriptorArray* n = new_map->instance_descriptors();
+ for (int i = 0; i < original_map->NumberOfOwnDescriptors(); i++) {
+ Representation o_r = o->GetDetails(i).representation();
+ Representation n_r = n->GetDetails(i).representation();
+ if (!o_r.Equals(n_r)) {
+ String::cast(o->GetKey(i))->PrintOn(file);
+ PrintF(file, ":%s->%s ", o_r.Mnemonic(), n_r.Mnemonic());
+ } else if (o->GetDetails(i).type() == CONSTANT &&
+ n->GetDetails(i).type() == FIELD) {
+ Name* name = o->GetKey(i);
+ if (name->IsString()) {
+ String::cast(name)->PrintOn(file);
+ } else {
+ PrintF(file, "???");
+ }
+ PrintF(file, " ");
+ }
+ }
+ PrintF(file, "\n");
+}
+
+
void HeapObject::HeapObjectShortPrint(StringStream* accumulator) {
Heap* heap = GetHeap();
if (!heap->Contains(this)) {
@@ -1783,14 +1850,14 @@ String* JSReceiver::class_name() {
}
-String* JSReceiver::constructor_name() {
- if (map()->constructor()->IsJSFunction()) {
- JSFunction* constructor = JSFunction::cast(map()->constructor());
+String* Map::constructor_name() {
+ if (constructor()->IsJSFunction()) {
+ JSFunction* constructor = JSFunction::cast(this->constructor());
String* name = String::cast(constructor->shared()->name());
if (name->length() > 0) return name;
String* inferred_name = constructor->shared()->inferred_name();
if (inferred_name->length() > 0) return inferred_name;
- Object* proto = GetPrototype();
+ Object* proto = prototype();
if (proto->IsJSObject()) return JSObject::cast(proto)->constructor_name();
}
// TODO(rossberg): what about proxies?
@@ -1799,6 +1866,11 @@ String* JSReceiver::constructor_name() {
}
+String* JSReceiver::constructor_name() {
+ return map()->constructor_name();
+}
+
+
MaybeObject* JSObject::AddFastPropertyUsingMap(Map* new_map,
Name* name,
Object* value,
@@ -1828,30 +1900,12 @@ MaybeObject* JSObject::AddFastPropertyUsingMap(Map* new_map,
}
-static bool IsIdentifier(UnicodeCache* cache, Name* name) {
- // Checks whether the buffer contains an identifier (no escape).
- if (!name->IsString()) return false;
- String* string = String::cast(name);
- if (string->length() == 0) return false;
- ConsStringIteratorOp op;
- StringCharacterStream stream(string, &op);
- if (!cache->IsIdentifierStart(stream.GetNext())) {
- return false;
- }
- while (stream.HasMore()) {
- if (!cache->IsIdentifierPart(stream.GetNext())) {
- return false;
- }
- }
- return true;
-}
-
-
MaybeObject* JSObject::AddFastProperty(Name* name,
Object* value,
PropertyAttributes attributes,
StoreFromKeyed store_mode,
- ValueType value_type) {
+ ValueType value_type,
+ TransitionFlag flag) {
ASSERT(!IsJSGlobalProxy());
ASSERT(DescriptorArray::kNotFound ==
map()->instance_descriptors()->Search(
@@ -1861,15 +1915,10 @@ MaybeObject* JSObject::AddFastProperty(Name* name,
// hidden strings) and is not a real identifier.
// Normalize the object if it will have too many fast properties.
Isolate* isolate = GetHeap()->isolate();
- if ((!name->IsSymbol() && !IsIdentifier(isolate->unicode_cache(), name)
- && name != isolate->heap()->hidden_string()) ||
- (map()->unused_property_fields() == 0 &&
- TooManyFastProperties(properties()->length(), store_mode))) {
- Object* obj;
- MaybeObject* maybe_obj =
+ if (!name->IsCacheable(isolate) || TooManyFastProperties(store_mode)) {
+ MaybeObject* maybe_failure =
NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
-
+ if (maybe_failure->IsFailure()) return maybe_failure;
return AddSlowProperty(name, value, attributes);
}
@@ -1882,66 +1931,36 @@ MaybeObject* JSObject::AddFastProperty(Name* name,
FieldDescriptor new_field(name, index, attributes, representation);
- ASSERT(index < map()->inobject_properties() ||
- (index - map()->inobject_properties()) < properties()->length() ||
- map()->unused_property_fields() == 0);
-
- FixedArray* values = NULL;
-
- // TODO(verwaest): Merge with AddFastPropertyUsingMap.
- if (map()->unused_property_fields() == 0) {
- // Make room for the new value
- MaybeObject* maybe_values =
- properties()->CopySize(properties()->length() + kFieldsAdded);
- if (!maybe_values->To(&values)) return maybe_values;
- }
-
- TransitionFlag flag = INSERT_TRANSITION;
-
- Heap* heap = isolate->heap();
-
- Object* storage;
- MaybeObject* maybe_storage =
- value->AllocateNewStorageFor(heap, representation);
- if (!maybe_storage->To(&storage)) return maybe_storage;
-
- // Note that Map::CopyAddDescriptor has side-effects, the new map is already
- // inserted in the transition tree. No more allocations that might fail are
- // allowed after this point.
Map* new_map;
MaybeObject* maybe_new_map = map()->CopyAddDescriptor(&new_field, flag);
if (!maybe_new_map->To(&new_map)) return maybe_new_map;
- if (map()->unused_property_fields() == 0) {
- ASSERT(values != NULL);
- set_properties(values);
- new_map->set_unused_property_fields(kFieldsAdded - 1);
- } else {
- new_map->set_unused_property_fields(map()->unused_property_fields() - 1);
+ int unused_property_fields = map()->unused_property_fields() - 1;
+ if (unused_property_fields < 0) {
+ unused_property_fields += kFieldsAdded;
}
+ new_map->set_unused_property_fields(unused_property_fields);
- set_map(new_map);
-
- FastPropertyAtPut(index, storage);
- return value;
+ return AddFastPropertyUsingMap(new_map, name, value, index, representation);
}
MaybeObject* JSObject::AddConstantProperty(
Name* name,
Object* constant,
- PropertyAttributes attributes) {
+ PropertyAttributes attributes,
+ TransitionFlag initial_flag) {
// Allocate new instance descriptors with (name, constant) added
ConstantDescriptor d(name, constant, attributes);
TransitionFlag flag =
- // Do not add transitions to global objects.
+ // Do not add transitions to global objects.
(IsGlobalObject() ||
// Don't add transitions to special properties with non-trivial
// attributes.
attributes != NONE)
? OMIT_TRANSITION
- : INSERT_TRANSITION;
+ : initial_flag;
Map* new_map;
MaybeObject* maybe_new_map = map()->CopyAddDescriptor(&d, flag);
@@ -2001,7 +2020,8 @@ MaybeObject* JSObject::AddProperty(Name* name,
JSReceiver::StoreFromKeyed store_mode,
ExtensibilityCheck extensibility_check,
ValueType value_type,
- StoreMode mode) {
+ StoreMode mode,
+ TransitionFlag transition_flag) {
ASSERT(!IsJSGlobalProxy());
Map* map_of_this = map();
Heap* heap = GetHeap();
@@ -2028,10 +2048,10 @@ MaybeObject* JSObject::AddProperty(Name* name,
// !value->IsTheHole() &&
// !value->IsConsString()) {
if (value->IsJSFunction()) {
- result = AddConstantProperty(name, value, attributes);
+ result = AddConstantProperty(name, value, attributes, transition_flag);
} else {
result = AddFastProperty(
- name, value, attributes, store_mode, value_type);
+ name, value, attributes, store_mode, value_type, transition_flag);
}
} else {
// Normalize the object to prevent very large instance descriptors.
@@ -2071,7 +2091,8 @@ void JSObject::EnqueueChangeRecord(Handle<JSObject> object,
}
Handle<Object> args[] = { type, object, name, old_value };
bool threw;
- Execution::Call(Handle<JSFunction>(isolate->observers_notify_change()),
+ Execution::Call(isolate,
+ Handle<JSFunction>(isolate->observers_notify_change()),
isolate->factory()->undefined_value(),
old_value->IsTheHole() ? 3 : 4, args,
&threw);
@@ -2083,6 +2104,7 @@ void JSObject::DeliverChangeRecords(Isolate* isolate) {
ASSERT(isolate->observer_delivery_pending());
bool threw = false;
Execution::Call(
+ isolate,
isolate->observers_deliver_changes(),
isolate->factory()->undefined_value(),
0,
@@ -2098,7 +2120,6 @@ MaybeObject* JSObject::SetPropertyPostInterceptor(
Object* value,
PropertyAttributes attributes,
StrictModeFlag strict_mode,
- ExtensibilityCheck extensibility_check,
StoreMode mode) {
// Check local property, ignore interceptor.
LookupResult result(GetIsolate());
@@ -2110,13 +2131,12 @@ MaybeObject* JSObject::SetPropertyPostInterceptor(
return SetProperty(&result, name, value, attributes, strict_mode);
}
bool done = false;
- MaybeObject* result_object;
- result_object =
+ MaybeObject* result_object =
SetPropertyViaPrototypes(name, value, attributes, strict_mode, &done);
if (done) return result_object;
// Add a new real property.
return AddProperty(name, value, attributes, strict_mode,
- MAY_BE_STORE_FROM_KEYED, extensibility_check,
+ MAY_BE_STORE_FROM_KEYED, PERFORM_EXTENSIBILITY_CHECK,
OPTIMAL_REPRESENTATION, mode);
}
@@ -2137,105 +2157,6 @@ MaybeObject* JSObject::ReplaceSlowProperty(Name* name,
}
-MaybeObject* JSObject::ConvertTransitionToMapTransition(
- int transition_index,
- Name* name,
- Object* new_value,
- PropertyAttributes attributes) {
- Map* old_map = map();
- Map* old_target = old_map->GetTransition(transition_index);
- Object* result;
-
- MaybeObject* maybe_result = ConvertDescriptorToField(
- name, new_value, attributes, OMIT_TRANSITION_KEEP_REPRESENTATIONS);
- if (!maybe_result->To(&result)) return maybe_result;
-
- if (!HasFastProperties()) return result;
-
- // This method should only be used to convert existing transitions.
- Map* new_map = map();
-
- // TODO(verwaest): From here on we lose existing map transitions, causing
- // invalid back pointers. This will change once we can store multiple
- // transitions with the same key.
- bool owned_descriptors = old_map->owns_descriptors();
- if (owned_descriptors ||
- old_target->instance_descriptors() == old_map->instance_descriptors()) {
- // Since the conversion above generated a new fast map with an additional
- // property which can be shared as well, install this descriptor pointer
- // along the entire chain of smaller maps.
- Map* map;
- DescriptorArray* new_descriptors = new_map->instance_descriptors();
- DescriptorArray* old_descriptors = old_map->instance_descriptors();
- for (Object* current = old_map;
- !current->IsUndefined();
- current = map->GetBackPointer()) {
- map = Map::cast(current);
- if (map->instance_descriptors() != old_descriptors) break;
- map->SetEnumLength(Map::kInvalidEnumCache);
- map->set_instance_descriptors(new_descriptors);
- }
- old_map->set_owns_descriptors(false);
- }
-
- old_target->DeprecateTransitionTree();
-
- old_map->SetTransition(transition_index, new_map);
- new_map->SetBackPointer(old_map);
- return result;
-}
-
-
-MaybeObject* JSObject::ConvertDescriptorToField(Name* name,
- Object* new_value,
- PropertyAttributes attributes,
- TransitionFlag flag) {
- if (map()->unused_property_fields() == 0 &&
- TooManyFastProperties(properties()->length(), MAY_BE_STORE_FROM_KEYED)) {
- Object* obj;
- MaybeObject* maybe_obj = NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- return ReplaceSlowProperty(name, new_value, attributes);
- }
-
- Representation representation = IsJSContextExtensionObject()
- ? Representation::Tagged() : new_value->OptimalRepresentation();
- int index = map()->NextFreePropertyIndex();
- FieldDescriptor new_field(name, index, attributes, representation);
-
- // Make a new map for the object.
- Map* new_map;
- MaybeObject* maybe_new_map = map()->CopyInsertDescriptor(&new_field, flag);
- if (!maybe_new_map->To(&new_map)) return maybe_new_map;
-
- // Make new properties array if necessary.
- FixedArray* new_properties = NULL;
- int new_unused_property_fields = map()->unused_property_fields() - 1;
- if (map()->unused_property_fields() == 0) {
- new_unused_property_fields = kFieldsAdded - 1;
- MaybeObject* maybe_new_properties =
- properties()->CopySize(properties()->length() + kFieldsAdded);
- if (!maybe_new_properties->To(&new_properties)) return maybe_new_properties;
- }
-
- Heap* heap = GetHeap();
- Object* storage;
- MaybeObject* maybe_storage =
- new_value->AllocateNewStorageFor(heap, representation);
- if (!maybe_storage->To(&storage)) return maybe_storage;
-
- // Update pointers to commit changes.
- // Object points to the new map.
- new_map->set_unused_property_fields(new_unused_property_fields);
- set_map(new_map);
- if (new_properties != NULL) {
- set_properties(new_properties);
- }
- FastPropertyAtPut(index, new_value);
- return new_value;
-}
-
-
const char* Representation::Mnemonic() const {
switch (kind_) {
case kNone: return "v";
@@ -2267,9 +2188,9 @@ static void ZapEndOfFixedArray(Address new_end, int to_trim) {
template<RightTrimMode trim_mode>
static void RightTrimFixedArray(Heap* heap, FixedArray* elms, int to_trim) {
- ASSERT(elms->map() != HEAP->fixed_cow_array_map());
+ ASSERT(elms->map() != heap->fixed_cow_array_map());
// For now this trick is only applied to fixed arrays in new and paged space.
- ASSERT(!HEAP->lo_space()->Contains(elms));
+ ASSERT(!heap->lo_space()->Contains(elms));
const int len = elms->length();
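
RightTrimFixedArray is parameterized by a compile-time RightTrimMode, so the mode distinction costs nothing at runtime: each instantiation resolves the branch statically. The same shape in portable C++ (std::vector stands in for FixedArray; the enum names mirror the ones used in this file):

#include <vector>
#include <cassert>

enum RightTrimMode { FROM_GC, FROM_MUTATOR };

// Compile-time mode selection: the branch below is resolved per instantiation.
template <RightTrimMode trim_mode>
void RightTrim(std::vector<int>* elms, int to_trim) {
  assert(to_trim > 0 && to_trim <= static_cast<int>(elms->size()));
  if (trim_mode == FROM_MUTATOR) {
    // In V8 a mutator-triggered trim needs extra bookkeeping (write
    // barriers); in this toy the branch is only a placeholder.
  }
  elms->resize(elms->size() - to_trim);
}

int main() {
  std::vector<int> v(10, 7);
  RightTrim<FROM_GC>(&v, 4);
  return v.size() == 6 ? 0 : 1;
}
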
@@ -2382,6 +2303,10 @@ MaybeObject* JSObject::MigrateToMap(Map* new_map) {
PropertyDetails details = new_descriptors->GetDetails(i);
if (details.type() != FIELD) continue;
PropertyDetails old_details = old_descriptors->GetDetails(i);
+ if (old_details.type() == CALLBACKS) {
+ ASSERT(details.representation().IsTagged());
+ continue;
+ }
ASSERT(old_details.type() == CONSTANT ||
old_details.type() == FIELD);
Object* value = old_details.type() == CONSTANT
@@ -2438,10 +2363,11 @@ MaybeObject* JSObject::MigrateToMap(Map* new_map) {
MaybeObject* JSObject::GeneralizeFieldRepresentation(
int modify_index,
- Representation new_representation) {
+ Representation new_representation,
+ StoreMode store_mode) {
Map* new_map;
- MaybeObject* maybe_new_map =
- map()->GeneralizeRepresentation(modify_index, new_representation);
+ MaybeObject* maybe_new_map = map()->GeneralizeRepresentation(
+ modify_index, new_representation, store_mode);
if (!maybe_new_map->To(&new_map)) return maybe_new_map;
if (map() == new_map) return this;
@@ -2459,16 +2385,40 @@ int Map::NumberOfFields() {
}
-MaybeObject* Map::CopyGeneralizeAllRepresentations() {
+MaybeObject* Map::CopyGeneralizeAllRepresentations(
+ int modify_index,
+ StoreMode store_mode,
+ PropertyAttributes attributes,
+ const char* reason) {
Map* new_map;
MaybeObject* maybe_map = this->Copy();
if (!maybe_map->To(&new_map)) return maybe_map;
- new_map->instance_descriptors()->InitializeRepresentations(
- Representation::Tagged());
+ DescriptorArray* descriptors = new_map->instance_descriptors();
+ descriptors->InitializeRepresentations(Representation::Tagged());
+
+ // Unless the instance is being migrated, ensure that modify_index is a field.
+ PropertyDetails details = descriptors->GetDetails(modify_index);
+ if (store_mode == FORCE_FIELD && details.type() != FIELD) {
+ FieldDescriptor d(descriptors->GetKey(modify_index),
+ new_map->NumberOfFields(),
+ attributes,
+ Representation::Tagged());
+ d.SetSortedKeyIndex(details.pointer());
+ descriptors->Set(modify_index, &d);
+ int unused_property_fields = new_map->unused_property_fields() - 1;
+ if (unused_property_fields < 0) {
+ unused_property_fields += JSObject::kFieldsAdded;
+ }
+ new_map->set_unused_property_fields(unused_property_fields);
+ }
+
if (FLAG_trace_generalization) {
- PrintF("failed generalization %p -> %p\n",
- static_cast<void*>(this), static_cast<void*>(new_map));
+ PrintGeneralization(stdout, reason, modify_index,
+ new_map->NumberOfOwnDescriptors(),
+ new_map->NumberOfOwnDescriptors(),
+ details.type() == CONSTANT && store_mode == FORCE_FIELD,
+ Representation::Tagged(), Representation::Tagged());
}
return new_map;
}
@@ -2613,11 +2563,12 @@ Map* Map::FindLastMatchMap(int verbatim,
// - Otherwise, invalidate the outdated transition target from |updated|, and
// replace its transition tree with a new branch for the updated descriptors.
MaybeObject* Map::GeneralizeRepresentation(int modify_index,
- Representation new_representation) {
+ Representation new_representation,
+ StoreMode store_mode) {
Map* old_map = this;
DescriptorArray* old_descriptors = old_map->instance_descriptors();
- Representation old_representation =
- old_descriptors->GetDetails(modify_index).representation();
+ PropertyDetails old_details = old_descriptors->GetDetails(modify_index);
+ Representation old_representation = old_details.representation();
// It's fine to transition from None to anything but double without any
// modification to the object, because the default uninitialized value for
@@ -2626,12 +2577,6 @@ MaybeObject* Map::GeneralizeRepresentation(int modify_index,
if (old_representation.IsNone() &&
!new_representation.IsNone() &&
!new_representation.IsDouble()) {
- if (FLAG_trace_generalization) {
- PrintF("initializing representation %i: %p -> %s\n",
- modify_index,
- static_cast<void*>(this),
- new_representation.Mnemonic());
- }
old_descriptors->SetRepresentation(modify_index, new_representation);
return old_map;
}
@@ -2641,40 +2586,46 @@ MaybeObject* Map::GeneralizeRepresentation(int modify_index,
// Check the state of the root map.
if (!old_map->EquivalentToForTransition(root_map)) {
- return CopyGeneralizeAllRepresentations();
+ return CopyGeneralizeAllRepresentations(
+ modify_index, store_mode, old_details.attributes(), "not equivalent");
}
int verbatim = root_map->NumberOfOwnDescriptors();
+ if (store_mode != ALLOW_AS_CONSTANT && modify_index < verbatim) {
+ return CopyGeneralizeAllRepresentations(
+ modify_index, store_mode,
+ old_details.attributes(), "root modification");
+ }
+
Map* updated = root_map->FindUpdatedMap(
verbatim, descriptors, old_descriptors);
- if (updated == NULL) return CopyGeneralizeAllRepresentations();
+ if (updated == NULL) {
+ return CopyGeneralizeAllRepresentations(
+ modify_index, store_mode, old_details.attributes(), "incompatible");
+ }
DescriptorArray* updated_descriptors = updated->instance_descriptors();
int valid = updated->NumberOfOwnDescriptors();
+
+ // Directly change the map if the target map is more general. Ensure that the
+ // target type of the modify_index is a FIELD, unless we are migrating.
if (updated_descriptors->IsMoreGeneralThan(
- verbatim, valid, descriptors, old_descriptors)) {
+ verbatim, valid, descriptors, old_descriptors) &&
+ (store_mode == ALLOW_AS_CONSTANT ||
+ updated_descriptors->GetDetails(modify_index).type() == FIELD)) {
Representation updated_representation =
updated_descriptors->GetDetails(modify_index).representation();
- if (new_representation.fits_into(updated_representation)) {
- if (FLAG_trace_generalization &&
- !(modify_index == 0 && new_representation.IsNone())) {
- PropertyDetails old_details = old_descriptors->GetDetails(modify_index);
- PrintF("migrating to existing map %p(%s) -> %p(%s)\n",
- static_cast<void*>(this),
- old_details.representation().Mnemonic(),
- static_cast<void*>(updated),
- updated_representation.Mnemonic());
- }
- return updated;
- }
+ if (new_representation.fits_into(updated_representation)) return updated;
}
DescriptorArray* new_descriptors;
MaybeObject* maybe_descriptors = updated_descriptors->Merge(
- verbatim, valid, descriptors, old_descriptors);
+ verbatim, valid, descriptors, modify_index, store_mode, old_descriptors);
if (!maybe_descriptors->To(&new_descriptors)) return maybe_descriptors;
+ ASSERT(store_mode == ALLOW_AS_CONSTANT ||
+ new_descriptors->GetDetails(modify_index).type() == FIELD);
old_representation =
new_descriptors->GetDetails(modify_index).representation();
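
Representation generalization is easiest to picture as a join on a small lattice: a field's representation only ever moves upward, and fits_into asks whether a value's representation sits at or below the field's. A deliberately reduced model (the real lattice in this V8 also has None, Smi, Integer32, Double, HeapObject and Tagged, with more nuanced rules):

#include <algorithm>

enum Rep { kSmi = 0, kDouble = 1, kTagged = 2 };  // ordered toy lattice

bool fits_into(Rep value, Rep field) { return value <= field; }

// Generalizing picks the least upper bound of the old and new representations.
Rep generalize(Rep a, Rep b) { return std::max(a, b); }

int main() {
  Rep field = kSmi;
  if (!fits_into(kDouble, field)) field = generalize(field, kDouble);
  return field == kDouble ? 0 : 1;
}
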
@@ -2696,15 +2647,12 @@ MaybeObject* Map::GeneralizeRepresentation(int modify_index,
split_map->DeprecateTarget(
old_descriptors->GetKey(descriptor), new_descriptors);
- if (FLAG_trace_generalization &&
- !(modify_index == 0 && new_representation.IsNone())) {
- PrintF("migrating to new map %i: %p(%s) -> %p(%s) (%i steps)\n",
- modify_index,
- static_cast<void*>(this),
- old_representation.Mnemonic(),
- static_cast<void*>(new_descriptors),
- updated_representation.Mnemonic(),
- descriptors - descriptor);
+ if (FLAG_trace_generalization) {
+ PrintGeneralization(
+ stdout, "", modify_index, descriptor, descriptors,
+ old_descriptors->GetDetails(modify_index).type() == CONSTANT &&
+ store_mode == FORCE_FIELD,
+ old_representation, updated_representation);
}
Map* new_map = split_map;
@@ -2771,8 +2719,8 @@ MaybeObject* JSObject::SetPropertyWithInterceptor(
if (!interceptor->setter()->IsUndefined()) {
LOG(isolate, ApiNamedPropertyAccess("interceptor-named-set", this, name));
PropertyCallbackArguments args(isolate, interceptor->data(), this, this);
- v8::NamedPropertySetter setter =
- v8::ToCData<v8::NamedPropertySetter>(interceptor->setter());
+ v8::NamedPropertySetterCallback setter =
+ v8::ToCData<v8::NamedPropertySetterCallback>(interceptor->setter());
Handle<Object> value_unhole(value->IsTheHole() ?
isolate->heap()->undefined_value() :
value,
@@ -2787,8 +2735,7 @@ MaybeObject* JSObject::SetPropertyWithInterceptor(
this_handle->SetPropertyPostInterceptor(*name_handle,
*value_handle,
attributes,
- strict_mode,
- PERFORM_EXTENSIBILITY_CHECK);
+ strict_mode);
RETURN_IF_SCHEDULED_EXCEPTION(isolate);
return raw_result;
}
@@ -2852,7 +2799,8 @@ MaybeObject* JSObject::SetPropertyWithCallback(Object* structure,
AccessorDescriptor* callback =
reinterpret_cast<AccessorDescriptor*>(
Foreign::cast(structure)->foreign_address());
- MaybeObject* obj = (callback->setter)(this, value, callback->data);
+ MaybeObject* obj = (callback->setter)(
+ isolate, this, value, callback->data);
RETURN_IF_SCHEDULED_EXCEPTION(isolate);
if (obj->IsFailure()) return obj;
return *value_handle;
@@ -2874,7 +2822,8 @@ MaybeObject* JSObject::SetPropertyWithCallback(Object* structure,
// TODO(rossberg): Support symbols in the API.
if (name->IsSymbol()) return value;
Object* call_obj = data->setter();
- v8::AccessorSetter call_fun = v8::ToCData<v8::AccessorSetter>(call_obj);
+ v8::AccessorSetterCallback call_fun =
+ v8::ToCData<v8::AccessorSetterCallback>(call_obj);
if (call_fun == NULL) return value;
Handle<String> key(String::cast(name));
LOG(isolate, ApiNamedPropertyAccess("store", this, name));
@@ -2932,7 +2881,8 @@ MaybeObject* JSReceiver::SetPropertyWithDefinedSetter(JSReceiver* setter,
#endif
bool has_pending_exception;
Handle<Object> argv[] = { value_handle };
- Execution::Call(fun, self, ARRAY_SIZE(argv), argv, &has_pending_exception);
+ Execution::Call(
+ isolate, fun, self, ARRAY_SIZE(argv), argv, &has_pending_exception);
// Check for pending exception and return the result.
if (has_pending_exception) return Failure::Exception();
return *value_handle;
@@ -3056,48 +3006,101 @@ void Map::EnsureDescriptorSlack(Handle<Map> map, int slack) {
}
-void Map::AppendCallbackDescriptors(Handle<Map> map,
- Handle<Object> descriptors) {
- Isolate* isolate = map->GetIsolate();
- Handle<DescriptorArray> array(map->instance_descriptors());
- NeanderArray callbacks(descriptors);
- int nof_callbacks = callbacks.length();
-
- ASSERT(array->NumberOfSlackDescriptors() >= nof_callbacks);
+template<class T>
+static int AppendUniqueCallbacks(NeanderArray* callbacks,
+ Handle<typename T::Array> array,
+ int valid_descriptors) {
+ int nof_callbacks = callbacks->length();
+ Isolate* isolate = array->GetIsolate();
// Ensure the keys are unique names before writing them into the
// instance descriptor. Since it may cause a GC, it has to be done before we
// temporarily put the heap in an invalid state while appending descriptors.
for (int i = 0; i < nof_callbacks; ++i) {
- Handle<AccessorInfo> entry(AccessorInfo::cast(callbacks.get(i)));
- if (!entry->name()->IsUniqueName()) {
- Handle<String> key =
- isolate->factory()->InternalizedStringFromString(
- Handle<String>(String::cast(entry->name())));
- entry->set_name(*key);
- }
+ Handle<AccessorInfo> entry(AccessorInfo::cast(callbacks->get(i)));
+ if (entry->name()->IsUniqueName()) continue;
+ Handle<String> key =
+ isolate->factory()->InternalizedStringFromString(
+ Handle<String>(String::cast(entry->name())));
+ entry->set_name(*key);
}
- int nof = map->NumberOfOwnDescriptors();
-
// Fill in new callback descriptors. Process the callbacks from
// back to front so that the last callback with a given name takes
// precedence over previously added callbacks with that name.
for (int i = nof_callbacks - 1; i >= 0; i--) {
- AccessorInfo* entry = AccessorInfo::cast(callbacks.get(i));
+ AccessorInfo* entry = AccessorInfo::cast(callbacks->get(i));
Name* key = Name::cast(entry->name());
// Check if a descriptor with this name already exists before writing.
- if (array->Search(key, nof) == DescriptorArray::kNotFound) {
- CallbacksDescriptor desc(key, entry, entry->property_attributes());
- array->Append(&desc);
- nof += 1;
+ if (!T::Contains(key, entry, valid_descriptors, array)) {
+ T::Insert(key, entry, valid_descriptors, array);
+ valid_descriptors++;
}
}
+ return valid_descriptors;
+}
+
+struct DescriptorArrayAppender {
+ typedef DescriptorArray Array;
+ static bool Contains(Name* key,
+ AccessorInfo* entry,
+ int valid_descriptors,
+ Handle<DescriptorArray> array) {
+ return array->Search(key, valid_descriptors) != DescriptorArray::kNotFound;
+ }
+ static void Insert(Name* key,
+ AccessorInfo* entry,
+ int valid_descriptors,
+ Handle<DescriptorArray> array) {
+ CallbacksDescriptor desc(key, entry, entry->property_attributes());
+ array->Append(&desc);
+ }
+};
+
+
+struct FixedArrayAppender {
+ typedef FixedArray Array;
+ static bool Contains(Name* key,
+ AccessorInfo* entry,
+ int valid_descriptors,
+ Handle<FixedArray> array) {
+ for (int i = 0; i < valid_descriptors; i++) {
+ if (key == AccessorInfo::cast(array->get(i))->name()) return true;
+ }
+ return false;
+ }
+ static void Insert(Name* key,
+ AccessorInfo* entry,
+ int valid_descriptors,
+ Handle<FixedArray> array) {
+ array->set(valid_descriptors, entry);
+ }
+};
+
+
+void Map::AppendCallbackDescriptors(Handle<Map> map,
+ Handle<Object> descriptors) {
+ int nof = map->NumberOfOwnDescriptors();
+ Handle<DescriptorArray> array(map->instance_descriptors());
+ NeanderArray callbacks(descriptors);
+ ASSERT(array->NumberOfSlackDescriptors() >= callbacks.length());
+ nof = AppendUniqueCallbacks<DescriptorArrayAppender>(&callbacks, array, nof);
map->SetNumberOfOwnDescriptors(nof);
}
+int AccessorInfo::AppendUnique(Handle<Object> descriptors,
+ Handle<FixedArray> array,
+ int valid_descriptors) {
+ NeanderArray callbacks(descriptors);
+ ASSERT(array->length() >= callbacks.length() + valid_descriptors);
+ return AppendUniqueCallbacks<FixedArrayAppender>(&callbacks,
+ array,
+ valid_descriptors);
+}
+
+
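
AppendUniqueCallbacks<T> factors the duplicate-filtering loop out once and lets a policy struct supply the container-specific Contains/Insert, exactly the trick played above by DescriptorArrayAppender and FixedArrayAppender. A self-contained miniature with a hypothetical VectorAppender policy:

#include <string>
#include <vector>
#include <cstdio>

template <class Policy>
int AppendUnique(const std::vector<std::string>& callbacks,
                 typename Policy::Array* array, int valid) {
  // Back to front, so the last callback with a given name wins.
  for (int i = static_cast<int>(callbacks.size()) - 1; i >= 0; --i) {
    if (!Policy::Contains(callbacks[i], valid, array)) {
      Policy::Insert(callbacks[i], valid, array);
      ++valid;
    }
  }
  return valid;
}

struct VectorAppender {
  typedef std::vector<std::string> Array;
  static bool Contains(const std::string& key, int valid, Array* array) {
    for (int i = 0; i < valid; ++i)
      if ((*array)[i] == key) return true;
    return false;
  }
  static void Insert(const std::string& key, int, Array* array) {
    array->push_back(key);
  }
};

int main() {
  std::vector<std::string> callbacks = {"a", "b", "a"};
  std::vector<std::string> out;
  int n = AppendUnique<VectorAppender>(callbacks, &out, 0);
  std::printf("%d\n", n);  // 2: the duplicate "a" is filtered out
}
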
static bool ContainsMap(MapHandleList* maps, Handle<Map> map) {
ASSERT(!map.is_null());
for (int i = 0; i < maps->length(); ++i) {
@@ -3361,6 +3364,15 @@ MaybeObject* JSObject::SetPropertyWithFailedAccessCheck(
result->holder(),
strict_mode);
}
+ } else if (obj->IsAccessorPair()) {
+ AccessorPair* pair = AccessorPair::cast(obj);
+ if (pair->all_can_read()) {
+ return SetPropertyWithCallback(result->GetCallbackObject(),
+ name,
+ value,
+ result->holder(),
+ strict_mode);
+ }
}
break;
}
@@ -3485,9 +3497,9 @@ MUST_USE_RESULT MaybeObject* JSProxy::SetPropertyViaPrototypesWithHandler(
// Emulate [[GetProperty]] semantics for proxies.
bool has_pending_exception;
Handle<Object> argv[] = { result };
- Handle<Object> desc =
- Execution::Call(isolate->to_complete_property_descriptor(), result,
- ARRAY_SIZE(argv), argv, &has_pending_exception);
+ Handle<Object> desc = Execution::Call(
+ isolate, isolate->to_complete_property_descriptor(), result,
+ ARRAY_SIZE(argv), argv, &has_pending_exception);
if (has_pending_exception) return Failure::Exception();
// [[GetProperty]] requires checking that all properties are configurable.
@@ -3554,20 +3566,20 @@ MUST_USE_RESULT MaybeObject* JSProxy::SetPropertyViaPrototypesWithHandler(
Handle<Object> JSProxy::DeletePropertyWithHandler(
- Handle<JSProxy> object, Handle<Name> name, DeleteMode mode) {
- Isolate* isolate = object->GetIsolate();
+ Handle<JSProxy> proxy, Handle<Name> name, DeleteMode mode) {
+ Isolate* isolate = proxy->GetIsolate();
// TODO(rossberg): adjust once there is a story for symbols vs proxies.
if (name->IsSymbol()) return isolate->factory()->false_value();
Handle<Object> args[] = { name };
- Handle<Object> result = object->CallTrap(
+ Handle<Object> result = proxy->CallTrap(
"delete", Handle<Object>(), ARRAY_SIZE(args), args);
if (isolate->has_pending_exception()) return Handle<Object>();
bool result_bool = result->BooleanValue();
if (mode == STRICT_DELETION && !result_bool) {
- Handle<Object> handler(object->handler(), isolate);
+ Handle<Object> handler(proxy->handler(), isolate);
Handle<String> trap_name = isolate->factory()->InternalizeOneByteString(
STATIC_ASCII_VECTOR("delete"));
Handle<Object> args[] = { handler, trap_name };
@@ -3581,10 +3593,10 @@ Handle<Object> JSProxy::DeletePropertyWithHandler(
Handle<Object> JSProxy::DeleteElementWithHandler(
- Handle<JSProxy> object, uint32_t index, DeleteMode mode) {
- Isolate* isolate = object->GetIsolate();
+ Handle<JSProxy> proxy, uint32_t index, DeleteMode mode) {
+ Isolate* isolate = proxy->GetIsolate();
Handle<String> name = isolate->factory()->Uint32ToString(index);
- return JSProxy::DeletePropertyWithHandler(object, name, mode);
+ return JSProxy::DeletePropertyWithHandler(proxy, name, mode);
}
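
DeletePropertyWithHandler above shows the proxy protocol: ask the handler's "delete" trap for a boolean, and in strict mode turn a falsy answer into a thrown error. The control flow, reduced to plain C++ with a std::function standing in for the trap (illustrative only):

#include <functional>
#include <stdexcept>
#include <string>

// Ask the handler's "delete" trap; in strict mode a false answer throws,
// mirroring the STRICT_DELETION branch above.
bool DeleteWithHandler(const std::function<bool(const std::string&)>& trap,
                       const std::string& name, bool strict) {
  bool result = trap(name);
  if (strict && !result) {
    throw std::runtime_error("handler_delete returned false for " + name);
  }
  return result;
}

int main() {
  auto trap = [](const std::string&) { return true; };
  return DeleteWithHandler(trap, "x", true) ? 0 : 1;
}
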
@@ -3610,9 +3622,9 @@ MUST_USE_RESULT PropertyAttributes JSProxy::GetPropertyAttributeWithHandler(
bool has_pending_exception;
Handle<Object> argv[] = { result };
- Handle<Object> desc =
- Execution::Call(isolate->to_complete_property_descriptor(), result,
- ARRAY_SIZE(argv), argv, &has_pending_exception);
+ Handle<Object> desc = Execution::Call(
+ isolate, isolate->to_complete_property_descriptor(), result,
+ ARRAY_SIZE(argv), argv, &has_pending_exception);
if (has_pending_exception) return NONE;
// Convert result to PropertyAttributes.
@@ -3666,27 +3678,23 @@ MUST_USE_RESULT PropertyAttributes JSProxy::GetElementAttributeWithHandler(
}
-void JSProxy::Fix() {
- Isolate* isolate = GetIsolate();
- HandleScope scope(isolate);
- Handle<JSProxy> self(this);
+void JSProxy::Fix(Handle<JSProxy> proxy) {
+ Isolate* isolate = proxy->GetIsolate();
// Save identity hash.
- MaybeObject* maybe_hash = GetIdentityHash(OMIT_CREATION);
+ Handle<Object> hash = JSProxy::GetIdentityHash(proxy, OMIT_CREATION);
- if (IsJSFunctionProxy()) {
- isolate->factory()->BecomeJSFunction(self);
+ if (proxy->IsJSFunctionProxy()) {
+ isolate->factory()->BecomeJSFunction(proxy);
// Code will be set on the JavaScript side.
} else {
- isolate->factory()->BecomeJSObject(self);
+ isolate->factory()->BecomeJSObject(proxy);
}
- ASSERT(self->IsJSObject());
+ ASSERT(proxy->IsJSObject());
// Inherit identity, if it was present.
- Object* hash;
- if (maybe_hash->To<Object>(&hash) && hash->IsSmi()) {
- Handle<JSObject> new_self(JSObject::cast(*self));
- isolate->factory()->SetIdentityHash(new_self, Smi::cast(hash));
+ if (hash->IsSmi()) {
+ JSObject::SetIdentityHash(Handle<JSObject>::cast(proxy), Smi::cast(*hash));
}
}
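
The rewrite of JSProxy::Fix is part of the broader handlification in this commit: methods on a raw `this` become static functions taking Handle<T>, because a handle adds one level of indirection that a moving GC can update while raw pointers go stale across allocation. Roughly, with toy types rather than V8's real Handle machinery:

// A handle is a pointer to a slot; the GC may rewrite the slot's contents
// when it moves the object, and every handle then sees the update.
template <class T>
class Handle {
 public:
  explicit Handle(T** location) : location_(location) {}
  T* operator->() const { return *location_; }
 private:
  T** location_;  // slot owned by a HandleScope in the real implementation
};

struct Proxy { int hash; };

// Static function taking a handle instead of a member on a raw `this`:
// safe to call across operations that may move the Proxy.
int GetHash(Handle<Proxy> proxy) { return proxy->hash; }

int main() {
  Proxy a = {7};
  Proxy* slot = &a;
  Handle<Proxy> h(&slot);
  Proxy b = a;   // simulate the GC moving the object...
  slot = &b;     // ...and updating the slot; h follows automatically
  return GetHash(h) == 7 ? 0 : 1;
}
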
@@ -3714,7 +3722,7 @@ MUST_USE_RESULT Handle<Object> JSProxy::CallTrap(const char* name,
}
bool threw;
- return Execution::Call(trap, handler, argc, argv, &threw);
+ return Execution::Call(isolate, trap, handler, argc, argv, &threw);
}
@@ -3726,11 +3734,6 @@ void JSObject::AllocateStorageForMap(Handle<JSObject> object, Handle<Map> map) {
void JSObject::MigrateInstance(Handle<JSObject> object) {
- if (FLAG_trace_migration) {
- PrintF("migrating instance %p (%p)\n",
- static_cast<void*>(*object),
- static_cast<void*>(object->map()));
- }
CALL_HEAP_FUNCTION_VOID(
object->GetIsolate(),
object->MigrateInstance());
@@ -3738,11 +3741,6 @@ void JSObject::MigrateInstance(Handle<JSObject> object) {
Handle<Object> JSObject::TryMigrateInstance(Handle<JSObject> object) {
- if (FLAG_trace_migration) {
- PrintF("migrating instance (no new maps) %p (%p)\n",
- static_cast<void*>(*object),
- static_cast<void*>(object->map()));
- }
CALL_HEAP_FUNCTION(
object->GetIsolate(),
object->MigrateInstance(),
@@ -3752,14 +3750,148 @@ Handle<Object> JSObject::TryMigrateInstance(Handle<JSObject> object) {
Handle<Map> Map::GeneralizeRepresentation(Handle<Map> map,
int modify_index,
- Representation representation) {
+ Representation representation,
+ StoreMode store_mode) {
CALL_HEAP_FUNCTION(
map->GetIsolate(),
- map->GeneralizeRepresentation(modify_index, representation),
+ map->GeneralizeRepresentation(modify_index, representation, store_mode),
Map);
}
+static MaybeObject* SetPropertyUsingTransition(LookupResult* lookup,
+ Handle<Name> name,
+ Handle<Object> value,
+ PropertyAttributes attributes) {
+ Map* transition_map = lookup->GetTransitionTarget();
+ int descriptor = transition_map->LastAdded();
+
+ DescriptorArray* descriptors = transition_map->instance_descriptors();
+ PropertyDetails details = descriptors->GetDetails(descriptor);
+
+ if (details.type() == CALLBACKS || attributes != details.attributes()) {
+ // AddProperty will either normalize the object, or create a new fast copy
+ // of the map. If we get a fast copy of the map, all field representations
+ // will be tagged since the transition is omitted.
+ return lookup->holder()->AddProperty(
+ *name, *value, attributes, kNonStrictMode,
+ JSReceiver::CERTAINLY_NOT_STORE_FROM_KEYED,
+ JSReceiver::OMIT_EXTENSIBILITY_CHECK,
+ JSObject::FORCE_TAGGED, FORCE_FIELD, OMIT_TRANSITION);
+ }
+
+ // Keep the target CONSTANT if the same value is stored.
+ // TODO(verwaest): Also support keeping the placeholder
+ // (value->IsUninitialized) as constant.
+ if (details.type() == CONSTANT &&
+ descriptors->GetValue(descriptor) == *value) {
+ lookup->holder()->set_map(transition_map);
+ return *value;
+ }
+
+ Representation representation = details.representation();
+
+ if (!value->FitsRepresentation(representation) ||
+ details.type() == CONSTANT) {
+ MaybeObject* maybe_map = transition_map->GeneralizeRepresentation(
+ descriptor, value->OptimalRepresentation(), FORCE_FIELD);
+ if (!maybe_map->To(&transition_map)) return maybe_map;
+ Object* back = transition_map->GetBackPointer();
+ if (back->IsMap()) {
+ MaybeObject* maybe_failure =
+ lookup->holder()->MigrateToMap(Map::cast(back));
+ if (maybe_failure->IsFailure()) return maybe_failure;
+ }
+ descriptors = transition_map->instance_descriptors();
+ representation = descriptors->GetDetails(descriptor).representation();
+ }
+
+ int field_index = descriptors->GetFieldIndex(descriptor);
+ return lookup->holder()->AddFastPropertyUsingMap(
+ transition_map, *name, *value, field_index, representation);
+}
+
+
+static MaybeObject* SetPropertyToField(LookupResult* lookup,
+ Handle<Name> name,
+ Handle<Object> value) {
+ Representation representation = lookup->representation();
+ if (!value->FitsRepresentation(representation) ||
+ lookup->type() == CONSTANT) {
+ MaybeObject* maybe_failure =
+ lookup->holder()->GeneralizeFieldRepresentation(
+ lookup->GetDescriptorIndex(),
+ value->OptimalRepresentation(),
+ FORCE_FIELD);
+ if (maybe_failure->IsFailure()) return maybe_failure;
+ DescriptorArray* desc = lookup->holder()->map()->instance_descriptors();
+ int descriptor = lookup->GetDescriptorIndex();
+ representation = desc->GetDetails(descriptor).representation();
+ }
+
+ if (FLAG_track_double_fields && representation.IsDouble()) {
+ HeapNumber* storage = HeapNumber::cast(lookup->holder()->RawFastPropertyAt(
+ lookup->GetFieldIndex().field_index()));
+ storage->set_value(value->Number());
+ return *value;
+ }
+
+ lookup->holder()->FastPropertyAtPut(
+ lookup->GetFieldIndex().field_index(), *value);
+ return *value;
+}
+
+
+static MaybeObject* ConvertAndSetLocalProperty(LookupResult* lookup,
+ Name* name,
+ Object* value,
+ PropertyAttributes attributes) {
+ JSObject* object = lookup->holder();
+ if (object->TooManyFastProperties()) {
+ MaybeObject* maybe_failure = object->NormalizeProperties(
+ CLEAR_INOBJECT_PROPERTIES, 0);
+ if (maybe_failure->IsFailure()) return maybe_failure;
+ }
+
+ if (!object->HasFastProperties()) {
+ return object->ReplaceSlowProperty(name, value, attributes);
+ }
+
+ int descriptor_index = lookup->GetDescriptorIndex();
+ if (lookup->GetAttributes() == attributes) {
+ MaybeObject* maybe_failure = object->GeneralizeFieldRepresentation(
+ descriptor_index, Representation::Tagged(), FORCE_FIELD);
+ if (maybe_failure->IsFailure()) return maybe_failure;
+ } else {
+ Map* map;
+ MaybeObject* maybe_map = object->map()->CopyGeneralizeAllRepresentations(
+ descriptor_index, FORCE_FIELD, attributes, "attributes mismatch");
+ if (!maybe_map->To(&map)) return maybe_map;
+ MaybeObject* maybe_failure = object->MigrateToMap(map);
+ if (maybe_failure->IsFailure()) return maybe_failure;
+ }
+
+ DescriptorArray* descriptors = object->map()->instance_descriptors();
+ int index = descriptors->GetDetails(descriptor_index).field_index();
+ object->FastPropertyAtPut(index, value);
+ return value;
+}
+
+
+static MaybeObject* SetPropertyToFieldWithAttributes(
+ LookupResult* lookup,
+ Handle<Name> name,
+ Handle<Object> value,
+ PropertyAttributes attributes) {
+ if (lookup->GetAttributes() == attributes) {
+ if (value->IsUninitialized()) return *value;
+ return SetPropertyToField(lookup, name, value);
+ } else {
+ return ConvertAndSetLocalProperty(lookup, *name, *value, attributes);
+ }
+}
+
+
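
SetPropertyToField above fixes the order of operations when a value does not fit the field's current representation: generalize the map first, re-read the (possibly widened) representation, then store, so the map always describes what the slot holds. A compact sketch of that control flow with plain types:

#include <cmath>

enum Rep { kSmi, kDouble };

struct Field { Rep rep; double storage; };

static bool FitsRepresentation(double v, Rep r) {
  // Toy rule: a Smi field only holds small integers.
  return r != kSmi || (v == std::floor(v) && std::fabs(v) < (1 << 30));
}

// Widen the representation first, then store; never store a value the
// map does not describe, or later loads would misinterpret the slot.
void SetToField(Field* field, double value) {
  if (!FitsRepresentation(value, field->rep)) {
    field->rep = kDouble;   // the "GeneralizeFieldRepresentation" step
  }
  field->storage = value;   // the "FastPropertyAtPut" step
}

int main() {
  Field f = { kSmi, 1 };
  SetToField(&f, 2.5);      // forces the field to kDouble before storing
  return f.rep == kDouble ? 0 : 1;
}
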
MaybeObject* JSObject::SetPropertyForResult(LookupResult* lookup,
Name* name_raw,
Object* value_raw,
@@ -3768,9 +3900,10 @@ MaybeObject* JSObject::SetPropertyForResult(LookupResult* lookup,
StoreFromKeyed store_mode) {
Heap* heap = GetHeap();
Isolate* isolate = heap->isolate();
+
// Make sure that the top context does not change when doing callbacks or
// interceptor calls.
- AssertNoContextChange ncc;
+ AssertNoContextChangeWithHandleScope ncc;
// Optimization for 2-byte strings often used as keys in a decompression
// dictionary. We internalize these short keys to avoid constantly
@@ -3848,37 +3981,13 @@ MaybeObject* JSObject::SetPropertyForResult(LookupResult* lookup,
case NORMAL:
result = lookup->holder()->SetNormalizedProperty(lookup, *value);
break;
- case FIELD: {
- Representation representation = lookup->representation();
- if (!value->FitsRepresentation(representation)) {
- MaybeObject* maybe_failure =
- lookup->holder()->GeneralizeFieldRepresentation(
- lookup->GetDescriptorIndex(), value->OptimalRepresentation());
- if (maybe_failure->IsFailure()) return maybe_failure;
- DescriptorArray* desc = lookup->holder()->map()->instance_descriptors();
- int descriptor = lookup->GetDescriptorIndex();
- representation = desc->GetDetails(descriptor).representation();
- }
- if (FLAG_track_double_fields && representation.IsDouble()) {
- HeapNumber* storage =
- HeapNumber::cast(lookup->holder()->RawFastPropertyAt(
- lookup->GetFieldIndex().field_index()));
- storage->set_value(value->Number());
- result = *value;
- break;
- }
- lookup->holder()->FastPropertyAtPut(
- lookup->GetFieldIndex().field_index(), *value);
- result = *value;
+ case FIELD:
+ result = SetPropertyToField(lookup, name, value);
break;
- }
case CONSTANT:
// Only replace the constant if necessary.
if (*value == lookup->GetConstant()) return *value;
- // Preserve the attributes of this existing property.
- attributes = lookup->GetAttributes();
- result = lookup->holder()->ConvertDescriptorToField(
- *name, *value, attributes);
+ result = SetPropertyToField(lookup, name, value);
break;
case CALLBACKS: {
Object* callback_object = lookup->GetCallbackObject();
@@ -3890,55 +3999,7 @@ MaybeObject* JSObject::SetPropertyForResult(LookupResult* lookup,
*name, *value, attributes, strict_mode);
break;
case TRANSITION: {
- Map* transition_map = lookup->GetTransitionTarget();
- int descriptor = transition_map->LastAdded();
-
- DescriptorArray* descriptors = transition_map->instance_descriptors();
- PropertyDetails details = descriptors->GetDetails(descriptor);
-
- if (details.type() == FIELD) {
- if (attributes == details.attributes()) {
- Representation representation = details.representation();
- if (!value->FitsRepresentation(representation)) {
- MaybeObject* maybe_map = transition_map->GeneralizeRepresentation(
- descriptor, value->OptimalRepresentation());
- if (!maybe_map->To(&transition_map)) return maybe_map;
- Object* back = transition_map->GetBackPointer();
- if (back->IsMap()) {
- MaybeObject* maybe_failure =
- lookup->holder()->MigrateToMap(Map::cast(back));
- if (maybe_failure->IsFailure()) return maybe_failure;
- }
- descriptors = transition_map->instance_descriptors();
- representation =
- descriptors->GetDetails(descriptor).representation();
- }
- int field_index = descriptors->GetFieldIndex(descriptor);
- result = lookup->holder()->AddFastPropertyUsingMap(
- transition_map, *name, *value, field_index, representation);
- } else {
- result = lookup->holder()->ConvertDescriptorToField(
- *name, *value, attributes);
- }
- } else if (details.type() == CALLBACKS) {
- result = lookup->holder()->ConvertDescriptorToField(
- *name, *value, attributes);
- } else {
- ASSERT(details.type() == CONSTANT);
-
- Object* constant = descriptors->GetValue(descriptor);
- if (constant == *value) {
- // If the same constant function is being added we can simply
- // transition to the target map.
- lookup->holder()->set_map(transition_map);
- result = constant;
- } else {
- // Otherwise, replace with a map transition to a new map with a FIELD,
- // even if the value is a constant function.
- result = lookup->holder()->ConvertTransitionToMapTransition(
- lookup->GetTransitionIndex(), *name, *value, attributes);
- }
- }
+ result = SetPropertyUsingTransition(lookup, name, value, attributes);
break;
}
case HANDLER:
@@ -3968,6 +4029,29 @@ MaybeObject* JSObject::SetPropertyForResult(LookupResult* lookup,
}
+MaybeObject* JSObject::SetLocalPropertyIgnoreAttributesTrampoline(
+ Name* key,
+ Object* value,
+ PropertyAttributes attributes,
+ ValueType value_type,
+ StoreMode mode,
+ ExtensibilityCheck extensibility_check) {
+ // TODO(mstarzinger): The trampoline is a giant hack, don't use it anywhere
+ // else or handlification people will start hating you for all eternity.
+ HandleScope scope(GetIsolate());
+ IdempotentPointerToHandleCodeTrampoline trampoline(GetIsolate());
+ return trampoline.CallWithReturnValue(
+ &JSObject::SetLocalPropertyIgnoreAttributes,
+ Handle<JSObject>(this),
+ Handle<Name>(key),
+ Handle<Object>(value, GetIsolate()),
+ attributes,
+ value_type,
+ mode,
+ extensibility_check);
+}
+
+
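
The trampoline above bridges two calling conventions during the incremental handlification: a raw-pointer entry point wraps its arguments in handles so the handlified callee may allocate (and thus trigger GC) safely, then returns the result to old callers unchanged. A hypothetical miniature of that adapter shape, omitting the retry-after-GC machinery of the real class:

#include <cstdio>

template <class T>
struct Handle { T* ptr; T* operator*() const { return ptr; } };

struct Object { int value; };

// The already-handlified implementation.
Handle<Object> SetHandlified(Handle<Object> obj, int v) {
  (*obj)->value = v;
  return obj;
}

// Trampoline: raw-pointer entry point that re-wraps into handles and
// unwraps the result, so pre-handlification callers need not change yet.
Object* SetTrampoline(Object* raw, int v) {
  Handle<Object> h = { raw };
  return *SetHandlified(h, v);
}

int main() {
  Object o = { 0 };
  std::printf("%d\n", SetTrampoline(&o, 9)->value);  // 9
}
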
// Set a real local property, even if it is READ_ONLY. If the property is not
// present, add it with attributes NONE. This code is an exact clone of
// SetProperty, with the check for IsReadOnly and the check for a
@@ -3983,11 +4067,12 @@ Handle<Object> JSObject::SetLocalPropertyIgnoreAttributes(
Handle<Object> value,
PropertyAttributes attributes,
ValueType value_type,
- StoreMode mode) {
+ StoreMode mode,
+ ExtensibilityCheck extensibility_check) {
CALL_HEAP_FUNCTION(
object->GetIsolate(),
object->SetLocalPropertyIgnoreAttributes(
- *key, *value, attributes, value_type, mode),
+ *key, *value, attributes, value_type, mode, extensibility_check),
Object);
}
@@ -3997,10 +4082,11 @@ MaybeObject* JSObject::SetLocalPropertyIgnoreAttributes(
Object* value_raw,
PropertyAttributes attributes,
ValueType value_type,
- StoreMode mode) {
+ StoreMode mode,
+ ExtensibilityCheck extensibility_check) {
// Make sure that the top context does not change when doing callbacks or
// interceptor calls.
- AssertNoContextChange ncc;
+ AssertNoContextChangeWithHandleScope ncc;
Isolate* isolate = GetIsolate();
LookupResult lookup(isolate);
LocalLookup(name_raw, &lookup, true);
@@ -4025,7 +4111,13 @@ MaybeObject* JSObject::SetLocalPropertyIgnoreAttributes(
value_raw,
attributes,
value_type,
- mode);
+ mode,
+ extensibility_check);
+ }
+
+ if (lookup.IsFound() &&
+ (lookup.type() == INTERCEPTOR || lookup.type() == CALLBACKS)) {
+ LocalLookupRealNamedProperty(name_raw, &lookup);
}
// Check for accessor in prototype chain removed here in clone.
@@ -4033,7 +4125,7 @@ MaybeObject* JSObject::SetLocalPropertyIgnoreAttributes(
// Neither properties nor transitions found.
return AddProperty(
name_raw, value_raw, attributes, kNonStrictMode,
- MAY_BE_STORE_FROM_KEYED, PERFORM_EXTENSIBILITY_CHECK, value_type, mode);
+ MAY_BE_STORE_FROM_KEYED, extensibility_check, value_type, mode);
}
// From this point on everything needs to be handlified.
@@ -4046,101 +4138,38 @@ MaybeObject* JSObject::SetLocalPropertyIgnoreAttributes(
PropertyAttributes old_attributes = ABSENT;
bool is_observed = FLAG_harmony_observation && self->map()->is_observed();
if (is_observed && lookup.IsProperty()) {
- if (lookup.IsDataProperty()) old_value = Object::GetProperty(self, name);
+ if (lookup.IsDataProperty()) old_value =
+ Object::GetProperty(self, name);
old_attributes = lookup.GetAttributes();
}
// Check of IsReadOnly removed from here in clone.
MaybeObject* result = *value;
switch (lookup.type()) {
- case NORMAL: {
- PropertyDetails details = PropertyDetails(attributes, NORMAL, 0);
- result = self->SetNormalizedProperty(*name, *value, details);
+ case NORMAL:
+ result = self->ReplaceSlowProperty(*name, *value, attributes);
break;
- }
- case FIELD: {
- Representation representation = lookup.representation();
- Representation value_representation =
- value->OptimalRepresentation(value_type);
- if (value_representation.IsNone()) break;
- if (!value_representation.fits_into(representation)) {
- MaybeObject* maybe_failure = self->GeneralizeFieldRepresentation(
- lookup.GetDescriptorIndex(), value_representation);
- if (maybe_failure->IsFailure()) return maybe_failure;
- DescriptorArray* desc = self->map()->instance_descriptors();
- int descriptor = lookup.GetDescriptorIndex();
- representation = desc->GetDetails(descriptor).representation();
- }
- if (FLAG_track_double_fields && representation.IsDouble()) {
- HeapNumber* storage =
- HeapNumber::cast(self->RawFastPropertyAt(
- lookup.GetFieldIndex().field_index()));
- storage->set_value(value->Number());
- result = *value;
- break;
- }
- self->FastPropertyAtPut(lookup.GetFieldIndex().field_index(), *value);
- result = *value;
+ case FIELD:
+ result = SetPropertyToFieldWithAttributes(
+ &lookup, name, value, attributes);
break;
- }
case CONSTANT:
- // Only replace the function if necessary.
- if (*value != lookup.GetConstant()) {
- // Preserve the attributes of this existing property.
- attributes = lookup.GetAttributes();
- result = self->ConvertDescriptorToField(*name, *value, attributes);
+ // Only replace the constant if necessary.
+ if (lookup.GetAttributes() != attributes ||
+ *value != lookup.GetConstant()) {
+ result = SetPropertyToFieldWithAttributes(
+ &lookup, name, value, attributes);
}
break;
case CALLBACKS:
- case INTERCEPTOR:
- // Override callback in clone
- result = self->ConvertDescriptorToField(*name, *value, attributes);
+ result = ConvertAndSetLocalProperty(&lookup, *name, *value, attributes);
break;
- case TRANSITION: {
- Map* transition_map = lookup.GetTransitionTarget();
- int descriptor = transition_map->LastAdded();
-
- DescriptorArray* descriptors = transition_map->instance_descriptors();
- PropertyDetails details = descriptors->GetDetails(descriptor);
-
- if (details.type() == FIELD) {
- if (attributes == details.attributes()) {
- Representation representation = details.representation();
- Representation value_representation =
- value->OptimalRepresentation(value_type);
- if (!value_representation.fits_into(representation)) {
- MaybeObject* maybe_map = transition_map->GeneralizeRepresentation(
- descriptor, value_representation);
- if (!maybe_map->To(&transition_map)) return maybe_map;
- Object* back = transition_map->GetBackPointer();
- if (back->IsMap()) {
- MaybeObject* maybe_failure = self->MigrateToMap(Map::cast(back));
- if (maybe_failure->IsFailure()) return maybe_failure;
- }
- descriptors = transition_map->instance_descriptors();
- representation =
- descriptors->GetDetails(descriptor).representation();
- }
- int field_index = descriptors->GetFieldIndex(descriptor);
- result = self->AddFastPropertyUsingMap(
- transition_map, *name, *value, field_index, representation);
- } else {
- result = self->ConvertDescriptorToField(*name, *value, attributes);
- }
- } else if (details.type() == CALLBACKS) {
- result = self->ConvertDescriptorToField(*name, *value, attributes);
- } else {
- ASSERT(details.type() == CONSTANT);
-
- // Replace transition to CONSTANT FUNCTION with a map transition to a
- // new map with a FIELD, even if the value is a function.
- result = self->ConvertTransitionToMapTransition(
- lookup.GetTransitionIndex(), *name, *value, attributes);
- }
+ case TRANSITION:
+ result = SetPropertyUsingTransition(&lookup, name, value, attributes);
break;
- }
- case HANDLER:
case NONEXISTENT:
+ case HANDLER:
+ case INTERCEPTOR:
UNREACHABLE();
}
@@ -4202,20 +4231,20 @@ PropertyAttributes JSObject::GetPropertyAttributeWithInterceptor(
if (name->IsSymbol()) return ABSENT;
Isolate* isolate = GetIsolate();
+ HandleScope scope(isolate);
// Make sure that the top context does not change when doing
// callbacks or interceptor calls.
AssertNoContextChange ncc;
- HandleScope scope(isolate);
Handle<InterceptorInfo> interceptor(GetNamedInterceptor());
Handle<JSObject> receiver_handle(receiver);
Handle<JSObject> holder_handle(this);
Handle<String> name_handle(String::cast(name));
PropertyCallbackArguments args(isolate, interceptor->data(), receiver, this);
if (!interceptor->query()->IsUndefined()) {
- v8::NamedPropertyQuery query =
- v8::ToCData<v8::NamedPropertyQuery>(interceptor->query());
+ v8::NamedPropertyQueryCallback query =
+ v8::ToCData<v8::NamedPropertyQueryCallback>(interceptor->query());
LOG(isolate,
ApiNamedPropertyAccess("interceptor-named-has", *holder_handle, name));
v8::Handle<v8::Integer> result =
@@ -4225,8 +4254,8 @@ PropertyAttributes JSObject::GetPropertyAttributeWithInterceptor(
return static_cast<PropertyAttributes>(result->Int32Value());
}
} else if (!interceptor->getter()->IsUndefined()) {
- v8::NamedPropertyGetter getter =
- v8::ToCData<v8::NamedPropertyGetter>(interceptor->getter());
+ v8::NamedPropertyGetterCallback getter =
+ v8::ToCData<v8::NamedPropertyGetterCallback>(interceptor->getter());
LOG(isolate,
ApiNamedPropertyAccess("interceptor-named-get-has", this, name));
v8::Handle<v8::Value> result =
@@ -4337,25 +4366,27 @@ PropertyAttributes JSObject::GetElementAttributeWithReceiver(
PropertyAttributes JSObject::GetElementAttributeWithInterceptor(
JSReceiver* receiver, uint32_t index, bool continue_search) {
Isolate* isolate = GetIsolate();
+ HandleScope scope(isolate);
+
// Make sure that the top context does not change when doing
// callbacks or interceptor calls.
AssertNoContextChange ncc;
- HandleScope scope(isolate);
+
Handle<InterceptorInfo> interceptor(GetIndexedInterceptor());
Handle<JSReceiver> hreceiver(receiver);
Handle<JSObject> holder(this);
PropertyCallbackArguments args(isolate, interceptor->data(), receiver, this);
if (!interceptor->query()->IsUndefined()) {
- v8::IndexedPropertyQuery query =
- v8::ToCData<v8::IndexedPropertyQuery>(interceptor->query());
+ v8::IndexedPropertyQueryCallback query =
+ v8::ToCData<v8::IndexedPropertyQueryCallback>(interceptor->query());
LOG(isolate,
ApiIndexedPropertyAccess("interceptor-indexed-has", this, index));
v8::Handle<v8::Integer> result = args.Call(query, index);
if (!result.IsEmpty())
return static_cast<PropertyAttributes>(result->Int32Value());
} else if (!interceptor->getter()->IsUndefined()) {
- v8::IndexedPropertyGetter getter =
- v8::ToCData<v8::IndexedPropertyGetter>(interceptor->getter());
+ v8::IndexedPropertyGetterCallback getter =
+ v8::ToCData<v8::IndexedPropertyGetterCallback>(interceptor->getter());
LOG(isolate,
ApiIndexedPropertyAccess("interceptor-indexed-get-has", this, index));
v8::Handle<v8::Value> result = args.Call(getter, index);
@@ -4448,30 +4479,21 @@ void NormalizedMapCache::Clear() {
}
-void JSObject::UpdateMapCodeCache(Handle<JSObject> object,
- Handle<Name> name,
- Handle<Code> code) {
- Isolate* isolate = object->GetIsolate();
- CALL_HEAP_FUNCTION_VOID(isolate,
- object->UpdateMapCodeCache(*name, *code));
-}
-
-
-MaybeObject* JSObject::UpdateMapCodeCache(Name* name, Code* code) {
- if (map()->is_shared()) {
+void HeapObject::UpdateMapCodeCache(Handle<HeapObject> object,
+ Handle<Name> name,
+ Handle<Code> code) {
+ Handle<Map> map(object->map());
+ if (map->is_shared()) {
+ Handle<JSObject> receiver = Handle<JSObject>::cast(object);
// Fast case maps are never marked as shared.
- ASSERT(!HasFastProperties());
+ ASSERT(!receiver->HasFastProperties());
// Replace the map with an identical copy that can be safely modified.
- Object* obj;
- { MaybeObject* maybe_obj = map()->CopyNormalized(KEEP_INOBJECT_PROPERTIES,
- UNIQUE_NORMALIZED_MAP);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- GetIsolate()->counters()->normalized_maps()->Increment();
-
- set_map(Map::cast(obj));
+ map = Map::CopyNormalized(map, KEEP_INOBJECT_PROPERTIES,
+ UNIQUE_NORMALIZED_MAP);
+ receiver->GetIsolate()->counters()->normalized_maps()->Increment();
+ receiver->set_map(*map);
}
- return map()->UpdateCodeCache(name, code);
+ Map::UpdateCodeCache(map, name, code);
}
@@ -4722,7 +4744,7 @@ Smi* JSReceiver::GenerateIdentityHash() {
do {
// Generate a random 32-bit hash value but limit range to fit
// within a smi.
- hash_value = V8::RandomPrivate(isolate) & Smi::kMaxValue;
+ hash_value = isolate->random_number_generator()->NextInt() & Smi::kMaxValue;
attempts++;
} while (hash_value == 0 && attempts < 30);
hash_value = hash_value != 0 ? hash_value : 1; // never return 0
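
The loop above masks a random 32-bit draw down to the smi range and retries a bounded number of times to avoid the reserved value 0. A portable rendition (the smi bound is assumed here to be 2^30 - 1, matching 32-bit smi tagging):

#include <cstdint>
#include <random>

// Assumed stand-in for Smi::kMaxValue under 32-bit smi tagging.
const uint32_t kMaxSmi = (1u << 30) - 1;

int32_t GenerateIdentityHash(std::mt19937* rng) {
  int32_t hash = 0;
  for (int attempts = 0; hash == 0 && attempts < 30; ++attempts) {
    hash = static_cast<int32_t>((*rng)() & kMaxSmi);  // limit to smi range
  }
  return hash != 0 ? hash : 1;  // never return 0
}

int main() {
  std::mt19937 rng(1234);
  return GenerateIdentityHash(&rng) != 0 ? 0 : 1;
}
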
@@ -4731,17 +4753,16 @@ Smi* JSReceiver::GenerateIdentityHash() {
}
-MaybeObject* JSObject::SetIdentityHash(Smi* hash, CreationFlag flag) {
- MaybeObject* maybe = SetHiddenProperty(GetHeap()->identity_hash_string(),
- hash);
- if (maybe->IsFailure()) return maybe;
- return this;
+void JSObject::SetIdentityHash(Handle<JSObject> object, Smi* hash) {
+ CALL_HEAP_FUNCTION_VOID(object->GetIsolate(),
+ object->SetHiddenProperty(
+ object->GetHeap()->identity_hash_string(), hash));
}
-int JSObject::GetIdentityHash(Handle<JSObject> obj) {
- CALL_AND_RETRY_OR_DIE(obj->GetIsolate(),
- obj->GetIdentityHash(ALLOW_CREATION),
+int JSObject::GetIdentityHash(Handle<JSObject> object) {
+ CALL_AND_RETRY_OR_DIE(object->GetIsolate(),
+ object->GetIdentityHash(ALLOW_CREATION),
return Smi::cast(__object__)->value(),
return 0);
}
@@ -4766,6 +4787,12 @@ MaybeObject* JSObject::GetIdentityHash(CreationFlag flag) {
}
+Handle<Object> JSProxy::GetIdentityHash(Handle<JSProxy> proxy,
+ CreationFlag flag) {
+ CALL_HEAP_FUNCTION(proxy->GetIsolate(), proxy->GetIdentityHash(flag), Object);
+}
+
+
MaybeObject* JSProxy::GetIdentityHash(CreationFlag flag) {
Object* hash = this->hash();
if (!hash->IsSmi() && flag == ALLOW_CREATION) {
@@ -4858,30 +4885,27 @@ MaybeObject* JSObject::SetHiddenProperty(Name* key, Object* value) {
}
-void JSObject::DeleteHiddenProperty(Name* key) {
+void JSObject::DeleteHiddenProperty(Handle<JSObject> object, Handle<Name> key) {
+ Isolate* isolate = object->GetIsolate();
ASSERT(key->IsUniqueName());
- if (IsJSGlobalProxy()) {
- // For a proxy, use the prototype as target object.
- Object* proxy_parent = GetPrototype();
- // If the proxy is detached, return immediately.
- if (proxy_parent->IsNull()) return;
- ASSERT(proxy_parent->IsJSGlobalObject());
- JSObject::cast(proxy_parent)->DeleteHiddenProperty(key);
- return;
+
+ if (object->IsJSGlobalProxy()) {
+ Handle<Object> proto(object->GetPrototype(), isolate);
+ if (proto->IsNull()) return;
+ ASSERT(proto->IsJSGlobalObject());
+ return DeleteHiddenProperty(Handle<JSObject>::cast(proto), key);
}
- ASSERT(!IsJSGlobalProxy());
+
MaybeObject* hidden_lookup =
- GetHiddenPropertiesHashTable(ONLY_RETURN_INLINE_VALUE);
+ object->GetHiddenPropertiesHashTable(ONLY_RETURN_INLINE_VALUE);
Object* inline_value = hidden_lookup->ToObjectUnchecked();
// We never delete (inline-stored) identity hashes.
- ASSERT(key != GetHeap()->identity_hash_string());
+ ASSERT(*key != isolate->heap()->identity_hash_string());
if (inline_value->IsUndefined() || inline_value->IsSmi()) return;
- ObjectHashTable* hashtable = ObjectHashTable::cast(inline_value);
- MaybeObject* delete_result = hashtable->Put(key, GetHeap()->the_hole_value());
- USE(delete_result);
- ASSERT(!delete_result->IsFailure()); // Delete does not cause GC.
+ Handle<ObjectHashTable> hashtable(ObjectHashTable::cast(inline_value));
+ PutIntoObjectHashTable(hashtable, key, isolate->factory()->the_hole_value());
}
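
Note that DeleteHiddenProperty never shrinks the hidden-property table: it overwrites the entry with the hole sentinel, a tombstone-style delete that performs no allocation (which is why the replaced code could assert the operation never fails). Schematically, with a flat vector standing in for the hash table:

#include <string>
#include <vector>

struct Entry { std::string key; int value; bool is_hole; };

// Tombstone delete: overwrite in place, never reallocate.
void Delete(std::vector<Entry>* table, const std::string& key) {
  for (Entry& e : *table) {
    if (!e.is_hole && e.key == key) { e.is_hole = true; return; }
  }
}

int main() {
  std::vector<Entry> table = { {"k", 1, false} };
  Delete(&table, "k");
  return table[0].is_hole ? 0 : 1;
}
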
@@ -4952,13 +4976,13 @@ MaybeObject* JSObject::GetHiddenPropertiesHashTable(
ASSERT_EQ(hashtable, new_table);
}
- MaybeObject* store_result =
- SetPropertyPostInterceptor(GetHeap()->hidden_string(),
- hashtable,
- DONT_ENUM,
- kNonStrictMode,
- OMIT_EXTENSIBILITY_CHECK,
- FORCE_FIELD);
+ MaybeObject* store_result = SetLocalPropertyIgnoreAttributesTrampoline(
+ GetHeap()->hidden_string(),
+ hashtable,
+ DONT_ENUM,
+ OPTIMAL_REPRESENTATION,
+ ALLOW_AS_CONSTANT,
+ OMIT_EXTENSIBILITY_CHECK);
if (store_result->IsFailure()) return store_result;
return hashtable;
}
@@ -4985,13 +5009,13 @@ MaybeObject* JSObject::SetHiddenPropertiesHashTable(Object* value) {
}
}
}
- MaybeObject* store_result =
- SetPropertyPostInterceptor(GetHeap()->hidden_string(),
- value,
- DONT_ENUM,
- kNonStrictMode,
- OMIT_EXTENSIBILITY_CHECK,
- FORCE_FIELD);
+ MaybeObject* store_result = SetLocalPropertyIgnoreAttributesTrampoline(
+ GetHeap()->hidden_string(),
+ value,
+ DONT_ENUM,
+ OPTIMAL_REPRESENTATION,
+ ALLOW_AS_CONSTANT,
+ OMIT_EXTENSIBILITY_CHECK);
if (store_result->IsFailure()) return store_result;
return this;
}
@@ -5022,8 +5046,8 @@ Handle<Object> JSObject::DeletePropertyWithInterceptor(Handle<JSObject> object,
Handle<InterceptorInfo> interceptor(object->GetNamedInterceptor());
if (!interceptor->deleter()->IsUndefined()) {
- v8::NamedPropertyDeleter deleter =
- v8::ToCData<v8::NamedPropertyDeleter>(interceptor->deleter());
+ v8::NamedPropertyDeleterCallback deleter =
+ v8::ToCData<v8::NamedPropertyDeleterCallback>(interceptor->deleter());
LOG(isolate,
ApiNamedPropertyAccess("interceptor-named-delete", *object, *name));
PropertyCallbackArguments args(
@@ -5046,111 +5070,110 @@ Handle<Object> JSObject::DeletePropertyWithInterceptor(Handle<JSObject> object,
}
-MaybeObject* JSObject::DeleteElementWithInterceptor(uint32_t index) {
- Isolate* isolate = GetIsolate();
- Heap* heap = isolate->heap();
+// TODO(mstarzinger): Temporary wrapper until handlified.
+static Handle<Object> AccessorDelete(Handle<JSObject> object,
+ uint32_t index,
+ JSObject::DeleteMode mode) {
+ CALL_HEAP_FUNCTION(object->GetIsolate(),
+ object->GetElementsAccessor()->Delete(*object,
+ index,
+ mode),
+ Object);
+}
+
+
+Handle<Object> JSObject::DeleteElementWithInterceptor(Handle<JSObject> object,
+ uint32_t index) {
+ Isolate* isolate = object->GetIsolate();
+ Factory* factory = isolate->factory();
+
// Make sure that the top context does not change when doing
// callbacks or interceptor calls.
AssertNoContextChange ncc;
- HandleScope scope(isolate);
- Handle<InterceptorInfo> interceptor(GetIndexedInterceptor());
- if (interceptor->deleter()->IsUndefined()) return heap->false_value();
- v8::IndexedPropertyDeleter deleter =
- v8::ToCData<v8::IndexedPropertyDeleter>(interceptor->deleter());
- Handle<JSObject> this_handle(this);
+
+ Handle<InterceptorInfo> interceptor(object->GetIndexedInterceptor());
+ if (interceptor->deleter()->IsUndefined()) return factory->false_value();
+ v8::IndexedPropertyDeleterCallback deleter =
+ v8::ToCData<v8::IndexedPropertyDeleterCallback>(interceptor->deleter());
LOG(isolate,
- ApiIndexedPropertyAccess("interceptor-indexed-delete", this, index));
- PropertyCallbackArguments args(isolate, interceptor->data(), this, this);
+ ApiIndexedPropertyAccess("interceptor-indexed-delete", *object, index));
+ PropertyCallbackArguments args(
+ isolate, interceptor->data(), *object, *object);
v8::Handle<v8::Boolean> result = args.Call(deleter, index);
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
+ RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
if (!result.IsEmpty()) {
ASSERT(result->IsBoolean());
Handle<Object> result_internal = v8::Utils::OpenHandle(*result);
result_internal->VerifyApiCallResultType();
- return *result_internal;
+ // Rebox CustomArguments::kReturnValueOffset before returning.
+ return handle(*result_internal, isolate);
}
- MaybeObject* raw_result = this_handle->GetElementsAccessor()->Delete(
- *this_handle,
- index,
- NORMAL_DELETION);
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
- return raw_result;
+ Handle<Object> delete_result = AccessorDelete(object, index, NORMAL_DELETION);
+ RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
+ return delete_result;
}
-Handle<Object> JSObject::DeleteElement(Handle<JSObject> obj,
+Handle<Object> JSObject::DeleteElement(Handle<JSObject> object,
uint32_t index,
DeleteMode mode) {
- CALL_HEAP_FUNCTION(obj->GetIsolate(),
- obj->DeleteElement(index, mode),
- Object);
-}
-
+ Isolate* isolate = object->GetIsolate();
+ Factory* factory = isolate->factory();
-MaybeObject* JSObject::DeleteElement(uint32_t index, DeleteMode mode) {
- Isolate* isolate = GetIsolate();
// Check access rights if needed.
- if (IsAccessCheckNeeded() &&
- !isolate->MayIndexedAccess(this, index, v8::ACCESS_DELETE)) {
- isolate->ReportFailedAccessCheck(this, v8::ACCESS_DELETE);
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
- return isolate->heap()->false_value();
+ if (object->IsAccessCheckNeeded() &&
+ !isolate->MayIndexedAccess(*object, index, v8::ACCESS_DELETE)) {
+ isolate->ReportFailedAccessCheck(*object, v8::ACCESS_DELETE);
+ RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
+ return factory->false_value();
}
- if (IsStringObjectWithCharacterAt(index)) {
+ if (object->IsStringObjectWithCharacterAt(index)) {
if (mode == STRICT_DELETION) {
// Deleting a non-configurable property in strict mode.
- HandleScope scope(isolate);
- Handle<Object> holder(this, isolate);
- Handle<Object> name = isolate->factory()->NewNumberFromUint(index);
- Handle<Object> args[2] = { name, holder };
+ Handle<Object> name = factory->NewNumberFromUint(index);
+ Handle<Object> args[2] = { name, object };
Handle<Object> error =
- isolate->factory()->NewTypeError("strict_delete_property",
- HandleVector(args, 2));
- return isolate->Throw(*error);
+ factory->NewTypeError("strict_delete_property",
+ HandleVector(args, 2));
+ isolate->Throw(*error);
+ return Handle<Object>();
}
- return isolate->heap()->false_value();
+ return factory->false_value();
}
- if (IsJSGlobalProxy()) {
- Object* proto = GetPrototype();
- if (proto->IsNull()) return isolate->heap()->false_value();
+ if (object->IsJSGlobalProxy()) {
+ Handle<Object> proto(object->GetPrototype(), isolate);
+ if (proto->IsNull()) return factory->false_value();
ASSERT(proto->IsJSGlobalObject());
- return JSGlobalObject::cast(proto)->DeleteElement(index, mode);
+ return DeleteElement(Handle<JSObject>::cast(proto), index, mode);
}
- // From this point on everything needs to be handlified.
- HandleScope scope(isolate);
- Handle<JSObject> self(this);
-
Handle<Object> old_value;
bool should_enqueue_change_record = false;
- if (FLAG_harmony_observation && self->map()->is_observed()) {
- should_enqueue_change_record = self->HasLocalElement(index);
+ if (FLAG_harmony_observation && object->map()->is_observed()) {
+ should_enqueue_change_record = object->HasLocalElement(index);
if (should_enqueue_change_record) {
- old_value = self->GetLocalElementAccessorPair(index) != NULL
- ? Handle<Object>::cast(isolate->factory()->the_hole_value())
- : Object::GetElement(self, index);
+ old_value = object->GetLocalElementAccessorPair(index) != NULL
+ ? Handle<Object>::cast(factory->the_hole_value())
+ : Object::GetElement(isolate, object, index);
}
}
- MaybeObject* result;
// Skip interceptor if forcing deletion.
- if (self->HasIndexedInterceptor() && mode != FORCE_DELETION) {
- result = self->DeleteElementWithInterceptor(index);
+ Handle<Object> result;
+ if (object->HasIndexedInterceptor() && mode != FORCE_DELETION) {
+ result = DeleteElementWithInterceptor(object, index);
} else {
- result = self->GetElementsAccessor()->Delete(*self, index, mode);
+ result = AccessorDelete(object, index, mode);
}
- Handle<Object> hresult;
- if (!result->ToHandle(&hresult, isolate)) return result;
-
- if (should_enqueue_change_record && !self->HasLocalElement(index)) {
- Handle<String> name = isolate->factory()->Uint32ToString(index);
- EnqueueChangeRecord(self, "deleted", name, old_value);
+ if (should_enqueue_change_record && !object->HasLocalElement(index)) {
+ Handle<String> name = factory->Uint32ToString(index);
+ EnqueueChangeRecord(object, "deleted", name, old_value);
}
- return *hresult;
+ return result;
}
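
The observed-object path in DeleteElement follows a capture/mutate/compare protocol: snapshot the old value if the object is observed, perform the deletion (which an interceptor may veto), then enqueue a "deleted" record only if the element actually vanished. In outline, with standard containers in place of V8 objects:

#include <map>
#include <string>
#include <vector>

struct ChangeRecord { std::string type; std::string name; int old_value; };

// Capture-mutate-compare: the record is enqueued only if the element is
// really gone afterwards. std::map::erase cannot veto, but in V8 an
// interceptor can, hence the re-check after the mutation.
bool DeleteObserved(std::map<std::string, int>* obj, const std::string& key,
                    bool observed, std::vector<ChangeRecord>* queue) {
  bool had = obj->count(key) != 0;
  int old_value = had ? (*obj)[key] : 0;
  bool deleted = obj->erase(key) != 0;
  if (observed && had && obj->count(key) == 0) {
    queue->push_back(ChangeRecord{ "deleted", key, old_value });
  }
  return deleted;
}

int main() {
  std::map<std::string, int> obj = { {"a", 1} };
  std::vector<ChangeRecord> queue;
  DeleteObserved(&obj, "a", true, &queue);
  return queue.size() == 1 ? 0 : 1;
}
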
@@ -5923,7 +5946,8 @@ void JSObject::DefineElementAccessor(Handle<JSObject> object,
uint32_t index,
Handle<Object> getter,
Handle<Object> setter,
- PropertyAttributes attributes) {
+ PropertyAttributes attributes,
+ v8::AccessControl access_control) {
switch (object->GetElementsKind()) {
case FAST_SMI_ELEMENTS:
case FAST_ELEMENTS:
@@ -5981,9 +6005,9 @@ void JSObject::DefineElementAccessor(Handle<JSObject> object,
Isolate* isolate = object->GetIsolate();
Handle<AccessorPair> accessors = isolate->factory()->NewAccessorPair();
accessors->SetComponents(*getter, *setter);
+ accessors->set_access_flags(access_control);
- CALL_HEAP_FUNCTION_VOID(
- isolate, object->SetElementCallback(index, *accessors, attributes));
+ SetElementCallback(object, index, accessors, attributes);
}
@@ -6012,11 +6036,13 @@ void JSObject::DefinePropertyAccessor(Handle<JSObject> object,
Handle<Name> name,
Handle<Object> getter,
Handle<Object> setter,
- PropertyAttributes attributes) {
+ PropertyAttributes attributes,
+ v8::AccessControl access_control) {
// We could assert that the property is configurable here, but we would need
// to do a lookup, which seems to be a bit of overkill.
bool only_attribute_changes = getter->IsNull() && setter->IsNull();
if (object->HasFastProperties() && !only_attribute_changes &&
+ access_control == v8::DEFAULT &&
(object->map()->NumberOfOwnDescriptors() <
DescriptorArray::kMaxNumberOfDescriptors)) {
bool getterOk = getter->IsNull() ||
@@ -6028,10 +6054,9 @@ void JSObject::DefinePropertyAccessor(Handle<JSObject> object,
Handle<AccessorPair> accessors = CreateAccessorPairFor(object, name);
accessors->SetComponents(*getter, *setter);
+ accessors->set_access_flags(access_control);
- CALL_HEAP_FUNCTION_VOID(
- object->GetIsolate(),
- object->SetPropertyCallback(*name, *accessors, attributes));
+ SetPropertyCallback(object, name, accessors, attributes);
}
@@ -6049,82 +6074,75 @@ bool JSObject::CanSetCallback(Name* name) {
LookupCallbackProperty(name, &callback_result);
if (callback_result.IsFound()) {
Object* obj = callback_result.GetCallbackObject();
- if (obj->IsAccessorInfo() &&
- AccessorInfo::cast(obj)->prohibits_overwriting()) {
- return false;
+ if (obj->IsAccessorInfo()) {
+ return !AccessorInfo::cast(obj)->prohibits_overwriting();
+ }
+ if (obj->IsAccessorPair()) {
+ return !AccessorPair::cast(obj)->prohibits_overwriting();
}
}
-
return true;
}
-MaybeObject* JSObject::SetElementCallback(uint32_t index,
- Object* structure,
- PropertyAttributes attributes) {
+void JSObject::SetElementCallback(Handle<JSObject> object,
+ uint32_t index,
+ Handle<Object> structure,
+ PropertyAttributes attributes) {
+ Heap* heap = object->GetHeap();
PropertyDetails details = PropertyDetails(attributes, CALLBACKS, 0);
// Normalize elements to make this operation simple.
- SeededNumberDictionary* dictionary;
- { MaybeObject* maybe_dictionary = NormalizeElements();
- if (!maybe_dictionary->To(&dictionary)) return maybe_dictionary;
- }
- ASSERT(HasDictionaryElements() || HasDictionaryArgumentsElements());
+ Handle<SeededNumberDictionary> dictionary = NormalizeElements(object);
+ ASSERT(object->HasDictionaryElements() ||
+ object->HasDictionaryArgumentsElements());
// Update the dictionary with the new CALLBACKS property.
- { MaybeObject* maybe_dictionary = dictionary->Set(index, structure, details);
- if (!maybe_dictionary->To(&dictionary)) return maybe_dictionary;
- }
-
+ dictionary = SeededNumberDictionary::Set(dictionary, index, structure,
+ details);
dictionary->set_requires_slow_elements();
+
// Update the dictionary backing store on the object.
- if (elements()->map() == GetHeap()->non_strict_arguments_elements_map()) {
+ if (object->elements()->map() == heap->non_strict_arguments_elements_map()) {
// Also delete any parameter alias.
//
// TODO(kmillikin): when deleting the last parameter alias we could
// switch to a direct backing store without the parameter map. This
// would allow GC of the context.
- FixedArray* parameter_map = FixedArray::cast(elements());
+ FixedArray* parameter_map = FixedArray::cast(object->elements());
if (index < static_cast<uint32_t>(parameter_map->length()) - 2) {
- parameter_map->set(index + 2, GetHeap()->the_hole_value());
+ parameter_map->set(index + 2, heap->the_hole_value());
}
- parameter_map->set(1, dictionary);
+ parameter_map->set(1, *dictionary);
} else {
- set_elements(dictionary);
+ object->set_elements(*dictionary);
}
-
- return GetHeap()->undefined_value();
}
-MaybeObject* JSObject::SetPropertyCallback(Name* name,
- Object* structure,
- PropertyAttributes attributes) {
+void JSObject::SetPropertyCallback(Handle<JSObject> object,
+ Handle<Name> name,
+ Handle<Object> structure,
+ PropertyAttributes attributes) {
// Normalize object to make this operation simple.
- MaybeObject* maybe_ok = NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0);
- if (maybe_ok->IsFailure()) return maybe_ok;
+ NormalizeProperties(object, CLEAR_INOBJECT_PROPERTIES, 0);
// For the global object allocate a new map to invalidate the global inline
// caches which have a global property cell reference directly in the code.
- if (IsGlobalObject()) {
- Map* new_map;
- MaybeObject* maybe_new_map = map()->CopyDropDescriptors();
- if (!maybe_new_map->To(&new_map)) return maybe_new_map;
+ if (object->IsGlobalObject()) {
+ Handle<Map> new_map = Map::CopyDropDescriptors(handle(object->map()));
ASSERT(new_map->is_dictionary_map());
+ object->set_map(*new_map);
- set_map(new_map);
// When running crankshaft, changing the map is not enough. We
// need to deoptimize all functions that rely on this global
// object.
- Deoptimizer::DeoptimizeGlobalObject(this);
+ Deoptimizer::DeoptimizeGlobalObject(*object);
}
// Update the dictionary with the new CALLBACKS property.
PropertyDetails details = PropertyDetails(attributes, CALLBACKS, 0);
- maybe_ok = SetNormalizedProperty(name, structure, details);
- if (maybe_ok->IsFailure()) return maybe_ok;
-
- return GetHeap()->undefined_value();
+ SetNormalizedProperty(object, name, structure, details);
}
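
A note on what this handlification means for call sites (an illustrative sketch, not lines from this patch): the old raw-pointer form forced every caller to propagate allocation failures by hand, while the new static handle-based form retries allocation internally, so the failure plumbing disappears.

  // Before (raw MaybeObject* style, as in removed lines elsewhere in this diff):
  //   MaybeObject* maybe_ok =
  //       SetPropertyCallback(name, structure, attributes);
  //   if (maybe_ok->IsFailure()) return maybe_ok;
  // After (handle style; GC-and-retry happens inside the helper):
  //   JSObject::SetPropertyCallback(object, name, structure, attributes);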
@@ -6132,7 +6150,8 @@ void JSObject::DefineAccessor(Handle<JSObject> object,
Handle<Name> name,
Handle<Object> getter,
Handle<Object> setter,
- PropertyAttributes attributes) {
+ PropertyAttributes attributes,
+ v8::AccessControl access_control) {
Isolate* isolate = object->GetIsolate();
// Check access rights if needed.
if (object->IsAccessCheckNeeded() &&
@@ -6145,14 +6164,18 @@ void JSObject::DefineAccessor(Handle<JSObject> object,
Handle<Object> proto(object->GetPrototype(), isolate);
if (proto->IsNull()) return;
ASSERT(proto->IsJSGlobalObject());
- DefineAccessor(
- Handle<JSObject>::cast(proto), name, getter, setter, attributes);
+ DefineAccessor(Handle<JSObject>::cast(proto),
+ name,
+ getter,
+ setter,
+ attributes,
+ access_control);
return;
}
// Make sure that the top context does not change when doing callbacks or
// interceptor calls.
- AssertNoContextChange ncc;
+ AssertNoContextChangeWithHandleScope ncc;
// Try to flatten before operating on the string.
if (name->IsString()) String::cast(*name)->TryFlatten();
@@ -6169,7 +6192,7 @@ void JSObject::DefineAccessor(Handle<JSObject> object,
if (is_element) {
preexists = object->HasLocalElement(index);
if (preexists && object->GetLocalElementAccessorPair(index) == NULL) {
- old_value = Object::GetElement(object, index);
+ old_value = Object::GetElement(isolate, object, index);
}
} else {
LookupResult lookup(isolate);
@@ -6182,9 +6205,11 @@ void JSObject::DefineAccessor(Handle<JSObject> object,
}
if (is_element) {
- DefineElementAccessor(object, index, getter, setter, attributes);
+ DefineElementAccessor(
+ object, index, getter, setter, attributes, access_control);
} else {
- DefinePropertyAccessor(object, name, getter, setter, attributes);
+ DefinePropertyAccessor(
+ object, name, getter, setter, attributes, access_control);
}
if (is_observed) {
@@ -6313,22 +6338,25 @@ bool JSObject::DefineFastAccessor(Handle<JSObject> object,
}
-MaybeObject* JSObject::DefineAccessor(AccessorInfo* info) {
- Isolate* isolate = GetIsolate();
- Name* name = Name::cast(info->name());
+Handle<Object> JSObject::SetAccessor(Handle<JSObject> object,
+ Handle<AccessorInfo> info) {
+ Isolate* isolate = object->GetIsolate();
+ Factory* factory = isolate->factory();
+ Handle<Name> name(Name::cast(info->name()));
+
// Check access rights if needed.
- if (IsAccessCheckNeeded() &&
- !isolate->MayNamedAccess(this, name, v8::ACCESS_SET)) {
- isolate->ReportFailedAccessCheck(this, v8::ACCESS_SET);
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
- return isolate->heap()->undefined_value();
+ if (object->IsAccessCheckNeeded() &&
+ !isolate->MayNamedAccess(*object, *name, v8::ACCESS_SET)) {
+ isolate->ReportFailedAccessCheck(*object, v8::ACCESS_SET);
+ RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
+ return factory->undefined_value();
}
- if (IsJSGlobalProxy()) {
- Object* proto = GetPrototype();
- if (proto->IsNull()) return this;
+ if (object->IsJSGlobalProxy()) {
+ Handle<Object> proto(object->GetPrototype(), isolate);
+ if (proto->IsNull()) return object;
ASSERT(proto->IsJSGlobalObject());
- return JSObject::cast(proto)->DefineAccessor(info);
+ return SetAccessor(Handle<JSObject>::cast(proto), info);
}
// Make sure that the top context does not change when doing callbacks or
@@ -6336,18 +6364,18 @@ MaybeObject* JSObject::DefineAccessor(AccessorInfo* info) {
AssertNoContextChange ncc;
// Try to flatten before operating on the string.
- if (name->IsString()) String::cast(name)->TryFlatten();
+ if (name->IsString()) FlattenString(Handle<String>::cast(name));
- if (!CanSetCallback(name)) return isolate->heap()->undefined_value();
+ if (!object->CanSetCallback(*name)) return factory->undefined_value();
uint32_t index = 0;
bool is_element = name->AsArrayIndex(&index);
if (is_element) {
- if (IsJSArray()) return isolate->heap()->undefined_value();
+ if (object->IsJSArray()) return factory->undefined_value();
// Accessors overwrite previous callbacks (cf. with getters/setters).
- switch (GetElementsKind()) {
+ switch (object->GetElementsKind()) {
case FAST_SMI_ELEMENTS:
case FAST_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
@@ -6366,7 +6394,7 @@ MaybeObject* JSObject::DefineAccessor(AccessorInfo* info) {
case EXTERNAL_DOUBLE_ELEMENTS:
// Ignore getters and setters on pixel and external array
// elements.
- return isolate->heap()->undefined_value();
+ return factory->undefined_value();
case DICTIONARY_ELEMENTS:
break;
case NON_STRICT_ARGUMENTS_ELEMENTS:
@@ -6374,25 +6402,21 @@ MaybeObject* JSObject::DefineAccessor(AccessorInfo* info) {
break;
}
- MaybeObject* maybe_ok =
- SetElementCallback(index, info, info->property_attributes());
- if (maybe_ok->IsFailure()) return maybe_ok;
+ SetElementCallback(object, index, info, info->property_attributes());
} else {
// Lookup the name.
LookupResult result(isolate);
- LocalLookup(name, &result, true);
+ object->LocalLookup(*name, &result, true);
// ES5 forbids turning a property into an accessor if it's not
// configurable (that is IsDontDelete in ES3 and v8), see 8.6.1 (Table 5).
if (result.IsFound() && (result.IsReadOnly() || result.IsDontDelete())) {
- return isolate->heap()->undefined_value();
+ return factory->undefined_value();
}
- MaybeObject* maybe_ok =
- SetPropertyCallback(name, info, info->property_attributes());
- if (maybe_ok->IsFailure()) return maybe_ok;
+ SetPropertyCallback(object, name, info, info->property_attributes());
}
- return this;
+ return object;
}
@@ -6401,7 +6425,7 @@ MaybeObject* JSObject::LookupAccessor(Name* name, AccessorComponent component) {
// Make sure that the top context does not change when doing callbacks or
// interceptor calls.
- AssertNoContextChange ncc;
+ AssertNoContextChangeWithHandleScope ncc;
// Check access rights if needed.
if (IsAccessCheckNeeded() &&
@@ -6501,6 +6525,15 @@ MaybeObject* Map::RawCopy(int instance_size) {
}
+Handle<Map> Map::CopyNormalized(Handle<Map> map,
+ PropertyNormalizationMode mode,
+ NormalizedMapSharingMode sharing) {
+ CALL_HEAP_FUNCTION(map->GetIsolate(),
+ map->CopyNormalized(mode, sharing),
+ Map);
+}
+
+
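
For orientation, CALL_HEAP_FUNCTION is V8's standard bridge from the raw allocating API to the handle API. Roughly, and this is a simplification rather than the macro's literal expansion, it evaluates the wrapped call, and on an allocation failure it collects garbage and retries before wrapping the result in a Handle<TYPE>:

  // Approximate behavior of CALL_HEAP_FUNCTION(isolate, CALL, TYPE):
  //   MaybeObject* maybe = CALL;
  //   while (/* maybe is a retry-after-GC failure */) {
  //     /* perform GC */; maybe = CALL;
  //   }
  //   return Handle<TYPE>(TYPE::cast(maybe->ToObjectUnchecked()), isolate);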
MaybeObject* Map::CopyNormalized(PropertyNormalizationMode mode,
NormalizedMapSharingMode sharing) {
int new_instance_size = instance_size();
@@ -6580,7 +6613,7 @@ MaybeObject* Map::ShareDescriptor(DescriptorArray* descriptors,
} else {
// Descriptor arrays grow by 50%.
MaybeObject* maybe_descriptors = DescriptorArray::Allocate(
- old_size, old_size < 4 ? 1 : old_size / 2);
+ GetIsolate(), old_size, old_size < 4 ? 1 : old_size / 2);
if (!maybe_descriptors->To(&new_descriptors)) return maybe_descriptors;
DescriptorArray::WhitenessWitness witness(new_descriptors);
@@ -6645,7 +6678,7 @@ MaybeObject* Map::CopyReplaceDescriptors(DescriptorArray* descriptors,
if (!maybe_transitions->To(&transitions)) return maybe_transitions;
set_transitions(transitions);
result->SetBackPointer(this);
- } else if (flag != OMIT_TRANSITION_KEEP_REPRESENTATIONS) {
+ } else {
descriptors->InitializeRepresentations(Representation::Tagged());
}
@@ -6830,7 +6863,8 @@ MaybeObject* Map::CopyAddDescriptor(Descriptor* descriptor,
}
DescriptorArray* new_descriptors;
- MaybeObject* maybe_descriptors = DescriptorArray::Allocate(old_size, 1);
+ MaybeObject* maybe_descriptors =
+ DescriptorArray::Allocate(GetIsolate(), old_size, 1);
if (!maybe_descriptors->To(&new_descriptors)) return maybe_descriptors;
DescriptorArray::WhitenessWitness witness(new_descriptors);
@@ -6877,7 +6911,7 @@ MaybeObject* DescriptorArray::CopyUpToAddAttributes(
int size = enumeration_index;
DescriptorArray* descriptors;
- MaybeObject* maybe_descriptors = Allocate(size);
+ MaybeObject* maybe_descriptors = Allocate(GetIsolate(), size);
if (!maybe_descriptors->To(&descriptors)) return maybe_descriptors;
DescriptorArray::WhitenessWitness witness(descriptors);
@@ -6924,7 +6958,8 @@ MaybeObject* Map::CopyReplaceDescriptor(DescriptorArray* descriptors,
ASSERT_LT(insertion_index, new_size);
DescriptorArray* new_descriptors;
- MaybeObject* maybe_descriptors = DescriptorArray::Allocate(new_size);
+ MaybeObject* maybe_descriptors =
+ DescriptorArray::Allocate(GetIsolate(), new_size);
if (!maybe_descriptors->To(&new_descriptors)) return maybe_descriptors;
DescriptorArray::WhitenessWitness witness(new_descriptors);
@@ -7714,8 +7749,10 @@ bool FixedArray::IsEqualTo(FixedArray* other) {
#endif
-MaybeObject* DescriptorArray::Allocate(int number_of_descriptors, int slack) {
- Heap* heap = Isolate::Current()->heap();
+MaybeObject* DescriptorArray::Allocate(Isolate* isolate,
+ int number_of_descriptors,
+ int slack) {
+ Heap* heap = isolate->heap();
// Do not use DescriptorArray::cast on incomplete object.
int size = number_of_descriptors + slack;
if (size == 0) return heap->empty_descriptor_array();
@@ -7773,6 +7810,8 @@ void DescriptorArray::CopyFrom(int dst_index,
MaybeObject* DescriptorArray::Merge(int verbatim,
int valid,
int new_size,
+ int modify_index,
+ StoreMode store_mode,
DescriptorArray* other) {
ASSERT(verbatim <= valid);
ASSERT(valid <= new_size);
@@ -7781,7 +7820,8 @@ MaybeObject* DescriptorArray::Merge(int verbatim,
// Allocate a new descriptor array large enough to hold the required
// descriptors, with minimally the exact same size as this descriptor array.
MaybeObject* maybe_descriptors = DescriptorArray::Allocate(
- new_size, Max(new_size, other->number_of_descriptors()) - new_size);
+ GetIsolate(), new_size,
+ Max(new_size, other->number_of_descriptors()) - new_size);
if (!maybe_descriptors->To(&result)) return maybe_descriptors;
ASSERT(result->length() > length() ||
result->NumberOfSlackDescriptors() > 0 ||
@@ -7796,7 +7836,7 @@ MaybeObject* DescriptorArray::Merge(int verbatim,
int current_offset = 0;
for (descriptor = 0; descriptor < verbatim; descriptor++) {
if (GetDetails(descriptor).type() == FIELD) current_offset++;
- result->CopyFrom(descriptor, this, descriptor, witness);
+ result->CopyFrom(descriptor, other, descriptor, witness);
}
// |verbatim| -> |valid|
@@ -7806,6 +7846,7 @@ MaybeObject* DescriptorArray::Merge(int verbatim,
PropertyDetails other_details = other->GetDetails(descriptor);
if (details.type() == FIELD || other_details.type() == FIELD ||
+ (store_mode == FORCE_FIELD && descriptor == modify_index) ||
(details.type() == CONSTANT &&
other_details.type() == CONSTANT &&
GetValue(descriptor) != other->GetValue(descriptor))) {
@@ -7824,7 +7865,8 @@ MaybeObject* DescriptorArray::Merge(int verbatim,
// |valid| -> |new_size|
for (; descriptor < new_size; descriptor++) {
PropertyDetails details = other->GetDetails(descriptor);
- if (details.type() == FIELD) {
+ if (details.type() == FIELD ||
+ (store_mode == FORCE_FIELD && descriptor == modify_index)) {
Name* key = other->GetKey(descriptor);
FieldDescriptor d(key,
current_offset++,
@@ -7939,19 +7981,21 @@ Object* AccessorPair::GetComponent(AccessorComponent component) {
}
-MaybeObject* DeoptimizationInputData::Allocate(int deopt_entry_count,
+MaybeObject* DeoptimizationInputData::Allocate(Isolate* isolate,
+ int deopt_entry_count,
PretenureFlag pretenure) {
ASSERT(deopt_entry_count > 0);
- return HEAP->AllocateFixedArray(LengthFor(deopt_entry_count),
- pretenure);
+ return isolate->heap()->AllocateFixedArray(LengthFor(deopt_entry_count),
+ pretenure);
}
-MaybeObject* DeoptimizationOutputData::Allocate(int number_of_deopt_points,
+MaybeObject* DeoptimizationOutputData::Allocate(Isolate* isolate,
+ int number_of_deopt_points,
PretenureFlag pretenure) {
- if (number_of_deopt_points == 0) return HEAP->empty_fixed_array();
- return HEAP->AllocateFixedArray(LengthOfFixedArray(number_of_deopt_points),
- pretenure);
+ if (number_of_deopt_points == 0) return isolate->heap()->empty_fixed_array();
+ return isolate->heap()->AllocateFixedArray(
+ LengthOfFixedArray(number_of_deopt_points), pretenure);
}
@@ -7968,8 +8012,34 @@ bool DescriptorArray::IsEqualTo(DescriptorArray* other) {
#endif
+static bool IsIdentifier(UnicodeCache* cache, Name* name) {
+ // Checks whether the buffer contains an identifier (no escape).
+ if (!name->IsString()) return false;
+ String* string = String::cast(name);
+ if (string->length() == 0) return false;
+ ConsStringIteratorOp op;
+ StringCharacterStream stream(string, &op);
+ if (!cache->IsIdentifierStart(stream.GetNext())) {
+ return false;
+ }
+ while (stream.HasMore()) {
+ if (!cache->IsIdentifierPart(stream.GetNext())) {
+ return false;
+ }
+ }
+ return true;
+}
+
+
+bool Name::IsCacheable(Isolate* isolate) {
+ return IsSymbol() ||
+ IsIdentifier(isolate->unicode_cache(), this) ||
+ this == isolate->heap()->hidden_string();
+}
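+
+Some concrete cases for the new predicate (illustrative, assuming the usual ECMAScript identifier rules behind UnicodeCache):
+
+  // Name::IsCacheable(isolate):
+  //   any Symbol            -> true (IsSymbol())
+  //   "foo", "_bar", "$baz" -> true (identifier start + identifier parts)
+  //   "", "0abc", "a-b"     -> false (fails the identifier scan)
+  //   the hidden string     -> true (special-cased)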
+
+
bool String::LooksValid() {
- if (!Isolate::Current()->heap()->Contains(this)) return false;
+ if (!GetIsolate()->heap()->Contains(this)) return false;
return true;
}
@@ -8129,8 +8199,7 @@ const uc16* SeqTwoByteString::SeqTwoByteStringGetData(unsigned start) {
}
-void Relocatable::PostGarbageCollectionProcessing() {
- Isolate* isolate = Isolate::Current();
+void Relocatable::PostGarbageCollectionProcessing(Isolate* isolate) {
Relocatable* current = isolate->relocatable_top();
while (current != NULL) {
current->PostGarbageCollection();
@@ -8141,7 +8210,7 @@ void Relocatable::PostGarbageCollectionProcessing() {
// Reserve space for statics needing saving and restoring.
int Relocatable::ArchiveSpacePerThread() {
- return sizeof(Isolate::Current()->relocatable_top());
+ return sizeof(Relocatable*); // NOLINT
}
@@ -8167,8 +8236,7 @@ char* Relocatable::Iterate(ObjectVisitor* v, char* thread_storage) {
}
-void Relocatable::Iterate(ObjectVisitor* v) {
- Isolate* isolate = Isolate::Current();
+void Relocatable::Iterate(Isolate* isolate, ObjectVisitor* v) {
Iterate(v, isolate->relocatable_top());
}
@@ -8944,6 +9012,7 @@ AllocationMemento* AllocationMemento::FindForJSObject(JSObject* object) {
// involves carefully checking the object immediately after the JSArray
// (if there is one) to see if it's an AllocationMemento.
if (FLAG_track_allocation_sites && object->GetHeap()->InNewSpace(object)) {
+ ASSERT(object->GetHeap()->InToSpace(object));
Address ptr_end = (reinterpret_cast<Address>(object) - kHeapObjectTag) +
object->Size();
if ((ptr_end + AllocationMemento::kSize) <=
@@ -8954,7 +9023,7 @@ AllocationMemento* AllocationMemento::FindForJSObject(JSObject* object) {
if (*possible_allocation_memento_map ==
object->GetHeap()->allocation_memento_map()) {
AllocationMemento* memento = AllocationMemento::cast(
- reinterpret_cast<Object*>(ptr_end + 1));
+ reinterpret_cast<Object*>(ptr_end + kHeapObjectTag));
return memento;
}
}
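
The `ptr_end + kHeapObjectTag` change above is a readability fix rather than a behavior change: kHeapObjectTag is 1 on this branch, so the old `+ 1` computed the same tagged pointer while hiding the intent.

  // Pointer-tagging recap (constant as defined in V8's globals; shown
  // here for context, not part of this patch):
  //   const int kHeapObjectTag = 1;  // low bit set marks a HeapObject
  //   Object* tagged =
  //       reinterpret_cast<Object*>(untagged_address + kHeapObjectTag);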
@@ -9229,46 +9298,34 @@ void JSFunction::MarkForLazyRecompilation() {
}
-void JSFunction::MarkForParallelRecompilation() {
+void JSFunction::MarkForConcurrentRecompilation() {
ASSERT(is_compiled() || GetIsolate()->DebuggerHasBreakPoints());
ASSERT(!IsOptimized());
ASSERT(shared()->allows_lazy_compilation() || code()->optimizable());
ASSERT(!shared()->is_generator());
- ASSERT(FLAG_parallel_recompilation);
- if (FLAG_trace_parallel_recompilation) {
+ ASSERT(FLAG_concurrent_recompilation);
+ if (FLAG_trace_concurrent_recompilation) {
PrintF(" ** Marking ");
PrintName();
- PrintF(" for parallel recompilation.\n");
+ PrintF(" for concurrent recompilation.\n");
}
set_code_no_write_barrier(
- GetIsolate()->builtins()->builtin(Builtins::kParallelRecompile));
- // No write barrier required, since the builtin is part of the root set.
-}
-
-
-void JSFunction::MarkForInstallingRecompiledCode() {
- // The debugger could have switched the builtin to lazy compile.
- // In that case, simply carry on. It will be dealt with later.
- ASSERT(!IsOptimized());
- ASSERT(shared()->allows_lazy_compilation() || code()->optimizable());
- ASSERT(FLAG_parallel_recompilation);
- set_code_no_write_barrier(
- GetIsolate()->builtins()->builtin(Builtins::kInstallRecompiledCode));
+ GetIsolate()->builtins()->builtin(Builtins::kConcurrentRecompile));
// No write barrier required, since the builtin is part of the root set.
}
void JSFunction::MarkInRecompileQueue() {
- // We can only arrive here via the parallel-recompilation builtin. If
+ // We can only arrive here via the concurrent-recompilation builtin. If
// break points were set, the code would point to the lazy-compile builtin.
ASSERT(!GetIsolate()->DebuggerHasBreakPoints());
- ASSERT(IsMarkedForParallelRecompilation() && !IsOptimized());
+ ASSERT(IsMarkedForConcurrentRecompilation() && !IsOptimized());
ASSERT(shared()->allows_lazy_compilation() || code()->optimizable());
- ASSERT(FLAG_parallel_recompilation);
- if (FLAG_trace_parallel_recompilation) {
+ ASSERT(FLAG_concurrent_recompilation);
+ if (FLAG_trace_concurrent_recompilation) {
PrintF(" ** Queueing ");
PrintName();
- PrintF(" for parallel recompilation.\n");
+ PrintF(" for concurrent recompilation.\n");
}
set_code_no_write_barrier(
GetIsolate()->builtins()->builtin(Builtins::kInRecompileQueue));
@@ -9282,7 +9339,7 @@ static bool CompileLazyHelper(CompilationInfo* info,
ASSERT(info->IsOptimizing() || !info->shared_info()->is_compiled());
ASSERT(!info->isolate()->has_pending_exception());
bool result = Compiler::CompileLazy(info);
- ASSERT(result != Isolate::Current()->has_pending_exception());
+ ASSERT(result != info->isolate()->has_pending_exception());
if (!result && flag == CLEAR_EXCEPTION) {
info->isolate()->clear_pending_exception();
}
@@ -9451,11 +9508,25 @@ bool JSFunction::CompileLazy(Handle<JSFunction> function,
}
+Handle<Code> JSFunction::CompileOsr(Handle<JSFunction> function,
+ BailoutId osr_ast_id,
+ ClearExceptionFlag flag) {
+ CompilationInfoWithZone info(function);
+ info.SetOptimizing(osr_ast_id);
+ if (CompileLazyHelper(&info, flag)) {
+ // TODO(titzer): don't install the OSR code.
+ // ASSERT(function->code() != *info.code());
+ return info.code();
+ } else {
+ return Handle<Code>::null();
+ }
+}
+
+
bool JSFunction::CompileOptimized(Handle<JSFunction> function,
- BailoutId osr_ast_id,
ClearExceptionFlag flag) {
CompilationInfoWithZone info(function);
- info.SetOptimizing(osr_ast_id);
+ info.SetOptimizing(BailoutId::None());
return CompileLazyHelper(&info, flag);
}
@@ -9481,21 +9552,13 @@ bool JSFunction::IsInlineable() {
void JSObject::OptimizeAsPrototype(Handle<JSObject> object) {
- CALL_HEAP_FUNCTION_VOID(object->GetIsolate(), object->OptimizeAsPrototype());
-}
-
-
-MaybeObject* JSObject::OptimizeAsPrototype() {
- if (IsGlobalObject()) return this;
+ if (object->IsGlobalObject()) return;
// Make sure prototypes are fast objects and their maps have the bit set
// so they remain fast.
- if (!HasFastProperties()) {
- MaybeObject* new_proto = TransformToFastProperties(0);
- if (new_proto->IsFailure()) return new_proto;
- ASSERT(new_proto == this);
+ if (!object->HasFastProperties()) {
+ TransformToFastProperties(object, 0);
}
- return this;
}
@@ -9647,40 +9710,38 @@ Context* JSFunction::NativeContextFromLiterals(FixedArray* literals) {
}
-bool JSFunction::PassesHydrogenFilter() {
+// The filter is a pattern that matches function names in this way:
+// "*" all; the default
+// "-" all but the top-level function
+// "-name" all but the function "name"
+// "" only the top-level function
+// "name" only the function "name"
+// "name*" only functions starting with "name"
+bool JSFunction::PassesFilter(const char* raw_filter) {
+ if (*raw_filter == '*') return true;
String* name = shared()->DebugName();
- // The filter string is a pattern that matches functions in this way:
- // "*" all; the default
- // "-" all but the top-level function
- // "-name" all but the function "name"
- // "" only the top-level function
- // "name" only the function "name"
- // "name*" only functions starting with "name"
- if (*FLAG_hydrogen_filter != '*') {
- Vector<const char> filter = CStrVector(FLAG_hydrogen_filter);
- if (filter.length() == 0) return name->length() == 0;
- if (filter[0] != '-' && name->IsUtf8EqualTo(filter)) return true;
- if (filter[0] == '-' &&
- !name->IsUtf8EqualTo(filter.SubVector(1, filter.length()))) {
- return true;
- }
- if (filter[filter.length() - 1] == '*' &&
- name->IsUtf8EqualTo(filter.SubVector(0, filter.length() - 1), true)) {
- return true;
- }
- return false;
+ Vector<const char> filter = CStrVector(raw_filter);
+ if (filter.length() == 0) return name->length() == 0;
+ if (filter[0] != '-' && name->IsUtf8EqualTo(filter)) return true;
+ if (filter[0] == '-' &&
+ !name->IsUtf8EqualTo(filter.SubVector(1, filter.length()))) {
+ return true;
}
-
- return true;
+ if (filter[filter.length() - 1] == '*' &&
+ name->IsUtf8EqualTo(filter.SubVector(0, filter.length() - 1), true)) {
+ return true;
+ }
+ return false;
}
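
The six documented cases, spelled out with a hypothetical function name:

  // PassesFilter("*")    -> true for every function (the default)
  // PassesFilter("")     -> true only for the top-level (empty-named) one
  // PassesFilter("foo")  -> true only for a function named "foo"
  // PassesFilter("foo*") -> true for "foo", "foobar", "food", ...
  // PassesFilter("-")    -> true for all but the top-level function
  // PassesFilter("-foo") -> true for all but "foo"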
-MaybeObject* Oddball::Initialize(const char* to_string,
+MaybeObject* Oddball::Initialize(Heap* heap,
+ const char* to_string,
Object* to_number,
byte kind) {
String* internalized_to_string;
{ MaybeObject* maybe_string =
- Isolate::Current()->heap()->InternalizeUtf8String(
+ heap->InternalizeUtf8String(
CStrVector(to_string));
if (!maybe_string->To(&internalized_to_string)) return maybe_string;
}
@@ -9814,12 +9875,16 @@ void SharedFunctionInfo::DisableOptimization(BailoutReason reason) {
// non-optimizable if optimization is disabled for the shared
// function info.
set_optimization_disabled(true);
+ set_bailout_reason(reason);
// Code should be the lazy compilation stub or else unoptimized. If the
// latter, disable optimization for the code too.
ASSERT(code()->kind() == Code::FUNCTION || code()->kind() == Code::BUILTIN);
if (code()->kind() == Code::FUNCTION) {
code()->set_optimizable(false);
}
+ PROFILE(GetIsolate(),
+ LogExistingFunction(Handle<SharedFunctionInfo>(this),
+ Handle<Code>(code())));
if (FLAG_trace_opt) {
PrintF("[disabled optimization for ");
ShortPrint();
@@ -10317,7 +10382,7 @@ void Code::ClearInlineCaches() {
RelocInfo* info = it.rinfo();
Code* target(Code::GetCodeFromTargetAddress(info->target_address()));
if (target->is_inline_cache_stub()) {
- IC::Clear(info->pc());
+ IC::Clear(this->GetIsolate(), info->pc());
}
}
}
@@ -10341,6 +10406,18 @@ void Code::ClearTypeFeedbackCells(Heap* heap) {
}
+BailoutId Code::TranslatePcOffsetToAstId(uint32_t pc_offset) {
+ DisallowHeapAllocation no_gc;
+ ASSERT(kind() == FUNCTION);
+ for (FullCodeGenerator::BackEdgeTableIterator it(this, &no_gc);
+ !it.Done();
+ it.Next()) {
+ if (it.pc_offset() == pc_offset) return it.ast_id();
+ }
+ return BailoutId::None();
+}
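+
+How the new helper is presumably consumed on the OSR path (an assumed usage sketch; the actual call site is outside this hunk):
+
+  // BailoutId ast_id = code->TranslatePcOffsetToAstId(pc_offset);
+  // if (!ast_id.IsNone()) {
+  //   // compile an optimized version with an OSR entry at ast_id
+  // }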
+
+
bool Code::allowed_in_shared_map_code_cache() {
return is_keyed_load_stub() || is_keyed_store_stub() ||
(is_compare_ic_stub() &&
@@ -10401,7 +10478,7 @@ int Code::GetAge() {
void Code::GetCodeAgeAndParity(Code* code, Age* age,
MarkingParity* parity) {
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = code->GetIsolate();
Builtins* builtins = isolate->builtins();
Code* stub = NULL;
#define HANDLE_CODE_AGE(AGE) \
@@ -10801,7 +10878,8 @@ void Code::Disassemble(const char* name, FILE* out) {
// If there is no back edge table, the "table start" will be at or after
// (due to alignment) the end of the instruction stream.
if (static_cast<int>(offset) < instruction_size()) {
- FullCodeGenerator::BackEdgeTableIterator back_edges(this);
+ DisallowHeapAllocation no_gc;
+ FullCodeGenerator::BackEdgeTableIterator back_edges(this, &no_gc);
PrintF(out, "Back edges (size = %u)\n", back_edges.table_length());
PrintF(out, "ast_id pc_offset loop_depth\n");
@@ -10977,7 +11055,7 @@ static bool GetOldValue(Isolate* isolate,
ASSERT(attributes != ABSENT);
if (attributes == DONT_DELETE) return false;
old_values->Add(object->GetLocalElementAccessorPair(index) == NULL
- ? Object::GetElement(object, index)
+ ? Object::GetElement(isolate, object, index)
: Handle<Object>::cast(isolate->factory()->the_hole_value()));
indices->Add(index);
return true;
@@ -10997,7 +11075,8 @@ static void EnqueueSpliceRecord(Handle<JSArray> object,
{ object, index_object, deleted, add_count_object };
bool threw;
- Execution::Call(Handle<JSFunction>(isolate->observers_enqueue_splice()),
+ Execution::Call(isolate,
+ Handle<JSFunction>(isolate->observers_enqueue_splice()),
isolate->factory()->undefined_value(), ARRAY_SIZE(args), args,
&threw);
ASSERT(!threw);
@@ -11010,7 +11089,8 @@ static void BeginPerformSplice(Handle<JSArray> object) {
Handle<Object> args[] = { object };
bool threw;
- Execution::Call(Handle<JSFunction>(isolate->observers_begin_perform_splice()),
+ Execution::Call(isolate,
+ Handle<JSFunction>(isolate->observers_begin_perform_splice()),
isolate->factory()->undefined_value(), ARRAY_SIZE(args), args,
&threw);
ASSERT(!threw);
@@ -11023,7 +11103,8 @@ static void EndPerformSplice(Handle<JSArray> object) {
Handle<Object> args[] = { object };
bool threw;
- Execution::Call(Handle<JSFunction>(isolate->observers_end_perform_splice()),
+ Execution::Call(isolate,
+ Handle<JSFunction>(isolate->observers_end_perform_splice()),
isolate->factory()->undefined_value(), ARRAY_SIZE(args), args,
&threw);
ASSERT(!threw);
@@ -11343,8 +11424,9 @@ void DependentCode::RemoveCompilationInfo(DependentCode::DependencyGroup group,
bool DependentCode::Contains(DependencyGroup group, Code* code) {
GroupStartIndexes starts(this);
- int number_of_entries = starts.number_of_entries();
- for (int i = 0; i < number_of_entries; i++) {
+ int start = starts.at(group);
+ int end = starts.at(group + 1);
+ for (int i = start; i < end; i++) {
if (object_at(i) == code) return true;
}
return false;
@@ -11362,13 +11444,15 @@ void DependentCode::DeoptimizeDependentCodeGroup(
int code_entries = starts.number_of_entries();
if (start == end) return;
- // Collect all the code to deoptimize.
- Zone zone(isolate);
- ZoneList<Code*> codes(end - start, &zone);
+ // Mark all the code that needs to be deoptimized.
+ bool marked = false;
for (int i = start; i < end; i++) {
if (is_code_at(i)) {
Code* code = code_at(i);
- if (!code->marked_for_deoptimization()) codes.Add(code, &zone);
+ if (!code->marked_for_deoptimization()) {
+ code->set_marked_for_deoptimization(true);
+ marked = true;
+ }
} else {
CompilationInfo* info = compilation_info_at(i);
info->AbortDueToDependencyChange();
@@ -11384,7 +11468,8 @@ void DependentCode::DeoptimizeDependentCodeGroup(
clear_at(i);
}
set_number_of_entries(group, 0);
- Deoptimizer::DeoptimizeCodeList(isolate, &codes);
+
+ if (marked) Deoptimizer::DeoptimizeMarkedCode(isolate);
}
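
The shape of this change, restated: rather than materializing a ZoneList<Code*> of victims and passing it to DeoptimizeCodeList, each dependent Code object is now flagged in place and one sweep handles everything flagged.

  // New flow (names exactly as in the hunk above):
  //   code->set_marked_for_deoptimization(true);  // per dependent entry
  //   ...
  //   if (marked) Deoptimizer::DeoptimizeMarkedCode(isolate);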
@@ -11539,16 +11624,18 @@ MaybeObject* JSObject::SetElementWithInterceptor(uint32_t index,
bool check_prototype,
SetPropertyMode set_mode) {
Isolate* isolate = GetIsolate();
+ HandleScope scope(isolate);
+
// Make sure that the top context does not change when doing
// callbacks or interceptor calls.
AssertNoContextChange ncc;
- HandleScope scope(isolate);
+
Handle<InterceptorInfo> interceptor(GetIndexedInterceptor());
Handle<JSObject> this_handle(this);
Handle<Object> value_handle(value, isolate);
if (!interceptor->setter()->IsUndefined()) {
- v8::IndexedPropertySetter setter =
- v8::ToCData<v8::IndexedPropertySetter>(interceptor->setter());
+ v8::IndexedPropertySetterCallback setter =
+ v8::ToCData<v8::IndexedPropertySetterCallback>(interceptor->setter());
LOG(isolate,
ApiIndexedPropertyAccess("interceptor-indexed-set", this, index));
PropertyCallbackArguments args(isolate, interceptor->data(), this, this);
@@ -11581,7 +11668,8 @@ MaybeObject* JSObject::GetElementWithCallback(Object* receiver,
Handle<ExecutableAccessorInfo> data(
ExecutableAccessorInfo::cast(structure));
Object* fun_obj = data->getter();
- v8::AccessorGetter call_fun = v8::ToCData<v8::AccessorGetter>(fun_obj);
+ v8::AccessorGetterCallback call_fun =
+ v8::ToCData<v8::AccessorGetterCallback>(fun_obj);
if (call_fun == NULL) return isolate->heap()->undefined_value();
HandleScope scope(isolate);
Handle<JSObject> self(JSObject::cast(receiver));
@@ -11646,7 +11734,8 @@ MaybeObject* JSObject::SetElementWithCallback(Object* structure,
Handle<ExecutableAccessorInfo> data(
ExecutableAccessorInfo::cast(structure));
Object* call_obj = data->setter();
- v8::AccessorSetter call_fun = v8::ToCData<v8::AccessorSetter>(call_obj);
+ v8::AccessorSetterCallback call_fun =
+ v8::ToCData<v8::AccessorSetterCallback>(call_obj);
if (call_fun == NULL) return value;
Handle<Object> number = isolate->factory()->NewNumberFromUint(index);
Handle<String> key(isolate->factory()->NumberToString(number));
@@ -12100,18 +12189,17 @@ MUST_USE_RESULT MaybeObject* JSObject::SetFastDoubleElement(
}
-MaybeObject* JSReceiver::SetElement(uint32_t index,
- Object* value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode,
- bool check_proto) {
- if (IsJSProxy()) {
- return JSProxy::cast(this)->SetElementWithHandler(
- this, index, value, strict_mode);
- } else {
- return JSObject::cast(this)->SetElement(
- index, value, attributes, strict_mode, check_proto);
+Handle<Object> JSReceiver::SetElement(Handle<JSReceiver> object,
+ uint32_t index,
+ Handle<Object> value,
+ PropertyAttributes attributes,
+ StrictModeFlag strict_mode) {
+ if (object->IsJSProxy()) {
+ return JSProxy::SetElementWithHandler(
+ Handle<JSProxy>::cast(object), object, index, value, strict_mode);
}
+ return JSObject::SetElement(
+ Handle<JSObject>::cast(object), index, value, attributes, strict_mode);
}
@@ -12136,7 +12224,8 @@ Handle<Object> JSObject::SetElement(Handle<JSObject> object,
if (object->HasExternalArrayElements()) {
if (!value->IsNumber() && !value->IsUndefined()) {
bool has_exception;
- Handle<Object> number = Execution::ToNumber(value, &has_exception);
+ Handle<Object> number =
+ Execution::ToNumber(object->GetIsolate(), value, &has_exception);
if (has_exception) return Handle<Object>();
value = number;
}
@@ -12213,7 +12302,7 @@ MaybeObject* JSObject::SetElement(uint32_t index,
if (old_attributes != ABSENT) {
if (self->GetLocalElementAccessorPair(index) == NULL)
- old_value = Object::GetElement(self, index);
+ old_value = Object::GetElement(isolate, self, index);
} else if (self->IsJSArray()) {
// Store old array length in case adding an element grows the array.
old_length_handle = handle(Handle<JSArray>::cast(self)->length(), isolate);
@@ -12255,7 +12344,7 @@ MaybeObject* JSObject::SetElement(uint32_t index,
} else if (old_value->IsTheHole()) {
EnqueueChangeRecord(self, "reconfigured", name, old_value);
} else {
- Handle<Object> new_value = Object::GetElement(self, index);
+ Handle<Object> new_value = Object::GetElement(isolate, self, index);
bool value_changed = !old_value->SameValue(*new_value);
if (old_attributes != new_attributes) {
if (!value_changed) old_value = isolate->factory()->the_hole_value();
@@ -12547,16 +12636,18 @@ MaybeObject* JSArray::JSArrayUpdateLengthFromIndex(uint32_t index,
MaybeObject* JSObject::GetElementWithInterceptor(Object* receiver,
uint32_t index) {
Isolate* isolate = GetIsolate();
+ HandleScope scope(isolate);
+
// Make sure that the top context does not change when doing
// callbacks or interceptor calls.
AssertNoContextChange ncc;
- HandleScope scope(isolate);
+
Handle<InterceptorInfo> interceptor(GetIndexedInterceptor(), isolate);
Handle<Object> this_handle(receiver, isolate);
Handle<JSObject> holder_handle(this, isolate);
if (!interceptor->getter()->IsUndefined()) {
- v8::IndexedPropertyGetter getter =
- v8::ToCData<v8::IndexedPropertyGetter>(interceptor->getter());
+ v8::IndexedPropertyGetterCallback getter =
+ v8::ToCData<v8::IndexedPropertyGetterCallback>(interceptor->getter());
LOG(isolate,
ApiIndexedPropertyAccess("interceptor-indexed-get", this, index));
PropertyCallbackArguments
@@ -12581,7 +12672,7 @@ MaybeObject* JSObject::GetElementWithInterceptor(Object* receiver,
Object* pt = holder_handle->GetPrototype();
if (pt == heap->null_value()) return heap->undefined_value();
- return pt->GetElementWithReceiver(*this_handle, index);
+ return pt->GetElementWithReceiver(isolate, *this_handle, index);
}
@@ -12860,8 +12951,8 @@ MaybeObject* JSObject::GetPropertyWithInterceptor(
Handle<String> name_handle(String::cast(name));
if (!interceptor->getter()->IsUndefined()) {
- v8::NamedPropertyGetter getter =
- v8::ToCData<v8::NamedPropertyGetter>(interceptor->getter());
+ v8::NamedPropertyGetterCallback getter =
+ v8::ToCData<v8::NamedPropertyGetterCallback>(interceptor->getter());
LOG(isolate,
ApiNamedPropertyAccess("interceptor-named-get", *holder_handle, name));
PropertyCallbackArguments
@@ -13697,6 +13788,74 @@ MaybeObject* HashTable<Shape, Key>::Rehash(HashTable* new_table, Key key) {
template<typename Shape, typename Key>
+uint32_t HashTable<Shape, Key>::EntryForProbe(Key key,
+ Object* k,
+ int probe,
+ uint32_t expected) {
+ uint32_t hash = HashTable<Shape, Key>::HashForObject(key, k);
+ uint32_t capacity = Capacity();
+ uint32_t entry = FirstProbe(hash, capacity);
+ for (int i = 1; i < probe; i++) {
+ if (entry == expected) return expected;
+ entry = NextProbe(entry, i, capacity);
+ }
+ return entry;
+}
+
+
+template<typename Shape, typename Key>
+void HashTable<Shape, Key>::Swap(uint32_t entry1,
+ uint32_t entry2,
+ WriteBarrierMode mode) {
+ int index1 = EntryToIndex(entry1);
+ int index2 = EntryToIndex(entry2);
+ Object* temp[Shape::kEntrySize];
+ for (int j = 0; j < Shape::kEntrySize; j++) {
+ temp[j] = get(index1 + j);
+ }
+ for (int j = 0; j < Shape::kEntrySize; j++) {
+ set(index1 + j, get(index2 + j), mode);
+ }
+ for (int j = 0; j < Shape::kEntrySize; j++) {
+ set(index2 + j, temp[j], mode);
+ }
+}
+
+
+template<typename Shape, typename Key>
+void HashTable<Shape, Key>::Rehash(Key key) {
+ DisallowHeapAllocation no_gc;
+ WriteBarrierMode mode = GetWriteBarrierMode(no_gc);
+ uint32_t capacity = Capacity();
+ bool done = false;
+ for (int probe = 1; !done; probe++) {
+ // All elements at entries given by one of the first _probe_ probes
+ // are placed correctly. Other elements might need to be moved.
+ done = true;
+ for (uint32_t current = 0; current < capacity; current++) {
+ Object* current_key = get(EntryToIndex(current));
+ if (IsKey(current_key)) {
+ uint32_t target = EntryForProbe(key, current_key, probe, current);
+ if (current == target) continue;
+ Object* target_key = get(EntryToIndex(target));
+ if (!IsKey(target_key) ||
+ EntryForProbe(key, target_key, probe, target) != target) {
+ // Put the current element into the correct position.
+ Swap(current, target, mode);
+ // The other element will be processed on the next iteration.
+ current--;
+ } else {
+ // The place for the current element is occupied. Leave the element
+ // for the next probe.
+ done = false;
+ }
+ }
+ }
+ }
+}
+
+
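+
+For reference, the probe sequence that EntryForProbe() and Rehash() assume (an assumption drawn from the HashTable helpers in objects-inl.h, which this diff does not touch):
+
+  // uint32_t FirstProbe(uint32_t hash, uint32_t size) {
+  //   return hash & (size - 1);               // size is a power of two
+  // }
+  // uint32_t NextProbe(uint32_t last, uint32_t i, uint32_t size) {
+  //   return (last + i) & (size - 1);         // triangular-number stepping
+  // }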
+template<typename Shape, typename Key>
MaybeObject* HashTable<Shape, Key>::EnsureCapacity(int n, Key key) {
int capacity = Capacity();
int nof = NumberOfElements() + n;
@@ -15226,7 +15385,7 @@ MaybeObject* NameDictionary::TransformPropertiesToFastFor(
// Allocate the instance descriptor.
DescriptorArray* descriptors;
MaybeObject* maybe_descriptors =
- DescriptorArray::Allocate(instance_descriptor_length);
+ DescriptorArray::Allocate(GetIsolate(), instance_descriptor_length);
if (!maybe_descriptors->To(&descriptors)) {
return maybe_descriptors;
}
@@ -15333,6 +15492,7 @@ MaybeObject* ObjectHashSet::Add(Object* key) {
int hash;
{ MaybeObject* maybe_hash = key->GetHash(ALLOW_CREATION);
if (maybe_hash->IsFailure()) return maybe_hash;
+ ASSERT(key->GetHash(OMIT_CREATION) == maybe_hash);
hash = Smi::cast(maybe_hash->ToObjectUnchecked())->value();
}
int entry = FindEntry(key);
@@ -15394,6 +15554,7 @@ MaybeObject* ObjectHashTable::Put(Object* key, Object* value) {
int hash;
{ MaybeObject* maybe_hash = key->GetHash(ALLOW_CREATION);
if (maybe_hash->IsFailure()) return maybe_hash;
+ ASSERT(key->GetHash(OMIT_CREATION) == maybe_hash);
hash = Smi::cast(maybe_hash->ToObjectUnchecked())->value();
}
int entry = FindEntry(key);
@@ -15517,7 +15678,7 @@ void DebugInfo::ClearBreakPoint(Handle<DebugInfo> debug_info,
int code_position,
Handle<Object> break_point_object) {
Handle<Object> break_point_info(debug_info->GetBreakPointInfo(code_position),
- Isolate::Current());
+ debug_info->GetIsolate());
if (break_point_info->IsUndefined()) return;
BreakPointInfo::ClearBreakPoint(
Handle<BreakPointInfo>::cast(break_point_info),
@@ -15530,7 +15691,7 @@ void DebugInfo::SetBreakPoint(Handle<DebugInfo> debug_info,
int source_position,
int statement_position,
Handle<Object> break_point_object) {
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = debug_info->GetIsolate();
Handle<Object> break_point_info(debug_info->GetBreakPointInfo(code_position),
isolate);
if (!break_point_info->IsUndefined()) {
@@ -15644,7 +15805,7 @@ int DebugInfo::GetBreakPointInfoIndex(int code_position) {
// Remove the specified break point object.
void BreakPointInfo::ClearBreakPoint(Handle<BreakPointInfo> break_point_info,
Handle<Object> break_point_object) {
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = break_point_info->GetIsolate();
// If there are no break points just ignore.
if (break_point_info->break_point_objects()->IsUndefined()) return;
// If there is a single break point clear it if it is the same.
diff --git a/deps/v8/src/objects.h b/deps/v8/src/objects.h
index 178a61191..d3593b6ed 100644
--- a/deps/v8/src/objects.h
+++ b/deps/v8/src/objects.h
@@ -232,6 +232,13 @@ static inline bool IsGrowStoreMode(KeyedAccessStoreMode store_mode) {
enum WriteBarrierMode { SKIP_WRITE_BARRIER, UPDATE_WRITE_BARRIER };
+// Indicates whether a value can be loaded as a constant.
+enum StoreMode {
+ ALLOW_AS_CONSTANT,
+ FORCE_FIELD
+};
+
+
// PropertyNormalizationMode is used to specify whether to keep
// inobject properties when normalizing properties of a JSObject.
enum PropertyNormalizationMode {
@@ -258,7 +265,6 @@ enum CreationFlag {
// Indicates whether transitions can be added to a source map or not.
enum TransitionFlag {
INSERT_TRANSITION,
- OMIT_TRANSITION_KEEP_REPRESENTATIONS,
OMIT_TRANSITION
};
@@ -1107,6 +1113,8 @@ class MaybeObject BASE_EMBEDDED {
V(kEval, "eval") \
V(kExpected0AsASmiSentinel, "Expected 0 as a Smi sentinel") \
V(kExpectedAlignmentMarker, "expected alignment marker") \
+ V(kExpectedAllocationSiteInCell, \
+ "Expected AllocationSite in property cell") \
V(kExpectedPropertyCellInRegisterA2, \
"Expected property cell in register a2") \
V(kExpectedPropertyCellInRegisterEbx, \
@@ -1115,6 +1123,7 @@ class MaybeObject BASE_EMBEDDED {
"Expected property cell in register rbx") \
V(kExpectingAlignmentForCopyBytes, \
"Expecting alignment for CopyBytes") \
+ V(kExportDeclaration, "Export declaration") \
V(kExternalStringExpectedButNotFound, \
"external string expected, but not found") \
V(kFailedBailedOutLastTime, "failed/bailed out last time") \
@@ -1134,6 +1143,7 @@ class MaybeObject BASE_EMBEDDED {
V(kGlobalFunctionsMustHaveInitialMap, \
"Global functions must have initial map") \
V(kHeapNumberMapRegisterClobbered, "HeapNumberMap register clobbered") \
+ V(kImportDeclaration, "Import declaration") \
V(kImproperObjectOnPrototypeChainForStore, \
"improper object on prototype chain for store") \
V(kIndexIsNegative, "Index is negative") \
@@ -1190,6 +1200,12 @@ class MaybeObject BASE_EMBEDDED {
V(kLookupVariableInCountOperation, \
"lookup variable in count operation") \
V(kMapIsNoLongerInEax, "Map is no longer in eax") \
+ V(kModuleDeclaration, "Module declaration") \
+ V(kModuleLiteral, "Module literal") \
+ V(kModulePath, "Module path") \
+ V(kModuleStatement, "Module statement") \
+ V(kModuleVariable, "Module variable") \
+ V(kModuleUrl, "Module url") \
V(kNoCasesLeft, "no cases left") \
V(kNoEmptyArraysHereInEmitFastAsciiArrayJoin, \
"No empty arrays here in EmitFastAsciiArrayJoin") \
@@ -1200,6 +1216,8 @@ class MaybeObject BASE_EMBEDDED {
V(kNonSmiValue, "Non-smi value") \
V(kNotEnoughVirtualRegistersForValues, \
"not enough virtual registers for values") \
+ V(kNotEnoughSpillSlotsForOsr, \
+ "not enough spill slots for OSR") \
V(kNotEnoughVirtualRegistersRegalloc, \
"not enough virtual registers (regalloc)") \
V(kObjectFoundInSmiOnlyArray, "object found in smi-only array") \
@@ -1231,7 +1249,7 @@ class MaybeObject BASE_EMBEDDED {
V(kRegisterDidNotMatchExpectedRoot, "Register did not match expected root") \
V(kRegisterWasClobbered, "register was clobbered") \
V(kScopedBlock, "ScopedBlock") \
- V(kSharedFunctionInfoLiteral, "SharedFunctionInfoLiteral") \
+ V(kSharedFunctionInfoLiteral, "Shared function info literal") \
V(kSmiAdditionOverflow, "Smi addition overflow") \
V(kSmiSubtractionOverflow, "Smi subtraction overflow") \
V(kStackFrameTypesMustMatch, "stack frame types must match") \
@@ -1317,7 +1335,8 @@ class MaybeObject BASE_EMBEDDED {
"we should not have an empty lexical context") \
V(kWithStatement, "WithStatement") \
V(kWrongAddressOrValuePassedToRecordWrite, \
- "Wrong address or value passed to RecordWrite")
+ "Wrong address or value passed to RecordWrite") \
+ V(kYield, "Yield")
#define ERROR_MESSAGES_CONSTANTS(C, T) C,
@@ -1430,8 +1449,8 @@ class Object : public MaybeObject {
inline bool HasSpecificClassOf(String* name);
- MUST_USE_RESULT MaybeObject* ToObject(); // ECMA-262 9.9.
- bool BooleanValue(); // ECMA-262 9.2.
+ MUST_USE_RESULT MaybeObject* ToObject(Isolate* isolate); // ECMA-262 9.9.
+ bool BooleanValue(); // ECMA-262 9.2.
// Convert to a JSObject if needed.
// native_context is used when creating wrapper object.
@@ -1453,7 +1472,8 @@ class Object : public MaybeObject {
Name* key,
PropertyAttributes* attributes);
- static Handle<Object> GetProperty(Handle<Object> object, Handle<Name> key);
+ static Handle<Object> GetProperty(Handle<Object> object,
+ Handle<Name> key);
static Handle<Object> GetProperty(Handle<Object> object,
Handle<Object> receiver,
LookupResult* result,
@@ -1475,11 +1495,15 @@ class Object : public MaybeObject {
MUST_USE_RESULT MaybeObject* GetPropertyWithDefinedGetter(Object* receiver,
JSReceiver* getter);
- static Handle<Object> GetElement(Handle<Object> object, uint32_t index);
- MUST_USE_RESULT inline MaybeObject* GetElement(uint32_t index);
+ static Handle<Object> GetElement(Isolate* isolate,
+ Handle<Object> object,
+ uint32_t index);
+ MUST_USE_RESULT inline MaybeObject* GetElement(Isolate* isolate,
+ uint32_t index);
// For use when we know that no exception can be thrown.
- inline Object* GetElementNoExceptionThrown(uint32_t index);
- MUST_USE_RESULT MaybeObject* GetElementWithReceiver(Object* receiver,
+ inline Object* GetElementNoExceptionThrown(Isolate* isolate, uint32_t index);
+ MUST_USE_RESULT MaybeObject* GetElementWithReceiver(Isolate* isolate,
+ Object* receiver,
uint32_t index);
// Return the object's prototype (might be Heap::null_value()).
@@ -1511,10 +1535,7 @@ class Object : public MaybeObject {
inline void VerifyApiCallResultType();
// Prints this object without details.
- inline void ShortPrint() {
- ShortPrint(stdout);
- }
- void ShortPrint(FILE* out);
+ void ShortPrint(FILE* out = stdout);
// Prints this object without details to a message accumulator.
void ShortPrint(StringStream* accumulator);
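
The same mechanical simplification recurs through the rest of this header (SmiPrint, FailurePrint, HeapNumberPrint, PrintProperties, PrintElements, PrintTransitions): the trivial inline forwarding overload is folded into a default argument.

  // Before:
  //   inline void ShortPrint() { ShortPrint(stdout); }
  //   void ShortPrint(FILE* out);
  // After:
  //   void ShortPrint(FILE* out = stdout);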
@@ -1553,10 +1574,7 @@ class Smi: public Object {
static inline Smi* cast(Object* object);
// Dispatched behavior.
- inline void SmiPrint() {
- SmiPrint(stdout);
- }
- void SmiPrint(FILE* out);
+ void SmiPrint(FILE* out = stdout);
void SmiPrint(StringStream* accumulator);
DECLARE_VERIFIER(Smi)
@@ -1627,10 +1645,7 @@ class Failure: public MaybeObject {
static inline Failure* cast(MaybeObject* object);
// Dispatched behavior.
- inline void FailurePrint() {
- FailurePrint(stdout);
- }
- void FailurePrint(FILE* out);
+ void FailurePrint(FILE* out = stdout);
void FailurePrint(StringStream* accumulator);
DECLARE_VERIFIER(Failure)
@@ -1711,9 +1726,7 @@ class HeapObject: public Object {
// The Heap the object was allocated in. Used also to access Isolate.
inline Heap* GetHeap();
- // Convenience method to get current isolate. This method can be
- // accessed only when its result is the same as
- // Isolate::Current(), it ASSERTs this. See also comment for GetHeap.
+ // Convenience method to get current isolate.
inline Isolate* GetIsolate();
// Converts an address to a HeapObject pointer.
@@ -1745,6 +1758,13 @@ class HeapObject: public Object {
// during marking GC.
static inline Object** RawField(HeapObject* obj, int offset);
+ // Adds the |code| object related to |name| to the code cache of this map. If
+ // this map is a dictionary map that is shared, the map is copied and
+ // installed onto the object.
+ static void UpdateMapCodeCache(Handle<HeapObject> object,
+ Handle<Name> name,
+ Handle<Code> code);
+
// Casting.
static inline HeapObject* cast(Object* obj);
@@ -1759,12 +1779,9 @@ class HeapObject: public Object {
// Dispatched behavior.
void HeapObjectShortPrint(StringStream* accumulator);
#ifdef OBJECT_PRINT
- inline void HeapObjectPrint() {
- HeapObjectPrint(stdout);
- }
- void HeapObjectPrint(FILE* out);
void PrintHeader(FILE* out, const char* id);
#endif
+ DECLARE_PRINTER(HeapObject)
DECLARE_VERIFIER(HeapObject)
#ifdef VERIFY_HEAP
inline void VerifyObjectField(int offset);
@@ -1848,10 +1865,7 @@ class HeapNumber: public HeapObject {
// Dispatched behavior.
bool HeapNumberBooleanValue();
- inline void HeapNumberPrint() {
- HeapNumberPrint(stdout);
- }
- void HeapNumberPrint(FILE* out);
+ void HeapNumberPrint(FILE* out = stdout);
void HeapNumberPrint(StringStream* accumulator);
DECLARE_VERIFIER(HeapNumber)
@@ -1926,12 +1940,6 @@ class JSReceiver: public HeapObject {
CERTAINLY_NOT_STORE_FROM_KEYED
};
- // Indicates whether a value can be loaded as a constant.
- enum StoreMode {
- ALLOW_AS_CONSTANT,
- FORCE_FIELD
- };
-
// Internal properties (e.g. the hidden properties dictionary) might
// be added even though the receiver is non-extensible.
enum ExtensibilityCheck {
@@ -1947,6 +1955,11 @@ class JSReceiver: public HeapObject {
Handle<Object> value,
PropertyAttributes attributes,
StrictModeFlag strict_mode);
+ static Handle<Object> SetElement(Handle<JSReceiver> object,
+ uint32_t index,
+ Handle<Object> value,
+ PropertyAttributes attributes,
+ StrictModeFlag strict_mode);
MUST_USE_RESULT static MaybeObject* SetPropertyOrFail(
Handle<JSReceiver> object,
@@ -1978,15 +1991,7 @@ class JSReceiver: public HeapObject {
DeleteMode mode = NORMAL_DELETION);
static Handle<Object> DeleteElement(Handle<JSReceiver> object,
uint32_t index,
- DeleteMode mode);
-
- // Set the index'th array element.
- // Can cause GC, or return failure if GC is required.
- MUST_USE_RESULT MaybeObject* SetElement(uint32_t index,
- Object* value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode,
- bool check_prototype);
+ DeleteMode mode = NORMAL_DELETION);
// Tests for the fast common case for property enumeration.
bool IsSimpleEnum();
@@ -2159,7 +2164,6 @@ class JSObject: public JSReceiver {
Object* value,
PropertyAttributes attributes,
StrictModeFlag strict_mode,
- ExtensibilityCheck extensibility_check,
StoreMode mode = ALLOW_AS_CONSTANT);
static Handle<Object> SetLocalPropertyIgnoreAttributes(
@@ -2168,7 +2172,8 @@ class JSObject: public JSReceiver {
Handle<Object> value,
PropertyAttributes attributes,
ValueType value_type = OPTIMAL_REPRESENTATION,
- StoreMode mode = ALLOW_AS_CONSTANT);
+ StoreMode mode = ALLOW_AS_CONSTANT,
+ ExtensibilityCheck extensibility_check = PERFORM_EXTENSIBILITY_CHECK);
static inline Handle<String> ExpectedTransitionKey(Handle<Map> map);
static inline Handle<Map> ExpectedTransitionTarget(Handle<Map> map);
@@ -2192,12 +2197,13 @@ class JSObject: public JSReceiver {
inline MUST_USE_RESULT MaybeObject* TryMigrateInstance();
// Can cause GC.
- MUST_USE_RESULT MaybeObject* SetLocalPropertyIgnoreAttributes(
+ MUST_USE_RESULT MaybeObject* SetLocalPropertyIgnoreAttributesTrampoline(
Name* key,
Object* value,
PropertyAttributes attributes,
ValueType value_type = OPTIMAL_REPRESENTATION,
- StoreMode mode = ALLOW_AS_CONSTANT);
+ StoreMode mode = ALLOW_AS_CONSTANT,
+ ExtensibilityCheck extensibility_check = PERFORM_EXTENSIBILITY_CHECK);
// Retrieve a value in a normalized object given a lookup result.
// Handles the special representation of JS global objects.
@@ -2226,7 +2232,6 @@ class JSObject: public JSReceiver {
PropertyDetails details);
static void OptimizeAsPrototype(Handle<JSObject> object);
- MUST_USE_RESULT MaybeObject* OptimizeAsPrototype();
// Retrieve interceptors.
InterceptorInfo* GetNamedInterceptor();
@@ -2252,11 +2257,13 @@ class JSObject: public JSReceiver {
Handle<Name> name,
Handle<Object> getter,
Handle<Object> setter,
- PropertyAttributes attributes);
+ PropertyAttributes attributes,
+ v8::AccessControl access_control = v8::DEFAULT);
MaybeObject* LookupAccessor(Name* name, AccessorComponent component);
- MUST_USE_RESULT MaybeObject* DefineAccessor(AccessorInfo* info);
+ static Handle<Object> SetAccessor(Handle<JSObject> object,
+ Handle<AccessorInfo> info);
// Used from Object::GetProperty().
MUST_USE_RESULT MaybeObject* GetPropertyWithFailedAccessCheck(
@@ -2307,23 +2314,18 @@ class JSObject: public JSReceiver {
Object* GetHiddenProperty(Name* key);
// Deletes a hidden property. Deleting a non-existing property is
// considered successful.
- void DeleteHiddenProperty(Name* key);
+ static void DeleteHiddenProperty(Handle<JSObject> object,
+ Handle<Name> key);
// Returns true if the object has a property with the hidden string as name.
bool HasHiddenProperties();
- static int GetIdentityHash(Handle<JSObject> obj);
- MUST_USE_RESULT MaybeObject* GetIdentityHash(CreationFlag flag);
- MUST_USE_RESULT MaybeObject* SetIdentityHash(Smi* hash, CreationFlag flag);
-
- static Handle<Object> DeleteElement(Handle<JSObject> obj,
- uint32_t index,
- DeleteMode mode = NORMAL_DELETION);
- MUST_USE_RESULT MaybeObject* DeleteElement(uint32_t index, DeleteMode mode);
+ static int GetIdentityHash(Handle<JSObject> object);
+ static void SetIdentityHash(Handle<JSObject> object, Smi* hash);
inline void ValidateElements();
// Makes sure that this object can contain HeapObject as elements.
- MUST_USE_RESULT inline MaybeObject* EnsureCanContainHeapObjectElements();
+ static inline void EnsureCanContainHeapObjectElements(Handle<JSObject> obj);
// Makes sure that this object can contain the specified elements.
MUST_USE_RESULT inline MaybeObject* EnsureCanContainElements(
@@ -2499,7 +2501,8 @@ class JSObject: public JSReceiver {
MUST_USE_RESULT MaybeObject* AddConstantProperty(
Name* name,
Object* constant,
- PropertyAttributes attributes);
+ PropertyAttributes attributes,
+ TransitionFlag flag);
MUST_USE_RESULT MaybeObject* ReplaceSlowProperty(
Name* name,
@@ -2522,25 +2525,11 @@ class JSObject: public JSReceiver {
MUST_USE_RESULT MaybeObject* TransitionElementsKind(ElementsKind to_kind);
MUST_USE_RESULT MaybeObject* UpdateAllocationSite(ElementsKind to_kind);
- // Replaces an existing transition with a transition to a map with a FIELD.
- MUST_USE_RESULT MaybeObject* ConvertTransitionToMapTransition(
- int transition_index,
- Name* name,
- Object* new_value,
- PropertyAttributes attributes);
-
- // Converts a descriptor of any other type to a real field, backed by the
- // properties array.
- MUST_USE_RESULT MaybeObject* ConvertDescriptorToField(
- Name* name,
- Object* new_value,
- PropertyAttributes attributes,
- TransitionFlag flag = OMIT_TRANSITION);
-
MUST_USE_RESULT MaybeObject* MigrateToMap(Map* new_map);
MUST_USE_RESULT MaybeObject* GeneralizeFieldRepresentation(
int modify_index,
- Representation new_representation);
+ Representation new_representation,
+ StoreMode store_mode);
// Add a property to a fast-case object.
MUST_USE_RESULT MaybeObject* AddFastProperty(
@@ -2548,7 +2537,8 @@ class JSObject: public JSReceiver {
Object* value,
PropertyAttributes attributes,
StoreFromKeyed store_mode = MAY_BE_STORE_FROM_KEYED,
- ValueType value_type = OPTIMAL_REPRESENTATION);
+ ValueType value_type = OPTIMAL_REPRESENTATION,
+ TransitionFlag flag = INSERT_TRANSITION);
// Add a property to a slow-case object.
MUST_USE_RESULT MaybeObject* AddSlowProperty(Name* name,
@@ -2564,7 +2554,8 @@ class JSObject: public JSReceiver {
StoreFromKeyed store_mode = MAY_BE_STORE_FROM_KEYED,
ExtensibilityCheck extensibility_check = PERFORM_EXTENSIBILITY_CHECK,
ValueType value_type = OPTIMAL_REPRESENTATION,
- StoreMode mode = ALLOW_AS_CONSTANT);
+ StoreMode mode = ALLOW_AS_CONSTANT,
+ TransitionFlag flag = INSERT_TRANSITION);
// Convert the object to use the canonical dictionary
// representation. If the object is expected to have additional properties
@@ -2585,12 +2576,6 @@ class JSObject: public JSReceiver {
MUST_USE_RESULT MaybeObject* NormalizeElements();
- static void UpdateMapCodeCache(Handle<JSObject> object,
- Handle<Name> name,
- Handle<Code> code);
-
- MUST_USE_RESULT MaybeObject* UpdateMapCodeCache(Name* name, Code* code);
-
// Transform slow named properties to fast variants.
// Returns failure if allocation failed.
static void TransformToFastProperties(Handle<JSObject> object,
@@ -2653,25 +2638,17 @@ class JSObject: public JSReceiver {
DECLARE_PRINTER(JSObject)
DECLARE_VERIFIER(JSObject)
#ifdef OBJECT_PRINT
- inline void PrintProperties() {
- PrintProperties(stdout);
- }
- void PrintProperties(FILE* out);
-
- inline void PrintElements() {
- PrintElements(stdout);
- }
- void PrintElements(FILE* out);
- inline void PrintTransitions() {
- PrintTransitions(stdout);
- }
- void PrintTransitions(FILE* out);
+ void PrintProperties(FILE* out = stdout);
+ void PrintElements(FILE* out = stdout);
+ void PrintTransitions(FILE* out = stdout);
#endif
void PrintElementsTransition(
FILE* file, ElementsKind from_kind, FixedArrayBase* from_elements,
ElementsKind to_kind, FixedArrayBase* to_elements);
+ void PrintInstanceMigration(FILE* file, Map* original_map, Map* new_map);
+
#ifdef DEBUG
// Structure for collecting spill information about JSObjects.
class SpillInformation {
@@ -2698,7 +2675,8 @@ class JSObject: public JSReceiver {
// Maximal number of fast properties for the JSObject. Used to
// restrict the number of map transitions to avoid an explosion in
// the number of maps for objects used as dictionaries.
- inline bool TooManyFastProperties(int properties, StoreFromKeyed store_mode);
+ inline bool TooManyFastProperties(
+ StoreFromKeyed store_mode = MAY_BE_STORE_FROM_KEYED);
// Maximal number of elements (numbered 0 .. kMaxElementCount - 1).
// Also maximal value of JSArray's length property.
@@ -2719,10 +2697,9 @@ class JSObject: public JSReceiver {
// don't want to be wasteful with long lived objects.
static const int kMaxUncheckedOldFastElementsLength = 500;
- // TODO(2790): HAllocate currently always allocates fast backing stores
- // in new space, where on x64 we can only fit ~98K elements. Keep this
- // limit lower than that until HAllocate is made smarter.
- static const int kInitialMaxFastElementArray = 95000;
+ // Note that Heap::MaxRegularSpaceAllocationSize() puts a limit on
+ // permissible values (see the ASSERT in heap.cc).
+ static const int kInitialMaxFastElementArray = 100000;
static const int kFastPropertiesSoftLimit = 12;
static const int kMaxFastProperties = 64;
@@ -2757,6 +2734,15 @@ class JSObject: public JSReceiver {
friend class DictionaryElementsAccessor;
friend class JSReceiver;
+ // TODO(mstarzinger): Soon to be handlified.
+ MUST_USE_RESULT MaybeObject* SetLocalPropertyIgnoreAttributes(
+ Name* key,
+ Object* value,
+ PropertyAttributes attributes,
+ ValueType value_type = OPTIMAL_REPRESENTATION,
+ StoreMode mode = ALLOW_AS_CONSTANT,
+ ExtensibilityCheck extensibility_check = PERFORM_EXTENSIBILITY_CHECK);
+
MUST_USE_RESULT MaybeObject* GetElementWithCallback(Object* receiver,
Object* structure,
uint32_t index,
@@ -2815,11 +2801,11 @@ class JSObject: public JSReceiver {
Handle<Name> name,
DeleteMode mode);
- MUST_USE_RESULT MaybeObject* DeleteElementWithInterceptor(uint32_t index);
-
- MUST_USE_RESULT MaybeObject* DeleteFastElement(uint32_t index);
- MUST_USE_RESULT MaybeObject* DeleteDictionaryElement(uint32_t index,
- DeleteMode mode);
+ static Handle<Object> DeleteElement(Handle<JSObject> object,
+ uint32_t index,
+ DeleteMode mode);
+ static Handle<Object> DeleteElementWithInterceptor(Handle<JSObject> object,
+ uint32_t index);
bool ReferencesObjectFromElements(FixedArray* elements,
ElementsKind kind,
@@ -2832,26 +2818,28 @@ class JSObject: public JSReceiver {
void GetElementsCapacityAndUsage(int* capacity, int* used);
bool CanSetCallback(Name* name);
- MUST_USE_RESULT MaybeObject* SetElementCallback(
- uint32_t index,
- Object* structure,
- PropertyAttributes attributes);
- MUST_USE_RESULT MaybeObject* SetPropertyCallback(
- Name* name,
- Object* structure,
- PropertyAttributes attributes);
+ static void SetElementCallback(Handle<JSObject> object,
+ uint32_t index,
+ Handle<Object> structure,
+ PropertyAttributes attributes);
+ static void SetPropertyCallback(Handle<JSObject> object,
+ Handle<Name> name,
+ Handle<Object> structure,
+ PropertyAttributes attributes);
static void DefineElementAccessor(Handle<JSObject> object,
uint32_t index,
Handle<Object> getter,
Handle<Object> setter,
- PropertyAttributes attributes);
+ PropertyAttributes attributes,
+ v8::AccessControl access_control);
static Handle<AccessorPair> CreateAccessorPairFor(Handle<JSObject> object,
Handle<Name> name);
static void DefinePropertyAccessor(Handle<JSObject> object,
Handle<Name> name,
Handle<Object> getter,
Handle<Object> setter,
- PropertyAttributes attributes);
+ PropertyAttributes attributes,
+ v8::AccessControl access_control);
// Try to define a single accessor paying attention to map transitions.
// Returns false if this was not possible and we have to use the slow case.
@@ -2877,6 +2865,8 @@ class JSObject: public JSReceiver {
MUST_USE_RESULT MaybeObject* SetHiddenPropertiesHashTable(
Object* value);
+ MUST_USE_RESULT MaybeObject* GetIdentityHash(CreationFlag flag);
+
DISALLOW_IMPLICIT_CONSTRUCTORS(JSObject);
};
@@ -2918,11 +2908,7 @@ class FixedArray: public FixedArrayBase {
// Setters for frequently used oddballs located in old space.
inline void set_undefined(int index);
- // TODO(isolates): duplicate.
- inline void set_undefined(Heap* heap, int index);
inline void set_null(int index);
- // TODO(isolates): duplicate.
- inline void set_null(Heap* heap, int index);
inline void set_the_hole(int index);
inline Object** GetFirstElementAddress();
@@ -3192,6 +3178,8 @@ class DescriptorArray: public FixedArray {
MUST_USE_RESULT MaybeObject* Merge(int verbatim,
int valid,
int new_size,
+ int modify_index,
+ StoreMode store_mode,
DescriptorArray* other);
bool IsMoreGeneralThan(int verbatim,
@@ -3219,7 +3207,8 @@ class DescriptorArray: public FixedArray {
// Allocates a DescriptorArray, but returns the singleton
// empty descriptor array object if number_of_descriptors is 0.
- MUST_USE_RESULT static MaybeObject* Allocate(int number_of_descriptors,
+ MUST_USE_RESULT static MaybeObject* Allocate(Isolate* isolate,
+ int number_of_descriptors,
int slack = 0);
// Casting.
@@ -3253,10 +3242,7 @@ class DescriptorArray: public FixedArray {
#ifdef OBJECT_PRINT
// Print all the descriptors.
- inline void PrintDescriptors() {
- PrintDescriptors(stdout);
- }
- void PrintDescriptors(FILE* out);
+ void PrintDescriptors(FILE* out = stdout);
#endif
#ifdef DEBUG
@@ -3494,6 +3480,9 @@ class HashTable: public FixedArray {
inline int FindEntry(Key key);
int FindEntry(Isolate* isolate, Key key);
+ // Rehashes the table in-place.
+ void Rehash(Key key);
+
protected:
// Find the entry at which to insert element with the given key that
// has the given hash value.
@@ -3540,6 +3529,13 @@ class HashTable: public FixedArray {
return (last + number) & (size - 1);
}
+ // Returns _expected_ if one of the entries given by the first _probe_ probes
+ // is equal to _expected_. Otherwise, returns the entry given by the probe
+ // number _probe_.
+ uint32_t EntryForProbe(Key key, Object* k, int probe, uint32_t expected);
+
+ void Swap(uint32_t entry1, uint32_t entry2, WriteBarrierMode mode);
+
// Rehashes this hash-table into the new table.
MUST_USE_RESULT MaybeObject* Rehash(HashTable* new_table, Key key);
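Aside: the probe arithmetic above ((last + number) & (size - 1)) relies on
table sizes being powers of two, which makes the mask a cheap modulo. A
minimal stand-alone illustration of the wrap-around behaviour (hypothetical
values, not V8 code):

#include <cassert>

// Masking with (size - 1) is equivalent to "% size" when size is a
// power of two, but avoids an integer division.
unsigned NextProbe(unsigned last, unsigned number, unsigned size) {
  assert((size & (size - 1)) == 0);  // size must be a power of two
  return (last + number) & (size - 1);
}

int main() {
  assert(NextProbe(6, 3, 8) == 1);  // (6 + 3) % 8 wraps to the front
  assert(NextProbe(0, 5, 8) == 5);  // no wrap needed
  return 0;
}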
@@ -3749,10 +3745,7 @@ class Dictionary: public HashTable<Shape, Key> {
MUST_USE_RESULT MaybeObject* EnsureCapacity(int n, Key key);
#ifdef OBJECT_PRINT
- inline void Print() {
- Print(stdout);
- }
- void Print(FILE* out);
+ void Print(FILE* out = stdout);
#endif
// Returns the key (slow).
Object* SlowReverseLookup(Object* value);
@@ -4659,7 +4652,8 @@ class DeoptimizationInputData: public FixedArray {
}
// Allocates a DeoptimizationInputData.
- MUST_USE_RESULT static MaybeObject* Allocate(int deopt_entry_count,
+ MUST_USE_RESULT static MaybeObject* Allocate(Isolate* isolate,
+ int deopt_entry_count,
PretenureFlag pretenure);
// Casting.
@@ -4705,7 +4699,8 @@ class DeoptimizationOutputData: public FixedArray {
}
// Allocates a DeoptimizationOutputData.
- MUST_USE_RESULT static MaybeObject* Allocate(int number_of_deopt_points,
+ MUST_USE_RESULT static MaybeObject* Allocate(Isolate* isolate,
+ int number_of_deopt_points,
PretenureFlag pretenure);
// Casting.
@@ -4834,10 +4829,7 @@ class Code: public HeapObject {
static const char* ICState2String(InlineCacheState state);
static const char* StubType2String(StubType type);
static void PrintExtraICState(FILE* out, Kind kind, ExtraICState extra);
- inline void Disassemble(const char* name) {
- Disassemble(name, stdout);
- }
- void Disassemble(const char* name, FILE* out);
+ void Disassemble(const char* name, FILE* out = stdout);
#endif // ENABLE_DISASSEMBLER
// [instruction_size]: Size of the native instructions
@@ -4854,21 +4846,19 @@ class Code: public HeapObject {
// [deoptimization_data]: Array containing data for deopt.
DECL_ACCESSORS(deoptimization_data, FixedArray)
- // [type_feedback_info]: Struct containing type feedback information for
- // unoptimized code. Optimized code can temporarily store the head of
- // the list of code to be deoptimized during mark-compact GC.
- // STUBs can use this slot to store arbitrary information as a Smi.
- // Will contain either a TypeFeedbackInfo object, or JSFunction object,
- // or undefined, or a Smi.
+ // [type_feedback_info]: This field stores various things, depending on the
+ // kind of the code object.
+ // FUNCTION => type feedback information.
+ // STUB => various things, e.g., a Smi.
+ // OPTIMIZED_FUNCTION => the next_code_link for the optimized code list.
DECL_ACCESSORS(type_feedback_info, Object)
inline void InitializeTypeFeedbackInfoNoWriteBarrier(Object* value);
inline int stub_info();
inline void set_stub_info(int info);
- // Used during GC to code a list of code objects to deoptimize.
- inline Object* code_to_deoptimize_link();
- inline void set_code_to_deoptimize_link(Object* value);
- inline Object** code_to_deoptimize_link_slot();
+ // [next_code_link]: Link for lists of optimized or deoptimized code.
+ // Note that storage for this field is overlapped with type_feedback_info.
+ DECL_ACCESSORS(next_code_link, Object)
// [gc_metadata]: Field used to hold GC-related metadata. The contents of this
// field do not have to be traced during garbage collection since
@@ -5148,6 +5138,8 @@ class Code: public HeapObject {
void ClearInlineCaches();
void ClearTypeFeedbackCells(Heap* heap);
+ BailoutId TranslatePcOffsetToAstId(uint32_t pc_offset);
+
#define DECLARE_CODE_AGE_ENUM(X) k##X##CodeAge,
enum Age {
kNoAge = 0,
@@ -5187,6 +5179,7 @@ class Code: public HeapObject {
kHandlerTableOffset + kPointerSize;
static const int kTypeFeedbackInfoOffset =
kDeoptimizationDataOffset + kPointerSize;
+ static const int kNextCodeLinkOffset = kTypeFeedbackInfoOffset; // Shared.
static const int kGCMetadataOffset = kTypeFeedbackInfoOffset + kPointerSize;
static const int kICAgeOffset =
kGCMetadataOffset + kPointerSize;
@@ -5621,11 +5614,30 @@ class Map: public HeapObject {
static Handle<Map> GeneralizeRepresentation(
Handle<Map> map,
int modify_index,
- Representation new_representation);
+ Representation new_representation,
+ StoreMode store_mode);
MUST_USE_RESULT MaybeObject* GeneralizeRepresentation(
int modify_index,
- Representation representation);
- MUST_USE_RESULT MaybeObject* CopyGeneralizeAllRepresentations();
+ Representation representation,
+ StoreMode store_mode);
+ MUST_USE_RESULT MaybeObject* CopyGeneralizeAllRepresentations(
+ int modify_index,
+ StoreMode store_mode,
+ PropertyAttributes attributes,
+ const char* reason);
+
+ void PrintGeneralization(FILE* file,
+ const char* reason,
+ int modify_index,
+ int split,
+ int descriptors,
+ bool constant_to_field,
+ Representation old_representation,
+ Representation new_representation);
+
+ // Returns the constructor name (the name, possibly inferred, of the
+ // function that was used to instantiate the object).
+ String* constructor_name();
// Tells whether the map is attached to SharedFunctionInfo
// (for inobject slack tracking).
@@ -5808,6 +5820,9 @@ class Map: public HeapObject {
TransitionFlag flag);
MUST_USE_RESULT MaybeObject* CopyForObserved();
+ static Handle<Map> CopyNormalized(Handle<Map> map,
+ PropertyNormalizationMode mode,
+ NormalizedMapSharingMode sharing);
MUST_USE_RESULT MaybeObject* CopyNormalized(PropertyNormalizationMode mode,
NormalizedMapSharingMode sharing);
@@ -5911,6 +5926,10 @@ class Map: public HeapObject {
return instance_type() >= FIRST_JS_OBJECT_TYPE;
}
+ bool IsJSObjectMap() {
+ return instance_type() >= FIRST_JS_OBJECT_TYPE;
+ }
+
// Fires when the layout of an object with a leaf map changes.
// This includes adding transitions to the leaf map or changing
// the descriptor array.
@@ -6581,6 +6600,8 @@ class SharedFunctionInfo: public HeapObject {
// shared function info.
void DisableOptimization(BailoutReason reason);
+ inline BailoutReason DisableOptimizationReason();
+
// Lookup the bailout ID and ASSERT that it exists in the non-optimized
// code, returns whether it asserted (i.e., always true if assertions are
// disabled).
@@ -6610,6 +6631,21 @@ class SharedFunctionInfo: public HeapObject {
inline void set_counters(int value);
inline int counters();
+ // Stores opt_count and bailout_reason as bit-fields.
+ inline void set_opt_count_and_bailout_reason(int value);
+ inline int opt_count_and_bailout_reason();
+
+ void set_bailout_reason(BailoutReason reason) {
+ set_opt_count_and_bailout_reason(
+ DisabledOptimizationReasonBits::update(opt_count_and_bailout_reason(),
+ reason));
+ }
+
+ void set_dont_optimize_reason(BailoutReason reason) {
+ set_bailout_reason(reason);
+ set_dont_optimize(reason != kNoReason);
+ }
+
// Source size of this function.
int SourceSize();
@@ -6676,8 +6712,10 @@ class SharedFunctionInfo: public HeapObject {
kEndPositionOffset + kPointerSize;
static const int kCompilerHintsOffset =
kFunctionTokenPositionOffset + kPointerSize;
- static const int kOptCountOffset = kCompilerHintsOffset + kPointerSize;
- static const int kCountersOffset = kOptCountOffset + kPointerSize;
+ static const int kOptCountAndBailoutReasonOffset =
+ kCompilerHintsOffset + kPointerSize;
+ static const int kCountersOffset =
+ kOptCountAndBailoutReasonOffset + kPointerSize;
// Total size.
static const int kSize = kCountersOffset + kPointerSize;
@@ -6711,9 +6749,11 @@ class SharedFunctionInfo: public HeapObject {
static const int kCompilerHintsOffset =
kFunctionTokenPositionOffset + kIntSize;
- static const int kOptCountOffset = kCompilerHintsOffset + kIntSize;
+ static const int kOptCountAndBailoutReasonOffset =
+ kCompilerHintsOffset + kIntSize;
- static const int kCountersOffset = kOptCountOffset + kIntSize;
+ static const int kCountersOffset =
+ kOptCountAndBailoutReasonOffset + kIntSize;
// Total size.
static const int kSize = kCountersOffset + kIntSize;
@@ -6772,6 +6812,9 @@ class SharedFunctionInfo: public HeapObject {
class OptReenableTriesBits: public BitField<int, 4, 18> {};
class ICAgeBits: public BitField<int, 22, 8> {};
+ class OptCountBits: public BitField<int, 0, 22> {};
+ class DisabledOptimizationReasonBits: public BitField<int, 22, 8> {};
+
private:
#if V8_HOST_ARCH_32_BIT
// On 32 bit platforms, compiler hints is a smi.
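Aside: the two new BitField classes pack a 22-bit optimization count and an
8-bit bailout reason into one integer payload, which is what
set_bailout_reason() above updates in place. A minimal stand-alone sketch of
the encode/decode/update pattern (illustrative only; V8's real BitField
template lives in src/utils.h):

template <class T, int shift, int size>
struct BitFieldSketch {
  // Mask covering this field's bits within the packed word.
  static const unsigned kMask = ((1u << size) - 1) << shift;
  static unsigned encode(T value) {
    return static_cast<unsigned>(value) << shift;
  }
  static T decode(unsigned packed) {
    return static_cast<T>((packed & kMask) >> shift);
  }
  // Replaces only this field, leaving the other bits intact -- the
  // same shape as DisabledOptimizationReasonBits::update() above.
  static unsigned update(unsigned previous, T value) {
    return (previous & ~kMask) | encode(value);
  }
};

typedef BitFieldSketch<int, 0, 22> OptCountBitsSketch;
typedef BitFieldSketch<int, 22, 8> ReasonBitsSketch;

int main() {
  unsigned packed = 0;
  packed = OptCountBitsSketch::update(packed, 17);  // opt count = 17
  packed = ReasonBitsSketch::update(packed, 3);     // reason id = 3
  return (OptCountBitsSketch::decode(packed) == 17 &&
          ReasonBitsSketch::decode(packed) == 3) ? 0 : 1;
}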
@@ -6956,8 +6999,7 @@ class JSFunction: public JSObject {
// Mark this function for lazy recompilation. The function will be
// recompiled the next time it is executed.
void MarkForLazyRecompilation();
- void MarkForParallelRecompilation();
- void MarkForInstallingRecompiledCode();
+ void MarkForConcurrentRecompilation();
void MarkInRecompileQueue();
// Helpers to compile this function. Returns true on success, false on
@@ -6966,18 +7008,18 @@ class JSFunction: public JSObject {
ClearExceptionFlag flag);
static bool CompileLazy(Handle<JSFunction> function,
ClearExceptionFlag flag);
+ static Handle<Code> CompileOsr(Handle<JSFunction> function,
+ BailoutId osr_ast_id,
+ ClearExceptionFlag flag);
static bool CompileOptimized(Handle<JSFunction> function,
- BailoutId osr_ast_id,
ClearExceptionFlag flag);
// Tells whether or not the function is already marked for lazy
// recompilation.
inline bool IsMarkedForLazyRecompilation();
- inline bool IsMarkedForParallelRecompilation();
- inline bool IsMarkedForInstallingRecompiledCode();
+ inline bool IsMarkedForConcurrentRecompilation();
- // Tells whether or not the function is on the parallel
- // recompilation queue.
+ // Tells whether or not the function is on the concurrent recompilation queue.
inline bool IsInRecompileQueue();
// Check whether or not this function is inlineable.
@@ -7042,15 +7084,14 @@ class JSFunction: public JSObject {
// Returns if this function has been compiled to native code yet.
inline bool is_compiled();
- // [next_function_link]: Field for linking functions. This list is treated as
- // a weak list by the GC.
+ // [next_function_link]: Links functions into various lists, e.g. the list
+ // of optimized functions hanging off the native_context. The CodeFlusher
+ // uses this link to chain together flushing candidates. Treated weakly
+ // by the garbage collector.
DECL_ACCESSORS(next_function_link, Object)
// Prints the name of the function using PrintF.
- inline void PrintName() {
- PrintName(stdout);
- }
- void PrintName(FILE* out);
+ void PrintName(FILE* out = stdout);
// Casting.
static inline JSFunction* cast(Object* obj);
@@ -7069,7 +7110,8 @@ class JSFunction: public JSObject {
// Retrieve the native context from a function's literal array.
static Context* NativeContextFromLiterals(FixedArray* literals);
- bool PassesHydrogenFilter();
+ // Used for flags such as --hydrogen-filter.
+ bool PassesFilter(const char* raw_filter);
// Layout descriptors. The last property (from kNonWeakFieldsEndOffset to
// kSize) is weak and has special handling during garbage collection.
@@ -8025,6 +8067,8 @@ class Name: public HeapObject {
// Casting.
static inline Name* cast(Object* obj);
+ bool IsCacheable(Isolate* isolate);
+
DECLARE_PRINTER(Name)
// Layout description.
@@ -8294,13 +8338,9 @@ class String: public Name {
// Dispatched behavior.
void StringShortPrint(StringStream* accumulator);
#ifdef OBJECT_PRINT
- inline void StringPrint() {
- StringPrint(stdout);
- }
- void StringPrint(FILE* out);
-
char* ToAsciiArray();
#endif
+ DECLARE_PRINTER(String)
DECLARE_VERIFIER(String)
inline bool IsFlat();
@@ -8752,13 +8792,14 @@ class Relocatable BASE_EMBEDDED {
virtual void IterateInstance(ObjectVisitor* v) { }
virtual void PostGarbageCollection() { }
- static void PostGarbageCollectionProcessing();
+ static void PostGarbageCollectionProcessing(Isolate* isolate);
static int ArchiveSpacePerThread();
static char* ArchiveState(Isolate* isolate, char* to);
static char* RestoreState(Isolate* isolate, char* from);
- static void Iterate(ObjectVisitor* v);
+ static void Iterate(Isolate* isolate, ObjectVisitor* v);
static void Iterate(ObjectVisitor* v, Relocatable* top);
static char* Iterate(ObjectVisitor* v, char* t);
+
private:
Isolate* isolate_;
Relocatable* prev_;
@@ -8894,7 +8935,8 @@ class Oddball: public HeapObject {
DECLARE_VERIFIER(Oddball)
// Initialize the fields.
- MUST_USE_RESULT MaybeObject* Initialize(const char* to_string,
+ MUST_USE_RESULT MaybeObject* Initialize(Heap* heap,
+ const char* to_string,
Object* to_number,
byte kind);
@@ -9044,11 +9086,6 @@ class JSProxy: public JSReceiver {
Object* value,
PropertyAttributes attributes,
StrictModeFlag strict_mode);
- MUST_USE_RESULT MaybeObject* SetElementWithHandler(
- JSReceiver* receiver,
- uint32_t index,
- Object* value,
- StrictModeFlag strict_mode);
// If the handler defines an accessor property with a setter, invoke it.
// If it defines an accessor property without a setter, or a data property
@@ -9069,10 +9106,8 @@ class JSProxy: public JSReceiver {
JSReceiver* receiver,
uint32_t index);
- MUST_USE_RESULT MaybeObject* GetIdentityHash(CreationFlag flag);
-
- // Turn this into an (empty) JSObject.
- void Fix();
+ // Turn the proxy into an (empty) JSObject.
+ static void Fix(Handle<JSProxy> proxy);
// Initializes the body after the handler slot.
inline void InitializeBody(int object_size, Object* value);
@@ -9107,13 +9142,23 @@ class JSProxy: public JSReceiver {
private:
friend class JSReceiver;
- static Handle<Object> DeletePropertyWithHandler(Handle<JSProxy> object,
+ static Handle<Object> SetElementWithHandler(Handle<JSProxy> proxy,
+ Handle<JSReceiver> receiver,
+ uint32_t index,
+ Handle<Object> value,
+ StrictModeFlag strict_mode);
+
+ static Handle<Object> DeletePropertyWithHandler(Handle<JSProxy> proxy,
Handle<Name> name,
DeleteMode mode);
- static Handle<Object> DeleteElementWithHandler(Handle<JSProxy> object,
+ static Handle<Object> DeleteElementWithHandler(Handle<JSProxy> proxy,
uint32_t index,
DeleteMode mode);
+ MUST_USE_RESULT MaybeObject* GetIdentityHash(CreationFlag flag);
+ static Handle<Object> GetIdentityHash(Handle<JSProxy> proxy,
+ CreationFlag flag);
+
DISALLOW_IMPLICIT_CONSTRUCTORS(JSProxy);
};
@@ -9514,6 +9559,11 @@ class AccessorInfo: public Struct {
// Dispatched behavior.
DECLARE_VERIFIER(AccessorInfo)
+ // Append to the array all descriptors that are not already present.
+ // Return the number added.
+ static int AppendUnique(Handle<Object> descriptors,
+ Handle<FixedArray> array,
+ int valid_descriptors);
static const int kNameOffset = HeapObject::kHeaderSize;
static const int kFlagOffset = kNameOffset + kPointerSize;
@@ -9677,10 +9727,18 @@ class ExecutableAccessorInfo: public AccessorInfo {
// * undefined: considered an accessor by the spec, too, strangely enough
// * the hole: an accessor which has not been set
// * a pointer to a map: a transition used to ensure map sharing
+// access_flags makes it possible to override access checks when an access
+// check fails.
class AccessorPair: public Struct {
public:
DECL_ACCESSORS(getter, Object)
DECL_ACCESSORS(setter, Object)
+ DECL_ACCESSORS(access_flags, Smi)
+
+ inline void set_access_flags(v8::AccessControl access_control);
+ inline bool all_can_read();
+ inline bool all_can_write();
+ inline bool prohibits_overwriting();
static inline AccessorPair* cast(Object* obj);
@@ -9717,9 +9775,14 @@ class AccessorPair: public Struct {
static const int kGetterOffset = HeapObject::kHeaderSize;
static const int kSetterOffset = kGetterOffset + kPointerSize;
- static const int kSize = kSetterOffset + kPointerSize;
+ static const int kAccessFlagsOffset = kSetterOffset + kPointerSize;
+ static const int kSize = kAccessFlagsOffset + kPointerSize;
private:
+ static const int kAllCanReadBit = 0;
+ static const int kAllCanWriteBit = 1;
+ static const int kProhibitsOverwritingBit = 2;
+
// Strangely enough, in addition to functions and harmony proxies, the spec
// requires us to consider undefined as a kind of accessor, too:
// var obj = {};
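Aside: kAllCanReadBit and friends are bit positions inside the access_flags
Smi that set_access_flags() fills from a v8::AccessControl value. A hedged
sketch of how such single-bit flags are set and tested (illustrative; the
real accessors live in objects-inl.h):

static const int kAllCanReadBit = 0;   // positions as declared above
static const int kAllCanWriteBit = 1;
static const int kProhibitsOverwritingBit = 2;

inline int SetBit(int flags, int bit) { return flags | (1 << bit); }
inline bool TestBit(int flags, int bit) { return (flags & (1 << bit)) != 0; }

int main() {
  int access_flags = 0;
  access_flags = SetBit(access_flags, kAllCanReadBit);
  // all_can_read() would now report true and all_can_write() false.
  return (TestBit(access_flags, kAllCanReadBit) &&
          !TestBit(access_flags, kAllCanWriteBit)) ? 0 : 1;
}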
@@ -9807,12 +9870,15 @@ class TemplateInfo: public Struct {
public:
DECL_ACCESSORS(tag, Object)
DECL_ACCESSORS(property_list, Object)
+ DECL_ACCESSORS(property_accessors, Object)
DECLARE_VERIFIER(TemplateInfo)
- static const int kTagOffset = HeapObject::kHeaderSize;
+ static const int kTagOffset = HeapObject::kHeaderSize;
static const int kPropertyListOffset = kTagOffset + kPointerSize;
- static const int kHeaderSize = kPropertyListOffset + kPointerSize;
+ static const int kPropertyAccessorsOffset =
+ kPropertyListOffset + kPointerSize;
+ static const int kHeaderSize = kPropertyAccessorsOffset + kPointerSize;
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(TemplateInfo);
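Aside: these offset constants chain each field off the previous one, so
inserting property_accessors costs exactly one pointer-sized slot and bumps
kHeaderSize, which in turn shifts every FunctionTemplateInfo offset in the
next hunk. A toy version of the same layout arithmetic (the header size and
kPointerSize here are stand-in values):

#include <cstdio>

static const int kPointerSize = sizeof(void*);  // stand-in
static const int kHeapObjectHeaderSize = 8;     // assumed placeholder
static const int kTagOffset = kHeapObjectHeaderSize;
static const int kPropertyListOffset = kTagOffset + kPointerSize;
static const int kPropertyAccessorsOffset =
    kPropertyListOffset + kPointerSize;         // the newly inserted slot
static const int kHeaderSize = kPropertyAccessorsOffset + kPointerSize;

int main() {
  std::printf("TemplateInfo header size: %d\n", kHeaderSize);
  return 0;
}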
@@ -9823,7 +9889,6 @@ class FunctionTemplateInfo: public TemplateInfo {
public:
DECL_ACCESSORS(serial_number, Object)
DECL_ACCESSORS(call_code, Object)
- DECL_ACCESSORS(property_accessors, Object)
DECL_ACCESSORS(prototype_template, Object)
DECL_ACCESSORS(parent_template, Object)
DECL_ACCESSORS(named_property_handler, Object)
@@ -9845,6 +9910,8 @@ class FunctionTemplateInfo: public TemplateInfo {
// requires access check.
DECL_BOOLEAN_ACCESSORS(needs_access_check)
DECL_BOOLEAN_ACCESSORS(read_only_prototype)
+ DECL_BOOLEAN_ACCESSORS(remove_prototype)
+ DECL_BOOLEAN_ACCESSORS(do_not_cache)
static inline FunctionTemplateInfo* cast(Object* obj);
@@ -9854,9 +9921,8 @@ class FunctionTemplateInfo: public TemplateInfo {
static const int kSerialNumberOffset = TemplateInfo::kHeaderSize;
static const int kCallCodeOffset = kSerialNumberOffset + kPointerSize;
- static const int kPropertyAccessorsOffset = kCallCodeOffset + kPointerSize;
static const int kPrototypeTemplateOffset =
- kPropertyAccessorsOffset + kPointerSize;
+ kCallCodeOffset + kPointerSize;
static const int kParentTemplateOffset =
kPrototypeTemplateOffset + kPointerSize;
static const int kNamedPropertyHandlerOffset =
@@ -9880,6 +9946,8 @@ class FunctionTemplateInfo: public TemplateInfo {
static const int kUndetectableBit = 1;
static const int kNeedsAccessCheckBit = 2;
static const int kReadOnlyPrototypeBit = 3;
+ static const int kRemovePrototypeBit = 4;
+ static const int kDoNotCacheBit = 5;
DISALLOW_IMPLICIT_CONSTRUCTORS(FunctionTemplateInfo);
};
diff --git a/deps/v8/src/optimizing-compiler-thread.cc b/deps/v8/src/optimizing-compiler-thread.cc
index 337ddd4b2..085143d99 100644
--- a/deps/v8/src/optimizing-compiler-thread.cc
+++ b/deps/v8/src/optimizing-compiler-thread.cc
@@ -39,7 +39,7 @@ namespace internal {
void OptimizingCompilerThread::Run() {
#ifdef DEBUG
- { ScopedLock lock(thread_id_mutex_);
+ { LockGuard<Mutex> lock_guard(&thread_id_mutex_);
thread_id_ = ThreadId::Current().ToInteger();
}
#endif
@@ -48,26 +48,26 @@ void OptimizingCompilerThread::Run() {
DisallowHandleAllocation no_handles;
DisallowHandleDereference no_deref;
- int64_t epoch = 0;
- if (FLAG_trace_parallel_recompilation) epoch = OS::Ticks();
+ ElapsedTimer total_timer;
+ if (FLAG_trace_concurrent_recompilation) total_timer.Start();
while (true) {
- input_queue_semaphore_->Wait();
+ input_queue_semaphore_.Wait();
Logger::TimerEventScope timer(
- isolate_, Logger::TimerEventScope::v8_recompile_parallel);
+ isolate_, Logger::TimerEventScope::v8_recompile_concurrent);
- if (FLAG_parallel_recompilation_delay != 0) {
- OS::Sleep(FLAG_parallel_recompilation_delay);
+ if (FLAG_concurrent_recompilation_delay != 0) {
+ OS::Sleep(FLAG_concurrent_recompilation_delay);
}
switch (static_cast<StopFlag>(Acquire_Load(&stop_thread_))) {
case CONTINUE:
break;
case STOP:
- if (FLAG_trace_parallel_recompilation) {
- time_spent_total_ = OS::Ticks() - epoch;
+ if (FLAG_trace_concurrent_recompilation) {
+ time_spent_total_ = total_timer.Elapsed();
}
- stop_semaphore_->Signal();
+ stop_semaphore_.Signal();
return;
case FLUSH:
// The main thread is blocked, waiting for the stop semaphore.
@@ -76,18 +76,18 @@ void OptimizingCompilerThread::Run() {
}
Release_Store(&queue_length_, static_cast<AtomicWord>(0));
Release_Store(&stop_thread_, static_cast<AtomicWord>(CONTINUE));
- stop_semaphore_->Signal();
+ stop_semaphore_.Signal();
// Return to start of consumer loop.
continue;
}
- int64_t compiling_start = 0;
- if (FLAG_trace_parallel_recompilation) compiling_start = OS::Ticks();
+ ElapsedTimer compiling_timer;
+ if (FLAG_trace_concurrent_recompilation) compiling_timer.Start();
CompileNext();
- if (FLAG_trace_parallel_recompilation) {
- time_spent_compiling_ += OS::Ticks() - compiling_start;
+ if (FLAG_trace_concurrent_recompilation) {
+ time_spent_compiling_ += compiling_timer.Elapsed();
}
}
}
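Aside: ElapsedTimer and TimeDelta (from the new platform/time.h) replace the
raw OS::Ticks() arithmetic used before, both here and in the parser changes
further down. A rough stand-alone model of the Start()/Elapsed() pattern
built on std::chrono (an assumption made for illustration; V8's own timer is
not implemented with <chrono>):

#include <chrono>
#include <cstdio>

class ElapsedTimerSketch {
 public:
  void Start() { start_ = Clock::now(); }
  double ElapsedMs() const {
    return std::chrono::duration<double, std::milli>(
        Clock::now() - start_).count();
  }
 private:
  typedef std::chrono::steady_clock Clock;
  Clock::time_point start_;
};

int main() {
  ElapsedTimerSketch total_timer;
  total_timer.Start();                 // started once, up front
  // ... compile things ...
  std::printf("spent %.3f ms total\n", total_timer.ElapsedMs());
  return 0;
}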
@@ -108,12 +108,15 @@ void OptimizingCompilerThread::CompileNext() {
// The function may have already been optimized by OSR. Simply continue.
// Use a mutex to make sure that functions marked for install
// are always also queued.
- ScopedLock mark_and_queue(install_mutex_);
- { Heap::RelocationLock relocation_lock(isolate_->heap());
- AllowHandleDereference ahd;
- optimizing_compiler->info()->closure()->MarkForInstallingRecompiledCode();
+ if (!optimizing_compiler->info()->osr_ast_id().IsNone()) {
+ ASSERT(FLAG_concurrent_osr);
+ LockGuard<Mutex> access_osr_lists(&osr_list_mutex_);
+ osr_candidates_.RemoveElement(optimizing_compiler);
+ ready_for_osr_.Add(optimizing_compiler);
+ } else {
+ output_queue_.Enqueue(optimizing_compiler);
+ isolate_->stack_guard()->RequestInstallCode();
}
- output_queue_.Enqueue(optimizing_compiler);
}
@@ -123,7 +126,7 @@ void OptimizingCompilerThread::FlushInputQueue(bool restore_function_code) {
while (input_queue_.Dequeue(&optimizing_compiler)) {
// This should not block, since we have one signal on the input queue
// semaphore corresponding to each element in the input queue.
- input_queue_semaphore_->Wait();
+ input_queue_semaphore_.Wait();
CompilationInfo* info = optimizing_compiler->info();
if (restore_function_code) {
Handle<JSFunction> function = info->closure();
@@ -145,14 +148,17 @@ void OptimizingCompilerThread::FlushOutputQueue(bool restore_function_code) {
}
delete info;
}
+
+ osr_candidates_.Clear();
+ RemoveStaleOSRCandidates(0);
}
void OptimizingCompilerThread::Flush() {
ASSERT(!IsOptimizerThread());
Release_Store(&stop_thread_, static_cast<AtomicWord>(FLUSH));
- input_queue_semaphore_->Signal();
- stop_semaphore_->Wait();
+ input_queue_semaphore_.Signal();
+ stop_semaphore_.Wait();
FlushOutputQueue(true);
}
@@ -160,10 +166,10 @@ void OptimizingCompilerThread::Flush() {
void OptimizingCompilerThread::Stop() {
ASSERT(!IsOptimizerThread());
Release_Store(&stop_thread_, static_cast<AtomicWord>(STOP));
- input_queue_semaphore_->Signal();
- stop_semaphore_->Wait();
+ input_queue_semaphore_.Signal();
+ stop_semaphore_.Wait();
- if (FLAG_parallel_recompilation_delay != 0) {
+ if (FLAG_concurrent_recompilation_delay != 0) {
// Barrier when loading queue length is not necessary since the write
// happens in CompileNext on the same thread.
// This is used only for testing.
@@ -174,13 +180,15 @@ void OptimizingCompilerThread::Stop() {
FlushOutputQueue(false);
}
- if (FLAG_trace_parallel_recompilation) {
- double compile_time = static_cast<double>(time_spent_compiling_);
- double total_time = static_cast<double>(time_spent_total_);
- double percentage = (compile_time * 100) / total_time;
+ if (FLAG_trace_concurrent_recompilation) {
+ double percentage = time_spent_compiling_.PercentOf(time_spent_total_);
PrintF(" ** Compiler thread did %.2f%% useful work\n", percentage);
}
+ if (FLAG_trace_osr && FLAG_concurrent_osr) {
+ PrintF("[COSR hit rate %d / %d]\n", osr_hits_, osr_attempts_);
+ }
+
Join();
}
@@ -190,12 +198,13 @@ void OptimizingCompilerThread::InstallOptimizedFunctions() {
HandleScope handle_scope(isolate_);
OptimizingCompiler* compiler;
while (true) {
- { // Memory barrier to ensure marked functions are queued.
- ScopedLock marked_and_queued(install_mutex_);
- if (!output_queue_.Dequeue(&compiler)) return;
- }
+ if (!output_queue_.Dequeue(&compiler)) return;
Compiler::InstallOptimizedCode(compiler);
}
+
+ // Remove the oldest OSR candidates that are ready so that we
+ // only have a limited number of them waiting.
+ if (FLAG_concurrent_osr) RemoveStaleOSRCandidates();
}
@@ -204,16 +213,82 @@ void OptimizingCompilerThread::QueueForOptimization(
ASSERT(IsQueueAvailable());
ASSERT(!IsOptimizerThread());
Barrier_AtomicIncrement(&queue_length_, static_cast<Atomic32>(1));
- optimizing_compiler->info()->closure()->MarkInRecompileQueue();
+ if (optimizing_compiler->info()->osr_ast_id().IsNone()) {
+ optimizing_compiler->info()->closure()->MarkInRecompileQueue();
+ } else {
+ LockGuard<Mutex> access_osr_lists(&osr_list_mutex_);
+ osr_candidates_.Add(optimizing_compiler);
+ osr_attempts_++;
+ }
input_queue_.Enqueue(optimizing_compiler);
- input_queue_semaphore_->Signal();
+ input_queue_semaphore_.Signal();
+}
+
+
+OptimizingCompiler* OptimizingCompilerThread::FindReadyOSRCandidate(
+ Handle<JSFunction> function, uint32_t osr_pc_offset) {
+ ASSERT(!IsOptimizerThread());
+ OptimizingCompiler* result = NULL;
+ { LockGuard<Mutex> access_osr_lists(&osr_list_mutex_);
+ for (int i = 0; i < ready_for_osr_.length(); i++) {
+ if (ready_for_osr_[i]->info()->HasSameOsrEntry(function, osr_pc_offset)) {
+ osr_hits_++;
+ result = ready_for_osr_.Remove(i);
+ break;
+ }
+ }
+ }
+ RemoveStaleOSRCandidates();
+ return result;
+}
+
+
+bool OptimizingCompilerThread::IsQueuedForOSR(Handle<JSFunction> function,
+ uint32_t osr_pc_offset) {
+ ASSERT(!IsOptimizerThread());
+ LockGuard<Mutex> access_osr_lists(&osr_list_mutex_);
+ for (int i = 0; i < osr_candidates_.length(); i++) {
+ if (osr_candidates_[i]->info()->HasSameOsrEntry(function, osr_pc_offset)) {
+ return true;
+ }
+ }
+ return false;
+}
+
+
+bool OptimizingCompilerThread::IsQueuedForOSR(JSFunction* function) {
+ ASSERT(!IsOptimizerThread());
+ LockGuard<Mutex> access_osr_lists(&osr_list_mutex_);
+ for (int i = 0; i < osr_candidates_.length(); i++) {
+ if (*osr_candidates_[i]->info()->closure() == function) {
+ return true;
+ }
+ }
+ return false;
+}
+
+
+void OptimizingCompilerThread::RemoveStaleOSRCandidates(int limit) {
+ ASSERT(!IsOptimizerThread());
+ LockGuard<Mutex> access_osr_lists(&osr_list_mutex_);
+ while (ready_for_osr_.length() > limit) {
+ OptimizingCompiler* compiler = ready_for_osr_.Remove(0);
+ CompilationInfo* throw_away = compiler->info();
+ if (FLAG_trace_osr) {
+ PrintF("[COSR - Discarded ");
+ throw_away->closure()->PrintName();
+ PrintF(", AST id %d]\n",
+ throw_away->osr_ast_id().ToInt());
+ }
+ delete throw_away;
+ }
}
#ifdef DEBUG
bool OptimizingCompilerThread::IsOptimizerThread() {
- if (!FLAG_parallel_recompilation) return false;
- ScopedLock lock(thread_id_mutex_);
+ if (!FLAG_concurrent_recompilation) return false;
+ LockGuard<Mutex> lock_guard(&thread_id_mutex_);
return ThreadId::Current().ToInteger() == thread_id_;
}
#endif
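Aside: every walk over osr_candidates_ and ready_for_osr_ above happens under
osr_list_mutex_, so the compiler thread and the main thread never iterate a
list the other is mutating. A hedged sketch of that guarded-scan idiom in
standard C++ (std::mutex and std::vector stand in for V8's Mutex/LockGuard
and List types):

#include <mutex>
#include <vector>

struct Candidate { int function_id; unsigned pc_offset; };

class CandidateList {
 public:
  void Add(const Candidate& c) {
    std::lock_guard<std::mutex> guard(mutex_);
    candidates_.push_back(c);
  }
  // Mirrors the IsQueuedForOSR scans: the whole walk happens under the
  // lock, so no element can be added or removed mid-iteration.
  bool Contains(int function_id, unsigned pc_offset) {
    std::lock_guard<std::mutex> guard(mutex_);
    for (size_t i = 0; i < candidates_.size(); i++) {
      if (candidates_[i].function_id == function_id &&
          candidates_[i].pc_offset == pc_offset) {
        return true;
      }
    }
    return false;
  }
 private:
  std::mutex mutex_;
  std::vector<Candidate> candidates_;
};

int main() {
  CandidateList list;
  Candidate c = { 42, 0x80 };
  list.Add(c);
  return list.Contains(42, 0x80) ? 0 : 1;
}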
diff --git a/deps/v8/src/optimizing-compiler-thread.h b/deps/v8/src/optimizing-compiler-thread.h
index cbd4d0e48..d1ed6a2c5 100644
--- a/deps/v8/src/optimizing-compiler-thread.h
+++ b/deps/v8/src/optimizing-compiler-thread.h
@@ -30,7 +30,10 @@
#include "atomicops.h"
#include "flags.h"
+#include "list.h"
#include "platform.h"
+#include "platform/mutex.h"
+#include "platform/time.h"
#include "unbound-queue-inl.h"
namespace v8 {
@@ -46,23 +49,29 @@ class OptimizingCompilerThread : public Thread {
Thread("OptimizingCompilerThread"),
#ifdef DEBUG
thread_id_(0),
- thread_id_mutex_(OS::CreateMutex()),
#endif
isolate_(isolate),
- stop_semaphore_(OS::CreateSemaphore(0)),
- input_queue_semaphore_(OS::CreateSemaphore(0)),
- install_mutex_(OS::CreateMutex()),
- time_spent_compiling_(0),
- time_spent_total_(0) {
+ stop_semaphore_(0),
+ input_queue_semaphore_(0),
+ osr_candidates_(2),
+ ready_for_osr_(2),
+ osr_hits_(0),
+ osr_attempts_(0) {
NoBarrier_Store(&stop_thread_, static_cast<AtomicWord>(CONTINUE));
NoBarrier_Store(&queue_length_, static_cast<AtomicWord>(0));
}
+ ~OptimizingCompilerThread() {}
void Run();
void Stop();
void Flush();
void QueueForOptimization(OptimizingCompiler* optimizing_compiler);
void InstallOptimizedFunctions();
+ OptimizingCompiler* FindReadyOSRCandidate(Handle<JSFunction> function,
+ uint32_t osr_pc_offset);
+ bool IsQueuedForOSR(Handle<JSFunction> function, uint32_t osr_pc_offset);
+
+ bool IsQueuedForOSR(JSFunction* function);
inline bool IsQueueAvailable() {
// We don't need a barrier since we have a data dependency right
@@ -75,45 +84,52 @@ class OptimizingCompilerThread : public Thread {
// only one thread can run inside an Isolate at one time, a direct read
// doesn't introduce a race -- queue_length_ may be decreased in the
// meantime, but not increased.
- return (current_length < FLAG_parallel_recompilation_queue_length);
+ return (current_length < FLAG_concurrent_recompilation_queue_length);
}
#ifdef DEBUG
bool IsOptimizerThread();
#endif
- ~OptimizingCompilerThread() {
- delete install_mutex_;
- delete input_queue_semaphore_;
- delete stop_semaphore_;
-#ifdef DEBUG
- delete thread_id_mutex_;
-#endif
- }
-
private:
enum StopFlag { CONTINUE, STOP, FLUSH };
+ // Remove the oldest OSR candidates that are ready so that we
+ // only have |limit| left waiting.
+ void RemoveStaleOSRCandidates(int limit = kReadyForOSRLimit);
+
void FlushInputQueue(bool restore_function_code);
void FlushOutputQueue(bool restore_function_code);
-
void CompileNext();
#ifdef DEBUG
int thread_id_;
- Mutex* thread_id_mutex_;
+ Mutex thread_id_mutex_;
#endif
Isolate* isolate_;
- Semaphore* stop_semaphore_;
- Semaphore* input_queue_semaphore_;
+ Semaphore stop_semaphore_;
+ Semaphore input_queue_semaphore_;
+
+ // Queue of incoming recompilation tasks (including OSR).
UnboundQueue<OptimizingCompiler*> input_queue_;
+ // Queue of recompilation tasks ready to be installed (excluding OSR).
UnboundQueue<OptimizingCompiler*> output_queue_;
- Mutex* install_mutex_;
+ // List of all OSR-related recompilation tasks (both incoming and ready ones).
+ List<OptimizingCompiler*> osr_candidates_;
+ // List of recompilation tasks ready for OSR.
+ List<OptimizingCompiler*> ready_for_osr_;
+
volatile AtomicWord stop_thread_;
volatile Atomic32 queue_length_;
- int64_t time_spent_compiling_;
- int64_t time_spent_total_;
+ TimeDelta time_spent_compiling_;
+ TimeDelta time_spent_total_;
+
+ Mutex osr_list_mutex_;
+ int osr_hits_;
+ int osr_attempts_;
+
+ static const int kReadyForOSRLimit = 4;
};
} } // namespace v8::internal
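Aside: the constructor and destructor changes above replace heap-allocated
Semaphore*/Mutex* members (OS::CreateSemaphore plus a hand-written destructor
full of deletes) with plain value members. A minimal before/after sketch of
that ownership simplification (the Semaphore type is a stand-in):

struct Semaphore {  // stand-in for V8's platform Semaphore
  explicit Semaphore(int initial_count) { (void) initial_count; }
};

class Before {
 public:
  Before() : stop_semaphore_(new Semaphore(0)) {}
  ~Before() { delete stop_semaphore_; }  // manual cleanup, easy to miss
 private:
  Semaphore* stop_semaphore_;
};

class After {
 public:
  After() : stop_semaphore_(0) {}
  ~After() {}                  // nothing left to free
 private:
  Semaphore stop_semaphore_;   // constructed and destroyed with the object
};

int main() {
  Before before;
  After after;
  return 0;
}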
diff --git a/deps/v8/src/parser.cc b/deps/v8/src/parser.cc
index 494779039..05ae11e42 100644
--- a/deps/v8/src/parser.cc
+++ b/deps/v8/src/parser.cc
@@ -542,6 +542,7 @@ Parser::Parser(CompilationInfo* info)
scanner_(isolate_->unicode_cache()),
reusable_preparser_(NULL),
top_scope_(NULL),
+ original_scope_(NULL),
current_function_state_(NULL),
target_stack_(NULL),
extension_(info->extension()),
@@ -568,10 +569,13 @@ Parser::Parser(CompilationInfo* info)
FunctionLiteral* Parser::ParseProgram() {
- HistogramTimerScope timer(isolate()->counters()->parse());
+ HistogramTimerScope timer_scope(isolate()->counters()->parse());
Handle<String> source(String::cast(script_->source()));
isolate()->counters()->total_parse_size()->Increment(source->length());
- int64_t start = FLAG_trace_parse ? OS::Ticks() : 0;
+ ElapsedTimer timer;
+ if (FLAG_trace_parse) {
+ timer.Start();
+ }
fni_ = new(zone()) FuncNameInferrer(isolate(), zone());
// Initialize parser state.
@@ -592,7 +596,7 @@ FunctionLiteral* Parser::ParseProgram() {
}
if (FLAG_trace_parse && result != NULL) {
- double ms = static_cast<double>(OS::Ticks() - start) / 1000;
+ double ms = timer.Elapsed().InMillisecondsF();
if (info()->is_eval()) {
PrintF("[parsing eval");
} else if (info()->script()->name()->IsString()) {
@@ -622,6 +626,7 @@ FunctionLiteral* Parser::DoParseProgram(CompilationInfo* info,
if (!info->context().is_null()) {
scope = Scope::DeserializeScopeChain(*info->context(), scope, zone());
}
+ original_scope_ = scope;
if (info->is_eval()) {
if (!scope->is_global_scope() || info->language_mode() != CLASSIC_MODE) {
scope = NewScope(scope, EVAL_SCOPE);
@@ -682,6 +687,8 @@ FunctionLiteral* Parser::DoParseProgram(CompilationInfo* info,
FunctionLiteral::kNotParenthesized,
FunctionLiteral::kNotGenerator);
result->set_ast_properties(factory()->visitor()->ast_properties());
+ result->set_dont_optimize_reason(
+ factory()->visitor()->dont_optimize_reason());
} else if (stack_overflow_) {
isolate()->StackOverflow();
}
@@ -695,10 +702,13 @@ FunctionLiteral* Parser::DoParseProgram(CompilationInfo* info,
FunctionLiteral* Parser::ParseLazy() {
- HistogramTimerScope timer(isolate()->counters()->parse_lazy());
+ HistogramTimerScope timer_scope(isolate()->counters()->parse_lazy());
Handle<String> source(String::cast(script_->source()));
isolate()->counters()->total_parse_size()->Increment(source->length());
- int64_t start = FLAG_trace_parse ? OS::Ticks() : 0;
+ ElapsedTimer timer;
+ if (FLAG_trace_parse) {
+ timer.Start();
+ }
Handle<SharedFunctionInfo> shared_info = info()->shared_info();
// Initialize parser state.
@@ -718,7 +728,7 @@ FunctionLiteral* Parser::ParseLazy() {
}
if (FLAG_trace_parse && result != NULL) {
- double ms = static_cast<double>(OS::Ticks() - start) / 1000;
+ double ms = timer.Elapsed().InMillisecondsF();
SmartArrayPointer<char> name_chars = result->debug_name()->ToCString();
PrintF("[parsing function: %s - took %0.3f ms]\n", *name_chars, ms);
}
@@ -749,6 +759,7 @@ FunctionLiteral* Parser::ParseLazy(Utf16CharacterStream* source) {
scope = Scope::DeserializeScopeChain(info()->closure()->context(), scope,
zone());
}
+ original_scope_ = scope;
FunctionState function_state(this, scope, isolate());
ASSERT(scope->language_mode() != STRICT_MODE || !info()->is_classic_mode());
ASSERT(scope->language_mode() != EXTENDED_MODE ||
@@ -3735,8 +3746,9 @@ bool CompileTimeValue::IsCompileTimeValue(Expression* expression) {
}
-Handle<FixedArray> CompileTimeValue::GetValue(Expression* expression) {
- Factory* factory = Isolate::Current()->factory();
+Handle<FixedArray> CompileTimeValue::GetValue(Isolate* isolate,
+ Expression* expression) {
+ Factory* factory = isolate->factory();
ASSERT(IsCompileTimeValue(expression));
Handle<FixedArray> result = factory->NewFixedArray(2, TENURED);
ObjectLiteral* object_literal = expression->AsObjectLiteral();
@@ -3775,7 +3787,7 @@ Handle<Object> Parser::GetBoilerplateValue(Expression* expression) {
return expression->AsLiteral()->value();
}
if (CompileTimeValue::IsCompileTimeValue(expression)) {
- return CompileTimeValue::GetValue(expression);
+ return CompileTimeValue::GetValue(isolate(), expression);
}
return isolate()->factory()->uninitialized_value();
}
@@ -4279,10 +4291,38 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
// Function declarations are function scoped in normal mode, so they are
// hoisted. In harmony block scoping mode they are block scoped, so they
// are not hoisted.
+ //
+ // One tricky case is function declarations in a local sloppy-mode eval:
+ // their declaration is hoisted, but they still see the local scope. E.g.,
+ //
+ // function() {
+ // var x = 0
+ // try { throw 1 } catch (x) { eval("function g() { return x }") }
+ // return g()
+ // }
+ //
+ // needs to return 1. To distinguish such cases, we need to detect
+ // (1) whether a function stems from a sloppy eval, and
+ // (2) whether it actually hoists across the eval.
+ // Unfortunately, we do not represent sloppy eval scopes, so we do not have
+ // either information available directly, especially not when lazily compiling
+ // a function like 'g'. We hence rely on the following invariants:
+ // - (1) is the case iff the innermost scope of the deserialized scope chain
+ // under which we compile is _not_ a declaration scope. This holds because
+ // in all normal cases, function declarations are fully hoisted to a
+ // declaration scope and compiled relative to that.
+ // - (2) is the case iff the current declaration scope is still the original
+ // one relative to the deserialized scope chain. Otherwise we must be
+ // compiling a function in an inner declaration scope in the eval, e.g. a
+ // nested function, and hoisting works normally relative to that.
+ Scope* declaration_scope = top_scope_->DeclarationScope();
+ Scope* original_declaration_scope = original_scope_->DeclarationScope();
Scope* scope =
- (function_type == FunctionLiteral::DECLARATION && !is_extended_mode())
- ? NewScope(top_scope_->DeclarationScope(), FUNCTION_SCOPE)
- : NewScope(top_scope_, FUNCTION_SCOPE);
+ function_type == FunctionLiteral::DECLARATION && !is_extended_mode() &&
+ (original_scope_ == original_declaration_scope ||
+ declaration_scope != original_declaration_scope)
+ ? NewScope(declaration_scope, FUNCTION_SCOPE)
+ : NewScope(top_scope_, FUNCTION_SCOPE);
ZoneList<Statement*>* body = NULL;
int materialized_literal_count = -1;
int expected_property_count = -1;
@@ -4296,6 +4336,7 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
? FunctionLiteral::kIsGenerator
: FunctionLiteral::kNotGenerator;
AstProperties ast_properties;
+ BailoutReason dont_optimize_reason = kNoReason;
// Parse function body.
{ FunctionState function_state(this, scope, isolate());
top_scope_->SetScopeName(function_name);
@@ -4555,6 +4596,7 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
CHECK_OK);
}
ast_properties = *factory()->visitor()->ast_properties();
+ dont_optimize_reason = factory()->visitor()->dont_optimize_reason();
}
if (is_extended_mode()) {
@@ -4576,6 +4618,7 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
generator);
function_literal->set_function_token_position(function_token_position);
function_literal->set_ast_properties(&ast_properties);
+ function_literal->set_dont_optimize_reason(dont_optimize_reason);
if (fni_ != NULL && should_infer_name) fni_->AddFunction(function_literal);
return function_literal;
@@ -4986,7 +5029,7 @@ RegExpParser::RegExpParser(FlatStringReader* in,
Handle<String>* error,
bool multiline,
Zone* zone)
- : isolate_(Isolate::Current()),
+ : isolate_(zone->isolate()),
zone_(zone),
error_(error),
captures_(NULL),
@@ -5858,9 +5901,9 @@ int ScriptDataImpl::ReadNumber(byte** source) {
// Create a Scanner for the preparser to use as input, and preparse the source.
-ScriptDataImpl* PreParserApi::PreParse(Utf16CharacterStream* source) {
+ScriptDataImpl* PreParserApi::PreParse(Isolate* isolate,
+ Utf16CharacterStream* source) {
CompleteParserRecorder recorder;
- Isolate* isolate = Isolate::Current();
HistogramTimerScope timer(isolate->counters()->pre_parse());
Scanner scanner(isolate->unicode_cache());
intptr_t stack_limit = isolate->stack_guard()->real_climit();
diff --git a/deps/v8/src/parser.h b/deps/v8/src/parser.h
index 68a74b78a..783626ad1 100644
--- a/deps/v8/src/parser.h
+++ b/deps/v8/src/parser.h
@@ -170,7 +170,8 @@ class PreParserApi {
// This interface is here instead of in preparser.h because it instantiates a
// preparser recorder object that is suited to the parser's purposes. Also,
// the preparser doesn't know about ScriptDataImpl.
- static ScriptDataImpl* PreParse(Utf16CharacterStream* source);
+ static ScriptDataImpl* PreParse(Isolate* isolate,
+ Utf16CharacterStream* source);
};
@@ -855,6 +856,7 @@ class Parser BASE_EMBEDDED {
Scanner scanner_;
preparser::PreParser* reusable_preparser_;
Scope* top_scope_;
+ Scope* original_scope_; // for ES5 function declarations in sloppy eval
FunctionState* current_function_state_;
Target* target_stack_; // for break, continue statements
v8::Extension* extension_;
@@ -893,7 +895,7 @@ class CompileTimeValue: public AllStatic {
static bool IsCompileTimeValue(Expression* expression);
// Get the value as a compile time value.
- static Handle<FixedArray> GetValue(Expression* expression);
+ static Handle<FixedArray> GetValue(Isolate* isolate, Expression* expression);
// Get the type of a compile time value returned by GetValue().
static LiteralType GetLiteralType(Handle<FixedArray> value);
diff --git a/deps/v8/src/platform-cygwin.cc b/deps/v8/src/platform-cygwin.cc
index 4c7b01759..4d3b1e313 100644
--- a/deps/v8/src/platform-cygwin.cc
+++ b/deps/v8/src/platform-cygwin.cc
@@ -52,9 +52,6 @@ namespace v8 {
namespace internal {
-static Mutex* limit_mutex = NULL;
-
-
const char* OS::LocalTimezone(double time) {
if (std::isnan(time)) return "";
time_t tv = static_cast<time_t>(floor(time/msPerSecond));
@@ -76,31 +73,6 @@ double OS::LocalTimeOffset() {
}
-// We keep the lowest and highest addresses mapped as a quick way of
-// determining that pointers are outside the heap (used mostly in assertions
-// and verification). The estimate is conservative, i.e., not all addresses in
-// 'allocated' space are actually allocated to our heap. The range is
- // [lowest, highest), inclusive on the low end and exclusive on the high end.
-static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
-static void* highest_ever_allocated = reinterpret_cast<void*>(0);
-
-
-static void UpdateAllocatedSpaceLimits(void* address, int size) {
- ASSERT(limit_mutex != NULL);
- ScopedLock lock(limit_mutex);
-
- lowest_ever_allocated = Min(lowest_ever_allocated, address);
- highest_ever_allocated =
- Max(highest_ever_allocated,
- reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size));
-}
-
-
-bool OS::IsOutsideAllocatedSpace(void* address) {
- return address < lowest_ever_allocated || address >= highest_ever_allocated;
-}
-
-
void* OS::Allocate(const size_t requested,
size_t* allocated,
bool is_executable) {
@@ -108,11 +80,10 @@ void* OS::Allocate(const size_t requested,
int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
if (mbase == MAP_FAILED) {
- LOG(ISOLATE, StringEvent("OS::Allocate", "mmap failed"));
+ LOG(Isolate::Current(), StringEvent("OS::Allocate", "mmap failed"));
return NULL;
}
*allocated = msize;
- UpdateAllocatedSpaceLimits(mbase, msize);
return mbase;
}
@@ -170,7 +141,7 @@ PosixMemoryMappedFile::~PosixMemoryMappedFile() {
}
-void OS::LogSharedLibraryAddresses() {
+void OS::LogSharedLibraryAddresses(Isolate* isolate) {
// This function assumes that the layout of the file is as follows:
// hex_start_addr-hex_end_addr rwxp <unused data> [binary_file_name]
// If we encounter an unexpected situation we abort scanning further entries.
@@ -181,7 +152,6 @@ void OS::LogSharedLibraryAddresses() {
const int kLibNameLen = FILENAME_MAX + 1;
char* lib_name = reinterpret_cast<char*>(malloc(kLibNameLen));
- i::Isolate* isolate = ISOLATE;
// This loop will terminate once the scanning hits an EOF.
while (true) {
uintptr_t start, end;
@@ -265,8 +235,9 @@ static void* GetRandomAddr() {
static const intptr_t kAllocationRandomAddressMin = 0x04000000;
static const intptr_t kAllocationRandomAddressMax = 0x3FFF0000;
#endif
- uintptr_t address = (V8::RandomPrivate(isolate) << kPageSizeBits)
- | kAllocationRandomAddressMin;
+ uintptr_t address =
+ (isolate->random_number_generator()->NextInt() << kPageSizeBits) |
+ kAllocationRandomAddressMin;
address &= kAllocationRandomAddressMax;
return reinterpret_cast<void *>(address);
}
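Aside: the replacement line above shifts a random integer into page-aligned
territory, ORs in the floor constant, then masks against the ceiling; since
kAllocationRandomAddressMin & kAllocationRandomAddressMax equals the floor
itself, the floor bit survives the mask. A worked illustration with the
constants from this branch (kPageSizeBits is an assumed placeholder; the real
value comes from V8's globals.h):

#include <cstdint>
#include <cstdio>

int main() {
  const uintptr_t kMin = 0x04000000;  // kAllocationRandomAddressMin
  const uintptr_t kMax = 0x3FFF0000;  // kAllocationRandomAddressMax
  const int kPageSizeBits = 20;       // assumed placeholder value

  uint32_t random = 0x00000123u;      // stand-in for NextInt()
  uintptr_t address =
      (static_cast<uintptr_t>(random) << kPageSizeBits) | kMin;
  address &= kMax;                    // keep the hint inside the range
  std::printf("address hint: 0x%lx\n",
              static_cast<unsigned long>(address));
  return 0;
}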
@@ -365,8 +336,6 @@ bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
if (NULL == VirtualAlloc(base, size, MEM_COMMIT, prot)) {
return false;
}
-
- UpdateAllocatedSpaceLimits(base, static_cast<int>(size));
return true;
}
@@ -375,7 +344,7 @@ bool VirtualMemory::Guard(void* address) {
if (NULL == VirtualAlloc(address,
OS::CommitPageSize(),
MEM_COMMIT,
- PAGE_READONLY | PAGE_GUARD)) {
+ PAGE_NOACCESS)) {
return false;
}
return true;
@@ -397,87 +366,4 @@ bool VirtualMemory::HasLazyCommits() {
return false;
}
-
-class CygwinSemaphore : public Semaphore {
- public:
- explicit CygwinSemaphore(int count) { sem_init(&sem_, 0, count); }
- virtual ~CygwinSemaphore() { sem_destroy(&sem_); }
-
- virtual void Wait();
- virtual bool Wait(int timeout);
- virtual void Signal() { sem_post(&sem_); }
- private:
- sem_t sem_;
-};
-
-
-void CygwinSemaphore::Wait() {
- while (true) {
- int result = sem_wait(&sem_);
- if (result == 0) return; // Successfully got semaphore.
- CHECK(result == -1 && errno == EINTR); // Signal caused spurious wakeup.
- }
-}
-
-
-#ifndef TIMEVAL_TO_TIMESPEC
-#define TIMEVAL_TO_TIMESPEC(tv, ts) do { \
- (ts)->tv_sec = (tv)->tv_sec; \
- (ts)->tv_nsec = (tv)->tv_usec * 1000; \
-} while (false)
-#endif
-
-
-bool CygwinSemaphore::Wait(int timeout) {
- const long kOneSecondMicros = 1000000; // NOLINT
-
- // Split timeout into second and nanosecond parts.
- struct timeval delta;
- delta.tv_usec = timeout % kOneSecondMicros;
- delta.tv_sec = timeout / kOneSecondMicros;
-
- struct timeval current_time;
- // Get the current time.
- if (gettimeofday(&current_time, NULL) == -1) {
- return false;
- }
-
- // Calculate time for end of timeout.
- struct timeval end_time;
- timeradd(&current_time, &delta, &end_time);
-
- struct timespec ts;
- TIMEVAL_TO_TIMESPEC(&end_time, &ts);
- // Wait for semaphore signalled or timeout.
- while (true) {
- int result = sem_timedwait(&sem_, &ts);
- if (result == 0) return true; // Successfully got semaphore.
- if (result == -1 && errno == ETIMEDOUT) return false; // Timeout.
- CHECK(result == -1 && errno == EINTR); // Signal caused spurious wakeup.
- }
-}
-
-
-Semaphore* OS::CreateSemaphore(int count) {
- return new CygwinSemaphore(count);
-}
-
-
-void OS::SetUp() {
- // Seed the random number generator.
- // Convert the current time to a 64-bit integer first, before converting it
- // to an unsigned. Going directly can cause an overflow and the seed to be
- // set to all ones. The seed will be identical for different instances that
- // call this setup code within the same millisecond.
- uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
- srandom(static_cast<unsigned int>(seed));
- limit_mutex = CreateMutex();
-}
-
-
-void OS::TearDown() {
- delete limit_mutex;
-}
-
-
} } // namespace v8::internal
diff --git a/deps/v8/src/platform-freebsd.cc b/deps/v8/src/platform-freebsd.cc
index e0917fa56..d81827805 100644
--- a/deps/v8/src/platform-freebsd.cc
+++ b/deps/v8/src/platform-freebsd.cc
@@ -63,9 +63,6 @@ namespace v8 {
namespace internal {
-static Mutex* limit_mutex = NULL;
-
-
const char* OS::LocalTimezone(double time) {
if (std::isnan(time)) return "";
time_t tv = static_cast<time_t>(floor(time/msPerSecond));
@@ -84,31 +81,6 @@ double OS::LocalTimeOffset() {
}
-// We keep the lowest and highest addresses mapped as a quick way of
-// determining that pointers are outside the heap (used mostly in assertions
-// and verification). The estimate is conservative, i.e., not all addresses in
-// 'allocated' space are actually allocated to our heap. The range is
- // [lowest, highest), inclusive on the low end and exclusive on the high end.
-static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
-static void* highest_ever_allocated = reinterpret_cast<void*>(0);
-
-
-static void UpdateAllocatedSpaceLimits(void* address, int size) {
- ASSERT(limit_mutex != NULL);
- ScopedLock lock(limit_mutex);
-
- lowest_ever_allocated = Min(lowest_ever_allocated, address);
- highest_ever_allocated =
- Max(highest_ever_allocated,
- reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size));
-}
-
-
-bool OS::IsOutsideAllocatedSpace(void* address) {
- return address < lowest_ever_allocated || address >= highest_ever_allocated;
-}
-
-
void* OS::Allocate(const size_t requested,
size_t* allocated,
bool executable) {
@@ -117,11 +89,10 @@ void* OS::Allocate(const size_t requested,
void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0);
if (mbase == MAP_FAILED) {
- LOG(ISOLATE, StringEvent("OS::Allocate", "mmap failed"));
+ LOG(Isolate::Current(), StringEvent("OS::Allocate", "mmap failed"));
return NULL;
}
*allocated = msize;
- UpdateAllocatedSpaceLimits(mbase, msize);
return mbase;
}
@@ -184,7 +155,7 @@ static unsigned StringToLong(char* buffer) {
}
-void OS::LogSharedLibraryAddresses() {
+void OS::LogSharedLibraryAddresses(Isolate* isolate) {
static const int MAP_LENGTH = 1024;
int fd = open("/proc/self/maps", O_RDONLY);
if (fd < 0) return;
@@ -218,7 +189,7 @@ void OS::LogSharedLibraryAddresses() {
// There may be no filename in this line. Skip to next.
if (start_of_path == NULL) continue;
buffer[bytes_read] = 0;
- LOG(i::Isolate::Current(), SharedLibraryEvent(start_of_path, start, end));
+ LOG(isolate, SharedLibraryEvent(start_of_path, start, end));
}
close(fd);
}
@@ -345,8 +316,6 @@ bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
kMmapFdOffset)) {
return false;
}
-
- UpdateAllocatedSpaceLimits(base, size);
return true;
}
@@ -371,78 +340,4 @@ bool VirtualMemory::HasLazyCommits() {
return false;
}
-
-class FreeBSDSemaphore : public Semaphore {
- public:
- explicit FreeBSDSemaphore(int count) { sem_init(&sem_, 0, count); }
- virtual ~FreeBSDSemaphore() { sem_destroy(&sem_); }
-
- virtual void Wait();
- virtual bool Wait(int timeout);
- virtual void Signal() { sem_post(&sem_); }
- private:
- sem_t sem_;
-};
-
-
-void FreeBSDSemaphore::Wait() {
- while (true) {
- int result = sem_wait(&sem_);
- if (result == 0) return; // Successfully got semaphore.
- CHECK(result == -1 && errno == EINTR); // Signal caused spurious wakeup.
- }
-}
-
-
-bool FreeBSDSemaphore::Wait(int timeout) {
- const long kOneSecondMicros = 1000000; // NOLINT
-
- // Split timeout into second and nanosecond parts.
- struct timeval delta;
- delta.tv_usec = timeout % kOneSecondMicros;
- delta.tv_sec = timeout / kOneSecondMicros;
-
- struct timeval current_time;
- // Get the current time.
- if (gettimeofday(&current_time, NULL) == -1) {
- return false;
- }
-
- // Calculate time for end of timeout.
- struct timeval end_time;
- timeradd(&current_time, &delta, &end_time);
-
- struct timespec ts;
- TIMEVAL_TO_TIMESPEC(&end_time, &ts);
- while (true) {
- int result = sem_timedwait(&sem_, &ts);
- if (result == 0) return true; // Successfully got semaphore.
- if (result == -1 && errno == ETIMEDOUT) return false; // Timeout.
- CHECK(result == -1 && errno == EINTR); // Signal caused spurious wakeup.
- }
-}
-
-
-Semaphore* OS::CreateSemaphore(int count) {
- return new FreeBSDSemaphore(count);
-}
-
-
-void OS::SetUp() {
- // Seed the random number generator.
- // Convert the current time to a 64-bit integer first, before converting it
- // to an unsigned. Going directly can cause an overflow and the seed to be
- // set to all ones. The seed will be identical for different instances that
- // call this setup code within the same millisecond.
- uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
- srandom(static_cast<unsigned int>(seed));
- limit_mutex = CreateMutex();
-}
-
-
-void OS::TearDown() {
- delete limit_mutex;
-}
-
-
} } // namespace v8::internal
diff --git a/deps/v8/src/platform-linux.cc b/deps/v8/src/platform-linux.cc
index 885683398..b8b96025e 100644
--- a/deps/v8/src/platform-linux.cc
+++ b/deps/v8/src/platform-linux.cc
@@ -76,143 +76,7 @@ namespace v8 {
namespace internal {
-static Mutex* limit_mutex = NULL;
-
-
#ifdef __arm__
-static bool CPUInfoContainsString(const char * search_string) {
- const char* file_name = "/proc/cpuinfo";
- // This is written as a straight shot one pass parser
- // and not using STL string and ifstream because,
- // on Linux, it's reading from a (non-mmap-able)
- // character special device.
- FILE* f = NULL;
- const char* what = search_string;
-
- if (NULL == (f = fopen(file_name, "r"))) {
- OS::PrintError("Failed to open /proc/cpuinfo\n");
- return false;
- }
-
- int k;
- while (EOF != (k = fgetc(f))) {
- if (k == *what) {
- ++what;
- while ((*what != '\0') && (*what == fgetc(f))) {
- ++what;
- }
- if (*what == '\0') {
- fclose(f);
- return true;
- } else {
- what = search_string;
- }
- }
- }
- fclose(f);
-
- // Did not find string in the proc file.
- return false;
-}
-
-
-bool OS::ArmCpuHasFeature(CpuFeature feature) {
- const char* search_string = NULL;
- // Simple detection of VFP at runtime for Linux.
- // It is based on /proc/cpuinfo, which reveals hardware configuration
- // to user-space applications. According to ARM (mid 2009), no similar
- // facility is universally available on the ARM architectures,
- // so it's up to individual OSes to provide such.
- switch (feature) {
- case VFP3:
- search_string = "vfpv3";
- break;
- case NEON:
- search_string = "neon";
- break;
- case ARMv7:
- search_string = "ARMv7";
- break;
- case SUDIV:
- search_string = "idiva";
- break;
- case VFP32DREGS:
- // This case is handled specially below.
- break;
- default:
- UNREACHABLE();
- }
-
- if (feature == VFP32DREGS) {
- return ArmCpuHasFeature(VFP3) && !CPUInfoContainsString("d16");
- }
-
- if (CPUInfoContainsString(search_string)) {
- return true;
- }
-
- if (feature == VFP3) {
- // Some old kernels will report vfp not vfpv3. Here we make a last attempt
- // to detect vfpv3 by checking for vfp *and* neon, since neon is only
- // available on architectures with vfpv3.
- // Checking neon on its own is not enough as it is possible to have neon
- // without vfp.
- if (CPUInfoContainsString("vfp") && CPUInfoContainsString("neon")) {
- return true;
- }
- }
-
- return false;
-}
-
-
-CpuImplementer OS::GetCpuImplementer() {
- static bool use_cached_value = false;
- static CpuImplementer cached_value = UNKNOWN_IMPLEMENTER;
- if (use_cached_value) {
- return cached_value;
- }
- if (CPUInfoContainsString("CPU implementer\t: 0x41")) {
- cached_value = ARM_IMPLEMENTER;
- } else if (CPUInfoContainsString("CPU implementer\t: 0x51")) {
- cached_value = QUALCOMM_IMPLEMENTER;
- } else {
- cached_value = UNKNOWN_IMPLEMENTER;
- }
- use_cached_value = true;
- return cached_value;
-}
-
-
-CpuPart OS::GetCpuPart(CpuImplementer implementer) {
- static bool use_cached_value = false;
- static CpuPart cached_value = CPU_UNKNOWN;
- if (use_cached_value) {
- return cached_value;
- }
- if (implementer == ARM_IMPLEMENTER) {
- if (CPUInfoContainsString("CPU part\t: 0xc0f")) {
- cached_value = CORTEX_A15;
- } else if (CPUInfoContainsString("CPU part\t: 0xc0c")) {
- cached_value = CORTEX_A12;
- } else if (CPUInfoContainsString("CPU part\t: 0xc09")) {
- cached_value = CORTEX_A9;
- } else if (CPUInfoContainsString("CPU part\t: 0xc08")) {
- cached_value = CORTEX_A8;
- } else if (CPUInfoContainsString("CPU part\t: 0xc07")) {
- cached_value = CORTEX_A7;
- } else if (CPUInfoContainsString("CPU part\t: 0xc05")) {
- cached_value = CORTEX_A5;
- } else {
- cached_value = CPU_UNKNOWN;
- }
- } else {
- cached_value = CPU_UNKNOWN;
- }
- use_cached_value = true;
- return cached_value;
-}
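
Both detectors above memoize through a local static flag with no locking, on the assumption that the first probe happens during single-threaded startup. The pattern, condensed into a sketch (not V8 API):

```cpp
// Memoized one-time detection, as in GetCpuImplementer()/GetCpuPart().
// Not synchronized: the first call must precede any worker threads.
// Caveat: one cache per instantiated type T, so give each detector its
// own result type (as the enums CpuImplementer and CpuPart do).
template <typename T>
static T CachedDetect(T (*detect)()) {
  static bool cached = false;
  static T value;
  if (!cached) {
    value = detect();
    cached = true;
  }
  return value;
}
```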
-
bool OS::ArmUsingHardFloat() {
// GCC versions 4.6 and above define __ARM_PCS or __ARM_PCS_VFP to specify
@@ -255,60 +119,6 @@ bool OS::ArmUsingHardFloat() {
#endif // def __arm__
-#ifdef __mips__
-bool OS::MipsCpuHasFeature(CpuFeature feature) {
- const char* search_string = NULL;
- const char* file_name = "/proc/cpuinfo";
- // Simple detection of FPU at runtime for Linux.
- // It is based on /proc/cpuinfo, which reveals hardware configuration
- // to user-space applications. According to MIPS (early 2010), no similar
- // facility is universally available on the MIPS architectures,
- // so it's up to individual OSes to provide such.
- //
- // This is written as a straight shot one pass parser
- // and not using STL string and ifstream because,
- // on Linux, it's reading from a (non-mmap-able)
- // character special device.
-
- switch (feature) {
- case FPU:
- search_string = "FPU";
- break;
- default:
- UNREACHABLE();
- }
-
- FILE* f = NULL;
- const char* what = search_string;
-
- if (NULL == (f = fopen(file_name, "r"))) {
- OS::PrintError("Failed to open /proc/cpuinfo\n");
- return false;
- }
-
- int k;
- while (EOF != (k = fgetc(f))) {
- if (k == *what) {
- ++what;
- while ((*what != '\0') && (*what == fgetc(f))) {
- ++what;
- }
- if (*what == '\0') {
- fclose(f);
- return true;
- } else {
- what = search_string;
- }
- }
- }
- fclose(f);
-
- // Did not find string in the proc file.
- return false;
-}
-#endif // def __mips__
-
-
const char* OS::LocalTimezone(double time) {
if (std::isnan(time)) return "";
time_t tv = static_cast<time_t>(floor(time/msPerSecond));
@@ -327,31 +137,6 @@ double OS::LocalTimeOffset() {
}
-// We keep the lowest and highest addresses mapped as a quick way of
-// determining that pointers are outside the heap (used mostly in assertions
-// and verification). The estimate is conservative, i.e., not all addresses in
-// 'allocated' space are actually allocated to our heap. The range is
-// [lowest, highest), inclusive on the low end and exclusive on the high end.
-static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
-static void* highest_ever_allocated = reinterpret_cast<void*>(0);
-
-
-static void UpdateAllocatedSpaceLimits(void* address, int size) {
- ASSERT(limit_mutex != NULL);
- ScopedLock lock(limit_mutex);
-
- lowest_ever_allocated = Min(lowest_ever_allocated, address);
- highest_ever_allocated =
- Max(highest_ever_allocated,
- reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size));
-}
-
-
-bool OS::IsOutsideAllocatedSpace(void* address) {
- return address < lowest_ever_allocated || address >= highest_ever_allocated;
-}
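
The watermark scheme deleted here is small enough to show whole: every allocation widens a conservative [low, high) envelope, and the membership test is two pointer comparisons. A minimal sketch (the removed code additionally serializes the update with limit_mutex):

```cpp
#include <cstddef>

// Conservative envelope of every address ever handed out: [low, high).
// Addresses inside it are not necessarily heap; addresses outside it
// definitely are not.
static void* g_lowest = reinterpret_cast<void*>(-1);
static void* g_highest = reinterpret_cast<void*>(0);

static void Widen(void* address, size_t size) {
  if (address < g_lowest) g_lowest = address;
  void* end = static_cast<char*>(address) + size;
  if (end > g_highest) g_highest = end;
}

static bool OutsideEnvelope(void* address) {
  return address < g_lowest || address >= g_highest;
}
```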
-
-
void* OS::Allocate(const size_t requested,
size_t* allocated,
bool is_executable) {
@@ -365,7 +150,6 @@ void* OS::Allocate(const size_t requested,
return NULL;
}
*allocated = msize;
- UpdateAllocatedSpaceLimits(mbase, msize);
return mbase;
}
@@ -436,7 +220,7 @@ PosixMemoryMappedFile::~PosixMemoryMappedFile() {
}
-void OS::LogSharedLibraryAddresses() {
+void OS::LogSharedLibraryAddresses(Isolate* isolate) {
// This function assumes that the layout of the file is as follows:
// hex_start_addr-hex_end_addr rwxp <unused data> [binary_file_name]
// If we encounter an unexpected situation we abort scanning further entries.
@@ -447,7 +231,6 @@ void OS::LogSharedLibraryAddresses() {
const int kLibNameLen = FILENAME_MAX + 1;
char* lib_name = reinterpret_cast<char*>(malloc(kLibNameLen));
- i::Isolate* isolate = ISOLATE;
// This loop will terminate once the scanning hits an EOF.
while (true) {
uintptr_t start, end;
@@ -659,7 +442,6 @@ bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
return false;
}
- UpdateAllocatedSpaceLimits(base, size);
return true;
}
@@ -683,88 +465,4 @@ bool VirtualMemory::HasLazyCommits() {
return true;
}
-
-class LinuxSemaphore : public Semaphore {
- public:
- explicit LinuxSemaphore(int count) { sem_init(&sem_, 0, count); }
- virtual ~LinuxSemaphore() { sem_destroy(&sem_); }
-
- virtual void Wait();
- virtual bool Wait(int timeout);
- virtual void Signal() { sem_post(&sem_); }
- private:
- sem_t sem_;
-};
-
-
-void LinuxSemaphore::Wait() {
- while (true) {
- int result = sem_wait(&sem_);
- if (result == 0) return; // Successfully got semaphore.
- CHECK(result == -1 && errno == EINTR); // Signal caused spurious wakeup.
- }
-}
-
-
-#ifndef TIMEVAL_TO_TIMESPEC
-#define TIMEVAL_TO_TIMESPEC(tv, ts) do { \
- (ts)->tv_sec = (tv)->tv_sec; \
- (ts)->tv_nsec = (tv)->tv_usec * 1000; \
-} while (false)
-#endif
-
-
-bool LinuxSemaphore::Wait(int timeout) {
- const long kOneSecondMicros = 1000000; // NOLINT
-
-  // Split the timeout into second and microsecond parts.
- struct timeval delta;
- delta.tv_usec = timeout % kOneSecondMicros;
- delta.tv_sec = timeout / kOneSecondMicros;
-
- struct timeval current_time;
- // Get the current time.
- if (gettimeofday(&current_time, NULL) == -1) {
- return false;
- }
-
- // Calculate time for end of timeout.
- struct timeval end_time;
- timeradd(&current_time, &delta, &end_time);
-
- struct timespec ts;
- TIMEVAL_TO_TIMESPEC(&end_time, &ts);
-  // Wait until the semaphore is signalled or the timeout expires.
- while (true) {
- int result = sem_timedwait(&sem_, &ts);
- if (result == 0) return true; // Successfully got semaphore.
- if (result > 0) {
- // For glibc prior to 2.3.4 sem_timedwait returns the error instead of -1.
- errno = result;
- result = -1;
- }
- if (result == -1 && errno == ETIMEDOUT) return false; // Timeout.
- CHECK(result == -1 && errno == EINTR); // Signal caused spurious wakeup.
- }
-}
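
The glibc workaround in the loop above deserves isolating: before glibc 2.3.4, sem_timedwait() returned the error number directly instead of -1 with errno set, so a positive return is normalized before errno is consulted. A hypothetical helper with the same behavior:

```cpp
#include <cerrno>
#include <semaphore.h>
#include <time.h>

// Normalize the pre-glibc-2.3.4 convention (error code returned directly)
// to the standard -1/errno convention, as the removed loop does inline.
static int TimedWaitNormalized(sem_t* sem, const struct timespec* deadline) {
  int result = sem_timedwait(sem, deadline);
  if (result > 0) {  // Old glibc: the error code itself was returned.
    errno = result;
    result = -1;
  }
  return result;  // 0 on success; -1 with errno set otherwise.
}
```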
-
-
-Semaphore* OS::CreateSemaphore(int count) {
- return new LinuxSemaphore(count);
-}
-
-
-void OS::SetUp() {
- // Seed the random number generator. We preserve microsecond resolution.
- uint64_t seed = Ticks() ^ (getpid() << 16);
- srandom(static_cast<unsigned int>(seed));
- limit_mutex = CreateMutex();
-}
-
-
-void OS::TearDown() {
- delete limit_mutex;
-}
-
-
} } // namespace v8::internal
diff --git a/deps/v8/src/platform-macos.cc b/deps/v8/src/platform-macos.cc
index 6135cd137..67cc96f93 100644
--- a/deps/v8/src/platform-macos.cc
+++ b/deps/v8/src/platform-macos.cc
@@ -79,34 +79,6 @@ namespace v8 {
namespace internal {
-static Mutex* limit_mutex = NULL;
-
-
-// We keep the lowest and highest addresses mapped as a quick way of
-// determining that pointers are outside the heap (used mostly in assertions
-// and verification). The estimate is conservative, i.e., not all addresses in
-// 'allocated' space are actually allocated to our heap. The range is
-// [lowest, highest), inclusive on the low end and exclusive on the high end.
-static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
-static void* highest_ever_allocated = reinterpret_cast<void*>(0);
-
-
-static void UpdateAllocatedSpaceLimits(void* address, int size) {
- ASSERT(limit_mutex != NULL);
- ScopedLock lock(limit_mutex);
-
- lowest_ever_allocated = Min(lowest_ever_allocated, address);
- highest_ever_allocated =
- Max(highest_ever_allocated,
- reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size));
-}
-
-
-bool OS::IsOutsideAllocatedSpace(void* address) {
- return address < lowest_ever_allocated || address >= highest_ever_allocated;
-}
-
-
// Constants used for mmap.
// kMmapFd is used to pass vm_alloc flags to tag the region with the user
// defined tag 255. This helps identify V8-allocated regions in memory analysis
@@ -131,7 +103,6 @@ void* OS::Allocate(const size_t requested,
return NULL;
}
*allocated = msize;
- UpdateAllocatedSpaceLimits(mbase, msize);
return mbase;
}
@@ -202,7 +173,7 @@ PosixMemoryMappedFile::~PosixMemoryMappedFile() {
}
-void OS::LogSharedLibraryAddresses() {
+void OS::LogSharedLibraryAddresses(Isolate* isolate) {
unsigned int images_count = _dyld_image_count();
for (unsigned int i = 0; i < images_count; ++i) {
const mach_header* header = _dyld_get_image_header(i);
@@ -221,7 +192,7 @@ void OS::LogSharedLibraryAddresses() {
if (code_ptr == NULL) continue;
const uintptr_t slide = _dyld_get_image_vmaddr_slide(i);
const uintptr_t start = reinterpret_cast<uintptr_t>(code_ptr) + slide;
- LOG(Isolate::Current(),
+ LOG(isolate,
SharedLibraryEvent(_dyld_get_image_name(i), start, start + size));
}
}
@@ -366,8 +337,6 @@ bool VirtualMemory::CommitRegion(void* address,
kMmapFdOffset)) {
return false;
}
-
- UpdateAllocatedSpaceLimits(address, size);
return true;
}
@@ -391,65 +360,4 @@ bool VirtualMemory::HasLazyCommits() {
return false;
}
-
-class MacOSSemaphore : public Semaphore {
- public:
- explicit MacOSSemaphore(int count) {
- int r;
- r = semaphore_create(mach_task_self(),
- &semaphore_,
- SYNC_POLICY_FIFO,
- count);
- ASSERT(r == KERN_SUCCESS);
- }
-
- ~MacOSSemaphore() {
- int r;
- r = semaphore_destroy(mach_task_self(), semaphore_);
- ASSERT(r == KERN_SUCCESS);
- }
-
- void Wait() {
- int r;
- do {
- r = semaphore_wait(semaphore_);
- ASSERT(r == KERN_SUCCESS || r == KERN_ABORTED);
- } while (r == KERN_ABORTED);
- }
-
- bool Wait(int timeout);
-
- void Signal() { semaphore_signal(semaphore_); }
-
- private:
- semaphore_t semaphore_;
-};
-
-
-bool MacOSSemaphore::Wait(int timeout) {
- mach_timespec_t ts;
- ts.tv_sec = timeout / 1000000;
- ts.tv_nsec = (timeout % 1000000) * 1000;
- return semaphore_timedwait(semaphore_, ts) != KERN_OPERATION_TIMED_OUT;
-}
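
Unlike the POSIX wrappers, Mach semaphores take a relative timeout, so no gettimeofday() round trip is needed: the microsecond timeout is split straight into mach_timespec_t fields. The same call as a standalone sketch:

```cpp
#include <mach/mach.h>

// Relative timed wait on a Mach semaphore, mirroring the method above.
// timeout_us is in microseconds; mach_timespec_t wants sec + nsec.
static bool TimedWait(semaphore_t sem, int timeout_us) {
  mach_timespec_t ts;
  ts.tv_sec = timeout_us / 1000000;
  ts.tv_nsec = (timeout_us % 1000000) * 1000;
  return semaphore_timedwait(sem, ts) != KERN_OPERATION_TIMED_OUT;
}
```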
-
-
-Semaphore* OS::CreateSemaphore(int count) {
- return new MacOSSemaphore(count);
-}
-
-
-void OS::SetUp() {
- // Seed the random number generator. We preserve microsecond resolution.
- uint64_t seed = Ticks() ^ (getpid() << 16);
- srandom(static_cast<unsigned int>(seed));
- limit_mutex = CreateMutex();
-}
-
-
-void OS::TearDown() {
- delete limit_mutex;
-}
-
-
} } // namespace v8::internal
diff --git a/deps/v8/src/platform-nullos.cc b/deps/v8/src/platform-nullos.cc
deleted file mode 100644
index dd5a3ddb3..000000000
--- a/deps/v8/src/platform-nullos.cc
+++ /dev/null
@@ -1,573 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Platform specific code for NULLOS goes here
-
-// Minimal include to get access to abort, fprintf and friends for bootstrapping
-// messages.
-#include <stdio.h>
-#include <stdlib.h>
-
-#include "v8.h"
-
-#include "platform.h"
-#include "vm-state-inl.h"
-
-
-namespace v8 {
-namespace internal {
-
-// Give V8 the opportunity to override the default ceil behaviour.
-double ceiling(double x) {
- UNIMPLEMENTED();
- return 0;
-}
-
-
-// Give V8 the opportunity to override the default fmod behavior.
-double modulo(double x, double y) {
- UNIMPLEMENTED();
- return 0;
-}
-
-
-double fast_sin(double x) {
- UNIMPLEMENTED();
- return 0;
-}
-
-
-double fast_cos(double x) {
- UNIMPLEMENTED();
- return 0;
-}
-
-
-double fast_tan(double x) {
- UNIMPLEMENTED();
- return 0;
-}
-
-
-double fast_log(double x) {
- UNIMPLEMENTED();
- return 0;
-}
-
-
-// Initialize OS class early in the V8 startup.
-void OS::SetUp() {
- // Seed the random number generator.
- UNIMPLEMENTED();
-}
-
-
-void OS::PostSetUp() {
- UNIMPLEMENTED();
-}
-
-
-void OS::TearDown() {
- UNIMPLEMENTED();
-}
-
-
-// Returns the accumulated user time for thread.
-int OS::GetUserTime(uint32_t* secs, uint32_t* usecs) {
- UNIMPLEMENTED();
- *secs = 0;
- *usecs = 0;
- return 0;
-}
-
-
-// Returns current time as the number of milliseconds since
-// 00:00:00 UTC, January 1, 1970.
-double OS::TimeCurrentMillis() {
- UNIMPLEMENTED();
- return 0;
-}
-
-
-// Returns ticks in microsecond resolution.
-int64_t OS::Ticks() {
- UNIMPLEMENTED();
- return 0;
-}
-
-
-// Returns a string identifying the current timezone taking into
-// account daylight saving.
-const char* OS::LocalTimezone(double time) {
- UNIMPLEMENTED();
- return "<none>";
-}
-
-
-// Returns the daylight savings offset in milliseconds for the given time.
-double OS::DaylightSavingsOffset(double time) {
- UNIMPLEMENTED();
- return 0;
-}
-
-
-int OS::GetLastError() {
- UNIMPLEMENTED();
- return 0;
-}
-
-
-// Returns the local time offset in milliseconds east of UTC without
-// taking daylight savings time into account.
-double OS::LocalTimeOffset() {
- UNIMPLEMENTED();
- return 0;
-}
-
-
-// Print (debug) message to console.
-void OS::Print(const char* format, ...) {
- UNIMPLEMENTED();
-}
-
-
-// Print (debug) message to console.
-void OS::VPrint(const char* format, va_list args) {
- // Minimalistic implementation for bootstrapping.
- vfprintf(stdout, format, args);
-}
-
-
-void OS::FPrint(FILE* out, const char* format, ...) {
- va_list args;
- va_start(args, format);
- VFPrint(out, format, args);
- va_end(args);
-}
-
-
-void OS::VFPrint(FILE* out, const char* format, va_list args) {
- vfprintf(out, format, args);
-}
-
-
-// Print error message to console.
-void OS::PrintError(const char* format, ...) {
- // Minimalistic implementation for bootstrapping.
- va_list args;
- va_start(args, format);
- VPrintError(format, args);
- va_end(args);
-}
-
-
-// Print error message to console.
-void OS::VPrintError(const char* format, va_list args) {
- // Minimalistic implementation for bootstrapping.
- vfprintf(stderr, format, args);
-}
-
-
-int OS::SNPrintF(char* str, size_t size, const char* format, ...) {
- UNIMPLEMENTED();
- return 0;
-}
-
-
-int OS::VSNPrintF(char* str, size_t size, const char* format, va_list args) {
- UNIMPLEMENTED();
- return 0;
-}
-
-
-uint64_t OS::CpuFeaturesImpliedByPlatform() {
- return 0;
-}
-
-
-double OS::nan_value() {
- UNIMPLEMENTED();
- return 0;
-}
-
-
-CpuImplementer OS::GetCpuImplementer() {
- UNIMPLEMENTED();
-}
-
-
-CpuPart OS::GetCpuPart(CpuImplementer implementer) {
- UNIMPLEMENTED();
-}
-
-
-bool OS::ArmCpuHasFeature(CpuFeature feature) {
- UNIMPLEMENTED();
-}
-
-
-bool OS::ArmUsingHardFloat() {
- UNIMPLEMENTED();
-}
-
-
-bool OS::IsOutsideAllocatedSpace(void* address) {
- UNIMPLEMENTED();
- return false;
-}
-
-
-size_t OS::AllocateAlignment() {
- UNIMPLEMENTED();
- return 0;
-}
-
-
-void* OS::Allocate(const size_t requested,
- size_t* allocated,
- bool executable) {
- UNIMPLEMENTED();
- return NULL;
-}
-
-
-void OS::Free(void* buf, const size_t length) {
- // TODO(1240712): potential system call return value which is ignored here.
- UNIMPLEMENTED();
-}
-
-
-void OS::Guard(void* address, const size_t size) {
- UNIMPLEMENTED();
-}
-
-
-void OS::Sleep(int milliseconds) {
- UNIMPLEMENTED();
-}
-
-
-int OS::NumberOfCores() {
- UNIMPLEMENTED();
- return 0;
-}
-
-
-void OS::Abort() {
- // Minimalistic implementation for bootstrapping.
- abort();
-}
-
-
-void OS::DebugBreak() {
- UNIMPLEMENTED();
-}
-
-
-void OS::DumpBacktrace() {
- // Currently unsupported.
-}
-
-
-OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
- UNIMPLEMENTED();
- return NULL;
-}
-
-
-OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
- void* initial) {
- UNIMPLEMENTED();
- return NULL;
-}
-
-
-void OS::LogSharedLibraryAddresses() {
- UNIMPLEMENTED();
-}
-
-
-void OS::SignalCodeMovingGC() {
- UNIMPLEMENTED();
-}
-
-
-int OS::StackWalk(Vector<OS::StackFrame> frames) {
- UNIMPLEMENTED();
- return 0;
-}
-
-
-VirtualMemory::VirtualMemory() {
- UNIMPLEMENTED();
-}
-
-
-VirtualMemory::VirtualMemory(size_t size) {
- UNIMPLEMENTED();
-}
-
-
-VirtualMemory::VirtualMemory(size_t size, void* address_hint) {
- UNIMPLEMENTED();
-}
-
-
-VirtualMemory::~VirtualMemory() {
- UNIMPLEMENTED();
-}
-
-
-bool VirtualMemory::IsReserved() {
- UNIMPLEMENTED();
- return false;
-}
-
-
-void VirtualMemory::Reset() {
- UNIMPLEMENTED();
-}
-
-
-bool VirtualMemory::Commit(void* address, size_t size, bool executable) {
- UNIMPLEMENTED();
- return false;
-}
-
-
-bool VirtualMemory::Uncommit(void* address, size_t size) {
- UNIMPLEMENTED();
- return false;
-}
-
-
-bool VirtualMemory::Guard(void* address) {
- UNIMPLEMENTED();
- return false;
-}
-
-
-void* VirtualMemory::ReserveRegion(size_t size) {
- UNIMPLEMENTED();
- return NULL;
-}
-
-
-bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
- UNIMPLEMENTED();
- return false;
-}
-
-
-bool VirtualMemory::UncommitRegion(void* base, size_t size) {
- UNIMPLEMENTED();
- return false;
-}
-
-
-bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
- UNIMPLEMENTED();
- return false;
-}
-
-
-bool VirtualMemory::HasLazyCommits() {
- // TODO(alph): implement for the platform.
- return false;
-}
-
-
-class Thread::PlatformData : public Malloced {
- public:
- PlatformData() {
- UNIMPLEMENTED();
- }
-
- void* pd_data_;
-};
-
-
-Thread::Thread(const Options& options)
- : data_(new PlatformData()),
- stack_size_(options.stack_size),
- start_semaphore_(NULL) {
- set_name(options.name);
- UNIMPLEMENTED();
-}
-
-
-Thread::Thread(const char* name)
- : data_(new PlatformData()),
- stack_size_(0) {
- set_name(name);
- UNIMPLEMENTED();
-}
-
-
-Thread::~Thread() {
- delete data_;
- UNIMPLEMENTED();
-}
-
-
-void Thread::set_name(const char* name) {
- strncpy(name_, name, sizeof(name_));
- name_[sizeof(name_) - 1] = '\0';
-}
-
-
-void Thread::Start() {
- UNIMPLEMENTED();
-}
-
-
-void Thread::Join() {
- UNIMPLEMENTED();
-}
-
-
-Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
- UNIMPLEMENTED();
- return static_cast<LocalStorageKey>(0);
-}
-
-
-void Thread::DeleteThreadLocalKey(LocalStorageKey key) {
- UNIMPLEMENTED();
-}
-
-
-void* Thread::GetThreadLocal(LocalStorageKey key) {
- UNIMPLEMENTED();
- return NULL;
-}
-
-
-void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
- UNIMPLEMENTED();
-}
-
-
-void Thread::YieldCPU() {
- UNIMPLEMENTED();
-}
-
-
-class NullMutex : public Mutex {
- public:
- NullMutex() : data_(NULL) {
- UNIMPLEMENTED();
- }
-
- virtual ~NullMutex() {
- UNIMPLEMENTED();
- }
-
- virtual int Lock() {
- UNIMPLEMENTED();
- return 0;
- }
-
- virtual int Unlock() {
- UNIMPLEMENTED();
- return 0;
- }
-
- private:
- void* data_;
-};
-
-
-Mutex* OS::CreateMutex() {
- UNIMPLEMENTED();
- return new NullMutex();
-}
-
-
-class NullSemaphore : public Semaphore {
- public:
- explicit NullSemaphore(int count) : data_(NULL) {
- UNIMPLEMENTED();
- }
-
- virtual ~NullSemaphore() {
- UNIMPLEMENTED();
- }
-
- virtual void Wait() {
- UNIMPLEMENTED();
- }
-
- virtual void Signal() {
- UNIMPLEMENTED();
- }
- private:
- void* data_;
-};
-
-
-Semaphore* OS::CreateSemaphore(int count) {
- UNIMPLEMENTED();
- return new NullSemaphore(count);
-}
-
-
-class ProfileSampler::PlatformData : public Malloced {
- public:
- PlatformData() {
- UNIMPLEMENTED();
- }
-};
-
-
-ProfileSampler::ProfileSampler(int interval) {
- UNIMPLEMENTED();
- // Shared setup follows.
- data_ = new PlatformData();
- interval_ = interval;
- active_ = false;
-}
-
-
-ProfileSampler::~ProfileSampler() {
- UNIMPLEMENTED();
- // Shared tear down follows.
- delete data_;
-}
-
-
-void ProfileSampler::Start() {
- UNIMPLEMENTED();
-}
-
-
-void ProfileSampler::Stop() {
- UNIMPLEMENTED();
-}
-
-
-} } // namespace v8::internal
diff --git a/deps/v8/src/platform-openbsd.cc b/deps/v8/src/platform-openbsd.cc
index e59160109..30a484f4b 100644
--- a/deps/v8/src/platform-openbsd.cc
+++ b/deps/v8/src/platform-openbsd.cc
@@ -61,9 +61,6 @@ namespace v8 {
namespace internal {
-static Mutex* limit_mutex = NULL;
-
-
const char* OS::LocalTimezone(double time) {
if (std::isnan(time)) return "";
time_t tv = static_cast<time_t>(floor(time/msPerSecond));
@@ -82,31 +79,6 @@ double OS::LocalTimeOffset() {
}
-// We keep the lowest and highest addresses mapped as a quick way of
-// determining that pointers are outside the heap (used mostly in assertions
-// and verification). The estimate is conservative, i.e., not all addresses in
-// 'allocated' space are actually allocated to our heap. The range is
-// [lowest, highest), inclusive on the low end and exclusive on the high end.
-static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
-static void* highest_ever_allocated = reinterpret_cast<void*>(0);
-
-
-static void UpdateAllocatedSpaceLimits(void* address, int size) {
- ASSERT(limit_mutex != NULL);
- ScopedLock lock(limit_mutex);
-
- lowest_ever_allocated = Min(lowest_ever_allocated, address);
- highest_ever_allocated =
- Max(highest_ever_allocated,
- reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size));
-}
-
-
-bool OS::IsOutsideAllocatedSpace(void* address) {
- return address < lowest_ever_allocated || address >= highest_ever_allocated;
-}
-
-
void* OS::Allocate(const size_t requested,
size_t* allocated,
bool is_executable) {
@@ -120,7 +92,6 @@ void* OS::Allocate(const size_t requested,
return NULL;
}
*allocated = msize;
- UpdateAllocatedSpaceLimits(mbase, msize);
return mbase;
}
@@ -178,7 +149,7 @@ PosixMemoryMappedFile::~PosixMemoryMappedFile() {
}
-void OS::LogSharedLibraryAddresses() {
+void OS::LogSharedLibraryAddresses(Isolate* isolate) {
// This function assumes that the layout of the file is as follows:
// hex_start_addr-hex_end_addr rwxp <unused data> [binary_file_name]
// If we encounter an unexpected situation we abort scanning further entries.
@@ -189,7 +160,6 @@ void OS::LogSharedLibraryAddresses() {
const int kLibNameLen = FILENAME_MAX + 1;
char* lib_name = reinterpret_cast<char*>(malloc(kLibNameLen));
- i::Isolate* isolate = ISOLATE;
// This loop will terminate once the scanning hits an EOF.
while (true) {
uintptr_t start, end;
@@ -402,8 +372,6 @@ bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
kMmapFdOffset)) {
return false;
}
-
- UpdateAllocatedSpaceLimits(base, size);
return true;
}
@@ -428,87 +396,4 @@ bool VirtualMemory::HasLazyCommits() {
return false;
}
-
-class OpenBSDSemaphore : public Semaphore {
- public:
- explicit OpenBSDSemaphore(int count) { sem_init(&sem_, 0, count); }
- virtual ~OpenBSDSemaphore() { sem_destroy(&sem_); }
-
- virtual void Wait();
- virtual bool Wait(int timeout);
- virtual void Signal() { sem_post(&sem_); }
- private:
- sem_t sem_;
-};
-
-
-void OpenBSDSemaphore::Wait() {
- while (true) {
- int result = sem_wait(&sem_);
- if (result == 0) return; // Successfully got semaphore.
- CHECK(result == -1 && errno == EINTR); // Signal caused spurious wakeup.
- }
-}
-
-
-#ifndef TIMEVAL_TO_TIMESPEC
-#define TIMEVAL_TO_TIMESPEC(tv, ts) do { \
- (ts)->tv_sec = (tv)->tv_sec; \
- (ts)->tv_nsec = (tv)->tv_usec * 1000; \
-} while (false)
-#endif
-
-
-bool OpenBSDSemaphore::Wait(int timeout) {
- const long kOneSecondMicros = 1000000; // NOLINT
-
-  // Split the timeout into second and microsecond parts.
- struct timeval delta;
- delta.tv_usec = timeout % kOneSecondMicros;
- delta.tv_sec = timeout / kOneSecondMicros;
-
- struct timeval current_time;
- // Get the current time.
- if (gettimeofday(&current_time, NULL) == -1) {
- return false;
- }
-
- // Calculate time for end of timeout.
- struct timeval end_time;
- timeradd(&current_time, &delta, &end_time);
-
- struct timespec ts;
- TIMEVAL_TO_TIMESPEC(&end_time, &ts);
-
- int to = ts.tv_sec;
-
- while (true) {
- int result = sem_trywait(&sem_);
- if (result == 0) return true; // Successfully got semaphore.
- if (!to) return false; // Timeout.
- CHECK(result == -1 && errno == EINTR); // Signal caused spurious wakeup.
- usleep(ts.tv_nsec / 1000);
- to--;
- }
-}
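
OpenBSD historically lacked sem_timedwait(), so the removed code polls sem_trywait() in a loop. Note its budget, `int to = ts.tv_sec`, is taken after ts has become an absolute deadline, so the counter holds seconds-since-epoch rather than the intended timeout. A cleaner polling fallback under the same constraint might look like this (a sketch, not the removed code):

```cpp
#include <cerrno>
#include <semaphore.h>
#include <unistd.h>

// Polling fallback for platforms without sem_timedwait(): retry
// sem_trywait() in small sleeps until the relative microsecond budget
// is spent. Granularity trades wakeup latency against CPU churn.
static bool PollWait(sem_t* sem, long timeout_us) {
  const long kSliceUs = 1000;  // 1ms poll granularity.
  while (timeout_us > 0) {
    if (sem_trywait(sem) == 0) return true;
    if (errno != EAGAIN && errno != EINTR) return false;  // Hard error.
    usleep(kSliceUs);
    timeout_us -= kSliceUs;
  }
  return sem_trywait(sem) == 0;  // One last attempt at the deadline.
}
```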
-
-
-Semaphore* OS::CreateSemaphore(int count) {
- return new OpenBSDSemaphore(count);
-}
-
-
-void OS::SetUp() {
- // Seed the random number generator. We preserve microsecond resolution.
- uint64_t seed = Ticks() ^ (getpid() << 16);
- srandom(static_cast<unsigned int>(seed));
- limit_mutex = CreateMutex();
-}
-
-
-void OS::TearDown() {
- delete limit_mutex;
-}
-
-
} } // namespace v8::internal
diff --git a/deps/v8/src/platform-posix.cc b/deps/v8/src/platform-posix.cc
index 13b819bd1..fe27eaf71 100644
--- a/deps/v8/src/platform-posix.cc
+++ b/deps/v8/src/platform-posix.cc
@@ -69,6 +69,7 @@
#include "v8.h"
#include "codegen.h"
+#include "isolate-inl.h"
#include "platform.h"
namespace v8 {
@@ -79,11 +80,11 @@ static const pthread_t kNoThread = (pthread_t) 0;
uint64_t OS::CpuFeaturesImpliedByPlatform() {
-#if defined(__APPLE__)
+#if V8_OS_MACOSX
// Mac OS X requires all these to install so we can assume they are present.
// These constants are defined by the CPUid instructions.
const uint64_t one = 1;
- return (one << SSE2) | (one << CMOV) | (one << RDTSC) | (one << CPUID);
+ return (one << SSE2) | (one << CMOV);
#else
return 0; // Nothing special about the other systems.
#endif
@@ -152,7 +153,7 @@ void OS::ProtectCode(void* address, const size_t size) {
void OS::Guard(void* address, const size_t size) {
#if defined(__CYGWIN__)
DWORD oldprotect;
- VirtualProtect(address, size, PAGE_READONLY | PAGE_GUARD, &oldprotect);
+ VirtualProtect(address, size, PAGE_NOACCESS, &oldprotect);
#else
mprotect(address, size, PROT_NONE);
#endif
@@ -171,17 +172,14 @@ void* OS::GetRandomMmapAddr() {
// CpuFeatures::Probe. We don't care about randomization in this case because
// the code page is immediately freed.
if (isolate != NULL) {
+ uintptr_t raw_addr;
+ isolate->random_number_generator()->NextBytes(&raw_addr, sizeof(raw_addr));
#if V8_TARGET_ARCH_X64
- uint64_t rnd1 = V8::RandomPrivate(isolate);
- uint64_t rnd2 = V8::RandomPrivate(isolate);
- uint64_t raw_addr = (rnd1 << 32) ^ rnd2;
// Currently available CPUs have 48 bits of virtual addressing. Truncate
// the hint address to 46 bits to give the kernel a fighting chance of
// fulfilling our placement request.
raw_addr &= V8_UINT64_C(0x3ffffffff000);
#else
- uint32_t raw_addr = V8::RandomPrivate(isolate);
-
raw_addr &= 0x3ffff000;
# ifdef __sun
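
The replacement code above draws raw bits from the isolate's random_number_generator() and then masks them into a plausible, page-aligned mmap hint: 46 of the 48 implemented virtual-address bits on x64, below 1GB on 32-bit targets (Solaris adds a further restriction, elided in the context shown). The masking step in isolation (a sketch; V8 spells the 64-bit constant V8_UINT64_C):

```cpp
#include <stdint.h>

// Mask raw random bits into a page-aligned mmap placement hint,
// as in the hunk above. V8_TARGET_ARCH_X64 is V8's build macro.
static uintptr_t MaskMmapHint(uintptr_t raw_addr) {
#if defined(V8_TARGET_ARCH_X64)
  return raw_addr & UINT64_C(0x3ffffffff000);  // 46 bits, 4K-aligned.
#else
  return raw_addr & 0x3ffff000;                // Below 1GB, 4K-aligned.
#endif
}
```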
@@ -219,11 +217,6 @@ void OS::Sleep(int milliseconds) {
}
-int OS::NumberOfCores() {
- return sysconf(_SC_NPROCESSORS_ONLN);
-}
-
-
void OS::Abort() {
// Redirect to std abort to signal abnormal program termination.
if (FLAG_break_on_abort) {
@@ -318,19 +311,7 @@ int OS::GetUserTime(uint32_t* secs, uint32_t* usecs) {
double OS::TimeCurrentMillis() {
- struct timeval tv;
- if (gettimeofday(&tv, NULL) < 0) return 0.0;
- return (static_cast<double>(tv.tv_sec) * 1000) +
- (static_cast<double>(tv.tv_usec) / 1000);
-}
-
-
-int64_t OS::Ticks() {
- // gettimeofday has microsecond resolution.
- struct timeval tv;
- if (gettimeofday(&tv, NULL) < 0)
- return 0;
- return (static_cast<int64_t>(tv.tv_sec) * 1000000) + tv.tv_usec;
+ return Time::Now().ToJsTime();
}
@@ -756,244 +737,4 @@ void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
}
-class POSIXMutex : public Mutex {
- public:
- POSIXMutex() {
- pthread_mutexattr_t attr;
- memset(&attr, 0, sizeof(attr));
- int result = pthread_mutexattr_init(&attr);
- ASSERT(result == 0);
- result = pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
- ASSERT(result == 0);
- result = pthread_mutex_init(&mutex_, &attr);
- ASSERT(result == 0);
- result = pthread_mutexattr_destroy(&attr);
- ASSERT(result == 0);
- USE(result);
- }
-
- virtual ~POSIXMutex() { pthread_mutex_destroy(&mutex_); }
-
- virtual int Lock() { return pthread_mutex_lock(&mutex_); }
-
- virtual int Unlock() { return pthread_mutex_unlock(&mutex_); }
-
- virtual bool TryLock() {
- int result = pthread_mutex_trylock(&mutex_);
- // Return false if the lock is busy and locking failed.
- if (result == EBUSY) {
- return false;
- }
- ASSERT(result == 0); // Verify no other errors.
- return true;
- }
-
- private:
- pthread_mutex_t mutex_; // Pthread mutex for POSIX platforms.
-};
-
-
-Mutex* OS::CreateMutex() {
- return new POSIXMutex();
-}
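
The removed mutex is deliberately recursive (PTHREAD_MUTEX_RECURSIVE): the owning thread may re-lock without deadlocking, provided lock and unlock counts balance. The attribute dance, reduced to a sketch:

```cpp
#include <pthread.h>

// Initialize a recursive mutex, as the removed POSIXMutex constructor does.
// The attribute object is only needed during init and is destroyed after.
static void InitRecursiveMutex(pthread_mutex_t* mu) {
  pthread_mutexattr_t attr;
  pthread_mutexattr_init(&attr);
  pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
  pthread_mutex_init(mu, &attr);
  pthread_mutexattr_destroy(&attr);
}
```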
-
-
-// ----------------------------------------------------------------------------
-// POSIX socket support.
-//
-
-class POSIXSocket : public Socket {
- public:
- explicit POSIXSocket() {
- // Create the socket.
- socket_ = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
- if (IsValid()) {
- // Allow rapid reuse.
- static const int kOn = 1;
- int ret = setsockopt(socket_, SOL_SOCKET, SO_REUSEADDR,
- &kOn, sizeof(kOn));
- ASSERT(ret == 0);
- USE(ret);
- }
- }
- explicit POSIXSocket(int socket): socket_(socket) { }
- virtual ~POSIXSocket() { Shutdown(); }
-
- // Server initialization.
- bool Bind(const int port);
- bool Listen(int backlog) const;
- Socket* Accept() const;
-
- // Client initialization.
- bool Connect(const char* host, const char* port);
-
- // Shutdown socket for both read and write.
- bool Shutdown();
-
-  // Data Transmission
- int Send(const char* data, int len) const;
- int Receive(char* data, int len) const;
-
- bool SetReuseAddress(bool reuse_address);
-
- bool IsValid() const { return socket_ != -1; }
-
- private:
- int socket_;
-};
-
-
-bool POSIXSocket::Bind(const int port) {
- if (!IsValid()) {
- return false;
- }
-
- sockaddr_in addr;
- memset(&addr, 0, sizeof(addr));
- addr.sin_family = AF_INET;
- addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
- addr.sin_port = htons(port);
- int status = bind(socket_,
- BitCast<struct sockaddr *>(&addr),
- sizeof(addr));
- return status == 0;
-}
-
-
-bool POSIXSocket::Listen(int backlog) const {
- if (!IsValid()) {
- return false;
- }
-
- int status = listen(socket_, backlog);
- return status == 0;
-}
-
-
-Socket* POSIXSocket::Accept() const {
- if (!IsValid()) {
- return NULL;
- }
-
- int socket;
- do {
- socket = accept(socket_, NULL, NULL);
- } while (socket == -1 && errno == EINTR);
-
- if (socket == -1) {
- return NULL;
- } else {
- return new POSIXSocket(socket);
- }
-}
-
-
-bool POSIXSocket::Connect(const char* host, const char* port) {
- if (!IsValid()) {
- return false;
- }
-
- // Lookup host and port.
- struct addrinfo *result = NULL;
- struct addrinfo hints;
- memset(&hints, 0, sizeof(addrinfo));
- hints.ai_family = AF_INET;
- hints.ai_socktype = SOCK_STREAM;
- hints.ai_protocol = IPPROTO_TCP;
- int status = getaddrinfo(host, port, &hints, &result);
- if (status != 0) {
- return false;
- }
-
- // Connect.
- do {
- status = connect(socket_, result->ai_addr, result->ai_addrlen);
- } while (status == -1 && errno == EINTR);
- freeaddrinfo(result);
- return status == 0;
-}
-
-
-bool POSIXSocket::Shutdown() {
- if (IsValid()) {
- // Shutdown socket for both read and write.
- int status = shutdown(socket_, SHUT_RDWR);
- close(socket_);
- socket_ = -1;
- return status == 0;
- }
- return true;
-}
-
-
-int POSIXSocket::Send(const char* data, int len) const {
- if (len <= 0) return 0;
- int written = 0;
- while (written < len) {
- int status = send(socket_, data + written, len - written, 0);
- if (status == 0) {
- break;
- } else if (status > 0) {
- written += status;
- } else if (errno != EINTR) {
- return 0;
- }
- }
- return written;
-}
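
Send() above is the classic short-write loop: send(2) may accept fewer bytes than asked, so the call resumes from the new offset, treating EINTR as retryable and a zero return as a closed connection. The same shape generalizes to any stream socket:

```cpp
#include <cerrno>
#include <sys/socket.h>

// Generic short-write loop (same shape as the removed POSIXSocket::Send).
static int SendAll(int fd, const char* data, int len) {
  int written = 0;
  while (written < len) {
    int n = send(fd, data + written, len - written, 0);
    if (n > 0) written += n;            // Partial write: advance.
    else if (n == 0) break;             // Peer closed the connection.
    else if (errno != EINTR) return 0;  // Hard error; EINTR retries.
  }
  return written;
}
```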
-
-
-int POSIXSocket::Receive(char* data, int len) const {
- if (len <= 0) return 0;
- int status;
- do {
- status = recv(socket_, data, len, 0);
- } while (status == -1 && errno == EINTR);
- return (status < 0) ? 0 : status;
-}
-
-
-bool POSIXSocket::SetReuseAddress(bool reuse_address) {
- int on = reuse_address ? 1 : 0;
- int status = setsockopt(socket_, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on));
- return status == 0;
-}
-
-
-bool Socket::SetUp() {
- // Nothing to do on POSIX.
- return true;
-}
-
-
-int Socket::LastError() {
- return errno;
-}
-
-
-uint16_t Socket::HToN(uint16_t value) {
- return htons(value);
-}
-
-
-uint16_t Socket::NToH(uint16_t value) {
- return ntohs(value);
-}
-
-
-uint32_t Socket::HToN(uint32_t value) {
- return htonl(value);
-}
-
-
-uint32_t Socket::NToH(uint32_t value) {
- return ntohl(value);
-}
-
-
-Socket* OS::CreateSocket() {
- return new POSIXSocket();
-}
-
-
} } // namespace v8::internal
diff --git a/deps/v8/src/platform-solaris.cc b/deps/v8/src/platform-solaris.cc
index b1d88af29..f082af125 100644
--- a/deps/v8/src/platform-solaris.cc
+++ b/deps/v8/src/platform-solaris.cc
@@ -81,9 +81,6 @@ namespace v8 {
namespace internal {
-static Mutex* limit_mutex = NULL;
-
-
const char* OS::LocalTimezone(double time) {
if (std::isnan(time)) return "";
time_t tv = static_cast<time_t>(floor(time/msPerSecond));
@@ -99,31 +96,6 @@ double OS::LocalTimeOffset() {
}
-// We keep the lowest and highest addresses mapped as a quick way of
-// determining that pointers are outside the heap (used mostly in assertions
-// and verification). The estimate is conservative, i.e., not all addresses in
-// 'allocated' space are actually allocated to our heap. The range is
-// [lowest, highest), inclusive on the low end and exclusive on the high end.
-static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
-static void* highest_ever_allocated = reinterpret_cast<void*>(0);
-
-
-static void UpdateAllocatedSpaceLimits(void* address, int size) {
- ASSERT(limit_mutex != NULL);
- ScopedLock lock(limit_mutex);
-
- lowest_ever_allocated = Min(lowest_ever_allocated, address);
- highest_ever_allocated =
- Max(highest_ever_allocated,
- reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size));
-}
-
-
-bool OS::IsOutsideAllocatedSpace(void* address) {
- return address < lowest_ever_allocated || address >= highest_ever_allocated;
-}
-
-
void* OS::Allocate(const size_t requested,
size_t* allocated,
bool is_executable) {
@@ -132,11 +104,10 @@ void* OS::Allocate(const size_t requested,
void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0);
if (mbase == MAP_FAILED) {
- LOG(ISOLATE, StringEvent("OS::Allocate", "mmap failed"));
+ LOG(Isolate::Current(), StringEvent("OS::Allocate", "mmap failed"));
return NULL;
}
*allocated = msize;
- UpdateAllocatedSpaceLimits(mbase, msize);
return mbase;
}
@@ -194,7 +165,7 @@ PosixMemoryMappedFile::~PosixMemoryMappedFile() {
}
-void OS::LogSharedLibraryAddresses() {
+void OS::LogSharedLibraryAddresses(Isolate* isolate) {
}
@@ -366,8 +337,6 @@ bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
kMmapFdOffset)) {
return false;
}
-
- UpdateAllocatedSpaceLimits(base, size);
return true;
}
@@ -392,100 +361,4 @@ bool VirtualMemory::HasLazyCommits() {
return false;
}
-
-class SolarisSemaphore : public Semaphore {
- public:
- explicit SolarisSemaphore(int count) { sem_init(&sem_, 0, count); }
- virtual ~SolarisSemaphore() { sem_destroy(&sem_); }
-
- virtual void Wait();
- virtual bool Wait(int timeout);
- virtual void Signal() { sem_post(&sem_); }
- private:
- sem_t sem_;
-};
-
-
-void SolarisSemaphore::Wait() {
- while (true) {
- int result = sem_wait(&sem_);
- if (result == 0) return; // Successfully got semaphore.
- CHECK(result == -1 && errno == EINTR); // Signal caused spurious wakeup.
- }
-}
-
-
-#ifndef TIMEVAL_TO_TIMESPEC
-#define TIMEVAL_TO_TIMESPEC(tv, ts) do { \
- (ts)->tv_sec = (tv)->tv_sec; \
- (ts)->tv_nsec = (tv)->tv_usec * 1000; \
-} while (false)
-#endif
-
-
-#ifndef timeradd
-#define timeradd(a, b, result) \
- do { \
- (result)->tv_sec = (a)->tv_sec + (b)->tv_sec; \
- (result)->tv_usec = (a)->tv_usec + (b)->tv_usec; \
- if ((result)->tv_usec >= 1000000) { \
- ++(result)->tv_sec; \
- (result)->tv_usec -= 1000000; \
- } \
- } while (0)
-#endif
-
-
-bool SolarisSemaphore::Wait(int timeout) {
- const long kOneSecondMicros = 1000000; // NOLINT
-
-  // Split the timeout into second and microsecond parts.
- struct timeval delta;
- delta.tv_usec = timeout % kOneSecondMicros;
- delta.tv_sec = timeout / kOneSecondMicros;
-
- struct timeval current_time;
- // Get the current time.
- if (gettimeofday(&current_time, NULL) == -1) {
- return false;
- }
-
- // Calculate time for end of timeout.
- struct timeval end_time;
- timeradd(&current_time, &delta, &end_time);
-
- struct timespec ts;
- TIMEVAL_TO_TIMESPEC(&end_time, &ts);
-  // Wait until the semaphore is signalled or the timeout expires.
- while (true) {
- int result = sem_timedwait(&sem_, &ts);
- if (result == 0) return true; // Successfully got semaphore.
- if (result == -1 && errno == ETIMEDOUT) return false; // Timeout.
- CHECK(result == -1 && errno == EINTR); // Signal caused spurious wakeup.
- }
-}
-
-
-Semaphore* OS::CreateSemaphore(int count) {
- return new SolarisSemaphore(count);
-}
-
-
-void OS::SetUp() {
- // Seed the random number generator.
- // Convert the current time to a 64-bit integer first, before converting it
- // to an unsigned. Going directly will cause an overflow and the seed to be
- // set to all ones. The seed will be identical for different instances that
- // call this setup code within the same millisecond.
- uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
- srandom(static_cast<unsigned int>(seed));
- limit_mutex = CreateMutex();
-}
-
-
-void OS::TearDown() {
- delete limit_mutex;
-}
-
-
} } // namespace v8::internal
diff --git a/deps/v8/src/platform-win32.cc b/deps/v8/src/platform-win32.cc
index 292c24a3d..ea4f7ea11 100644
--- a/deps/v8/src/platform-win32.cc
+++ b/deps/v8/src/platform-win32.cc
@@ -38,12 +38,12 @@
#endif // MINGW_HAS_SECURE_API
#endif // __MINGW32__
-#define V8_WIN32_HEADERS_FULL
#include "win32-headers.h"
#include "v8.h"
#include "codegen.h"
+#include "isolate-inl.h"
#include "platform.h"
#include "simulator.h"
#include "vm-state-inl.h"
@@ -125,13 +125,6 @@ int strncpy_s(char* dest, size_t dest_size, const char* source, size_t count) {
#endif // __MINGW32__
-// Generate a pseudo-random number in the range 0-2^31-1. Usually
-// defined in stdlib.h. Missing in both Microsoft Visual Studio C++ and MinGW.
-int random() {
- return rand();
-}
-
-
namespace v8 {
namespace internal {
@@ -145,8 +138,6 @@ double ceiling(double x) {
}
-static Mutex* limit_mutex = NULL;
-
#if V8_TARGET_ARCH_IA32
static void MemMoveWrapper(void* dest, const void* src, size_t size) {
memmove(dest, src, size);
@@ -246,19 +237,15 @@ void MathSetup() {
// timestamps are represented as doubles in milliseconds since 00:00:00 UTC,
// January 1, 1970.
-class Time {
+class Win32Time {
public:
// Constructors.
- Time();
- explicit Time(double jstime);
- Time(int year, int mon, int day, int hour, int min, int sec);
+ explicit Win32Time(double jstime);
+ Win32Time(int year, int mon, int day, int hour, int min, int sec);
// Convert timestamp to JavaScript representation.
double ToJSTime();
- // Set timestamp to current time.
- void SetToCurrentTime();
-
// Returns the local timezone offset in milliseconds east of UTC. This is
// the number of milliseconds you must add to UTC to get local time, i.e.
// LocalOffset(CET) = 3600000 and LocalOffset(PST) = -28800000. This
@@ -300,10 +287,6 @@ class Time {
// Return whether or not daylight savings time is in effect at this time.
bool InDST();
- // Return the difference (in milliseconds) between this timestamp and
- // another timestamp.
- int64_t Diff(Time* other);
-
// Accessor for FILETIME representation.
FILETIME& ft() { return time_.ft_; }
@@ -325,26 +308,20 @@ class Time {
// Static variables.
-bool Time::tz_initialized_ = false;
-TIME_ZONE_INFORMATION Time::tzinfo_;
-char Time::std_tz_name_[kTzNameSize];
-char Time::dst_tz_name_[kTzNameSize];
-
-
-// Initialize timestamp to the start of the epoch.
-Time::Time() {
- t() = 0;
-}
+bool Win32Time::tz_initialized_ = false;
+TIME_ZONE_INFORMATION Win32Time::tzinfo_;
+char Win32Time::std_tz_name_[kTzNameSize];
+char Win32Time::dst_tz_name_[kTzNameSize];
// Initialize timestamp from a JavaScript timestamp.
-Time::Time(double jstime) {
+Win32Time::Win32Time(double jstime) {
t() = static_cast<int64_t>(jstime) * kTimeScaler + kTimeEpoc;
}
// Initialize timestamp from date/time components.
-Time::Time(int year, int mon, int day, int hour, int min, int sec) {
+Win32Time::Win32Time(int year, int mon, int day, int hour, int min, int sec) {
SYSTEMTIME st;
st.wYear = year;
st.wMonth = mon;
@@ -358,14 +335,14 @@ Time::Time(int year, int mon, int day, int hour, int min, int sec) {
// Convert timestamp to JavaScript timestamp.
-double Time::ToJSTime() {
+double Win32Time::ToJSTime() {
return static_cast<double>((t() - kTimeEpoc) / kTimeScaler);
}
// Guess the name of the timezone from the bias.
// The guess is very biased towards the northern hemisphere.
-const char* Time::GuessTimezoneNameFromBias(int bias) {
+const char* Win32Time::GuessTimezoneNameFromBias(int bias) {
static const int kHour = 60;
switch (-bias) {
case -9*kHour: return "Alaska";
@@ -390,7 +367,7 @@ const char* Time::GuessTimezoneNameFromBias(int bias) {
// Initialize timezone information. The timezone information is obtained from
// windows. If we cannot get the timezone information we fall back to CET.
// Please notice that this code is not thread-safe.
-void Time::TzSet() {
+void Win32Time::TzSet() {
// Just return if timezone information has already been initialized.
if (tz_initialized_) return;
@@ -439,78 +416,16 @@ void Time::TzSet() {
}
-// Return the difference in milliseconds between this and another timestamp.
-int64_t Time::Diff(Time* other) {
- return (t() - other->t()) / kTimeScaler;
-}
-
-
-// Set timestamp to current time.
-void Time::SetToCurrentTime() {
- // The default GetSystemTimeAsFileTime has a ~15.5ms resolution.
- // Because we're fast, we like fast timers which have at least a
- // 1ms resolution.
- //
- // timeGetTime() provides 1ms granularity when combined with
- // timeBeginPeriod(). If the host application for v8 wants fast
- // timers, it can use timeBeginPeriod to increase the resolution.
- //
-  // Using timeGetTime() has a drawback because it is a 32-bit value
-  // and hence rolls over every ~49 days.
- //
- // To use the clock, we use GetSystemTimeAsFileTime as our base;
- // and then use timeGetTime to extrapolate current time from the
- // start time. To deal with rollovers, we resync the clock
- // any time when more than kMaxClockElapsedTime has passed or
- // whenever timeGetTime creates a rollover.
-
- static bool initialized = false;
- static TimeStamp init_time;
- static DWORD init_ticks;
- static const int64_t kHundredNanosecondsPerSecond = 10000000;
- static const int64_t kMaxClockElapsedTime =
- 60*kHundredNanosecondsPerSecond; // 1 minute
-
- // If we are uninitialized, we need to resync the clock.
- bool needs_resync = !initialized;
-
- // Get the current time.
- TimeStamp time_now;
- GetSystemTimeAsFileTime(&time_now.ft_);
- DWORD ticks_now = timeGetTime();
-
- // Check if we need to resync due to clock rollover.
- needs_resync |= ticks_now < init_ticks;
-
- // Check if we need to resync due to elapsed time.
- needs_resync |= (time_now.t_ - init_time.t_) > kMaxClockElapsedTime;
-
- // Check if we need to resync due to backwards time change.
- needs_resync |= time_now.t_ < init_time.t_;
-
- // Resync the clock if necessary.
- if (needs_resync) {
- GetSystemTimeAsFileTime(&init_time.ft_);
- init_ticks = ticks_now = timeGetTime();
- initialized = true;
- }
-
- // Finally, compute the actual time. Why is this so hard.
- DWORD elapsed = ticks_now - init_ticks;
- this->time_.t_ = init_time.t_ + (static_cast<int64_t>(elapsed) * 10000);
-}
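
The deleted clock (superseded in this commit by Time::Now()) anchored a FILETIME base and extrapolated with timeGetTime(), which offers 1ms granularity when the host calls timeBeginPeriod(), resyncing the base on tick rollover, backwards time steps, or after about a minute. Its essence, as a sketch (resync triggers elided; link winmm.lib for timeGetTime):

```cpp
#include <windows.h>

static int64_t g_base_100ns;  // FILETIME (100ns units) at last resync.
static DWORD g_base_ticks;    // timeGetTime() value at last resync.

static void Resync() {
  FILETIME ft;
  GetSystemTimeAsFileTime(&ft);
  g_base_100ns = (static_cast<int64_t>(ft.dwHighDateTime) << 32) |
                 ft.dwLowDateTime;
  g_base_ticks = timeGetTime();
}

static int64_t Now100ns() {
  // The removed code resyncs on rollover rather than relying on
  // unsigned wraparound in this subtraction.
  DWORD elapsed_ms = timeGetTime() - g_base_ticks;
  return g_base_100ns + static_cast<int64_t>(elapsed_ms) * 10000;
}
```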
-
-
// Return the local timezone offset in milliseconds east of UTC. This
// takes into account whether daylight saving is in effect at the time.
// Only times in the 32-bit Unix range may be passed to this function.
// Also, adding the time-zone offset to the input must not overflow.
// The function EquivalentTime() in date.js guarantees this.
-int64_t Time::LocalOffset() {
+int64_t Win32Time::LocalOffset() {
// Initialize timezone information, if needed.
TzSet();
- Time rounded_to_second(*this);
+ Win32Time rounded_to_second(*this);
rounded_to_second.t() = rounded_to_second.t() / 1000 / kTimeScaler *
1000 * kTimeScaler;
// Convert to local time using POSIX localtime function.
@@ -541,7 +456,7 @@ int64_t Time::LocalOffset() {
// Return whether or not daylight savings time is in effect at this time.
-bool Time::InDST() {
+bool Win32Time::InDST() {
// Initialize timezone information, if needed.
TzSet();
@@ -565,14 +480,14 @@ bool Time::InDST() {
// Return the daylight savings time offset for this time.
-int64_t Time::DaylightSavingsOffset() {
+int64_t Win32Time::DaylightSavingsOffset() {
return InDST() ? 60 * kMsPerMinute : 0;
}
// Returns a string identifying the current timezone for the
// timestamp taking into account daylight saving.
-char* Time::LocalTimezone() {
+char* Win32Time::LocalTimezone() {
// Return the standard or DST time zone name based on whether daylight
// saving is in effect at the given time.
return InDST() ? dst_tz_name_ : std_tz_name_;
@@ -614,22 +529,14 @@ int OS::GetUserTime(uint32_t* secs, uint32_t* usecs) {
// Returns current time as the number of milliseconds since
// 00:00:00 UTC, January 1, 1970.
double OS::TimeCurrentMillis() {
- Time t;
- t.SetToCurrentTime();
- return t.ToJSTime();
-}
-
-
-// Returns the tickcounter based on timeGetTime.
-int64_t OS::Ticks() {
- return timeGetTime() * 1000; // Convert to microseconds.
+ return Time::Now().ToJsTime();
}
// Returns a string identifying the current timezone taking into
// account daylight saving.
const char* OS::LocalTimezone(double time) {
- return Time(time).LocalTimezone();
+ return Win32Time(time).LocalTimezone();
}
@@ -637,7 +544,7 @@ const char* OS::LocalTimezone(double time) {
// taking daylight savings time into account.
double OS::LocalTimeOffset() {
// Use current time, rounded to the millisecond.
- Time t(TimeCurrentMillis());
+ Win32Time t(TimeCurrentMillis());
// Time::LocalOffset includes any daylight savings offset, so subtract it.
return static_cast<double>(t.LocalOffset() - t.DaylightSavingsOffset());
}
@@ -646,7 +553,7 @@ double OS::LocalTimeOffset() {
// Returns the daylight savings offset in milliseconds for the given
// time.
double OS::DaylightSavingsOffset(double time) {
- int64_t offset = Time(time).DaylightSavingsOffset();
+ int64_t offset = Win32Time(time).DaylightSavingsOffset();
return static_cast<double>(offset);
}
@@ -835,35 +742,6 @@ void OS::StrNCpy(Vector<char> dest, const char* src, size_t n) {
#undef _TRUNCATE
#undef STRUNCATE
-// We keep the lowest and highest addresses mapped as a quick way of
-// determining that pointers are outside the heap (used mostly in assertions
-// and verification). The estimate is conservative, i.e., not all addresses in
-// 'allocated' space are actually allocated to our heap. The range is
-// [lowest, highest), inclusive on the low end and exclusive on the high end.
-static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
-static void* highest_ever_allocated = reinterpret_cast<void*>(0);
-
-
-static void UpdateAllocatedSpaceLimits(void* address, int size) {
- ASSERT(limit_mutex != NULL);
- ScopedLock lock(limit_mutex);
-
- lowest_ever_allocated = Min(lowest_ever_allocated, address);
- highest_ever_allocated =
- Max(highest_ever_allocated,
- reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size));
-}
-
-
-bool OS::IsOutsideAllocatedSpace(void* pointer) {
- if (pointer < lowest_ever_allocated || pointer >= highest_ever_allocated)
- return true;
- // Ask the Windows API
- if (IsBadWritePtr(pointer, 1))
- return true;
- return false;
-}
-
// Get the system's page size used by VirtualAlloc() or the next power
// of two. The reason for always returning a power of two is that the
@@ -910,8 +788,9 @@ void* OS::GetRandomMmapAddr() {
static const intptr_t kAllocationRandomAddressMin = 0x04000000;
static const intptr_t kAllocationRandomAddressMax = 0x3FFF0000;
#endif
- uintptr_t address = (V8::RandomPrivate(isolate) << kPageSizeBits)
- | kAllocationRandomAddressMin;
+ uintptr_t address =
+ (isolate->random_number_generator()->NextInt() << kPageSizeBits) |
+ kAllocationRandomAddressMin;
address &= kAllocationRandomAddressMax;
return reinterpret_cast<void *>(address);
}
@@ -950,14 +829,13 @@ void* OS::Allocate(const size_t requested,
prot);
if (mbase == NULL) {
- LOG(ISOLATE, StringEvent("OS::Allocate", "VirtualAlloc failed"));
+ LOG(Isolate::Current(), StringEvent("OS::Allocate", "VirtualAlloc failed"));
return NULL;
}
ASSERT(IsAligned(reinterpret_cast<size_t>(mbase), OS::AllocateAlignment()));
*allocated = msize;
- UpdateAllocatedSpaceLimits(mbase, static_cast<int>(msize));
return mbase;
}
@@ -982,7 +860,7 @@ void OS::ProtectCode(void* address, const size_t size) {
void OS::Guard(void* address, const size_t size) {
DWORD oldprotect;
- VirtualProtect(address, size, PAGE_READONLY | PAGE_GUARD, &oldprotect);
+ VirtualProtect(address, size, PAGE_NOACCESS, &oldprotect);
}
@@ -991,13 +869,6 @@ void OS::Sleep(int milliseconds) {
}
-int OS::NumberOfCores() {
- SYSTEM_INFO info;
- GetSystemInfo(&info);
- return info.dwNumberOfProcessors;
-}
-
-
void OS::Abort() {
if (IsDebuggerPresent() || FLAG_break_on_abort) {
DebugBreak();
@@ -1010,6 +881,9 @@ void OS::Abort() {
void OS::DebugBreak() {
#ifdef _MSC_VER
+  // To avoid requiring Visual Studio runtime support, the following code
+  // can be used instead:
+ // __asm { int 3 }
__debugbreak();
#else
::DebugBreak();
@@ -1255,7 +1129,7 @@ TLHELP32_FUNCTION_LIST(DLL_FUNC_LOADED)
// Load the symbols for generating stack traces.
-static bool LoadSymbols(HANDLE process_handle) {
+static bool LoadSymbols(Isolate* isolate, HANDLE process_handle) {
static bool symbols_loaded = false;
if (symbols_loaded) return true;
@@ -1304,7 +1178,7 @@ static bool LoadSymbols(HANDLE process_handle) {
if (err != ERROR_MOD_NOT_FOUND &&
err != ERROR_INVALID_HANDLE) return false;
}
- LOG(i::Isolate::Current(),
+ LOG(isolate,
SharedLibraryEvent(
module_entry.szExePath,
reinterpret_cast<unsigned int>(module_entry.modBaseAddr),
@@ -1319,14 +1193,14 @@ static bool LoadSymbols(HANDLE process_handle) {
}
-void OS::LogSharedLibraryAddresses() {
+void OS::LogSharedLibraryAddresses(Isolate* isolate) {
// SharedLibraryEvents are logged when loading symbol information.
// Only the shared libraries loaded at the time of the call to
// LogSharedLibraryAddresses are logged. DLLs loaded after
// initialization are not accounted for.
if (!LoadDbgHelpAndTlHelp32()) return;
HANDLE process_handle = GetCurrentProcess();
- LoadSymbols(process_handle);
+ LoadSymbols(isolate, process_handle);
}
@@ -1352,7 +1226,7 @@ int OS::StackWalk(Vector<OS::StackFrame> frames) {
HANDLE thread_handle = GetCurrentThread();
// Read the symbols.
- if (!LoadSymbols(process_handle)) return kStackWalkError;
+ if (!LoadSymbols(Isolate::Current(), process_handle)) return kStackWalkError;
// Capture current context.
CONTEXT context;
@@ -1458,7 +1332,7 @@ int OS::StackWalk(Vector<OS::StackFrame> frames) {
#pragma warning(pop)
#else // __MINGW32__
-void OS::LogSharedLibraryAddresses() { }
+void OS::LogSharedLibraryAddresses(Isolate* isolate) { }
void OS::SignalCodeMovingGC() { }
int OS::StackWalk(Vector<OS::StackFrame> frames) { return 0; }
#endif // __MINGW32__
@@ -1562,7 +1436,7 @@ bool VirtualMemory::Guard(void* address) {
if (NULL == VirtualAlloc(address,
OS::CommitPageSize(),
MEM_COMMIT,
- PAGE_READONLY | PAGE_GUARD)) {
+ PAGE_NOACCESS)) {
return false;
}
return true;
@@ -1579,8 +1453,6 @@ bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
if (NULL == VirtualAlloc(base, size, MEM_COMMIT, prot)) {
return false;
}
-
- UpdateAllocatedSpaceLimits(base, static_cast<int>(size));
return true;
}
@@ -1703,297 +1575,4 @@ void Thread::YieldCPU() {
Sleep(0);
}
-
-// ----------------------------------------------------------------------------
-// Win32 mutex support.
-//
-// On Win32 mutexes are implemented using CRITICAL_SECTION objects. These are
-// faster than Win32 Mutex objects because they are implemented using user mode
-// atomic instructions. Therefore we only do ring transitions if there is lock
-// contention.
-
-class Win32Mutex : public Mutex {
- public:
- Win32Mutex() { InitializeCriticalSection(&cs_); }
-
- virtual ~Win32Mutex() { DeleteCriticalSection(&cs_); }
-
- virtual int Lock() {
- EnterCriticalSection(&cs_);
- return 0;
- }
-
- virtual int Unlock() {
- LeaveCriticalSection(&cs_);
- return 0;
- }
-
-
- virtual bool TryLock() {
-    // Returns non-zero if the critical section was entered successfully.
- return TryEnterCriticalSection(&cs_);
- }
-
- private:
- CRITICAL_SECTION cs_; // Critical section used for mutex
-};
-
-
-Mutex* OS::CreateMutex() {
- return new Win32Mutex();
-}
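
The rationale in the removed comment is the standard one: an uncontended CRITICAL_SECTION is acquired with user-mode interlocked instructions, and a ring transition happens only under contention, unlike HANDLE-based Win32 mutexes. Bare usage, for reference:

```cpp
#include <windows.h>

// CRITICAL_SECTION lifecycle, as wrapped by the removed Win32Mutex.
CRITICAL_SECTION cs;

void Setup()    { InitializeCriticalSection(&cs); }
void Teardown() { DeleteCriticalSection(&cs); }

void GuardedWork() {
  EnterCriticalSection(&cs);   // Stays in user mode if uncontended.
  // ... protected region ...
  LeaveCriticalSection(&cs);
}
```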
-
-
-// ----------------------------------------------------------------------------
-// Win32 semaphore support.
-//
-// On Win32 semaphores are implemented using Win32 Semaphore objects. The
-// semaphores are anonymous. Also, the semaphores are initialized to have
-// no upper limit on count.
-
-
-class Win32Semaphore : public Semaphore {
- public:
- explicit Win32Semaphore(int count) {
- sem = ::CreateSemaphoreA(NULL, count, 0x7fffffff, NULL);
- }
-
- ~Win32Semaphore() {
- CloseHandle(sem);
- }
-
- void Wait() {
- WaitForSingleObject(sem, INFINITE);
- }
-
- bool Wait(int timeout) {
-    // The Windows API takes milliseconds; the timeout argument is in
-    // microseconds.
- DWORD millis_timeout = timeout / 1000;
- return WaitForSingleObject(sem, millis_timeout) != WAIT_TIMEOUT;
- }
-
- void Signal() {
- LONG dummy;
- ReleaseSemaphore(sem, 1, &dummy);
- }
-
- private:
- HANDLE sem;
-};
-
-
-Semaphore* OS::CreateSemaphore(int count) {
- return new Win32Semaphore(count);
-}
-
-
-// ----------------------------------------------------------------------------
-// Win32 socket support.
-//
-
-class Win32Socket : public Socket {
- public:
- explicit Win32Socket() {
- // Create the socket.
- socket_ = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
- }
- explicit Win32Socket(SOCKET socket): socket_(socket) { }
- virtual ~Win32Socket() { Shutdown(); }
-
- // Server initialization.
- bool Bind(const int port);
- bool Listen(int backlog) const;
- Socket* Accept() const;
-
- // Client initialization.
- bool Connect(const char* host, const char* port);
-
- // Shutdown socket for both read and write.
- bool Shutdown();
-
- // Data Transmission
- int Send(const char* data, int len) const;
- int Receive(char* data, int len) const;
-
- bool SetReuseAddress(bool reuse_address);
-
- bool IsValid() const { return socket_ != INVALID_SOCKET; }
-
- private:
- SOCKET socket_;
-};
-
-
-bool Win32Socket::Bind(const int port) {
- if (!IsValid()) {
- return false;
- }
-
- sockaddr_in addr;
- memset(&addr, 0, sizeof(addr));
- addr.sin_family = AF_INET;
- addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
- addr.sin_port = htons(port);
- int status = bind(socket_,
- reinterpret_cast<struct sockaddr *>(&addr),
- sizeof(addr));
- return status == 0;
-}
-
-
-bool Win32Socket::Listen(int backlog) const {
- if (!IsValid()) {
- return false;
- }
-
- int status = listen(socket_, backlog);
- return status == 0;
-}
-
-
-Socket* Win32Socket::Accept() const {
- if (!IsValid()) {
- return NULL;
- }
-
- SOCKET socket = accept(socket_, NULL, NULL);
- if (socket == INVALID_SOCKET) {
- return NULL;
- } else {
- return new Win32Socket(socket);
- }
-}
-
-
-bool Win32Socket::Connect(const char* host, const char* port) {
- if (!IsValid()) {
- return false;
- }
-
- // Lookup host and port.
- struct addrinfo *result = NULL;
- struct addrinfo hints;
- memset(&hints, 0, sizeof(addrinfo));
- hints.ai_family = AF_INET;
- hints.ai_socktype = SOCK_STREAM;
- hints.ai_protocol = IPPROTO_TCP;
- int status = getaddrinfo(host, port, &hints, &result);
- if (status != 0) {
- return false;
- }
-
- // Connect.
- status = connect(socket_,
- result->ai_addr,
- static_cast<int>(result->ai_addrlen));
- freeaddrinfo(result);
- return status == 0;
-}
-
-
-bool Win32Socket::Shutdown() {
- if (IsValid()) {
- // Shutdown socket for both read and write.
- int status = shutdown(socket_, SD_BOTH);
- closesocket(socket_);
- socket_ = INVALID_SOCKET;
- return status == SOCKET_ERROR;
- }
- return true;
-}
-
-
-int Win32Socket::Send(const char* data, int len) const {
- if (len <= 0) return 0;
- int written = 0;
- while (written < len) {
- int status = send(socket_, data + written, len - written, 0);
- if (status == 0) {
- break;
- } else if (status > 0) {
- written += status;
- } else {
- return 0;
- }
- }
- return written;
-}
-
-
-int Win32Socket::Receive(char* data, int len) const {
- if (len <= 0) return 0;
- int status = recv(socket_, data, len, 0);
- return (status == SOCKET_ERROR) ? 0 : status;
-}
-
-
-bool Win32Socket::SetReuseAddress(bool reuse_address) {
- BOOL on = reuse_address ? true : false;
- int status = setsockopt(socket_, SOL_SOCKET, SO_REUSEADDR,
- reinterpret_cast<char*>(&on), sizeof(on));
- return status == SOCKET_ERROR;
-}
-
-
-bool Socket::SetUp() {
- // Initialize Winsock32
- int err;
- WSADATA winsock_data;
- WORD version_requested = MAKEWORD(1, 0);
- err = WSAStartup(version_requested, &winsock_data);
- if (err != 0) {
- PrintF("Unable to initialize Winsock, err = %d\n", Socket::LastError());
- }
-
- return err == 0;
-}
-
-
-int Socket::LastError() {
- return WSAGetLastError();
-}
-
-
-uint16_t Socket::HToN(uint16_t value) {
- return htons(value);
-}
-
-
-uint16_t Socket::NToH(uint16_t value) {
- return ntohs(value);
-}
-
-
-uint32_t Socket::HToN(uint32_t value) {
- return htonl(value);
-}
-
-
-uint32_t Socket::NToH(uint32_t value) {
- return ntohl(value);
-}
-
-
-Socket* OS::CreateSocket() {
- return new Win32Socket();
-}
-
-
-void OS::SetUp() {
- // Seed the random number generator.
- // Convert the current time to a 64-bit integer first, before converting it
- // to an unsigned. Going directly can cause an overflow and the seed to be
- // set to all ones. The seed will be identical for different instances that
- // call this setup code within the same millisecond.
- uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
- srand(static_cast<unsigned int>(seed));
- limit_mutex = CreateMutex();
-}
-
-
-void OS::TearDown() {
- delete limit_mutex;
-}
-
-
} } // namespace v8::internal
diff --git a/deps/v8/src/platform.h b/deps/v8/src/platform.h
index 8b27c19a6..ee8fb9291 100644
--- a/deps/v8/src/platform.h
+++ b/deps/v8/src/platform.h
@@ -44,6 +44,13 @@
#ifndef V8_PLATFORM_H_
#define V8_PLATFORM_H_
+#include <cstdarg>
+
+#include "platform/mutex.h"
+#include "platform/semaphore.h"
+#include "utils.h"
+#include "v8globals.h"
+
#ifdef __sun
# ifndef signbit
namespace std {
@@ -52,22 +59,8 @@ int signbit(double x);
# endif
#endif
-// GCC specific stuff
-#ifdef __GNUC__
-
-// Needed for va_list on at least MinGW and Android.
-#include <stdarg.h>
-
-#define __GNUC_VERSION__ (__GNUC__ * 10000 + __GNUC_MINOR__ * 100)
-
-#endif // __GNUC__
-
-
-// Windows specific stuff.
-#ifdef WIN32
-
// Microsoft Visual C++ specific stuff.
-#ifdef _MSC_VER
+#if V8_CC_MSVC
#include "win32-headers.h"
#include "win32-math.h"
@@ -76,7 +69,7 @@ int strncasecmp(const char* s1, const char* s2, int n);
inline int lrint(double flt) {
int intgr;
-#if defined(V8_TARGET_ARCH_IA32)
+#if V8_TARGET_ARCH_IA32
__asm {
fld flt
fistp intgr
@@ -91,25 +84,11 @@ inline int lrint(double flt) {
return intgr;
}
-#endif // _MSC_VER
-
-#ifndef __CYGWIN__
-// Random is missing on both Visual Studio and MinGW.
-int random();
-#endif
-
-#endif // WIN32
-
-#include "lazy-instance.h"
-#include "utils.h"
-#include "v8globals.h"
+#endif // V8_CC_MSVC
namespace v8 {
namespace internal {
-class Semaphore;
-class Mutex;
-
double ceiling(double x);
double modulo(double x, double y);
@@ -124,9 +103,6 @@ double fast_sqrt(double input);
// on demand.
void lazily_initialize_fast_exp();
-// Forward declarations.
-class Socket;
-
// ----------------------------------------------------------------------------
// Fast TLS support
@@ -190,26 +166,16 @@ inline intptr_t InternalGetExistingThreadLocal(intptr_t index) {
class OS {
public:
- // Initializes the platform OS support. Called once at VM startup.
- static void SetUp();
-
// Initializes the platform OS support that depend on CPU features. This is
// called after CPU initialization.
static void PostSetUp();
- // Clean up platform-OS-related things. Called once at VM shutdown.
- static void TearDown();
-
// Returns the accumulated user time for thread. This routine
// can be used for profiling. The implementation should
// strive for high-precision timer resolution, preferable
// micro-second resolution.
static int GetUserTime(uint32_t* secs, uint32_t* usecs);
- // Get a tick counter normalized to one tick per microsecond.
- // Used for calculating time intervals.
- static int64_t Ticks();
-
// Returns current time as the number of milliseconds since
// 00:00:00 UTC, January 1, 1970.
static double TimeCurrentMillis();
@@ -277,18 +243,9 @@ class OS {
// Get the Alignment guaranteed by Allocate().
static size_t AllocateAlignment();
- // Returns an indication of whether a pointer is in a space that
- // has been allocated by Allocate(). This method may conservatively
- // always return false, but giving more accurate information may
- // improve the robustness of the stack dump code in the presence of
- // heap corruption.
- static bool IsOutsideAllocatedSpace(void* pointer);
-
// Sleep for a number of milliseconds.
static void Sleep(const int milliseconds);
- static int NumberOfCores();
-
// Abort the current process.
static void Abort();
@@ -309,18 +266,6 @@ class OS {
static int StackWalk(Vector<StackFrame> frames);
- // Factory method for creating platform dependent Mutex.
- // Please use delete to reclaim the storage for the returned Mutex.
- static Mutex* CreateMutex();
-
- // Factory method for creating platform dependent Semaphore.
- // Please use delete to reclaim the storage for the returned Semaphore.
- static Semaphore* CreateSemaphore(int count);
-
- // Factory method for creating platform dependent Socket.
- // Please use delete to reclaim the storage for the returned Socket.
- static Socket* CreateSocket();
-
class MemoryMappedFile {
public:
static MemoryMappedFile* open(const char* name);
@@ -342,7 +287,7 @@ class OS {
// Support for the profiler. Can do nothing, in which case ticks
 // occurring in shared libraries will not be properly accounted for.
- static void LogSharedLibraryAddresses();
+ static void LogSharedLibraryAddresses(Isolate* isolate);
// Support for the profiler. Notifies the external profiling
// process that a code moving garbage collection starts. Can do
@@ -365,22 +310,10 @@ class OS {
// Returns the double constant NAN
static double nan_value();
- // Support runtime detection of Cpu implementer
- static CpuImplementer GetCpuImplementer();
-
- // Support runtime detection of Cpu implementer
- static CpuPart GetCpuPart(CpuImplementer implementer);
-
- // Support runtime detection of VFP3 on ARM CPUs.
- static bool ArmCpuHasFeature(CpuFeature feature);
-
// Support runtime detection of whether the hard float option of the
// EABI is used.
static bool ArmUsingHardFloat();
- // Support runtime detection of FPU on MIPS CPUs.
- static bool MipsCpuHasFeature(CpuFeature feature);
-
// Returns the activation frame alignment constraint or zero if
// the platform doesn't care. Guaranteed to be a power of two.
static int ActivationFrameAlignment();
@@ -547,59 +480,6 @@ class VirtualMemory {
// ----------------------------------------------------------------------------
-// Semaphore
-//
-// A semaphore object is a synchronization object that maintains a count. The
-// count is decremented each time a thread completes a wait for the semaphore
-// object and incremented each time a thread signals the semaphore. When the
- // count reaches zero, threads waiting for the semaphore block until
-// count becomes non-zero.
-
-class Semaphore {
- public:
- virtual ~Semaphore() {}
-
- // Suspends the calling thread until the semaphore counter is non-zero
- // and then decrements the semaphore counter.
- virtual void Wait() = 0;
-
- // Suspends the calling thread until the counter is non-zero or the timeout
- // time has passed. If timeout happens the return value is false and the
- // counter is unchanged. Otherwise the semaphore counter is decremented and
- // true is returned. The timeout value is specified in microseconds.
- virtual bool Wait(int timeout) = 0;
-
- // Increments the semaphore counter.
- virtual void Signal() = 0;
-};
-
-template <int InitialValue>
-struct CreateSemaphoreTrait {
- static Semaphore* Create() {
- return OS::CreateSemaphore(InitialValue);
- }
-};
-
-// POD Semaphore initialized lazily (i.e. the first time Pointer() is called).
-// Usage:
-// // The following semaphore starts at 0.
-// static LazySemaphore<0>::type my_semaphore = LAZY_SEMAPHORE_INITIALIZER;
-//
-// void my_function() {
-// // Do something with my_semaphore.Pointer().
-// }
-//
-template <int InitialValue>
-struct LazySemaphore {
- typedef typename LazyDynamicInstance<
- Semaphore, CreateSemaphoreTrait<InitialValue>,
- ThreadSafeInitOnceTrait>::type type;
-};
-
-#define LAZY_SEMAPHORE_INITIALIZER LAZY_DYNAMIC_INSTANCE_INITIALIZER
-
-
-// ----------------------------------------------------------------------------
// Thread
//
// Thread objects are used for creating and running threads. When the start()
@@ -641,7 +521,7 @@ class Thread {
// Start new thread and wait until Run() method is called on the new thread.
void StartSynchronously() {
- start_semaphore_ = OS::CreateSemaphore(0);
+ start_semaphore_ = new Semaphore(0);
Start();
start_semaphore_->Wait();
delete start_semaphore_;
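
A usage sketch (not part of the diff): as StartSynchronously() above shows, semaphores are now plain value objects constructed in place rather than heap-allocated through OS::CreateSemaphore().

Semaphore ready(0);    // initial count 0; was: OS::CreateSemaphore(0)
// Worker thread:      ready.Signal();
// Waiting thread:     ready.Wait();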
@@ -714,113 +594,6 @@ class Thread {
DISALLOW_COPY_AND_ASSIGN(Thread);
};
-
-// ----------------------------------------------------------------------------
-// Mutex
-//
-// Mutexes are used for serializing access to non-reentrant sections of code.
-// The implementations of mutex should allow for nested/recursive locking.
-
-class Mutex {
- public:
- virtual ~Mutex() {}
-
- // Locks the given mutex. If the mutex is currently unlocked, it becomes
- // locked and owned by the calling thread immediately. If the mutex
- // is already locked by another thread, suspends the calling thread until
- // the mutex is unlocked.
- virtual int Lock() = 0;
-
- // Unlocks the given mutex. The mutex is assumed to be locked and owned by
- // the calling thread on entrance.
- virtual int Unlock() = 0;
-
- // Tries to lock the given mutex. Returns whether the mutex was
- // successfully locked.
- virtual bool TryLock() = 0;
-};
-
-struct CreateMutexTrait {
- static Mutex* Create() {
- return OS::CreateMutex();
- }
-};
-
-// POD Mutex initialized lazily (i.e. the first time Pointer() is called).
-// Usage:
-// static LazyMutex my_mutex = LAZY_MUTEX_INITIALIZER;
-//
-// void my_function() {
-// ScopedLock my_lock(my_mutex.Pointer());
-// // Do something.
-// }
-//
-typedef LazyDynamicInstance<
- Mutex, CreateMutexTrait, ThreadSafeInitOnceTrait>::type LazyMutex;
-
-#define LAZY_MUTEX_INITIALIZER LAZY_DYNAMIC_INSTANCE_INITIALIZER
-
-// ----------------------------------------------------------------------------
-// ScopedLock
-//
-// Stack-allocated ScopedLocks provide block-scoped locking and
-// unlocking of a mutex.
-class ScopedLock {
- public:
- explicit ScopedLock(Mutex* mutex): mutex_(mutex) {
- ASSERT(mutex_ != NULL);
- mutex_->Lock();
- }
- ~ScopedLock() {
- mutex_->Unlock();
- }
-
- private:
- Mutex* mutex_;
- DISALLOW_COPY_AND_ASSIGN(ScopedLock);
-};
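
A migration sketch (not part of the diff): ScopedLock is superseded by the LockGuard template declared in platform/mutex.h.

static LazyMutex my_mutex = LAZY_MUTEX_INITIALIZER;

void MyFunction() {
  LockGuard<Mutex> guard(my_mutex.Pointer());  // was: ScopedLock lock(...)
  // ... critical section ...
}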
-
-
-// ----------------------------------------------------------------------------
-// Socket
-//
-
-class Socket {
- public:
- virtual ~Socket() {}
-
- // Server initialization.
- virtual bool Bind(const int port) = 0;
- virtual bool Listen(int backlog) const = 0;
- virtual Socket* Accept() const = 0;
-
- // Client initialization.
- virtual bool Connect(const char* host, const char* port) = 0;
-
- // Shutdown socket for both read and write. This causes blocking Send and
- // Receive calls to exit. After Shutdown the Socket object cannot be used for
- // any communication.
- virtual bool Shutdown() = 0;
-
- // Data Transmission
- // Return 0 on failure.
- virtual int Send(const char* data, int len) const = 0;
- virtual int Receive(char* data, int len) const = 0;
-
- // Set the value of the SO_REUSEADDR socket option.
- virtual bool SetReuseAddress(bool reuse_address) = 0;
-
- virtual bool IsValid() const = 0;
-
- static bool SetUp();
- static int LastError();
- static uint16_t HToN(uint16_t value);
- static uint16_t NToH(uint16_t value);
- static uint32_t HToN(uint32_t value);
- static uint32_t NToH(uint32_t value);
-};
-
-
} } // namespace v8::internal
#endif // V8_PLATFORM_H_
diff --git a/deps/v8/src/platform/condition-variable.cc b/deps/v8/src/platform/condition-variable.cc
new file mode 100644
index 000000000..e2bf3882e
--- /dev/null
+++ b/deps/v8/src/platform/condition-variable.cc
@@ -0,0 +1,345 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "platform/condition-variable.h"
+
+#include <cerrno>
+#include <ctime>
+
+#include "platform/time.h"
+
+namespace v8 {
+namespace internal {
+
+#if V8_OS_POSIX
+
+ConditionVariable::ConditionVariable() {
+ // TODO(bmeurer): The test for V8_LIBRT_NOT_AVAILABLE is a temporary
+ // hack to support cross-compiling Chrome for Android in AOSP. Remove
+ // this once AOSP is fixed.
+#if (V8_OS_FREEBSD || V8_OS_NETBSD || V8_OS_OPENBSD || \
+ (V8_OS_LINUX && V8_LIBC_GLIBC)) && !V8_LIBRT_NOT_AVAILABLE
+ // On Free/Net/OpenBSD and Linux with glibc we can change the time
+ // source for pthread_cond_timedwait() to use the monotonic clock.
+ pthread_condattr_t attr;
+ int result = pthread_condattr_init(&attr);
+ ASSERT_EQ(0, result);
+ result = pthread_condattr_setclock(&attr, CLOCK_MONOTONIC);
+ ASSERT_EQ(0, result);
+ result = pthread_cond_init(&native_handle_, &attr);
+ ASSERT_EQ(0, result);
+ result = pthread_condattr_destroy(&attr);
+#else
+ int result = pthread_cond_init(&native_handle_, NULL);
+#endif
+ ASSERT_EQ(0, result);
+ USE(result);
+}
+
+
+ConditionVariable::~ConditionVariable() {
+ int result = pthread_cond_destroy(&native_handle_);
+ ASSERT_EQ(0, result);
+ USE(result);
+}
+
+
+void ConditionVariable::NotifyOne() {
+ int result = pthread_cond_signal(&native_handle_);
+ ASSERT_EQ(0, result);
+ USE(result);
+}
+
+
+void ConditionVariable::NotifyAll() {
+ int result = pthread_cond_broadcast(&native_handle_);
+ ASSERT_EQ(0, result);
+ USE(result);
+}
+
+
+void ConditionVariable::Wait(Mutex* mutex) {
+ mutex->AssertHeldAndUnmark();
+ int result = pthread_cond_wait(&native_handle_, &mutex->native_handle());
+ ASSERT_EQ(0, result);
+ USE(result);
+ mutex->AssertUnheldAndMark();
+}
+
+
+bool ConditionVariable::WaitFor(Mutex* mutex, const TimeDelta& rel_time) {
+ struct timespec ts;
+ int result;
+ mutex->AssertHeldAndUnmark();
+#if V8_OS_MACOSX
+ // Mac OS X provides pthread_cond_timedwait_relative_np(), which does
+ // not depend on the real time clock, which is what you really WANT here!
+ ts = rel_time.ToTimespec();
+ ASSERT_GE(ts.tv_sec, 0);
+ ASSERT_GE(ts.tv_nsec, 0);
+ result = pthread_cond_timedwait_relative_np(
+ &native_handle_, &mutex->native_handle(), &ts);
+#else
+ // TODO(bmeurer): The test for V8_LIBRT_NOT_AVAILABLE is a temporary
+ // hack to support cross-compiling Chrome for Android in AOSP. Remove
+ // this once AOSP is fixed.
+#if (V8_OS_FREEBSD || V8_OS_NETBSD || V8_OS_OPENBSD || \
+ (V8_OS_LINUX && V8_LIBC_GLIBC)) && !V8_LIBRT_NOT_AVAILABLE
+ // On Free/Net/OpenBSD and Linux with glibc we can change the time
+ // source for pthread_cond_timedwait() to use the monotonic clock.
+ result = clock_gettime(CLOCK_MONOTONIC, &ts);
+ ASSERT_EQ(0, result);
+ Time now = Time::FromTimespec(ts);
+#else
+ // The timeout argument to pthread_cond_timedwait() is in absolute time.
+ Time now = Time::NowFromSystemTime();
+#endif
+ Time end_time = now + rel_time;
+ ASSERT_GE(end_time, now);
+ ts = end_time.ToTimespec();
+ result = pthread_cond_timedwait(
+ &native_handle_, &mutex->native_handle(), &ts);
+#endif // V8_OS_MACOSX
+ mutex->AssertUnheldAndMark();
+ if (result == ETIMEDOUT) {
+ return false;
+ }
+ ASSERT_EQ(0, result);
+ return true;
+}
+
+#elif V8_OS_WIN
+
+struct ConditionVariable::Event {
+ Event() : handle_(::CreateEventA(NULL, true, false, NULL)) {
+ ASSERT(handle_ != NULL);
+ }
+
+ ~Event() {
+ BOOL ok = ::CloseHandle(handle_);
+ ASSERT(ok);
+ USE(ok);
+ }
+
+ bool WaitFor(DWORD timeout_ms) {
+ DWORD result = ::WaitForSingleObject(handle_, timeout_ms);
+ if (result == WAIT_OBJECT_0) {
+ return true;
+ }
+ ASSERT(result == WAIT_TIMEOUT);
+ return false;
+ }
+
+ HANDLE handle_;
+ Event* next_;
+ HANDLE thread_;
+ volatile bool notified_;
+};
+
+
+ConditionVariable::NativeHandle::~NativeHandle() {
+ ASSERT(waitlist_ == NULL);
+
+ while (freelist_ != NULL) {
+ Event* event = freelist_;
+ freelist_ = event->next_;
+ delete event;
+ }
+}
+
+
+ConditionVariable::Event* ConditionVariable::NativeHandle::Pre() {
+ LockGuard<Mutex> lock_guard(&mutex_);
+
+ // Grab an event from the free list or create a new one.
+ Event* event = freelist_;
+ if (event != NULL) {
+ freelist_ = event->next_;
+ } else {
+ event = new Event;
+ }
+ event->thread_ = GetCurrentThread();
+ event->notified_ = false;
+
+#ifdef DEBUG
+ // The event must not be on the wait list.
+ for (Event* we = waitlist_; we != NULL; we = we->next_) {
+ ASSERT_NE(event, we);
+ }
+#endif
+
+ // Prepend the event to the wait list.
+ event->next_ = waitlist_;
+ waitlist_ = event;
+
+ return event;
+}
+
+
+void ConditionVariable::NativeHandle::Post(Event* event, bool result) {
+ LockGuard<Mutex> lock_guard(&mutex_);
+
+ // Remove the event from the wait list.
+ for (Event** wep = &waitlist_;; wep = &(*wep)->next_) {
+ ASSERT_NE(NULL, *wep);
+ if (*wep == event) {
+ *wep = event->next_;
+ break;
+ }
+ }
+
+#ifdef DEBUG
+ // The event must not be on the free list.
+ for (Event* fe = freelist_; fe != NULL; fe = fe->next_) {
+ ASSERT_NE(event, fe);
+ }
+#endif
+
+ // Reset the event.
+ BOOL ok = ::ResetEvent(event->handle_);
+ ASSERT(ok);
+ USE(ok);
+
+ // Insert the event into the free list.
+ event->next_ = freelist_;
+ freelist_ = event;
+
+ // Forward signals delivered after the timeout to the next waiting event.
+ if (!result && event->notified_ && waitlist_ != NULL) {
+ ok = ::SetEvent(waitlist_->handle_);
+ ASSERT(ok);
+ USE(ok);
+ waitlist_->notified_ = true;
+ }
+}
+
+
+ConditionVariable::ConditionVariable() {}
+
+
+ConditionVariable::~ConditionVariable() {}
+
+
+void ConditionVariable::NotifyOne() {
+ // Notify the thread with the highest priority in the waitlist
+ // that was not already signalled.
+ LockGuard<Mutex> lock_guard(native_handle_.mutex());
+ Event* highest_event = NULL;
+ int highest_priority = std::numeric_limits<int>::min();
+ for (Event* event = native_handle().waitlist();
+ event != NULL;
+ event = event->next_) {
+ if (event->notified_) {
+ continue;
+ }
+ int priority = GetThreadPriority(event->thread_);
+ ASSERT_NE(THREAD_PRIORITY_ERROR_RETURN, priority);
+ if (priority >= highest_priority) {
+ highest_priority = priority;
+ highest_event = event;
+ }
+ }
+ if (highest_event != NULL) {
+ ASSERT(!highest_event->notified_);
+ ::SetEvent(highest_event->handle_);
+ highest_event->notified_ = true;
+ }
+}
+
+
+void ConditionVariable::NotifyAll() {
+ // Notify all threads on the waitlist.
+ LockGuard<Mutex> lock_guard(native_handle_.mutex());
+ for (Event* event = native_handle().waitlist();
+ event != NULL;
+ event = event->next_) {
+ if (!event->notified_) {
+ ::SetEvent(event->handle_);
+ event->notified_ = true;
+ }
+ }
+}
+
+
+void ConditionVariable::Wait(Mutex* mutex) {
+ // Create and setup the wait event.
+ Event* event = native_handle_.Pre();
+
+ // Release the user mutex.
+ mutex->Unlock();
+
+ // Wait on the wait event.
+ while (!event->WaitFor(INFINITE))
+ ;
+
+ // Reacquire the user mutex.
+ mutex->Lock();
+
+ // Release the wait event (we must have been notified).
+ ASSERT(event->notified_);
+ native_handle_.Post(event, true);
+}
+
+
+bool ConditionVariable::WaitFor(Mutex* mutex, const TimeDelta& rel_time) {
+ // Create and setup the wait event.
+ Event* event = native_handle_.Pre();
+
+ // Release the user mutex.
+ mutex->Unlock();
+
+ // Wait on the wait event.
+ TimeTicks now = TimeTicks::Now();
+ TimeTicks end = now + rel_time;
+ bool result = false;
+ while (true) {
+ int64_t msec = (end - now).InMilliseconds();
+ if (msec >= static_cast<int64_t>(INFINITE)) {
+ result = event->WaitFor(INFINITE - 1);
+ if (result) {
+ break;
+ }
+ now = TimeTicks::Now();
+ } else {
+ result = event->WaitFor((msec < 0) ? 0 : static_cast<DWORD>(msec));
+ break;
+ }
+ }
+
+ // Reacquire the user mutex.
+ mutex->Lock();
+
+ // Release the wait event.
+ ASSERT(!result || event->notified_);
+ native_handle_.Post(event, result);
+
+ return result;
+}
+
+#endif // V8_OS_POSIX
+
+} } // namespace v8::internal
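
A minimal usage sketch (not part of the diff), assuming only the API implemented above: waits belong in a predicate loop because spurious wakeups are permitted.

Mutex mutex;
ConditionVariable cv;
bool ready = false;

void Producer() {
  LockGuard<Mutex> guard(&mutex);
  ready = true;
  cv.NotifyOne();                // wakes one waiter, if any
}

void Consumer() {
  LockGuard<Mutex> guard(&mutex);
  while (!ready) {               // re-check the predicate after every wakeup
    cv.Wait(&mutex);             // atomically releases and reacquires mutex
  }
}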
diff --git a/deps/v8/src/platform/condition-variable.h b/deps/v8/src/platform/condition-variable.h
new file mode 100644
index 000000000..4d8a88aee
--- /dev/null
+++ b/deps/v8/src/platform/condition-variable.h
@@ -0,0 +1,140 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_PLATFORM_CONDITION_VARIABLE_H_
+#define V8_PLATFORM_CONDITION_VARIABLE_H_
+
+#include "platform/mutex.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class ConditionVariableEvent;
+class TimeDelta;
+
+// -----------------------------------------------------------------------------
+// ConditionVariable
+//
+// This class is a synchronization primitive that can be used to block a thread,
+// or multiple threads at the same time, until:
+// - a notification is received from another thread,
+// - a timeout expires, or
+// - a spurious wakeup occurs.
+// Any thread that intends to wait on a ConditionVariable has to acquire a lock
+// on a Mutex first. The |Wait()| and |WaitFor()| operations atomically release
+// the mutex and suspend the execution of the calling thread. When the condition
+// variable is notified, the thread is awakened, and the mutex is reacquired.
+
+class ConditionVariable V8_FINAL {
+ public:
+ ConditionVariable();
+ ~ConditionVariable();
+
+ // If any threads are waiting on this condition variable, calling
+ // |NotifyOne()| unblocks one of the waiting threads.
+ void NotifyOne();
+
+ // Unblocks all threads currently waiting for this condition variable.
+ void NotifyAll();
+
+ // |Wait()| causes the calling thread to block until the condition variable is
+ // notified or a spurious wakeup occurs. Atomically releases the mutex, blocks
+ // the current executing thread, and adds it to the list of threads waiting on
+ // this condition variable. The thread will be unblocked when |NotifyAll()| or
+ // |NotifyOne()| is executed. It may also be unblocked spuriously. When
+ // unblocked, regardless of the reason, the lock on the mutex is reacquired
+ // and |Wait()| exits.
+ void Wait(Mutex* mutex);
+
+ // Atomically releases the mutex, blocks the current executing thread, and
+ // adds it to the list of threads waiting on this condition variable. The
+ // thread will be unblocked when |NotifyAll()| or |NotifyOne()| is executed,
+ // or when the relative timeout |rel_time| expires. It may also be unblocked
+ // spuriously. When unblocked, regardless of the reason, the lock on the mutex
+ // is reacquired and |WaitFor()| exits. Returns true if the condition variable
+ // was notified prior to the timeout.
+ bool WaitFor(Mutex* mutex, const TimeDelta& rel_time) V8_WARN_UNUSED_RESULT;
+
+ // The implementation-defined native handle type.
+#if V8_OS_POSIX
+ typedef pthread_cond_t NativeHandle;
+#elif V8_OS_WIN
+ struct Event;
+ class NativeHandle V8_FINAL {
+ public:
+ NativeHandle() : waitlist_(NULL), freelist_(NULL) {}
+ ~NativeHandle();
+
+ Event* Pre() V8_WARN_UNUSED_RESULT;
+ void Post(Event* event, bool result);
+
+ Mutex* mutex() { return &mutex_; }
+ Event* waitlist() { return waitlist_; }
+
+ private:
+ Event* waitlist_;
+ Event* freelist_;
+ Mutex mutex_;
+
+ DISALLOW_COPY_AND_ASSIGN(NativeHandle);
+ };
+#endif
+
+ NativeHandle& native_handle() {
+ return native_handle_;
+ }
+ const NativeHandle& native_handle() const {
+ return native_handle_;
+ }
+
+ private:
+ NativeHandle native_handle_;
+
+ DISALLOW_COPY_AND_ASSIGN(ConditionVariable);
+};
+
+
+// POD ConditionVariable initialized lazily (i.e. the first time Pointer() is
+// called).
+// Usage:
+// static LazyConditionVariable my_condvar =
+// LAZY_CONDITION_VARIABLE_INITIALIZER;
+//
+// void my_function() {
+// LockGuard<Mutex> lock_guard(&my_mutex);
+// my_condvar.Pointer()->Wait(&my_mutex);
+// }
+typedef LazyStaticInstance<ConditionVariable,
+ DefaultConstructTrait<ConditionVariable>,
+ ThreadSafeInitOnceTrait>::type LazyConditionVariable;
+
+#define LAZY_CONDITION_VARIABLE_INITIALIZER LAZY_STATIC_INSTANCE_INITIALIZER
+
+} } // namespace v8::internal
+
+#endif // V8_PLATFORM_CONDITION_VARIABLE_H_
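
A timed-wait sketch (not part of the diff): WaitFor() returns false on timeout, so the caller can distinguish notification from expiry. TimeDelta::FromMilliseconds() is assumed here from platform/time.h, which is not shown in this diff.

void ConsumerWithTimeout(Mutex* mutex, ConditionVariable* cv, bool* ready) {
  LockGuard<Mutex> guard(mutex);
  while (!*ready) {
    if (!cv->WaitFor(mutex, TimeDelta::FromMilliseconds(100))) {
      return;  // timed out; *ready is still false
    }
  }
}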
diff --git a/deps/v8/src/platform/elapsed-timer.h b/deps/v8/src/platform/elapsed-timer.h
new file mode 100644
index 000000000..2311db2f5
--- /dev/null
+++ b/deps/v8/src/platform/elapsed-timer.h
@@ -0,0 +1,120 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_PLATFORM_ELAPSED_TIMER_H_
+#define V8_PLATFORM_ELAPSED_TIMER_H_
+
+#include "checks.h"
+#include "platform/time.h"
+
+namespace v8 {
+namespace internal {
+
+class ElapsedTimer V8_FINAL BASE_EMBEDDED {
+ public:
+#ifdef DEBUG
+ ElapsedTimer() : started_(false) {}
+#endif
+
+ // Starts this timer. Once started a timer can be checked with
+ // |Elapsed()| or |HasExpired()|, and may be restarted using |Restart()|.
+ // This method must not be called on an already started timer.
+ void Start() {
+ ASSERT(!IsStarted());
+ start_ticks_ = Now();
+#ifdef DEBUG
+ started_ = true;
+#endif
+ ASSERT(IsStarted());
+ }
+
+ // Stops this timer. Must not be called on a timer that was not
+ // started before.
+ void Stop() {
+ ASSERT(IsStarted());
+ start_ticks_ = TimeTicks();
+#ifdef DEBUG
+ started_ = false;
+#endif
+ ASSERT(!IsStarted());
+ }
+
+ // Returns |true| if this timer was started previously.
+ bool IsStarted() const {
+ ASSERT(started_ || start_ticks_.IsNull());
+ ASSERT(!started_ || !start_ticks_.IsNull());
+ return !start_ticks_.IsNull();
+ }
+
+ // Restarts the timer and returns the time elapsed since the previous start.
+ // This method is equivalent to obtaining the elapsed time with |Elapsed()|
+ // and then starting the timer again, but does so in one single operation,
+ // avoiding the need to obtain the clock value twice. It may only be called
+ // on a previously started timer.
+ TimeDelta Restart() {
+ ASSERT(IsStarted());
+ TimeTicks ticks = Now();
+ TimeDelta elapsed = ticks - start_ticks_;
+ ASSERT(elapsed.InMicroseconds() >= 0);
+ start_ticks_ = ticks;
+ ASSERT(IsStarted());
+ return elapsed;
+ }
+
+ // Returns the time elapsed since the previous start. This method may only
+ // be called on a previously started timer.
+ TimeDelta Elapsed() const {
+ ASSERT(IsStarted());
+ TimeDelta elapsed = Now() - start_ticks_;
+ ASSERT(elapsed.InMicroseconds() >= 0);
+ return elapsed;
+ }
+
+ // Returns |true| if the specified |time_delta| has elapsed since the
+ // previous start, or |false| if not. This method may only be called on
+ // a previously started timer.
+ bool HasExpired(TimeDelta time_delta) const {
+ ASSERT(IsStarted());
+ return Elapsed() >= time_delta;
+ }
+
+ private:
+ static V8_INLINE TimeTicks Now() {
+ TimeTicks now = TimeTicks::HighResNow();
+ ASSERT(!now.IsNull());
+ return now;
+ }
+
+ TimeTicks start_ticks_;
+#ifdef DEBUG
+ bool started_;
+#endif
+};
+
+} } // namespace v8::internal
+
+#endif // V8_PLATFORM_ELAPSED_TIMER_H_
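
A usage sketch (not part of the diff): Restart() reads the clock once to both report and reset, avoiding two HighResNow() calls between phases. TimeDelta::FromSeconds() is assumed here from platform/time.h.

void TimedPhases() {
  ElapsedTimer timer;
  timer.Start();
  // ... phase one ...
  TimeDelta first = timer.Restart();  // elapsed so far; timer keeps running
  // ... phase two ...
  TimeDelta second = timer.Elapsed();
  if (timer.HasExpired(TimeDelta::FromSeconds(1))) {
    // total budget exceeded
  }
  USE(first);
  USE(second);
}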
diff --git a/deps/v8/src/platform/mutex.cc b/deps/v8/src/platform/mutex.cc
new file mode 100644
index 000000000..ad9774099
--- /dev/null
+++ b/deps/v8/src/platform/mutex.cc
@@ -0,0 +1,214 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "platform/mutex.h"
+
+#include <cerrno>
+
+namespace v8 {
+namespace internal {
+
+#if V8_OS_POSIX
+
+static V8_INLINE void InitializeNativeHandle(pthread_mutex_t* mutex) {
+ int result;
+#if defined(DEBUG)
+ // Use an error checking mutex in debug mode.
+ pthread_mutexattr_t attr;
+ result = pthread_mutexattr_init(&attr);
+ ASSERT_EQ(0, result);
+ result = pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
+ ASSERT_EQ(0, result);
+ result = pthread_mutex_init(mutex, &attr);
+ ASSERT_EQ(0, result);
+ result = pthread_mutexattr_destroy(&attr);
+#else
+ // Use a fast mutex (default attributes).
+ result = pthread_mutex_init(mutex, NULL);
+#endif // defined(DEBUG)
+ ASSERT_EQ(0, result);
+ USE(result);
+}
+
+
+static V8_INLINE void InitializeRecursiveNativeHandle(pthread_mutex_t* mutex) {
+ pthread_mutexattr_t attr;
+ int result = pthread_mutexattr_init(&attr);
+ ASSERT_EQ(0, result);
+ result = pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
+ ASSERT_EQ(0, result);
+ result = pthread_mutex_init(mutex, &attr);
+ ASSERT_EQ(0, result);
+ result = pthread_mutexattr_destroy(&attr);
+ ASSERT_EQ(0, result);
+ USE(result);
+}
+
+
+static V8_INLINE void DestroyNativeHandle(pthread_mutex_t* mutex) {
+ int result = pthread_mutex_destroy(mutex);
+ ASSERT_EQ(0, result);
+ USE(result);
+}
+
+
+static V8_INLINE void LockNativeHandle(pthread_mutex_t* mutex) {
+ int result = pthread_mutex_lock(mutex);
+ ASSERT_EQ(0, result);
+ USE(result);
+}
+
+
+static V8_INLINE void UnlockNativeHandle(pthread_mutex_t* mutex) {
+ int result = pthread_mutex_unlock(mutex);
+ ASSERT_EQ(0, result);
+ USE(result);
+}
+
+
+static V8_INLINE bool TryLockNativeHandle(pthread_mutex_t* mutex) {
+ int result = pthread_mutex_trylock(mutex);
+ if (result == EBUSY) {
+ return false;
+ }
+ ASSERT_EQ(0, result);
+ return true;
+}
+
+#elif V8_OS_WIN
+
+static V8_INLINE void InitializeNativeHandle(PCRITICAL_SECTION cs) {
+ InitializeCriticalSection(cs);
+}
+
+
+static V8_INLINE void InitializeRecursiveNativeHandle(PCRITICAL_SECTION cs) {
+ InitializeCriticalSection(cs);
+}
+
+
+static V8_INLINE void DestroyNativeHandle(PCRITICAL_SECTION cs) {
+ DeleteCriticalSection(cs);
+}
+
+
+static V8_INLINE void LockNativeHandle(PCRITICAL_SECTION cs) {
+ EnterCriticalSection(cs);
+}
+
+
+static V8_INLINE void UnlockNativeHandle(PCRITICAL_SECTION cs) {
+ LeaveCriticalSection(cs);
+}
+
+
+static V8_INLINE bool TryLockNativeHandle(PCRITICAL_SECTION cs) {
+ return TryEnterCriticalSection(cs);
+}
+
+#endif // V8_OS_POSIX
+
+
+Mutex::Mutex() {
+ InitializeNativeHandle(&native_handle_);
+#ifdef DEBUG
+ level_ = 0;
+#endif
+}
+
+
+Mutex::~Mutex() {
+ DestroyNativeHandle(&native_handle_);
+ ASSERT_EQ(0, level_);
+}
+
+
+void Mutex::Lock() {
+ LockNativeHandle(&native_handle_);
+ AssertUnheldAndMark();
+}
+
+
+void Mutex::Unlock() {
+ AssertHeldAndUnmark();
+ UnlockNativeHandle(&native_handle_);
+}
+
+
+bool Mutex::TryLock() {
+ if (!TryLockNativeHandle(&native_handle_)) {
+ return false;
+ }
+ AssertUnheldAndMark();
+ return true;
+}
+
+
+RecursiveMutex::RecursiveMutex() {
+ InitializeRecursiveNativeHandle(&native_handle_);
+#ifdef DEBUG
+ level_ = 0;
+#endif
+}
+
+
+RecursiveMutex::~RecursiveMutex() {
+ DestroyNativeHandle(&native_handle_);
+ ASSERT_EQ(0, level_);
+}
+
+
+void RecursiveMutex::Lock() {
+ LockNativeHandle(&native_handle_);
+#ifdef DEBUG
+ ASSERT_LE(0, level_);
+ level_++;
+#endif
+}
+
+
+void RecursiveMutex::Unlock() {
+#ifdef DEBUG
+ ASSERT_LT(0, level_);
+ level_--;
+#endif
+ UnlockNativeHandle(&native_handle_);
+}
+
+
+bool RecursiveMutex::TryLock() {
+ if (!TryLockNativeHandle(&native_handle_)) {
+ return false;
+ }
+#ifdef DEBUG
+ ASSERT_LE(0, level_);
+ level_++;
+#endif
+ return true;
+}
+
+} } // namespace v8::internal
diff --git a/deps/v8/src/platform/mutex.h b/deps/v8/src/platform/mutex.h
new file mode 100644
index 000000000..0f899ca59
--- /dev/null
+++ b/deps/v8/src/platform/mutex.h
@@ -0,0 +1,238 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_PLATFORM_MUTEX_H_
+#define V8_PLATFORM_MUTEX_H_
+
+#include "lazy-instance.h"
+#if V8_OS_WIN
+#include "win32-headers.h"
+#endif
+
+#if V8_OS_POSIX
+#include <pthread.h> // NOLINT
+#endif
+
+namespace v8 {
+namespace internal {
+
+// ----------------------------------------------------------------------------
+// Mutex
+//
+// This class is a synchronization primitive that can be used to protect shared
+// data from being simultaneously accessed by multiple threads. A mutex offers
+// exclusive, non-recursive ownership semantics:
+// - A calling thread owns a mutex from the time that it successfully calls
+// either |Lock()| or |TryLock()| until it calls |Unlock()|.
+// - When a thread owns a mutex, all other threads will block (for calls to
+// |Lock()|) or receive a |false| return value (for |TryLock()|) if they
+// attempt to claim ownership of the mutex.
+// A calling thread must not own the mutex prior to calling |Lock()| or
+// |TryLock()|. The behavior of a program is undefined if a mutex is destroyed
+// while still owned by some thread. The Mutex class is non-copyable.
+
+class Mutex V8_FINAL {
+ public:
+ Mutex();
+ ~Mutex();
+
+ // Locks the given mutex. If the mutex is currently unlocked, it becomes
+ // locked and owned by the calling thread immediately. If the mutex
+ // is already locked by another thread, suspends the calling thread until
+ // the mutex is unlocked.
+ void Lock();
+
+ // Unlocks the given mutex. The mutex is assumed to be locked and owned by
+ // the calling thread on entrance.
+ void Unlock();
+
+ // Tries to lock the given mutex. Returns whether the mutex was
+ // successfully locked.
+ bool TryLock() V8_WARN_UNUSED_RESULT;
+
+ // The implementation-defined native handle type.
+#if V8_OS_POSIX
+ typedef pthread_mutex_t NativeHandle;
+#elif V8_OS_WIN
+ typedef CRITICAL_SECTION NativeHandle;
+#endif
+
+ NativeHandle& native_handle() {
+ return native_handle_;
+ }
+ const NativeHandle& native_handle() const {
+ return native_handle_;
+ }
+
+ private:
+ NativeHandle native_handle_;
+#ifdef DEBUG
+ int level_;
+#endif
+
+ V8_INLINE void AssertHeldAndUnmark() {
+#ifdef DEBUG
+ ASSERT_EQ(1, level_);
+ level_--;
+#endif
+ }
+
+ V8_INLINE void AssertUnheldAndMark() {
+#ifdef DEBUG
+ ASSERT_EQ(0, level_);
+ level_++;
+#endif
+ }
+
+ friend class ConditionVariable;
+
+ DISALLOW_COPY_AND_ASSIGN(Mutex);
+};
+
+
+// POD Mutex initialized lazily (i.e. the first time Pointer() is called).
+// Usage:
+// static LazyMutex my_mutex = LAZY_MUTEX_INITIALIZER;
+//
+// void my_function() {
+// LockGuard<Mutex> guard(my_mutex.Pointer());
+// // Do something.
+// }
+//
+typedef LazyStaticInstance<Mutex,
+ DefaultConstructTrait<Mutex>,
+ ThreadSafeInitOnceTrait>::type LazyMutex;
+
+#define LAZY_MUTEX_INITIALIZER LAZY_STATIC_INSTANCE_INITIALIZER
+
+
+// -----------------------------------------------------------------------------
+// RecursiveMutex
+//
+// This class is a synchronization primitive that can be used to protect shared
+// data from being simultaneously accessed by multiple threads. A recursive
+// mutex offers exclusive, recursive ownership semantics:
+// - A calling thread owns a recursive mutex for a period of time that starts
+// when it successfully calls either |Lock()| or |TryLock()|. During this
+// period, the thread may make additional calls to |Lock()| or |TryLock()|.
+// The period of ownership ends when the thread makes a matching number of
+// calls to |Unlock()|.
+// - When a thread owns a recursive mutex, all other threads will block (for
+// calls to |Lock()|) or receive a |false| return value (for |TryLock()|) if
+// they attempt to claim ownership of the recursive mutex.
+// - The maximum number of times that a recursive mutex may be locked is
+// unspecified, but after that number is reached, calls to |Lock()| will
+// probably abort the process and calls to |TryLock()| will return false.
+// The behavior of a program is undefined if a recursive mutex is destroyed
+// while still owned by some thread. The RecursiveMutex class is non-copyable.
+
+class RecursiveMutex V8_FINAL {
+ public:
+ RecursiveMutex();
+ ~RecursiveMutex();
+
+ // Locks the mutex. If another thread has already locked the mutex, a call to
+ // |Lock()| will block execution until the lock is acquired. A thread may call
+ // |Lock()| on a recursive mutex repeatedly. Ownership will only be released
+ // after the thread makes a matching number of calls to |Unlock()|.
+ // The behavior is undefined if the mutex is not unlocked before being
+ // destroyed, i.e. some thread still owns it.
+ void Lock();
+
+ // Unlocks the mutex if its level of ownership is 1 (there was exactly one
+ // more call to |Lock()| than there were calls to |Unlock()| made by this
+ // thread); otherwise it reduces the level of ownership by 1. The mutex must
+ // be locked by the current thread of execution, otherwise the behavior is
+ // undefined.
+ void Unlock();
+
+ // Tries to lock the given mutex. Returns whether the mutex was
+ // successfully locked.
+ bool TryLock() V8_WARN_UNUSED_RESULT;
+
+ // The implementation-defined native handle type.
+ typedef Mutex::NativeHandle NativeHandle;
+
+ NativeHandle& native_handle() {
+ return native_handle_;
+ }
+ const NativeHandle& native_handle() const {
+ return native_handle_;
+ }
+
+ private:
+ NativeHandle native_handle_;
+#ifdef DEBUG
+ int level_;
+#endif
+
+ DISALLOW_COPY_AND_ASSIGN(RecursiveMutex);
+};
+
+
+// POD RecursiveMutex initialized lazily (i.e. the first time Pointer() is
+// called).
+// Usage:
+// static LazyRecursiveMutex my_mutex = LAZY_RECURSIVE_MUTEX_INITIALIZER;
+//
+// void my_function() {
+// LockGuard<RecursiveMutex> guard(my_mutex.Pointer());
+// // Do something.
+// }
+//
+typedef LazyStaticInstance<RecursiveMutex,
+ DefaultConstructTrait<RecursiveMutex>,
+ ThreadSafeInitOnceTrait>::type LazyRecursiveMutex;
+
+#define LAZY_RECURSIVE_MUTEX_INITIALIZER LAZY_STATIC_INSTANCE_INITIALIZER
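
A usage sketch (not part of the diff): each Lock() taken by the owning thread nests and must be matched by an Unlock(), which the two LockGuards provide here.

RecursiveMutex recursive_mutex;

void Inner() {
  LockGuard<RecursiveMutex> guard(&recursive_mutex);  // level 1 -> 2, back to 1 on exit
}

void Outer() {
  LockGuard<RecursiveMutex> guard(&recursive_mutex);  // level 0 -> 1
  Inner();  // relocking on the same thread is legal for a recursive mutex
}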
+
+
+// -----------------------------------------------------------------------------
+// LockGuard
+//
+// This class is a mutex wrapper that provides a convenient RAII-style mechanism
+// for owning a mutex for the duration of a scoped block.
+// When a LockGuard object is created, it attempts to take ownership of the
+// mutex it is given. When control leaves the scope in which the LockGuard
+// object was created, the LockGuard is destructed and the mutex is released.
+// The LockGuard class is non-copyable.
+
+template <typename Mutex>
+class LockGuard V8_FINAL {
+ public:
+ explicit LockGuard(Mutex* mutex) : mutex_(mutex) { mutex_->Lock(); }
+ ~LockGuard() { mutex_->Unlock(); }
+
+ private:
+ Mutex* mutex_;
+
+ DISALLOW_COPY_AND_ASSIGN(LockGuard);
+};
+
+} } // namespace v8::internal
+
+#endif // V8_PLATFORM_MUTEX_H_
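
A TryLock sketch (not part of the diff): V8_WARN_UNUSED_RESULT forces callers to act on the return value instead of assuming the lock was acquired.

void TryFastPath(Mutex* mutex) {
  if (mutex->TryLock()) {
    // ... fast path, we own the mutex ...
    mutex->Unlock();
  } else {
    // contended: take a slow path without blocking
  }
}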
diff --git a/deps/v8/src/platform/semaphore.cc b/deps/v8/src/platform/semaphore.cc
new file mode 100644
index 000000000..c3e5826f4
--- /dev/null
+++ b/deps/v8/src/platform/semaphore.cc
@@ -0,0 +1,214 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "platform/semaphore.h"
+
+#if V8_OS_MACOSX
+#include <mach/mach_init.h>
+#include <mach/task.h>
+#endif
+
+#include <cerrno>
+
+#include "checks.h"
+#include "platform/time.h"
+
+namespace v8 {
+namespace internal {
+
+#if V8_OS_MACOSX
+
+Semaphore::Semaphore(int count) {
+ kern_return_t result = semaphore_create(
+ mach_task_self(), &native_handle_, SYNC_POLICY_FIFO, count);
+ ASSERT_EQ(KERN_SUCCESS, result);
+ USE(result);
+}
+
+
+Semaphore::~Semaphore() {
+ kern_return_t result = semaphore_destroy(mach_task_self(), native_handle_);
+ ASSERT_EQ(KERN_SUCCESS, result);
+ USE(result);
+}
+
+
+void Semaphore::Signal() {
+ kern_return_t result = semaphore_signal(native_handle_);
+ ASSERT_EQ(KERN_SUCCESS, result);
+ USE(result);
+}
+
+
+void Semaphore::Wait() {
+ while (true) {
+ kern_return_t result = semaphore_wait(native_handle_);
+ if (result == KERN_SUCCESS) return; // Semaphore was signalled.
+ ASSERT_EQ(KERN_ABORTED, result);
+ }
+}
+
+
+bool Semaphore::WaitFor(const TimeDelta& rel_time) {
+ TimeTicks now = TimeTicks::Now();
+ TimeTicks end = now + rel_time;
+ while (true) {
+ mach_timespec_t ts;
+ if (now >= end) {
+ // Return immediately if semaphore was not signalled.
+ ts.tv_sec = 0;
+ ts.tv_nsec = 0;
+ } else {
+ ts = (end - now).ToMachTimespec();
+ }
+ kern_return_t result = semaphore_timedwait(native_handle_, ts);
+ if (result == KERN_SUCCESS) return true; // Semaphore was signalled.
+ if (result == KERN_OPERATION_TIMED_OUT) return false; // Timeout.
+ ASSERT_EQ(KERN_ABORTED, result);
+ now = TimeTicks::Now();
+ }
+}
+
+#elif V8_OS_POSIX
+
+Semaphore::Semaphore(int count) {
+ ASSERT(count >= 0);
+ int result = sem_init(&native_handle_, 0, count);
+ ASSERT_EQ(0, result);
+ USE(result);
+}
+
+
+Semaphore::~Semaphore() {
+ int result = sem_destroy(&native_handle_);
+ ASSERT_EQ(0, result);
+ USE(result);
+}
+
+
+void Semaphore::Signal() {
+ int result = sem_post(&native_handle_);
+ ASSERT_EQ(0, result);
+ USE(result);
+}
+
+
+void Semaphore::Wait() {
+ while (true) {
+ int result = sem_wait(&native_handle_);
+ if (result == 0) return; // Semaphore was signalled.
+ // Signal caused spurious wakeup.
+ ASSERT_EQ(-1, result);
+ ASSERT_EQ(EINTR, errno);
+ }
+}
+
+
+bool Semaphore::WaitFor(const TimeDelta& rel_time) {
+ // Compute the time for end of timeout.
+ const Time time = Time::NowFromSystemTime() + rel_time;
+ const struct timespec ts = time.ToTimespec();
+
+ // Wait for semaphore signalled or timeout.
+ while (true) {
+ int result = sem_timedwait(&native_handle_, &ts);
+ if (result == 0) return true; // Semaphore was signalled.
+#if V8_LIBC_GLIBC && !V8_GLIBC_PREREQ(2, 4)
+ if (result > 0) {
+ // sem_timedwait in glibc prior to 2.3.4 returns the errno instead of -1.
+ errno = result;
+ result = -1;
+ }
+#endif
+ if (result == -1 && errno == ETIMEDOUT) {
+ // Timed out while waiting for semaphore.
+ return false;
+ }
+ // Signal caused spurious wakeup.
+ ASSERT_EQ(-1, result);
+ ASSERT_EQ(EINTR, errno);
+ }
+}
+
+#elif V8_OS_WIN
+
+Semaphore::Semaphore(int count) {
+ ASSERT(count >= 0);
+ native_handle_ = ::CreateSemaphoreA(NULL, count, 0x7fffffff, NULL);
+ ASSERT(native_handle_ != NULL);
+}
+
+
+Semaphore::~Semaphore() {
+ BOOL result = CloseHandle(native_handle_);
+ ASSERT(result);
+ USE(result);
+}
+
+
+void Semaphore::Signal() {
+ LONG dummy;
+ BOOL result = ReleaseSemaphore(native_handle_, 1, &dummy);
+ ASSERT(result);
+ USE(result);
+}
+
+
+void Semaphore::Wait() {
+ DWORD result = WaitForSingleObject(native_handle_, INFINITE);
+ ASSERT(result == WAIT_OBJECT_0);
+ USE(result);
+}
+
+
+bool Semaphore::WaitFor(const TimeDelta& rel_time) {
+ TimeTicks now = TimeTicks::Now();
+ TimeTicks end = now + rel_time;
+ while (true) {
+ int64_t msec = (end - now).InMilliseconds();
+ if (msec >= static_cast<int64_t>(INFINITE)) {
+ DWORD result = WaitForSingleObject(native_handle_, INFINITE - 1);
+ if (result == WAIT_OBJECT_0) {
+ return true;
+ }
+ ASSERT(result == WAIT_TIMEOUT);
+ now = TimeTicks::Now();
+ } else {
+ DWORD result = WaitForSingleObject(
+ native_handle_, (msec < 0) ? 0 : static_cast<DWORD>(msec));
+ if (result == WAIT_TIMEOUT) {
+ return false;
+ }
+ ASSERT(result == WAIT_OBJECT_0);
+ return true;
+ }
+ }
+}
+
+#endif // V8_OS_MACOSX
+
+} } // namespace v8::internal
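
A usage sketch (not part of the diff): a bounded handshake with a worker thread. On timeout the counter is unchanged, as documented in semaphore.h below; TimeDelta::FromSeconds() is assumed here from platform/time.h.

bool WaitForWorker(Semaphore* done) {
  // The worker thread calls done->Signal() when finished.
  return done->WaitFor(TimeDelta::FromSeconds(5));  // false if it timed out
}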
diff --git a/deps/v8/src/platform/semaphore.h b/deps/v8/src/platform/semaphore.h
new file mode 100644
index 000000000..2cfa14211
--- /dev/null
+++ b/deps/v8/src/platform/semaphore.h
@@ -0,0 +1,126 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_PLATFORM_SEMAPHORE_H_
+#define V8_PLATFORM_SEMAPHORE_H_
+
+#include "lazy-instance.h"
+#if V8_OS_WIN
+#include "win32-headers.h"
+#endif
+
+#if V8_OS_MACOSX
+#include <mach/semaphore.h> // NOLINT
+#elif V8_OS_POSIX
+#include <semaphore.h> // NOLINT
+#endif
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class TimeDelta;
+
+// ----------------------------------------------------------------------------
+// Semaphore
+//
+// A semaphore object is a synchronization object that maintains a count. The
+// count is decremented each time a thread completes a wait for the semaphore
+// object and incremented each time a thread signals the semaphore. When the
+// count reaches zero, threads waiting for the semaphore block until the
+// count becomes non-zero.
+
+class Semaphore V8_FINAL {
+ public:
+ explicit Semaphore(int count);
+ ~Semaphore();
+
+ // Increments the semaphore counter.
+ void Signal();
+
+  // Suspends the calling thread until the semaphore counter is non-zero
+ // and then decrements the semaphore counter.
+ void Wait();
+
+  // Suspends the calling thread until the counter is non-zero or the timeout
+  // has expired. On timeout the return value is false and the counter is
+  // unchanged. Otherwise the semaphore counter is decremented and true is
+  // returned.
+ bool WaitFor(const TimeDelta& rel_time) V8_WARN_UNUSED_RESULT;
+
+#if V8_OS_MACOSX
+ typedef semaphore_t NativeHandle;
+#elif V8_OS_POSIX
+ typedef sem_t NativeHandle;
+#elif V8_OS_WIN
+ typedef HANDLE NativeHandle;
+#endif
+
+ NativeHandle& native_handle() {
+ return native_handle_;
+ }
+ const NativeHandle& native_handle() const {
+ return native_handle_;
+ }
+
+ private:
+ NativeHandle native_handle_;
+
+ DISALLOW_COPY_AND_ASSIGN(Semaphore);
+};
+
+
+// POD Semaphore initialized lazily (i.e. the first time Pointer() is called).
+// Usage:
+// // The following semaphore starts at 0.
+// static LazySemaphore<0>::type my_semaphore = LAZY_SEMAPHORE_INITIALIZER;
+//
+// void my_function() {
+// // Do something with my_semaphore.Pointer().
+// }
+//
+
+template <int N>
+struct CreateSemaphoreTrait {
+ static Semaphore* Create() {
+ return new Semaphore(N);
+ }
+};
+
+template <int N>
+struct LazySemaphore {
+ typedef typename LazyDynamicInstance<
+ Semaphore,
+ CreateSemaphoreTrait<N>,
+ ThreadSafeInitOnceTrait>::type type;
+};
+
+#define LAZY_SEMAPHORE_INITIALIZER LAZY_DYNAMIC_INSTANCE_INITIALIZER
+
+} } // namespace v8::internal
+
+#endif // V8_PLATFORM_SEMAPHORE_H_
diff --git a/deps/v8/src/platform/socket.cc b/deps/v8/src/platform/socket.cc
new file mode 100644
index 000000000..2fce6f299
--- /dev/null
+++ b/deps/v8/src/platform/socket.cc
@@ -0,0 +1,224 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "platform/socket.h"
+
+#if V8_OS_POSIX
+#include <sys/types.h>
+#include <sys/socket.h>
+
+#include <netinet/in.h>
+#include <netdb.h>
+
+#include <unistd.h>
+#endif
+
+#include <cerrno>
+
+#include "checks.h"
+#include "once.h"
+
+namespace v8 {
+namespace internal {
+
+#if V8_OS_WIN
+
+static V8_DECLARE_ONCE(initialize_winsock) = V8_ONCE_INIT;
+
+
+static void InitializeWinsock() {
+ WSADATA wsa_data;
+ int result = WSAStartup(MAKEWORD(1, 0), &wsa_data);
+ CHECK_EQ(0, result);
+}
+
+#endif // V8_OS_WIN
+
+
+Socket::Socket() {
+#if V8_OS_WIN
+ // Be sure to initialize the WinSock DLL first.
+ CallOnce(&initialize_winsock, &InitializeWinsock);
+#endif // V8_OS_WIN
+
+ // Create the native socket handle.
+ native_handle_ = ::socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
+}
+
+
+bool Socket::Bind(int port) {
+ ASSERT_GE(port, 0);
+ ASSERT_LT(port, 65536);
+ if (!IsValid()) return false;
+ struct sockaddr_in sin;
+ memset(&sin, 0, sizeof(sin));
+ sin.sin_family = AF_INET;
+ sin.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
+ sin.sin_port = htons(static_cast<uint16_t>(port));
+ int result = ::bind(
+ native_handle_, reinterpret_cast<struct sockaddr*>(&sin), sizeof(sin));
+ return result == 0;
+}
+
+
+bool Socket::Listen(int backlog) {
+ if (!IsValid()) return false;
+ int result = ::listen(native_handle_, backlog);
+ return result == 0;
+}
+
+
+Socket* Socket::Accept() {
+ if (!IsValid()) return NULL;
+ while (true) {
+ NativeHandle native_handle = ::accept(native_handle_, NULL, NULL);
+ if (native_handle == kInvalidNativeHandle) {
+#if V8_OS_POSIX
+ if (errno == EINTR) continue; // Retry after signal.
+#endif
+ return NULL;
+ }
+ return new Socket(native_handle);
+ }
+}
+
+
+bool Socket::Connect(const char* host, const char* port) {
+ ASSERT_NE(NULL, host);
+ ASSERT_NE(NULL, port);
+ if (!IsValid()) return false;
+
+  // Look up the host and port.
+ struct addrinfo* info = NULL;
+ struct addrinfo hint;
+ memset(&hint, 0, sizeof(hint));
+ hint.ai_family = AF_INET;
+ hint.ai_socktype = SOCK_STREAM;
+ hint.ai_protocol = IPPROTO_TCP;
+ int result = ::getaddrinfo(host, port, &hint, &info);
+ if (result != 0) {
+ return false;
+ }
+
+ // Connect to the host on the given port.
+ for (struct addrinfo* ai = info; ai != NULL; ai = ai->ai_next) {
+ // Try to connect using this addr info.
+ while (true) {
+ result = ::connect(
+ native_handle_, ai->ai_addr, static_cast<int>(ai->ai_addrlen));
+ if (result == 0) {
+ freeaddrinfo(info);
+ return true;
+ }
+#if V8_OS_POSIX
+ if (errno == EINTR) continue; // Retry after signal.
+#endif
+ break;
+ }
+ }
+ freeaddrinfo(info);
+ return false;
+}
+
+
+bool Socket::Shutdown() {
+ if (!IsValid()) return false;
+  // Shut down the socket for both read and write.
+#if V8_OS_POSIX
+ int result = ::shutdown(native_handle_, SHUT_RDWR);
+ ::close(native_handle_);
+#elif V8_OS_WIN
+ int result = ::shutdown(native_handle_, SD_BOTH);
+ ::closesocket(native_handle_);
+#endif
+ native_handle_ = kInvalidNativeHandle;
+ return result == 0;
+}
+
+
+int Socket::Send(const char* buffer, int length) {
+ ASSERT(length <= 0 || buffer != NULL);
+ if (!IsValid()) return 0;
+ int offset = 0;
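+  // ::send() may transmit fewer bytes than requested, so keep sending from
+  // the current offset until the whole buffer is out or an error occurs.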
+ while (offset < length) {
+ int result = ::send(native_handle_, buffer + offset, length - offset, 0);
+ if (result == 0) {
+ break;
+ } else if (result > 0) {
+ ASSERT(result <= length - offset);
+ offset += result;
+ } else {
+#if V8_OS_POSIX
+ if (errno == EINTR) continue; // Retry after signal.
+#endif
+ return 0;
+ }
+ }
+ return offset;
+}
+
+
+int Socket::Receive(char* buffer, int length) {
+ if (!IsValid()) return 0;
+ if (length <= 0) return 0;
+ ASSERT_NE(NULL, buffer);
+ while (true) {
+ int result = ::recv(native_handle_, buffer, length, 0);
+ if (result < 0) {
+#if V8_OS_POSIX
+ if (errno == EINTR) continue; // Retry after signal.
+#endif
+ return 0;
+ }
+ return result;
+ }
+}
+
+
+bool Socket::SetReuseAddress(bool reuse_address) {
+  if (!IsValid()) return false;
+ int v = reuse_address ? 1 : 0;
+ int result = ::setsockopt(native_handle_, SOL_SOCKET, SO_REUSEADDR,
+ reinterpret_cast<char*>(&v), sizeof(v));
+ return result == 0;
+}
+
+
+// static
+int Socket::GetLastError() {
+#if V8_OS_POSIX
+ return errno;
+#elif V8_OS_WIN
+ // Be sure to initialize the WinSock DLL first.
+ CallOnce(&initialize_winsock, &InitializeWinsock);
+
+ // Now we can safely perform WSA calls.
+ return ::WSAGetLastError();
+#endif
+}
+
+} } // namespace v8::internal
diff --git a/deps/v8/src/platform/socket.h b/deps/v8/src/platform/socket.h
new file mode 100644
index 000000000..ff8c1de7c
--- /dev/null
+++ b/deps/v8/src/platform/socket.h
@@ -0,0 +1,101 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_PLATFORM_SOCKET_H_
+#define V8_PLATFORM_SOCKET_H_
+
+#include "globals.h"
+#if V8_OS_WIN
+#include "win32-headers.h"
+#endif
+
+namespace v8 {
+namespace internal {
+
+// ----------------------------------------------------------------------------
+// Socket
+//
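+// A sketch of typical client-side usage (host and port are illustrative):
+//
+//   Socket client;
+//   if (client.Connect("localhost", "5858")) {
+//     static const char kMessage[] = "hello";
+//     int sent = client.Send(kMessage, sizeof(kMessage) - 1);
+//     if (sent == 0) { /* send failed */ }
+//   }
+//
+// A server instead calls Bind(), Listen() and Accept(), in that order.
+//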
+
+class Socket V8_FINAL {
+ public:
+ Socket();
+ ~Socket() { Shutdown(); }
+
+ // Server initialization.
+ bool Bind(int port) V8_WARN_UNUSED_RESULT;
+ bool Listen(int backlog) V8_WARN_UNUSED_RESULT;
+ Socket* Accept() V8_WARN_UNUSED_RESULT;
+
+ // Client initialization.
+ bool Connect(const char* host, const char* port) V8_WARN_UNUSED_RESULT;
+
+  // Shuts down the socket for both read and write. This causes blocking Send and
+ // Receive calls to exit. After |Shutdown()| the Socket object cannot be
+ // used for any communication.
+ bool Shutdown();
+
+  // Data transmission.
+ // Return 0 on failure.
+ int Send(const char* buffer, int length) V8_WARN_UNUSED_RESULT;
+ int Receive(char* buffer, int length) V8_WARN_UNUSED_RESULT;
+
+ // Set the value of the SO_REUSEADDR socket option.
+ bool SetReuseAddress(bool reuse_address);
+
+ V8_INLINE bool IsValid() const {
+ return native_handle_ != kInvalidNativeHandle;
+ }
+
+ static int GetLastError();
+
+ // The implementation-defined native handle type.
+#if V8_OS_POSIX
+ typedef int NativeHandle;
+ static const NativeHandle kInvalidNativeHandle = -1;
+#elif V8_OS_WIN
+ typedef SOCKET NativeHandle;
+ static const NativeHandle kInvalidNativeHandle = INVALID_SOCKET;
+#endif
+
+ NativeHandle& native_handle() {
+ return native_handle_;
+ }
+ const NativeHandle& native_handle() const {
+ return native_handle_;
+ }
+
+ private:
+ explicit Socket(NativeHandle native_handle) : native_handle_(native_handle) {}
+
+ NativeHandle native_handle_;
+
+ DISALLOW_COPY_AND_ASSIGN(Socket);
+};
+
+} } // namespace v8::internal
+
+#endif // V8_PLATFORM_SOCKET_H_
diff --git a/deps/v8/src/platform/time.cc b/deps/v8/src/platform/time.cc
new file mode 100644
index 000000000..ea6dd2c0b
--- /dev/null
+++ b/deps/v8/src/platform/time.cc
@@ -0,0 +1,613 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "platform/time.h"
+
+#if V8_OS_POSIX
+#include <sys/time.h>
+#endif
+#if V8_OS_MACOSX
+#include <mach/mach_time.h>
+#endif
+
+#include <cstring>
+
+#include "checks.h"
+#include "cpu.h"
+#include "platform.h"
+#if V8_OS_WIN
+#include "win32-headers.h"
+#endif
+
+#if V8_OS_WIN
+// Prototype for GetTickCount64() procedure.
+extern "C" {
+typedef ULONGLONG (WINAPI *GETTICKCOUNT64PROC)(void);
+}
+#endif
+
+namespace v8 {
+namespace internal {
+
+TimeDelta TimeDelta::FromDays(int days) {
+ return TimeDelta(days * Time::kMicrosecondsPerDay);
+}
+
+
+TimeDelta TimeDelta::FromHours(int hours) {
+ return TimeDelta(hours * Time::kMicrosecondsPerHour);
+}
+
+
+TimeDelta TimeDelta::FromMinutes(int minutes) {
+ return TimeDelta(minutes * Time::kMicrosecondsPerMinute);
+}
+
+
+TimeDelta TimeDelta::FromSeconds(int64_t seconds) {
+ return TimeDelta(seconds * Time::kMicrosecondsPerSecond);
+}
+
+
+TimeDelta TimeDelta::FromMilliseconds(int64_t milliseconds) {
+ return TimeDelta(milliseconds * Time::kMicrosecondsPerMillisecond);
+}
+
+
+TimeDelta TimeDelta::FromNanoseconds(int64_t nanoseconds) {
+ return TimeDelta(nanoseconds / Time::kNanosecondsPerMicrosecond);
+}
+
+
+int TimeDelta::InDays() const {
+ return static_cast<int>(delta_ / Time::kMicrosecondsPerDay);
+}
+
+
+int TimeDelta::InHours() const {
+ return static_cast<int>(delta_ / Time::kMicrosecondsPerHour);
+}
+
+
+int TimeDelta::InMinutes() const {
+ return static_cast<int>(delta_ / Time::kMicrosecondsPerMinute);
+}
+
+
+double TimeDelta::InSecondsF() const {
+ return static_cast<double>(delta_) / Time::kMicrosecondsPerSecond;
+}
+
+
+int64_t TimeDelta::InSeconds() const {
+ return delta_ / Time::kMicrosecondsPerSecond;
+}
+
+
+double TimeDelta::InMillisecondsF() const {
+ return static_cast<double>(delta_) / Time::kMicrosecondsPerMillisecond;
+}
+
+
+int64_t TimeDelta::InMilliseconds() const {
+ return delta_ / Time::kMicrosecondsPerMillisecond;
+}
+
+
+int64_t TimeDelta::InNanoseconds() const {
+ return delta_ * Time::kNanosecondsPerMicrosecond;
+}
+
+
+#if V8_OS_MACOSX
+
+TimeDelta TimeDelta::FromMachTimespec(struct mach_timespec ts) {
+ ASSERT_GE(ts.tv_nsec, 0);
+ ASSERT_LT(ts.tv_nsec,
+ static_cast<long>(Time::kNanosecondsPerSecond)); // NOLINT
+ return TimeDelta(ts.tv_sec * Time::kMicrosecondsPerSecond +
+ ts.tv_nsec / Time::kNanosecondsPerMicrosecond);
+}
+
+
+struct mach_timespec TimeDelta::ToMachTimespec() const {
+ struct mach_timespec ts;
+ ASSERT(delta_ >= 0);
+ ts.tv_sec = delta_ / Time::kMicrosecondsPerSecond;
+ ts.tv_nsec = (delta_ % Time::kMicrosecondsPerSecond) *
+ Time::kNanosecondsPerMicrosecond;
+ return ts;
+}
+
+#endif // V8_OS_MACOSX
+
+
+#if V8_OS_POSIX
+
+TimeDelta TimeDelta::FromTimespec(struct timespec ts) {
+ ASSERT_GE(ts.tv_nsec, 0);
+ ASSERT_LT(ts.tv_nsec,
+ static_cast<long>(Time::kNanosecondsPerSecond)); // NOLINT
+ return TimeDelta(ts.tv_sec * Time::kMicrosecondsPerSecond +
+ ts.tv_nsec / Time::kNanosecondsPerMicrosecond);
+}
+
+
+struct timespec TimeDelta::ToTimespec() const {
+ struct timespec ts;
+ ts.tv_sec = delta_ / Time::kMicrosecondsPerSecond;
+ ts.tv_nsec = (delta_ % Time::kMicrosecondsPerSecond) *
+ Time::kNanosecondsPerMicrosecond;
+ return ts;
+}
+
+#endif // V8_OS_POSIX
+
+
+#if V8_OS_WIN
+
+// We implement time using the high-resolution timers so that we can get
+// timeouts that are smaller than 10-15 ms. To avoid any drift, we
+// periodically resynchronize the internal clock with the system clock.
+class Clock V8_FINAL {
+ public:
+ Clock() : initial_time_(CurrentWallclockTime()),
+ initial_ticks_(TimeTicks::Now()) {}
+
+ Time Now() {
+ // This must be executed under lock.
+ LockGuard<Mutex> lock_guard(&mutex_);
+
+ // Calculate the time elapsed since we started our timer.
+ TimeDelta elapsed = TimeTicks::Now() - initial_ticks_;
+
+ // Check if we don't need to synchronize with the wallclock yet.
+ if (elapsed.InMicroseconds() <= kMaxMicrosecondsToAvoidDrift) {
+ return initial_time_ + elapsed;
+ }
+
+ // Resynchronize with the wallclock.
+ initial_ticks_ = TimeTicks::Now();
+ initial_time_ = CurrentWallclockTime();
+ return initial_time_;
+ }
+
+ Time NowFromSystemTime() {
+ // This must be executed under lock.
+ LockGuard<Mutex> lock_guard(&mutex_);
+
+ // Resynchronize with the wallclock.
+ initial_ticks_ = TimeTicks::Now();
+ initial_time_ = CurrentWallclockTime();
+ return initial_time_;
+ }
+
+ private:
+  // Time between resampling the low-granularity clock for this API
+  // (1 minute).
+ static const int64_t kMaxMicrosecondsToAvoidDrift =
+ Time::kMicrosecondsPerMinute;
+
+ static Time CurrentWallclockTime() {
+ FILETIME ft;
+ ::GetSystemTimeAsFileTime(&ft);
+ return Time::FromFiletime(ft);
+ }
+
+ TimeTicks initial_ticks_;
+ Time initial_time_;
+ Mutex mutex_;
+};
+
+
+static LazyDynamicInstance<Clock,
+ DefaultCreateTrait<Clock>,
+ ThreadSafeInitOnceTrait>::type clock = LAZY_DYNAMIC_INSTANCE_INITIALIZER;
+
+
+Time Time::Now() {
+ return clock.Pointer()->Now();
+}
+
+
+Time Time::NowFromSystemTime() {
+ return clock.Pointer()->NowFromSystemTime();
+}
+
+
+// Time between the Windows epoch (1601-01-01) and the Unix epoch (1970-01-01).
+static const int64_t kTimeToEpochInMicroseconds = V8_INT64_C(11644473600000000);
+
+
+Time Time::FromFiletime(FILETIME ft) {
+ if (ft.dwLowDateTime == 0 && ft.dwHighDateTime == 0) {
+ return Time();
+ }
+ if (ft.dwLowDateTime == std::numeric_limits<DWORD>::max() &&
+ ft.dwHighDateTime == std::numeric_limits<DWORD>::max()) {
+ return Max();
+ }
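+  // FILETIME counts 100 ns intervals since the Windows epoch; dividing by 10
+  // converts to microseconds before rebasing onto the Unix epoch below.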
+ int64_t us = (static_cast<uint64_t>(ft.dwLowDateTime) +
+ (static_cast<uint64_t>(ft.dwHighDateTime) << 32)) / 10;
+ return Time(us - kTimeToEpochInMicroseconds);
+}
+
+
+FILETIME Time::ToFiletime() const {
+ ASSERT(us_ >= 0);
+ FILETIME ft;
+ if (IsNull()) {
+ ft.dwLowDateTime = 0;
+ ft.dwHighDateTime = 0;
+ return ft;
+ }
+ if (IsMax()) {
+ ft.dwLowDateTime = std::numeric_limits<DWORD>::max();
+ ft.dwHighDateTime = std::numeric_limits<DWORD>::max();
+ return ft;
+ }
+ uint64_t us = static_cast<uint64_t>(us_ + kTimeToEpochInMicroseconds) * 10;
+ ft.dwLowDateTime = static_cast<DWORD>(us);
+ ft.dwHighDateTime = static_cast<DWORD>(us >> 32);
+ return ft;
+}
+
+#elif V8_OS_POSIX
+
+Time Time::Now() {
+ struct timeval tv;
+ int result = gettimeofday(&tv, NULL);
+ ASSERT_EQ(0, result);
+ USE(result);
+ return FromTimeval(tv);
+}
+
+
+Time Time::NowFromSystemTime() {
+ return Now();
+}
+
+
+Time Time::FromTimespec(struct timespec ts) {
+ ASSERT(ts.tv_nsec >= 0);
+ ASSERT(ts.tv_nsec < static_cast<long>(kNanosecondsPerSecond)); // NOLINT
+ if (ts.tv_nsec == 0 && ts.tv_sec == 0) {
+ return Time();
+ }
+ if (ts.tv_nsec == static_cast<long>(kNanosecondsPerSecond - 1) && // NOLINT
+ ts.tv_sec == std::numeric_limits<time_t>::max()) {
+ return Max();
+ }
+ return Time(ts.tv_sec * kMicrosecondsPerSecond +
+ ts.tv_nsec / kNanosecondsPerMicrosecond);
+}
+
+
+struct timespec Time::ToTimespec() const {
+ struct timespec ts;
+ if (IsNull()) {
+ ts.tv_sec = 0;
+ ts.tv_nsec = 0;
+ return ts;
+ }
+ if (IsMax()) {
+ ts.tv_sec = std::numeric_limits<time_t>::max();
+ ts.tv_nsec = static_cast<long>(kNanosecondsPerSecond - 1); // NOLINT
+ return ts;
+ }
+ ts.tv_sec = us_ / kMicrosecondsPerSecond;
+ ts.tv_nsec = (us_ % kMicrosecondsPerSecond) * kNanosecondsPerMicrosecond;
+ return ts;
+}
+
+
+Time Time::FromTimeval(struct timeval tv) {
+ ASSERT(tv.tv_usec >= 0);
+ ASSERT(tv.tv_usec < static_cast<suseconds_t>(kMicrosecondsPerSecond));
+ if (tv.tv_usec == 0 && tv.tv_sec == 0) {
+ return Time();
+ }
+ if (tv.tv_usec == static_cast<suseconds_t>(kMicrosecondsPerSecond - 1) &&
+ tv.tv_sec == std::numeric_limits<time_t>::max()) {
+ return Max();
+ }
+ return Time(tv.tv_sec * kMicrosecondsPerSecond + tv.tv_usec);
+}
+
+
+struct timeval Time::ToTimeval() const {
+ struct timeval tv;
+ if (IsNull()) {
+ tv.tv_sec = 0;
+ tv.tv_usec = 0;
+ return tv;
+ }
+ if (IsMax()) {
+ tv.tv_sec = std::numeric_limits<time_t>::max();
+ tv.tv_usec = static_cast<suseconds_t>(kMicrosecondsPerSecond - 1);
+ return tv;
+ }
+ tv.tv_sec = us_ / kMicrosecondsPerSecond;
+ tv.tv_usec = us_ % kMicrosecondsPerSecond;
+ return tv;
+}
+
+#endif // V8_OS_WIN
+
+
+Time Time::FromJsTime(double ms_since_epoch) {
+ // The epoch is a valid time, so this constructor doesn't interpret
+ // 0 as the null time.
+ if (ms_since_epoch == std::numeric_limits<double>::max()) {
+ return Max();
+ }
+ return Time(
+ static_cast<int64_t>(ms_since_epoch * kMicrosecondsPerMillisecond));
+}
+
+
+double Time::ToJsTime() const {
+ if (IsNull()) {
+ // Preserve 0 so the invalid result doesn't depend on the platform.
+ return 0;
+ }
+ if (IsMax()) {
+ // Preserve max without offset to prevent overflow.
+ return std::numeric_limits<double>::max();
+ }
+ return static_cast<double>(us_) / kMicrosecondsPerMillisecond;
+}
+
+
+#if V8_OS_WIN
+
+class TickClock {
+ public:
+ virtual ~TickClock() {}
+ virtual int64_t Now() = 0;
+};
+
+
+// Overview of time counters:
+// (1) CPU cycle counter (retrieved via RDTSC).
+// The CPU counter provides the highest resolution time stamp and is the least
+// expensive to retrieve. However, the CPU counter is unreliable and should not
+// be used in production. Its biggest issue is that it is per processor and it
+// is not synchronized between processors. Also, on some computers, the counters
+// will change frequency due to thermal and power changes, and stop in some
+// states.
+//
+// (2) QueryPerformanceCounter (QPC). The QPC counter provides a high-
+// resolution (100 nanoseconds) time stamp but is comparatively more expensive
+// to retrieve. What QueryPerformanceCounter actually does is up to the HAL
+// (with some help from ACPI).
+// According to http://blogs.msdn.com/oldnewthing/archive/2005/09/02/459952.aspx
+// in the worst case, it gets the counter from the rollover interrupt on the
+// programmable interrupt timer. In the best case, the HAL may conclude that
+// the RDTSC counter runs at a constant frequency; then it uses that instead.
+// On multiprocessor machines, it will try to verify that the values returned
+// from RDTSC on each processor are consistent with each other, and apply a
+// handful of workarounds for known buggy hardware. In other words, QPC is
+// supposed to give consistent results on a multiprocessor computer, but it is
+// unreliable in practice due to bugs in the BIOS or HAL on some computers,
+// especially older ones. With recent HAL updates and newer BIOSes, QPC is
+// becoming more reliable, but it should still be used with caution.
+//
+// (3) System time. The system time provides a low-resolution (typically 10 ms
+// to 55 ms) time stamp but is comparatively less expensive to retrieve and
+// more reliable.
+class HighResolutionTickClock V8_FINAL : public TickClock {
+ public:
+ explicit HighResolutionTickClock(int64_t ticks_per_second)
+ : ticks_per_second_(ticks_per_second) {
+ ASSERT_LT(0, ticks_per_second);
+ }
+ virtual ~HighResolutionTickClock() {}
+
+ virtual int64_t Now() V8_OVERRIDE {
+ LARGE_INTEGER now;
+ BOOL result = QueryPerformanceCounter(&now);
+ ASSERT(result);
+ USE(result);
+
+    // Intentionally calculate microseconds in a roundabout manner to avoid
+ // overflow and precision issues. Think twice before simplifying!
+ int64_t whole_seconds = now.QuadPart / ticks_per_second_;
+ int64_t leftover_ticks = now.QuadPart % ticks_per_second_;
+ int64_t ticks = (whole_seconds * Time::kMicrosecondsPerSecond) +
+ ((leftover_ticks * Time::kMicrosecondsPerSecond) / ticks_per_second_);
+
+ // Make sure we never return 0 here, so that TimeTicks::HighResNow()
+ // will never return 0.
+ return ticks + 1;
+ }
+
+ private:
+ int64_t ticks_per_second_;
+};
+
+
+// The GetTickCount64() API is what we actually want for the regular tick
+// clock, but this is only available starting with Windows Vista.
+class WindowsVistaTickClock V8_FINAL : public TickClock {
+ public:
+ explicit WindowsVistaTickClock(GETTICKCOUNT64PROC func) : func_(func) {
+ ASSERT(func_ != NULL);
+ }
+ virtual ~WindowsVistaTickClock() {}
+
+ virtual int64_t Now() V8_OVERRIDE {
+ // Query the current ticks (in ms).
+ ULONGLONG tick_count_ms = (*func_)();
+
+ // Convert to microseconds (make sure to never return 0 here).
+ return (tick_count_ms * Time::kMicrosecondsPerMillisecond) + 1;
+ }
+
+ private:
+ GETTICKCOUNT64PROC func_;
+};
+
+
+class RolloverProtectedTickClock V8_FINAL : public TickClock {
+ public:
+ // We initialize rollover_ms_ to 1 to ensure that we will never
+ // return 0 from TimeTicks::HighResNow() and TimeTicks::Now() below.
+ RolloverProtectedTickClock() : last_seen_now_(0), rollover_ms_(1) {}
+ virtual ~RolloverProtectedTickClock() {}
+
+ virtual int64_t Now() V8_OVERRIDE {
+ LockGuard<Mutex> lock_guard(&mutex_);
+ // We use timeGetTime() to implement TimeTicks::Now(), which rolls over
+ // every ~49.7 days. We try to track rollover ourselves, which works if
+ // TimeTicks::Now() is called at least every 49 days.
+ // Note that we do not use GetTickCount() here, since timeGetTime() gives
+ // more predictable delta values, as described here:
+ // http://blogs.msdn.com/b/larryosterman/archive/2009/09/02/what-s-the-difference-between-gettickcount-and-timegettime.aspx
+ DWORD now = timeGetTime();
+ if (now < last_seen_now_) {
+ rollover_ms_ += V8_INT64_C(0x100000000); // ~49.7 days.
+ }
+ last_seen_now_ = now;
+ return (now + rollover_ms_) * Time::kMicrosecondsPerMillisecond;
+ }
+
+ private:
+ Mutex mutex_;
+ DWORD last_seen_now_;
+ int64_t rollover_ms_;
+};
+
+
+struct CreateTickClockTrait {
+ static TickClock* Create() {
+ // Try to load GetTickCount64() from kernel32.dll (available since Vista).
+ HMODULE kernel32 = ::GetModuleHandleA("kernel32.dll");
+ ASSERT(kernel32 != NULL);
+ FARPROC proc = ::GetProcAddress(kernel32, "GetTickCount64");
+ if (proc != NULL) {
+ return new WindowsVistaTickClock(
+ reinterpret_cast<GETTICKCOUNT64PROC>(proc));
+ }
+
+    // Fall back to the rollover-protected tick clock.
+ return new RolloverProtectedTickClock;
+ }
+};
+
+
+static LazyDynamicInstance<TickClock,
+ CreateTickClockTrait,
+ ThreadSafeInitOnceTrait>::type tick_clock =
+ LAZY_DYNAMIC_INSTANCE_INITIALIZER;
+
+
+struct CreateHighResTickClockTrait {
+ static TickClock* Create() {
+ // Check if the installed hardware supports a high-resolution performance
+    // counter, and if not, fall back to the low-resolution tick clock.
+ LARGE_INTEGER ticks_per_second;
+ if (!QueryPerformanceFrequency(&ticks_per_second)) {
+ return tick_clock.Pointer();
+ }
+
+ // On Athlon X2 CPUs (e.g. model 15) the QueryPerformanceCounter
+    // is unreliable; fall back to the low-resolution tick clock.
+ CPU cpu;
+ if (strcmp(cpu.vendor(), "AuthenticAMD") == 0 && cpu.family() == 15) {
+ return tick_clock.Pointer();
+ }
+
+ return new HighResolutionTickClock(ticks_per_second.QuadPart);
+ }
+};
+
+
+static LazyDynamicInstance<TickClock,
+ CreateHighResTickClockTrait,
+ ThreadSafeInitOnceTrait>::type high_res_tick_clock =
+ LAZY_DYNAMIC_INSTANCE_INITIALIZER;
+
+
+TimeTicks TimeTicks::Now() {
+ // Make sure we never return 0 here.
+ TimeTicks ticks(tick_clock.Pointer()->Now());
+ ASSERT(!ticks.IsNull());
+ return ticks;
+}
+
+
+TimeTicks TimeTicks::HighResNow() {
+ // Make sure we never return 0 here.
+ TimeTicks ticks(high_res_tick_clock.Pointer()->Now());
+ ASSERT(!ticks.IsNull());
+ return ticks;
+}
+
+#else // V8_OS_WIN
+
+TimeTicks TimeTicks::Now() {
+ return HighResNow();
+}
+
+
+TimeTicks TimeTicks::HighResNow() {
+ int64_t ticks;
+#if V8_OS_MACOSX
+ static struct mach_timebase_info info;
+ if (info.denom == 0) {
+ kern_return_t result = mach_timebase_info(&info);
+ ASSERT_EQ(KERN_SUCCESS, result);
+ USE(result);
+ }
+ ticks = (mach_absolute_time() / Time::kNanosecondsPerMicrosecond *
+ info.numer / info.denom);
+#elif V8_OS_SOLARIS
+ ticks = (gethrtime() / Time::kNanosecondsPerMicrosecond);
+#elif V8_LIBRT_NOT_AVAILABLE
+ // TODO(bmeurer): This is a temporary hack to support cross-compiling
+ // Chrome for Android in AOSP. Remove this once AOSP is fixed, also
+ // cleanup the tools/gyp/v8.gyp file.
+ struct timeval tv;
+ int result = gettimeofday(&tv, NULL);
+ ASSERT_EQ(0, result);
+ USE(result);
+ ticks = (tv.tv_sec * Time::kMicrosecondsPerSecond + tv.tv_usec);
+#elif V8_OS_POSIX
+ struct timespec ts;
+ int result = clock_gettime(CLOCK_MONOTONIC, &ts);
+ ASSERT_EQ(0, result);
+ USE(result);
+ ticks = (ts.tv_sec * Time::kMicrosecondsPerSecond +
+ ts.tv_nsec / Time::kNanosecondsPerMicrosecond);
+#endif // V8_OS_MACOSX
+ // Make sure we never return 0 here.
+ return TimeTicks(ticks + 1);
+}
+
+#endif // V8_OS_WIN
+
+} } // namespace v8::internal
diff --git a/deps/v8/src/platform/time.h b/deps/v8/src/platform/time.h
new file mode 100644
index 000000000..2ce6cdd3e
--- /dev/null
+++ b/deps/v8/src/platform/time.h
@@ -0,0 +1,413 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_PLATFORM_TIME_H_
+#define V8_PLATFORM_TIME_H_
+
+#include <ctime>
+#include <limits>
+
+#include "allocation.h"
+
+// Forward declarations.
+extern "C" {
+struct _FILETIME;
+struct mach_timespec;
+struct timespec;
+struct timeval;
+}
+
+namespace v8 {
+namespace internal {
+
+class Time;
+class TimeTicks;
+
+// -----------------------------------------------------------------------------
+// TimeDelta
+//
+// This class represents a duration of time, internally represented in
+// microseconds.
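+//
+// For example (a sketch):
+//
+//   TimeDelta timeout = TimeDelta::FromMilliseconds(250);
+//   int64_t us = timeout.InMicroseconds();  // 250000
+//   TimeDelta doubled = timeout * 2;        // 500 ms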
+
+class TimeDelta V8_FINAL BASE_EMBEDDED {
+ public:
+ TimeDelta() : delta_(0) {}
+
+ // Converts units of time to TimeDeltas.
+ static TimeDelta FromDays(int days);
+ static TimeDelta FromHours(int hours);
+ static TimeDelta FromMinutes(int minutes);
+ static TimeDelta FromSeconds(int64_t seconds);
+ static TimeDelta FromMilliseconds(int64_t milliseconds);
+ static TimeDelta FromMicroseconds(int64_t microseconds) {
+ return TimeDelta(microseconds);
+ }
+ static TimeDelta FromNanoseconds(int64_t nanoseconds);
+
+ // Returns the time delta in some unit. The F versions return a floating
+ // point value, the "regular" versions return a rounded-down value.
+ //
+ // InMillisecondsRoundedUp() instead returns an integer that is rounded up
+ // to the next full millisecond.
+ int InDays() const;
+ int InHours() const;
+ int InMinutes() const;
+ double InSecondsF() const;
+ int64_t InSeconds() const;
+ double InMillisecondsF() const;
+ int64_t InMilliseconds() const;
+ int64_t InMillisecondsRoundedUp() const;
+ int64_t InMicroseconds() const { return delta_; }
+ int64_t InNanoseconds() const;
+
+ // Converts to/from Mach time specs.
+ static TimeDelta FromMachTimespec(struct mach_timespec ts);
+ struct mach_timespec ToMachTimespec() const;
+
+ // Converts to/from POSIX time specs.
+ static TimeDelta FromTimespec(struct timespec ts);
+ struct timespec ToTimespec() const;
+
+ TimeDelta& operator=(const TimeDelta& other) {
+ delta_ = other.delta_;
+ return *this;
+ }
+
+ // Computations with other deltas.
+ TimeDelta operator+(const TimeDelta& other) const {
+ return TimeDelta(delta_ + other.delta_);
+ }
+ TimeDelta operator-(const TimeDelta& other) const {
+ return TimeDelta(delta_ - other.delta_);
+ }
+
+ TimeDelta& operator+=(const TimeDelta& other) {
+ delta_ += other.delta_;
+ return *this;
+ }
+ TimeDelta& operator-=(const TimeDelta& other) {
+ delta_ -= other.delta_;
+ return *this;
+ }
+ TimeDelta operator-() const {
+ return TimeDelta(-delta_);
+ }
+
+ double TimesOf(const TimeDelta& other) const {
+ return static_cast<double>(delta_) / static_cast<double>(other.delta_);
+ }
+ double PercentOf(const TimeDelta& other) const {
+ return TimesOf(other) * 100.0;
+ }
+
+  // Computations with ints; note that we only allow multiplicative operations
+ // with ints, and additive operations with other deltas.
+ TimeDelta operator*(int64_t a) const {
+ return TimeDelta(delta_ * a);
+ }
+ TimeDelta operator/(int64_t a) const {
+ return TimeDelta(delta_ / a);
+ }
+ TimeDelta& operator*=(int64_t a) {
+ delta_ *= a;
+ return *this;
+ }
+ TimeDelta& operator/=(int64_t a) {
+ delta_ /= a;
+ return *this;
+ }
+ int64_t operator/(const TimeDelta& other) const {
+ return delta_ / other.delta_;
+ }
+
+ // Comparison operators.
+ bool operator==(const TimeDelta& other) const {
+ return delta_ == other.delta_;
+ }
+ bool operator!=(const TimeDelta& other) const {
+ return delta_ != other.delta_;
+ }
+ bool operator<(const TimeDelta& other) const {
+ return delta_ < other.delta_;
+ }
+ bool operator<=(const TimeDelta& other) const {
+ return delta_ <= other.delta_;
+ }
+ bool operator>(const TimeDelta& other) const {
+ return delta_ > other.delta_;
+ }
+ bool operator>=(const TimeDelta& other) const {
+ return delta_ >= other.delta_;
+ }
+
+ private:
+ // Constructs a delta given the duration in microseconds. This is private
+ // to avoid confusion by callers with an integer constructor. Use
+ // FromSeconds, FromMilliseconds, etc. instead.
+ explicit TimeDelta(int64_t delta) : delta_(delta) {}
+
+ // Delta in microseconds.
+ int64_t delta_;
+};
+
+
+// -----------------------------------------------------------------------------
+// Time
+//
+// This class represents an absolute point in time, internally represented as
+// microseconds (1/1,000,000 of a second) since 00:00:00 UTC, January 1, 1970.
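+//
+// For example (a sketch):
+//
+//   Time now = Time::Now();
+//   double js_ms = now.ToJsTime();  // Milliseconds since the epoch.
+//   Time later = now + TimeDelta::FromSeconds(5);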
+
+class Time V8_FINAL BASE_EMBEDDED {
+ public:
+ static const int64_t kMillisecondsPerSecond = 1000;
+ static const int64_t kMicrosecondsPerMillisecond = 1000;
+ static const int64_t kMicrosecondsPerSecond = kMicrosecondsPerMillisecond *
+ kMillisecondsPerSecond;
+ static const int64_t kMicrosecondsPerMinute = kMicrosecondsPerSecond * 60;
+ static const int64_t kMicrosecondsPerHour = kMicrosecondsPerMinute * 60;
+ static const int64_t kMicrosecondsPerDay = kMicrosecondsPerHour * 24;
+ static const int64_t kMicrosecondsPerWeek = kMicrosecondsPerDay * 7;
+ static const int64_t kNanosecondsPerMicrosecond = 1000;
+ static const int64_t kNanosecondsPerSecond = kNanosecondsPerMicrosecond *
+ kMicrosecondsPerSecond;
+
+  // Contains the null time. Use Time::Now() to get the current time.
+ Time() : us_(0) {}
+
+ // Returns true if the time object has not been initialized.
+ bool IsNull() const { return us_ == 0; }
+
+ // Returns true if the time object is the maximum time.
+ bool IsMax() const { return us_ == std::numeric_limits<int64_t>::max(); }
+
+  // Returns the current time. Watch out: the system might adjust its clock,
+ // in which case time will actually go backwards. We don't guarantee that
+ // times are increasing, or that two calls to Now() won't be the same.
+ static Time Now();
+
+  // Returns the current time. Same as Now() except that this function always
+  // uses system time, so that there are no discrepancies between the returned
+  // time and system time even in virtual environments, including our test
+  // bots. Timing-sensitive unit tests should use this function.
+ static Time NowFromSystemTime();
+
+  // Returns the epoch time of Unix-like systems (Jan 1, 1970).
+ static Time UnixEpoch() { return Time(0); }
+
+ // Returns the maximum time, which should be greater than any reasonable time
+ // with which we might compare it.
+ static Time Max() { return Time(std::numeric_limits<int64_t>::max()); }
+
+ // Converts to/from internal values. The meaning of the "internal value" is
+ // completely up to the implementation, so it should be treated as opaque.
+ static Time FromInternalValue(int64_t value) {
+ return Time(value);
+ }
+ int64_t ToInternalValue() const {
+ return us_;
+ }
+
+ // Converts to/from POSIX time specs.
+ static Time FromTimespec(struct timespec ts);
+ struct timespec ToTimespec() const;
+
+ // Converts to/from POSIX time values.
+ static Time FromTimeval(struct timeval tv);
+ struct timeval ToTimeval() const;
+
+ // Converts to/from Windows file times.
+ static Time FromFiletime(struct _FILETIME ft);
+ struct _FILETIME ToFiletime() const;
+
+  // Converts to/from the JavaScript convention for times: a number of
+  // milliseconds since the epoch.
+ static Time FromJsTime(double ms_since_epoch);
+ double ToJsTime() const;
+
+ Time& operator=(const Time& other) {
+ us_ = other.us_;
+ return *this;
+ }
+
+ // Compute the difference between two times.
+ TimeDelta operator-(const Time& other) const {
+ return TimeDelta::FromMicroseconds(us_ - other.us_);
+ }
+
+ // Modify by some time delta.
+ Time& operator+=(const TimeDelta& delta) {
+ us_ += delta.InMicroseconds();
+ return *this;
+ }
+ Time& operator-=(const TimeDelta& delta) {
+ us_ -= delta.InMicroseconds();
+ return *this;
+ }
+
+ // Return a new time modified by some delta.
+ Time operator+(const TimeDelta& delta) const {
+ return Time(us_ + delta.InMicroseconds());
+ }
+ Time operator-(const TimeDelta& delta) const {
+ return Time(us_ - delta.InMicroseconds());
+ }
+
+ // Comparison operators
+ bool operator==(const Time& other) const {
+ return us_ == other.us_;
+ }
+ bool operator!=(const Time& other) const {
+ return us_ != other.us_;
+ }
+ bool operator<(const Time& other) const {
+ return us_ < other.us_;
+ }
+ bool operator<=(const Time& other) const {
+ return us_ <= other.us_;
+ }
+ bool operator>(const Time& other) const {
+ return us_ > other.us_;
+ }
+ bool operator>=(const Time& other) const {
+ return us_ >= other.us_;
+ }
+
+ private:
+ explicit Time(int64_t us) : us_(us) {}
+
+ // Time in microseconds in UTC.
+ int64_t us_;
+};
+
+inline Time operator+(const TimeDelta& delta, const Time& time) {
+ return time + delta;
+}
+
+
+// -----------------------------------------------------------------------------
+// TimeTicks
+//
+// This class represents an abstract, almost always increasing time used for
+// measuring time durations. It is internally represented in microseconds.
+// It cannot be converted to a human-readable time, but is guaranteed not to
+// decrease (if the user changes the computer clock, Time::Now() may actually
+// decrease or jump). Note, however, that TimeTicks may "stand still", for
+// example while the computer is suspended.
+
+class TimeTicks V8_FINAL BASE_EMBEDDED {
+ public:
+ TimeTicks() : ticks_(0) {}
+
+ // Platform-dependent tick count representing "right now."
+ // The resolution of this clock is ~1-15ms. Resolution varies depending
+ // on hardware/operating system configuration.
+ // This method never returns a null TimeTicks.
+ static TimeTicks Now();
+
+ // Returns a platform-dependent high-resolution tick count. Implementation
+ // is hardware dependent and may or may not return sub-millisecond
+ // resolution. THIS CALL IS GENERALLY MUCH MORE EXPENSIVE THAN Now() AND
+ // SHOULD ONLY BE USED WHEN IT IS REALLY NEEDED.
+ // This method never returns a null TimeTicks.
+ static TimeTicks HighResNow();
+
+ // Returns true if this object has not been initialized.
+ bool IsNull() const { return ticks_ == 0; }
+
+ // Converts to/from internal values. The meaning of the "internal value" is
+ // completely up to the implementation, so it should be treated as opaque.
+ static TimeTicks FromInternalValue(int64_t value) {
+ return TimeTicks(value);
+ }
+ int64_t ToInternalValue() const {
+ return ticks_;
+ }
+
+ TimeTicks& operator=(const TimeTicks other) {
+ ticks_ = other.ticks_;
+ return *this;
+ }
+
+ // Compute the difference between two times.
+ TimeDelta operator-(const TimeTicks other) const {
+ return TimeDelta::FromMicroseconds(ticks_ - other.ticks_);
+ }
+
+ // Modify by some time delta.
+ TimeTicks& operator+=(const TimeDelta& delta) {
+ ticks_ += delta.InMicroseconds();
+ return *this;
+ }
+ TimeTicks& operator-=(const TimeDelta& delta) {
+ ticks_ -= delta.InMicroseconds();
+ return *this;
+ }
+
+ // Return a new TimeTicks modified by some delta.
+ TimeTicks operator+(const TimeDelta& delta) const {
+ return TimeTicks(ticks_ + delta.InMicroseconds());
+ }
+ TimeTicks operator-(const TimeDelta& delta) const {
+ return TimeTicks(ticks_ - delta.InMicroseconds());
+ }
+
+ // Comparison operators
+ bool operator==(const TimeTicks& other) const {
+ return ticks_ == other.ticks_;
+ }
+ bool operator!=(const TimeTicks& other) const {
+ return ticks_ != other.ticks_;
+ }
+ bool operator<(const TimeTicks& other) const {
+ return ticks_ < other.ticks_;
+ }
+ bool operator<=(const TimeTicks& other) const {
+ return ticks_ <= other.ticks_;
+ }
+ bool operator>(const TimeTicks& other) const {
+ return ticks_ > other.ticks_;
+ }
+ bool operator>=(const TimeTicks& other) const {
+ return ticks_ >= other.ticks_;
+ }
+
+ private:
+ // Please use Now() to create a new object. This is for internal use
+  // and testing. The tick count is in microseconds.
+ explicit TimeTicks(int64_t ticks) : ticks_(ticks) {}
+
+ // Tick count in microseconds.
+ int64_t ticks_;
+};
+
+inline TimeTicks operator+(const TimeDelta& delta, const TimeTicks& ticks) {
+ return ticks + delta;
+}
+
+} } // namespace v8::internal
+
+#endif // V8_PLATFORM_TIME_H_
diff --git a/deps/v8/src/preparser.h b/deps/v8/src/preparser.h
index faddecc56..9358d6bd1 100644
--- a/deps/v8/src/preparser.h
+++ b/deps/v8/src/preparser.h
@@ -104,11 +104,6 @@ class DuplicateFinder {
};
-#ifdef WIN32
-#undef Yield
-#endif
-
-
class PreParser {
public:
enum PreParseResult {
diff --git a/deps/v8/src/prettyprinter.cc b/deps/v8/src/prettyprinter.cc
index 1824efa7f..b1bac4cd4 100644
--- a/deps/v8/src/prettyprinter.cc
+++ b/deps/v8/src/prettyprinter.cc
@@ -38,11 +38,11 @@ namespace internal {
#ifdef DEBUG
-PrettyPrinter::PrettyPrinter() {
+PrettyPrinter::PrettyPrinter(Isolate* isolate) {
output_ = NULL;
size_ = 0;
pos_ = 0;
- InitializeAstVisitor();
+ InitializeAstVisitor(isolate);
}
@@ -480,8 +480,8 @@ const char* PrettyPrinter::PrintProgram(FunctionLiteral* program) {
}
-void PrettyPrinter::PrintOut(AstNode* node) {
- PrettyPrinter printer;
+void PrettyPrinter::PrintOut(Isolate* isolate, AstNode* node) {
+ PrettyPrinter printer(isolate);
PrintF("%s", printer.Print(node));
}
@@ -658,7 +658,7 @@ class IndentedScope BASE_EMBEDDED {
//-----------------------------------------------------------------------------
-AstPrinter::AstPrinter() : indent_(0) {
+AstPrinter::AstPrinter(Isolate* isolate) : PrettyPrinter(isolate), indent_(0) {
}
diff --git a/deps/v8/src/prettyprinter.h b/deps/v8/src/prettyprinter.h
index 6657ecd14..b7ff2af5f 100644
--- a/deps/v8/src/prettyprinter.h
+++ b/deps/v8/src/prettyprinter.h
@@ -38,7 +38,7 @@ namespace internal {
class PrettyPrinter: public AstVisitor {
public:
- PrettyPrinter();
+ explicit PrettyPrinter(Isolate* isolate);
virtual ~PrettyPrinter();
// The following routines print a node into a string.
@@ -50,7 +50,7 @@ class PrettyPrinter: public AstVisitor {
void Print(const char* format, ...);
// Print a node to stdout.
- static void PrintOut(AstNode* node);
+ static void PrintOut(Isolate* isolate, AstNode* node);
// Individual nodes
#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
@@ -82,7 +82,7 @@ class PrettyPrinter: public AstVisitor {
// Prints the AST structure
class AstPrinter: public PrettyPrinter {
public:
- AstPrinter();
+ explicit AstPrinter(Isolate* isolate);
virtual ~AstPrinter();
const char* PrintProgram(FunctionLiteral* program);
diff --git a/deps/v8/src/profile-generator-inl.h b/deps/v8/src/profile-generator-inl.h
index 6791c88c5..f2feb73fc 100644
--- a/deps/v8/src/profile-generator-inl.h
+++ b/deps/v8/src/profile-generator-inl.h
@@ -56,8 +56,8 @@ CodeEntry::CodeEntry(Logger::LogEventsAndTags tag,
line_number_(line_number),
shared_id_(0),
script_id_(v8::Script::kNoScriptId),
- no_frame_ranges_(NULL) {
-}
+ no_frame_ranges_(NULL),
+ bailout_reason_(kEmptyBailoutReason) { }
bool CodeEntry::is_js_function_tag(Logger::LogEventsAndTags tag) {
@@ -73,11 +73,9 @@ bool CodeEntry::is_js_function_tag(Logger::LogEventsAndTags tag) {
ProfileNode::ProfileNode(ProfileTree* tree, CodeEntry* entry)
: tree_(tree),
entry_(entry),
- total_ticks_(0),
self_ticks_(0),
children_(CodeEntriesMatch),
- id_(tree->next_node_id()) {
-}
+ id_(tree->next_node_id()) { }
CodeEntry* ProfileGenerator::EntryForVMState(StateTag tag) {
diff --git a/deps/v8/src/profile-generator.cc b/deps/v8/src/profile-generator.cc
index 86bd17b70..38c1f785d 100644
--- a/deps/v8/src/profile-generator.cc
+++ b/deps/v8/src/profile-generator.cc
@@ -41,8 +41,8 @@ namespace v8 {
namespace internal {
-StringsStorage::StringsStorage()
- : names_(StringsMatch) {
+StringsStorage::StringsStorage(Heap* heap)
+ : hash_seed_(heap->HashSeed()), names_(StringsMatch) {
}
@@ -61,7 +61,7 @@ const char* StringsStorage::GetCopy(const char* src) {
OS::StrNCpy(dst, src, len);
dst[len] = '\0';
uint32_t hash =
- StringHasher::HashSequentialString(dst.start(), len, HEAP->HashSeed());
+ StringHasher::HashSequentialString(dst.start(), len, hash_seed_);
return AddOrDisposeString(dst.start(), hash);
}
@@ -95,7 +95,7 @@ const char* StringsStorage::GetVFormatted(const char* format, va_list args) {
return format;
}
uint32_t hash = StringHasher::HashSequentialString(
- str.start(), len, HEAP->HashSeed());
+ str.start(), len, hash_seed_);
return AddOrDisposeString(str.start(), hash);
}
@@ -133,6 +133,7 @@ size_t StringsStorage::GetUsedMemorySize() const {
const char* const CodeEntry::kEmptyNamePrefix = "";
const char* const CodeEntry::kEmptyResourceName = "";
+const char* const CodeEntry::kEmptyBailoutReason = "";
CodeEntry::~CodeEntry() {
@@ -209,24 +210,15 @@ ProfileNode* ProfileNode::FindOrAddChild(CodeEntry* entry) {
}
-double ProfileNode::GetSelfMillis() const {
- return tree_->TicksToMillis(self_ticks_);
-}
-
-
-double ProfileNode::GetTotalMillis() const {
- return tree_->TicksToMillis(total_ticks_);
-}
-
-
void ProfileNode::Print(int indent) {
- OS::Print("%5u %5u %*c %s%s %d #%d",
- total_ticks_, self_ticks_,
+ OS::Print("%5u %*c %s%s %d #%d %s",
+ self_ticks_,
indent, ' ',
entry_->name_prefix(),
entry_->name(),
entry_->script_id(),
- id());
+ id(),
+ entry_->bailout_reason());
if (entry_->resource_name()[0] != '\0')
OS::Print(" %s:%d", entry_->resource_name(), entry_->line_number());
OS::Print("\n");
@@ -298,11 +290,6 @@ struct NodesPair {
};
-void ProfileTree::SetTickRatePerMs(double ticks_per_ms) {
- ms_to_ticks_scale_ = ticks_per_ms > 0 ? 1.0 / ticks_per_ms : 1.0;
-}
-
-
class Position {
public:
explicit Position(ProfileNode* node)
@@ -345,39 +332,12 @@ void ProfileTree::TraverseDepthFirst(Callback* callback) {
}
-class CalculateTotalTicksCallback {
- public:
- void BeforeTraversingChild(ProfileNode*, ProfileNode*) { }
-
- void AfterAllChildrenTraversed(ProfileNode* node) {
- node->IncreaseTotalTicks(node->self_ticks());
- }
-
- void AfterChildTraversed(ProfileNode* parent, ProfileNode* child) {
- parent->IncreaseTotalTicks(child->total_ticks());
- }
-};
-
-
-void ProfileTree::CalculateTotalTicks() {
- CalculateTotalTicksCallback cb;
- TraverseDepthFirst(&cb);
-}
-
-
-void ProfileTree::ShortPrint() {
- OS::Print("root: %u %u %.2fms %.2fms\n",
- root_->total_ticks(), root_->self_ticks(),
- root_->GetTotalMillis(), root_->GetSelfMillis());
-}
-
-
CpuProfile::CpuProfile(const char* title, unsigned uid, bool record_samples)
: title_(title),
uid_(uid),
record_samples_(record_samples),
- start_time_us_(OS::Ticks()),
- end_time_us_(0) {
+ start_time_(Time::NowFromSystemTime()) {
+ timer_.Start();
}
@@ -388,20 +348,7 @@ void CpuProfile::AddPath(const Vector<CodeEntry*>& path) {
void CpuProfile::CalculateTotalTicksAndSamplingRate() {
- end_time_us_ = OS::Ticks();
- top_down_.CalculateTotalTicks();
-
- double duration_ms = (end_time_us_ - start_time_us_) / 1000.;
- if (duration_ms < 1) duration_ms = 1;
- unsigned ticks = top_down_.root()->total_ticks();
- double rate = ticks / duration_ms;
- top_down_.SetTickRatePerMs(rate);
-}
-
-
-void CpuProfile::ShortPrint() {
- OS::Print("top down ");
- top_down_.ShortPrint();
+ end_time_ = start_time_ + timer_.Elapsed();
}
@@ -496,8 +443,9 @@ void CodeMap::Print() {
}
-CpuProfilesCollection::CpuProfilesCollection()
- : current_profiles_semaphore_(OS::CreateSemaphore(1)) {
+CpuProfilesCollection::CpuProfilesCollection(Heap* heap)
+ : function_and_resource_names_(heap),
+ current_profiles_semaphore_(1) {
}
@@ -512,7 +460,6 @@ static void DeleteCpuProfile(CpuProfile** profile_ptr) {
CpuProfilesCollection::~CpuProfilesCollection() {
- delete current_profiles_semaphore_;
finished_profiles_.Iterate(DeleteCpuProfile);
current_profiles_.Iterate(DeleteCpuProfile);
code_entries_.Iterate(DeleteCodeEntry);
@@ -522,20 +469,20 @@ CpuProfilesCollection::~CpuProfilesCollection() {
bool CpuProfilesCollection::StartProfiling(const char* title, unsigned uid,
bool record_samples) {
ASSERT(uid > 0);
- current_profiles_semaphore_->Wait();
+ current_profiles_semaphore_.Wait();
if (current_profiles_.length() >= kMaxSimultaneousProfiles) {
- current_profiles_semaphore_->Signal();
+ current_profiles_semaphore_.Signal();
return false;
}
for (int i = 0; i < current_profiles_.length(); ++i) {
if (strcmp(current_profiles_[i]->title(), title) == 0) {
// Ignore attempts to start profile with the same title.
- current_profiles_semaphore_->Signal();
+ current_profiles_semaphore_.Signal();
return false;
}
}
current_profiles_.Add(new CpuProfile(title, uid, record_samples));
- current_profiles_semaphore_->Signal();
+ current_profiles_semaphore_.Signal();
return true;
}
@@ -543,14 +490,14 @@ bool CpuProfilesCollection::StartProfiling(const char* title, unsigned uid,
CpuProfile* CpuProfilesCollection::StopProfiling(const char* title) {
const int title_len = StrLength(title);
CpuProfile* profile = NULL;
- current_profiles_semaphore_->Wait();
+ current_profiles_semaphore_.Wait();
for (int i = current_profiles_.length() - 1; i >= 0; --i) {
if (title_len == 0 || strcmp(current_profiles_[i]->title(), title) == 0) {
profile = current_profiles_.Remove(i);
break;
}
}
- current_profiles_semaphore_->Signal();
+ current_profiles_semaphore_.Signal();
if (profile == NULL) return NULL;
profile->CalculateTotalTicksAndSamplingRate();
@@ -586,11 +533,11 @@ void CpuProfilesCollection::AddPathToCurrentProfiles(
// As starting / stopping profiles is rare relatively to this
// method, we don't bother minimizing the duration of lock holding,
// e.g. copying contents of the list to a local vector.
- current_profiles_semaphore_->Wait();
+ current_profiles_semaphore_.Wait();
for (int i = 0; i < current_profiles_.length(); ++i) {
current_profiles_[i]->AddPath(path);
}
- current_profiles_semaphore_->Signal();
+ current_profiles_semaphore_.Signal();
}
diff --git a/deps/v8/src/profile-generator.h b/deps/v8/src/profile-generator.h
index 99aeb1f5c..0a4502cc1 100644
--- a/deps/v8/src/profile-generator.h
+++ b/deps/v8/src/profile-generator.h
@@ -41,7 +41,7 @@ struct OffsetRange;
// forever, even if they disappear from JS heap or external storage.
class StringsStorage {
public:
- StringsStorage();
+ explicit StringsStorage(Heap* heap);
~StringsStorage();
const char* GetCopy(const char* src);
@@ -63,6 +63,7 @@ class StringsStorage {
const char* AddOrDisposeString(char* str, uint32_t hash);
// Mapping of strings by String::Hash to const char* strings.
+ uint32_t hash_seed_;
HashMap names_;
DISALLOW_COPY_AND_ASSIGN(StringsStorage);
@@ -88,6 +89,10 @@ class CodeEntry {
INLINE(void set_shared_id(int shared_id)) { shared_id_ = shared_id; }
INLINE(int script_id() const) { return script_id_; }
INLINE(void set_script_id(int script_id)) { script_id_ = script_id; }
+ INLINE(void set_bailout_reason(const char* bailout_reason)) {
+ bailout_reason_ = bailout_reason;
+ }
+ INLINE(const char* bailout_reason() const) { return bailout_reason_; }
INLINE(static bool is_js_function_tag(Logger::LogEventsAndTags tag));
@@ -105,6 +110,7 @@ class CodeEntry {
static const char* const kEmptyNamePrefix;
static const char* const kEmptyResourceName;
+ static const char* const kEmptyBailoutReason;
private:
Logger::LogEventsAndTags tag_ : 8;
@@ -116,6 +122,7 @@ class CodeEntry {
int shared_id_;
int script_id_;
List<OffsetRange>* no_frame_ranges_;
+ const char* bailout_reason_;
DISALLOW_COPY_AND_ASSIGN(CodeEntry);
};
@@ -131,14 +138,10 @@ class ProfileNode {
ProfileNode* FindOrAddChild(CodeEntry* entry);
INLINE(void IncrementSelfTicks()) { ++self_ticks_; }
INLINE(void IncreaseSelfTicks(unsigned amount)) { self_ticks_ += amount; }
- INLINE(void IncreaseTotalTicks(unsigned amount)) { total_ticks_ += amount; }
INLINE(CodeEntry* entry() const) { return entry_; }
INLINE(unsigned self_ticks() const) { return self_ticks_; }
- INLINE(unsigned total_ticks() const) { return total_ticks_; }
INLINE(const List<ProfileNode*>* children() const) { return &children_list_; }
- double GetSelfMillis() const;
- double GetTotalMillis() const;
unsigned id() const { return id_; }
void Print(int indent);
@@ -155,7 +158,6 @@ class ProfileNode {
ProfileTree* tree_;
CodeEntry* entry_;
- unsigned total_ticks_;
unsigned self_ticks_;
// Mapping from CodeEntry* to ProfileNode*
HashMap children_;
@@ -173,17 +175,9 @@ class ProfileTree {
ProfileNode* AddPathFromEnd(const Vector<CodeEntry*>& path);
void AddPathFromStart(const Vector<CodeEntry*>& path);
- void CalculateTotalTicks();
-
- double TicksToMillis(unsigned ticks) const {
- return ticks * ms_to_ticks_scale_;
- }
ProfileNode* root() const { return root_; }
- void SetTickRatePerMs(double ticks_per_ms);
-
unsigned next_node_id() { return next_node_id_++; }
- void ShortPrint();
void Print() {
root_->Print(0);
}
@@ -195,7 +189,6 @@ class ProfileTree {
CodeEntry root_entry_;
unsigned next_node_id_;
ProfileNode* root_;
- double ms_to_ticks_scale_;
DISALLOW_COPY_AND_ASSIGN(ProfileTree);
};
@@ -216,20 +209,20 @@ class CpuProfile {
int samples_count() const { return samples_.length(); }
ProfileNode* sample(int index) const { return samples_.at(index); }
- int64_t start_time_us() const { return start_time_us_; }
- int64_t end_time_us() const { return end_time_us_; }
+ Time start_time() const { return start_time_; }
+ Time end_time() const { return end_time_; }
void UpdateTicksScale();
- void ShortPrint();
void Print();
private:
const char* title_;
unsigned uid_;
bool record_samples_;
- int64_t start_time_us_;
- int64_t end_time_us_;
+ Time start_time_;
+ Time end_time_;
+ ElapsedTimer timer_;
List<ProfileNode*> samples_;
ProfileTree top_down_;
@@ -285,7 +278,7 @@ class CodeMap {
class CpuProfilesCollection {
public:
- CpuProfilesCollection();
+ explicit CpuProfilesCollection(Heap* heap);
~CpuProfilesCollection();
bool StartProfiling(const char* title, unsigned uid, bool record_samples);
@@ -326,7 +319,7 @@ class CpuProfilesCollection {
// Accessed by VM thread and profile generator thread.
List<CpuProfile*> current_profiles_;
- Semaphore* current_profiles_semaphore_;
+ Semaphore current_profiles_semaphore_;
DISALLOW_COPY_AND_ASSIGN(CpuProfilesCollection);
};
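Holding the semaphore by value instead of through a heap-allocated pointer removes a new/delete pair and a failure path; the guard pattern around current_profiles_ is unchanged. Here it is as a standalone sketch, with std::mutex standing in for the embedded Semaphore initialized to 1:

#include <mutex>
#include <vector>

struct Profile;

class ProfilesCollectionSketch {
 public:
  void AddProfile(Profile* profile) {
    std::lock_guard<std::mutex> lock(mutex_);  // the Wait()/Signal() pair
    current_profiles_.push_back(profile);      // shared with the profiler thread
  }
 private:
  std::vector<Profile*> current_profiles_;
  std::mutex mutex_;  // plays the role of current_profiles_semaphore_
};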
diff --git a/deps/v8/src/property-details.h b/deps/v8/src/property-details.h
index 6b62ddb18..7f44b7927 100644
--- a/deps/v8/src/property-details.h
+++ b/deps/v8/src/property-details.h
@@ -148,7 +148,7 @@ class Representation {
bool IsHeapObject() const { return kind_ == kHeapObject; }
bool IsExternal() const { return kind_ == kExternal; }
bool IsSpecialization() const {
- return kind_ == kInteger32 || kind_ == kDouble;
+ return kind_ == kInteger32 || kind_ == kDouble || kind_ == kSmi;
}
const char* Mnemonic() const;
diff --git a/deps/v8/src/property.h b/deps/v8/src/property.h
index 2ee6c2a2d..0f78ba478 100644
--- a/deps/v8/src/property.h
+++ b/deps/v8/src/property.h
@@ -46,7 +46,8 @@ class Descriptor BASE_EMBEDDED {
public:
MUST_USE_RESULT MaybeObject* KeyToUniqueName() {
if (!key_->IsUniqueName()) {
- MaybeObject* maybe_result = HEAP->InternalizeString(String::cast(key_));
+ MaybeObject* maybe_result =
+ key_->GetIsolate()->heap()->InternalizeString(String::cast(key_));
if (!maybe_result->To(&key_)) return maybe_result;
}
return key_;
@@ -422,12 +423,10 @@ class LookupResult BASE_EMBEDDED {
PropertyIndex GetFieldIndex() {
ASSERT(lookup_type_ == DESCRIPTOR_TYPE);
- ASSERT(IsField());
return PropertyIndex::NewFieldIndex(GetFieldIndexFromMap(holder()->map()));
}
int GetLocalFieldIndexFromMap(Map* map) {
- ASSERT(IsField());
return GetFieldIndexFromMap(map) - map->inobject_properties();
}
diff --git a/deps/v8/src/regexp-macro-assembler.cc b/deps/v8/src/regexp-macro-assembler.cc
index fa792768b..7d027f880 100644
--- a/deps/v8/src/regexp-macro-assembler.cc
+++ b/deps/v8/src/regexp-macro-assembler.cc
@@ -163,7 +163,6 @@ NativeRegExpMacroAssembler::Result NativeRegExpMacroAssembler::Execute(
int* output,
int output_size,
Isolate* isolate) {
- ASSERT(isolate == Isolate::Current());
// Ensure that the minimum stack has been allocated.
RegExpStackScope stack_scope(isolate);
Address stack_base = stack_scope.stack()->stack_base();
@@ -238,7 +237,6 @@ int NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16(
Address byte_offset2,
size_t byte_length,
Isolate* isolate) {
- ASSERT(isolate == Isolate::Current());
unibrow::Mapping<unibrow::Ecma262Canonicalize>* canonicalize =
isolate->regexp_macro_assembler_canonicalize();
// This function is not allowed to cause a garbage collection.
@@ -271,7 +269,6 @@ int NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16(
Address NativeRegExpMacroAssembler::GrowStack(Address stack_pointer,
Address* stack_base,
Isolate* isolate) {
- ASSERT(isolate == Isolate::Current());
RegExpStack* regexp_stack = isolate->regexp_stack();
size_t size = regexp_stack->stack_capacity();
Address old_stack_base = regexp_stack->stack_base();
diff --git a/deps/v8/src/regexp-stack.cc b/deps/v8/src/regexp-stack.cc
index fc4114af5..f3af490f1 100644
--- a/deps/v8/src/regexp-stack.cc
+++ b/deps/v8/src/regexp-stack.cc
@@ -39,7 +39,6 @@ RegExpStackScope::RegExpStackScope(Isolate* isolate)
RegExpStackScope::~RegExpStackScope() {
- ASSERT(Isolate::Current() == regexp_stack_->isolate_);
// Reset the buffer if it has grown.
regexp_stack_->Reset();
}
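The asserts dropped from regexp-macro-assembler.cc and regexp-stack.cc above all follow the same rule applied throughout this upgrade: a function that already receives an Isolate* must not re-derive it from thread-local storage. Sketched:

// Before: trust the argument, but still pay for (and assert against)
// the TLS lookup.
//   static Result Execute(..., Isolate* isolate) {
//     ASSERT(isolate == Isolate::Current());
//     ...
//   }
//
// After: the explicit argument is the single source of truth; the TLS
// round trip and the assert both disappear.
//   static Result Execute(..., Isolate* isolate) {
//     ...
//   }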
diff --git a/deps/v8/src/rewriter.cc b/deps/v8/src/rewriter.cc
index df5c35341..06335a80c 100644
--- a/deps/v8/src/rewriter.cc
+++ b/deps/v8/src/rewriter.cc
@@ -43,8 +43,8 @@ class Processor: public AstVisitor {
result_assigned_(false),
is_set_(false),
in_try_(false),
- factory_(Isolate::Current(), zone) {
- InitializeAstVisitor();
+ factory_(zone->isolate(), zone) {
+ InitializeAstVisitor(zone->isolate());
}
virtual ~Processor() { }
diff --git a/deps/v8/src/runtime-profiler.cc b/deps/v8/src/runtime-profiler.cc
index 0e99650ed..95dcc4f98 100644
--- a/deps/v8/src/runtime-profiler.cc
+++ b/deps/v8/src/runtime-profiler.cc
@@ -127,7 +127,7 @@ static void GetICCounts(Code* shared_code,
void RuntimeProfiler::Optimize(JSFunction* function, const char* reason) {
ASSERT(function->IsOptimizable());
- if (FLAG_trace_opt && function->PassesHydrogenFilter()) {
+ if (FLAG_trace_opt && function->PassesFilter(FLAG_hydrogen_filter)) {
PrintF("[marking ");
function->ShortPrint();
PrintF(" for recompilation, reason: %s", reason);
@@ -139,10 +139,18 @@ void RuntimeProfiler::Optimize(JSFunction* function, const char* reason) {
PrintF("]\n");
}
- if (FLAG_parallel_recompilation && !isolate_->bootstrapper()->IsActive()) {
- ASSERT(!function->IsMarkedForInstallingRecompiledCode());
+
+ if (FLAG_concurrent_recompilation && !isolate_->bootstrapper()->IsActive()) {
+ if (FLAG_concurrent_osr &&
+ isolate_->optimizing_compiler_thread()->IsQueuedForOSR(function)) {
+ // Do not attempt regular recompilation if we already queued this for OSR.
+ // TODO(yangguo): This is necessary so that we don't install optimized
+ // code on a function that is already optimized, since OSR and regular
+ // recompilation race. This goes away as soon as OSR becomes one-shot.
+ return;
+ }
ASSERT(!function->IsInRecompileQueue());
- function->MarkForParallelRecompilation();
+ function->MarkForConcurrentRecompilation();
} else {
// The next call to the function will trigger optimization.
function->MarkForLazyRecompilation();
@@ -172,23 +180,12 @@ void RuntimeProfiler::AttemptOnStackReplacement(JSFunction* function) {
// any back edge in any unoptimized frame will trigger on-stack
// replacement for that frame.
if (FLAG_trace_osr) {
- PrintF("[patching back edges in ");
+ PrintF("[OSR - patching back edges in ");
function->PrintName();
- PrintF(" for on-stack replacement]\n");
+ PrintF("]\n");
}
- // Get the interrupt stub code object to match against. We aren't
- // prepared to generate it, but we don't expect to have to.
- Code* interrupt_code = NULL;
- InterruptStub interrupt_stub;
- bool found_code = interrupt_stub.FindCodeInCache(&interrupt_code, isolate_);
- if (found_code) {
- Code* replacement_code =
- isolate_->builtins()->builtin(Builtins::kOnStackReplacement);
- Code* unoptimized_code = shared->code();
- Deoptimizer::PatchInterruptCode(
- unoptimized_code, interrupt_code, replacement_code);
- }
+ Deoptimizer::PatchInterruptCode(isolate_, shared->code());
}
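Deoptimizer::PatchInterruptCode(isolate_, shared->code()) now encapsulates what the removed block did by hand: fetch the interrupt stub and rewrite every loop back edge in the unoptimized code to call the OnStackReplacement builtin. A conceptual sketch, using the BackEdgeTableIterator that the old inline OSR code (removed further down in runtime.cc) iterated with; the real patching is architecture-specific:

void PatchBackEdgesSketch(Code* unoptimized) {
  for (FullCodeGenerator::BackEdgeTableIterator it(unoptimized);
       !it.Done(); it.Next()) {
    // Rewrite the interrupt-check call at it.pc_offset() so the next
    // time this back edge is taken, execution enters
    // Builtins::kOnStackReplacement instead of the interrupt stub.
  }
}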
@@ -229,11 +226,7 @@ void RuntimeProfiler::OptimizeNow() {
if (isolate_->DebuggerHasBreakPoints()) return;
- if (FLAG_parallel_recompilation) {
- // Take this as opportunity to process the optimizing compiler thread's
- // output queue so that it does not unnecessarily keep objects alive.
- isolate_->optimizing_compiler_thread()->InstallOptimizedFunctions();
- }
+ DisallowHeapAllocation no_gc;
// Run through the JavaScript frames and collect them. If we already
// have a sample of the function, we mark it for optimizations
@@ -283,7 +276,7 @@ void RuntimeProfiler::OptimizeNow() {
// Fall through and do a normal optimized compile as well.
} else if (!frame->is_optimized() &&
(function->IsMarkedForLazyRecompilation() ||
- function->IsMarkedForParallelRecompilation() ||
+ function->IsMarkedForConcurrentRecompilation() ||
function->IsOptimized())) {
// Attempt OSR if we are still running unoptimized code even though the
// function has long been marked or even already been optimized.
diff --git a/deps/v8/src/runtime.cc b/deps/v8/src/runtime.cc
index 5b96d1fd4..c09fb1d49 100644
--- a/deps/v8/src/runtime.cc
+++ b/deps/v8/src/runtime.cc
@@ -80,6 +80,7 @@
#include "unicode/locid.h"
#include "unicode/numfmt.h"
#include "unicode/numsys.h"
+#include "unicode/rbbi.h"
#include "unicode/smpdtfmt.h"
#include "unicode/timezone.h"
#include "unicode/uchar.h"
@@ -289,9 +290,7 @@ static Handle<Object> CreateObjectLiteralBoilerplate(
}
Handle<Object> result;
uint32_t element_index = 0;
- JSReceiver::StoreMode mode = value->IsJSObject()
- ? JSReceiver::FORCE_FIELD
- : JSReceiver::ALLOW_AS_CONSTANT;
+ StoreMode mode = value->IsJSObject() ? FORCE_FIELD : ALLOW_AS_CONSTANT;
if (key->IsInternalizedString()) {
if (Handle<String>::cast(key)->AsArrayIndex(&element_index)) {
// Array index as string (uint32).
@@ -686,10 +685,10 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetConstructTrap) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_Fix) {
- SealHandleScope shs(isolate);
+ HandleScope scope(isolate);
ASSERT(args.length() == 1);
- CONVERT_ARG_CHECKED(JSProxy, proxy, 0);
- proxy->Fix();
+ CONVERT_ARG_HANDLE_CHECKED(JSProxy, proxy, 0);
+ JSProxy::Fix(proxy);
return isolate->heap()->undefined_value();
}
@@ -995,7 +994,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_TypedArrayInitializeFromArrayLike) {
JSArrayBuffer::cast(typed_array->buffer())->backing_store());
size_t source_byte_offset =
NumberToSize(isolate, typed_array->byte_offset());
- OS::MemCopy(
+ memcpy(
buffer->backing_store(),
backing_store + source_byte_offset,
byte_length);
@@ -1663,6 +1662,14 @@ static bool CheckAccessException(Object* callback,
(access_type == v8::ACCESS_GET && info->all_can_read()) ||
(access_type == v8::ACCESS_SET && info->all_can_write());
}
+ if (callback->IsAccessorPair()) {
+ AccessorPair* info = AccessorPair::cast(callback);
+ return
+ (access_type == v8::ACCESS_HAS &&
+ (info->all_can_read() || info->all_can_write())) ||
+ (access_type == v8::ACCESS_GET && info->all_can_read()) ||
+ (access_type == v8::ACCESS_SET && info->all_can_write());
+ }
return false;
}
@@ -1944,6 +1951,35 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_EnableAccessChecks) {
}
+// Transform getter or setter into something DefineAccessor can handle.
+static Handle<Object> InstantiateAccessorComponent(Isolate* isolate,
+ Handle<Object> component) {
+ if (component->IsUndefined()) return isolate->factory()->null_value();
+ Handle<FunctionTemplateInfo> info =
+ Handle<FunctionTemplateInfo>::cast(component);
+ return Utils::OpenHandle(*Utils::ToLocal(info)->GetFunction());
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_SetAccessorProperty) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 6);
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Name, name, 1);
+ CONVERT_ARG_HANDLE_CHECKED(Object, getter, 2);
+ CONVERT_ARG_HANDLE_CHECKED(Object, setter, 3);
+ CONVERT_SMI_ARG_CHECKED(attribute, 4);
+ CONVERT_SMI_ARG_CHECKED(access_control, 5);
+ JSObject::DefineAccessor(object,
+ name,
+ InstantiateAccessorComponent(isolate, getter),
+ InstantiateAccessorComponent(isolate, setter),
+ static_cast<PropertyAttributes>(attribute),
+ static_cast<v8::AccessControl>(access_control));
+ return isolate->heap()->undefined_value();
+}
+
+
static Failure* ThrowRedeclarationError(Isolate* isolate,
const char* type,
Handle<String> name) {
@@ -2240,9 +2276,13 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeConstGlobal) {
LookupResult lookup(isolate);
global->LocalLookup(*name, &lookup);
if (!lookup.IsFound()) {
- return global->SetLocalPropertyIgnoreAttributes(*name,
- *value,
- attributes);
+ HandleScope handle_scope(isolate);
+ Handle<GlobalObject> global(isolate->context()->global_object());
+ RETURN_IF_EMPTY_HANDLE(
+ isolate,
+ JSObject::SetLocalPropertyIgnoreAttributes(global, name, value,
+ attributes));
+ return *value;
}
if (!lookup.IsReadOnly()) {
@@ -2459,41 +2499,41 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpConstructResult) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpInitializeObject) {
- SealHandleScope shs(isolate);
+ HandleScope scope(isolate);
DisallowHeapAllocation no_allocation;
ASSERT(args.length() == 5);
- CONVERT_ARG_CHECKED(JSRegExp, regexp, 0);
- CONVERT_ARG_CHECKED(String, source, 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSRegExp, regexp, 0);
+ CONVERT_ARG_HANDLE_CHECKED(String, source, 1);
// If source is the empty string we set it to "(?:)" instead as
// suggested by ECMA-262, 5th, section 15.10.4.1.
- if (source->length() == 0) source = isolate->heap()->query_colon_string();
+ if (source->length() == 0) source = isolate->factory()->query_colon_string();
- Object* global = args[2];
- if (!global->IsTrue()) global = isolate->heap()->false_value();
+ CONVERT_ARG_HANDLE_CHECKED(Object, global, 2);
+ if (!global->IsTrue()) global = isolate->factory()->false_value();
- Object* ignoreCase = args[3];
- if (!ignoreCase->IsTrue()) ignoreCase = isolate->heap()->false_value();
+ CONVERT_ARG_HANDLE_CHECKED(Object, ignoreCase, 3);
+ if (!ignoreCase->IsTrue()) ignoreCase = isolate->factory()->false_value();
- Object* multiline = args[4];
- if (!multiline->IsTrue()) multiline = isolate->heap()->false_value();
+ CONVERT_ARG_HANDLE_CHECKED(Object, multiline, 4);
+ if (!multiline->IsTrue()) multiline = isolate->factory()->false_value();
Map* map = regexp->map();
Object* constructor = map->constructor();
if (constructor->IsJSFunction() &&
JSFunction::cast(constructor)->initial_map() == map) {
// If we still have the original map, set in-object properties directly.
- regexp->InObjectPropertyAtPut(JSRegExp::kSourceFieldIndex, source);
+ regexp->InObjectPropertyAtPut(JSRegExp::kSourceFieldIndex, *source);
// Both true and false are immovable immortal objects so no need for write
// barrier.
regexp->InObjectPropertyAtPut(
- JSRegExp::kGlobalFieldIndex, global, SKIP_WRITE_BARRIER);
+ JSRegExp::kGlobalFieldIndex, *global, SKIP_WRITE_BARRIER);
regexp->InObjectPropertyAtPut(
- JSRegExp::kIgnoreCaseFieldIndex, ignoreCase, SKIP_WRITE_BARRIER);
+ JSRegExp::kIgnoreCaseFieldIndex, *ignoreCase, SKIP_WRITE_BARRIER);
regexp->InObjectPropertyAtPut(
- JSRegExp::kMultilineFieldIndex, multiline, SKIP_WRITE_BARRIER);
+ JSRegExp::kMultilineFieldIndex, *multiline, SKIP_WRITE_BARRIER);
regexp->InObjectPropertyAtPut(
JSRegExp::kLastIndexFieldIndex, Smi::FromInt(0), SKIP_WRITE_BARRIER);
- return regexp;
+ return *regexp;
}
// Map has changed, so use generic, but slower, method.
@@ -2501,34 +2541,19 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpInitializeObject) {
static_cast<PropertyAttributes>(READ_ONLY | DONT_ENUM | DONT_DELETE);
PropertyAttributes writable =
static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE);
- Heap* heap = isolate->heap();
- MaybeObject* result;
- result = regexp->SetLocalPropertyIgnoreAttributes(heap->source_string(),
- source,
- final);
- // TODO(jkummerow): Turn these back into ASSERTs when we can be certain
- // that it never fires in Release mode in the wild.
- CHECK(!result->IsFailure());
- result = regexp->SetLocalPropertyIgnoreAttributes(heap->global_string(),
- global,
- final);
- CHECK(!result->IsFailure());
- result =
- regexp->SetLocalPropertyIgnoreAttributes(heap->ignore_case_string(),
- ignoreCase,
- final);
- CHECK(!result->IsFailure());
- result = regexp->SetLocalPropertyIgnoreAttributes(heap->multiline_string(),
- multiline,
- final);
- CHECK(!result->IsFailure());
- result =
- regexp->SetLocalPropertyIgnoreAttributes(heap->last_index_string(),
- Smi::FromInt(0),
- writable);
- CHECK(!result->IsFailure());
- USE(result);
- return regexp;
+ Handle<Object> zero(Smi::FromInt(0), isolate);
+ Factory* factory = isolate->factory();
+ CHECK_NOT_EMPTY_HANDLE(isolate, JSObject::SetLocalPropertyIgnoreAttributes(
+ regexp, factory->source_string(), source, final));
+ CHECK_NOT_EMPTY_HANDLE(isolate, JSObject::SetLocalPropertyIgnoreAttributes(
+ regexp, factory->global_string(), global, final));
+ CHECK_NOT_EMPTY_HANDLE(isolate, JSObject::SetLocalPropertyIgnoreAttributes(
+ regexp, factory->ignore_case_string(), ignoreCase, final));
+ CHECK_NOT_EMPTY_HANDLE(isolate, JSObject::SetLocalPropertyIgnoreAttributes(
+ regexp, factory->multiline_string(), multiline, final));
+ CHECK_NOT_EMPTY_HANDLE(isolate, JSObject::SetLocalPropertyIgnoreAttributes(
+ regexp, factory->last_index_string(), zero, writable));
+ return *regexp;
}
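This function is a compact example of the handlification recipe applied across runtime.cc in this change: CONVERT_ARG_CHECKED becomes CONVERT_ARG_HANDLE_CHECKED, heap-string accessors move to the factory, and MaybeObject* failure checks become empty-handle checks. The before/after shape, condensed:

// Before (raw pointers; a GC inside the call can move `regexp`):
//   MaybeObject* result = regexp->SetLocalPropertyIgnoreAttributes(
//       heap->source_string(), source, final);
//   CHECK(!result->IsFailure());
//
// After (handles stay valid across GC; an empty handle signals failure):
//   CHECK_NOT_EMPTY_HANDLE(isolate,
//       JSObject::SetLocalPropertyIgnoreAttributes(
//           regexp, factory->source_string(), source, final));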
@@ -2585,8 +2610,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_IsClassicModeFunction) {
if (!callable->IsJSFunction()) {
HandleScope scope(isolate);
bool threw = false;
- Handle<Object> delegate =
- Execution::TryGetFunctionDelegate(Handle<JSReceiver>(callable), &threw);
+ Handle<Object> delegate = Execution::TryGetFunctionDelegate(
+ isolate, Handle<JSReceiver>(callable), &threw);
if (threw) return Failure::Exception();
callable = JSFunction::cast(*delegate);
}
@@ -2604,8 +2629,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetDefaultReceiver) {
if (!callable->IsJSFunction()) {
HandleScope scope(isolate);
bool threw = false;
- Handle<Object> delegate =
- Execution::TryGetFunctionDelegate(Handle<JSReceiver>(callable), &threw);
+ Handle<Object> delegate = Execution::TryGetFunctionDelegate(
+ isolate, Handle<JSReceiver>(callable), &threw);
if (threw) return Failure::Exception();
callable = JSFunction::cast(*delegate);
}
@@ -2780,16 +2805,13 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionSetLength) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionSetPrototype) {
- SealHandleScope shs(isolate);
+ HandleScope scope(isolate);
ASSERT(args.length() == 2);
- CONVERT_ARG_CHECKED(JSFunction, fun, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, fun, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, value, 1);
ASSERT(fun->should_have_prototype());
- Object* obj;
- { MaybeObject* maybe_obj =
- Accessors::FunctionSetPrototype(fun, args[1], NULL);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
+ Accessors::FunctionSetPrototype(fun, value);
return args[0]; // return TOS
}
@@ -4689,6 +4711,19 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToPrecision) {
}
+RUNTIME_FUNCTION(MaybeObject*, Runtime_IsValidSmi) {
+ HandleScope shs(isolate);
+ ASSERT(args.length() == 1);
+
+ CONVERT_NUMBER_CHECKED(int32_t, number, Int32, args[0]);
+ if (Smi::IsValid(number)) {
+ return isolate->heap()->true_value();
+ } else {
+ return isolate->heap()->false_value();
+ }
+}
+
+
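Runtime_IsValidSmi simply exposes Smi::IsValid to JavaScript-visible code. A rough standalone sketch of the test it performs, assuming the 31-bit smi payload of 32-bit targets (the real bounds are Smi::kMinValue and Smi::kMaxValue, and are wider on 64-bit builds):

#include <cstdint>

bool IsValidSmi31(int32_t value) {
  const int32_t kSmiMax = (1 << 30) - 1;  // 31-bit payload assumption
  const int32_t kSmiMin = -(1 << 30);
  return value >= kSmiMin && value <= kSmiMax;
}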
// Returns a single character string where first character equals
// string->Get(index).
static Handle<Object> GetCharAt(Handle<String> string, uint32_t index) {
@@ -4728,10 +4763,10 @@ MaybeObject* Runtime::GetElementOrCharAt(Isolate* isolate,
}
if (object->IsString() || object->IsNumber() || object->IsBoolean()) {
- return object->GetPrototype(isolate)->GetElement(index);
+ return object->GetPrototype(isolate)->GetElement(isolate, index);
}
- return object->GetElement(index);
+ return object->GetElement(isolate, index);
}
@@ -4753,7 +4788,7 @@ MaybeObject* Runtime::HasObjectProperty(Isolate* isolate,
} else {
bool has_pending_exception = false;
Handle<Object> converted =
- Execution::ToString(key, &has_pending_exception);
+ Execution::ToString(isolate, key, &has_pending_exception);
if (has_pending_exception) return Failure::Exception();
name = Handle<Name>::cast(converted);
}
@@ -4795,7 +4830,7 @@ MaybeObject* Runtime::GetObjectProperty(Isolate* isolate,
} else {
bool has_pending_exception = false;
Handle<Object> converted =
- Execution::ToString(key, &has_pending_exception);
+ Execution::ToString(isolate, key, &has_pending_exception);
if (has_pending_exception) return Failure::Exception();
name = Handle<Name>::cast(converted);
}
@@ -5018,9 +5053,10 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DefineOrRedefineDataProperty) {
JSObject::NormalizeProperties(js_object, CLEAR_INOBJECT_PROPERTIES, 0);
// Use IgnoreAttributes version since a readonly property may be
// overridden and SetProperty does not allow this.
- return js_object->SetLocalPropertyIgnoreAttributes(*name,
- *obj_value,
- attr);
+ Handle<Object> result = JSObject::SetLocalPropertyIgnoreAttributes(
+ js_object, name, obj_value, attr);
+ RETURN_IF_EMPTY_HANDLE(isolate, result);
+ return *result;
}
return Runtime::ForceSetObjectProperty(isolate,
@@ -5093,7 +5129,7 @@ MaybeObject* Runtime::SetObjectProperty(Isolate* isolate,
if (object->IsJSProxy()) {
bool has_pending_exception = false;
Handle<Object> name = key->IsSymbol()
- ? key : Execution::ToString(key, &has_pending_exception);
+ ? key : Execution::ToString(isolate, key, &has_pending_exception);
if (has_pending_exception) return Failure::Exception();
return JSProxy::cast(*object)->SetProperty(
Name::cast(*name), *value, attr, strict_mode);
@@ -5122,7 +5158,8 @@ MaybeObject* Runtime::SetObjectProperty(Isolate* isolate,
if (js_object->HasExternalArrayElements()) {
if (!value->IsNumber() && !value->IsUndefined()) {
bool has_exception;
- Handle<Object> number = Execution::ToNumber(value, &has_exception);
+ Handle<Object> number =
+ Execution::ToNumber(isolate, value, &has_exception);
if (has_exception) return Failure::Exception();
value = number;
}
@@ -5141,7 +5178,8 @@ MaybeObject* Runtime::SetObjectProperty(Isolate* isolate,
if (js_object->HasExternalArrayElements()) {
if (!value->IsNumber() && !value->IsUndefined()) {
bool has_exception;
- Handle<Object> number = Execution::ToNumber(value, &has_exception);
+ Handle<Object> number =
+ Execution::ToNumber(isolate, value, &has_exception);
if (has_exception) return Failure::Exception();
value = number;
}
@@ -5158,7 +5196,8 @@ MaybeObject* Runtime::SetObjectProperty(Isolate* isolate,
// Call-back into JavaScript to convert the key to a string.
bool has_pending_exception = false;
- Handle<Object> converted = Execution::ToString(key, &has_pending_exception);
+ Handle<Object> converted =
+ Execution::ToString(isolate, key, &has_pending_exception);
if (has_pending_exception) return Failure::Exception();
Handle<String> name = Handle<String>::cast(converted);
@@ -5203,13 +5242,17 @@ MaybeObject* Runtime::ForceSetObjectProperty(Isolate* isolate,
index, *value, attr, kNonStrictMode, false, DEFINE_PROPERTY);
} else {
if (name->IsString()) Handle<String>::cast(name)->TryFlatten();
- return js_object->SetLocalPropertyIgnoreAttributes(*name, *value, attr);
+ Handle<Object> result = JSObject::SetLocalPropertyIgnoreAttributes(
+ js_object, name, value, attr);
+ RETURN_IF_EMPTY_HANDLE(isolate, result);
+ return *result;
}
}
// Call-back into JavaScript to convert the key to a string.
bool has_pending_exception = false;
- Handle<Object> converted = Execution::ToString(key, &has_pending_exception);
+ Handle<Object> converted =
+ Execution::ToString(isolate, key, &has_pending_exception);
if (has_pending_exception) return Failure::Exception();
Handle<String> name = Handle<String>::cast(converted);
@@ -5217,7 +5260,10 @@ MaybeObject* Runtime::ForceSetObjectProperty(Isolate* isolate,
return js_object->SetElement(
index, *value, attr, kNonStrictMode, false, DEFINE_PROPERTY);
} else {
- return js_object->SetLocalPropertyIgnoreAttributes(*name, *value, attr);
+ Handle<Object> result = JSObject::SetLocalPropertyIgnoreAttributes(
+ js_object, name, value, attr);
+ RETURN_IF_EMPTY_HANDLE(isolate, result);
+ return *result;
}
}
@@ -5252,7 +5298,8 @@ MaybeObject* Runtime::DeleteObjectProperty(Isolate* isolate,
} else {
// Call-back into JavaScript to convert the key to a string.
bool has_pending_exception = false;
- Handle<Object> converted = Execution::ToString(key, &has_pending_exception);
+ Handle<Object> converted = Execution::ToString(
+ isolate, key, &has_pending_exception);
if (has_pending_exception) return Failure::Exception();
name = Handle<String>::cast(converted);
}
@@ -5419,10 +5466,11 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugPrepareStepInIfStepping) {
// Set a local property, even if it is READ_ONLY. If the property does not
// exist, it will be added with attributes NONE.
RUNTIME_FUNCTION(MaybeObject*, Runtime_IgnoreAttributesAndSetProperty) {
- SealHandleScope shs(isolate);
+ HandleScope scope(isolate);
RUNTIME_ASSERT(args.length() == 3 || args.length() == 4);
- CONVERT_ARG_CHECKED(JSObject, object, 0);
- CONVERT_ARG_CHECKED(Name, name, 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Name, name, 1);
+ CONVERT_ARG_HANDLE_CHECKED(Object, value, 2);
// Compute attributes.
PropertyAttributes attributes = NONE;
if (args.length() == 4) {
@@ -5432,9 +5480,10 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_IgnoreAttributesAndSetProperty) {
(unchecked_value & ~(READ_ONLY | DONT_ENUM | DONT_DELETE)) == 0);
attributes = static_cast<PropertyAttributes>(unchecked_value);
}
-
- return object->
- SetLocalPropertyIgnoreAttributes(name, args[2], attributes);
+ Handle<Object> result = JSObject::SetLocalPropertyIgnoreAttributes(
+ object, name, value, attributes);
+ RETURN_IF_EMPTY_HANDLE(isolate, result);
+ return *result;
}
@@ -5844,7 +5893,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetArgumentsProperty) {
HandleScope scope(isolate);
bool exception = false;
Handle<Object> converted =
- Execution::ToString(args.at<Object>(0), &exception);
+ Execution::ToString(isolate, args.at<Object>(0), &exception);
if (exception) return Failure::Exception();
Handle<String> key = Handle<String>::cast(converted);
@@ -5853,7 +5902,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetArgumentsProperty) {
if (index < n) {
return frame->GetParameter(index);
} else {
- return isolate->initial_object_prototype()->GetElement(index);
+ return isolate->initial_object_prototype()->GetElement(isolate, index);
}
}
@@ -6500,8 +6549,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringSplit) {
int part_count = indices.length();
Handle<JSArray> result = isolate->factory()->NewJSArray(part_count);
- MaybeObject* maybe_result = result->EnsureCanContainHeapObjectElements();
- if (maybe_result->IsFailure()) return maybe_result;
+ JSObject::EnsureCanContainHeapObjectElements(result);
result->set_length(Smi::FromInt(part_count));
ASSERT(result->HasFastObjectElements());
@@ -6628,7 +6676,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NewStringWrapper) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(String, value, 0);
- return value->ToObject();
+ return value->ToObject(isolate);
}
@@ -6894,21 +6942,20 @@ static inline void StringBuilderConcatHelper(String* special,
RUNTIME_FUNCTION(MaybeObject*, Runtime_StringBuilderConcat) {
- SealHandleScope shs(isolate);
+ HandleScope scope(isolate);
ASSERT(args.length() == 3);
- CONVERT_ARG_CHECKED(JSArray, array, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSArray, array, 0);
if (!args[1]->IsSmi()) {
isolate->context()->mark_out_of_memory();
return Failure::OutOfMemoryException(0x14);
}
int array_length = args.smi_at(1);
- CONVERT_ARG_CHECKED(String, special, 2);
+ CONVERT_ARG_HANDLE_CHECKED(String, special, 2);
// This assumption is used by the slice encoding in one or two smis.
ASSERT(Smi::kMaxValue >= String::kMaxLength);
- MaybeObject* maybe_result = array->EnsureCanContainHeapObjectElements();
- if (maybe_result->IsFailure()) return maybe_result;
+ JSObject::EnsureCanContainHeapObjectElements(array);
int special_length = special->length();
if (!array->HasFastObjectElements()) {
@@ -6990,7 +7037,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringBuilderConcat) {
if (!maybe_object->ToObject(&object)) return maybe_object;
}
SeqOneByteString* answer = SeqOneByteString::cast(object);
- StringBuilderConcatHelper(special,
+ StringBuilderConcatHelper(*special,
answer->GetChars(),
fixed_array,
array_length);
@@ -7001,7 +7048,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringBuilderConcat) {
if (!maybe_object->ToObject(&object)) return maybe_object;
}
SeqTwoByteString* answer = SeqTwoByteString::cast(object);
- StringBuilderConcatHelper(special,
+ StringBuilderConcatHelper(*special,
answer->GetChars(),
fixed_array,
array_length);
@@ -7477,7 +7524,7 @@ static Object* FlatStringCompare(String* x, String* y) {
result = (r < 0) ? Smi::FromInt(LESS) : Smi::FromInt(GREATER);
}
ASSERT(result ==
- StringCharacterStreamCompare(Isolate::Current()->runtime_state(), x, y));
+ StringCharacterStreamCompare(x->GetIsolate()->runtime_state(), x, y));
return result;
}
@@ -7918,6 +7965,20 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NewStrictArgumentsFast) {
}
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NewClosureFromStubFailure) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(SharedFunctionInfo, shared, 0);
+ Handle<Context> context(isolate->context());
+ PretenureFlag pretenure_flag = NOT_TENURED;
+ Handle<JSFunction> result =
+ isolate->factory()->NewFunctionFromSharedFunctionInfo(shared,
+ context,
+ pretenure_flag);
+ return *result;
+}
+
+
RUNTIME_FUNCTION(MaybeObject*, Runtime_NewClosure) {
HandleScope scope(isolate);
ASSERT(args.length() == 3);
@@ -8091,7 +8152,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NewObjectFromBound) {
if (!bound_function->IsJSFunction()) {
bool exception_thrown;
- bound_function = Execution::TryGetConstructorDelegate(bound_function,
+ bound_function = Execution::TryGetConstructorDelegate(isolate,
+ bound_function,
&exception_thrown);
if (exception_thrown) return Failure::Exception();
}
@@ -8252,9 +8314,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_LazyRecompile) {
return function->code();
}
function->shared()->code()->set_profiler_ticks(0);
- if (JSFunction::CompileOptimized(function,
- BailoutId::None(),
- CLEAR_EXCEPTION)) {
+ if (JSFunction::CompileOptimized(function, CLEAR_EXCEPTION)) {
return function->code();
}
if (FLAG_trace_opt) {
@@ -8267,7 +8327,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_LazyRecompile) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_ParallelRecompile) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_ConcurrentRecompile) {
HandleScope handle_scope(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
@@ -8276,22 +8336,14 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ParallelRecompile) {
return isolate->heap()->undefined_value();
}
function->shared()->code()->set_profiler_ticks(0);
- ASSERT(FLAG_parallel_recompilation);
- Compiler::RecompileParallel(function);
+ ASSERT(FLAG_concurrent_recompilation);
+ if (!Compiler::RecompileConcurrent(function)) {
+ function->ReplaceCode(function->shared()->code());
+ }
return isolate->heap()->undefined_value();
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_InstallRecompiledCode) {
- HandleScope handle_scope(isolate);
- ASSERT(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
- ASSERT(V8::UseCrankshaft() && FLAG_parallel_recompilation);
- isolate->optimizing_compiler_thread()->InstallOptimizedFunctions();
- return function->code();
-}
-
-
class ActivationsFinder : public ThreadVisitor {
public:
Code* code_;
@@ -8347,6 +8399,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NotifyDeoptimized) {
JavaScriptFrame* frame = it.frame();
RUNTIME_ASSERT(frame->function()->IsJSFunction());
+ ASSERT(frame->function() == *function);
// Avoid doing too much work when running with --always-opt and keep
// the optimized code around.
@@ -8426,9 +8479,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_RunningInSimulator) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_IsParallelRecompilationSupported) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_IsConcurrentRecompilationSupported) {
HandleScope scope(isolate);
- return FLAG_parallel_recompilation
+ return FLAG_concurrent_recompilation
? isolate->heap()->true_value() : isolate->heap()->false_value();
}
@@ -8446,12 +8499,16 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_OptimizeFunctionOnNextCall) {
unoptimized->kind() == Code::FUNCTION) {
CONVERT_ARG_HANDLE_CHECKED(String, type, 1);
if (type->IsOneByteEqualTo(STATIC_ASCII_VECTOR("osr"))) {
- for (int i = 0; i <= Code::kMaxLoopNestingMarker; i++) {
+ // Start patching from the currently patched loop nesting level.
+ int current_level = unoptimized->allow_osr_at_loop_nesting_level();
+ ASSERT(Deoptimizer::VerifyInterruptCode(
+ isolate, unoptimized, current_level));
+ for (int i = current_level + 1; i <= Code::kMaxLoopNestingMarker; i++) {
unoptimized->set_allow_osr_at_loop_nesting_level(i);
isolate->runtime_profiler()->AttemptOnStackReplacement(*function);
}
- } else if (type->IsOneByteEqualTo(STATIC_ASCII_VECTOR("parallel"))) {
- function->MarkForParallelRecompilation();
+ } else if (type->IsOneByteEqualTo(STATIC_ASCII_VECTOR("concurrent"))) {
+ function->MarkForConcurrentRecompilation();
}
}
@@ -8472,7 +8529,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NeverOptimizeFunction) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_GetOptimizationStatus) {
HandleScope scope(isolate);
RUNTIME_ASSERT(args.length() == 1 || args.length() == 2);
- if (!V8::UseCrankshaft()) {
+ if (!isolate->use_crankshaft()) {
return Smi::FromInt(4); // 4 == "never".
}
bool sync_with_compiler_thread = true;
@@ -8483,9 +8540,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetOptimizationStatus) {
}
}
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
- if (FLAG_parallel_recompilation && sync_with_compiler_thread) {
- while (function->IsInRecompileQueue() ||
- function->IsMarkedForInstallingRecompiledCode()) {
+ if (FLAG_concurrent_recompilation && sync_with_compiler_thread) {
+ while (function->IsInRecompileQueue()) {
isolate->optimizing_compiler_thread()->InstallOptimizedFunctions();
OS::Sleep(50);
}
@@ -8512,115 +8568,125 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetOptimizationCount) {
}
+static bool IsSuitableForOnStackReplacement(Isolate* isolate,
+ Handle<JSFunction> function,
+ Handle<Code> unoptimized) {
+ // Only code that is still optimizable is eligible for OSR.
+ if (!unoptimized->optimizable()) return false;
+ // If we are trying to do OSR when there are already optimized
+ // activations of the function, it means (a) the function is directly or
+ // indirectly recursive and (b) an optimized invocation has been
+ // deoptimized so that we are currently in an unoptimized activation.
+ // Check for optimized activations of this function.
+ for (JavaScriptFrameIterator it(isolate); !it.done(); it.Advance()) {
+ JavaScriptFrame* frame = it.frame();
+ if (frame->is_optimized() && frame->function() == *function) return false;
+ }
+
+ return true;
+}
+
+
RUNTIME_FUNCTION(MaybeObject*, Runtime_CompileForOnStackReplacement) {
HandleScope scope(isolate);
- ASSERT(args.length() == 1);
+ ASSERT(args.length() == 2);
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
+ CONVERT_NUMBER_CHECKED(uint32_t, pc_offset, Uint32, args[1]);
+ Handle<Code> unoptimized(function->shared()->code(), isolate);
+
+#ifdef DEBUG
+ JavaScriptFrameIterator it(isolate);
+ JavaScriptFrame* frame = it.frame();
+ ASSERT_EQ(frame->function(), *function);
+ ASSERT_EQ(frame->LookupCode(), *unoptimized);
+ ASSERT(unoptimized->contains(frame->pc()));
+
+ ASSERT(pc_offset ==
+ static_cast<uint32_t>(frame->pc() - unoptimized->instruction_start()));
+#endif // DEBUG
// We're not prepared to handle a function with arguments object.
ASSERT(!function->shared()->uses_arguments());
- // We have hit a back edge in an unoptimized frame for a function that was
- // selected for on-stack replacement. Find the unoptimized code object.
- Handle<Code> unoptimized(function->shared()->code(), isolate);
- // Keep track of whether we've succeeded in optimizing.
- bool succeeded = unoptimized->optimizable();
- if (succeeded) {
- // If we are trying to do OSR when there are already optimized
- // activations of the function, it means (a) the function is directly or
- // indirectly recursive and (b) an optimized invocation has been
- // deoptimized so that we are currently in an unoptimized activation.
- // Check for optimized activations of this function.
- JavaScriptFrameIterator it(isolate);
- while (succeeded && !it.done()) {
- JavaScriptFrame* frame = it.frame();
- succeeded = !frame->is_optimized() || frame->function() != *function;
- it.Advance();
+ Handle<Code> result = Handle<Code>::null();
+ BailoutId ast_id = BailoutId::None();
+
+ if (FLAG_concurrent_recompilation && FLAG_concurrent_osr) {
+ if (isolate->optimizing_compiler_thread()->
+ IsQueuedForOSR(function, pc_offset)) {
+ // Still waiting for the optimizing compiler thread to finish. Carry on.
+ if (FLAG_trace_osr) {
+ PrintF("[COSR - polling recompile tasks for ");
+ function->PrintName();
+ PrintF("]\n");
+ }
+ return NULL;
}
- }
- BailoutId ast_id = BailoutId::None();
- if (succeeded) {
- // The top JS function is this one, the PC is somewhere in the
- // unoptimized code.
- JavaScriptFrameIterator it(isolate);
- JavaScriptFrame* frame = it.frame();
- ASSERT(frame->function() == *function);
- ASSERT(frame->LookupCode() == *unoptimized);
- ASSERT(unoptimized->contains(frame->pc()));
-
- // Use linear search of the unoptimized code's back edge table to find
- // the AST id matching the PC.
- uint32_t target_pc_offset =
- static_cast<uint32_t>(frame->pc() - unoptimized->instruction_start());
- uint32_t loop_depth = 0;
-
- for (FullCodeGenerator::BackEdgeTableIterator back_edges(*unoptimized);
- !back_edges.Done();
- back_edges.Next()) {
- if (back_edges.pc_offset() == target_pc_offset) {
- ast_id = back_edges.ast_id();
- loop_depth = back_edges.loop_depth();
- break;
+ OptimizingCompiler* compiler = isolate->optimizing_compiler_thread()->
+ FindReadyOSRCandidate(function, pc_offset);
+
+ if (compiler == NULL) {
+ if (IsSuitableForOnStackReplacement(isolate, function, unoptimized) &&
+ Compiler::RecompileConcurrent(function, pc_offset)) {
+ if (function->IsMarkedForLazyRecompilation() ||
+ function->IsMarkedForConcurrentRecompilation()) {
+ // Prevent regular recompilation if we queue this for OSR.
+ // TODO(yangguo): remove this as soon as OSR becomes one-shot.
+ function->ReplaceCode(function->shared()->code());
+ }
+ return NULL;
}
+ // Fall through to the end in case of failure.
+ } else {
+ // TODO(titzer): don't install the OSR code into the function.
+ ast_id = compiler->info()->osr_ast_id();
+ result = Compiler::InstallOptimizedCode(compiler);
}
+ } else if (IsSuitableForOnStackReplacement(isolate, function, unoptimized)) {
+ ast_id = unoptimized->TranslatePcOffsetToAstId(pc_offset);
ASSERT(!ast_id.IsNone());
-
if (FLAG_trace_osr) {
- PrintF("[replacing on-stack at AST id %d, loop depth %d in ",
- ast_id.ToInt(), loop_depth);
+ PrintF("[OSR - replacing at AST id %d in ", ast_id.ToInt());
function->PrintName();
PrintF("]\n");
}
+ // Attempt OSR compilation.
+ result = JSFunction::CompileOsr(function, ast_id, CLEAR_EXCEPTION);
+ }
- // Try to compile the optimized code. A true return value from
- // CompileOptimized means that compilation succeeded, not necessarily
- // that optimization succeeded.
- if (JSFunction::CompileOptimized(function, ast_id, CLEAR_EXCEPTION) &&
- function->IsOptimized()) {
- DeoptimizationInputData* data = DeoptimizationInputData::cast(
- function->code()->deoptimization_data());
- if (data->OsrPcOffset()->value() >= 0) {
- if (FLAG_trace_osr) {
- PrintF("[on-stack replacement offset %d in optimized code]\n",
- data->OsrPcOffset()->value());
- }
- ASSERT(BailoutId(data->OsrAstId()->value()) == ast_id);
- } else {
- // We may never generate the desired OSR entry if we emit an
- // early deoptimize.
- succeeded = false;
+ // Revert the patched interrupt now, regardless of whether OSR succeeds.
+ Deoptimizer::RevertInterruptCode(isolate, *unoptimized);
+
+ // Check whether we ended up with usable optimized code.
+ if (!result.is_null() && result->kind() == Code::OPTIMIZED_FUNCTION) {
+ DeoptimizationInputData* data =
+ DeoptimizationInputData::cast(result->deoptimization_data());
+
+ if (data->OsrPcOffset()->value() >= 0) {
+ ASSERT(BailoutId(data->OsrAstId()->value()) == ast_id);
+ if (FLAG_trace_osr) {
+ PrintF("[OSR - entry at AST id %d, offset %d in optimized code]\n",
+ ast_id.ToInt(), data->OsrPcOffset()->value());
}
- } else {
- succeeded = false;
+ // TODO(titzer): this is a massive hack to make the deopt counts
+ // match. Fix heuristics for reenabling optimizations!
+ function->shared()->increment_deopt_count();
+ return *result;
}
}
- // Revert to the original interrupt calls in the original unoptimized code.
if (FLAG_trace_osr) {
- PrintF("[restoring original interrupt calls in ");
+ PrintF("[OSR - optimization failed for ");
function->PrintName();
PrintF("]\n");
}
- InterruptStub interrupt_stub;
- Handle<Code> interrupt_code = interrupt_stub.GetCode(isolate);
- Handle<Code> replacement_code = isolate->builtins()->OnStackReplacement();
- Deoptimizer::RevertInterruptCode(*unoptimized,
- *interrupt_code,
- *replacement_code);
-
- // If the optimization attempt succeeded, return the AST id tagged as a
- // smi. This tells the builtin that we need to translate the unoptimized
- // frame to an optimized one.
- if (succeeded) {
- ASSERT(function->code()->kind() == Code::OPTIMIZED_FUNCTION);
- return Smi::FromInt(ast_id.ToInt());
- } else {
- if (function->IsMarkedForLazyRecompilation()) {
- function->ReplaceCode(function->shared()->code());
- }
- return Smi::FromInt(-1);
+
+ if (function->IsMarkedForLazyRecompilation() ||
+ function->IsMarkedForConcurrentRecompilation()) {
+ function->ReplaceCode(function->shared()->code());
}
+ return NULL;
}
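The rewritten Runtime_CompileForOnStackReplacement, condensed to its control flow (sketch, not code):

// if (concurrent recompilation && concurrent OSR) {
//   still queued for this pc_offset?  -> return NULL, keep running
//   finished candidate ready?         -> install it, take its osr_ast_id
//   else if suitable                  -> queue a concurrent OSR job,
//                                        return NULL
// } else if (suitable) {
//   ast_id = unoptimized->TranslatePcOffsetToAstId(pc_offset);
//   result = JSFunction::CompileOsr(function, ast_id, CLEAR_EXCEPTION);
// }
// Deoptimizer::RevertInterruptCode(...);  // always un-patch back edges
// return result has a usable OsrPcOffset ? *result : NULL;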
@@ -8679,8 +8745,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Call) {
bool threw;
Handle<JSReceiver> hfun(fun);
Handle<Object> hreceiver(receiver, isolate);
- Handle<Object> result =
- Execution::Call(hfun, hreceiver, argc, argv, &threw, true);
+ Handle<Object> result = Execution::Call(
+ isolate, hfun, hreceiver, argc, argv, &threw, true);
if (threw) return Failure::Exception();
return *result;
@@ -8710,12 +8776,12 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Apply) {
}
for (int i = 0; i < argc; ++i) {
- argv[i] = Object::GetElement(arguments, offset + i);
+ argv[i] = Object::GetElement(isolate, arguments, offset + i);
}
bool threw;
- Handle<Object> result =
- Execution::Call(fun, receiver, argc, argv, &threw, true);
+ Handle<Object> result = Execution::Call(
+ isolate, fun, receiver, argc, argv, &threw, true);
if (threw) return Failure::Exception();
return *result;
@@ -8726,7 +8792,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFunctionDelegate) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
RUNTIME_ASSERT(!args[0]->IsJSFunction());
- return *Execution::GetFunctionDelegate(args.at<Object>(0));
+ return *Execution::GetFunctionDelegate(isolate, args.at<Object>(0));
}
@@ -8734,7 +8800,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetConstructorDelegate) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
RUNTIME_ASSERT(!args[0]->IsJSFunction());
- return *Execution::GetConstructorDelegate(args.at<Object>(0));
+ return *Execution::GetConstructorDelegate(isolate, args.at<Object>(0));
}
@@ -8783,7 +8849,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_PushWithContext) {
extension_object = JSReceiver::cast(args[0]);
} else {
// Convert the object to a proper JavaScript object.
- MaybeObject* maybe_js_object = args[0]->ToObject();
+ MaybeObject* maybe_js_object = args[0]->ToObject(isolate);
if (!maybe_js_object->To(&extension_object)) {
if (Failure::cast(maybe_js_object)->IsInternalError()) {
HandleScope scope(isolate);
@@ -8937,7 +9003,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareModules) {
IsImmutableVariableMode(mode) ? FROZEN : SEALED;
Handle<AccessorInfo> info =
Accessors::MakeModuleExport(name, index, attr);
- Handle<Object> result = SetAccessor(module, info);
+ Handle<Object> result = JSObject::SetAccessor(module, info);
ASSERT(!(result.is_null() || result->IsUndefined()));
USE(result);
break;
@@ -9303,7 +9369,6 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StackGuard) {
// First check if this is a real stack overflow.
if (isolate->stack_guard()->IsStackOverflow()) {
- SealHandleScope shs(isolate);
return isolate->StackOverflow();
}
@@ -9311,6 +9376,23 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StackGuard) {
}
+RUNTIME_FUNCTION(MaybeObject*, Runtime_TryInstallRecompiledCode) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
+
+ // First check if this is a real stack overflow.
+ if (isolate->stack_guard()->IsStackOverflow()) {
+ SealHandleScope shs(isolate);
+ return isolate->StackOverflow();
+ }
+
+ isolate->optimizing_compiler_thread()->InstallOptimizedFunctions();
+ return (function->IsOptimized()) ? function->code()
+ : function->shared()->code();
+}
+
+
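Runtime_TryInstallRecompiledCode is the stack-guard half of the concurrent pipeline: when the compiler thread finishes a job it interrupts the running JavaScript thread, which lands here and drains the output queue. The hand-off, sketched (the RequestInstallCode name is an assumption for illustration, not taken from this diff):

// Compiler thread:
//   output_queue_.Enqueue(finished_job);
//   isolate_->stack_guard()->RequestInstallCode();  // hypothetical name
//
// JavaScript thread, at its next stack-guard check:
//   Runtime_TryInstallRecompiledCode
//     -> optimizing_compiler_thread()->InstallOptimizedFunctions()
//     -> returns function->code() if now optimized, else shared()->code()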
RUNTIME_FUNCTION(MaybeObject*, Runtime_Interrupt) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 0);
@@ -9423,9 +9505,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DateParseString) {
CONVERT_ARG_HANDLE_CHECKED(JSArray, output, 1);
- MaybeObject* maybe_result_array =
- output->EnsureCanContainHeapObjectElements();
- if (maybe_result_array->IsFailure()) return maybe_result_array;
+ JSObject::EnsureCanContainHeapObjectElements(output);
RUNTIME_ASSERT(output->HasFastObjectElements());
DisallowHeapAllocation no_gc;
@@ -10097,7 +10177,7 @@ static bool IterateElements(Isolate* isolate,
} else if (receiver->HasElement(j)) {
// Call GetElement on receiver, not its prototype, or getters won't
// have the correct receiver.
- element_value = Object::GetElement(receiver, j);
+ element_value = Object::GetElement(isolate, receiver, j);
RETURN_IF_EMPTY_HANDLE_VALUE(isolate, element_value, false);
visitor->visit(j, element_value);
}
@@ -10122,7 +10202,8 @@ static bool IterateElements(Isolate* isolate,
} else if (receiver->HasElement(j)) {
// Call GetElement on receiver, not its prototype, or getters won't
// have the correct receiver.
- Handle<Object> element_value = Object::GetElement(receiver, j);
+ Handle<Object> element_value =
+ Object::GetElement(isolate, receiver, j);
RETURN_IF_EMPTY_HANDLE_VALUE(isolate, element_value, false);
visitor->visit(j, element_value);
}
@@ -10141,7 +10222,7 @@ static bool IterateElements(Isolate* isolate,
while (j < n) {
HandleScope loop_scope(isolate);
uint32_t index = indices[j];
- Handle<Object> element = Object::GetElement(receiver, index);
+ Handle<Object> element = Object::GetElement(isolate, receiver, index);
RETURN_IF_EMPTY_HANDLE_VALUE(isolate, element, false);
visitor->visit(index, element);
// Skip to next different index (i.e., omit duplicates).
@@ -10521,7 +10602,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_LookupAccessor) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugBreak) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 0);
- return Execution::DebugBreakHelper();
+ return Execution::DebugBreakHelper(isolate);
}
@@ -12021,6 +12102,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetStepInPositions) {
// Get the frame where the debugging is performed.
StackFrame::Id id = UnwrapFrameId(wrapped_id);
JavaScriptFrameIterator frame_it(isolate, id);
+ RUNTIME_ASSERT(!frame_it.done());
+
JavaScriptFrame* frame = frame_it.frame();
Handle<JSFunction> fun =
@@ -12040,11 +12123,28 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetStepInPositions) {
BreakLocationIterator break_location_iterator(debug_info,
ALL_BREAK_LOCATIONS);
- break_location_iterator.FindBreakLocationFromAddress(frame->pc());
+ break_location_iterator.FindBreakLocationFromAddress(frame->pc() - 1);
int current_statement_pos = break_location_iterator.statement_position();
while (!break_location_iterator.Done()) {
+ bool accept;
if (break_location_iterator.pc() > frame->pc()) {
+ accept = true;
+ } else {
+ StackFrame::Id break_frame_id = isolate->debug()->break_frame_id();
+ // The break location is at or before our pc. It could still be a
+ // step-in target if the active debugger call is stopped at our frame.
+ if (break_frame_id == StackFrame::NO_ID) {
+ // We are not stepping.
+ accept = false;
+ } else {
+ JavaScriptFrameIterator additional_frame_it(isolate, break_frame_id);
+ // If our frame is a top frame and we are stepping, we can do step-in
+ // at this place.
+ accept = additional_frame_it.frame()->id() == id;
+ }
+ }
+ if (accept) {
if (break_location_iterator.IsStepInLocation(isolate)) {
Smi* position_value = Smi::FromInt(break_location_iterator.position());
JSObject::SetElement(array, len,
@@ -12476,7 +12576,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_IsBreakOnException) {
// of frames to step down.
RUNTIME_FUNCTION(MaybeObject*, Runtime_PrepareStep) {
HandleScope scope(isolate);
- ASSERT(args.length() == 3);
+ ASSERT(args.length() == 4);
// Check arguments.
Object* check;
{ MaybeObject* maybe_check = Runtime_CheckExecutionState(
@@ -12487,6 +12587,15 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_PrepareStep) {
return isolate->Throw(isolate->heap()->illegal_argument_string());
}
+ CONVERT_NUMBER_CHECKED(int, wrapped_frame_id, Int32, args[3]);
+
+ StackFrame::Id frame_id;
+ if (wrapped_frame_id == 0) {
+ frame_id = StackFrame::NO_ID;
+ } else {
+ frame_id = UnwrapFrameId(wrapped_frame_id);
+ }
+
// Get the step action and check validity.
StepAction step_action = static_cast<StepAction>(NumberToInt32(args[1]));
if (step_action != StepIn &&
@@ -12497,6 +12606,11 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_PrepareStep) {
return isolate->Throw(isolate->heap()->illegal_argument_string());
}
+ if (frame_id != StackFrame::NO_ID && step_action != StepNext &&
+ step_action != StepMin && step_action != StepOut) {
+ return isolate->ThrowIllegalOperation();
+ }
+
// Get the number of steps.
int step_count = NumberToInt32(args[2]);
if (step_count < 1) {
@@ -12508,7 +12622,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_PrepareStep) {
// Prepare step.
isolate->debug()->PrepareStep(static_cast<StepAction>(step_action),
- step_count);
+ step_count,
+ frame_id);
return isolate->heap()->undefined_value();
}
@@ -12574,7 +12689,7 @@ static MaybeObject* DebugEvaluate(Isolate* isolate,
shared, context, NOT_TENURED);
bool pending_exception;
Handle<Object> result = Execution::Call(
- eval_fun, receiver, 0, NULL, &pending_exception);
+ isolate, eval_fun, receiver, 0, NULL, &pending_exception);
if (pending_exception) return Failure::Exception();
@@ -12613,7 +12728,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugEvaluate) {
Handle<Object> context_extension(args[5], isolate);
// Handle the processing of break.
- DisableBreak disable_break_save(disable_break);
+ DisableBreak disable_break_save(isolate, disable_break);
// Get the frame where the debugging is performed.
StackFrame::Id id = UnwrapFrameId(wrapped_id);
@@ -12680,7 +12795,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugEvaluateGlobal) {
Handle<Object> context_extension(args[3], isolate);
// Handle the processing of break.
- DisableBreak disable_break_save(disable_break);
+ DisableBreak disable_break_save(isolate, disable_break);
// Enter the top context from before the debugger was invoked.
SaveContext save(isolate);
@@ -12925,7 +13040,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugConstructedBy) {
}
FixedArray* instances = FixedArray::cast(object);
- ASSERT(HEAP->IsHeapIterable());
+ ASSERT(isolate->heap()->IsHeapIterable());
// Fill the referencing objects.
HeapIterator heap_iterator2(heap);
count = DebugConstructedBy(&heap_iterator2,
@@ -12976,7 +13091,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugSetScriptSource) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_SystemBreak) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 0);
- CPU::DebugBreak();
+ OS::DebugBreak();
return isolate->heap()->undefined_value();
}
@@ -13345,11 +13460,19 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ExecuteInDebugContext) {
bool pending_exception;
{
if (without_debugger) {
- result = Execution::Call(function, isolate->global_object(), 0, NULL,
+ result = Execution::Call(isolate,
+ function,
+ isolate->global_object(),
+ 0,
+ NULL,
&pending_exception);
} else {
- EnterDebugger enter_debugger;
- result = Execution::Call(function, isolate->global_object(), 0, NULL,
+ EnterDebugger enter_debugger(isolate);
+ result = Execution::Call(isolate,
+ function,
+ isolate->global_object(),
+ 0,
+ NULL,
&pending_exception);
}
}
@@ -13510,7 +13633,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetLanguageTagVariants) {
Handle<Name> base =
isolate->factory()->NewStringFromAscii(CStrVector("base"));
for (unsigned int i = 0; i < length; ++i) {
- MaybeObject* maybe_string = input->GetElement(i);
+ MaybeObject* maybe_string = input->GetElement(isolate, i);
Object* locale_id;
if (!maybe_string->ToObject(&locale_id) || !locale_id->IsString()) {
return isolate->Throw(isolate->heap()->illegal_argument_string());
@@ -13623,11 +13746,11 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateDateTimeFormat) {
isolate->factory()->NewStringFromAscii(CStrVector("valid")),
NONE));
- Persistent<v8::Object> wrapper(reinterpret_cast<v8::Isolate*>(isolate),
- v8::Utils::ToLocal(local_object));
// Make object handle weak so we can delete the data format once GC kicks in.
- wrapper.MakeWeak<void>(NULL, &DateFormat::DeleteDateFormat);
- wrapper.ClearAndLeak();
+ Handle<Object> wrapper = isolate->global_handles()->Create(*local_object);
+ GlobalHandles::MakeWeak(reinterpret_cast<Object**>(wrapper.location()),
+ NULL,
+ DateFormat::DeleteDateFormat);
return *local_object;
}
@@ -13641,7 +13764,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InternalDateFormat) {
CONVERT_ARG_HANDLE_CHECKED(JSDate, date, 1);
bool has_pending_exception = false;
- Handle<Object> value = Execution::ToNumber(date, &has_pending_exception);
+ Handle<Object> value =
+ Execution::ToNumber(isolate, date, &has_pending_exception);
if (has_pending_exception) {
ASSERT(isolate->has_pending_exception());
return Failure::Exception();
@@ -13681,7 +13805,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InternalDateParse) {
bool has_pending_exception = false;
Handle<JSDate> result = Handle<JSDate>::cast(
- Execution::NewDate(static_cast<double>(date), &has_pending_exception));
+ Execution::NewDate(
+ isolate, static_cast<double>(date), &has_pending_exception));
if (has_pending_exception) {
ASSERT(isolate->has_pending_exception());
return Failure::Exception();
@@ -13726,12 +13851,10 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateNumberFormat) {
isolate->factory()->NewStringFromAscii(CStrVector("valid")),
NONE));
- Persistent<v8::Object> wrapper(reinterpret_cast<v8::Isolate*>(isolate),
- v8::Utils::ToLocal(local_object));
- // Make object handle weak so we can delete the number format once GC kicks
- // in.
- wrapper.MakeWeak<void>(NULL, &NumberFormat::DeleteNumberFormat);
- wrapper.ClearAndLeak();
+ Handle<Object> wrapper = isolate->global_handles()->Create(*local_object);
+ GlobalHandles::MakeWeak(reinterpret_cast<Object**>(wrapper.location()),
+ NULL,
+ NumberFormat::DeleteNumberFormat);
return *local_object;
}
@@ -13745,7 +13868,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InternalNumberFormat) {
CONVERT_ARG_HANDLE_CHECKED(Object, number, 1);
bool has_pending_exception = false;
- Handle<Object> value = Execution::ToNumber(number, &has_pending_exception);
+ Handle<Object> value = Execution::ToNumber(
+ isolate, number, &has_pending_exception);
if (has_pending_exception) {
ASSERT(isolate->has_pending_exception());
return Failure::Exception();
@@ -13839,11 +13963,10 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateCollator) {
isolate->factory()->NewStringFromAscii(CStrVector("valid")),
NONE));
- Persistent<v8::Object> wrapper(reinterpret_cast<v8::Isolate*>(isolate),
- v8::Utils::ToLocal(local_object));
- // Make object handle weak so we can delete the collator once GC kicks in.
- wrapper.MakeWeak<void>(NULL, &Collator::DeleteCollator);
- wrapper.ClearAndLeak();
+ Handle<Object> wrapper = isolate->global_handles()->Create(*local_object);
+ GlobalHandles::MakeWeak(reinterpret_cast<Object**>(wrapper.location()),
+ NULL,
+ Collator::DeleteCollator);
return *local_object;
}
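The same wrapper-lifetime change is applied to DateFormat, NumberFormat, Collator, and the new BreakIterator below: the leaked v8::Persistent is replaced by an internal global handle with a weak callback. The steps, sketched as comments since the callback bodies are outside this diff:

// 1. local_object keeps the ICU object in an internal field.
// 2. global_handles()->Create(*local_object) pins it past this scope.
// 3. GlobalHandles::MakeWeak(location, NULL, Collator::DeleteCollator)
//    registers a callback expected to delete the ICU object and release
//    the handle once the JS wrapper is otherwise unreachable.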
@@ -13874,6 +13997,158 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InternalCompare) {
return *isolate->factory()->NewNumberFromInt(result);
}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateBreakIterator) {
+ HandleScope scope(isolate);
+
+ ASSERT(args.length() == 3);
+
+ CONVERT_ARG_HANDLE_CHECKED(String, locale, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, options, 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, resolved, 2);
+
+ Handle<ObjectTemplateInfo> break_iterator_template =
+ I18N::GetTemplate2(isolate);
+
+ // Create an empty object wrapper.
+ bool has_pending_exception = false;
+ Handle<JSObject> local_object = Execution::InstantiateObject(
+ break_iterator_template, &has_pending_exception);
+ if (has_pending_exception) {
+ ASSERT(isolate->has_pending_exception());
+ return Failure::Exception();
+ }
+
+ // Set break iterator as internal field of the resulting JS object.
+ icu::BreakIterator* break_iterator = BreakIterator::InitializeBreakIterator(
+ isolate, locale, options, resolved);
+
+ if (!break_iterator) return isolate->ThrowIllegalOperation();
+
+ local_object->SetInternalField(0, reinterpret_cast<Smi*>(break_iterator));
+ // Make sure that the pointer to the adopted text is NULL.
+ local_object->SetInternalField(1, reinterpret_cast<Smi*>(NULL));
+
+ RETURN_IF_EMPTY_HANDLE(isolate,
+ JSObject::SetLocalPropertyIgnoreAttributes(
+ local_object,
+ isolate->factory()->NewStringFromAscii(CStrVector("breakIterator")),
+ isolate->factory()->NewStringFromAscii(CStrVector("valid")),
+ NONE));
+
+ // Make object handle weak so we can delete the break iterator once GC kicks
+ // in.
+ Handle<Object> wrapper = isolate->global_handles()->Create(*local_object);
+ GlobalHandles::MakeWeak(reinterpret_cast<Object**>(wrapper.location()),
+ NULL,
+ BreakIterator::DeleteBreakIterator);
+ return *local_object;
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_BreakIteratorAdoptText) {
+ HandleScope scope(isolate);
+
+ ASSERT(args.length() == 2);
+
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, break_iterator_holder, 0);
+ CONVERT_ARG_HANDLE_CHECKED(String, text, 1);
+
+ icu::BreakIterator* break_iterator =
+ BreakIterator::UnpackBreakIterator(isolate, break_iterator_holder);
+ if (!break_iterator) return isolate->ThrowIllegalOperation();
+
+ icu::UnicodeString* u_text = reinterpret_cast<icu::UnicodeString*>(
+ break_iterator_holder->GetInternalField(1));
+ delete u_text;
+
+ v8::String::Value text_value(v8::Utils::ToLocal(text));
+ u_text = new icu::UnicodeString(
+ reinterpret_cast<const UChar*>(*text_value), text_value.length());
+ break_iterator_holder->SetInternalField(1, reinterpret_cast<Smi*>(u_text));
+
+ break_iterator->setText(*u_text);
+
+ return isolate->heap()->undefined_value();
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_BreakIteratorFirst) {
+ HandleScope scope(isolate);
+
+ ASSERT(args.length() == 1);
+
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, break_iterator_holder, 0);
+
+ icu::BreakIterator* break_iterator =
+ BreakIterator::UnpackBreakIterator(isolate, break_iterator_holder);
+ if (!break_iterator) return isolate->ThrowIllegalOperation();
+
+ return *isolate->factory()->NewNumberFromInt(break_iterator->first());
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_BreakIteratorNext) {
+ HandleScope scope(isolate);
+
+ ASSERT(args.length() == 1);
+
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, break_iterator_holder, 0);
+
+ icu::BreakIterator* break_iterator =
+ BreakIterator::UnpackBreakIterator(isolate, break_iterator_holder);
+ if (!break_iterator) return isolate->ThrowIllegalOperation();
+
+ return *isolate->factory()->NewNumberFromInt(break_iterator->next());
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_BreakIteratorCurrent) {
+ HandleScope scope(isolate);
+
+ ASSERT(args.length() == 1);
+
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, break_iterator_holder, 0);
+
+ icu::BreakIterator* break_iterator =
+ BreakIterator::UnpackBreakIterator(isolate, break_iterator_holder);
+ if (!break_iterator) return isolate->ThrowIllegalOperation();
+
+ return *isolate->factory()->NewNumberFromInt(break_iterator->current());
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_BreakIteratorBreakType) {
+ HandleScope scope(isolate);
+
+ ASSERT(args.length() == 1);
+
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, break_iterator_holder, 0);
+
+ icu::BreakIterator* break_iterator =
+ BreakIterator::UnpackBreakIterator(isolate, break_iterator_holder);
+ if (!break_iterator) return isolate->ThrowIllegalOperation();
+
+ // TODO(cira): Remove cast once ICU fixes base BreakIterator class.
+ icu::RuleBasedBreakIterator* rule_based_iterator =
+ static_cast<icu::RuleBasedBreakIterator*>(break_iterator);
+ int32_t status = rule_based_iterator->getRuleStatus();
+ // Keep return values in sync with JavaScript BreakType enum.
+ if (status >= UBRK_WORD_NONE && status < UBRK_WORD_NONE_LIMIT) {
+ return *isolate->factory()->NewStringFromAscii(CStrVector("none"));
+ } else if (status >= UBRK_WORD_NUMBER && status < UBRK_WORD_NUMBER_LIMIT) {
+ return *isolate->factory()->NewStringFromAscii(CStrVector("number"));
+ } else if (status >= UBRK_WORD_LETTER && status < UBRK_WORD_LETTER_LIMIT) {
+ return *isolate->factory()->NewStringFromAscii(CStrVector("letter"));
+ } else if (status >= UBRK_WORD_KANA && status < UBRK_WORD_KANA_LIMIT) {
+ return *isolate->factory()->NewStringFromAscii(CStrVector("kana"));
+ } else if (status >= UBRK_WORD_IDEO && status < UBRK_WORD_IDEO_LIMIT) {
+ return *isolate->factory()->NewStringFromAscii(CStrVector("ideo"));
+ } else {
+ return *isolate->factory()->NewStringFromAscii(CStrVector("unknown"));
+ }
+}
#endif // V8_I18N_SUPPORT
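
For reference, a minimal standalone sketch of the ICU calls the BreakIterator runtime functions above wrap, using ICU's public C++ API directly instead of V8's internal-field plumbing. Illustrative only, not part of the patch; link with -licuuc -licui18n.

#include <unicode/brkiter.h>
#include <unicode/rbbi.h>
#include <unicode/ubrk.h>
#include <unicode/unistr.h>
#include <cstdio>

int main() {
  UErrorCode status = U_ZERO_ERROR;
  icu::BreakIterator* it =
      icu::BreakIterator::createWordInstance(icu::Locale::getUS(), status);
  if (U_FAILURE(status)) return 1;

  icu::UnicodeString text = icu::UnicodeString::fromUTF8("10 green bottles");
  it->setText(text);  // what Runtime_BreakIteratorAdoptText does internally

  // first()/next() mirror Runtime_BreakIteratorFirst/Next; getRuleStatus()
  // drives the "none"/"number"/"letter"/... classification above.
  for (int32_t pos = it->first(); pos != icu::BreakIterator::DONE;
       pos = it->next()) {
    icu::RuleBasedBreakIterator* rule_based =
        static_cast<icu::RuleBasedBreakIterator*>(it);  // same TODO as above
    std::printf("boundary at %d, rule status %d\n",
                pos, rule_based->getRuleStatus());
  }
  delete it;
  return 0;
}
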
@@ -13952,13 +14227,13 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CollectStackTrace) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_GetAndClearOverflowedStackTrace) {
HandleScope scope(isolate);
ASSERT_EQ(args.length(), 1);
- CONVERT_ARG_CHECKED(JSObject, error_object, 0);
- String* key = isolate->heap()->hidden_stack_trace_string();
- Object* result = error_object->GetHiddenProperty(key);
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, error_object, 0);
+ Handle<String> key = isolate->factory()->hidden_stack_trace_string();
+ Handle<Object> result(error_object->GetHiddenProperty(*key), isolate);
if (result->IsTheHole()) return isolate->heap()->undefined_value();
RUNTIME_ASSERT(result->IsJSArray() || result->IsUndefined());
- error_object->DeleteHiddenProperty(key);
- return result;
+ JSObject::DeleteHiddenProperty(error_object, key);
+ return *result;
}
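
The hunk above is part of the commit-wide handlification effort: JSObject::DeleteHiddenProperty can allocate, and an allocation can trigger a moving GC that leaves raw Object* pointers dangling, so the raw pointers become handles the collector keeps up to date. A toy, non-V8 illustration of the difference (ToyHeap and Obj are invented for this sketch):

#include <cstdio>
#include <vector>

struct Obj { int value; };

class ToyHeap {
 public:
  // A "handle" is an index into a slot table the heap owns and updates,
  // so it survives object motion; a raw Obj* does not.
  int NewHandle(int value) {
    storage_.push_back(Obj{value});
    slots_.push_back(storage_.size() - 1);
    return static_cast<int>(slots_.size()) - 1;
  }
  Obj* Deref(int handle) { return &storage_[slots_[handle]]; }
  void MovingGC() {
    std::vector<Obj> moved(storage_);  // "move" every object...
    storage_.swap(moved);              // ...and free the old backing store
  }
 private:
  std::vector<Obj> storage_;
  std::vector<size_t> slots_;
};

int main() {
  ToyHeap heap;
  int handle = heap.NewHandle(42);
  Obj* raw = heap.Deref(handle);  // like the old CONVERT_ARG_CHECKED pointer
  heap.MovingGC();                // like a GC inside DeleteHiddenProperty
  // std::printf("%d\n", raw->value);             // use-after-free: don't
  std::printf("%d\n", heap.Deref(handle)->value); // handle still valid: 42
  (void)raw;
  return 0;
}
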
@@ -14064,7 +14339,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFromCache) {
// This handle is neither shared nor used later, so it's safe.
Handle<Object> argv[] = { key_handle };
bool pending_exception;
- value = Execution::Call(factory,
+ value = Execution::Call(isolate,
+ factory,
receiver,
ARRAY_SIZE(argv),
argv,
@@ -14232,6 +14508,14 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_HaveSameMap) {
}
+RUNTIME_FUNCTION(MaybeObject*, Runtime_IsAccessCheckNeeded) {
+ SealHandleScope shs(isolate);
+ ASSERT(args.length() == 1);
+ CONVERT_ARG_CHECKED(HeapObject, obj, 0);
+ return isolate->heap()->ToBoolean(obj->IsAccessCheckNeeded());
+}
+
+
RUNTIME_FUNCTION(MaybeObject*, Runtime_IsObserved) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
@@ -14309,6 +14593,34 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_UnwrapGlobalProxy) {
}
+RUNTIME_FUNCTION(MaybeObject*, Runtime_IsAccessAllowedForObserver) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 3);
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, observer, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 1);
+ ASSERT(object->IsAccessCheckNeeded());
+ Handle<Object> key = args.at<Object>(2);
+ SaveContext save(isolate);
+ isolate->set_context(observer->context());
+ if (!isolate->MayNamedAccess(*object, isolate->heap()->undefined_value(),
+ v8::ACCESS_KEYS)) {
+ return isolate->heap()->false_value();
+ }
+ bool access_allowed = false;
+ uint32_t index = 0;
+ if (key->ToArrayIndex(&index) ||
+ (key->IsString() && String::cast(*key)->AsArrayIndex(&index))) {
+ access_allowed =
+ isolate->MayIndexedAccess(*object, index, v8::ACCESS_GET) &&
+ isolate->MayIndexedAccess(*object, index, v8::ACCESS_HAS);
+ } else {
+ access_allowed = isolate->MayNamedAccess(*object, *key, v8::ACCESS_GET) &&
+ isolate->MayNamedAccess(*object, *key, v8::ACCESS_HAS);
+ }
+ return isolate->heap()->ToBoolean(access_allowed);
+}
+
+
static MaybeObject* ArrayConstructorCommon(Isolate* isolate,
Handle<JSFunction> constructor,
Handle<Object> type_info,
@@ -14433,7 +14745,6 @@ static const Runtime::Function kIntrinsicFunctions[] = {
MaybeObject* Runtime::InitializeIntrinsicFunctionNames(Heap* heap,
Object* dictionary) {
- ASSERT(Isolate::Current()->heap() == heap);
ASSERT(dictionary != NULL);
ASSERT(NameDictionary::cast(dictionary)->NumberOfElements() == 0);
for (int i = 0; i < kNumFunctions; ++i) {
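
The dropped ASSERT above reflects the theme running through the rest of this patch (serialize.cc, scopes.cc, spaces.cc below): implicit Isolate::Current()/HEAP lookups through thread-local storage are replaced by an explicitly passed Isolate*. A non-V8 sketch of the before/after shape:

#include <cstdio>

struct Isolate { const char* name; };

thread_local Isolate* g_current_isolate = nullptr;  // the old, implicit route

// Before: hidden dependency on thread-local state, one TLS hit per call.
void LogImplicit() { std::printf("in %s\n", g_current_isolate->name); }

// After: the dependency is visible in the signature and needs no TLS hit,
// mirroring e.g. Execution::ToNumber(isolate, ...) above.
void LogExplicit(Isolate* isolate) { std::printf("in %s\n", isolate->name); }

int main() {
  Isolate a{"A"};
  g_current_isolate = &a;
  LogImplicit();
  LogExplicit(&a);
  return 0;
}
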
diff --git a/deps/v8/src/runtime.h b/deps/v8/src/runtime.h
index 687afbd8f..60c667711 100644
--- a/deps/v8/src/runtime.h
+++ b/deps/v8/src/runtime.h
@@ -87,20 +87,20 @@ namespace internal {
F(NewStrictArgumentsFast, 3, 1) \
F(LazyCompile, 1, 1) \
F(LazyRecompile, 1, 1) \
- F(ParallelRecompile, 1, 1) \
- F(InstallRecompiledCode, 1, 1) \
+ F(ConcurrentRecompile, 1, 1) \
+ F(TryInstallRecompiledCode, 1, 1) \
F(NotifyDeoptimized, 1, 1) \
F(NotifyStubFailure, 0, 1) \
F(NotifyOSR, 0, 1) \
F(DeoptimizeFunction, 1, 1) \
F(ClearFunctionTypeFeedback, 1, 1) \
F(RunningInSimulator, 0, 1) \
- F(IsParallelRecompilationSupported, 0, 1) \
+ F(IsConcurrentRecompilationSupported, 0, 1) \
F(OptimizeFunctionOnNextCall, -1, 1) \
F(NeverOptimizeFunction, 1, 1) \
F(GetOptimizationStatus, -1, 1) \
F(GetOptimizationCount, 1, 1) \
- F(CompileForOnStackReplacement, 1, 1) \
+ F(CompileForOnStackReplacement, 2, 1) \
F(SetAllocationTimeout, 2, 1) \
F(AllocateInNewSpace, 1, 1) \
F(AllocateInOldPointerSpace, 1, 1) \
@@ -221,7 +221,8 @@ namespace internal {
F(NumberToRadixString, 2, 1) \
F(NumberToFixed, 2, 1) \
F(NumberToExponential, 2, 1) \
- F(NumberToPrecision, 2, 1)
+ F(NumberToPrecision, 2, 1) \
+ F(IsValidSmi, 1, 1)
#define RUNTIME_FUNCTION_LIST_ALWAYS_2(F) \
@@ -258,6 +259,7 @@ namespace internal {
F(GetTemplateField, 2, 1) \
F(DisableAccessChecks, 1, 1) \
F(EnableAccessChecks, 1, 1) \
+ F(SetAccessorProperty, 6, 1) \
\
/* Dates */ \
F(DateCurrentTime, 0, 1) \
@@ -356,6 +358,7 @@ namespace internal {
F(GetObservationState, 0, 1) \
F(ObservationWeakMapCreate, 0, 1) \
F(UnwrapGlobalProxy, 1, 1) \
+ F(IsAccessAllowedForObserver, 3, 1) \
\
/* Harmony typed arrays */ \
F(ArrayBufferInitialize, 2, 1)\
@@ -394,6 +397,7 @@ namespace internal {
\
/* Statements */ \
F(NewClosure, 3, 1) \
+ F(NewClosureFromStubFailure, 1, 1) \
F(NewObject, 1, 1) \
F(NewObjectFromBound, 1, 1) \
F(FinalizeInstanceSize, 1, 1) \
@@ -466,7 +470,8 @@ namespace internal {
F(HasExternalDoubleElements, 1, 1) \
F(HasFastProperties, 1, 1) \
F(TransitionElementsKind, 2, 1) \
- F(HaveSameMap, 2, 1)
+ F(HaveSameMap, 2, 1) \
+ F(IsAccessCheckNeeded, 1, 1)
#ifdef ENABLE_DEBUGGER_SUPPORT
@@ -501,7 +506,7 @@ namespace internal {
F(ClearBreakPoint, 1, 1) \
F(ChangeBreakOnException, 2, 1) \
F(IsBreakOnException, 1, 1) \
- F(PrepareStep, 3, 1) \
+ F(PrepareStep, 4, 1) \
F(ClearStepping, 0, 1) \
F(DebugEvaluate, 6, 1) \
F(DebugEvaluateGlobal, 4, 1) \
@@ -559,6 +564,14 @@ namespace internal {
/* Collator. */ \
F(CreateCollator, 3, 1) \
F(InternalCompare, 3, 1) \
+ \
+ /* Break iterator. */ \
+ F(CreateBreakIterator, 3, 1) \
+ F(BreakIteratorAdoptText, 2, 1) \
+ F(BreakIteratorFirst, 1, 1) \
+ F(BreakIteratorNext, 1, 1) \
+ F(BreakIteratorCurrent, 1, 1) \
+ F(BreakIteratorBreakType, 1, 1) \
#else
#define RUNTIME_FUNCTION_LIST_I18N_SUPPORT(F)
diff --git a/deps/v8/src/sampler.cc b/deps/v8/src/sampler.cc
index 1d0cdedd1..0aaa1e9b7 100644
--- a/deps/v8/src/sampler.cc
+++ b/deps/v8/src/sampler.cc
@@ -27,9 +27,7 @@
#include "sampler.h"
-#if defined(__linux__) || defined(__FreeBSD__) || defined(__OpenBSD__) \
- || defined(__NetBSD__) || defined(__sun) || defined(__ANDROID__) \
- || defined(__native_client__)
+#if V8_OS_POSIX && !V8_OS_CYGWIN
#define USE_SIGNALS
@@ -38,26 +36,25 @@
#include <signal.h>
#include <sys/time.h>
#include <sys/syscall.h>
+
+#if V8_OS_MACOSX
+#include <mach/mach.h>
// OpenBSD doesn't have <ucontext.h>. ucontext_t lives in <signal.h>
// and is a typedef for struct sigcontext. There is no uc_mcontext.
-#if (!defined(__ANDROID__) || defined(__BIONIC_HAVE_UCONTEXT_T)) \
- && !defined(__OpenBSD__)
+#elif (!V8_OS_ANDROID || defined(__BIONIC_HAVE_UCONTEXT_T)) \
+ && !V8_OS_OPENBSD
#include <ucontext.h>
#endif
#include <unistd.h>
// GLibc on ARM defines mcontext_t as a typedef for 'struct sigcontext'.
// Old versions of the C library <signal.h> didn't define the type.
-#if defined(__ANDROID__) && !defined(__BIONIC_HAVE_UCONTEXT_T) && \
+#if V8_OS_ANDROID && !defined(__BIONIC_HAVE_UCONTEXT_T) && \
defined(__arm__) && !defined(__BIONIC_HAVE_STRUCT_SIGCONTEXT)
#include <asm/sigcontext.h>
#endif
-#elif defined(__MACH__)
-
-#include <mach/mach.h>
-
-#elif defined(_WIN32) || defined(_WIN64) || defined(__CYGWIN__)
+#elif V8_OS_WIN || V8_OS_CYGWIN
#include "win32-headers.h"
@@ -65,7 +62,7 @@
#include "v8.h"
-#include "cpu-profiler.h"
+#include "cpu-profiler-inl.h"
#include "flags.h"
#include "frames-inl.h"
#include "log.h"
@@ -75,7 +72,7 @@
#include "vm-state-inl.h"
-#if defined(__ANDROID__) && !defined(__BIONIC_HAVE_UCONTEXT_T)
+#if V8_OS_ANDROID && !defined(__BIONIC_HAVE_UCONTEXT_T)
// Not all versions of Android's C library provide ucontext_t.
// Detect this and provide custom but compatible definitions. Note that these
@@ -147,7 +144,7 @@ typedef struct ucontext {
enum { REG_EBP = 6, REG_ESP = 7, REG_EIP = 14 };
#endif
-#endif // __ANDROID__ && !defined(__BIONIC_HAVE_UCONTEXT_T)
+#endif // V8_OS_ANDROID && !defined(__BIONIC_HAVE_UCONTEXT_T)
namespace v8 {
@@ -180,27 +177,7 @@ class Sampler::PlatformData : public PlatformDataCommon {
pthread_t vm_tid_;
};
-#elif defined(__MACH__)
-
-class Sampler::PlatformData : public PlatformDataCommon {
- public:
- PlatformData() : profiled_thread_(mach_thread_self()) {}
-
- ~PlatformData() {
- // Deallocate Mach port for thread.
- mach_port_deallocate(mach_task_self(), profiled_thread_);
- }
-
- thread_act_t profiled_thread() { return profiled_thread_; }
-
- private:
- // Note: for profiled_thread_ Mach primitives are used instead of PThread's
- // because the latter doesn't provide thread manipulation primitives required.
- // For details, consult "Mac OS X Internals" book, Section 7.3.
- thread_act_t profiled_thread_;
-};
-
-#elif defined(_WIN32) || defined(_WIN64) || defined(__CYGWIN__)
+#elif V8_OS_WIN || V8_OS_CYGWIN
// ----------------------------------------------------------------------------
// Win32 profiler support. On Cygwin we use the same sampler implementation as
@@ -271,8 +248,25 @@ class SimulatorHelper {
class SignalHandler : public AllStatic {
public:
- static inline void EnsureInstalled() {
- if (signal_handler_installed_) return;
+ static void SetUp() { if (!mutex_) mutex_ = new Mutex(); }
+ static void TearDown() { delete mutex_; }
+
+ static void IncreaseSamplerCount() {
+ LockGuard<Mutex> lock_guard(mutex_);
+ if (++client_count_ == 1) Install();
+ }
+
+ static void DecreaseSamplerCount() {
+ LockGuard<Mutex> lock_guard(mutex_);
+ if (--client_count_ == 0) Restore();
+ }
+
+ static bool Installed() {
+ return signal_handler_installed_;
+ }
+
+ private:
+ static void Install() {
struct sigaction sa;
sa.sa_sigaction = &HandleProfilerSignal;
sigemptyset(&sa.sa_mask);
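
The IncreaseSamplerCount/DecreaseSamplerCount pair above implements a first-in-installs, last-out-restores discipline for the process-wide SIGPROF handler. The same idiom in a standalone sketch (names invented; std::mutex standing in for V8's platform Mutex and LockGuard):

#include <mutex>

class RefCountedInstaller {
 public:
  static void Increase() {
    std::lock_guard<std::mutex> lock(mutex_);
    if (++clients_ == 1) Install();    // first client installs the handler
  }
  static void Decrease() {
    std::lock_guard<std::mutex> lock(mutex_);
    if (--clients_ == 0) Restore();    // last client restores the old one
  }
 private:
  static void Install() { /* e.g. sigaction(SIGPROF, &sa, &old_sa) */ }
  static void Restore() { /* sigaction(SIGPROF, &old_sa, nullptr) */ }
  static std::mutex mutex_;
  static int clients_;
};

std::mutex RefCountedInstaller::mutex_;
int RefCountedInstaller::clients_ = 0;
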
@@ -281,30 +275,31 @@ class SignalHandler : public AllStatic {
(sigaction(SIGPROF, &sa, &old_signal_handler_) == 0);
}
- static inline void Restore() {
+ static void Restore() {
if (signal_handler_installed_) {
sigaction(SIGPROF, &old_signal_handler_, 0);
signal_handler_installed_ = false;
}
}
- static inline bool Installed() {
- return signal_handler_installed_;
- }
-
- private:
static void HandleProfilerSignal(int signal, siginfo_t* info, void* context);
+ // Protects the process wide state below.
+ static Mutex* mutex_;
+ static int client_count_;
static bool signal_handler_installed_;
static struct sigaction old_signal_handler_;
};
+
+Mutex* SignalHandler::mutex_ = NULL;
+int SignalHandler::client_count_ = 0;
struct sigaction SignalHandler::old_signal_handler_;
bool SignalHandler::signal_handler_installed_ = false;
void SignalHandler::HandleProfilerSignal(int signal, siginfo_t* info,
void* context) {
-#if defined(__native_client__)
+#if V8_OS_NACL
// As Native Client does not support signal handling, profiling
// is disabled.
return;
@@ -322,7 +317,7 @@ void SignalHandler::HandleProfilerSignal(int signal, siginfo_t* info,
}
Sampler* sampler = isolate->logger()->sampler();
- if (sampler == NULL || !sampler->IsActive()) return;
+ if (sampler == NULL) return;
RegisterState state;
@@ -333,10 +328,10 @@ void SignalHandler::HandleProfilerSignal(int signal, siginfo_t* info,
#else
// Extracting the sample from the context is extremely machine dependent.
ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
-#if !defined(__OpenBSD__)
+#if !V8_OS_OPENBSD
mcontext_t& mcontext = ucontext->uc_mcontext;
#endif
-#if defined(__linux__) || defined(__ANDROID__)
+#if V8_OS_LINUX
#if V8_HOST_ARCH_IA32
state.pc = reinterpret_cast<Address>(mcontext.gregs[REG_EIP]);
state.sp = reinterpret_cast<Address>(mcontext.gregs[REG_ESP]);
@@ -364,7 +359,29 @@ void SignalHandler::HandleProfilerSignal(int signal, siginfo_t* info,
state.sp = reinterpret_cast<Address>(mcontext.gregs[29]);
state.fp = reinterpret_cast<Address>(mcontext.gregs[30]);
#endif // V8_HOST_ARCH_*
-#elif defined(__FreeBSD__)
+#elif V8_OS_MACOSX
+#if V8_HOST_ARCH_X64
+#if __DARWIN_UNIX03
+ state.pc = reinterpret_cast<Address>(mcontext->__ss.__rip);
+ state.sp = reinterpret_cast<Address>(mcontext->__ss.__rsp);
+ state.fp = reinterpret_cast<Address>(mcontext->__ss.__rbp);
+#else // !__DARWIN_UNIX03
+ state.pc = reinterpret_cast<Address>(mcontext->ss.rip);
+ state.sp = reinterpret_cast<Address>(mcontext->ss.rsp);
+ state.fp = reinterpret_cast<Address>(mcontext->ss.rbp);
+#endif // __DARWIN_UNIX03
+#elif V8_HOST_ARCH_IA32
+#if __DARWIN_UNIX03
+ state.pc = reinterpret_cast<Address>(mcontext->__ss.__eip);
+ state.sp = reinterpret_cast<Address>(mcontext->__ss.__esp);
+ state.fp = reinterpret_cast<Address>(mcontext->__ss.__ebp);
+#else // !__DARWIN_UNIX03
+ state.pc = reinterpret_cast<Address>(mcontext->ss.eip);
+ state.sp = reinterpret_cast<Address>(mcontext->ss.esp);
+ state.fp = reinterpret_cast<Address>(mcontext->ss.ebp);
+#endif // __DARWIN_UNIX03
+#endif // V8_HOST_ARCH_IA32
+#elif V8_OS_FREEBSD
#if V8_HOST_ARCH_IA32
state.pc = reinterpret_cast<Address>(mcontext.mc_eip);
state.sp = reinterpret_cast<Address>(mcontext.mc_esp);
@@ -378,7 +395,7 @@ void SignalHandler::HandleProfilerSignal(int signal, siginfo_t* info,
state.sp = reinterpret_cast<Address>(mcontext.mc_r13);
state.fp = reinterpret_cast<Address>(mcontext.mc_r11);
#endif // V8_HOST_ARCH_*
-#elif defined(__NetBSD__)
+#elif V8_OS_NETBSD
#if V8_HOST_ARCH_IA32
state.pc = reinterpret_cast<Address>(mcontext.__gregs[_REG_EIP]);
state.sp = reinterpret_cast<Address>(mcontext.__gregs[_REG_ESP]);
@@ -388,7 +405,7 @@ void SignalHandler::HandleProfilerSignal(int signal, siginfo_t* info,
state.sp = reinterpret_cast<Address>(mcontext.__gregs[_REG_RSP]);
state.fp = reinterpret_cast<Address>(mcontext.__gregs[_REG_RBP]);
#endif // V8_HOST_ARCH_*
-#elif defined(__OpenBSD__)
+#elif V8_OS_OPENBSD
#if V8_HOST_ARCH_IA32
state.pc = reinterpret_cast<Address>(ucontext->sc_eip);
state.sp = reinterpret_cast<Address>(ucontext->sc_esp);
@@ -398,14 +415,14 @@ void SignalHandler::HandleProfilerSignal(int signal, siginfo_t* info,
state.sp = reinterpret_cast<Address>(ucontext->sc_rsp);
state.fp = reinterpret_cast<Address>(ucontext->sc_rbp);
#endif // V8_HOST_ARCH_*
-#elif defined(__sun)
+#elif V8_OS_SOLARIS
state.pc = reinterpret_cast<Address>(mcontext.gregs[REG_PC]);
state.sp = reinterpret_cast<Address>(mcontext.gregs[REG_SP]);
state.fp = reinterpret_cast<Address>(mcontext.gregs[REG_FP]);
-#endif // __sun
+#endif // V8_OS_SOLARIS
#endif // USE_SIMULATOR
sampler->SampleStack(state);
-#endif // __native_client__
+#endif // V8_OS_NACL
}
#endif
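
For orientation, a minimal self-contained POSIX sketch of the mechanism HandleProfilerSignal plugs into: a SIGPROF handler registered with SA_SIGINFO receives a ucontext_t* whose uc_mcontext carries the interrupted registers, which is the machine-dependent part the #if ladder above extracts per OS and architecture. Illustrative only; a production sampler needs more async-signal-safety care than this sketch takes.

#include <csignal>
#include <cstdio>
#include <sys/time.h>
#include <ucontext.h>

static volatile sig_atomic_t g_ticks = 0;

static void OnProf(int, siginfo_t*, void* context) {
  ucontext_t* ucontext = static_cast<ucontext_t*>(context);
  (void)ucontext;  // a real sampler reads pc/sp/fp out of uc_mcontext here
  ++g_ticks;
}

int main() {
  struct sigaction sa = {};
  sa.sa_sigaction = &OnProf;
  sigemptyset(&sa.sa_mask);
  sa.sa_flags = SA_RESTART | SA_SIGINFO;  // typical profiler-handler flags
  sigaction(SIGPROF, &sa, nullptr);

  itimerval timer = {};
  timer.it_interval.tv_usec = 1000;  // fire every 1ms of CPU time
  timer.it_value.tv_usec = 1000;
  setitimer(ITIMER_PROF, &timer, nullptr);

  while (g_ticks < 100) { /* spin so ITIMER_PROF accumulates CPU time */ }
  std::printf("took %d samples\n", static_cast<int>(g_ticks));
  return 0;
}
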
@@ -419,12 +436,12 @@ class SamplerThread : public Thread {
: Thread(Thread::Options("SamplerThread", kSamplerThreadStackSize)),
interval_(interval) {}
- static void SetUp() { if (!mutex_) mutex_ = OS::CreateMutex(); }
- static void TearDown() { delete mutex_; }
+ static void SetUp() { if (!mutex_) mutex_ = new Mutex(); }
+ static void TearDown() { delete mutex_; mutex_ = NULL; }
static void AddActiveSampler(Sampler* sampler) {
bool need_to_start = false;
- ScopedLock lock(mutex_);
+ LockGuard<Mutex> lock_guard(mutex_);
if (instance_ == NULL) {
// Start a thread that will send a SIGPROF signal to VM threads
// when CPU profiling is enabled.
@@ -437,16 +454,13 @@ class SamplerThread : public Thread {
ASSERT(instance_->interval_ == sampler->interval());
instance_->active_samplers_.Add(sampler);
-#if defined(USE_SIGNALS)
- SignalHandler::EnsureInstalled();
-#endif
if (need_to_start) instance_->StartSynchronously();
}
static void RemoveActiveSampler(Sampler* sampler) {
SamplerThread* instance_to_remove = NULL;
{
- ScopedLock lock(mutex_);
+ LockGuard<Mutex> lock_guard(mutex_);
ASSERT(sampler->IsActive());
bool removed = instance_->active_samplers_.RemoveElement(sampler);
@@ -458,9 +472,6 @@ class SamplerThread : public Thread {
if (instance_->active_samplers_.is_empty()) {
instance_to_remove = instance_;
instance_ = NULL;
-#if defined(USE_SIGNALS)
- SignalHandler::Restore();
-#endif
}
}
@@ -473,7 +484,7 @@ class SamplerThread : public Thread {
virtual void Run() {
while (true) {
{
- ScopedLock lock(mutex_);
+ LockGuard<Mutex> lock_guard(mutex_);
if (active_samplers_.is_empty()) break;
// When CPU profiling is enabled both JavaScript and C++ code is
// profiled. We must not suspend.
@@ -481,7 +492,7 @@ class SamplerThread : public Thread {
Sampler* sampler = active_samplers_.at(i);
if (!sampler->isolate()->IsInitialized()) continue;
if (!sampler->IsProfiling()) continue;
- SampleContext(sampler);
+ sampler->DoSample();
}
}
OS::Sleep(interval_);
@@ -489,109 +500,6 @@ class SamplerThread : public Thread {
}
private:
-#if defined(USE_SIGNALS)
-
- void SampleContext(Sampler* sampler) {
- if (!SignalHandler::Installed()) return;
- pthread_t tid = sampler->platform_data()->vm_tid();
- pthread_kill(tid, SIGPROF);
- }
-
-#elif defined(__MACH__)
-
- void SampleContext(Sampler* sampler) {
- thread_act_t profiled_thread = sampler->platform_data()->profiled_thread();
-
-#if defined(USE_SIMULATOR)
- SimulatorHelper helper;
- Isolate* isolate = sampler->isolate();
- if (!helper.Init(sampler, isolate)) return;
-#endif
-
- if (KERN_SUCCESS != thread_suspend(profiled_thread)) return;
-
-#if V8_HOST_ARCH_X64
- thread_state_flavor_t flavor = x86_THREAD_STATE64;
- x86_thread_state64_t thread_state;
- mach_msg_type_number_t count = x86_THREAD_STATE64_COUNT;
-#if __DARWIN_UNIX03
-#define REGISTER_FIELD(name) __r ## name
-#else
-#define REGISTER_FIELD(name) r ## name
-#endif // __DARWIN_UNIX03
-#elif V8_HOST_ARCH_IA32
- thread_state_flavor_t flavor = i386_THREAD_STATE;
- i386_thread_state_t thread_state;
- mach_msg_type_number_t count = i386_THREAD_STATE_COUNT;
-#if __DARWIN_UNIX03
-#define REGISTER_FIELD(name) __e ## name
-#else
-#define REGISTER_FIELD(name) e ## name
-#endif // __DARWIN_UNIX03
-#else
-#error Unsupported Mac OS X host architecture.
-#endif // V8_HOST_ARCH
-
- if (thread_get_state(profiled_thread,
- flavor,
- reinterpret_cast<natural_t*>(&thread_state),
- &count) == KERN_SUCCESS) {
- RegisterState state;
-#if defined(USE_SIMULATOR)
- helper.FillRegisters(&state);
-#else
- state.pc = reinterpret_cast<Address>(thread_state.REGISTER_FIELD(ip));
- state.sp = reinterpret_cast<Address>(thread_state.REGISTER_FIELD(sp));
- state.fp = reinterpret_cast<Address>(thread_state.REGISTER_FIELD(bp));
-#endif // USE_SIMULATOR
-#undef REGISTER_FIELD
- sampler->SampleStack(state);
- }
- thread_resume(profiled_thread);
- }
-
-#elif defined(_WIN32) || defined(_WIN64) || defined(__CYGWIN__)
-
- void SampleContext(Sampler* sampler) {
- HANDLE profiled_thread = sampler->platform_data()->profiled_thread();
- if (profiled_thread == NULL) return;
-
- Isolate* isolate = sampler->isolate();
-#if defined(USE_SIMULATOR)
- SimulatorHelper helper;
- if (!helper.Init(sampler, isolate)) return;
-#endif
-
- const DWORD kSuspendFailed = static_cast<DWORD>(-1);
- if (SuspendThread(profiled_thread) == kSuspendFailed) return;
-
- // Context used for sampling the register state of the profiled thread.
- CONTEXT context;
- memset(&context, 0, sizeof(context));
- context.ContextFlags = CONTEXT_FULL;
- if (GetThreadContext(profiled_thread, &context) != 0) {
- RegisterState state;
-#if defined(USE_SIMULATOR)
- helper.FillRegisters(&state);
-#else
-#if V8_HOST_ARCH_X64
- state.pc = reinterpret_cast<Address>(context.Rip);
- state.sp = reinterpret_cast<Address>(context.Rsp);
- state.fp = reinterpret_cast<Address>(context.Rbp);
-#else
- state.pc = reinterpret_cast<Address>(context.Eip);
- state.sp = reinterpret_cast<Address>(context.Esp);
- state.fp = reinterpret_cast<Address>(context.Ebp);
-#endif
-#endif // USE_SIMULATOR
- sampler->SampleStack(state);
- }
- ResumeThread(profiled_thread);
- }
-
-#endif // USE_SIGNALS
-
-
// Protects the process wide state below.
static Mutex* mutex_;
static SamplerThread* instance_;
@@ -652,12 +560,18 @@ DISABLE_ASAN void TickSample::Init(Isolate* isolate,
void Sampler::SetUp() {
+#if defined(USE_SIGNALS)
+ SignalHandler::SetUp();
+#endif
SamplerThread::SetUp();
}
void Sampler::TearDown() {
SamplerThread::TearDown();
+#if defined(USE_SIGNALS)
+ SignalHandler::TearDown();
+#endif
}
@@ -665,6 +579,7 @@ Sampler::Sampler(Isolate* isolate, int interval)
: isolate_(isolate),
interval_(interval),
profiling_(false),
+ has_processing_thread_(false),
active_(false),
is_counting_samples_(false),
js_and_external_sample_count_(0) {
@@ -692,8 +607,24 @@ void Sampler::Stop() {
}
+void Sampler::IncreaseProfilingDepth() {
+ NoBarrier_AtomicIncrement(&profiling_, 1);
+#if defined(USE_SIGNALS)
+ SignalHandler::IncreaseSamplerCount();
+#endif
+}
+
+
+void Sampler::DecreaseProfilingDepth() {
+#if defined(USE_SIGNALS)
+ SignalHandler::DecreaseSamplerCount();
+#endif
+ NoBarrier_AtomicIncrement(&profiling_, -1);
+}
+
+
void Sampler::SampleStack(const RegisterState& state) {
- TickSample* sample = isolate_->cpu_profiler()->TickSampleEvent();
+ TickSample* sample = isolate_->cpu_profiler()->StartTickSample();
TickSample sample_obj;
if (sample == NULL) sample = &sample_obj;
sample->Init(isolate_, state);
@@ -703,6 +634,58 @@ void Sampler::SampleStack(const RegisterState& state) {
}
}
Tick(sample);
+ if (sample != &sample_obj) {
+ isolate_->cpu_profiler()->FinishTickSample();
+ }
+}
+
+
+#if defined(USE_SIGNALS)
+
+void Sampler::DoSample() {
+ if (!SignalHandler::Installed()) return;
+ pthread_kill(platform_data()->vm_tid(), SIGPROF);
}
+#elif V8_OS_WIN || V8_OS_CYGWIN
+
+void Sampler::DoSample() {
+ HANDLE profiled_thread = platform_data()->profiled_thread();
+ if (profiled_thread == NULL) return;
+
+#if defined(USE_SIMULATOR)
+ SimulatorHelper helper;
+ if (!helper.Init(this, isolate())) return;
+#endif
+
+ const DWORD kSuspendFailed = static_cast<DWORD>(-1);
+ if (SuspendThread(profiled_thread) == kSuspendFailed) return;
+
+ // Context used for sampling the register state of the profiled thread.
+ CONTEXT context;
+ memset(&context, 0, sizeof(context));
+ context.ContextFlags = CONTEXT_FULL;
+ if (GetThreadContext(profiled_thread, &context) != 0) {
+ RegisterState state;
+#if defined(USE_SIMULATOR)
+ helper.FillRegisters(&state);
+#else
+#if V8_HOST_ARCH_X64
+ state.pc = reinterpret_cast<Address>(context.Rip);
+ state.sp = reinterpret_cast<Address>(context.Rsp);
+ state.fp = reinterpret_cast<Address>(context.Rbp);
+#else
+ state.pc = reinterpret_cast<Address>(context.Eip);
+ state.sp = reinterpret_cast<Address>(context.Esp);
+ state.fp = reinterpret_cast<Address>(context.Ebp);
+#endif
+#endif // USE_SIMULATOR
+ SampleStack(state);
+ }
+ ResumeThread(profiled_thread);
+}
+
+#endif // USE_SIGNALS
+
+
} } // namespace v8::internal
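
StartTickSample/FinishTickSample above split sample delivery into a reserve-fill-commit protocol, so the signal handler never publishes a half-written sample to the processing thread. A standalone single-producer/single-consumer sketch of that protocol (names invented; V8's own version is its sampling circular queue in circular-queue.h):

#include <atomic>
#include <cstdint>

struct Sample { uintptr_t pc, sp, fp; };

template <int N>
class SampleQueue {
 public:
  // Producer (signal handler): reserve a slot, fill it, then commit.
  Sample* StartEnqueue() {                        // cf. StartTickSample()
    uint32_t next = (tail_ + 1) % N;
    if (next == head_.load(std::memory_order_acquire)) return nullptr;
    return &buffer_[tail_];
  }
  void FinishEnqueue() {                          // cf. FinishTickSample()
    tail_ = (tail_ + 1) % N;
    tail_pub_.store(tail_, std::memory_order_release);
  }
  // Consumer (processor thread): only ever sees committed samples.
  bool Dequeue(Sample* out) {
    uint32_t head = head_.load(std::memory_order_relaxed);
    if (head == tail_pub_.load(std::memory_order_acquire)) return false;
    *out = buffer_[head];
    head_.store((head + 1) % N, std::memory_order_release);
    return true;
  }
 private:
  Sample buffer_[N];
  uint32_t tail_ = 0;                   // producer-private write cursor
  std::atomic<uint32_t> tail_pub_{0};   // published only after commit
  std::atomic<uint32_t> head_{0};
};
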
diff --git a/deps/v8/src/sampler.h b/deps/v8/src/sampler.h
index 80ccc087c..b17a2ed8d 100644
--- a/deps/v8/src/sampler.h
+++ b/deps/v8/src/sampler.h
@@ -94,14 +94,24 @@ class Sampler {
void Start();
void Stop();
- // Is the sampler used for profiling?
- bool IsProfiling() const { return NoBarrier_Load(&profiling_) > 0; }
- void IncreaseProfilingDepth() { NoBarrier_AtomicIncrement(&profiling_, 1); }
- void DecreaseProfilingDepth() { NoBarrier_AtomicIncrement(&profiling_, -1); }
+ // Whether the sampling thread should use this Sampler for CPU profiling.
+ bool IsProfiling() const {
+ return NoBarrier_Load(&profiling_) > 0 &&
+ !NoBarrier_Load(&has_processing_thread_);
+ }
+ void IncreaseProfilingDepth();
+ void DecreaseProfilingDepth();
// Whether the sampler is running (that is, consumes resources).
bool IsActive() const { return NoBarrier_Load(&active_); }
+ void DoSample();
+ // If true, the next sample must be initiated on the profiler event
+ // processor thread right after the latest sample is processed.
+ void SetHasProcessingThread(bool value) {
+ NoBarrier_Store(&has_processing_thread_, value);
+ }
+
// Used in tests to make sure that stack sampling is performed.
unsigned js_and_external_sample_count() const {
return js_and_external_sample_count_;
@@ -125,6 +135,7 @@ class Sampler {
Isolate* isolate_;
const int interval_;
Atomic32 profiling_;
+ Atomic32 has_processing_thread_;
Atomic32 active_;
PlatformData* data_; // Platform specific data.
bool is_counting_samples_;
diff --git a/deps/v8/src/scopeinfo.cc b/deps/v8/src/scopeinfo.cc
index c9df1fb58..ba138f2ad 100644
--- a/deps/v8/src/scopeinfo.cc
+++ b/deps/v8/src/scopeinfo.cc
@@ -74,7 +74,7 @@ Handle<ScopeInfo> ScopeInfo::Create(Scope* scope, Zone* zone) {
+ parameter_count + stack_local_count + 2 * context_local_count
+ (has_function_name ? 2 : 0);
- Factory* factory = Isolate::Current()->factory();
+ Factory* factory = zone->isolate()->factory();
Handle<ScopeInfo> scope_info = factory->NewScopeInfo(length);
// Encode the flags.
@@ -445,7 +445,8 @@ void ContextSlotCache::Update(Object* data,
int slot_index) {
String* internalized_name;
ASSERT(slot_index > kNotFound);
- if (HEAP->InternalizeStringIfExists(name, &internalized_name)) {
+ if (name->GetIsolate()->heap()->InternalizeStringIfExists(
+ name, &internalized_name)) {
int index = Hash(data, internalized_name);
Key& key = keys_[index];
key.data = data;
@@ -472,7 +473,8 @@ void ContextSlotCache::ValidateEntry(Object* data,
InitializationFlag init_flag,
int slot_index) {
String* internalized_name;
- if (HEAP->InternalizeStringIfExists(name, &internalized_name)) {
+ if (name->GetIsolate()->heap()->InternalizeStringIfExists(
+ name, &internalized_name)) {
int index = Hash(data, name);
Key& key = keys_[index];
ASSERT(key.data == data);
diff --git a/deps/v8/src/scopes.cc b/deps/v8/src/scopes.cc
index e631332d5..ce1741a62 100644
--- a/deps/v8/src/scopes.cc
+++ b/deps/v8/src/scopes.cc
@@ -129,7 +129,7 @@ Scope::Scope(Scope* inner_scope,
ScopeType scope_type,
Handle<ScopeInfo> scope_info,
Zone* zone)
- : isolate_(Isolate::Current()),
+ : isolate_(zone->isolate()),
inner_scopes_(4, zone),
variables_(zone),
internals_(4, zone),
@@ -152,7 +152,7 @@ Scope::Scope(Scope* inner_scope,
Scope::Scope(Scope* inner_scope, Handle<String> catch_variable_name, Zone* zone)
- : isolate_(Isolate::Current()),
+ : isolate_(zone->isolate()),
inner_scopes_(1, zone),
variables_(zone),
internals_(0, zone),
@@ -907,26 +907,32 @@ void Scope::Print(int n) {
PrintF("%d heap slots\n", num_heap_slots_); }
// Print locals.
- Indent(n1, "// function var\n");
if (function_ != NULL) {
+ Indent(n1, "// function var:\n");
PrintVar(n1, function_->proxy()->var());
}
- Indent(n1, "// temporary vars\n");
- for (int i = 0; i < temps_.length(); i++) {
- PrintVar(n1, temps_[i]);
+ if (temps_.length() > 0) {
+ Indent(n1, "// temporary vars:\n");
+ for (int i = 0; i < temps_.length(); i++) {
+ PrintVar(n1, temps_[i]);
+ }
}
- Indent(n1, "// internal vars\n");
- for (int i = 0; i < internals_.length(); i++) {
- PrintVar(n1, internals_[i]);
+ if (internals_.length() > 0) {
+ Indent(n1, "// internal vars:\n");
+ for (int i = 0; i < internals_.length(); i++) {
+ PrintVar(n1, internals_[i]);
+ }
}
- Indent(n1, "// local vars\n");
- PrintMap(n1, &variables_);
+ if (variables_.Start() != NULL) {
+ Indent(n1, "// local vars:\n");
+ PrintMap(n1, &variables_);
+ }
- Indent(n1, "// dynamic vars\n");
if (dynamics_ != NULL) {
+ Indent(n1, "// dynamic vars:\n");
PrintMap(n1, dynamics_->GetMap(DYNAMIC));
PrintMap(n1, dynamics_->GetMap(DYNAMIC_LOCAL));
PrintMap(n1, dynamics_->GetMap(DYNAMIC_GLOBAL));
@@ -1086,7 +1092,7 @@ bool Scope::ResolveVariable(CompilationInfo* info,
// Assignment to const. Throw a syntax error.
MessageLocation location(
info->script(), proxy->position(), proxy->position());
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = info->isolate();
Factory* factory = isolate->factory();
Handle<JSArray> array = factory->NewJSArray(0);
Handle<Object> result =
@@ -1117,7 +1123,7 @@ bool Scope::ResolveVariable(CompilationInfo* info,
// TODO(rossberg): generate more helpful error message.
MessageLocation location(
info->script(), proxy->position(), proxy->position());
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = info->isolate();
Factory* factory = isolate->factory();
Handle<JSArray> array = factory->NewJSArray(1);
USE(JSObject::SetElement(array, 0, var->name(), NONE, kStrictMode));
diff --git a/deps/v8/src/serialize.cc b/deps/v8/src/serialize.cc
index 746c92665..d05dd2612 100644
--- a/deps/v8/src/serialize.cc
+++ b/deps/v8/src/serialize.cc
@@ -596,9 +596,9 @@ void ExternalReferenceTable::PopulateTable(Isolate* isolate) {
}
-ExternalReferenceEncoder::ExternalReferenceEncoder()
+ExternalReferenceEncoder::ExternalReferenceEncoder(Isolate* isolate)
: encodings_(Match),
- isolate_(Isolate::Current()) {
+ isolate_(isolate) {
ExternalReferenceTable* external_references =
ExternalReferenceTable::instance(isolate_);
for (int i = 0; i < external_references->size(); ++i) {
@@ -638,9 +638,9 @@ void ExternalReferenceEncoder::Put(Address key, int index) {
}
-ExternalReferenceDecoder::ExternalReferenceDecoder()
+ExternalReferenceDecoder::ExternalReferenceDecoder(Isolate* isolate)
: encodings_(NewArray<Address*>(kTypeCodeCount)),
- isolate_(Isolate::Current()) {
+ isolate_(isolate) {
ExternalReferenceTable* external_references =
ExternalReferenceTable::instance(isolate_);
for (int type = kFirstTypeCode; type < kTypeCodeCount; ++type) {
@@ -780,13 +780,12 @@ class CodeAddressMap: public CodeEventLogger {
CodeAddressMap* Serializer::code_address_map_ = NULL;
-void Serializer::Enable() {
+void Serializer::Enable(Isolate* isolate) {
if (!serialization_enabled_) {
ASSERT(!too_late_to_enable_now_);
}
if (serialization_enabled_) return;
serialization_enabled_ = true;
- i::Isolate* isolate = Isolate::Current();
isolate->InitializeLoggingAndCounters();
code_address_map_ = new CodeAddressMap(isolate);
}
@@ -810,8 +809,8 @@ Deserializer::Deserializer(SnapshotByteSource* source)
}
-void Deserializer::Deserialize() {
- isolate_ = Isolate::Current();
+void Deserializer::Deserialize(Isolate* isolate) {
+ isolate_ = isolate;
ASSERT(isolate_ != NULL);
isolate_->heap()->ReserveSpace(reservations_, &high_water_[0]);
// No active threads.
@@ -819,7 +818,7 @@ void Deserializer::Deserialize() {
// No active handles.
ASSERT(isolate_->handle_scope_implementer()->blocks()->is_empty());
ASSERT_EQ(NULL, external_reference_decoder_);
- external_reference_decoder_ = new ExternalReferenceDecoder();
+ external_reference_decoder_ = new ExternalReferenceDecoder(isolate);
isolate_->heap()->IterateStrongRoots(this, VISIT_ONLY_STRONG);
isolate_->heap()->RepairFreeListsAfterBoot();
isolate_->heap()->IterateWeakRoots(this, VISIT_ALL);
@@ -850,14 +849,14 @@ void Deserializer::Deserialize() {
}
-void Deserializer::DeserializePartial(Object** root) {
- isolate_ = Isolate::Current();
+void Deserializer::DeserializePartial(Isolate* isolate, Object** root) {
+ isolate_ = isolate;
for (int i = NEW_SPACE; i < kNumberOfSpaces; i++) {
ASSERT(reservations_[i] != kUninitializedReservation);
}
isolate_->heap()->ReserveSpace(reservations_, &high_water_[0]);
if (external_reference_decoder_ == NULL) {
- external_reference_decoder_ = new ExternalReferenceDecoder();
+ external_reference_decoder_ = new ExternalReferenceDecoder(isolate);
}
// Keep track of the code space start and end pointers in case new
@@ -1277,12 +1276,12 @@ void SnapshotByteSink::PutInt(uintptr_t integer, const char* description) {
}
-Serializer::Serializer(SnapshotByteSink* sink)
- : sink_(sink),
+Serializer::Serializer(Isolate* isolate, SnapshotByteSink* sink)
+ : isolate_(isolate),
+ sink_(sink),
current_root_index_(0),
- external_reference_encoder_(new ExternalReferenceEncoder),
+ external_reference_encoder_(new ExternalReferenceEncoder(isolate)),
root_index_wave_front_(0) {
- isolate_ = Isolate::Current();
// The serializer is meant to be used only to generate initial heap images
// from a context in which there is only one isolate.
ASSERT(isolate_->IsDefaultIsolate());
@@ -1298,9 +1297,9 @@ Serializer::~Serializer() {
void StartupSerializer::SerializeStrongReferences() {
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = this->isolate();
// No active threads.
- CHECK_EQ(NULL, Isolate::Current()->thread_manager()->FirstThreadStateInUse());
+ CHECK_EQ(NULL, isolate->thread_manager()->FirstThreadStateInUse());
// No active or weak handles.
CHECK(isolate->handle_scope_implementer()->blocks()->is_empty());
CHECK_EQ(0, isolate->global_handles()->NumberOfWeakHandles());
@@ -1308,7 +1307,7 @@ void StartupSerializer::SerializeStrongReferences() {
// We don't support serializing installed extensions.
CHECK(!isolate->has_installed_extensions());
- HEAP->IterateStrongRoots(this, VISIT_ONLY_STRONG);
+ isolate->heap()->IterateStrongRoots(this, VISIT_ONLY_STRONG);
}
@@ -1319,7 +1318,7 @@ void PartialSerializer::Serialize(Object** object) {
void Serializer::VisitPointers(Object** start, Object** end) {
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = this->isolate();
for (Object** current = start; current < end; current++) {
if (start == isolate->heap()->roots_array_start()) {
@@ -1350,9 +1349,9 @@ void Serializer::VisitPointers(Object** start, Object** end) {
// that correspond to the elements of this cache array. On deserialization we
// therefore need to visit the cache array. This fills it up with pointers to
// deserialized objects.
-void SerializerDeserializer::Iterate(ObjectVisitor* visitor) {
+void SerializerDeserializer::Iterate(Isolate* isolate,
+ ObjectVisitor* visitor) {
if (Serializer::enabled()) return;
- Isolate* isolate = Isolate::Current();
for (int i = 0; ; i++) {
if (isolate->serialize_partial_snapshot_cache_length() <= i) {
// Extend the array ready to get a value from the visitor when
@@ -1371,7 +1370,7 @@ void SerializerDeserializer::Iterate(ObjectVisitor* visitor) {
int PartialSerializer::PartialSnapshotCacheIndex(HeapObject* heap_object) {
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = this->isolate();
for (int i = 0;
i < isolate->serialize_partial_snapshot_cache_length();
@@ -1394,7 +1393,7 @@ int PartialSerializer::PartialSnapshotCacheIndex(HeapObject* heap_object) {
int Serializer::RootIndex(HeapObject* heap_object, HowToCode from) {
- Heap* heap = HEAP;
+ Heap* heap = isolate()->heap();
if (heap->InNewSpace(heap_object)) return kInvalidRootIndex;
for (int i = 0; i < root_index_wave_front_; i++) {
Object* root = heap->roots_array_start()[i];
@@ -1484,10 +1483,9 @@ void StartupSerializer::SerializeWeakReferences() {
// will contain some references needed to decode the partial snapshot. We
// add one entry with 'undefined' which is the sentinel that the deserializer
// uses to know it is done deserializing the array.
- Isolate* isolate = Isolate::Current();
- Object* undefined = isolate->heap()->undefined_value();
+ Object* undefined = isolate()->heap()->undefined_value();
VisitPointer(&undefined);
- HEAP->IterateWeakRoots(this, VISIT_ALL);
+ isolate()->heap()->IterateWeakRoots(this, VISIT_ALL);
Pad();
}
@@ -1500,7 +1498,7 @@ void Serializer::PutRoot(int root_index,
if (how_to_code == kPlain &&
where_to_point == kStartOfObject &&
root_index < kRootArrayNumberOfConstantEncodings &&
- !HEAP->InNewSpace(object)) {
+ !isolate()->heap()->InNewSpace(object)) {
if (skip == 0) {
sink_->Put(kRootArrayConstants + kNoSkipDistance + root_index,
"RootConstant");
@@ -1633,7 +1631,7 @@ void Serializer::ObjectSerializer::VisitPointers(Object** start,
root_index != kInvalidRootIndex &&
root_index < kRootArrayNumberOfConstantEncodings &&
current_contents == current[-1]) {
- ASSERT(!HEAP->InNewSpace(current_contents));
+ ASSERT(!serializer_->isolate()->heap()->InNewSpace(current_contents));
int repeat_count = 1;
while (current < end - 1 && current[repeat_count] == current_contents) {
repeat_count++;
@@ -1750,7 +1748,8 @@ void Serializer::ObjectSerializer::VisitExternalAsciiString(
Address references_start = reinterpret_cast<Address>(resource_pointer);
OutputRawData(references_start);
for (int i = 0; i < Natives::GetBuiltinsCount(); i++) {
- Object* source = HEAP->natives_source_cache()->get(i);
+ Object* source =
+ serializer_->isolate()->heap()->natives_source_cache()->get(i);
if (!source->IsUndefined()) {
ExternalAsciiString* string = ExternalAsciiString::cast(source);
typedef v8::String::ExternalAsciiStringResource Resource;
@@ -1819,7 +1818,7 @@ int Serializer::ObjectSerializer::OutputRawData(
int Serializer::SpaceOfObject(HeapObject* object) {
for (int i = FIRST_SPACE; i <= LAST_SPACE; i++) {
AllocationSpace s = static_cast<AllocationSpace>(i);
- if (HEAP->InSpace(object, s)) {
+ if (object->GetHeap()->InSpace(object, s)) {
ASSERT(i < kNumberOfSpaces);
return i;
}
diff --git a/deps/v8/src/serialize.h b/deps/v8/src/serialize.h
index 563f0a06d..020a744fc 100644
--- a/deps/v8/src/serialize.h
+++ b/deps/v8/src/serialize.h
@@ -110,7 +110,7 @@ class ExternalReferenceTable {
class ExternalReferenceEncoder {
public:
- ExternalReferenceEncoder();
+ explicit ExternalReferenceEncoder(Isolate* isolate);
uint32_t Encode(Address key) const;
@@ -134,7 +134,7 @@ class ExternalReferenceEncoder {
class ExternalReferenceDecoder {
public:
- ExternalReferenceDecoder();
+ explicit ExternalReferenceDecoder(Isolate* isolate);
~ExternalReferenceDecoder();
Address Decode(uint32_t key) const {
@@ -208,7 +208,7 @@ class SnapshotByteSource {
// both.
class SerializerDeserializer: public ObjectVisitor {
public:
- static void Iterate(ObjectVisitor* visitor);
+ static void Iterate(Isolate* isolate, ObjectVisitor* visitor);
static int nop() { return kNop; }
@@ -325,10 +325,10 @@ class Deserializer: public SerializerDeserializer {
virtual ~Deserializer();
// Deserialize the snapshot into an empty heap.
- void Deserialize();
+ void Deserialize(Isolate* isolate);
// Deserialize a single object and the objects reachable from it.
- void DeserializePartial(Object** root);
+ void DeserializePartial(Isolate* isolate, Object** root);
void set_reservation(int space_number, int reservation) {
ASSERT(space_number >= 0);
@@ -464,7 +464,7 @@ class CodeAddressMap;
// There can be only one serializer per V8 process.
class Serializer : public SerializerDeserializer {
public:
- explicit Serializer(SnapshotByteSink* sink);
+ Serializer(Isolate* isolate, SnapshotByteSink* sink);
~Serializer();
void VisitPointers(Object** start, Object** end);
// You can call this after serialization to find out how much space was used
@@ -474,7 +474,8 @@ class Serializer : public SerializerDeserializer {
return fullness_[space];
}
- static void Enable();
+ Isolate* isolate() const { return isolate_; }
+ static void Enable(Isolate* isolate);
static void Disable();
// Call this when you have made use of the fact that there is no serialization
@@ -593,9 +594,10 @@ class Serializer : public SerializerDeserializer {
class PartialSerializer : public Serializer {
public:
- PartialSerializer(Serializer* startup_snapshot_serializer,
+ PartialSerializer(Isolate* isolate,
+ Serializer* startup_snapshot_serializer,
SnapshotByteSink* sink)
- : Serializer(sink),
+ : Serializer(isolate, sink),
startup_serializer_(startup_snapshot_serializer) {
set_root_index_wave_front(Heap::kStrongRootListLength);
}
@@ -618,7 +620,8 @@ class PartialSerializer : public Serializer {
return o->IsName() || o->IsSharedFunctionInfo() ||
o->IsHeapNumber() || o->IsCode() ||
o->IsScopeInfo() ||
- o->map() == HEAP->fixed_cow_array_map();
+ o->map() ==
+ startup_serializer_->isolate()->heap()->fixed_cow_array_map();
}
private:
@@ -629,12 +632,13 @@ class PartialSerializer : public Serializer {
class StartupSerializer : public Serializer {
public:
- explicit StartupSerializer(SnapshotByteSink* sink) : Serializer(sink) {
+ StartupSerializer(Isolate* isolate, SnapshotByteSink* sink)
+ : Serializer(isolate, sink) {
// Clear the cache of objects used by the partial snapshot. After the
// strong roots have been serialized we can create a partial snapshot
// which will repopulate the cache with objects needed by that partial
// snapshot.
- Isolate::Current()->set_serialize_partial_snapshot_cache_length(0);
+ isolate->set_serialize_partial_snapshot_cache_length(0);
}
// Serialize the current state of the heap. The order is:
// 1) Strong references.
diff --git a/deps/v8/src/snapshot-common.cc b/deps/v8/src/snapshot-common.cc
index 576269df9..96034e352 100644
--- a/deps/v8/src/snapshot-common.cc
+++ b/deps/v8/src/snapshot-common.cc
@@ -116,7 +116,7 @@ bool Snapshot::HaveASnapshotToStartFrom() {
}
-Handle<Context> Snapshot::NewContextFromSnapshot() {
+Handle<Context> Snapshot::NewContextFromSnapshot(Isolate* isolate) {
if (context_size_ == 0) {
return Handle<Context>();
}
@@ -132,7 +132,7 @@ Handle<Context> Snapshot::NewContextFromSnapshot() {
deserializer.set_reservation(CELL_SPACE, context_cell_space_used_);
deserializer.set_reservation(PROPERTY_CELL_SPACE,
context_property_cell_space_used_);
- deserializer.DeserializePartial(&root);
+ deserializer.DeserializePartial(isolate, &root);
CHECK(root->IsContext());
return Handle<Context>(Context::cast(root));
}
diff --git a/deps/v8/src/snapshot.h b/deps/v8/src/snapshot.h
index 149306e44..4041f2925 100644
--- a/deps/v8/src/snapshot.h
+++ b/deps/v8/src/snapshot.h
@@ -43,7 +43,7 @@ class Snapshot {
static bool HaveASnapshotToStartFrom();
// Create a new context using the internal partial snapshot.
- static Handle<Context> NewContextFromSnapshot();
+ static Handle<Context> NewContextFromSnapshot(Isolate* isolate);
// Returns whether or not the snapshot is enabled.
static bool IsEnabled() { return size_ != 0; }
diff --git a/deps/v8/src/spaces-inl.h b/deps/v8/src/spaces-inl.h
index 77117b8a4..be2ae2a57 100644
--- a/deps/v8/src/spaces-inl.h
+++ b/deps/v8/src/spaces-inl.h
@@ -194,11 +194,11 @@ void MemoryChunk::set_scan_on_scavenge(bool scan) {
}
-MemoryChunk* MemoryChunk::FromAnyPointerAddress(Address addr) {
+MemoryChunk* MemoryChunk::FromAnyPointerAddress(Heap* heap, Address addr) {
MemoryChunk* maybe = reinterpret_cast<MemoryChunk*>(
OffsetFrom(addr) & ~Page::kPageAlignmentMask);
if (maybe->owner() != NULL) return maybe;
- LargeObjectIterator iterator(HEAP->lo_space());
+ LargeObjectIterator iterator(heap->lo_space());
for (HeapObject* o = iterator.Next(); o != NULL; o = iterator.Next()) {
// Fixed arrays are the only pointer-containing objects in large object
// space.
@@ -315,12 +315,12 @@ MaybeObject* NewSpace::AllocateRaw(int size_in_bytes) {
#ifdef DEBUG
// If we are stressing compaction we waste some memory in new space
// in order to get more frequent GCs.
- if (FLAG_stress_compaction && !HEAP->linear_allocation()) {
+ if (FLAG_stress_compaction && !heap()->linear_allocation()) {
if (allocation_info_.limit - old_top >= size_in_bytes * 4) {
int filler_size = size_in_bytes * 4;
for (int i = 0; i < filler_size; i += kPointerSize) {
*(reinterpret_cast<Object**>(old_top + i)) =
- HEAP->one_pointer_filler_map();
+ heap()->one_pointer_filler_map();
}
old_top += filler_size;
allocation_info_.top += filler_size;
diff --git a/deps/v8/src/spaces.cc b/deps/v8/src/spaces.cc
index 5935c4a0e..2faf41912 100644
--- a/deps/v8/src/spaces.cc
+++ b/deps/v8/src/spaces.cc
@@ -228,10 +228,10 @@ Address CodeRange::AllocateRawMemory(const size_t requested_size,
}
ASSERT(*allocated <= current.size);
ASSERT(IsAddressAligned(current.start, MemoryChunk::kAlignment));
- if (!MemoryAllocator::CommitExecutableMemory(code_range_,
- current.start,
- commit_size,
- *allocated)) {
+ if (!isolate_->memory_allocator()->CommitExecutableMemory(code_range_,
+ current.start,
+ commit_size,
+ *allocated)) {
*allocated = 0;
return NULL;
}
@@ -245,7 +245,7 @@ Address CodeRange::AllocateRawMemory(const size_t requested_size,
bool CodeRange::CommitRawMemory(Address start, size_t length) {
- return code_range_->Commit(start, length, true);
+ return isolate_->memory_allocator()->CommitMemory(start, length, EXECUTABLE);
}
@@ -278,7 +278,9 @@ MemoryAllocator::MemoryAllocator(Isolate* isolate)
capacity_(0),
capacity_executable_(0),
size_(0),
- size_executable_(0) {
+ size_executable_(0),
+ lowest_ever_allocated_(reinterpret_cast<void*>(-1)),
+ highest_ever_allocated_(reinterpret_cast<void*>(0)) {
}
@@ -304,6 +306,17 @@ void MemoryAllocator::TearDown() {
}
+bool MemoryAllocator::CommitMemory(Address base,
+ size_t size,
+ Executability executable) {
+ if (!VirtualMemory::CommitRegion(base, size, executable == EXECUTABLE)) {
+ return false;
+ }
+ UpdateAllocatedSpaceLimits(base, base + size);
+ return true;
+}
+
+
void MemoryAllocator::FreeMemory(VirtualMemory* reservation,
Executability executable) {
// TODO(gc) make code_range part of memory allocator?
@@ -383,7 +396,9 @@ Address MemoryAllocator::AllocateAlignedMemory(size_t reserve_size,
base = NULL;
}
} else {
- if (!reservation.Commit(base, commit_size, false)) {
+ if (reservation.Commit(base, commit_size, false)) {
+ UpdateAllocatedSpaceLimits(base, base + commit_size);
+ } else {
base = NULL;
}
}
@@ -509,7 +524,10 @@ bool MemoryChunk::CommitArea(size_t requested) {
Address start = address() + committed_size + guard_size;
size_t length = commit_size - committed_size;
if (reservation_.IsReserved()) {
- if (!reservation_.Commit(start, length, IsFlagSet(IS_EXECUTABLE))) {
+ Executability executable = IsFlagSet(IS_EXECUTABLE)
+ ? EXECUTABLE : NOT_EXECUTABLE;
+ if (!heap()->isolate()->memory_allocator()->CommitMemory(
+ start, length, executable)) {
return false;
}
} else {
@@ -763,7 +781,7 @@ void MemoryAllocator::Free(MemoryChunk* chunk) {
bool MemoryAllocator::CommitBlock(Address start,
size_t size,
Executability executable) {
- if (!VirtualMemory::CommitRegion(start, size, executable)) return false;
+ if (!CommitMemory(start, size, executable)) return false;
if (Heap::ShouldZapGarbage()) {
ZapBlock(start, size);
@@ -899,6 +917,9 @@ bool MemoryAllocator::CommitExecutableMemory(VirtualMemory* vm,
return false;
}
+ UpdateAllocatedSpaceLimits(start,
+ start + CodePageAreaStartOffset() +
+ commit_size - CodePageGuardStartOffset());
return true;
}
@@ -1777,8 +1798,7 @@ void SemiSpaceIterator::Initialize(Address start,
#ifdef DEBUG
// heap_histograms is shared; always clear it before using it.
-static void ClearHistograms() {
- Isolate* isolate = Isolate::Current();
+static void ClearHistograms(Isolate* isolate) {
// We reset the name each time, though it hasn't changed.
#define DEF_TYPE_NAME(name) isolate->heap_histograms()[name].set_name(#name);
INSTANCE_TYPE_LIST(DEF_TYPE_NAME)
@@ -1829,8 +1849,7 @@ static int CollectHistogramInfo(HeapObject* obj) {
}
-static void ReportHistogram(bool print_spill) {
- Isolate* isolate = Isolate::Current();
+static void ReportHistogram(Isolate* isolate, bool print_spill) {
PrintF("\n Object Histogram:\n");
for (int i = 0; i <= LAST_TYPE; i++) {
if (isolate->heap_histograms()[i].number() > 0) {
@@ -1931,7 +1950,7 @@ void NewSpace::ReportStatistics() {
#endif // DEBUG
if (FLAG_log_gc) {
- Isolate* isolate = ISOLATE;
+ Isolate* isolate = heap()->isolate();
DoReportStatistics(isolate, allocated_histogram_, "allocated");
DoReportStatistics(isolate, promoted_histogram_, "promoted");
}
@@ -2043,8 +2062,8 @@ intptr_t FreeListCategory::Concatenate(FreeListCategory* category) {
// This is safe (not going to deadlock) since Concatenate operations
// are never performed on the same free lists at the same time in
// reverse order.
- ScopedLock lock_target(mutex_);
- ScopedLock lock_source(category->mutex());
+ LockGuard<Mutex> target_lock_guard(mutex());
+ LockGuard<Mutex> source_lock_guard(category->mutex());
free_bytes = category->available();
if (end_ == NULL) {
end_ = category->end();
@@ -2685,8 +2704,7 @@ HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
#ifdef DEBUG
-void PagedSpace::ReportCodeStatistics() {
- Isolate* isolate = Isolate::Current();
+void PagedSpace::ReportCodeStatistics(Isolate* isolate) {
CommentStatistic* comments_statistics =
isolate->paged_space_comments_statistics();
ReportCodeKindStatistics(isolate->code_kind_statistics());
@@ -2703,8 +2721,7 @@ void PagedSpace::ReportCodeStatistics() {
}
-void PagedSpace::ResetCodeStatistics() {
- Isolate* isolate = Isolate::Current();
+void PagedSpace::ResetCodeStatistics(Isolate* isolate) {
CommentStatistic* comments_statistics =
isolate->paged_space_comments_statistics();
ClearCodeKindStatistics(isolate->code_kind_statistics());
@@ -2819,11 +2836,11 @@ void PagedSpace::ReportStatistics() {
Capacity(), Waste(), Available(), pct);
if (was_swept_conservatively_) return;
- ClearHistograms();
+ ClearHistograms(heap()->isolate());
HeapObjectIterator obj_it(this);
for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next())
CollectHistogramInfo(obj);
- ReportHistogram(true);
+ ReportHistogram(heap()->isolate(), true);
}
#endif
@@ -2852,8 +2869,7 @@ void FixedSpace::PrepareForMarkCompact() {
// the VerifyObject definition behind VERIFY_HEAP.
void MapSpace::VerifyObject(HeapObject* object) {
- // The object should be a map or a free-list node.
- CHECK(object->IsMap() || object->IsFreeSpace());
+ CHECK(object->IsMap());
}
@@ -2864,16 +2880,12 @@ void MapSpace::VerifyObject(HeapObject* object) {
// the VerifyObject definition behind VERIFY_HEAP.
void CellSpace::VerifyObject(HeapObject* object) {
- // The object should be a global object property cell or a free-list node.
- CHECK(object->IsCell() ||
- object->map() == heap()->two_pointer_filler_map());
+ CHECK(object->IsCell());
}
void PropertyCellSpace::VerifyObject(HeapObject* object) {
- // The object should be a global object property cell or a free-list node.
- CHECK(object->IsPropertyCell() ||
- object->map() == heap()->two_pointer_filler_map());
+ CHECK(object->IsPropertyCell());
}
@@ -3165,7 +3177,7 @@ void LargeObjectSpace::Print() {
void LargeObjectSpace::ReportStatistics() {
PrintF(" size: %" V8_PTR_PREFIX "d\n", size_);
int num_objects = 0;
- ClearHistograms();
+ ClearHistograms(heap()->isolate());
LargeObjectIterator it(this);
for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
num_objects++;
@@ -3174,7 +3186,7 @@ void LargeObjectSpace::ReportStatistics() {
PrintF(" number of objects %d, "
"size of objects %" V8_PTR_PREFIX "d\n", num_objects, objects_size_);
- if (num_objects > 0) ReportHistogram(false);
+ if (num_objects > 0) ReportHistogram(heap()->isolate(), false);
}
diff --git a/deps/v8/src/spaces.h b/deps/v8/src/spaces.h
index aa864b66b..43f44a5c7 100644
--- a/deps/v8/src/spaces.h
+++ b/deps/v8/src/spaces.h
@@ -32,6 +32,7 @@
#include "hashmap.h"
#include "list.h"
#include "log.h"
+#include "platform/mutex.h"
#include "v8utils.h"
namespace v8 {
@@ -306,7 +307,7 @@ class MemoryChunk {
}
// Only works for addresses in pointer spaces, not data or code spaces.
- static inline MemoryChunk* FromAnyPointerAddress(Address addr);
+ static inline MemoryChunk* FromAnyPointerAddress(Heap* heap, Address addr);
Address address() { return reinterpret_cast<Address>(this); }
@@ -1082,6 +1083,13 @@ class MemoryAllocator {
return (Available() / Page::kPageSize) * Page::kMaxNonCodeHeapObjectSize;
}
+ // Returns an indication of whether a pointer is in a space that has
+ // been allocated by this MemoryAllocator.
+ V8_INLINE bool IsOutsideAllocatedSpace(const void* address) const {
+ return address < lowest_ever_allocated_ ||
+ address >= highest_ever_allocated_;
+ }
+
#ifdef DEBUG
// Reports statistic info of the space.
void ReportStatistics();
@@ -1104,6 +1112,8 @@ class MemoryAllocator {
Executability executable,
VirtualMemory* controller);
+ bool CommitMemory(Address addr, size_t size, Executability executable);
+
void FreeMemory(VirtualMemory* reservation, Executability executable);
void FreeMemory(Address addr, size_t size, Executability executable);
@@ -1149,10 +1159,10 @@ class MemoryAllocator {
return CodePageAreaEndOffset() - CodePageAreaStartOffset();
}
- MUST_USE_RESULT static bool CommitExecutableMemory(VirtualMemory* vm,
- Address start,
- size_t commit_size,
- size_t reserved_size);
+ MUST_USE_RESULT bool CommitExecutableMemory(VirtualMemory* vm,
+ Address start,
+ size_t commit_size,
+ size_t reserved_size);
private:
Isolate* isolate_;
@@ -1167,6 +1177,14 @@ class MemoryAllocator {
// Allocated executable space size in bytes.
size_t size_executable_;
+ // We keep the lowest and highest addresses allocated as a quick way
+ // of determining that pointers are outside the heap. The estimate is
+ // conservative, i.e. not all addresses in 'allocated' space are allocated
+ // to our heap. The range is [lowest, highest), inclusive on the low end
+ // and exclusive on the high end.
+ void* lowest_ever_allocated_;
+ void* highest_ever_allocated_;
+
struct MemoryAllocationCallbackRegistration {
MemoryAllocationCallbackRegistration(MemoryAllocationCallback callback,
ObjectSpace space,
@@ -1189,6 +1207,11 @@ class MemoryAllocator {
Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk,
PagedSpace* owner);
+ void UpdateAllocatedSpaceLimits(void* low, void* high) {
+ lowest_ever_allocated_ = Min(lowest_ever_allocated_, low);
+ highest_ever_allocated_ = Max(highest_ever_allocated_, high);
+ }
+
DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryAllocator);
};
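
The lowest_/highest_ever_allocated_ fields above give a cheap, conservative answer to "could this pointer possibly be ours?". The idea in isolation (illustrative class, not the V8 one):

#include <algorithm>
#include <cstddef>

class AddressRangeFilter {
 public:
  AddressRangeFilter()
      : lowest_(reinterpret_cast<char*>(-1)), highest_(nullptr) {}

  // Called from every successful commit, like UpdateAllocatedSpaceLimits.
  void OnCommit(void* base, size_t size) {
    char* low = static_cast<char*>(base);
    lowest_ = std::min(lowest_, low);
    highest_ = std::max(highest_, low + size);
  }

  // No false negatives: "outside" is definite, "inside" only means "maybe",
  // since unrelated mappings may fall inside [lowest_, highest_).
  bool IsOutsideAllocatedSpace(const void* address) const {
    return address < lowest_ || address >= highest_;
  }

 private:
  char* lowest_;
  char* highest_;
};
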
@@ -1445,13 +1468,8 @@ class FreeListCategory {
FreeListCategory() :
top_(NULL),
end_(NULL),
- mutex_(OS::CreateMutex()),
available_(0) {}
- ~FreeListCategory() {
- delete mutex_;
- }
-
intptr_t Concatenate(FreeListCategory* category);
void Reset();
@@ -1477,7 +1495,7 @@ class FreeListCategory {
int available() const { return available_; }
void set_available(int available) { available_ = available; }
- Mutex* mutex() { return mutex_; }
+ Mutex* mutex() { return &mutex_; }
#ifdef DEBUG
intptr_t SumFreeList();
@@ -1487,7 +1505,7 @@ class FreeListCategory {
private:
FreeListNode* top_;
FreeListNode* end_;
- Mutex* mutex_;
+ Mutex mutex_;
// Total available bytes in all blocks of this free list category.
int available_;
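
Embedding the Mutex by value (instead of holding an OS::CreateMutex-allocated pointer) ties the lock's lifetime to its FreeListCategory and removes the manual delete in the destructor. A hedged sketch of a caller, assuming V8's LockGuard from platform/mutex.h (now included at the top of this file) and a hypothetical helper function:

  // Hypothetical caller: concatenating two categories under the lock.
  // mutex() now hands out a pointer to the embedded member.
  intptr_t ConcatenateLocked(FreeListCategory* from, FreeListCategory* to) {
    LockGuard<Mutex> guard(to->mutex());
    return to->Concatenate(from);
  }
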
@@ -1757,8 +1775,8 @@ class PagedSpace : public Space {
// Report code object related statistics
void CollectCodeStatistics();
- static void ReportCodeStatistics();
- static void ResetCodeStatistics();
+ static void ReportCodeStatistics(Isolate* isolate);
+ static void ResetCodeStatistics(Isolate* isolate);
#endif
bool was_swept_conservatively() { return was_swept_conservatively_; }
diff --git a/deps/v8/src/store-buffer-inl.h b/deps/v8/src/store-buffer-inl.h
index bb386dbac..e1fcdee66 100644
--- a/deps/v8/src/store-buffer-inl.h
+++ b/deps/v8/src/store-buffer-inl.h
@@ -67,7 +67,7 @@ void StoreBuffer::EnterDirectlyIntoStoreBuffer(Address addr) {
if (top >= old_limit_) {
ASSERT(callback_ != NULL);
(*callback_)(heap_,
- MemoryChunk::FromAnyPointerAddress(addr),
+ MemoryChunk::FromAnyPointerAddress(heap_, addr),
kStoreBufferFullEvent);
}
}
diff --git a/deps/v8/src/store-buffer.cc b/deps/v8/src/store-buffer.cc
index 9705b6048..22a546742 100644
--- a/deps/v8/src/store-buffer.cc
+++ b/deps/v8/src/store-buffer.cc
@@ -170,7 +170,10 @@ void StoreBuffer::EnsureSpace(intptr_t space_needed) {
PointerChunkIterator it(heap_);
MemoryChunk* chunk;
while ((chunk = it.next()) != NULL) {
- if (chunk->scan_on_scavenge()) page_has_scan_on_scavenge_flag = true;
+ if (chunk->scan_on_scavenge()) {
+ page_has_scan_on_scavenge_flag = true;
+ break;
+ }
}
if (page_has_scan_on_scavenge_flag) {
@@ -218,7 +221,7 @@ void StoreBuffer::ExemptPopularPages(int prime_sample_step, int threshold) {
if (previous_chunk != NULL && previous_chunk->Contains(addr)) {
containing_chunk = previous_chunk;
} else {
- containing_chunk = MemoryChunk::FromAnyPointerAddress(addr);
+ containing_chunk = MemoryChunk::FromAnyPointerAddress(heap_, addr);
}
int old_counter = containing_chunk->store_buffer_counter();
if (old_counter == threshold) {
@@ -244,7 +247,7 @@ void StoreBuffer::Filter(int flag) {
if (previous_chunk != NULL && previous_chunk->Contains(addr)) {
containing_chunk = previous_chunk;
} else {
- containing_chunk = MemoryChunk::FromAnyPointerAddress(addr);
+ containing_chunk = MemoryChunk::FromAnyPointerAddress(heap_, addr);
previous_chunk = containing_chunk;
}
if (!containing_chunk->IsFlagSet(flag)) {
@@ -279,7 +282,10 @@ bool StoreBuffer::PrepareForIteration() {
MemoryChunk* chunk;
bool page_has_scan_on_scavenge_flag = false;
while ((chunk = it.next()) != NULL) {
- if (chunk->scan_on_scavenge()) page_has_scan_on_scavenge_flag = true;
+ if (chunk->scan_on_scavenge()) {
+ page_has_scan_on_scavenge_flag = true;
+ break;
+ }
}
if (page_has_scan_on_scavenge_flag) {
diff --git a/deps/v8/src/string-stream.cc b/deps/v8/src/string-stream.cc
index 9c4394ed7..45b675fa8 100644
--- a/deps/v8/src/string-stream.cc
+++ b/deps/v8/src/string-stream.cc
@@ -194,7 +194,8 @@ void StringStream::PrintObject(Object* o) {
return;
}
if (o->IsHeapObject()) {
- DebugObjectCache* debug_object_cache = Isolate::Current()->
+ HeapObject* ho = HeapObject::cast(o);
+ DebugObjectCache* debug_object_cache = ho->GetIsolate()->
string_stream_debug_object_cache();
for (int i = 0; i < debug_object_cache->length(); i++) {
if ((*debug_object_cache)[i] == o) {
@@ -268,8 +269,8 @@ SmartArrayPointer<const char> StringStream::ToCString() const {
}
-void StringStream::Log() {
- LOG(ISOLATE, StringEvent("StackDump", buffer_));
+void StringStream::Log(Isolate* isolate) {
+ LOG(isolate, StringEvent("StackDump", buffer_));
}
@@ -289,14 +290,13 @@ void StringStream::OutputToFile(FILE* out) {
}
-Handle<String> StringStream::ToString() {
- Factory* factory = Isolate::Current()->factory();
- return factory->NewStringFromUtf8(Vector<const char>(buffer_, length_));
+Handle<String> StringStream::ToString(Isolate* isolate) {
+ return isolate->factory()->NewStringFromUtf8(
+ Vector<const char>(buffer_, length_));
}
-void StringStream::ClearMentionedObjectCache() {
- Isolate* isolate = Isolate::Current();
+void StringStream::ClearMentionedObjectCache(Isolate* isolate) {
isolate->set_string_stream_current_security_token(NULL);
if (isolate->string_stream_debug_object_cache() == NULL) {
isolate->set_string_stream_debug_object_cache(
@@ -307,9 +307,8 @@ void StringStream::ClearMentionedObjectCache() {
#ifdef DEBUG
-bool StringStream::IsMentionedObjectCacheClear() {
- return (
- Isolate::Current()->string_stream_debug_object_cache()->length() == 0);
+bool StringStream::IsMentionedObjectCacheClear(Isolate* isolate) {
+ return isolate->string_stream_debug_object_cache()->length() == 0;
}
#endif
@@ -351,7 +350,7 @@ void StringStream::PrintName(Object* name) {
void StringStream::PrintUsingMap(JSObject* js_object) {
Map* map = js_object->map();
- if (!HEAP->Contains(map) ||
+ if (!js_object->GetHeap()->Contains(map) ||
!map->IsHeapObject() ||
!map->IsMap()) {
Add("<Invalid map>\n");
@@ -385,7 +384,7 @@ void StringStream::PrintUsingMap(JSObject* js_object) {
void StringStream::PrintFixedArray(FixedArray* array, unsigned int limit) {
- Heap* heap = HEAP;
+ Heap* heap = array->GetHeap();
for (unsigned int i = 0; i < 10 && i < limit; i++) {
Object* element = array->get(i);
if (element != heap->the_hole_value()) {
@@ -422,9 +421,9 @@ void StringStream::PrintByteArray(ByteArray* byte_array) {
}
-void StringStream::PrintMentionedObjectCache() {
+void StringStream::PrintMentionedObjectCache(Isolate* isolate) {
DebugObjectCache* debug_object_cache =
- Isolate::Current()->string_stream_debug_object_cache();
+ isolate->string_stream_debug_object_cache();
Add("==== Key ============================================\n\n");
for (int i = 0; i < debug_object_cache->length(); i++) {
HeapObject* printee = (*debug_object_cache)[i];
@@ -457,12 +456,12 @@ void StringStream::PrintMentionedObjectCache() {
void StringStream::PrintSecurityTokenIfChanged(Object* f) {
- Isolate* isolate = Isolate::Current();
+ if (!f->IsHeapObject()) return;
+ HeapObject* obj = HeapObject::cast(f);
+ Isolate* isolate = obj->GetIsolate();
Heap* heap = isolate->heap();
- if (!f->IsHeapObject() || !heap->Contains(HeapObject::cast(f))) {
- return;
- }
- Map* map = HeapObject::cast(f)->map();
+ if (!heap->Contains(obj)) return;
+ Map* map = obj->map();
if (!map->IsHeapObject() ||
!heap->Contains(map) ||
!map->IsMap() ||
@@ -492,48 +491,39 @@ void StringStream::PrintSecurityTokenIfChanged(Object* f) {
void StringStream::PrintFunction(Object* f, Object* receiver, Code** code) {
- if (f->IsHeapObject() &&
- HEAP->Contains(HeapObject::cast(f)) &&
- HEAP->Contains(HeapObject::cast(f)->map()) &&
- HeapObject::cast(f)->map()->IsMap()) {
- if (f->IsJSFunction()) {
- JSFunction* fun = JSFunction::cast(f);
- // Common case: on-stack function present and resolved.
- PrintPrototype(fun, receiver);
- *code = fun->code();
- } else if (f->IsInternalizedString()) {
- // Unresolved and megamorphic calls: Instead of the function
- // we have the function name on the stack.
- PrintName(f);
- Add("/* unresolved */ ");
- } else {
- // Unless this is the frame of a built-in function, we should always have
- // the callee function or name on the stack. If we don't, we have a
- // problem or a change of the stack frame layout.
- Add("%o", f);
- Add("/* warning: no JSFunction object or function name found */ ");
- }
- /* } else if (is_trampoline()) {
- Print("trampoline ");
- */
+ if (!f->IsHeapObject()) {
+ Add("/* warning: 'function' was not a heap object */ ");
+ return;
+ }
+ Heap* heap = HeapObject::cast(f)->GetHeap();
+ if (!heap->Contains(HeapObject::cast(f))) {
+ Add("/* warning: 'function' was not on the heap */ ");
+ return;
+ }
+ if (!heap->Contains(HeapObject::cast(f)->map())) {
+ Add("/* warning: function's map was not on the heap */ ");
+ return;
+ }
+ if (!HeapObject::cast(f)->map()->IsMap()) {
+ Add("/* warning: function's map was not a valid map */ ");
+ return;
+ }
+ if (f->IsJSFunction()) {
+ JSFunction* fun = JSFunction::cast(f);
+ // Common case: on-stack function present and resolved.
+ PrintPrototype(fun, receiver);
+ *code = fun->code();
+ } else if (f->IsInternalizedString()) {
+ // Unresolved and megamorphic calls: Instead of the function
+ // we have the function name on the stack.
+ PrintName(f);
+ Add("/* unresolved */ ");
} else {
- if (!f->IsHeapObject()) {
- Add("/* warning: 'function' was not a heap object */ ");
- return;
- }
- if (!HEAP->Contains(HeapObject::cast(f))) {
- Add("/* warning: 'function' was not on the heap */ ");
- return;
- }
- if (!HEAP->Contains(HeapObject::cast(f)->map())) {
- Add("/* warning: function's map was not on the heap */ ");
- return;
- }
- if (!HeapObject::cast(f)->map()->IsMap()) {
- Add("/* warning: function's map was not a valid map */ ");
- return;
- }
- Add("/* warning: Invalid JSFunction object found */ ");
+ // Unless this is the frame of a built-in function, we should always have
+ // the callee function or name on the stack. If we don't, we have a
+ // problem or a change of the stack frame layout.
+ Add("%o", f);
+ Add("/* warning: no JSFunction object or function name found */ ");
}
}
diff --git a/deps/v8/src/string-stream.h b/deps/v8/src/string-stream.h
index 236799411..e3db2a8a8 100644
--- a/deps/v8/src/string-stream.h
+++ b/deps/v8/src/string-stream.h
@@ -147,8 +147,8 @@ class StringStream {
// Getting the message out.
void OutputToFile(FILE* out);
void OutputToStdOut() { OutputToFile(stdout); }
- void Log();
- Handle<String> ToString();
+ void Log(Isolate* isolate);
+ Handle<String> ToString(Isolate* isolate);
SmartArrayPointer<const char> ToCString() const;
int length() const { return length_; }
@@ -169,10 +169,10 @@ class StringStream {
}
// Mentioned object cache support.
- void PrintMentionedObjectCache();
- static void ClearMentionedObjectCache();
+ void PrintMentionedObjectCache(Isolate* isolate);
+ static void ClearMentionedObjectCache(Isolate* isolate);
#ifdef DEBUG
- static bool IsMentionedObjectCacheClear();
+ static bool IsMentionedObjectCacheClear(Isolate* isolate);
#endif
diff --git a/deps/v8/src/stub-cache.cc b/deps/v8/src/stub-cache.cc
index 9e29a95eb..7b23d0c96 100644
--- a/deps/v8/src/stub-cache.cc
+++ b/deps/v8/src/stub-cache.cc
@@ -45,9 +45,7 @@ namespace internal {
StubCache::StubCache(Isolate* isolate)
- : isolate_(isolate) {
- ASSERT(isolate == Isolate::Current());
-}
+ : isolate_(isolate) { }
void StubCache::Initialize() {
@@ -168,68 +166,68 @@ Handle<Code> StubCache::FindStoreHandler(Handle<Name> name,
}
-Handle<Code> StubCache::ComputeMonomorphicLoadIC(Handle<JSObject> receiver,
+Handle<Code> StubCache::ComputeMonomorphicLoadIC(Handle<HeapObject> receiver,
Handle<Code> handler,
Handle<Name> name) {
- Handle<Code> ic = FindIC(name, receiver, Code::LOAD_IC, handler->type());
+ Handle<Map> map(receiver->map());
+ Handle<Code> ic = FindIC(name, map, Code::LOAD_IC, handler->type());
if (!ic.is_null()) return ic;
LoadStubCompiler ic_compiler(isolate());
- ic = ic_compiler.CompileMonomorphicIC(
- Handle<Map>(receiver->map()), handler, name);
+ ic = ic_compiler.CompileMonomorphicIC(map, handler, name);
- JSObject::UpdateMapCodeCache(receiver, name, ic);
+ HeapObject::UpdateMapCodeCache(receiver, name, ic);
return ic;
}
-Handle<Code> StubCache::ComputeMonomorphicKeyedLoadIC(Handle<JSObject> receiver,
- Handle<Code> handler,
- Handle<Name> name) {
- Handle<Code> ic = FindIC(
- name, receiver, Code::KEYED_LOAD_IC, handler->type());
+Handle<Code> StubCache::ComputeMonomorphicKeyedLoadIC(
+ Handle<HeapObject> receiver,
+ Handle<Code> handler,
+ Handle<Name> name) {
+ Handle<Map> map(receiver->map());
+ Handle<Code> ic = FindIC(name, map, Code::KEYED_LOAD_IC, handler->type());
if (!ic.is_null()) return ic;
KeyedLoadStubCompiler ic_compiler(isolate());
- ic = ic_compiler.CompileMonomorphicIC(
- Handle<Map>(receiver->map()), handler, name);
+ ic = ic_compiler.CompileMonomorphicIC(map, handler, name);
- JSObject::UpdateMapCodeCache(receiver, name, ic);
+ HeapObject::UpdateMapCodeCache(receiver, name, ic);
return ic;
}
-Handle<Code> StubCache::ComputeMonomorphicStoreIC(Handle<JSObject> receiver,
+Handle<Code> StubCache::ComputeMonomorphicStoreIC(Handle<HeapObject> receiver,
Handle<Code> handler,
Handle<Name> name,
StrictModeFlag strict_mode) {
+ Handle<Map> map(receiver->map());
Handle<Code> ic = FindIC(
- name, receiver, Code::STORE_IC, handler->type(), strict_mode);
+ name, map, Code::STORE_IC, handler->type(), strict_mode);
if (!ic.is_null()) return ic;
StoreStubCompiler ic_compiler(isolate(), strict_mode);
- ic = ic_compiler.CompileMonomorphicIC(
- Handle<Map>(receiver->map()), handler, name);
+ ic = ic_compiler.CompileMonomorphicIC(map, handler, name);
- JSObject::UpdateMapCodeCache(receiver, name, ic);
+ HeapObject::UpdateMapCodeCache(receiver, name, ic);
return ic;
}
Handle<Code> StubCache::ComputeMonomorphicKeyedStoreIC(
- Handle<JSObject> receiver,
+ Handle<HeapObject> receiver,
Handle<Code> handler,
Handle<Name> name,
StrictModeFlag strict_mode) {
+ Handle<Map> map(receiver->map());
Handle<Code> ic = FindIC(
- name, receiver, Code::KEYED_STORE_IC, handler->type(), strict_mode);
+ name, map, Code::KEYED_STORE_IC, handler->type(), strict_mode);
if (!ic.is_null()) return ic;
KeyedStoreStubCompiler ic_compiler(isolate(), strict_mode, STANDARD_STORE);
- ic = ic_compiler.CompileMonomorphicIC(
- Handle<Map>(receiver->map()), handler, name);
+ ic = ic_compiler.CompileMonomorphicIC(map, handler, name);
- JSObject::UpdateMapCodeCache(receiver, name, ic);
+ HeapObject::UpdateMapCodeCache(receiver, name, ic);
return ic;
}
@@ -266,7 +264,7 @@ Handle<Code> StubCache::ComputeLoadNonexistent(Handle<Name> name,
LoadStubCompiler compiler(isolate_);
handler =
compiler.CompileLoadNonexistent(receiver, current, cache_name, global);
- JSObject::UpdateMapCodeCache(receiver, cache_name, handler);
+ HeapObject::UpdateMapCodeCache(receiver, cache_name, handler);
return handler;
}
@@ -291,7 +289,7 @@ Handle<Code> StubCache::ComputeLoadField(Handle<Name> name,
LoadStubCompiler compiler(isolate_);
Handle<Code> handler =
compiler.CompileLoadField(receiver, holder, name, field, representation);
- JSObject::UpdateMapCodeCache(stub_holder, name, handler);
+ HeapObject::UpdateMapCodeCache(stub_holder, name, handler);
return handler;
}
@@ -310,7 +308,25 @@ Handle<Code> StubCache::ComputeLoadCallback(
LoadStubCompiler compiler(isolate_);
Handle<Code> handler =
compiler.CompileLoadCallback(receiver, holder, name, callback);
- JSObject::UpdateMapCodeCache(stub_holder, name, handler);
+ HeapObject::UpdateMapCodeCache(stub_holder, name, handler);
+ return handler;
+}
+
+
+Handle<Code> StubCache::ComputeLoadCallback(
+ Handle<Name> name,
+ Handle<JSObject> receiver,
+ Handle<JSObject> holder,
+ const CallOptimization& call_optimization) {
+ Handle<JSObject> stub_holder = StubHolder(receiver, holder);
+ Handle<Code> stub = FindLoadHandler(
+ name, receiver, stub_holder, Code::LOAD_IC, Code::CALLBACKS);
+ if (!stub.is_null()) return stub;
+
+ LoadStubCompiler compiler(isolate_);
+ Handle<Code> handler =
+ compiler.CompileLoadCallback(receiver, holder, name, call_optimization);
+ HeapObject::UpdateMapCodeCache(stub_holder, name, handler);
return handler;
}
@@ -327,7 +343,7 @@ Handle<Code> StubCache::ComputeLoadViaGetter(Handle<Name> name,
LoadStubCompiler compiler(isolate_);
Handle<Code> handler =
compiler.CompileLoadViaGetter(receiver, holder, name, getter);
- JSObject::UpdateMapCodeCache(stub_holder, name, handler);
+ HeapObject::UpdateMapCodeCache(stub_holder, name, handler);
return handler;
}
@@ -343,7 +359,7 @@ Handle<Code> StubCache::ComputeLoadConstant(Handle<Name> name,
LoadStubCompiler compiler(isolate_);
handler = compiler.CompileLoadConstant(receiver, holder, name, value);
- JSObject::UpdateMapCodeCache(stub_holder, name, handler);
+ HeapObject::UpdateMapCodeCache(stub_holder, name, handler);
return handler;
}
@@ -360,7 +376,7 @@ Handle<Code> StubCache::ComputeLoadInterceptor(Handle<Name> name,
LoadStubCompiler compiler(isolate_);
Handle<Code> handler =
compiler.CompileLoadInterceptor(receiver, holder, name);
- JSObject::UpdateMapCodeCache(stub_holder, name, handler);
+ HeapObject::UpdateMapCodeCache(stub_holder, name, handler);
return handler;
}
@@ -383,7 +399,7 @@ Handle<Code> StubCache::ComputeLoadGlobal(Handle<Name> name,
LoadStubCompiler compiler(isolate_);
Handle<Code> ic =
compiler.CompileLoadGlobal(receiver, holder, cell, name, is_dont_delete);
- JSObject::UpdateMapCodeCache(stub_holder, name, ic);
+ HeapObject::UpdateMapCodeCache(stub_holder, name, ic);
return ic;
}
@@ -409,7 +425,7 @@ Handle<Code> StubCache::ComputeKeyedLoadField(Handle<Name> name,
KeyedLoadStubCompiler compiler(isolate_);
Handle<Code> handler =
compiler.CompileLoadField(receiver, holder, name, field, representation);
- JSObject::UpdateMapCodeCache(stub_holder, name, handler);
+ HeapObject::UpdateMapCodeCache(stub_holder, name, handler);
return handler;
}
@@ -426,7 +442,7 @@ Handle<Code> StubCache::ComputeKeyedLoadConstant(Handle<Name> name,
KeyedLoadStubCompiler compiler(isolate_);
handler = compiler.CompileLoadConstant(receiver, holder, name, value);
- JSObject::UpdateMapCodeCache(stub_holder, name, handler);
+ HeapObject::UpdateMapCodeCache(stub_holder, name, handler);
return handler;
}
@@ -442,7 +458,7 @@ Handle<Code> StubCache::ComputeKeyedLoadInterceptor(Handle<Name> name,
KeyedLoadStubCompiler compiler(isolate_);
Handle<Code> handler =
compiler.CompileLoadInterceptor(receiver, holder, name);
- JSObject::UpdateMapCodeCache(stub_holder, name, handler);
+ HeapObject::UpdateMapCodeCache(stub_holder, name, handler);
return handler;
}
@@ -460,7 +476,25 @@ Handle<Code> StubCache::ComputeKeyedLoadCallback(
KeyedLoadStubCompiler compiler(isolate_);
Handle<Code> handler =
compiler.CompileLoadCallback(receiver, holder, name, callback);
- JSObject::UpdateMapCodeCache(stub_holder, name, handler);
+ HeapObject::UpdateMapCodeCache(stub_holder, name, handler);
+ return handler;
+}
+
+
+Handle<Code> StubCache::ComputeKeyedLoadCallback(
+ Handle<Name> name,
+ Handle<JSObject> receiver,
+ Handle<JSObject> holder,
+ const CallOptimization& call_optimization) {
+ Handle<JSObject> stub_holder = StubHolder(receiver, holder);
+ Handle<Code> stub = FindLoadHandler(
+ name, receiver, stub_holder, Code::KEYED_LOAD_IC, Code::CALLBACKS);
+ if (!stub.is_null()) return stub;
+
+ KeyedLoadStubCompiler compiler(isolate_);
+ Handle<Code> handler =
+ compiler.CompileLoadCallback(receiver, holder, name, call_optimization);
+ HeapObject::UpdateMapCodeCache(stub_holder, name, handler);
return handler;
}
@@ -475,7 +509,7 @@ Handle<Code> StubCache::ComputeStoreField(Handle<Name> name,
StoreStubCompiler compiler(isolate_, strict_mode);
Handle<Code> handler = compiler.CompileStoreField(receiver, lookup, name);
- JSObject::UpdateMapCodeCache(receiver, name, handler);
+ HeapObject::UpdateMapCodeCache(receiver, name, handler);
return handler;
}
@@ -492,7 +526,7 @@ Handle<Code> StubCache::ComputeStoreTransition(Handle<Name> name,
StoreStubCompiler compiler(isolate_, strict_mode);
Handle<Code> handler =
compiler.CompileStoreTransition(receiver, lookup, transition, name);
- JSObject::UpdateMapCodeCache(receiver, name, handler);
+ HeapObject::UpdateMapCodeCache(receiver, name, handler);
return handler;
}
@@ -572,7 +606,7 @@ Handle<Code> StubCache::ComputeStoreGlobal(Handle<Name> name,
Handle<Map> cell_map(isolate_->heap()->global_property_cell_map());
code->ReplaceNthObject(1, *cell_map, *cell);
- JSObject::UpdateMapCodeCache(receiver, name, code);
+ HeapObject::UpdateMapCodeCache(receiver, name, code);
return code;
}
@@ -592,7 +626,25 @@ Handle<Code> StubCache::ComputeStoreCallback(
StoreStubCompiler compiler(isolate_, strict_mode);
Handle<Code> handler = compiler.CompileStoreCallback(
receiver, holder, name, callback);
- JSObject::UpdateMapCodeCache(receiver, name, handler);
+ HeapObject::UpdateMapCodeCache(receiver, name, handler);
+ return handler;
+}
+
+
+Handle<Code> StubCache::ComputeStoreCallback(
+ Handle<Name> name,
+ Handle<JSObject> receiver,
+ Handle<JSObject> holder,
+ const CallOptimization& call_optimization,
+ StrictModeFlag strict_mode) {
+ Handle<Code> stub = FindStoreHandler(
+ name, receiver, Code::STORE_IC, Code::CALLBACKS, strict_mode);
+ if (!stub.is_null()) return stub;
+
+ StoreStubCompiler compiler(isolate_, strict_mode);
+ Handle<Code> handler = compiler.CompileStoreCallback(
+ receiver, holder, name, call_optimization);
+ HeapObject::UpdateMapCodeCache(receiver, name, handler);
return handler;
}
@@ -609,7 +661,7 @@ Handle<Code> StubCache::ComputeStoreViaSetter(Handle<Name> name,
StoreStubCompiler compiler(isolate_, strict_mode);
Handle<Code> handler = compiler.CompileStoreViaSetter(
receiver, holder, name, setter);
- JSObject::UpdateMapCodeCache(receiver, name, handler);
+ HeapObject::UpdateMapCodeCache(receiver, name, handler);
return handler;
}
@@ -623,7 +675,7 @@ Handle<Code> StubCache::ComputeStoreInterceptor(Handle<Name> name,
StoreStubCompiler compiler(isolate_, strict_mode);
Handle<Code> handler = compiler.CompileStoreInterceptor(receiver, name);
- JSObject::UpdateMapCodeCache(receiver, name, handler);
+ HeapObject::UpdateMapCodeCache(receiver, name, handler);
return handler;
}
@@ -638,7 +690,7 @@ Handle<Code> StubCache::ComputeKeyedStoreField(Handle<Name> name,
KeyedStoreStubCompiler compiler(isolate(), strict_mode, STANDARD_STORE);
Handle<Code> handler = compiler.CompileStoreField(receiver, lookup, name);
- JSObject::UpdateMapCodeCache(receiver, name, handler);
+ HeapObject::UpdateMapCodeCache(receiver, name, handler);
return handler;
}
@@ -656,7 +708,7 @@ Handle<Code> StubCache::ComputeKeyedStoreTransition(
KeyedStoreStubCompiler compiler(isolate(), strict_mode, STANDARD_STORE);
Handle<Code> handler =
compiler.CompileStoreTransition(receiver, lookup, transition, name);
- JSObject::UpdateMapCodeCache(receiver, name, handler);
+ HeapObject::UpdateMapCodeCache(receiver, name, handler);
return handler;
}
@@ -713,7 +765,7 @@ Handle<Code> StubCache::ComputeCallConstant(int argc,
GDBJIT(AddCode(GDBJITInterface::CALL_IC, *name, *code));
if (CallStubCompiler::CanBeCached(function)) {
- JSObject::UpdateMapCodeCache(stub_holder, name, code);
+ HeapObject::UpdateMapCodeCache(stub_holder, name, code);
}
return code;
}
@@ -754,7 +806,7 @@ Handle<Code> StubCache::ComputeCallField(int argc,
PROFILE(isolate_,
CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG), *code, *name));
GDBJIT(AddCode(GDBJITInterface::CALL_IC, *name, *code));
- JSObject::UpdateMapCodeCache(stub_holder, name, code);
+ HeapObject::UpdateMapCodeCache(stub_holder, name, code);
return code;
}
@@ -793,7 +845,7 @@ Handle<Code> StubCache::ComputeCallInterceptor(int argc,
PROFILE(isolate(),
CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG), *code, *name));
GDBJIT(AddCode(GDBJITInterface::CALL_IC, *name, *code));
- JSObject::UpdateMapCodeCache(stub_holder, name, code);
+ HeapObject::UpdateMapCodeCache(stub_holder, name, code);
return code;
}
@@ -824,7 +876,7 @@ Handle<Code> StubCache::ComputeCallGlobal(int argc,
CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG), *code, *name));
GDBJIT(AddCode(GDBJITInterface::CALL_IC, *name, *code));
if (CallStubCompiler::CanBeCached(function)) {
- JSObject::UpdateMapCodeCache(stub_holder, name, code);
+ HeapObject::UpdateMapCodeCache(stub_holder, name, code);
}
return code;
}
@@ -1176,7 +1228,8 @@ RUNTIME_FUNCTION(MaybeObject*, StoreCallbackProperty) {
JSObject* recv = JSObject::cast(args[0]);
ExecutableAccessorInfo* callback = ExecutableAccessorInfo::cast(args[1]);
Address setter_address = v8::ToCData<Address>(callback->setter());
- v8::AccessorSetter fun = FUNCTION_CAST<v8::AccessorSetter>(setter_address);
+ v8::AccessorSetterCallback fun =
+ FUNCTION_CAST<v8::AccessorSetterCallback>(setter_address);
ASSERT(fun != NULL);
ASSERT(callback->IsCompatibleReceiver(recv));
Handle<Name> name = args.at<Name>(2);
@@ -1196,9 +1249,6 @@ RUNTIME_FUNCTION(MaybeObject*, StoreCallbackProperty) {
}
-static const int kAccessorInfoOffsetInInterceptorArgs = 2;
-
-
/**
* Attempts to load a property with an interceptor (which must be present),
* but doesn't search the prototype chain.
@@ -1207,13 +1257,11 @@ static const int kAccessorInfoOffsetInInterceptorArgs = 2;
* provide any value for the given name.
*/
RUNTIME_FUNCTION(MaybeObject*, LoadPropertyWithInterceptorOnly) {
- typedef PropertyCallbackArguments PCA;
- static const int kArgsOffset = kAccessorInfoOffsetInInterceptorArgs;
- Handle<Name> name_handle = args.at<Name>(0);
- Handle<InterceptorInfo> interceptor_info = args.at<InterceptorInfo>(1);
- ASSERT(kArgsOffset == 2);
- // No ReturnValue in interceptors.
- ASSERT_EQ(kArgsOffset + PCA::kArgsLength - 2, args.length());
+ ASSERT(args.length() == StubCache::kInterceptorArgsLength);
+ Handle<Name> name_handle =
+ args.at<Name>(StubCache::kInterceptorArgsNameIndex);
+ Handle<InterceptorInfo> interceptor_info =
+ args.at<InterceptorInfo>(StubCache::kInterceptorArgsInfoIndex);
// TODO(rossberg): Support symbols in the API.
if (name_handle->IsSymbol())
@@ -1221,18 +1269,16 @@ RUNTIME_FUNCTION(MaybeObject*, LoadPropertyWithInterceptorOnly) {
Handle<String> name = Handle<String>::cast(name_handle);
Address getter_address = v8::ToCData<Address>(interceptor_info->getter());
- v8::NamedPropertyGetter getter =
- FUNCTION_CAST<v8::NamedPropertyGetter>(getter_address);
+ v8::NamedPropertyGetterCallback getter =
+ FUNCTION_CAST<v8::NamedPropertyGetterCallback>(getter_address);
ASSERT(getter != NULL);
Handle<JSObject> receiver =
- args.at<JSObject>(kArgsOffset - PCA::kThisIndex);
+ args.at<JSObject>(StubCache::kInterceptorArgsThisIndex);
Handle<JSObject> holder =
- args.at<JSObject>(kArgsOffset - PCA::kHolderIndex);
- PropertyCallbackArguments callback_args(isolate,
- interceptor_info->data(),
- *receiver,
- *holder);
+ args.at<JSObject>(StubCache::kInterceptorArgsHolderIndex);
+ PropertyCallbackArguments callback_args(
+ isolate, interceptor_info->data(), *receiver, *holder);
{
// Use the interceptor getter.
HandleScope scope(isolate);
@@ -1256,7 +1302,7 @@ static MaybeObject* ThrowReferenceError(Isolate* isolate, Name* name) {
// can't use either LoadIC or KeyedLoadIC constructors.
IC ic(IC::NO_EXTRA_FRAME, isolate);
ASSERT(ic.target()->is_load_stub() || ic.target()->is_keyed_load_stub());
- if (!ic.SlowIsUndeclaredGlobal()) return HEAP->undefined_value();
+ if (!ic.SlowIsUndeclaredGlobal()) return isolate->heap()->undefined_value();
// Throw a reference error.
HandleScope scope(isolate);
@@ -1270,17 +1316,15 @@ static MaybeObject* ThrowReferenceError(Isolate* isolate, Name* name) {
static MaybeObject* LoadWithInterceptor(Arguments* args,
PropertyAttributes* attrs) {
- typedef PropertyCallbackArguments PCA;
- static const int kArgsOffset = kAccessorInfoOffsetInInterceptorArgs;
- Handle<Name> name_handle = args->at<Name>(0);
- Handle<InterceptorInfo> interceptor_info = args->at<InterceptorInfo>(1);
- ASSERT(kArgsOffset == 2);
- // No ReturnValue in interceptors.
- ASSERT_EQ(kArgsOffset + PCA::kArgsLength - 2, args->length());
+ ASSERT(args->length() == StubCache::kInterceptorArgsLength);
+ Handle<Name> name_handle =
+ args->at<Name>(StubCache::kInterceptorArgsNameIndex);
+ Handle<InterceptorInfo> interceptor_info =
+ args->at<InterceptorInfo>(StubCache::kInterceptorArgsInfoIndex);
Handle<JSObject> receiver_handle =
- args->at<JSObject>(kArgsOffset - PCA::kThisIndex);
+ args->at<JSObject>(StubCache::kInterceptorArgsThisIndex);
Handle<JSObject> holder_handle =
- args->at<JSObject>(kArgsOffset - PCA::kHolderIndex);
+ args->at<JSObject>(StubCache::kInterceptorArgsHolderIndex);
Isolate* isolate = receiver_handle->GetIsolate();
@@ -1291,8 +1335,8 @@ static MaybeObject* LoadWithInterceptor(Arguments* args,
Handle<String> name = Handle<String>::cast(name_handle);
Address getter_address = v8::ToCData<Address>(interceptor_info->getter());
- v8::NamedPropertyGetter getter =
- FUNCTION_CAST<v8::NamedPropertyGetter>(getter_address);
+ v8::NamedPropertyGetterCallback getter =
+ FUNCTION_CAST<v8::NamedPropertyGetterCallback>(getter_address);
ASSERT(getter != NULL);
PropertyCallbackArguments callback_args(isolate,
@@ -1647,6 +1691,25 @@ Handle<Code> BaseLoadStubCompiler::CompileLoadCallback(
}
+Handle<Code> BaseLoadStubCompiler::CompileLoadCallback(
+ Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<Name> name,
+ const CallOptimization& call_optimization) {
+ ASSERT(call_optimization.is_simple_api_call());
+ Label success;
+
+ Handle<JSFunction> callback = call_optimization.constant_function();
+ CallbackHandlerFrontend(
+ object, receiver(), holder, name, &success, callback);
+ __ bind(&success);
+ GenerateLoadCallback(call_optimization);
+
+ // Return the generated code.
+ return GetCode(kind(), Code::CALLBACKS, name);
+}
+
+
Handle<Code> BaseLoadStubCompiler::CompileLoadInterceptor(
Handle<JSObject> object,
Handle<JSObject> holder,
@@ -1855,9 +1918,8 @@ Handle<Code> KeyedStoreStubCompiler::CompileStoreElement(
ElementsKind elements_kind = receiver_map->elements_kind();
bool is_jsarray = receiver_map->instance_type() == JS_ARRAY_TYPE;
Handle<Code> stub;
- if (FLAG_compiled_keyed_stores &&
- (receiver_map->has_fast_elements() ||
- receiver_map->has_external_array_elements())) {
+ if (receiver_map->has_fast_elements() ||
+ receiver_map->has_external_array_elements()) {
stub = KeyedStoreFastElementStub(
is_jsarray,
elements_kind,
@@ -1998,9 +2060,8 @@ Handle<Code> KeyedStoreStubCompiler::CompileStoreElementPolymorphic(
is_js_array,
store_mode_).GetCode(isolate());
} else {
- if (FLAG_compiled_keyed_stores &&
- (receiver_map->has_fast_elements() ||
- receiver_map->has_external_array_elements())) {
+ if (receiver_map->has_fast_elements() ||
+ receiver_map->has_external_array_elements()) {
cached_stub = KeyedStoreFastElementStub(
is_js_array,
elements_kind,
diff --git a/deps/v8/src/stub-cache.h b/deps/v8/src/stub-cache.h
index bd0678ed3..63cb42b46 100644
--- a/deps/v8/src/stub-cache.h
+++ b/deps/v8/src/stub-cache.h
@@ -48,6 +48,8 @@ namespace internal {
// invalidate the cache whenever a prototype map is changed. The stub
// validates the map chain as in the mono-morphic case.
+
+class CallOptimization;
class SmallMapList;
class StubCache;
@@ -102,20 +104,20 @@ class StubCache {
Code::StubType type,
StrictModeFlag strict_mode);
- Handle<Code> ComputeMonomorphicLoadIC(Handle<JSObject> receiver,
+ Handle<Code> ComputeMonomorphicLoadIC(Handle<HeapObject> receiver,
Handle<Code> handler,
Handle<Name> name);
- Handle<Code> ComputeMonomorphicKeyedLoadIC(Handle<JSObject> receiver,
+ Handle<Code> ComputeMonomorphicKeyedLoadIC(Handle<HeapObject> receiver,
Handle<Code> handler,
Handle<Name> name);
- Handle<Code> ComputeMonomorphicStoreIC(Handle<JSObject> receiver,
+ Handle<Code> ComputeMonomorphicStoreIC(Handle<HeapObject> receiver,
Handle<Code> handler,
Handle<Name> name,
StrictModeFlag strict_mode);
- Handle<Code> ComputeMonomorphicKeyedStoreIC(Handle<JSObject> receiver,
+ Handle<Code> ComputeMonomorphicKeyedStoreIC(Handle<HeapObject> receiver,
Handle<Code> handler,
Handle<Name> name,
StrictModeFlag strict_mode);
@@ -136,6 +138,11 @@ class StubCache {
Handle<JSObject> holder,
Handle<ExecutableAccessorInfo> callback);
+ Handle<Code> ComputeLoadCallback(Handle<Name> name,
+ Handle<JSObject> object,
+ Handle<JSObject> holder,
+ const CallOptimization& call_optimization);
+
Handle<Code> ComputeLoadViaGetter(Handle<Name> name,
Handle<JSObject> object,
Handle<JSObject> holder,
@@ -173,6 +180,12 @@ class StubCache {
Handle<JSObject> holder,
Handle<ExecutableAccessorInfo> callback);
+ Handle<Code> ComputeKeyedLoadCallback(
+ Handle<Name> name,
+ Handle<JSObject> object,
+ Handle<JSObject> holder,
+ const CallOptimization& call_optimization);
+
Handle<Code> ComputeKeyedLoadConstant(Handle<Name> name,
Handle<JSObject> object,
Handle<JSObject> holder,
@@ -209,6 +222,12 @@ class StubCache {
Handle<ExecutableAccessorInfo> callback,
StrictModeFlag strict_mode);
+ Handle<Code> ComputeStoreCallback(Handle<Name> name,
+ Handle<JSObject> object,
+ Handle<JSObject> holder,
+ const CallOptimization& call_optimization,
+ StrictModeFlag strict_mode);
+
Handle<Code> ComputeStoreViaSetter(Handle<Name> name,
Handle<JSObject> object,
Handle<JSObject> holder,
@@ -389,6 +408,16 @@ class StubCache {
Heap* heap() { return isolate()->heap(); }
Factory* factory() { return isolate()->factory(); }
+ // These constants describe the structure of the interceptor arguments on the
+ // stack. The arguments are pushed by the (platform-specific)
+ // PushInterceptorArguments and read by LoadPropertyWithInterceptorOnly and
+ // LoadWithInterceptor.
+ static const int kInterceptorArgsNameIndex = 0;
+ static const int kInterceptorArgsInfoIndex = 1;
+ static const int kInterceptorArgsThisIndex = 2;
+ static const int kInterceptorArgsHolderIndex = 3;
+ static const int kInterceptorArgsLength = 4;
+
private:
explicit StubCache(Isolate* isolate);
@@ -705,6 +734,11 @@ class BaseLoadStubCompiler: public BaseLoadStoreStubCompiler {
Handle<Name> name,
Handle<ExecutableAccessorInfo> callback);
+ Handle<Code> CompileLoadCallback(Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<Name> name,
+ const CallOptimization& call_optimization);
+
Handle<Code> CompileLoadConstant(Handle<JSObject> object,
Handle<JSObject> holder,
Handle<Name> name,
@@ -730,7 +764,7 @@ class BaseLoadStubCompiler: public BaseLoadStoreStubCompiler {
Handle<JSObject> holder,
Handle<Name> name,
Label* success,
- Handle<ExecutableAccessorInfo> callback);
+ Handle<Object> callback);
void NonexistentHandlerFrontend(Handle<JSObject> object,
Handle<JSObject> last,
Handle<Name> name,
@@ -744,6 +778,7 @@ class BaseLoadStubCompiler: public BaseLoadStoreStubCompiler {
void GenerateLoadConstant(Handle<Object> value);
void GenerateLoadCallback(Register reg,
Handle<ExecutableAccessorInfo> callback);
+ void GenerateLoadCallback(const CallOptimization& call_optimization);
void GenerateLoadInterceptor(Register holder_reg,
Handle<JSObject> object,
Handle<JSObject> holder,
@@ -941,6 +976,11 @@ class StoreStubCompiler: public BaseStoreStubCompiler {
Handle<Name> name,
Handle<ExecutableAccessorInfo> callback);
+ Handle<Code> CompileStoreCallback(Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<Name> name,
+ const CallOptimization& call_optimization);
+
static void GenerateStoreViaSetter(MacroAssembler* masm,
Handle<JSFunction> setter);
@@ -952,10 +992,6 @@ class StoreStubCompiler: public BaseStoreStubCompiler {
Handle<Code> CompileStoreInterceptor(Handle<JSObject> object,
Handle<Name> name);
- Handle<Code> CompileStoreGlobal(Handle<GlobalObject> object,
- Handle<PropertyCell> holder,
- Handle<Name> name);
-
private:
static Register* registers();
virtual Code::Kind kind() { return Code::STORE_IC; }
@@ -984,18 +1020,6 @@ class KeyedStoreStubCompiler: public BaseStoreStubCompiler {
Handle<Code> CompileStoreElementPolymorphic(MapHandleList* receiver_maps);
- static void GenerateStoreFastElement(MacroAssembler* masm,
- bool is_js_array,
- ElementsKind element_kind,
- KeyedAccessStoreMode store_mode);
-
- static void GenerateStoreFastDoubleElement(MacroAssembler* masm,
- bool is_js_array,
- KeyedAccessStoreMode store_mode);
-
- static void GenerateStoreExternalArray(MacroAssembler* masm,
- ElementsKind elements_kind);
-
static void GenerateStoreDictionaryElement(MacroAssembler* masm);
protected:
@@ -1040,8 +1064,6 @@ class KeyedStoreStubCompiler: public BaseStoreStubCompiler {
V(ArrayCode)
-class CallOptimization;
-
class CallStubCompiler: public StubCompiler {
public:
CallStubCompiler(Isolate* isolate,
@@ -1173,6 +1195,12 @@ class CallOptimization BASE_EMBEDDED {
int GetPrototypeDepthOfExpectedType(Handle<JSObject> object,
Handle<JSObject> holder) const;
+ bool IsCompatibleReceiver(Object* receiver) {
+ ASSERT(is_simple_api_call());
+ if (expected_receiver_type_.is_null()) return true;
+ return receiver->IsInstanceOf(*expected_receiver_type_);
+ }
+
private:
void Initialize(Handle<JSFunction> function);
diff --git a/deps/v8/src/sweeper-thread.cc b/deps/v8/src/sweeper-thread.cc
index ede567a48..58c684a54 100644
--- a/deps/v8/src/sweeper-thread.cc
+++ b/deps/v8/src/sweeper-thread.cc
@@ -42,9 +42,9 @@ SweeperThread::SweeperThread(Isolate* isolate)
isolate_(isolate),
heap_(isolate->heap()),
collector_(heap_->mark_compact_collector()),
- start_sweeping_semaphore_(OS::CreateSemaphore(0)),
- end_sweeping_semaphore_(OS::CreateSemaphore(0)),
- stop_semaphore_(OS::CreateSemaphore(0)),
+ start_sweeping_semaphore_(0),
+ end_sweeping_semaphore_(0),
+ stop_semaphore_(0),
free_list_old_data_space_(heap_->paged_space(OLD_DATA_SPACE)),
free_list_old_pointer_space_(heap_->paged_space(OLD_POINTER_SPACE)),
private_free_list_old_data_space_(heap_->paged_space(OLD_DATA_SPACE)),
@@ -61,10 +61,10 @@ void SweeperThread::Run() {
DisallowHandleDereference no_deref;
while (true) {
- start_sweeping_semaphore_->Wait();
+ start_sweeping_semaphore_.Wait();
if (Acquire_Load(&stop_thread_)) {
- stop_semaphore_->Signal();
+ stop_semaphore_.Signal();
return;
}
@@ -74,7 +74,7 @@ void SweeperThread::Run() {
collector_->SweepInParallel(heap_->old_pointer_space(),
&private_free_list_old_pointer_space_,
&free_list_old_pointer_space_);
- end_sweeping_semaphore_->Signal();
+ end_sweeping_semaphore_.Signal();
}
}
@@ -91,18 +91,18 @@ intptr_t SweeperThread::StealMemory(PagedSpace* space) {
void SweeperThread::Stop() {
Release_Store(&stop_thread_, static_cast<AtomicWord>(true));
- start_sweeping_semaphore_->Signal();
- stop_semaphore_->Wait();
+ start_sweeping_semaphore_.Signal();
+ stop_semaphore_.Wait();
Join();
}
void SweeperThread::StartSweeping() {
- start_sweeping_semaphore_->Signal();
+ start_sweeping_semaphore_.Signal();
}
void SweeperThread::WaitForSweeperThread() {
- end_sweeping_semaphore_->Wait();
+ end_sweeping_semaphore_.Wait();
}
} } // namespace v8::internal
diff --git a/deps/v8/src/sweeper-thread.h b/deps/v8/src/sweeper-thread.h
index a17098214..c36cfc39a 100644
--- a/deps/v8/src/sweeper-thread.h
+++ b/deps/v8/src/sweeper-thread.h
@@ -43,6 +43,7 @@ namespace internal {
class SweeperThread : public Thread {
public:
explicit SweeperThread(Isolate* isolate);
+ ~SweeperThread() {}
void Run();
void Stop();
@@ -50,19 +51,13 @@ class SweeperThread : public Thread {
void WaitForSweeperThread();
intptr_t StealMemory(PagedSpace* space);
- ~SweeperThread() {
- delete start_sweeping_semaphore_;
- delete end_sweeping_semaphore_;
- delete stop_semaphore_;
- }
-
private:
Isolate* isolate_;
Heap* heap_;
MarkCompactCollector* collector_;
- Semaphore* start_sweeping_semaphore_;
- Semaphore* end_sweeping_semaphore_;
- Semaphore* stop_semaphore_;
+ Semaphore start_sweeping_semaphore_;
+ Semaphore end_sweeping_semaphore_;
+ Semaphore stop_semaphore_;
FreeList free_list_old_data_space_;
FreeList free_list_old_pointer_space_;
FreeList private_free_list_old_data_space_;
diff --git a/deps/v8/src/transitions.cc b/deps/v8/src/transitions.cc
index 086edcb99..9d3f03894 100644
--- a/deps/v8/src/transitions.cc
+++ b/deps/v8/src/transitions.cc
@@ -35,20 +35,20 @@ namespace v8 {
namespace internal {
-static MaybeObject* AllocateRaw(int length) {
- Heap* heap = Isolate::Current()->heap();
-
+static MaybeObject* AllocateRaw(Isolate* isolate, int length) {
// Use FixedArray to not use TransitionArray::cast on incomplete object.
FixedArray* array;
- MaybeObject* maybe_array = heap->AllocateFixedArray(length);
+ MaybeObject* maybe_array = isolate->heap()->AllocateFixedArray(length);
if (!maybe_array->To(&array)) return maybe_array;
return array;
}
-MaybeObject* TransitionArray::Allocate(int number_of_transitions) {
+MaybeObject* TransitionArray::Allocate(Isolate* isolate,
+ int number_of_transitions) {
FixedArray* array;
- MaybeObject* maybe_array = AllocateRaw(ToKeyIndex(number_of_transitions));
+ MaybeObject* maybe_array =
+ AllocateRaw(isolate, ToKeyIndex(number_of_transitions));
if (!maybe_array->To(&array)) return maybe_array;
array->set(kPrototypeTransitionsIndex, Smi::FromInt(0));
return array;
@@ -77,11 +77,11 @@ MaybeObject* TransitionArray::NewWith(SimpleTransitionFlag flag,
MaybeObject* maybe_result;
if (flag == SIMPLE_TRANSITION) {
- maybe_result = AllocateRaw(kSimpleTransitionSize);
+ maybe_result = AllocateRaw(target->GetIsolate(), kSimpleTransitionSize);
if (!maybe_result->To(&result)) return maybe_result;
result->set(kSimpleTransitionTarget, target);
} else {
- maybe_result = Allocate(1);
+ maybe_result = Allocate(target->GetIsolate(), 1);
if (!maybe_result->To(&result)) return maybe_result;
result->NoIncrementalWriteBarrierSet(0, key, target);
}
@@ -94,7 +94,7 @@ MaybeObject* TransitionArray::ExtendToFullTransitionArray() {
ASSERT(!IsFullTransitionArray());
int nof = number_of_transitions();
TransitionArray* result;
- MaybeObject* maybe_result = Allocate(nof);
+ MaybeObject* maybe_result = Allocate(GetIsolate(), nof);
if (!maybe_result->To(&result)) return maybe_result;
if (nof == 1) {
@@ -116,7 +116,7 @@ MaybeObject* TransitionArray::CopyInsert(Name* name, Map* target) {
if (insertion_index == kNotFound) ++new_size;
MaybeObject* maybe_array;
- maybe_array = TransitionArray::Allocate(new_size);
+ maybe_array = TransitionArray::Allocate(GetIsolate(), new_size);
if (!maybe_array->To(&result)) return maybe_array;
if (HasPrototypeTransitions()) {
diff --git a/deps/v8/src/transitions.h b/deps/v8/src/transitions.h
index fde127989..b2e983967 100644
--- a/deps/v8/src/transitions.h
+++ b/deps/v8/src/transitions.h
@@ -119,7 +119,8 @@ class TransitionArray: public FixedArray {
inline int Search(Name* name);
// Allocates a TransitionArray.
- MUST_USE_RESULT static MaybeObject* Allocate(int number_of_transitions);
+ MUST_USE_RESULT static MaybeObject* Allocate(
+ Isolate* isolate, int number_of_transitions);
bool IsSimpleTransition() {
return length() == kSimpleTransitionSize &&
diff --git a/deps/v8/src/type-info.cc b/deps/v8/src/type-info.cc
index 336b459d6..190eb3e6f 100644
--- a/deps/v8/src/type-info.cc
+++ b/deps/v8/src/type-info.cc
@@ -152,12 +152,8 @@ bool TypeFeedbackOracle::StoreIsMonomorphicNormal(TypeFeedbackId ast_id) {
if (map_or_code->IsMap()) return true;
if (map_or_code->IsCode()) {
Handle<Code> code = Handle<Code>::cast(map_or_code);
- bool standard_store = FLAG_compiled_keyed_stores ||
- (Code::GetKeyedAccessStoreMode(code->extra_ic_state()) ==
- STANDARD_STORE);
bool preliminary_checks =
code->is_keyed_store_stub() &&
- standard_store &&
code->ic_state() == MONOMORPHIC &&
Code::ExtractTypeFromFlags(code->flags()) == Code::NORMAL;
if (!preliminary_checks) return false;
@@ -174,10 +170,7 @@ bool TypeFeedbackOracle::StoreIsKeyedPolymorphic(TypeFeedbackId ast_id) {
Handle<Object> map_or_code = GetInfo(ast_id);
if (map_or_code->IsCode()) {
Handle<Code> code = Handle<Code>::cast(map_or_code);
- bool standard_store = FLAG_compiled_keyed_stores ||
- (Code::GetKeyedAccessStoreMode(code->extra_ic_state()) ==
- STANDARD_STORE);
- return code->is_keyed_store_stub() && standard_store &&
+ return code->is_keyed_store_stub() &&
code->ic_state() == POLYMORPHIC;
}
return false;
diff --git a/deps/v8/src/types.h b/deps/v8/src/types.h
index fc69c7852..2810ffc8a 100644
--- a/deps/v8/src/types.h
+++ b/deps/v8/src/types.h
@@ -92,7 +92,7 @@ namespace internal {
// Note that the bitset representation is closed under both Union and Intersect.
//
// The type representation is heap-allocated, so cannot (currently) be used in
-// a parallel compilation context.
+// a concurrent compilation context.
#define PRIMITIVE_TYPE_LIST(V) \
diff --git a/deps/v8/src/typing.cc b/deps/v8/src/typing.cc
index f8e2a7c20..34bb64bd7 100644
--- a/deps/v8/src/typing.cc
+++ b/deps/v8/src/typing.cc
@@ -42,7 +42,7 @@ AstTyper::AstTyper(CompilationInfo* info)
info->isolate(),
info->zone()),
store_(info->zone()) {
- InitializeAstVisitor();
+ InitializeAstVisitor(info->isolate());
}
diff --git a/deps/v8/src/unique.h b/deps/v8/src/unique.h
new file mode 100644
index 000000000..7ae704a26
--- /dev/null
+++ b/deps/v8/src/unique.h
@@ -0,0 +1,266 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_HYDROGEN_UNIQUE_H_
+#define V8_HYDROGEN_UNIQUE_H_
+
+#include "handles.h"
+#include "utils.h"
+#include "zone.h"
+
+namespace v8 {
+namespace internal {
+
+
+template <typename T>
+class UniqueSet;
+
+
+// Represents a handle to an object on the heap, but with the additional
+// ability of checking for equality and hashing without accessing the heap.
+//
+// Creating a Unique<T> requires first dereferencing the handle to obtain
+// the address of the object, which is used as the hashcode and the basis for
+// comparison. The object can be moved later by the GC, but comparison
+// and hashing use the old address of the object, without dereferencing it.
+//
+// Careful! Comparison of two Uniques is only correct if both were created
+// in the same "era" of GC or if at least one is a non-movable object.
+template <typename T>
+class Unique V8_FINAL {
+ public:
+ // TODO(titzer): make private and introduce some builder/owner class.
+ explicit Unique(Handle<T> handle) {
+ if (handle.is_null()) {
+ raw_address_ = NULL;
+ } else {
+ raw_address_ = reinterpret_cast<Address>(*handle);
+ ASSERT_NE(raw_address_, NULL);
+ }
+ handle_ = handle;
+ }
+
+ // Constructor for handling automatic upcasting.
+ // E.g. Unique<JSFunction> can be passed when Unique<Object> is expected.
+ template <class S> Unique(Unique<S> uniq) {
+#ifdef DEBUG
+ T* a = NULL;
+ S* b = NULL;
+ a = b; // Fake assignment to enforce type checks.
+ USE(a);
+#endif
+ raw_address_ = uniq.raw_address_;
+ handle_ = uniq.handle_; // Creates a new handle sharing the same location.
+ }
+
+ template <typename U>
+ bool operator==(const Unique<U>& other) const {
+ return raw_address_ == other.raw_address_;
+ }
+
+ template <typename U>
+ bool operator!=(const Unique<U>& other) const {
+ return raw_address_ != other.raw_address_;
+ }
+
+ intptr_t Hashcode() const {
+ return reinterpret_cast<intptr_t>(raw_address_);
+ }
+
+ bool IsNull() {
+ return raw_address_ == NULL;
+ }
+
+ // Don't do this unless you have access to the heap!
+ // No, seriously! You can compare and hash and set-ify uniques that were
+ // all created at the same time; please don't dereference.
+ Handle<T> handle() {
+ return handle_;
+ }
+
+ friend class UniqueSet<T>; // Uses internal details for speed.
+ template <class U>
+ friend class Unique; // For comparing raw_address values.
+
+ private:
+ Address raw_address_;
+ Handle<T> handle_;
+};
+
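
A brief usage sketch for Unique<T> (hypothetical, not part of this patch): two Uniques created from handles to the same object compare equal and hash identically without ever touching the heap, which is what makes them usable away from the main thread:

  // Hypothetical illustration; assumes a HandleScope is open and that no GC
  // runs between creating the two Uniques (the "same era" rule above).
  void CompareWithoutDereferencing(Handle<JSFunction> a, Handle<JSFunction> b) {
    Unique<JSFunction> ua(a);
    Unique<JSFunction> ub(b);
    bool same_object = (ua == ub);  // Pointer identity, no heap access.
    intptr_t hash = ua.Hashcode();  // Stable until the next moving GC.
    USE(same_object);
    USE(hash);
  }
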
+
+template <typename T>
+class UniqueSet V8_FINAL : public ZoneObject {
+ public:
+ // Constructor. A new set will be empty.
+ UniqueSet() : size_(0), capacity_(0), array_(NULL) { }
+
+ // Add a new element to this unique set. Mutates this set. O(|this|).
+ void Add(Unique<T> uniq, Zone* zone) {
+ // Keep the set sorted by the {raw_address} of the unique elements.
+ for (int i = 0; i < size_; i++) {
+ if (array_[i] == uniq) return;
+ if (array_[i].raw_address_ > uniq.raw_address_) {
+ // Insert in the middle.
+ Grow(size_ + 1, zone);
+ for (int j = size_ - 1; j >= i; j--) array_[j + 1] = array_[j];
+ array_[i] = uniq;
+ size_++;
+ return;
+ }
+ }
+ // Append the element to the end.
+ Grow(size_ + 1, zone);
+ array_[size_++] = uniq;
+ }
+
+ // Compare this set against another set. O(|this|).
+ bool Equals(UniqueSet<T>* that) {
+ if (that->size_ != this->size_) return false;
+ for (int i = 0; i < this->size_; i++) {
+ if (this->array_[i] != that->array_[i]) return false;
+ }
+ return true;
+ }
+
+ // Check if this set is a subset of the given set. O(|this| + |that|).
+ bool IsSubset(UniqueSet<T>* that) {
+ if (that->size_ < this->size_) return false;
+ int j = 0;
+ for (int i = 0; i < this->size_; i++) {
+ Unique<T> sought = this->array_[i];
+ while (true) {
+ if (sought == that->array_[j++]) break;
+ // Fail whenever there are more elements in {this} than {that}.
+ if ((this->size_ - i) > (that->size_ - j)) return false;
+ }
+ }
+ return true;
+ }
+
+ // Returns a new set representing the intersection of this set and the other.
+ // O(|this| + |that|).
+ UniqueSet<T>* Intersect(UniqueSet<T>* that, Zone* zone) {
+ if (that->size_ == 0 || this->size_ == 0) return new(zone) UniqueSet<T>();
+
+ UniqueSet<T>* out = new(zone) UniqueSet<T>();
+ out->Grow(Min(this->size_, that->size_), zone);
+
+ int i = 0, j = 0, k = 0;
+ while (i < this->size_ && j < that->size_) {
+ Unique<T> a = this->array_[i];
+ Unique<T> b = that->array_[j];
+ if (a == b) {
+ out->array_[k++] = a;
+ i++;
+ j++;
+ } else if (a.raw_address_ < b.raw_address_) {
+ i++;
+ } else {
+ j++;
+ }
+ }
+
+ out->size_ = k;
+ return out;
+ }
+
+ // Returns a new set representing the union of this set and the other.
+ // O(|this| + |that|).
+ UniqueSet<T>* Union(UniqueSet<T>* that, Zone* zone) {
+ if (that->size_ == 0) return this->Copy(zone);
+ if (this->size_ == 0) return that->Copy(zone);
+
+ UniqueSet<T>* out = new(zone) UniqueSet<T>();
+ out->Grow(this->size_ + that->size_, zone);
+
+ int i = 0, j = 0, k = 0;
+ while (i < this->size_ && j < that->size_) {
+ Unique<T> a = this->array_[i];
+ Unique<T> b = that->array_[j];
+ if (a == b) {
+ out->array_[k++] = a;
+ i++;
+ j++;
+ } else if (a.raw_address_ < b.raw_address_) {
+ out->array_[k++] = a;
+ i++;
+ } else {
+ out->array_[k++] = b;
+ j++;
+ }
+ }
+
+ while (i < this->size_) out->array_[k++] = this->array_[i++];
+ while (j < that->size_) out->array_[k++] = that->array_[j++];
+
+ out->size_ = k;
+ return out;
+ }
+
+ // Makes an exact copy of this set. O(|this|).
+ UniqueSet<T>* Copy(Zone* zone) {
+ UniqueSet<T>* copy = new(zone) UniqueSet<T>();
+ copy->size_ = this->size_;
+ copy->capacity_ = this->size_;
+ copy->array_ = zone->NewArray<Unique<T> >(this->size_);
+ memcpy(copy->array_, this->array_, this->size_ * sizeof(Unique<T>));
+ return copy;
+ }
+
+ inline int size() {
+ return size_;
+ }
+
+ private:
+ // These sets should be small, since operations are implemented with simple
+ // linear algorithms. Enforce a maximum size.
+ static const int kMaxCapacity = 65535;
+
+ uint16_t size_;
+ uint16_t capacity_;
+ Unique<T>* array_;
+
+ // Grow the size of internal storage to be at least {size} elements.
+ void Grow(int size, Zone* zone) {
+ CHECK(size < kMaxCapacity); // Enforce maximum size.
+ if (capacity_ < size) {
+ int new_capacity = 2 * capacity_ + size;
+ if (new_capacity > kMaxCapacity) new_capacity = kMaxCapacity;
+ Unique<T>* new_array = zone->NewArray<Unique<T> >(new_capacity);
+ if (size_ > 0) {
+ memcpy(new_array, array_, size_ * sizeof(Unique<T>));
+ }
+ capacity_ = new_capacity;
+ array_ = new_array;
+ }
+ }
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_HYDROGEN_UNIQUE_H_
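
Since Add() keeps each UniqueSet sorted by raw_address_, Intersect and Union reduce to single linear merge passes over the two arrays. A hedged usage sketch with hypothetical names, assuming a Zone* is at hand:

  // Hypothetical: computing the maps common to two sets of map uniques.
  UniqueSet<Map>* CommonMaps(UniqueSet<Map>* a, UniqueSet<Map>* b, Zone* zone) {
    // Both inputs are sorted, so Intersect() is a linear merge.
    UniqueSet<Map>* common = a->Intersect(b, zone);
    ASSERT(common->IsSubset(a) && common->IsSubset(b));
    return common;
  }
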
diff --git a/deps/v8/src/utils/random-number-generator.cc b/deps/v8/src/utils/random-number-generator.cc
new file mode 100644
index 000000000..1e03ee244
--- /dev/null
+++ b/deps/v8/src/utils/random-number-generator.cc
@@ -0,0 +1,136 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "utils/random-number-generator.h"
+
+#include <cstdio>
+
+#include "flags.h"
+#include "platform/mutex.h"
+#include "platform/time.h"
+#include "utils.h"
+
+namespace v8 {
+namespace internal {
+
+static LazyMutex entropy_mutex = LAZY_MUTEX_INITIALIZER;
+static RandomNumberGenerator::EntropySource entropy_source = NULL;
+
+
+// static
+void RandomNumberGenerator::SetEntropySource(EntropySource source) {
+ LockGuard<Mutex> lock_guard(entropy_mutex.Pointer());
+ entropy_source = source;
+}
+
+
+RandomNumberGenerator::RandomNumberGenerator() {
+ // Check --random-seed flag first.
+ if (FLAG_random_seed != 0) {
+ SetSeed(FLAG_random_seed);
+ return;
+ }
+
+ // Check if embedder supplied an entropy source.
+ { LockGuard<Mutex> lock_guard(entropy_mutex.Pointer());
+ if (entropy_source != NULL) {
+ int64_t seed;
+ if (entropy_source(reinterpret_cast<unsigned char*>(&seed),
+ sizeof(seed))) {
+ SetSeed(seed);
+ return;
+ }
+ }
+ }
+
+ // Gather entropy from /dev/urandom if available.
+ FILE* fp = fopen("/dev/urandom", "rb");
+ if (fp != NULL) {
+ int64_t seed;
+ size_t n = fread(&seed, sizeof(seed), 1, fp);
+ fclose(fp);
+ if (n == 1) {
+ SetSeed(seed);
+ return;
+ }
+ }
+
+ // We cannot assume that random() or rand() were seeded
+ // properly, so instead of relying on random() or rand(),
+ // we just seed our PRNG using timing data as fallback.
+ int64_t seed = Time::NowFromSystemTime().ToInternalValue() << 24;
+ seed ^= TimeTicks::HighResNow().ToInternalValue() << 16;
+ seed ^= TimeTicks::Now().ToInternalValue() << 8;
+ SetSeed(seed);
+}
+
+
+int RandomNumberGenerator::NextInt(int max) {
+ ASSERT_LE(0, max);
+
+ // Fast path if max is a power of 2.
+ if (IsPowerOf2(max)) {
+ return static_cast<int>((max * static_cast<int64_t>(Next(31))) >> 31);
+ }
+
+ // Rejection sampling: values of rnd in the last, incomplete interval of
+ // [0, 2^31) would bias rnd % max toward small results; for exactly those
+ // values rnd - val + (max - 1) overflows and becomes negative, so retry.
+ while (true) {
+ int rnd = Next(31);
+ int val = rnd % max;
+ if (rnd - val + (max - 1) >= 0) {
+ return val;
+ }
+ }
+}
+
+
+// Concatenates 26 + 27 pseudorandom bits into a 53-bit value (the mantissa
+// width of an IEEE double) and scales it into the unit interval [0, 1).
+double RandomNumberGenerator::NextDouble() {
+ return ((static_cast<int64_t>(Next(26)) << 27) + Next(27)) /
+ static_cast<double>(static_cast<int64_t>(1) << 53);
+}
+
+
+void RandomNumberGenerator::NextBytes(void* buffer, size_t buflen) {
+ for (size_t n = 0; n < buflen; ++n) {
+ static_cast<uint8_t*>(buffer)[n] = static_cast<uint8_t>(Next(8));
+ }
+}
+
+
+int RandomNumberGenerator::Next(int bits) {
+ ASSERT_LT(0, bits);
+ ASSERT_GE(32, bits);
+ int64_t seed = (seed_ * kMultiplier + kAddend) & kMask;
+ seed_ = seed;
+ return static_cast<int>(seed >> (48 - bits));
+}
+
+
+void RandomNumberGenerator::SetSeed(int64_t seed) {
+ seed_ = (seed ^ kMultiplier) & kMask;
+}
+
+} } // namespace v8::internal
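
Next() is a textbook 48-bit linear congruential generator: multiply, add, mask the state to 48 bits, then return the top bits of the new state. The actual kMultiplier/kAddend/kMask constants live in the header, which this diff does not show; the self-contained sketch below assumes the conventional java.util.Random values and uses unsigned arithmetic so the wraparound is well-defined:

  #include <cstdint>
  #include <cstdio>

  // Stand-alone sketch of the Next(bits) step; constants are assumptions.
  class TinyLcg {
   public:
    explicit TinyLcg(uint64_t seed) : state_((seed ^ kMultiplier) & kMask) {}

    int Next(int bits) {  // Requires 0 < bits <= 32.
      state_ = (state_ * kMultiplier + kAddend) & kMask;
      // The high-order bits of an LCG have the longest period, so the
      // result is taken from the top of the 48-bit state.
      return static_cast<int>(state_ >> (48 - bits));
    }

   private:
    static const uint64_t kMultiplier = 0x5DEECE66DULL;
    static const uint64_t kAddend = 0xBULL;
    static const uint64_t kMask = (1ULL << 48) - 1;
    uint64_t state_;
  };

  int main() {
    // Identical seeds yield identical streams, as the header comment promises.
    TinyLcg a(42), b(42);
    std::printf("%d %d\n", a.Next(31), b.Next(31));
    return 0;
  }
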
diff --git a/deps/v8/src/utils/random-number-generator.h b/deps/v8/src/utils/random-number-generator.h
new file mode 100644
index 000000000..bd7dca7e6
--- /dev/null
+++ b/deps/v8/src/utils/random-number-generator.h
@@ -0,0 +1,106 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_UTILS_RANDOM_NUMBER_GENERATOR_H_
+#define V8_UTILS_RANDOM_NUMBER_GENERATOR_H_
+
+#include "globals.h"
+
+namespace v8 {
+namespace internal {
+
+// -----------------------------------------------------------------------------
+// RandomNumberGenerator
+//
+// This class is used to generate a stream of pseudorandom numbers. The class
+// uses a 48-bit seed, which is modified using a linear congruential formula.
+// (See Donald Knuth, The Art of Computer Programming, Volume 2, Section 3.2.1.)
+// If two instances of RandomNumberGenerator are created with the same seed, and
+// the same sequence of method calls is made for each, they will generate and
+// return identical sequences of numbers.
+// This class is neither reentrant nor threadsafe.
+
+class RandomNumberGenerator V8_FINAL {
+ public:
+ // EntropySource is used as a callback function when V8 needs a source of
+ // entropy.
+ typedef bool (*EntropySource)(unsigned char* buffer, size_t buflen);
+ static void SetEntropySource(EntropySource entropy_source);
+
+ RandomNumberGenerator();
+ explicit RandomNumberGenerator(int64_t seed) { SetSeed(seed); }
+
+ // Returns the next pseudorandom, uniformly distributed int value from this
+ // random number generator's sequence. The general contract of |NextInt()| is
+ // that one int value is pseudorandomly generated and returned.
+ // All 2^32 possible integer values are produced with (approximately) equal
+ // probability.
+ V8_INLINE int NextInt() V8_WARN_UNUSED_RESULT {
+ return Next(32);
+ }
+
+ // Returns a pseudorandom, uniformly distributed int value between 0
+ // (inclusive) and the specified max value (exclusive), drawn from this random
+ // number generator's sequence. The general contract of |NextInt(int)| is that
+ // one int value in the specified range is pseudorandomly generated and
+ // returned. All max possible int values are produced with (approximately)
+ // equal probability.
+ int NextInt(int max) V8_WARN_UNUSED_RESULT;
+
+ // Returns the next pseudorandom, uniformly distributed boolean value from
+ // this random number generator's sequence. The general contract of
+  // |NextBool()| is that one boolean value is pseudorandomly generated and
+ // returned. The values true and false are produced with (approximately) equal
+ // probability.
+ V8_INLINE bool NextBool() V8_WARN_UNUSED_RESULT {
+ return Next(1) != 0;
+ }
+
+ // Returns the next pseudorandom, uniformly distributed double value between
+ // 0.0 and 1.0 from this random number generator's sequence.
+ // The general contract of |NextDouble()| is that one double value, chosen
+ // (approximately) uniformly from the range 0.0 (inclusive) to 1.0
+ // (exclusive), is pseudorandomly generated and returned.
+ double NextDouble() V8_WARN_UNUSED_RESULT;
+
+ // Fills the elements of a specified array of bytes with random numbers.
+ void NextBytes(void* buffer, size_t buflen);
+
+ private:
+ static const int64_t kMultiplier = V8_2PART_UINT64_C(0x5, deece66d);
+ static const int64_t kAddend = 0xb;
+ static const int64_t kMask = V8_2PART_UINT64_C(0xffff, ffffffff);
+
+ int Next(int bits) V8_WARN_UNUSED_RESULT;
+ void SetSeed(int64_t seed);
+
+ int64_t seed_;
+};
+
+} } // namespace v8::internal
+
+#endif // V8_UTILS_RANDOM_NUMBER_GENERATOR_H_
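
A hedged usage sketch of the class above (ReadDeviceEntropy and Example are
hypothetical names, not part of the patch): an embedder can install an entropy
source before the first generator is constructed, then draw values.

    #include <cstdio>

    // Hypothetical entropy source matching the EntropySource typedef.
    static bool ReadDeviceEntropy(unsigned char* buffer, size_t buflen) {
      FILE* fp = fopen("/dev/urandom", "rb");
      if (fp == NULL) return false;
      size_t n = fread(buffer, 1, buflen, fp);
      fclose(fp);
      return n == buflen;
    }

    void Example() {
      using v8::internal::RandomNumberGenerator;
      RandomNumberGenerator::SetEntropySource(&ReadDeviceEntropy);
      RandomNumberGenerator rng;       // seeded from the source above
      int die = rng.NextInt(6) + 1;    // uniform in 1..6
      double u = rng.NextDouble();     // uniform in [0.0, 1.0)
      (void)die; (void)u;
    }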
diff --git a/deps/v8/src/v8-counters.cc b/deps/v8/src/v8-counters.cc
index 905e178fe..6711c8020 100644
--- a/deps/v8/src/v8-counters.cc
+++ b/deps/v8/src/v8-counters.cc
@@ -49,31 +49,31 @@ Counters::Counters(Isolate* isolate) {
#undef HM
#define SC(name, caption) \
- name##_ = StatsCounter("c:" #caption);
+ name##_ = StatsCounter(isolate, "c:" #caption);
STATS_COUNTER_LIST_1(SC)
STATS_COUNTER_LIST_2(SC)
#undef SC
#define SC(name) \
- count_of_##name##_ = StatsCounter("c:" "V8.CountOf_" #name); \
- size_of_##name##_ = StatsCounter("c:" "V8.SizeOf_" #name);
+ count_of_##name##_ = StatsCounter(isolate, "c:" "V8.CountOf_" #name); \
+ size_of_##name##_ = StatsCounter(isolate, "c:" "V8.SizeOf_" #name);
INSTANCE_TYPE_LIST(SC)
#undef SC
#define SC(name) \
count_of_CODE_TYPE_##name##_ = \
- StatsCounter("c:" "V8.CountOf_CODE_TYPE-" #name); \
+ StatsCounter(isolate, "c:" "V8.CountOf_CODE_TYPE-" #name); \
size_of_CODE_TYPE_##name##_ = \
- StatsCounter("c:" "V8.SizeOf_CODE_TYPE-" #name);
+ StatsCounter(isolate, "c:" "V8.SizeOf_CODE_TYPE-" #name);
CODE_KIND_LIST(SC)
#undef SC
#define SC(name) \
count_of_FIXED_ARRAY_##name##_ = \
- StatsCounter("c:" "V8.CountOf_FIXED_ARRAY-" #name); \
+ StatsCounter(isolate, "c:" "V8.CountOf_FIXED_ARRAY-" #name); \
size_of_FIXED_ARRAY_##name##_ = \
- StatsCounter("c:" "V8.SizeOf_FIXED_ARRAY-" #name);
+ StatsCounter(isolate, "c:" "V8.SizeOf_FIXED_ARRAY-" #name);
FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(SC)
#undef SC
}
diff --git a/deps/v8/src/v8.cc b/deps/v8/src/v8.cc
index 93f3efb2e..e894164cd 100644
--- a/deps/v8/src/v8.cc
+++ b/deps/v8/src/v8.cc
@@ -50,18 +50,9 @@ namespace internal {
V8_DECLARE_ONCE(init_once);
-bool V8::is_running_ = false;
-bool V8::has_been_set_up_ = false;
-bool V8::has_been_disposed_ = false;
-bool V8::has_fatal_error_ = false;
-bool V8::use_crankshaft_ = true;
List<CallCompletedCallback>* V8::call_completed_callbacks_ = NULL;
v8::ArrayBuffer::Allocator* V8::array_buffer_allocator_ = NULL;
-static LazyMutex entropy_mutex = LAZY_MUTEX_INITIALIZER;
-
-static EntropySource entropy_source;
-
bool V8::Initialize(Deserializer* des) {
InitializeOncePerProcess();
@@ -80,31 +71,18 @@ bool V8::Initialize(Deserializer* des) {
ASSERT(i::Isolate::CurrentPerIsolateThreadData()->isolate() ==
i::Isolate::Current());
- if (IsDead()) return false;
-
Isolate* isolate = Isolate::Current();
+ if (isolate->IsDead()) return false;
if (isolate->IsInitialized()) return true;
- is_running_ = true;
- has_been_set_up_ = true;
- has_fatal_error_ = false;
- has_been_disposed_ = false;
-
return isolate->Init(des);
}
-void V8::SetFatalError() {
- is_running_ = false;
- has_fatal_error_ = true;
-}
-
-
void V8::TearDown() {
Isolate* isolate = Isolate::Current();
ASSERT(isolate->IsDefaultIsolate());
-
- if (!has_been_set_up_ || has_been_disposed_) return;
+ if (!isolate->IsInitialized()) return;
// The isolate has to be torn down before clearing the LOperand
// caches so that the optimizing compiler thread (if running)
@@ -118,49 +96,10 @@ void V8::TearDown() {
RegisteredExtension::UnregisterAll();
Isolate::GlobalTearDown();
- is_running_ = false;
- has_been_disposed_ = true;
-
delete call_completed_callbacks_;
call_completed_callbacks_ = NULL;
Sampler::TearDown();
- OS::TearDown();
-}
-
-
-static void seed_random(uint32_t* state) {
- for (int i = 0; i < 2; ++i) {
- if (FLAG_random_seed != 0) {
- state[i] = FLAG_random_seed;
- } else if (entropy_source != NULL) {
- uint32_t val;
- ScopedLock lock(entropy_mutex.Pointer());
- entropy_source(reinterpret_cast<unsigned char*>(&val), sizeof(uint32_t));
- state[i] = val;
- } else {
- state[i] = random();
- }
- }
-}
-
-
-// Random number generator using George Marsaglia's MWC algorithm.
-static uint32_t random_base(uint32_t* state) {
- // Initialize seed using the system random().
- // No non-zero seed will ever become zero again.
- if (state[0] == 0) seed_random(state);
-
- // Mix the bits. Never replaces state[i] with 0 if it is nonzero.
- state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16);
- state[1] = 36969 * (state[1] & 0xFFFF) + (state[1] >> 16);
-
- return (state[0] << 14) + (state[1] & 0x3FFFF);
-}
-
-
-void V8::SetEntropySource(EntropySource source) {
- entropy_source = source;
}
@@ -174,26 +113,18 @@ void V8::SetReturnAddressLocationResolver(
uint32_t V8::Random(Context* context) {
ASSERT(context->IsNativeContext());
ByteArray* seed = context->random_seed();
- return random_base(reinterpret_cast<uint32_t*>(seed->GetDataStartAddress()));
-}
-
-
-// Used internally by the JIT and memory allocator for security
-// purposes. So, we keep a different state to prevent informations
-// leaks that could be used in an exploit.
-uint32_t V8::RandomPrivate(Isolate* isolate) {
- ASSERT(isolate == Isolate::Current());
- return random_base(isolate->private_random_seed());
-}
+ uint32_t* state = reinterpret_cast<uint32_t*>(seed->GetDataStartAddress());
+ // When we get here, the RNG must have been initialized,
+ // see the Genesis constructor in file bootstrapper.cc.
+ ASSERT_NE(0, state[0]);
+ ASSERT_NE(0, state[1]);
-bool V8::IdleNotification(int hint) {
- // Returning true tells the caller that there is no need to call
- // IdleNotification again.
- if (!FLAG_use_idle_notification) return true;
+ // Mix the bits. Never replaces state[i] with 0 if it is nonzero.
+ state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16);
+ state[1] = 36969 * (state[1] & 0xFFFF) + (state[1] >> 16);
- // Tell the heap that it may want to adjust.
- return HEAP->IdleNotification(hint);
+ return (state[0] << 14) + (state[1] & 0x3FFFF);
}
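
The two state updates above are George Marsaglia's multiply-with-carry step, now
inlined here after the removal of random_base(): each 32-bit word keeps a 16-bit
value in its low half and the previous carry in its high half, and a nonzero word
can never step to zero. A standalone sketch of one step (not part of the patch):

    // One MWC step for a single state word with multiplier a.
    static inline uint32_t MwcStep(uint32_t a, uint32_t state) {
      return a * (state & 0xFFFF) + (state >> 16);  // low: value, high: carry
    }
    // V8 then mixes the two streams: (state0 << 14) + (state1 & 0x3FFFF).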
@@ -272,10 +203,10 @@ void V8::InitializeOncePerProcessImpl() {
FLAG_max_new_space_size = (1 << (kPageSizeBits - 10)) * 2;
}
- if (FLAG_parallel_recompilation &&
+ if (FLAG_concurrent_recompilation &&
(FLAG_trace_hydrogen || FLAG_trace_hydrogen_stubs)) {
- FLAG_parallel_recompilation = false;
- PrintF("Parallel recompilation has been disabled for tracing.\n");
+ FLAG_concurrent_recompilation = false;
+ PrintF("Concurrent recompilation has been disabled for tracing.\n");
}
if (FLAG_sweeper_threads <= 0) {
@@ -309,18 +240,14 @@ void V8::InitializeOncePerProcessImpl() {
FLAG_marking_threads = 0;
}
- if (FLAG_parallel_recompilation &&
+ if (FLAG_concurrent_recompilation &&
SystemThreadManager::NumberOfParallelSystemThreads(
SystemThreadManager::PARALLEL_RECOMPILATION) == 0) {
- FLAG_parallel_recompilation = false;
+ FLAG_concurrent_recompilation = false;
}
- OS::SetUp();
Sampler::SetUp();
CPU::SetUp();
- use_crankshaft_ = FLAG_crankshaft
- && !Serializer::enabled()
- && CPU::SupportsCrankshaft();
OS::PostSetUp();
ElementsAccessor::InitializeOncePerProcess();
LOperand::SetUpCaches();
diff --git a/deps/v8/src/v8.h b/deps/v8/src/v8.h
index 47893e821..5848f7481 100644
--- a/deps/v8/src/v8.h
+++ b/deps/v8/src/v8.h
@@ -82,12 +82,6 @@ class V8 : public AllStatic {
// empty heap.
static bool Initialize(Deserializer* des);
static void TearDown();
- static bool IsRunning() { return is_running_; }
- static bool UseCrankshaft() { return use_crankshaft_; }
- // To be dead you have to have lived
- // TODO(isolates): move IsDead to Isolate.
- static bool IsDead() { return has_fatal_error_ || has_been_disposed_; }
- static void SetFatalError();
// Report process out of memory. Implementation found in api.cc.
static void FatalProcessOutOfMemory(const char* location,
@@ -103,17 +97,9 @@ class V8 : public AllStatic {
static void SetFunctionEntryHook(FunctionEntryHook entry_hook);
// Random number generation support. Not cryptographically safe.
static uint32_t Random(Context* context);
- // We use random numbers internally in memory allocation and in the
- // compilers for security. In order to prevent information leaks we
- // use a separate random state for internal random number
- // generation.
- static uint32_t RandomPrivate(Isolate* isolate);
static Object* FillHeapNumberWithRandom(Object* heap_number,
Context* context);
- // Idle notification directly from the API.
- static bool IdleNotification(int hint);
-
static void AddCallCompletedCallback(CallCompletedCallback callback);
static void RemoveCallCompletedCallback(CallCompletedCallback callback);
static void FireCallCompletedCallback(Isolate* isolate);
@@ -131,18 +117,6 @@ class V8 : public AllStatic {
static void InitializeOncePerProcessImpl();
static void InitializeOncePerProcess();
- // True if engine is currently running
- static bool is_running_;
- // True if V8 has ever been run
- static bool has_been_set_up_;
- // True if error has been signaled for current engine
- // (reset to false if engine is restarted)
- static bool has_fatal_error_;
- // True if engine has been shut down
- // (reset if engine is restarted)
- static bool has_been_disposed_;
- // True if we are using the crankshaft optimizing compiler.
- static bool use_crankshaft_;
// List of callbacks when a Call completes.
static List<CallCompletedCallback>* call_completed_callbacks_;
// Allocator for external array buffers.
diff --git a/deps/v8/src/v8dll-main.cc b/deps/v8/src/v8dll-main.cc
index 49d868957..7f6c9f955 100644
--- a/deps/v8/src/v8dll-main.cc
+++ b/deps/v8/src/v8dll-main.cc
@@ -30,8 +30,8 @@
#undef USING_V8_SHARED
#include "../include/v8.h"
-#ifdef WIN32
-#include <windows.h> // NOLINT
+#if V8_OS_WIN
+#include "win32-headers.h"
extern "C" {
BOOL WINAPI DllMain(HANDLE hinstDLL,
@@ -41,4 +41,4 @@ BOOL WINAPI DllMain(HANDLE hinstDLL,
return TRUE;
}
}
-#endif
+#endif // V8_OS_WIN
diff --git a/deps/v8/src/v8globals.h b/deps/v8/src/v8globals.h
index 6ec754788..7fa2fd62c 100644
--- a/deps/v8/src/v8globals.h
+++ b/deps/v8/src/v8globals.h
@@ -97,7 +97,7 @@ const int kPageSizeBits = 20;
// On Intel architecture, cache line size is 64 bytes.
 // On ARM it may be less (32 bytes), but as far as this constant is
// used for aligning data, it doesn't hurt to align on a greater value.
-const int kProcessorCacheLineSize = 64;
+#define PROCESSOR_CACHE_LINE_SIZE 64
// Constants relevant to double precision floating point numbers.
// If looking only at the top 32 bits, the QNaN mask is bits 19 to 30.
@@ -163,6 +163,7 @@ class Deserializer;
class MessageLocation;
class VirtualMemory;
class Mutex;
+class RecursiveMutex;
typedef bool (*WeakSlotCallback)(Object** pointer);
@@ -346,8 +347,9 @@ union IeeeDoubleBigEndianArchType {
// AccessorCallback
struct AccessorDescriptor {
- MaybeObject* (*getter)(Object* object, void* data);
- MaybeObject* (*setter)(JSObject* object, Object* value, void* data);
+ MaybeObject* (*getter)(Isolate* isolate, Object* object, void* data);
+ MaybeObject* (*setter)(
+ Isolate* isolate, JSObject* object, Object* value, void* data);
void* data;
};
@@ -412,34 +414,12 @@ enum StateTag {
#endif
-enum CpuImplementer {
- UNKNOWN_IMPLEMENTER,
- ARM_IMPLEMENTER,
- QUALCOMM_IMPLEMENTER
-};
-
-
-enum CpuPart {
- CPU_UNKNOWN,
- CORTEX_A15,
- CORTEX_A12,
- CORTEX_A9,
- CORTEX_A8,
- CORTEX_A7,
- CORTEX_A5
-};
-
-
// Feature flags bit positions. They are mostly based on the CPUID spec.
-// (We assign CPUID itself to one of the currently reserved bits --
-// feel free to change this if needed.)
// On X86/X64, values below 32 are bits in EDX, values above 32 are bits in ECX.
enum CpuFeature { SSE4_1 = 32 + 19, // x86
SSE3 = 32 + 0, // x86
SSE2 = 26, // x86
CMOV = 15, // x86
- RDTSC = 4, // x86
- CPUID = 10, // x86
VFP3 = 1, // ARM
ARMv7 = 2, // ARM
SUDIV = 3, // ARM
@@ -585,6 +565,11 @@ enum ClearExceptionFlag {
};
+enum MinusZeroMode {
+ TREAT_MINUS_ZERO_AS_ZERO,
+ FAIL_ON_MINUS_ZERO
+};
+
} } // namespace v8::internal
#endif // V8_V8GLOBALS_H_
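
Note the AccessorDescriptor change above: the C++ getter and setter callbacks now
receive the Isolate explicitly instead of fetching Isolate::Current() themselves.
A hypothetical pair matching the new signatures (illustrative names only, not part
of the patch):

    static MaybeObject* ExampleGetter(Isolate* isolate, Object* object,
                                      void* data) {
      return isolate->heap()->undefined_value();
    }

    static MaybeObject* ExampleSetter(Isolate* isolate, JSObject* object,
                                      Object* value, void* data) {
      return value;  // accept the write unchanged
    }

    static const AccessorDescriptor kExampleAccessor =
        { &ExampleGetter, &ExampleSetter, NULL };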
diff --git a/deps/v8/src/v8threads.cc b/deps/v8/src/v8threads.cc
index 2df187a57..33b620d8e 100644
--- a/deps/v8/src/v8threads.cc
+++ b/deps/v8/src/v8threads.cc
@@ -144,12 +144,13 @@ Unlocker::~Unlocker() {
void Locker::StartPreemption(int every_n_ms) {
- v8::internal::ContextSwitcher::StartPreemption(every_n_ms);
+ v8::internal::ContextSwitcher::StartPreemption(
+ i::Isolate::Current(), every_n_ms);
}
void Locker::StopPreemption() {
- v8::internal::ContextSwitcher::StopPreemption();
+ v8::internal::ContextSwitcher::StopPreemption(i::Isolate::Current());
}
@@ -214,7 +215,7 @@ bool ThreadManager::RestoreThread() {
void ThreadManager::Lock() {
- mutex_->Lock();
+ mutex_.Lock();
mutex_owner_ = ThreadId::Current();
ASSERT(IsLockedByCurrentThread());
}
@@ -222,7 +223,7 @@ void ThreadManager::Lock() {
void ThreadManager::Unlock() {
mutex_owner_ = ThreadId::Invalid();
- mutex_->Unlock();
+ mutex_.Unlock();
}
@@ -303,8 +304,7 @@ ThreadState* ThreadState::Next() {
// be distinguished from not having a thread id at all (since NULL is
// defined as 0.)
ThreadManager::ThreadManager()
- : mutex_(OS::CreateMutex()),
- mutex_owner_(ThreadId::Invalid()),
+ : mutex_owner_(ThreadId::Invalid()),
lazily_archived_thread_(ThreadId::Invalid()),
lazily_archived_thread_state_(NULL),
free_anchor_(NULL),
@@ -315,7 +315,6 @@ ThreadManager::ThreadManager()
ThreadManager::~ThreadManager() {
- delete mutex_;
DeleteThreadStateList(free_anchor_);
DeleteThreadStateList(in_use_anchor_);
}
@@ -439,8 +438,7 @@ ContextSwitcher::ContextSwitcher(Isolate* isolate, int every_n_ms)
// Set the scheduling interval of V8 threads. This function starts the
// ContextSwitcher thread if needed.
-void ContextSwitcher::StartPreemption(int every_n_ms) {
- Isolate* isolate = Isolate::Current();
+void ContextSwitcher::StartPreemption(Isolate* isolate, int every_n_ms) {
ASSERT(Locker::IsLocked(reinterpret_cast<v8::Isolate*>(isolate)));
if (isolate->context_switcher() == NULL) {
// If the ContextSwitcher thread is not running at the moment start it now.
@@ -456,8 +454,7 @@ void ContextSwitcher::StartPreemption(int every_n_ms) {
// Disable preemption of V8 threads. If multiple threads want to use V8 they
// must cooperatively schedule amongst them from this point on.
-void ContextSwitcher::StopPreemption() {
- Isolate* isolate = Isolate::Current();
+void ContextSwitcher::StopPreemption(Isolate* isolate) {
ASSERT(Locker::IsLocked(reinterpret_cast<v8::Isolate*>(isolate)));
if (isolate->context_switcher() != NULL) {
// The ContextSwitcher thread is running. We need to stop it and release
diff --git a/deps/v8/src/v8threads.h b/deps/v8/src/v8threads.h
index 8dce8602f..1edacfc3b 100644
--- a/deps/v8/src/v8threads.h
+++ b/deps/v8/src/v8threads.h
@@ -119,7 +119,7 @@ class ThreadManager {
void EagerlyArchiveThread();
- Mutex* mutex_;
+ Mutex mutex_;
ThreadId mutex_owner_;
ThreadId lazily_archived_thread_;
ThreadState* lazily_archived_thread_state_;
@@ -146,10 +146,10 @@ class ThreadManager {
class ContextSwitcher: public Thread {
public:
// Set the preemption interval for the ContextSwitcher thread.
- static void StartPreemption(int every_n_ms);
+ static void StartPreemption(Isolate* isolate, int every_n_ms);
// Stop sending preemption requests to threads.
- static void StopPreemption();
+ static void StopPreemption(Isolate* isolate);
// Preempted thread needs to call back to the ContextSwitcher to acknowledge
// the handling of a preemption request.
diff --git a/deps/v8/src/version.cc b/deps/v8/src/version.cc
index 270bd084e..a65b54f67 100644
--- a/deps/v8/src/version.cc
+++ b/deps/v8/src/version.cc
@@ -33,9 +33,9 @@
// NOTE these macros are used by some of the tool scripts and the build
// system so their names cannot be changed without changing the scripts.
#define MAJOR_VERSION 3
-#define MINOR_VERSION 20
-#define BUILD_NUMBER 17
-#define PATCH_LEVEL 14
+#define MINOR_VERSION 21
+#define BUILD_NUMBER 18
+#define PATCH_LEVEL 3
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
#define IS_CANDIDATE_VERSION 0
diff --git a/deps/v8/src/win32-headers.h b/deps/v8/src/win32-headers.h
index 2b5d7d71f..98b0120ea 100644
--- a/deps/v8/src/win32-headers.h
+++ b/deps/v8/src/win32-headers.h
@@ -25,6 +25,9 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#ifndef V8_WIN32_HEADERS_H_
+#define V8_WIN32_HEADERS_H_
+
#ifndef WIN32_LEAN_AND_MEAN
// WIN32_LEAN_AND_MEAN implies NOCRYPT and NOGDI.
#define WIN32_LEAN_AND_MEAN
@@ -55,7 +58,6 @@
#include <windows.h>
-#ifdef V8_WIN32_HEADERS_FULL
#include <signal.h> // For raise().
#include <time.h> // For LocalOffset() implementation.
#include <mmsystem.h> // For timeGetTime().
@@ -81,7 +83,6 @@
#endif // __MINGW32__
#include <process.h> // For _beginthreadex().
#include <stdlib.h>
-#endif // V8_WIN32_HEADERS_FULL
#undef VOID
#undef DELETE
@@ -94,5 +95,7 @@
#undef ANY
#undef IGNORE
#undef GetObject
-#undef CreateMutex
#undef CreateSemaphore
+#undef Yield
+
+#endif // V8_WIN32_HEADERS_H_
diff --git a/deps/v8/src/win32-math.cc b/deps/v8/src/win32-math.cc
index 9ffc4ea73..88fa3a684 100644
--- a/deps/v8/src/win32-math.cc
+++ b/deps/v8/src/win32-math.cc
@@ -31,8 +31,6 @@
// (http://www.opengroup.org/onlinepubs/000095399/)
#ifdef _MSC_VER
-#undef V8_WIN32_LEAN_AND_MEAN
-#define V8_WIN32_HEADERS_FULL
#include "win32-headers.h"
#include <limits.h> // Required for INT_MAX etc.
#include <float.h> // Required for DBL_MAX and on Win32 for finite()
diff --git a/deps/v8/src/x64/assembler-x64-inl.h b/deps/v8/src/x64/assembler-x64-inl.h
index 826c06e5b..07d07033e 100644
--- a/deps/v8/src/x64/assembler-x64-inl.h
+++ b/deps/v8/src/x64/assembler-x64-inl.h
@@ -448,7 +448,7 @@ Object** RelocInfo::call_object_address() {
}
-void RelocInfo::Visit(ObjectVisitor* visitor) {
+void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
visitor->VisitEmbeddedPointer(this);
@@ -463,12 +463,11 @@ void RelocInfo::Visit(ObjectVisitor* visitor) {
} else if (RelocInfo::IsCodeAgeSequence(mode)) {
visitor->VisitCodeAgeSequence(this);
#ifdef ENABLE_DEBUGGER_SUPPORT
- // TODO(isolates): Get a cached isolate below.
} else if (((RelocInfo::IsJSReturn(mode) &&
IsPatchedReturnSequence()) ||
(RelocInfo::IsDebugBreakSlot(mode) &&
IsPatchedDebugBreakSlotSequence())) &&
- Isolate::Current()->debug()->has_break_points()) {
+ isolate->debug()->has_break_points()) {
visitor->VisitDebugTarget(this);
#endif
} else if (RelocInfo::IsRuntimeEntry(mode)) {
diff --git a/deps/v8/src/x64/assembler-x64.cc b/deps/v8/src/x64/assembler-x64.cc
index 8969d89a6..41bf297b3 100644
--- a/deps/v8/src/x64/assembler-x64.cc
+++ b/deps/v8/src/x64/assembler-x64.cc
@@ -63,98 +63,32 @@ void CpuFeatures::Probe() {
return; // No features if we might serialize.
}
- const int kBufferSize = 4 * KB;
- VirtualMemory* memory = new VirtualMemory(kBufferSize);
- if (!memory->IsReserved()) {
- delete memory;
- return;
+ uint64_t probed_features = 0;
+ CPU cpu;
+ if (cpu.has_sse41()) {
+ probed_features |= static_cast<uint64_t>(1) << SSE4_1;
}
- ASSERT(memory->size() >= static_cast<size_t>(kBufferSize));
- if (!memory->Commit(memory->address(), kBufferSize, true/*executable*/)) {
- delete memory;
- return;
+ if (cpu.has_sse3()) {
+ probed_features |= static_cast<uint64_t>(1) << SSE3;
}
- Assembler assm(NULL, memory->address(), kBufferSize);
- Label cpuid, done;
-#define __ assm.
- // Save old rsp, since we are going to modify the stack.
- __ push(rbp);
- __ pushfq();
- __ push(rdi);
- __ push(rcx);
- __ push(rbx);
- __ movq(rbp, rsp);
-
- // If we can modify bit 21 of the EFLAGS register, then CPUID is supported.
- __ pushfq();
- __ pop(rax);
- __ movq(rdx, rax);
- __ xor_(rax, Immediate(0x200000)); // Flip bit 21.
- __ push(rax);
- __ popfq();
- __ pushfq();
- __ pop(rax);
- __ xor_(rax, rdx); // Different if CPUID is supported.
- __ j(not_zero, &cpuid);
-
- // CPUID not supported. Clear the supported features in rax.
- __ xor_(rax, rax);
- __ jmp(&done);
-
- // Invoke CPUID with 1 in eax to get feature information in
- // ecx:edx. Temporarily enable CPUID support because we know it's
- // safe here.
- __ bind(&cpuid);
- __ movl(rax, Immediate(1));
- supported_ = kDefaultCpuFeatures | (1 << CPUID);
- { CpuFeatureScope fscope(&assm, CPUID);
- __ cpuid();
- // Move the result from ecx:edx to rdi.
- __ movl(rdi, rdx); // Zero-extended to 64 bits.
- __ shl(rcx, Immediate(32));
- __ or_(rdi, rcx);
-
- // Get the sahf supported flag, from CPUID(0x80000001)
- __ movq(rax, 0x80000001, RelocInfo::NONE64);
- __ cpuid();
+ // SSE2 must be available on every x64 CPU.
+ ASSERT(cpu.has_sse2());
+ probed_features |= static_cast<uint64_t>(1) << SSE2;
+
+  // CMOV must be available on every x64 CPU.
+ ASSERT(cpu.has_cmov());
+ probed_features |= static_cast<uint64_t>(1) << CMOV;
+
+ // SAHF is not generally available in long mode.
+ if (cpu.has_sahf()) {
+ probed_features |= static_cast<uint64_t>(1) << SAHF;
}
- supported_ = kDefaultCpuFeatures;
- // Put the CPU flags in rax.
- // rax = (rcx & 1) | (rdi & ~1) | (1 << CPUID).
- __ movl(rax, Immediate(1));
- __ and_(rcx, rax); // Bit 0 is set if SAHF instruction supported.
- __ not_(rax);
- __ and_(rax, rdi);
- __ or_(rax, rcx);
- __ or_(rax, Immediate(1 << CPUID));
-
- // Done.
- __ bind(&done);
- __ movq(rsp, rbp);
- __ pop(rbx);
- __ pop(rcx);
- __ pop(rdi);
- __ popfq();
- __ pop(rbp);
- __ ret(0);
-#undef __
-
- typedef uint64_t (*F0)();
- F0 probe = FUNCTION_CAST<F0>(reinterpret_cast<Address>(memory->address()));
-
- uint64_t probed_features = probe();
uint64_t platform_features = OS::CpuFeaturesImpliedByPlatform();
supported_ = probed_features | platform_features;
found_by_runtime_probing_only_
= probed_features & ~kDefaultCpuFeatures & ~platform_features;
-
- // CMOV must be available on an X64 CPU.
- ASSERT(IsSupported(CPUID));
- ASSERT(IsSupported(CMOV));
-
- delete memory;
}
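
With this rewrite, CpuFeatures::Probe() on x64 no longer assembles and runs a CPUID
stub in freshly committed executable memory; it reads the same feature bits through
the CPU class added in this upgrade (see cpu.cc/cpu.h in the diffstat). Downstream
feature checks are unchanged, for example:

    // Usage after probing stays the same (sketch):
    if (CpuFeatures::IsSupported(SSE4_1)) {
      // safe to emit roundsd, etc.
    }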
@@ -462,7 +396,7 @@ void Assembler::GrowBuffer() {
// Some internal data structures overflow for very large buffers,
// they must ensure that kMaximalBufferSize is not too large.
if ((desc.buffer_size > kMaximalBufferSize) ||
- (desc.buffer_size > HEAP->MaxOldGenerationSize())) {
+ (desc.buffer_size > isolate()->heap()->MaxOldGenerationSize())) {
V8::FatalProcessOutOfMemory("Assembler::GrowBuffer");
}
@@ -987,7 +921,6 @@ void Assembler::cmpb_al(Immediate imm8) {
void Assembler::cpuid() {
- ASSERT(IsEnabled(CPUID));
EnsureSpace ensure_space(this);
emit(0x0F);
emit(0xA2);
@@ -1600,7 +1533,7 @@ void Assembler::movq(Register dst, Handle<Object> value, RelocInfo::Mode mode) {
} else {
EnsureSpace ensure_space(this);
ASSERT(value->IsHeapObject());
- ASSERT(!HEAP->InNewSpace(*value));
+ ASSERT(!isolate()->heap()->InNewSpace(*value));
emit_rex_64(dst);
emit(0xB8 | dst.low_bits());
emitp(value.location(), mode);
@@ -1919,13 +1852,6 @@ void Assembler::pushfq() {
}
-void Assembler::rdtsc() {
- EnsureSpace ensure_space(this);
- emit(0x0F);
- emit(0x31);
-}
-
-
void Assembler::ret(int imm16) {
EnsureSpace ensure_space(this);
ASSERT(is_uint16(imm16));
@@ -2992,6 +2918,17 @@ void Assembler::ucomisd(XMMRegister dst, const Operand& src) {
}
+void Assembler::cmpltsd(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ emit(0xF2);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0xC2);
+ emit_sse_operand(dst, src);
+ emit(0x01); // LT == 1
+}
+
+
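The new cmpltsd() above encodes the SSE2 scalar compare CMPSD (F2 0F C2 /r ib) with
immediate 1, the less-than predicate: the low 64 bits of dst become an all-ones mask
when dst < src and zero otherwise, which enables branchless double selection.
Semantics sketch (not part of the patch):

    //   cmpltsd xmm_dst, xmm_src  ==>
    //     dst[63:0] = (dst.lo_double < src.lo_double) ? ~0ull : 0
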
void Assembler::roundsd(XMMRegister dst, XMMRegister src,
Assembler::RoundingMode mode) {
ASSERT(IsEnabled(SSE4_1));
diff --git a/deps/v8/src/x64/assembler-x64.h b/deps/v8/src/x64/assembler-x64.h
index 4e36b6e4b..f2e37fe86 100644
--- a/deps/v8/src/x64/assembler-x64.h
+++ b/deps/v8/src/x64/assembler-x64.h
@@ -475,7 +475,6 @@ class CpuFeatures : public AllStatic {
if (f == SSE3 && !FLAG_enable_sse3) return false;
if (f == SSE4_1 && !FLAG_enable_sse4_1) return false;
if (f == CMOV && !FLAG_enable_cmov) return false;
- if (f == RDTSC && !FLAG_enable_rdtsc) return false;
if (f == SAHF && !FLAG_enable_sahf) return false;
return (supported_ & (static_cast<uint64_t>(1) << f)) != 0;
}
@@ -1176,7 +1175,6 @@ class Assembler : public AssemblerBase {
void hlt();
void int3();
void nop();
- void rdtsc();
void ret(int imm16);
void setcc(Condition cc, Register reg);
@@ -1386,6 +1384,8 @@ class Assembler : public AssemblerBase {
void movmskpd(Register dst, XMMRegister src);
void movmskps(Register dst, XMMRegister src);
+ void cmpltsd(XMMRegister dst, XMMRegister src);
+
// The first argument is the reg field, the second argument is the r/m field.
void emit_sse_operand(XMMRegister dst, XMMRegister src);
void emit_sse_operand(XMMRegister reg, const Operand& adr);
diff --git a/deps/v8/src/x64/builtins-x64.cc b/deps/v8/src/x64/builtins-x64.cc
index 18a6e566c..81721c25e 100644
--- a/deps/v8/src/x64/builtins-x64.cc
+++ b/deps/v8/src/x64/builtins-x64.cc
@@ -73,6 +73,24 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
}
+static void CallRuntimePassFunction(MacroAssembler* masm,
+ Runtime::FunctionId function_id) {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ // Push a copy of the function onto the stack.
+ __ push(rdi);
+ // Push call kind information.
+ __ push(rcx);
+ // Function is also the parameter to the runtime call.
+ __ push(rdi);
+
+ __ CallRuntime(function_id, 1);
+ // Restore call kind information.
+ __ pop(rcx);
+ // Restore receiver.
+ __ pop(rdi);
+}
+
+
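CallRuntimePassFunction() above factors out the frame-scope, push-rdi/push-rcx,
runtime-call, pop-pop sequence that Generate_LazyCompile, Generate_LazyRecompile,
Generate_ConcurrentRecompile and Generate_InRecompileQueue each open-coded before
this patch; the FrameScope destructor tears down the internal frame on exit.
Callers reduce to the helper plus a tail jump, as in the hunks below:

    // Caller pattern after the refactoring (see Generate_LazyCompile below):
    //   CallRuntimePassFunction(masm, Runtime::kLazyCompile);
    //   __ lea(rax, FieldOperand(rax, Code::kHeaderSize));
    //   __ jmp(rax);
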
static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
__ movq(kScratchRegister,
FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
@@ -84,57 +102,27 @@ static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
void Builtins::Generate_InRecompileQueue(MacroAssembler* masm) {
- GenerateTailCallToSharedCode(masm);
-}
-
-
-void Builtins::Generate_InstallRecompiledCode(MacroAssembler* masm) {
- // Enter an internal frame.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Push a copy of the function onto the stack.
- __ push(rdi);
- // Push call kind information.
- __ push(rcx);
-
- __ push(rdi); // Function is also the parameter to the runtime call.
- __ CallRuntime(Runtime::kInstallRecompiledCode, 1);
-
- // Restore call kind information.
- __ pop(rcx);
- // Restore function.
- __ pop(rdi);
-
- // Tear down internal frame.
- }
-
- // Do a tail-call of the compiled function.
+ // Checking whether the queued function is ready for install is optional,
+ // since we come across interrupts and stack checks elsewhere. However,
+ // not checking may delay installing ready functions, and always checking
+ // would be quite expensive. A good compromise is to first check against
+ // stack limit as a cue for an interrupt signal.
+ Label ok;
+ __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
+ __ j(above_equal, &ok);
+
+ CallRuntimePassFunction(masm, Runtime::kTryInstallRecompiledCode);
+ // Tail call to returned code.
__ lea(rax, FieldOperand(rax, Code::kHeaderSize));
__ jmp(rax);
-}
+ __ bind(&ok);
+ GenerateTailCallToSharedCode(masm);
+}
-void Builtins::Generate_ParallelRecompile(MacroAssembler* masm) {
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Push a copy of the function onto the stack.
- __ push(rdi);
- // Push call kind information.
- __ push(rcx);
-
- __ push(rdi); // Function is also the parameter to the runtime call.
- __ CallRuntime(Runtime::kParallelRecompile, 1);
-
- // Restore call kind information.
- __ pop(rcx);
- // Restore receiver.
- __ pop(rdi);
-
- // Tear down internal frame.
- }
+void Builtins::Generate_ConcurrentRecompile(MacroAssembler* masm) {
+ CallRuntimePassFunction(masm, Runtime::kConcurrentRecompile);
GenerateTailCallToSharedCode(masm);
}
@@ -586,26 +574,7 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
void Builtins::Generate_LazyCompile(MacroAssembler* masm) {
- // Enter an internal frame.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Push a copy of the function onto the stack.
- __ push(rdi);
- // Push call kind information.
- __ push(rcx);
-
- __ push(rdi); // Function is also the parameter to the runtime call.
- __ CallRuntime(Runtime::kLazyCompile, 1);
-
- // Restore call kind information.
- __ pop(rcx);
- // Restore receiver.
- __ pop(rdi);
-
- // Tear down internal frame.
- }
-
+ CallRuntimePassFunction(masm, Runtime::kLazyCompile);
// Do a tail-call of the compiled function.
__ lea(rax, FieldOperand(rax, Code::kHeaderSize));
__ jmp(rax);
@@ -613,26 +582,7 @@ void Builtins::Generate_LazyCompile(MacroAssembler* masm) {
void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
- // Enter an internal frame.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Push a copy of the function onto the stack.
- __ push(rdi);
- // Push call kind information.
- __ push(rcx);
-
- __ push(rdi); // Function is also the parameter to the runtime call.
- __ CallRuntime(Runtime::kLazyRecompile, 1);
-
- // Restore call kind information.
- __ pop(rcx);
- // Restore function.
- __ pop(rdi);
-
- // Tear down internal frame.
- }
-
+ CallRuntimePassFunction(masm, Runtime::kLazyRecompile);
// Do a tail-call of the compiled function.
__ lea(rax, FieldOperand(rax, Code::kHeaderSize));
__ jmp(rax);
@@ -708,7 +658,7 @@ static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
}
// Get the full codegen state from the stack and untag it.
- __ SmiToInteger32(r10, Operand(rsp, 1 * kPointerSize));
+ __ SmiToInteger32(r10, Operand(rsp, kPCOnStackSize));
// Switch on the state.
Label not_no_registers, not_tos_rax;
@@ -717,7 +667,7 @@ static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
__ ret(1 * kPointerSize); // Remove state.
__ bind(&not_no_registers);
- __ movq(rax, Operand(rsp, 2 * kPointerSize));
+ __ movq(rax, Operand(rsp, kPCOnStackSize + kPointerSize));
__ cmpq(r10, Immediate(FullCodeGenerator::TOS_REG));
__ j(not_equal, &not_tos_rax, Label::kNear);
__ ret(2 * kPointerSize); // Remove state, rax.
@@ -782,8 +732,8 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
// 2. Get the function to call (passed as receiver) from the stack, check
// if it is a function.
Label slow, non_function;
- // The function to call is at position n+1 on the stack.
- __ movq(rdi, Operand(rsp, rax, times_pointer_size, 1 * kPointerSize));
+ StackArgumentsAccessor args(rsp, rax);
+ __ movq(rdi, args.GetReceiverOperand());
__ JumpIfSmi(rdi, &non_function);
__ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
__ j(not_equal, &slow);
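
StackArgumentsAccessor, introduced here and used throughout the stubs below, wraps
the stack arithmetic for arguments sitting above a return address: with the argument
count in rax, the receiver lives at rsp + (argc + 1) * kPointerSize, exactly the
operand the replaced line spelled out by hand. Roughly:

    // Correspondence with the replaced operands (sketch):
    //   args.GetReceiverOperand()   ~  Operand(rsp, rax, times_pointer_size, 1 * kPointerSize)
    //   args.GetArgumentOperand(1)  ~  Operand(rsp, rax, times_pointer_size, 0)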
@@ -808,7 +758,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ j(not_zero, &shift_arguments);
// Compute the receiver in non-strict mode.
- __ movq(rbx, Operand(rsp, rax, times_pointer_size, 0));
+ __ movq(rbx, args.GetArgumentOperand(1));
__ JumpIfSmi(rbx, &convert_to_object, Label::kNear);
__ CompareRoot(rbx, Heap::kNullValueRootIndex);
@@ -837,7 +787,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
}
// Restore the function to rdi.
- __ movq(rdi, Operand(rsp, rax, times_pointer_size, 1 * kPointerSize));
+ __ movq(rdi, args.GetReceiverOperand());
__ jmp(&patch_receiver, Label::kNear);
// Use the global receiver object from the called function as the
@@ -851,7 +801,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ movq(rbx, FieldOperand(rbx, GlobalObject::kGlobalReceiverOffset));
__ bind(&patch_receiver);
- __ movq(Operand(rsp, rax, times_pointer_size, 0), rbx);
+ __ movq(args.GetArgumentOperand(1), rbx);
__ jmp(&shift_arguments);
}
@@ -868,7 +818,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
// CALL_NON_FUNCTION builtin expects the non-function callee as
// receiver, so overwrite the first argument which will ultimately
// become the receiver.
- __ movq(Operand(rsp, rax, times_pointer_size, 0), rdi);
+ __ movq(args.GetArgumentOperand(1), rdi);
// 4. Shift arguments and return address one slot down on the stack
// (overwriting the original receiver). Adjust argument count to make
@@ -1178,10 +1128,11 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
// Load the first argument into rax and get rid of the rest
// (including the receiver).
+ StackArgumentsAccessor args(rsp, rax);
Label no_arguments;
__ testq(rax, rax);
__ j(zero, &no_arguments);
- __ movq(rbx, Operand(rsp, rax, times_pointer_size, 0));
+ __ movq(rbx, args.GetArgumentOperand(1));
__ PopReturnAddressTo(rcx);
__ lea(rsp, Operand(rsp, rax, times_pointer_size, kPointerSize));
__ PushReturnAddressFrom(rcx);
@@ -1407,32 +1358,46 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
+ // Lookup the function in the JavaScript frame.
__ movq(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
-
- // Pass the function to optimize as the argument to the on-stack
- // replacement runtime function.
{
FrameScope scope(masm, StackFrame::INTERNAL);
+ // Lookup and calculate pc offset.
+ __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerPCOffset));
+ __ movq(rbx, FieldOperand(rax, JSFunction::kSharedFunctionInfoOffset));
+ __ subq(rdx, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ subq(rdx, FieldOperand(rbx, SharedFunctionInfo::kCodeOffset));
+ __ Integer32ToSmi(rdx, rdx);
+
+ // Pass both function and pc offset as arguments.
__ push(rax);
- __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
+ __ push(rdx);
+ __ CallRuntime(Runtime::kCompileForOnStackReplacement, 2);
}
- // If the result was -1 it means that we couldn't optimize the
- // function. Just return and continue in the unoptimized version.
Label skip;
- __ SmiCompare(rax, Smi::FromInt(-1));
+ // If the code object is null, just return to the unoptimized code.
+ __ cmpq(rax, Immediate(0));
__ j(not_equal, &skip, Label::kNear);
__ ret(0);
__ bind(&skip);
- // Untag the AST id and push it on the stack.
- __ SmiToInteger32(rax, rax);
- __ push(rax);
-
- // Generate the code for doing the frame-to-frame translation using
- // the deoptimizer infrastructure.
- Deoptimizer::EntryGenerator generator(masm, Deoptimizer::OSR);
- generator.Generate();
+
+ // Load deoptimization data from the code object.
+ __ movq(rbx, Operand(rax, Code::kDeoptimizationDataOffset - kHeapObjectTag));
+
+ // Load the OSR entrypoint offset from the deoptimization data.
+ __ SmiToInteger32(rbx, Operand(rbx, FixedArray::OffsetOfElementAt(
+ DeoptimizationInputData::kOsrPcOffsetIndex) - kHeapObjectTag));
+
+ // Compute the target address = code_obj + header_size + osr_offset
+ __ lea(rax, Operand(rax, rbx, times_1, Code::kHeaderSize - kHeapObjectTag));
+
+ // Overwrite the return address on the stack.
+ __ movq(Operand(rsp, 0), rax);
+
+ // And "return" to the OSR entry point of the function.
+ __ ret(0);
}
diff --git a/deps/v8/src/x64/code-stubs-x64.cc b/deps/v8/src/x64/code-stubs-x64.cc
index 787a50182..51e1a5395 100644
--- a/deps/v8/src/x64/code-stubs-x64.cc
+++ b/deps/v8/src/x64/code-stubs-x64.cc
@@ -39,6 +39,17 @@ namespace v8 {
namespace internal {
+void FastNewClosureStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { rbx };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kNewClosureFromStubFailure)->entry;
+}
+
+
void ToNumberStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
@@ -295,140 +306,6 @@ void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
}
-void FastNewClosureStub::Generate(MacroAssembler* masm) {
- // Create a new closure from the given function info in new
- // space. Set the context to the current context in rsi.
- Counters* counters = masm->isolate()->counters();
-
- Label gc;
- __ Allocate(JSFunction::kSize, rax, rbx, rcx, &gc, TAG_OBJECT);
-
- __ IncrementCounter(counters->fast_new_closure_total(), 1);
-
- // Get the function info from the stack.
- __ movq(rdx, Operand(rsp, 1 * kPointerSize));
-
- int map_index = Context::FunctionMapIndex(language_mode_, is_generator_);
-
- // Compute the function map in the current native context and set that
- // as the map of the allocated object.
- __ movq(rcx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ movq(rcx, FieldOperand(rcx, GlobalObject::kNativeContextOffset));
- __ movq(rbx, Operand(rcx, Context::SlotOffset(map_index)));
- __ movq(FieldOperand(rax, JSObject::kMapOffset), rbx);
-
- // Initialize the rest of the function. We don't have to update the
- // write barrier because the allocated object is in new space.
- __ LoadRoot(rbx, Heap::kEmptyFixedArrayRootIndex);
- __ LoadRoot(r8, Heap::kTheHoleValueRootIndex);
- __ LoadRoot(rdi, Heap::kUndefinedValueRootIndex);
- __ movq(FieldOperand(rax, JSObject::kPropertiesOffset), rbx);
- __ movq(FieldOperand(rax, JSObject::kElementsOffset), rbx);
- __ movq(FieldOperand(rax, JSFunction::kPrototypeOrInitialMapOffset), r8);
- __ movq(FieldOperand(rax, JSFunction::kSharedFunctionInfoOffset), rdx);
- __ movq(FieldOperand(rax, JSFunction::kContextOffset), rsi);
- __ movq(FieldOperand(rax, JSFunction::kLiteralsOffset), rbx);
-
- // Initialize the code pointer in the function to be the one
- // found in the shared function info object.
- // But first check if there is an optimized version for our context.
- Label check_optimized;
- Label install_unoptimized;
- if (FLAG_cache_optimized_code) {
- __ movq(rbx,
- FieldOperand(rdx, SharedFunctionInfo::kOptimizedCodeMapOffset));
- __ testq(rbx, rbx);
- __ j(not_zero, &check_optimized, Label::kNear);
- }
- __ bind(&install_unoptimized);
- __ movq(FieldOperand(rax, JSFunction::kNextFunctionLinkOffset),
- rdi); // Initialize with undefined.
- __ movq(rdx, FieldOperand(rdx, SharedFunctionInfo::kCodeOffset));
- __ lea(rdx, FieldOperand(rdx, Code::kHeaderSize));
- __ movq(FieldOperand(rax, JSFunction::kCodeEntryOffset), rdx);
-
- // Return and remove the on-stack parameter.
- __ ret(1 * kPointerSize);
-
- __ bind(&check_optimized);
-
- __ IncrementCounter(counters->fast_new_closure_try_optimized(), 1);
-
- // rcx holds native context, rbx points to fixed array of 3-element entries
- // (native context, optimized code, literals).
- // The optimized code map must never be empty, so check the first elements.
- Label install_optimized;
- // Speculatively move code object into edx.
- __ movq(rdx, FieldOperand(rbx, SharedFunctionInfo::kFirstCodeSlot));
- __ cmpq(rcx, FieldOperand(rbx, SharedFunctionInfo::kFirstContextSlot));
- __ j(equal, &install_optimized);
-
- // Iterate through the rest of map backwards. rdx holds an index.
- Label loop;
- Label restore;
- __ movq(rdx, FieldOperand(rbx, FixedArray::kLengthOffset));
- __ SmiToInteger32(rdx, rdx);
- __ bind(&loop);
- // Do not double check first entry.
- __ cmpq(rdx, Immediate(SharedFunctionInfo::kSecondEntryIndex));
- __ j(equal, &restore);
- __ subq(rdx, Immediate(SharedFunctionInfo::kEntryLength));
- __ cmpq(rcx, FieldOperand(rbx,
- rdx,
- times_pointer_size,
- FixedArray::kHeaderSize));
- __ j(not_equal, &loop, Label::kNear);
- // Hit: fetch the optimized code.
- __ movq(rdx, FieldOperand(rbx,
- rdx,
- times_pointer_size,
- FixedArray::kHeaderSize + 1 * kPointerSize));
-
- __ bind(&install_optimized);
- __ IncrementCounter(counters->fast_new_closure_install_optimized(), 1);
-
- // TODO(fschneider): Idea: store proper code pointers in the map and either
- // unmangle them on marking or do nothing as the whole map is discarded on
- // major GC anyway.
- __ lea(rdx, FieldOperand(rdx, Code::kHeaderSize));
- __ movq(FieldOperand(rax, JSFunction::kCodeEntryOffset), rdx);
-
- // Now link a function into a list of optimized functions.
- __ movq(rdx, ContextOperand(rcx, Context::OPTIMIZED_FUNCTIONS_LIST));
-
- __ movq(FieldOperand(rax, JSFunction::kNextFunctionLinkOffset), rdx);
- // No need for write barrier as JSFunction (rax) is in the new space.
-
- __ movq(ContextOperand(rcx, Context::OPTIMIZED_FUNCTIONS_LIST), rax);
- // Store JSFunction (rax) into rdx before issuing write barrier as
- // it clobbers all the registers passed.
- __ movq(rdx, rax);
- __ RecordWriteContextSlot(
- rcx,
- Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST),
- rdx,
- rbx,
- kDontSaveFPRegs);
-
- // Return and remove the on-stack parameter.
- __ ret(1 * kPointerSize);
-
- __ bind(&restore);
- __ movq(rdx, Operand(rsp, 1 * kPointerSize));
- __ jmp(&install_unoptimized);
-
- // Create a new closure through the slower runtime call.
- __ bind(&gc);
- __ PopReturnAddressTo(rcx);
- __ pop(rdx);
- __ push(rsi);
- __ push(rdx);
- __ PushRoot(Heap::kFalseValueRootIndex);
- __ PushReturnAddressFrom(rcx);
- __ TailCallRuntime(Runtime::kNewClosure, 3, 1);
-}
-
-
void FastNewContextStub::Generate(MacroAssembler* masm) {
// Try to allocate the context in new space.
Label gc;
@@ -437,7 +314,8 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
rax, rbx, rcx, &gc, TAG_OBJECT);
// Get the function from the stack.
- __ movq(rcx, Operand(rsp, 1 * kPointerSize));
+ StackArgumentsAccessor args(rsp, 1, ARGUMENTS_DONT_CONTAIN_RECEIVER);
+ __ movq(rcx, args.GetArgumentOperand(0));
// Set up the object header.
__ LoadRoot(kScratchRegister, Heap::kFunctionContextMapRootIndex);
@@ -483,10 +361,10 @@ void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
rax, rbx, rcx, &gc, TAG_OBJECT);
// Get the function from the stack.
- __ movq(rcx, Operand(rsp, 1 * kPointerSize));
-
+ StackArgumentsAccessor args(rsp, 2, ARGUMENTS_DONT_CONTAIN_RECEIVER);
+ __ movq(rcx, args.GetArgumentOperand(1));
// Get the serialized scope info from the stack.
- __ movq(rbx, Operand(rsp, 2 * kPointerSize));
+ __ movq(rbx, args.GetArgumentOperand(0));
// Set up the object header.
__ LoadRoot(kScratchRegister, Heap::kBlockContextMapRootIndex);
@@ -560,7 +438,6 @@ class FloatingPointHelper : public AllStatic {
// Leaves rdx and rax unchanged. SmiOperands assumes both are smis.
// NumberOperands assumes both are smis or heap numbers.
static void LoadSSE2SmiOperands(MacroAssembler* masm);
- static void LoadSSE2NumberOperands(MacroAssembler* masm);
static void LoadSSE2UnknownOperands(MacroAssembler* masm,
Label* not_numbers);
@@ -569,9 +446,6 @@ class FloatingPointHelper : public AllStatic {
static void LoadAsIntegers(MacroAssembler* masm,
Label* operand_conversion_failure,
Register heap_number_map);
- // As above, but we know the operands to be numbers. In that case,
- // conversion can't fail.
- static void LoadNumbersAsIntegers(MacroAssembler* masm);
// Tries to convert two values to smis losslessly.
// This fails if either argument is not a Smi nor a HeapNumber,
@@ -1262,8 +1136,10 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
const bool tagged = (argument_type_ == TAGGED);
if (tagged) {
Label input_not_smi, loaded;
+
// Test that rax is a number.
- __ movq(rax, Operand(rsp, kPointerSize));
+ StackArgumentsAccessor args(rsp, 1, ARGUMENTS_DONT_CONTAIN_RECEIVER);
+ __ movq(rax, args.GetArgumentOperand(0));
__ JumpIfNotSmi(rax, &input_not_smi, Label::kNear);
// Input is a smi. Untag and load it onto the FPU stack.
// Then load the bits of the double into rbx.
@@ -1324,7 +1200,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
ExternalReference::transcendental_cache_array_address(masm->isolate());
__ movq(rax, cache_array);
int cache_array_index =
- type_ * sizeof(Isolate::Current()->transcendental_cache()->caches_[0]);
+ type_ * sizeof(masm->isolate()->transcendental_cache()->caches_[0]);
__ movq(rax, Operand(rax, cache_array_index));
// rax points to the cache for the type type_.
// If NULL, the cache hasn't been initialized yet, so go through runtime.
@@ -1548,40 +1424,6 @@ void TranscendentalCacheStub::GenerateOperation(
// Input: rdx, rax are the left and right objects of a bit op.
// Output: rax, rcx are left and right integers for a bit op.
-void FloatingPointHelper::LoadNumbersAsIntegers(MacroAssembler* masm) {
- // Check float operands.
- Label done;
- Label rax_is_smi;
- Label rax_is_object;
- Label rdx_is_object;
-
- __ JumpIfNotSmi(rdx, &rdx_is_object);
- __ SmiToInteger32(rdx, rdx);
- __ JumpIfSmi(rax, &rax_is_smi);
-
- __ bind(&rax_is_object);
- DoubleToIStub stub1(rax, rcx, HeapNumber::kValueOffset - kHeapObjectTag,
- true);
- __ call(stub1.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
-
- __ jmp(&done);
-
- __ bind(&rdx_is_object);
- DoubleToIStub stub2(rdx, rdx, HeapNumber::kValueOffset - kHeapObjectTag,
- true);
- __ call(stub1.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
- __ JumpIfNotSmi(rax, &rax_is_object);
-
- __ bind(&rax_is_smi);
- __ SmiToInteger32(rcx, rax);
-
- __ bind(&done);
- __ movl(rax, rdx);
-}
-
-
-// Input: rdx, rax are the left and right objects of a bit op.
-// Output: rax, rcx are left and right integers for a bit op.
// Jump to conversion_failure: rdx and rax are unchanged.
void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm,
Label* conversion_failure,
@@ -1605,10 +1447,8 @@ void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm,
__ bind(&arg1_is_object);
__ cmpq(FieldOperand(rdx, HeapObject::kMapOffset), heap_number_map);
__ j(not_equal, &check_undefined_arg1);
- // Get the untagged integer version of the rdx heap number in rcx.
- DoubleToIStub stub1(rdx, r8, HeapNumber::kValueOffset - kHeapObjectTag,
- true);
- __ call(stub1.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
+ // Get the untagged integer version of the rdx heap number in r8.
+ __ TruncateHeapNumberToI(r8, rdx);
// Here r8 has the untagged integer, rax has a Smi or a heap number.
__ bind(&load_arg2);
@@ -1628,9 +1468,7 @@ void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm,
__ cmpq(FieldOperand(rax, HeapObject::kMapOffset), heap_number_map);
__ j(not_equal, &check_undefined_arg2);
// Get the untagged integer version of the rax heap number in rcx.
- DoubleToIStub stub2(rax, rcx, HeapNumber::kValueOffset - kHeapObjectTag,
- true);
- __ call(stub2.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
+ __ TruncateHeapNumberToI(rcx, rax);
__ bind(&done);
__ movl(rax, r8);
@@ -1645,30 +1483,6 @@ void FloatingPointHelper::LoadSSE2SmiOperands(MacroAssembler* masm) {
}
-void FloatingPointHelper::LoadSSE2NumberOperands(MacroAssembler* masm) {
- Label load_smi_rdx, load_nonsmi_rax, load_smi_rax, done;
- // Load operand in rdx into xmm0.
- __ JumpIfSmi(rdx, &load_smi_rdx);
- __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
- // Load operand in rax into xmm1.
- __ JumpIfSmi(rax, &load_smi_rax);
- __ bind(&load_nonsmi_rax);
- __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
- __ jmp(&done);
-
- __ bind(&load_smi_rdx);
- __ SmiToInteger32(kScratchRegister, rdx);
- __ cvtlsi2sd(xmm0, kScratchRegister);
- __ JumpIfNotSmi(rax, &load_nonsmi_rax);
-
- __ bind(&load_smi_rax);
- __ SmiToInteger32(kScratchRegister, rax);
- __ cvtlsi2sd(xmm1, kScratchRegister);
-
- __ bind(&done);
-}
-
-
void FloatingPointHelper::LoadSSE2UnknownOperands(MacroAssembler* masm,
Label* not_numbers) {
Label load_smi_rdx, load_nonsmi_rax, load_smi_rax, load_float_rax, done;
@@ -1796,8 +1610,9 @@ void MathPowStub::Generate(MacroAssembler* masm) {
// The exponent and base are supplied as arguments on the stack.
// This can only happen if the stub is called from non-optimized code.
// Load input parameters from stack.
- __ movq(base, Operand(rsp, 2 * kPointerSize));
- __ movq(exponent, Operand(rsp, 1 * kPointerSize));
+ StackArgumentsAccessor args(rsp, 2, ARGUMENTS_DONT_CONTAIN_RECEIVER);
+ __ movq(base, args.GetArgumentOperand(0));
+ __ movq(exponent, args.GetArgumentOperand(1));
__ JumpIfSmi(base, &base_is_smi, Label::kNear);
__ CompareRoot(FieldOperand(base, HeapObject::kMapOffset),
Heap::kHeapNumberMapRootIndex);
@@ -1830,16 +1645,17 @@ void MathPowStub::Generate(MacroAssembler* masm) {
}
if (exponent_type_ != INTEGER) {
- Label fast_power;
+ Label fast_power, try_arithmetic_simplification;
// Detect integer exponents stored as double.
+ __ DoubleToI(exponent, double_exponent, double_scratch,
+ TREAT_MINUS_ZERO_AS_ZERO, &try_arithmetic_simplification);
+ __ jmp(&int_exponent);
+
+ __ bind(&try_arithmetic_simplification);
__ cvttsd2si(exponent, double_exponent);
// Skip to runtime if possibly NaN (indicated by the indefinite integer).
__ cmpl(exponent, Immediate(0x80000000u));
__ j(equal, &call_runtime);
- __ cvtlsi2sd(double_scratch, exponent);
- // Already ruled out NaNs for exponent.
- __ ucomisd(double_exponent, double_scratch);
- __ j(equal, &int_exponent);
if (exponent_type_ == ON_STACK) {
// Detect square root case. Crankshaft detects constant +/-0.5 at
@@ -2230,7 +2046,8 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
Factory* factory = masm->isolate()->factory();
- __ SmiToInteger64(rbx, Operand(rsp, 1 * kPointerSize));
+ StackArgumentsAccessor args(rsp, 3, ARGUMENTS_DONT_CONTAIN_RECEIVER);
+ __ SmiToInteger64(rbx, args.GetArgumentOperand(2));
// rbx = parameter count (untagged)
// Check if the calling frame is an arguments adaptor frame.
@@ -2252,7 +2069,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
ArgumentsAdaptorFrameConstants::kLengthOffset));
__ lea(rdx, Operand(rdx, rcx, times_pointer_size,
StandardFrameConstants::kCallerSPOffset));
- __ movq(Operand(rsp, 2 * kPointerSize), rdx);
+ __ movq(args.GetArgumentOperand(1), rdx);
// rbx = parameter count (untagged)
// rcx = argument count (untagged)
@@ -2313,7 +2130,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
// Set up the callee in-object property.
STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
- __ movq(rdx, Operand(rsp, 3 * kPointerSize));
+ __ movq(rdx, args.GetArgumentOperand(0));
__ movq(FieldOperand(rax, JSObject::kHeaderSize +
Heap::kArgumentsCalleeIndex * kPointerSize),
rdx);
@@ -2364,7 +2181,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
// Load tagged parameter count into r9.
__ Integer32ToSmi(r9, rbx);
__ Move(r8, Smi::FromInt(Context::MIN_CONTEXT_SLOTS));
- __ addq(r8, Operand(rsp, 1 * kPointerSize));
+ __ addq(r8, args.GetArgumentOperand(2));
__ subq(r8, r9);
__ Move(r11, factory->the_hole_value());
__ movq(rdx, rdi);
@@ -2403,7 +2220,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
Label arguments_loop, arguments_test;
__ movq(r8, rbx);
- __ movq(rdx, Operand(rsp, 2 * kPointerSize));
+ __ movq(rdx, args.GetArgumentOperand(1));
// Untag rcx for the loop below.
__ SmiToInteger64(rcx, rcx);
__ lea(kScratchRegister, Operand(r8, times_pointer_size, 0));
@@ -2430,7 +2247,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
// rcx = argument count (untagged)
__ bind(&runtime);
__ Integer32ToSmi(rcx, rcx);
- __ movq(Operand(rsp, 1 * kPointerSize), rcx); // Patch argument count.
+ __ movq(args.GetArgumentOperand(2), rcx); // Patch argument count.
__ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
}
@@ -2449,12 +2266,13 @@ void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) {
__ j(not_equal, &runtime);
// Patch the arguments.length and the parameters pointer.
+ StackArgumentsAccessor args(rsp, 3, ARGUMENTS_DONT_CONTAIN_RECEIVER);
__ movq(rcx, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ movq(Operand(rsp, 1 * kPointerSize), rcx);
+ __ movq(args.GetArgumentOperand(2), rcx);
__ SmiToInteger64(rcx, rcx);
__ lea(rdx, Operand(rdx, rcx, times_pointer_size,
StandardFrameConstants::kCallerSPOffset));
- __ movq(Operand(rsp, 2 * kPointerSize), rdx);
+ __ movq(args.GetArgumentOperand(1), rdx);
__ bind(&runtime);
__ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
@@ -2475,18 +2293,19 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
__ j(equal, &adaptor_frame);
// Get the length from the frame.
- __ movq(rcx, Operand(rsp, 1 * kPointerSize));
+ StackArgumentsAccessor args(rsp, 3, ARGUMENTS_DONT_CONTAIN_RECEIVER);
+ __ movq(rcx, args.GetArgumentOperand(2));
__ SmiToInteger64(rcx, rcx);
__ jmp(&try_allocate);
// Patch the arguments.length and the parameters pointer.
__ bind(&adaptor_frame);
__ movq(rcx, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ movq(Operand(rsp, 1 * kPointerSize), rcx);
+ __ movq(args.GetArgumentOperand(2), rcx);
__ SmiToInteger64(rcx, rcx);
__ lea(rdx, Operand(rdx, rcx, times_pointer_size,
StandardFrameConstants::kCallerSPOffset));
- __ movq(Operand(rsp, 2 * kPointerSize), rdx);
+ __ movq(args.GetArgumentOperand(1), rdx);
// Try the new space allocation. Start out with computing the size of
// the arguments object and the elements array.
@@ -2516,7 +2335,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
// Get the length (smi tagged) and set that as an in-object property too.
STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
- __ movq(rcx, Operand(rsp, 1 * kPointerSize));
+ __ movq(rcx, args.GetArgumentOperand(2));
__ movq(FieldOperand(rax, JSObject::kHeaderSize +
Heap::kArgumentsLengthIndex * kPointerSize),
rcx);
@@ -2527,7 +2346,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
__ j(zero, &done);
// Get the parameters pointer from the stack.
- __ movq(rdx, Operand(rsp, 2 * kPointerSize));
+ __ movq(rdx, args.GetArgumentOperand(1));
// Set up the elements pointer in the allocated arguments object and
// initialize the header in the elements fixed array.
@@ -3010,7 +2829,8 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
const int kMaxInlineLength = 100;
Label slowcase;
Label done;
- __ movq(r8, Operand(rsp, kPointerSize * 3));
+ StackArgumentsAccessor args(rsp, 3, ARGUMENTS_DONT_CONTAIN_RECEIVER);
+ __ movq(r8, args.GetArgumentOperand(0));
__ JumpIfNotSmi(r8, &slowcase);
__ SmiToInteger32(rbx, r8);
__ cmpl(rbx, Immediate(kMaxInlineLength));
@@ -3048,11 +2868,11 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
__ movq(FieldOperand(rax, JSObject::kElementsOffset), rcx);
// Set input, index and length fields from arguments.
- __ movq(r8, Operand(rsp, kPointerSize * 1));
+ __ movq(r8, args.GetArgumentOperand(2));
__ movq(FieldOperand(rax, JSRegExpResult::kInputOffset), r8);
- __ movq(r8, Operand(rsp, kPointerSize * 2));
+ __ movq(r8, args.GetArgumentOperand(1));
__ movq(FieldOperand(rax, JSRegExpResult::kIndexOffset), r8);
- __ movq(r8, Operand(rsp, kPointerSize * 3));
+ __ movq(r8, args.GetArgumentOperand(0));
__ movq(FieldOperand(rax, JSArray::kLengthOffset), r8);
// Fill out the elements FixedArray.
@@ -3183,7 +3003,8 @@ void NumberToStringStub::GenerateConvertHashCodeToIndex(MacroAssembler* masm,
void NumberToStringStub::Generate(MacroAssembler* masm) {
Label runtime;
- __ movq(rbx, Operand(rsp, kPointerSize));
+ StackArgumentsAccessor args(rsp, 1, ARGUMENTS_DONT_CONTAIN_RECEIVER);
+ __ movq(rbx, args.GetArgumentOperand(0));
// Generate code to lookup number in the number string cache.
GenerateLookupNumberStringCache(masm, rbx, rax, r8, r9, &runtime);
@@ -3497,16 +3318,6 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
}
-void StackCheckStub::Generate(MacroAssembler* masm) {
- __ TailCallRuntime(Runtime::kStackGuard, 0, 1);
-}
-
-
-void InterruptStub::Generate(MacroAssembler* masm) {
- __ TailCallRuntime(Runtime::kInterrupt, 0, 1);
-}
-
-
static void GenerateRecordCallTarget(MacroAssembler* masm) {
// Cache the called function in a global property cell. Cache states
// are uninitialized, monomorphic (indicated by a JSFunction), and
@@ -3596,6 +3407,7 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
// rdi : the function to call
Isolate* isolate = masm->isolate();
Label slow, non_function;
+ StackArgumentsAccessor args(rsp, argc_);
// The receiver might implicitly be the global object. This is
// indicated by passing the hole as the receiver to the call
@@ -3603,15 +3415,14 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
if (ReceiverMightBeImplicit()) {
Label call;
// Get the receiver from the stack.
- // +1 ~ return address
- __ movq(rax, Operand(rsp, (argc_ + 1) * kPointerSize));
+ __ movq(rax, args.GetReceiverOperand());
// Call as function is indicated with the hole.
__ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
__ j(not_equal, &call, Label::kNear);
// Patch the receiver on the stack with the global receiver object.
__ movq(rcx, GlobalObjectOperand());
__ movq(rcx, FieldOperand(rcx, GlobalObject::kGlobalReceiverOffset));
- __ movq(Operand(rsp, (argc_ + 1) * kPointerSize), rcx);
+ __ movq(args.GetReceiverOperand(), rcx);
__ bind(&call);
}
@@ -3673,13 +3484,13 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
// CALL_NON_FUNCTION expects the non-function callee as receiver (instead
// of the original receiver from the call site).
__ bind(&non_function);
- __ movq(Operand(rsp, (argc_ + 1) * kPointerSize), rdi);
+ __ movq(args.GetReceiverOperand(), rdi);
__ Set(rax, argc_);
__ Set(rbx, 0);
__ SetCallKind(rcx, CALL_AS_METHOD);
__ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION);
Handle<Code> adaptor =
- Isolate::Current()->builtins()->ArgumentsAdaptorTrampoline();
+ isolate->builtins()->ArgumentsAdaptorTrampoline();
__ Jump(adaptor, RelocInfo::CODE_TARGET);
}
@@ -3734,7 +3545,7 @@ bool CEntryStub::NeedsImmovableCode() {
}
-bool CEntryStub::IsPregenerated() {
+bool CEntryStub::IsPregenerated(Isolate* isolate) {
#ifdef _WIN64
return result_size_ == 1;
#else
@@ -4208,12 +4019,13 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
static const unsigned int kWordBeforeResultValue = 0x458B4909;
// Only the inline check flag is supported on X64.
ASSERT(flags_ == kNoFlags || HasCallSiteInlineCheck());
- int extra_stack_space = HasCallSiteInlineCheck() ? kPointerSize : 0;
+ int extra_argument_offset = HasCallSiteInlineCheck() ? 1 : 0;
// Get the object - go slow case if it's a smi.
Label slow;
-
- __ movq(rax, Operand(rsp, 2 * kPointerSize + extra_stack_space));
+ StackArgumentsAccessor args(rsp, 2 + extra_argument_offset,
+ ARGUMENTS_DONT_CONTAIN_RECEIVER);
+ __ movq(rax, args.GetArgumentOperand(0));
__ JumpIfSmi(rax, &slow);
// Check that the left hand is a JS object. Leave its map in rax.
@@ -4223,7 +4035,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ j(above, &slow);
// Get the prototype of the function.
- __ movq(rdx, Operand(rsp, 1 * kPointerSize + extra_stack_space));
+ __ movq(rdx, args.GetArgumentOperand(1));
// rdx is function, rax is map.
// If there is a call site cache don't look in the global cache, but do the
@@ -4258,8 +4070,8 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ StoreRoot(rax, Heap::kInstanceofCacheMapRootIndex);
} else {
// Get return address and delta to inlined map check.
- __ movq(kScratchRegister, Operand(rsp, 0 * kPointerSize));
- __ subq(kScratchRegister, Operand(rsp, 1 * kPointerSize));
+ __ movq(kScratchRegister, StackOperandForReturnAddress(0));
+ __ subq(kScratchRegister, args.GetArgumentOperand(2));
if (FLAG_debug_code) {
__ movl(rdi, Immediate(kWordBeforeMapCheckValue));
__ cmpl(Operand(kScratchRegister, kOffsetToMapCheckValue - 4), rdi);
@@ -4299,8 +4111,8 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
// Assert it is a 1-byte signed value.
ASSERT(true_offset >= 0 && true_offset < 0x100);
__ movl(rax, Immediate(true_offset));
- __ movq(kScratchRegister, Operand(rsp, 0 * kPointerSize));
- __ subq(kScratchRegister, Operand(rsp, 1 * kPointerSize));
+ __ movq(kScratchRegister, StackOperandForReturnAddress(0));
+ __ subq(kScratchRegister, args.GetArgumentOperand(2));
__ movb(Operand(kScratchRegister, kOffsetToResultValue), rax);
if (FLAG_debug_code) {
__ movl(rax, Immediate(kWordBeforeResultValue));
@@ -4309,7 +4121,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
}
__ Set(rax, 0);
}
- __ ret(2 * kPointerSize + extra_stack_space);
+ __ ret((2 + extra_argument_offset) * kPointerSize);
__ bind(&is_not_instance);
if (!HasCallSiteInlineCheck()) {
@@ -4322,8 +4134,8 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
// Assert it is a 1-byte signed value.
ASSERT(false_offset >= 0 && false_offset < 0x100);
__ movl(rax, Immediate(false_offset));
- __ movq(kScratchRegister, Operand(rsp, 0 * kPointerSize));
- __ subq(kScratchRegister, Operand(rsp, 1 * kPointerSize));
+ __ movq(kScratchRegister, StackOperandForReturnAddress(0));
+ __ subq(kScratchRegister, args.GetArgumentOperand(2));
__ movb(Operand(kScratchRegister, kOffsetToResultValue), rax);
if (FLAG_debug_code) {
__ movl(rax, Immediate(kWordBeforeResultValue));
@@ -4331,7 +4143,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ Assert(equal, kInstanceofStubUnexpectedCallSiteCacheMov);
}
}
- __ ret(2 * kPointerSize + extra_stack_space);
+ __ ret((2 + extra_argument_offset) * kPointerSize);
// Slow-case: Go through the JavaScript implementation.
__ bind(&slow);
@@ -4489,8 +4301,9 @@ void StringAddStub::Generate(MacroAssembler* masm) {
Builtins::JavaScript builtin_id = Builtins::ADD;
// Load the two arguments.
- __ movq(rax, Operand(rsp, 2 * kPointerSize)); // First argument (left).
- __ movq(rdx, Operand(rsp, 1 * kPointerSize)); // Second argument (right).
+ StackArgumentsAccessor args(rsp, 2, ARGUMENTS_DONT_CONTAIN_RECEIVER);
+ __ movq(rax, args.GetArgumentOperand(0)); // First argument (left).
+ __ movq(rdx, args.GetArgumentOperand(1)); // Second argument (right).
// Make sure that both arguments are strings if not known in advance.
// Otherwise, at least one of the arguments is definitely a string,
@@ -4831,7 +4644,6 @@ void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
__ j(below, &done);
// Check the number to string cache.
- Label not_cached;
__ bind(&not_string);
// Puts the cached result into scratch1.
NumberToStringStub::GenerateLookupNumberStringCache(masm,
@@ -4839,22 +4651,9 @@ void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
scratch1,
scratch2,
scratch3,
- &not_cached);
+ slow);
__ movq(arg, scratch1);
__ movq(Operand(rsp, stack_offset), arg);
- __ jmp(&done);
-
- // Check if the argument is a safe string wrapper.
- __ bind(&not_cached);
- __ JumpIfSmi(arg, slow);
- __ CmpObjectType(arg, JS_VALUE_TYPE, scratch1); // map -> scratch1.
- __ j(not_equal, slow);
- __ testb(FieldOperand(scratch1, Map::kBitField2Offset),
- Immediate(1 << Map::kStringWrapperSafeForDefaultValueOf));
- __ j(zero, slow);
- __ movq(arg, FieldOperand(arg, JSValue::kValueOffset));
- __ movq(Operand(rsp, stack_offset), arg);
-
__ bind(&done);
}
@@ -5497,8 +5296,9 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
// rsp[8] : right string
// rsp[16] : left string
- __ movq(rdx, Operand(rsp, 2 * kPointerSize)); // left
- __ movq(rax, Operand(rsp, 1 * kPointerSize)); // right
+ StackArgumentsAccessor args(rsp, 2, ARGUMENTS_DONT_CONTAIN_RECEIVER);
+ __ movq(rdx, args.GetArgumentOperand(0)); // left
+ __ movq(rax, args.GetArgumentOperand(1)); // right
// Check for identity.
Label not_same;
@@ -6011,9 +5811,11 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
// undefined value), it guarantees the hash table doesn't contain the
// property. It's true even if some slots represent deleted properties
// (their names are the null value).
+ StackArgumentsAccessor args(rsp, 2, ARGUMENTS_DONT_CONTAIN_RECEIVER,
+ kPointerSize);
for (int i = kInlinedProbes; i < kTotalProbes; i++) {
// Compute the masked index: (hash + i + i * i) & mask.
- __ movq(scratch, Operand(rsp, 2 * kPointerSize));
+ __ movq(scratch, args.GetArgumentOperand(1));
if (i > 0) {
__ addl(scratch, Immediate(NameDictionary::GetProbeOffset(i)));
}
@@ -6033,7 +5835,7 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
__ j(equal, &not_in_dictionary);
// Stop if found the property.
- __ cmpq(scratch, Operand(rsp, 3 * kPointerSize));
+ __ cmpq(scratch, args.GetArgumentOperand(0));
__ j(equal, &in_dictionary);
if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
@@ -6083,8 +5885,6 @@ struct AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
{ REG(rbx), REG(rax), REG(rdi), EMIT_REMEMBERED_SET },
// Used in CompileArrayPushCall.
{ REG(rbx), REG(rcx), REG(rdx), EMIT_REMEMBERED_SET },
- // Used in CompileStoreGlobal.
- { REG(rbx), REG(rcx), REG(rdx), OMIT_REMEMBERED_SET },
// Used in StoreStubCompiler::CompileStoreField and
// KeyedStoreStubCompiler::CompileStoreField via GenerateStoreField.
{ REG(rdx), REG(rcx), REG(rbx), EMIT_REMEMBERED_SET },
@@ -6121,7 +5921,7 @@ struct AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
#undef REG
-bool RecordWriteStub::IsPregenerated() {
+bool RecordWriteStub::IsPregenerated(Isolate* isolate) {
for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
!entry->object.is(no_reg);
entry++) {
@@ -6385,8 +6185,9 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
Label fast_elements;
// Get array literal index, array literal and its map.
- __ movq(rdx, Operand(rsp, 1 * kPointerSize));
- __ movq(rbx, Operand(rsp, 2 * kPointerSize));
+ StackArgumentsAccessor args(rsp, 2, ARGUMENTS_DONT_CONTAIN_RECEIVER);
+ __ movq(rdx, args.GetArgumentOperand(1));
+ __ movq(rbx, args.GetArgumentOperand(0));
__ movq(rdi, FieldOperand(rbx, JSObject::kMapOffset));
__ CheckFastElements(rdi, &double_elements);
@@ -6511,96 +6312,133 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
template<class T>
-static void CreateArrayDispatch(MacroAssembler* masm) {
- int last_index = GetSequenceIndexFromFastElementsKind(
- TERMINAL_FAST_ELEMENTS_KIND);
- for (int i = 0; i <= last_index; ++i) {
- Label next;
- ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
- __ cmpl(rdx, Immediate(kind));
- __ j(not_equal, &next);
- T stub(kind);
+static void CreateArrayDispatch(MacroAssembler* masm,
+ AllocationSiteOverrideMode mode) {
+ if (mode == DISABLE_ALLOCATION_SITES) {
+ T stub(GetInitialFastElementsKind(),
+ CONTEXT_CHECK_REQUIRED,
+ mode);
__ TailCallStub(&stub);
- __ bind(&next);
- }
+ } else if (mode == DONT_OVERRIDE) {
+ int last_index = GetSequenceIndexFromFastElementsKind(
+ TERMINAL_FAST_ELEMENTS_KIND);
+ for (int i = 0; i <= last_index; ++i) {
+ Label next;
+ ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
+ __ cmpl(rdx, Immediate(kind));
+ __ j(not_equal, &next);
+ T stub(kind);
+ __ TailCallStub(&stub);
+ __ bind(&next);
+ }
- // If we reached this point there is a problem.
- __ Abort(kUnexpectedElementsKindInArrayConstructor);
+ // If we reached this point there is a problem.
+ __ Abort(kUnexpectedElementsKindInArrayConstructor);
+ } else {
+ UNREACHABLE();
+ }
}
-static void CreateArrayDispatchOneArgument(MacroAssembler* masm) {
- // rbx - type info cell
- // rdx - kind
+static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
+ AllocationSiteOverrideMode mode) {
+ // rbx - type info cell (if mode != DISABLE_ALLOCATION_SITES)
+ // rdx - kind (if mode != DISABLE_ALLOCATION_SITES)
// rax - number of arguments
// rdi - constructor?
// rsp[0] - return address
// rsp[8] - last argument
- ASSERT(FAST_SMI_ELEMENTS == 0);
- ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
- ASSERT(FAST_ELEMENTS == 2);
- ASSERT(FAST_HOLEY_ELEMENTS == 3);
- ASSERT(FAST_DOUBLE_ELEMENTS == 4);
- ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
-
Handle<Object> undefined_sentinel(
masm->isolate()->heap()->undefined_value(),
masm->isolate());
- // is the low bit set? If so, we are holey and that is good.
- __ testb(rdx, Immediate(1));
Label normal_sequence;
- __ j(not_zero, &normal_sequence);
+ if (mode == DONT_OVERRIDE) {
+ ASSERT(FAST_SMI_ELEMENTS == 0);
+ ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+ ASSERT(FAST_ELEMENTS == 2);
+ ASSERT(FAST_HOLEY_ELEMENTS == 3);
+ ASSERT(FAST_DOUBLE_ELEMENTS == 4);
+ ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
+
+ // Is the low bit set? If so, we are holey and that is good.
+ __ testb(rdx, Immediate(1));
+ __ j(not_zero, &normal_sequence);
+ }
// look at the first argument
- __ movq(rcx, Operand(rsp, kPointerSize));
+ StackArgumentsAccessor args(rsp, 1, ARGUMENTS_DONT_CONTAIN_RECEIVER);
+ __ movq(rcx, args.GetArgumentOperand(0));
__ testq(rcx, rcx);
__ j(zero, &normal_sequence);
- // We are going to create a holey array, but our kind is non-holey.
- // Fix kind and retry (only if we have an allocation site in the cell).
- __ incl(rdx);
- __ Cmp(rbx, undefined_sentinel);
- __ j(equal, &normal_sequence);
- __ movq(rcx, FieldOperand(rbx, Cell::kValueOffset));
- Handle<Map> allocation_site_map(
- masm->isolate()->heap()->allocation_site_map(),
- masm->isolate());
- __ Cmp(FieldOperand(rcx, 0), allocation_site_map);
- __ j(not_equal, &normal_sequence);
+ if (mode == DISABLE_ALLOCATION_SITES) {
+ ElementsKind initial = GetInitialFastElementsKind();
+ ElementsKind holey_initial = GetHoleyElementsKind(initial);
- // Save the resulting elements kind in type info
- __ Integer32ToSmi(rdx, rdx);
- __ movq(FieldOperand(rcx, AllocationSite::kTransitionInfoOffset), rdx);
- __ SmiToInteger32(rdx, rdx);
+ ArraySingleArgumentConstructorStub stub_holey(holey_initial,
+ CONTEXT_CHECK_REQUIRED,
+ DISABLE_ALLOCATION_SITES);
+ __ TailCallStub(&stub_holey);
- __ bind(&normal_sequence);
- int last_index = GetSequenceIndexFromFastElementsKind(
- TERMINAL_FAST_ELEMENTS_KIND);
- for (int i = 0; i <= last_index; ++i) {
- Label next;
- ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
- __ cmpl(rdx, Immediate(kind));
- __ j(not_equal, &next);
- ArraySingleArgumentConstructorStub stub(kind);
+ __ bind(&normal_sequence);
+ ArraySingleArgumentConstructorStub stub(initial,
+ CONTEXT_CHECK_REQUIRED,
+ DISABLE_ALLOCATION_SITES);
__ TailCallStub(&stub);
- __ bind(&next);
- }
+ } else if (mode == DONT_OVERRIDE) {
+ // We are going to create a holey array, but our kind is non-holey.
+ // Fix kind and retry (only if we have an allocation site in the cell).
+ __ incl(rdx);
+ __ movq(rcx, FieldOperand(rbx, Cell::kValueOffset));
+ if (FLAG_debug_code) {
+ Handle<Map> allocation_site_map(
+ masm->isolate()->heap()->allocation_site_map(),
+ masm->isolate());
+ __ Cmp(FieldOperand(rcx, 0), allocation_site_map);
+ __ Assert(equal, kExpectedAllocationSiteInCell);
+ }
+
+ // Save the resulting elements kind in type info
+ __ Integer32ToSmi(rdx, rdx);
+ __ movq(FieldOperand(rcx, AllocationSite::kTransitionInfoOffset), rdx);
+ __ SmiToInteger32(rdx, rdx);
+
+ __ bind(&normal_sequence);
+ int last_index = GetSequenceIndexFromFastElementsKind(
+ TERMINAL_FAST_ELEMENTS_KIND);
+ for (int i = 0; i <= last_index; ++i) {
+ Label next;
+ ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
+ __ cmpl(rdx, Immediate(kind));
+ __ j(not_equal, &next);
+ ArraySingleArgumentConstructorStub stub(kind);
+ __ TailCallStub(&stub);
+ __ bind(&next);
+ }
- // If we reached this point there is a problem.
- __ Abort(kUnexpectedElementsKindInArrayConstructor);
+ // If we reached this point there is a problem.
+ __ Abort(kUnexpectedElementsKindInArrayConstructor);
+ } else {
+ UNREACHABLE();
+ }
}
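
The one-argument dispatch above leans on the elements-kind numbering the stub ASSERTs: packed kinds are even and their holey counterparts are odd, so the `__ incl(rdx)` converts a packed kind to its holey twin before retrying. A minimal standalone sketch of that invariant (the enum values are the ones asserted in the stub, not pulled from the real headers):

    #include <cassert>

    // Mirrors the packed->holey fix-up in CreateArrayDispatchOneArgument,
    // assuming the numbering the stub ASSERTs (packed kinds even, holey odd).
    enum ElementsKind {
      FAST_SMI_ELEMENTS = 0,
      FAST_HOLEY_SMI_ELEMENTS = 1,
      FAST_ELEMENTS = 2,
      FAST_HOLEY_ELEMENTS = 3,
      FAST_DOUBLE_ELEMENTS = 4,
      FAST_HOLEY_DOUBLE_ELEMENTS = 5
    };

    bool IsHoleyKind(int kind) { return (kind & 1) != 0; }

    int ToHoleyKind(int kind) {
      // The "__ incl(rdx)" in the stub: adding one to an even (packed) kind
      // selects the matching holey kind; holey kinds are left alone.
      return IsHoleyKind(kind) ? kind : kind + 1;
    }

    int main() {
      assert(ToHoleyKind(FAST_SMI_ELEMENTS) == FAST_HOLEY_SMI_ELEMENTS);
      assert(ToHoleyKind(FAST_DOUBLE_ELEMENTS) == FAST_HOLEY_DOUBLE_ELEMENTS);
      assert(ToHoleyKind(FAST_HOLEY_ELEMENTS) == FAST_HOLEY_ELEMENTS);
      return 0;
    }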
template<class T>
static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
+ ElementsKind initial_kind = GetInitialFastElementsKind();
+ ElementsKind initial_holey_kind = GetHoleyElementsKind(initial_kind);
+
int to_index = GetSequenceIndexFromFastElementsKind(
TERMINAL_FAST_ELEMENTS_KIND);
for (int i = 0; i <= to_index; ++i) {
ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
T stub(kind);
stub.GetCode(isolate)->set_is_pregenerated(true);
- if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
+ if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE ||
+ (!FLAG_track_allocation_sites &&
+ (kind == initial_kind || kind == initial_holey_kind))) {
T stub1(kind, CONTEXT_CHECK_REQUIRED, DISABLE_ALLOCATION_SITES);
stub1.GetCode(isolate)->set_is_pregenerated(true);
}
@@ -6633,6 +6471,34 @@ void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
}
+void ArrayConstructorStub::GenerateDispatchToArrayStub(
+ MacroAssembler* masm,
+ AllocationSiteOverrideMode mode) {
+ if (argument_count_ == ANY) {
+ Label not_zero_case, not_one_case;
+ __ testq(rax, rax);
+ __ j(not_zero, &not_zero_case);
+ CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
+
+ __ bind(&not_zero_case);
+ __ cmpl(rax, Immediate(1));
+ __ j(greater, &not_one_case);
+ CreateArrayDispatchOneArgument(masm, mode);
+
+ __ bind(&not_one_case);
+ CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
+ } else if (argument_count_ == NONE) {
+ CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
+ } else if (argument_count_ == ONE) {
+ CreateArrayDispatchOneArgument(masm, mode);
+ } else if (argument_count_ == MORE_THAN_ONE) {
+ CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
+ } else {
+ UNREACHABLE();
+ }
+}
+
+
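The helper above centralizes what Generate() previously inlined: with ANY the stub tests rax at runtime and fans out to the three specialized constructors, while the static counts pick one directly. A rough model of that decision tree (stub names as strings stand in for the real stub objects; purely illustrative):

    #include <cassert>
    #include <cstring>

    // Hypothetical model of GenerateDispatchToArrayStub's decision tree;
    // the enumerators and return strings are illustrative only.
    enum ArgcKey { ANY, NONE, ONE, MORE_THAN_ONE };

    const char* DispatchToArrayStub(ArgcKey key, int argc) {
      if (key == NONE || (key == ANY && argc == 0))
        return "ArrayNoArgumentConstructorStub";
      if (key == ONE || (key == ANY && argc == 1))
        return "ArraySingleArgumentConstructorStub";
      return "ArrayNArgumentsConstructorStub";  // MORE_THAN_ONE or argc > 1.
    }

    int main() {
      assert(strcmp(DispatchToArrayStub(ANY, 0),
                    "ArrayNoArgumentConstructorStub") == 0);
      assert(strcmp(DispatchToArrayStub(ANY, 2),
                    "ArrayNArgumentsConstructorStub") == 0);
      assert(strcmp(DispatchToArrayStub(ONE, 1),
                    "ArraySingleArgumentConstructorStub") == 0);
      return 0;
    }
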
void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : argc
@@ -6668,50 +6534,22 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
__ bind(&okay_here);
}
- Label no_info, switch_ready;
- // Get the elements kind and case on that.
+ Label no_info;
+ // If the type cell is undefined, or contains anything other than an
+ // AllocationSite, call an array constructor that doesn't use AllocationSites.
__ Cmp(rbx, undefined_sentinel);
__ j(equal, &no_info);
__ movq(rdx, FieldOperand(rbx, Cell::kValueOffset));
-
- // The type cell may have undefined in its value.
- __ Cmp(rdx, undefined_sentinel);
- __ j(equal, &no_info);
-
- // The type cell has either an AllocationSite or a JSFunction
__ Cmp(FieldOperand(rdx, 0),
Handle<Map>(masm->isolate()->heap()->allocation_site_map()));
__ j(not_equal, &no_info);
__ movq(rdx, FieldOperand(rdx, AllocationSite::kTransitionInfoOffset));
__ SmiToInteger32(rdx, rdx);
- __ jmp(&switch_ready);
- __ bind(&no_info);
- __ movq(rdx, Immediate(GetInitialFastElementsKind()));
- __ bind(&switch_ready);
-
- if (argument_count_ == ANY) {
- Label not_zero_case, not_one_case;
- __ testq(rax, rax);
- __ j(not_zero, &not_zero_case);
- CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm);
-
- __ bind(&not_zero_case);
- __ cmpl(rax, Immediate(1));
- __ j(greater, &not_one_case);
- CreateArrayDispatchOneArgument(masm);
+ GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);
- __ bind(&not_one_case);
- CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm);
- } else if (argument_count_ == NONE) {
- CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm);
- } else if (argument_count_ == ONE) {
- CreateArrayDispatchOneArgument(masm);
- } else if (argument_count_ == MORE_THAN_ONE) {
- CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm);
- } else {
- UNREACHABLE();
- }
+ __ bind(&no_info);
+ GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);
}
@@ -6732,7 +6570,8 @@ void InternalArrayConstructorStub::GenerateCase(
if (IsFastPackedElementsKind(kind)) {
// We might need to create a holey array
// look at the first argument
- __ movq(rcx, Operand(rsp, kPointerSize));
+ StackArgumentsAccessor args(rsp, 1, ARGUMENTS_DONT_CONTAIN_RECEIVER);
+ __ movq(rcx, args.GetArgumentOperand(0));
__ testq(rcx, rcx);
__ j(zero, &normal_sequence);
diff --git a/deps/v8/src/x64/code-stubs-x64.h b/deps/v8/src/x64/code-stubs-x64.h
index e430bf2c8..41678ecd2 100644
--- a/deps/v8/src/x64/code-stubs-x64.h
+++ b/deps/v8/src/x64/code-stubs-x64.h
@@ -69,7 +69,7 @@ class StoreBufferOverflowStub: public PlatformCodeStub {
void Generate(MacroAssembler* masm);
- virtual bool IsPregenerated() { return true; }
+ virtual bool IsPregenerated(Isolate* isolate) V8_OVERRIDE { return true; }
static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
virtual bool SometimesSetsUpAFrame() { return false; }
@@ -321,7 +321,7 @@ class RecordWriteStub: public PlatformCodeStub {
INCREMENTAL_COMPACTION
};
- virtual bool IsPregenerated();
+ virtual bool IsPregenerated(Isolate* isolate) V8_OVERRIDE;
static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
virtual bool SometimesSetsUpAFrame() { return false; }
diff --git a/deps/v8/src/x64/codegen-x64.cc b/deps/v8/src/x64/codegen-x64.cc
index a39f14b07..24773c259 100644
--- a/deps/v8/src/x64/codegen-x64.cc
+++ b/deps/v8/src/x64/codegen-x64.cc
@@ -744,6 +744,28 @@ void Code::PatchPlatformCodeAge(byte* sequence,
}
+Operand StackArgumentsAccessor::GetArgumentOperand(int index) {
+ ASSERT(index >= 0);
+ ASSERT(base_reg_.is(rsp) || base_reg_.is(rbp));
+ int receiver = (receiver_mode_ == ARGUMENTS_CONTAIN_RECEIVER) ? 1 : 0;
+ int displacement_to_last_argument = base_reg_.is(rsp) ?
+ kPCOnStackSize : kFPOnStackSize + kPCOnStackSize;
+ displacement_to_last_argument += extra_displacement_to_last_argument_;
+ if (argument_count_reg_.is(no_reg)) {
+ // argument[0] is at base_reg_ + displacement_to_last_argument +
+ // (argument_count_immediate_ + receiver - 1) * kPointerSize.
+ ASSERT(argument_count_immediate_ + receiver > 0);
+ return Operand(base_reg_, displacement_to_last_argument +
+ (argument_count_immediate_ + receiver - 1 - index) * kPointerSize);
+ } else {
+ // argument[0] is at base_reg_ + displacement_to_last_argument +
+ // argument_count_reg_ * times_pointer_size + (receiver - 1) * kPointerSize.
+ return Operand(base_reg_, argument_count_reg_, times_pointer_size,
+ displacement_to_last_argument + (receiver - 1 - index) * kPointerSize);
+ }
+}
+
+
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_X64
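
The displacement arithmetic above is the heart of this refactoring: every hand-counted `Operand(rsp, n * kPointerSize)` in the stubs becomes an index into the accessor, with index 0 naming the first-pushed (deepest) argument. A standalone sketch of the immediate-count path, assuming the x64 values kPointerSize == 8 and kPCOnStackSize == 8 (the register-count constructor applies the same formula with scaled indexing instead):

    #include <cassert>

    // Model of GetArgumentOperand's immediate-count path, assuming
    // kPointerSize == 8 and kPCOnStackSize == 8 on x64. argument[0] is the
    // first-pushed (deepest) argument; higher indices sit closer to rsp.
    int ArgumentDisplacement(int argc, bool contains_receiver, int index) {
      const int kPointerSize = 8;
      const int kPCOnStackSize = 8;  // Return address pushed by the call.
      int receiver = contains_receiver ? 1 : 0;
      return kPCOnStackSize + (argc + receiver - 1 - index) * kPointerSize;
    }

    int main() {
      // Three arguments, no receiver: GetArgumentOperand(0) is rsp + 24,
      // matching the old Operand(rsp, 3 * kPointerSize); the last argument
      // sits just above the return address at rsp + 8.
      assert(ArgumentDisplacement(3, false, 0) == 3 * 8);
      assert(ArgumentDisplacement(3, false, 2) == 1 * 8);
      // With the receiver on the stack, GetReceiverOperand() is index 0,
      // i.e. the old Operand(rsp, (argc + 1) * kPointerSize).
      assert(ArgumentDisplacement(2, true, 0) == (2 + 1) * 8);
      return 0;
    }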
diff --git a/deps/v8/src/x64/codegen-x64.h b/deps/v8/src/x64/codegen-x64.h
index 5747e0bc6..7d1f59ad5 100644
--- a/deps/v8/src/x64/codegen-x64.h
+++ b/deps/v8/src/x64/codegen-x64.h
@@ -44,8 +44,8 @@ enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
class CodeGenerator: public AstVisitor {
public:
- CodeGenerator() {
- InitializeAstVisitor();
+ explicit CodeGenerator(Isolate* isolate) {
+ InitializeAstVisitor(isolate);
}
static bool MakeCode(CompilationInfo* info);
@@ -61,7 +61,7 @@ class CodeGenerator: public AstVisitor {
// Print the code after compiling it.
static void PrintCode(Handle<Code> code, CompilationInfo* info);
- static bool ShouldGenerateLog(Expression* type);
+ static bool ShouldGenerateLog(Isolate* isolate, Expression* type);
static bool RecordPositions(MacroAssembler* masm,
int pos,
@@ -103,6 +103,73 @@ class MathExpGenerator : public AllStatic {
DISALLOW_COPY_AND_ASSIGN(MathExpGenerator);
};
+
+enum StackArgumentsAccessorReceiverMode {
+ ARGUMENTS_CONTAIN_RECEIVER,
+ ARGUMENTS_DONT_CONTAIN_RECEIVER
+};
+
+
+class StackArgumentsAccessor BASE_EMBEDDED {
+ public:
+ StackArgumentsAccessor(
+ Register base_reg,
+ int argument_count_immediate,
+ StackArgumentsAccessorReceiverMode receiver_mode =
+ ARGUMENTS_CONTAIN_RECEIVER,
+ int extra_displacement_to_last_argument = 0)
+ : base_reg_(base_reg),
+ argument_count_reg_(no_reg),
+ argument_count_immediate_(argument_count_immediate),
+ receiver_mode_(receiver_mode),
+ extra_displacement_to_last_argument_(
+ extra_displacement_to_last_argument) { }
+
+ StackArgumentsAccessor(
+ Register base_reg,
+ Register argument_count_reg,
+ StackArgumentsAccessorReceiverMode receiver_mode =
+ ARGUMENTS_CONTAIN_RECEIVER,
+ int extra_displacement_to_last_argument = 0)
+ : base_reg_(base_reg),
+ argument_count_reg_(argument_count_reg),
+ argument_count_immediate_(0),
+ receiver_mode_(receiver_mode),
+ extra_displacement_to_last_argument_(
+ extra_displacement_to_last_argument) { }
+
+ StackArgumentsAccessor(
+ Register base_reg,
+ const ParameterCount& parameter_count,
+ StackArgumentsAccessorReceiverMode receiver_mode =
+ ARGUMENTS_CONTAIN_RECEIVER,
+ int extra_displacement_to_last_argument = 0)
+ : base_reg_(base_reg),
+ argument_count_reg_(parameter_count.is_reg() ?
+ parameter_count.reg() : no_reg),
+ argument_count_immediate_(parameter_count.is_immediate() ?
+ parameter_count.immediate() : 0),
+ receiver_mode_(receiver_mode),
+ extra_displacement_to_last_argument_(
+ extra_displacement_to_last_argument) { }
+
+ Operand GetArgumentOperand(int index);
+ Operand GetReceiverOperand() {
+ ASSERT(receiver_mode_ == ARGUMENTS_CONTAIN_RECEIVER);
+ return GetArgumentOperand(0);
+ }
+
+ private:
+ const Register base_reg_;
+ const Register argument_count_reg_;
+ const int argument_count_immediate_;
+ const StackArgumentsAccessorReceiverMode receiver_mode_;
+ const int extra_displacement_to_last_argument_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(StackArgumentsAccessor);
+};
+
+
} } // namespace v8::internal
#endif // V8_X64_CODEGEN_X64_H_
diff --git a/deps/v8/src/x64/cpu-x64.cc b/deps/v8/src/x64/cpu-x64.cc
index 96c533083..4fa290a8b 100644
--- a/deps/v8/src/x64/cpu-x64.cc
+++ b/deps/v8/src/x64/cpu-x64.cc
@@ -72,18 +72,6 @@ void CPU::FlushICache(void* start, size_t size) {
#endif
}
-
-void CPU::DebugBreak() {
-#ifdef _MSC_VER
- // To avoid Visual Studio runtime support the following code can be used
- // instead
- // __asm { int 3 }
- __debugbreak();
-#else
- asm("int $3");
-#endif
-}
-
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_X64
diff --git a/deps/v8/src/x64/debug-x64.cc b/deps/v8/src/x64/debug-x64.cc
index e6bc92950..6612242a0 100644
--- a/deps/v8/src/x64/debug-x64.cc
+++ b/deps/v8/src/x64/debug-x64.cc
@@ -50,7 +50,7 @@ bool BreakLocationIterator::IsDebugBreakAtReturn() {
void BreakLocationIterator::SetDebugBreakAtReturn() {
ASSERT(Assembler::kJSReturnSequenceLength >= Assembler::kCallSequenceLength);
rinfo()->PatchCodeWithCall(
- Isolate::Current()->debug()->debug_break_return()->entry(),
+ debug_info_->GetIsolate()->debug()->debug_break_return()->entry(),
Assembler::kJSReturnSequenceLength - Assembler::kCallSequenceLength);
}
@@ -80,7 +80,7 @@ bool BreakLocationIterator::IsDebugBreakAtSlot() {
void BreakLocationIterator::SetDebugBreakAtSlot() {
ASSERT(IsDebugBreakSlot());
rinfo()->PatchCodeWithCall(
- Isolate::Current()->debug()->debug_break_slot()->entry(),
+ debug_info_->GetIsolate()->debug()->debug_break_slot()->entry(),
Assembler::kDebugBreakSlotLength - Assembler::kCallSequenceLength);
}
@@ -123,14 +123,8 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
if ((object_regs & (1 << r)) != 0) {
__ push(reg);
}
- // Store the 64-bit value as two smis.
if ((non_object_regs & (1 << r)) != 0) {
- __ movq(kScratchRegister, reg);
- __ Integer32ToSmi(reg, reg);
- __ push(reg);
- __ sar(kScratchRegister, Immediate(32));
- __ Integer32ToSmi(kScratchRegister, kScratchRegister);
- __ push(kScratchRegister);
+ __ PushInt64AsTwoSmis(reg);
}
}
@@ -155,12 +149,7 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
}
// Reconstruct the 64-bit value from two smis.
if ((non_object_regs & (1 << r)) != 0) {
- __ pop(kScratchRegister);
- __ SmiToInteger32(kScratchRegister, kScratchRegister);
- __ shl(kScratchRegister, Immediate(32));
- __ pop(reg);
- __ SmiToInteger32(reg, reg);
- __ or_(reg, kScratchRegister);
+ __ PopInt64AsTwoSmis(reg);
}
}
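
PushInt64AsTwoSmis/PopInt64AsTwoSmis fold away the open-coded sequence that used to live here: split a raw 64-bit register into two 32-bit halves and tag each as a smi, so the GC never sees an untagged word on the stack while the debugger is in a JS frame. A sketch of the round trip, assuming V8's x64 smi layout (32-bit payload in the upper half of the word):

    #include <cassert>
    #include <cstdint>

    // Sketch of the two-smi encoding replaced by PushInt64AsTwoSmis and
    // PopInt64AsTwoSmis above. Assumes the x64 smi layout: the 32-bit
    // payload lives in the upper 32 bits of a 64-bit word.
    uint64_t ToSmi(int32_t value) {
      return static_cast<uint64_t>(static_cast<int64_t>(value)) << 32;
    }

    int32_t FromSmi(uint64_t smi) {
      return static_cast<int32_t>(static_cast<int64_t>(smi) >> 32);
    }

    int main() {
      int64_t original = INT64_C(0x123456789abcdef0);
      // Push: tag the low half, then the arithmetically-shifted high half.
      uint64_t low_smi = ToSmi(static_cast<int32_t>(original));
      uint64_t high_smi = ToSmi(static_cast<int32_t>(original >> 32));
      // Pop: reconstruct the 64-bit value from the two tagged halves.
      int64_t restored =
          (static_cast<int64_t>(FromSmi(high_smi)) << 32) |
          static_cast<uint32_t>(FromSmi(low_smi));
      assert(restored == original);
      return 0;
    }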
diff --git a/deps/v8/src/x64/deoptimizer-x64.cc b/deps/v8/src/x64/deoptimizer-x64.cc
index e9cf567f7..303b756ca 100644
--- a/deps/v8/src/x64/deoptimizer-x64.cc
+++ b/deps/v8/src/x64/deoptimizer-x64.cc
@@ -105,12 +105,7 @@ static const byte kNopByteTwo = 0x90;
void Deoptimizer::PatchInterruptCodeAt(Code* unoptimized_code,
Address pc_after,
- Code* interrupt_code,
Code* replacement_code) {
- ASSERT(!InterruptCodeIsPatched(unoptimized_code,
- pc_after,
- interrupt_code,
- replacement_code));
// Turn the jump into nops.
Address call_target_address = pc_after - kIntSize;
*(call_target_address - 3) = kNopByteOne;
@@ -126,12 +121,7 @@ void Deoptimizer::PatchInterruptCodeAt(Code* unoptimized_code,
void Deoptimizer::RevertInterruptCodeAt(Code* unoptimized_code,
Address pc_after,
- Code* interrupt_code,
- Code* replacement_code) {
- ASSERT(InterruptCodeIsPatched(unoptimized_code,
- pc_after,
- interrupt_code,
- replacement_code));
+ Code* interrupt_code) {
// Restore the original jump.
Address call_target_address = pc_after - kIntSize;
*(call_target_address - 3) = kJnsInstruction;
@@ -146,195 +136,33 @@ void Deoptimizer::RevertInterruptCodeAt(Code* unoptimized_code,
#ifdef DEBUG
-bool Deoptimizer::InterruptCodeIsPatched(Code* unoptimized_code,
- Address pc_after,
- Code* interrupt_code,
- Code* replacement_code) {
+Deoptimizer::InterruptPatchState Deoptimizer::GetInterruptPatchState(
+ Isolate* isolate,
+ Code* unoptimized_code,
+ Address pc_after) {
Address call_target_address = pc_after - kIntSize;
ASSERT_EQ(kCallInstruction, *(call_target_address - 1));
if (*(call_target_address - 3) == kNopByteOne) {
- ASSERT(replacement_code->entry() ==
- Assembler::target_address_at(call_target_address));
ASSERT_EQ(kNopByteTwo, *(call_target_address - 2));
- return true;
+ Code* osr_builtin =
+ isolate->builtins()->builtin(Builtins::kOnStackReplacement);
+ ASSERT_EQ(osr_builtin->entry(),
+ Assembler::target_address_at(call_target_address));
+ return PATCHED_FOR_OSR;
} else {
- ASSERT_EQ(interrupt_code->entry(),
+ // Get the interrupt stub code object to match against from the builtins cache.
+ Code* interrupt_builtin =
+ isolate->builtins()->builtin(Builtins::kInterruptCheck);
+ ASSERT_EQ(interrupt_builtin->entry(),
Assembler::target_address_at(call_target_address));
ASSERT_EQ(kJnsInstruction, *(call_target_address - 3));
ASSERT_EQ(kJnsOffset, *(call_target_address - 2));
- return false;
+ return NOT_PATCHED;
}
}
#endif // DEBUG
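
GetInterruptPatchState replaces the old boolean check: the two bytes ahead of the call's 32-bit operand tell whether the back edge still holds the original jns of the interrupt check or has been nopped out to call the OSR builtin. A toy model of that byte-level test; the opcode values are assumptions mirroring the constants defined earlier in this file (kNopByteTwo = 0x90, etc.), and the jns displacement is illustrative:

    #include <cassert>
    #include <cstdint>

    // Toy model of GetInterruptPatchState: inspect the two bytes before the
    // call's 32-bit operand. Values mirror the file-local constants above.
    const uint8_t kJnsInstruction = 0x79;
    const uint8_t kJnsOffset = 0x1d;      // Illustrative jump displacement.
    const uint8_t kNopByteOne = 0x66;
    const uint8_t kNopByteTwo = 0x90;

    bool PatchedForOsr(const uint8_t* call_target_address) {
      return call_target_address[-3] == kNopByteOne &&
             call_target_address[-2] == kNopByteTwo;
    }

    int main() {
      // code[0..1]: jns; code[2]: call opcode; code[3..6]: call operand.
      uint8_t code[7] = {kJnsInstruction, kJnsOffset, 0xe8, 0, 0, 0, 0};
      uint8_t* call_target_address = code + 3;
      assert(!PatchedForOsr(call_target_address));
      // PatchInterruptCodeAt: overwrite the jns with a two-byte nop.
      call_target_address[-3] = kNopByteOne;
      call_target_address[-2] = kNopByteTwo;
      assert(PatchedForOsr(call_target_address));
      return 0;
    }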
-static int LookupBailoutId(DeoptimizationInputData* data, BailoutId ast_id) {
- ByteArray* translations = data->TranslationByteArray();
- int length = data->DeoptCount();
- for (int i = 0; i < length; i++) {
- if (data->AstId(i) == ast_id) {
- TranslationIterator it(translations, data->TranslationIndex(i)->value());
- int value = it.Next();
- ASSERT(Translation::BEGIN == static_cast<Translation::Opcode>(value));
- // Read the number of frames.
- value = it.Next();
- if (value == 1) return i;
- }
- }
- UNREACHABLE();
- return -1;
-}
-
-
-void Deoptimizer::DoComputeOsrOutputFrame() {
- DeoptimizationInputData* data = DeoptimizationInputData::cast(
- compiled_code_->deoptimization_data());
- unsigned ast_id = data->OsrAstId()->value();
- // TODO(kasperl): This should not be the bailout_id_. It should be
- // the ast id. Confusing.
- ASSERT(bailout_id_ == ast_id);
-
- int bailout_id = LookupBailoutId(data, BailoutId(ast_id));
- unsigned translation_index = data->TranslationIndex(bailout_id)->value();
- ByteArray* translations = data->TranslationByteArray();
-
- TranslationIterator iterator(translations, translation_index);
- Translation::Opcode opcode =
- static_cast<Translation::Opcode>(iterator.Next());
- ASSERT(Translation::BEGIN == opcode);
- USE(opcode);
- int count = iterator.Next();
- iterator.Skip(1); // Drop JS frame count.
- ASSERT(count == 1);
- USE(count);
-
- opcode = static_cast<Translation::Opcode>(iterator.Next());
- USE(opcode);
- ASSERT(Translation::JS_FRAME == opcode);
- unsigned node_id = iterator.Next();
- USE(node_id);
- ASSERT(node_id == ast_id);
- int closure_id = iterator.Next();
- USE(closure_id);
- ASSERT_EQ(Translation::kSelfLiteralId, closure_id);
- unsigned height = iterator.Next();
- unsigned height_in_bytes = height * kPointerSize;
- USE(height_in_bytes);
-
- unsigned fixed_size = ComputeFixedSize(function_);
- unsigned input_frame_size = input_->GetFrameSize();
- ASSERT(fixed_size + height_in_bytes == input_frame_size);
-
- unsigned stack_slot_size = compiled_code_->stack_slots() * kPointerSize;
- unsigned outgoing_height = data->ArgumentsStackHeight(bailout_id)->value();
- unsigned outgoing_size = outgoing_height * kPointerSize;
- unsigned output_frame_size = fixed_size + stack_slot_size + outgoing_size;
- ASSERT(outgoing_size == 0); // OSR does not happen in the middle of a call.
-
- if (FLAG_trace_osr) {
- PrintF("[on-stack replacement: begin 0x%08" V8PRIxPTR " ",
- reinterpret_cast<intptr_t>(function_));
- PrintFunctionName();
- PrintF(" => node=%u, frame=%d->%d]\n",
- ast_id,
- input_frame_size,
- output_frame_size);
- }
-
- // There's only one output frame in the OSR case.
- output_count_ = 1;
- output_ = new FrameDescription*[1];
- output_[0] = new(output_frame_size) FrameDescription(
- output_frame_size, function_);
- output_[0]->SetFrameType(StackFrame::JAVA_SCRIPT);
-
- // Clear the incoming parameters in the optimized frame to avoid
- // confusing the garbage collector.
- unsigned output_offset = output_frame_size - kPointerSize;
- int parameter_count = function_->shared()->formal_parameter_count() + 1;
- for (int i = 0; i < parameter_count; ++i) {
- output_[0]->SetFrameSlot(output_offset, 0);
- output_offset -= kPointerSize;
- }
-
- // Translate the incoming parameters. This may overwrite some of the
- // incoming argument slots we've just cleared.
- int input_offset = input_frame_size - kPointerSize;
- bool ok = true;
- int limit = input_offset - (parameter_count * kPointerSize);
- while (ok && input_offset > limit) {
- ok = DoOsrTranslateCommand(&iterator, &input_offset);
- }
-
- // There are no translation commands for the caller's pc and fp, the
- // context, and the function. Set them up explicitly.
- for (int i = StandardFrameConstants::kCallerPCOffset;
- ok && i >= StandardFrameConstants::kMarkerOffset;
- i -= kPointerSize) {
- intptr_t input_value = input_->GetFrameSlot(input_offset);
- if (FLAG_trace_osr) {
- const char* name = "UNKNOWN";
- switch (i) {
- case StandardFrameConstants::kCallerPCOffset:
- name = "caller's pc";
- break;
- case StandardFrameConstants::kCallerFPOffset:
- name = "fp";
- break;
- case StandardFrameConstants::kContextOffset:
- name = "context";
- break;
- case StandardFrameConstants::kMarkerOffset:
- name = "function";
- break;
- }
- PrintF(" [rsp + %d] <- 0x%08" V8PRIxPTR " ; [rsp + %d] "
- "(fixed part - %s)\n",
- output_offset,
- input_value,
- input_offset,
- name);
- }
- output_[0]->SetFrameSlot(output_offset, input_->GetFrameSlot(input_offset));
- input_offset -= kPointerSize;
- output_offset -= kPointerSize;
- }
-
- // Translate the rest of the frame.
- while (ok && input_offset >= 0) {
- ok = DoOsrTranslateCommand(&iterator, &input_offset);
- }
-
- // If translation of any command failed, continue using the input frame.
- if (!ok) {
- delete output_[0];
- output_[0] = input_;
- output_[0]->SetPc(reinterpret_cast<intptr_t>(from_));
- } else {
- // Set up the frame pointer and the context pointer.
- output_[0]->SetRegister(rbp.code(), input_->GetRegister(rbp.code()));
- output_[0]->SetRegister(rsi.code(), input_->GetRegister(rsi.code()));
-
- unsigned pc_offset = data->OsrPcOffset()->value();
- intptr_t pc = reinterpret_cast<intptr_t>(
- compiled_code_->entry() + pc_offset);
- output_[0]->SetPc(pc);
- }
- Code* continuation =
- function_->GetIsolate()->builtins()->builtin(Builtins::kNotifyOSR);
- output_[0]->SetContinuation(
- reinterpret_cast<intptr_t>(continuation->entry()));
-
- if (FLAG_trace_osr) {
- PrintF("[on-stack replacement translation %s: 0x%08" V8PRIxPTR " ",
- ok ? "finished" : "aborted",
- reinterpret_cast<intptr_t>(function_));
- PrintFunctionName();
- PrintF(" => pc=0x%0" V8PRIxPTR "]\n", output_[0]->GetPc());
- }
-}
-
-
void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
// Set the register values. The values are not important as there are no
// callee saved registers in JavaScript frames, so all registers are
@@ -531,9 +359,7 @@ void Deoptimizer::EntryGenerator::Generate() {
}
// Push state, pc, and continuation from the last output frame.
- if (type() != OSR) {
- __ push(Operand(rbx, FrameDescription::state_offset()));
- }
+ __ push(Operand(rbx, FrameDescription::state_offset()));
__ push(Operand(rbx, FrameDescription::pc_offset()));
__ push(Operand(rbx, FrameDescription::continuation_offset()));
diff --git a/deps/v8/src/x64/disasm-x64.cc b/deps/v8/src/x64/disasm-x64.cc
index eefa70372..9984a4630 100644
--- a/deps/v8/src/x64/disasm-x64.cc
+++ b/deps/v8/src/x64/disasm-x64.cc
@@ -332,10 +332,10 @@ class DisassemblerX64 {
private:
enum OperandSize {
- BYTE_SIZE = 0,
- WORD_SIZE = 1,
- DOUBLEWORD_SIZE = 2,
- QUADWORD_SIZE = 3
+ OPERAND_BYTE_SIZE = 0,
+ OPERAND_WORD_SIZE = 1,
+ OPERAND_DOUBLEWORD_SIZE = 2,
+ OPERAND_QUADWORD_SIZE = 3
};
const NameConverter& converter_;
@@ -369,10 +369,10 @@ class DisassemblerX64 {
bool rex_w() { return (rex_ & 0x08) != 0; }
OperandSize operand_size() {
- if (byte_size_operand_) return BYTE_SIZE;
- if (rex_w()) return QUADWORD_SIZE;
- if (operand_size_ != 0) return WORD_SIZE;
- return DOUBLEWORD_SIZE;
+ if (byte_size_operand_) return OPERAND_BYTE_SIZE;
+ if (rex_w()) return OPERAND_QUADWORD_SIZE;
+ if (operand_size_ != 0) return OPERAND_WORD_SIZE;
+ return OPERAND_DOUBLEWORD_SIZE;
}
char operand_size_code() {
@@ -562,19 +562,19 @@ int DisassemblerX64::PrintImmediate(byte* data, OperandSize size) {
int64_t value;
int count;
switch (size) {
- case BYTE_SIZE:
+ case OPERAND_BYTE_SIZE:
value = *data;
count = 1;
break;
- case WORD_SIZE:
+ case OPERAND_WORD_SIZE:
value = *reinterpret_cast<int16_t*>(data);
count = 2;
break;
- case DOUBLEWORD_SIZE:
+ case OPERAND_DOUBLEWORD_SIZE:
value = *reinterpret_cast<uint32_t*>(data);
count = 4;
break;
- case QUADWORD_SIZE:
+ case OPERAND_QUADWORD_SIZE:
value = *reinterpret_cast<int32_t*>(data);
count = 4;
break;
@@ -682,7 +682,8 @@ int DisassemblerX64::PrintImmediateOp(byte* data) {
AppendToBuffer("%s%c ", mnem, operand_size_code());
int count = PrintRightOperand(data + 1);
AppendToBuffer(",0x");
- OperandSize immediate_size = byte_size_immediate ? BYTE_SIZE : operand_size();
+ OperandSize immediate_size =
+ byte_size_immediate ? OPERAND_BYTE_SIZE : operand_size();
count += PrintImmediate(data + 1 + count, immediate_size);
return 1 + count;
}
@@ -1153,6 +1154,25 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
get_modrm(*current, &mod, &regop, &rm);
AppendToBuffer("%s %s,", mnemonic, NameOfXMMRegister(regop));
current += PrintRightXMMOperand(current);
+ } else if (opcode == 0xC2) {
+ // Intel manual 2A, Table 3-18.
+ int mod, regop, rm;
+ get_modrm(*current, &mod, &regop, &rm);
+ const char* const pseudo_op[] = {
+ "cmpeqsd",
+ "cmpltsd",
+ "cmplesd",
+ "cmpunordsd",
+ "cmpneqsd",
+ "cmpnltsd",
+ "cmpnlesd",
+ "cmpordsd"
+ };
+ AppendToBuffer("%s %s,%s",
+ pseudo_op[current[1]],
+ NameOfXMMRegister(regop),
+ NameOfXMMRegister(rm));
+ current += 2;
} else {
UnimplementedInstruction();
}
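
The new 0xC2 case decodes the SSE2 scalar-double compare: the immediate byte following the ModRM byte selects one of eight predicates (Intel SDM Table 3-18), which the disassembler prints as a pseudo-op. A self-contained sketch of that lookup, with the fallback mnemonic as an assumption:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    // Mirrors the pseudo_op table above: the cmpsd immediate selects the
    // predicate mnemonic. The plain "cmpsd" fallback for out-of-range
    // immediates is illustrative, not taken from the disassembler.
    const char* CmpsdMnemonic(uint8_t predicate) {
      static const char* const kPseudoOps[] = {
          "cmpeqsd",  "cmpltsd",  "cmplesd",  "cmpunordsd",
          "cmpneqsd", "cmpnltsd", "cmpnlesd", "cmpordsd"};
      return predicate < 8 ? kPseudoOps[predicate] : "cmpsd";
    }

    int main() {
      assert(strcmp(CmpsdMnemonic(0), "cmpeqsd") == 0);
      assert(strcmp(CmpsdMnemonic(4), "cmpneqsd") == 0);
      assert(strcmp(CmpsdMnemonic(9), "cmpsd") == 0);
      return 0;
    }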
@@ -1229,8 +1249,8 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
current += PrintRightXMMOperand(current);
AppendToBuffer(", %s", NameOfXMMRegister(regop));
- } else if (opcode == 0xA2 || opcode == 0x31) {
- // RDTSC or CPUID
+ } else if (opcode == 0xA2) {
+ // CPUID
AppendToBuffer("%s", mnemonic);
} else if ((opcode & 0xF0) == 0x40) {
@@ -1294,14 +1314,14 @@ const char* DisassemblerX64::TwoByteMnemonic(byte opcode) {
return "nop";
case 0x2A: // F2/F3 prefix.
return "cvtsi2s";
- case 0x31:
- return "rdtsc";
case 0x51: // F2 prefix.
return "sqrtsd";
case 0x58: // F2 prefix.
return "addsd";
case 0x59: // F2 prefix.
return "mulsd";
+ case 0x5A: // F2 prefix.
+ return "cvtsd2ss";
case 0x5C: // F2 prefix.
return "subsd";
case 0x5E: // F2 prefix.
@@ -1398,15 +1418,15 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
case MOVE_REG_INSTR: {
byte* addr = NULL;
switch (operand_size()) {
- case WORD_SIZE:
+ case OPERAND_WORD_SIZE:
addr = reinterpret_cast<byte*>(*reinterpret_cast<int16_t*>(data + 1));
data += 3;
break;
- case DOUBLEWORD_SIZE:
+ case OPERAND_DOUBLEWORD_SIZE:
addr = reinterpret_cast<byte*>(*reinterpret_cast<int32_t*>(data + 1));
data += 5;
break;
- case QUADWORD_SIZE:
+ case OPERAND_QUADWORD_SIZE:
addr = reinterpret_cast<byte*>(*reinterpret_cast<int64_t*>(data + 1));
data += 9;
break;
@@ -1611,11 +1631,11 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
AppendToBuffer("mov%c %s, ",
operand_size_code(),
NameOfCPURegister(reg));
- data += PrintImmediate(data, DOUBLEWORD_SIZE);
+ data += PrintImmediate(data, OPERAND_DOUBLEWORD_SIZE);
} else {
AppendToBuffer("movb %s, ",
NameOfByteCPURegister(reg));
- data += PrintImmediate(data, BYTE_SIZE);
+ data += PrintImmediate(data, OPERAND_BYTE_SIZE);
}
break;
}
@@ -1644,7 +1664,7 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
case 0xA1: // Fall through.
case 0xA3:
switch (operand_size()) {
- case DOUBLEWORD_SIZE: {
+ case OPERAND_DOUBLEWORD_SIZE: {
const char* memory_location = NameOfAddress(
reinterpret_cast<byte*>(
*reinterpret_cast<int32_t*>(data + 1)));
@@ -1656,7 +1676,7 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
data += 5;
break;
}
- case QUADWORD_SIZE: {
+ case OPERAND_QUADWORD_SIZE: {
// New x64 instruction mov rax,(imm_64).
const char* memory_location = NameOfAddress(
*reinterpret_cast<byte**>(data + 1));
@@ -1682,15 +1702,15 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
case 0xA9: {
int64_t value = 0;
switch (operand_size()) {
- case WORD_SIZE:
+ case OPERAND_WORD_SIZE:
value = *reinterpret_cast<uint16_t*>(data + 1);
data += 3;
break;
- case DOUBLEWORD_SIZE:
+ case OPERAND_DOUBLEWORD_SIZE:
value = *reinterpret_cast<uint32_t*>(data + 1);
data += 5;
break;
- case QUADWORD_SIZE:
+ case OPERAND_QUADWORD_SIZE:
value = *reinterpret_cast<int32_t*>(data + 1);
data += 5;
break;
diff --git a/deps/v8/src/x64/full-codegen-x64.cc b/deps/v8/src/x64/full-codegen-x64.cc
index 6333e87be..c24512eca 100644
--- a/deps/v8/src/x64/full-codegen-x64.cc
+++ b/deps/v8/src/x64/full-codegen-x64.cc
@@ -280,8 +280,7 @@ void FullCodeGenerator::Generate() {
Label ok;
__ CompareRoot(rsp, Heap::kStackLimitRootIndex);
__ j(above_equal, &ok, Label::kNear);
- StackCheckStub stub;
- __ CallStub(&stub);
+ __ call(isolate()->builtins()->StackCheck(), RelocInfo::CODE_TARGET);
__ bind(&ok);
}
@@ -341,8 +340,7 @@ void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
}
EmitProfilingCounterDecrement(weight);
__ j(positive, &ok, Label::kNear);
- InterruptStub stub;
- __ CallStub(&stub);
+ __ call(isolate()->builtins()->InterruptCheck(), RelocInfo::CODE_TARGET);
// Record a mapping of this PC offset to the OSR id. This is used to find
// the AST id from the unoptimized code in order to use it as a key into
@@ -388,8 +386,8 @@ void FullCodeGenerator::EmitReturnSequence() {
__ push(Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
__ CallRuntime(Runtime::kOptimizeFunctionOnNextCall, 1);
} else {
- InterruptStub stub;
- __ CallStub(&stub);
+ __ call(isolate()->builtins()->InterruptCheck(),
+ RelocInfo::CODE_TARGET);
}
__ pop(rax);
EmitProfilingCounterReset();
@@ -1292,7 +1290,7 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
scope()->is_function_scope() &&
info->num_literals() == 0) {
FastNewClosureStub stub(info->language_mode(), info->is_generator());
- __ Push(info);
+ __ Move(rbx, info);
__ CallStub(&stub);
} else {
__ push(rsi);
@@ -2937,7 +2935,7 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
VisitForAccumulatorValue(args->at(0));
- Label materialize_true, materialize_false;
+ Label materialize_true, materialize_false, skip_lookup;
Label* if_true = NULL;
Label* if_false = NULL;
Label* fall_through = NULL;
@@ -2951,7 +2949,7 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
__ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
__ testb(FieldOperand(rbx, Map::kBitField2Offset),
Immediate(1 << Map::kStringWrapperSafeForDefaultValueOf));
- __ j(not_zero, if_true);
+ __ j(not_zero, &skip_lookup);
// Check for fast case object. Generate false result for slow case object.
__ movq(rcx, FieldOperand(rax, JSObject::kPropertiesOffset));
@@ -2969,7 +2967,7 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
__ cmpq(rcx, Immediate(0));
__ j(equal, &done);
- __ LoadInstanceDescriptors(rbx, rbx);
+ __ LoadInstanceDescriptors(rbx, r8);
// rbx: descriptor array.
// rcx: valid entries in the descriptor array.
// Calculate the end of the descriptor array.
@@ -2977,24 +2975,28 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
SmiIndex index = masm_->SmiToIndex(rdx, rcx, kPointerSizeLog2);
__ lea(rcx,
Operand(
- rbx, index.reg, index.scale, DescriptorArray::kFirstOffset));
+ r8, index.reg, index.scale, DescriptorArray::kFirstOffset));
// Calculate location of the first key name.
- __ addq(rbx, Immediate(DescriptorArray::kFirstOffset));
+ __ addq(r8, Immediate(DescriptorArray::kFirstOffset));
// Loop through all the keys in the descriptor array. If one of these is the
// internalized string "valueOf" the result is false.
__ jmp(&entry);
__ bind(&loop);
- __ movq(rdx, FieldOperand(rbx, 0));
+ __ movq(rdx, FieldOperand(r8, 0));
__ Cmp(rdx, isolate()->factory()->value_of_string());
__ j(equal, if_false);
- __ addq(rbx, Immediate(DescriptorArray::kDescriptorSize * kPointerSize));
+ __ addq(r8, Immediate(DescriptorArray::kDescriptorSize * kPointerSize));
__ bind(&entry);
- __ cmpq(rbx, rcx);
+ __ cmpq(r8, rcx);
__ j(not_equal, &loop);
__ bind(&done);
- // Reload map as register rbx was used as temporary above.
- __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
+
+ // Set the bit in the map to indicate that there is no local valueOf field.
+ __ or_(FieldOperand(rbx, Map::kBitField2Offset),
+ Immediate(1 << Map::kStringWrapperSafeForDefaultValueOf));
+
+ __ bind(&skip_lookup);
// If a valueOf property is not found on the object check that its
// prototype is the un-modified String prototype. If not result is false.
@@ -3006,14 +3008,9 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
__ movq(rdx, FieldOperand(rdx, GlobalObject::kNativeContextOffset));
__ cmpq(rcx,
ContextOperand(rdx, Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX));
- __ j(not_equal, if_false);
- // Set the bit in the map to indicate that it has been checked safe for
- // default valueOf and set true result.
- __ or_(FieldOperand(rbx, Map::kBitField2Offset),
- Immediate(1 << Map::kStringWrapperSafeForDefaultValueOf));
- __ jmp(if_true);
-
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(equal, if_true, if_false, fall_through);
+
context()->Plug(if_true, if_false);
}
@@ -3249,7 +3246,7 @@ void FullCodeGenerator::EmitLog(CallRuntime* expr) {
// 2 (array): Arguments to the format string.
ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(args->length(), 3);
- if (CodeGenerator::ShouldGenerateLog(args->at(0))) {
+ if (CodeGenerator::ShouldGenerateLog(isolate(), args->at(0))) {
VisitForStackValue(args->at(1));
VisitForStackValue(args->at(2));
__ CallRuntime(Runtime::kLog, 2);
diff --git a/deps/v8/src/x64/ic-x64.cc b/deps/v8/src/x64/ic-x64.cc
index 4837b9aa9..4a7c68a53 100644
--- a/deps/v8/src/x64/ic-x64.cc
+++ b/deps/v8/src/x64/ic-x64.cc
@@ -822,8 +822,8 @@ void CallICBase::GenerateMonomorphicCacheProbe(MacroAssembler* masm,
extra_state,
Code::NORMAL,
argc);
- Isolate::Current()->stub_cache()->GenerateProbe(masm, flags, rdx, rcx, rbx,
- rax);
+ masm->isolate()->stub_cache()->GenerateProbe(
+ masm, flags, rdx, rcx, rbx, rax);
// If the stub cache probing failed, the receiver might be a value.
// For value objects, we use the map of the prototype objects for
@@ -859,8 +859,8 @@ void CallICBase::GenerateMonomorphicCacheProbe(MacroAssembler* masm,
// Probe the stub cache for the value object.
__ bind(&probe);
- Isolate::Current()->stub_cache()->GenerateProbe(masm, flags, rdx, rcx, rbx,
- no_reg);
+ masm->isolate()->stub_cache()->GenerateProbe(
+ masm, flags, rdx, rcx, rbx, no_reg);
__ bind(&miss);
}
@@ -904,8 +904,8 @@ void CallICBase::GenerateNormal(MacroAssembler* masm, int argc) {
// -----------------------------------
Label miss;
- // Get the receiver of the function from the stack.
- __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
+ StackArgumentsAccessor args(rsp, argc);
+ __ movq(rdx, args.GetReceiverOperand());
GenerateNameDictionaryReceiverCheck(masm, rdx, rax, rbx, &miss);
@@ -940,8 +940,8 @@ void CallICBase::GenerateMiss(MacroAssembler* masm,
__ IncrementCounter(counters->keyed_call_miss(), 1);
}
- // Get the receiver of the function from the stack; 1 ~ return address.
- __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
+ StackArgumentsAccessor args(rsp, argc);
+ __ movq(rdx, args.GetReceiverOperand());
// Enter an internal frame.
{
@@ -965,7 +965,7 @@ void CallICBase::GenerateMiss(MacroAssembler* masm,
// This can happen only for regular CallIC but not KeyedCallIC.
if (id == IC::kCallIC_Miss) {
Label invoke, global;
- __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize)); // receiver
+ __ movq(rdx, args.GetReceiverOperand());
__ JumpIfSmi(rdx, &invoke);
__ CmpObjectType(rdx, JS_GLOBAL_OBJECT_TYPE, rcx);
__ j(equal, &global);
@@ -975,7 +975,7 @@ void CallICBase::GenerateMiss(MacroAssembler* masm,
// Patch the receiver on the stack.
__ bind(&global);
__ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalReceiverOffset));
- __ movq(Operand(rsp, (argc + 1) * kPointerSize), rdx);
+ __ movq(args.GetReceiverOperand(), rdx);
__ bind(&invoke);
}
@@ -1005,8 +1005,8 @@ void CallIC::GenerateMegamorphic(MacroAssembler* masm,
// rsp[(argc + 1) * 8] : argument 0 = receiver
// -----------------------------------
- // Get the receiver of the function from the stack; 1 ~ return address.
- __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
+ StackArgumentsAccessor args(rsp, argc);
+ __ movq(rdx, args.GetReceiverOperand());
GenerateMonomorphicCacheProbe(masm, argc, Code::CALL_IC, extra_ic_state);
GenerateMiss(masm, argc, extra_ic_state);
}
@@ -1023,8 +1023,8 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
// rsp[(argc + 1) * 8] : argument 0 = receiver
// -----------------------------------
- // Get the receiver of the function from the stack; 1 ~ return address.
- __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
+ StackArgumentsAccessor args(rsp, argc);
+ __ movq(rdx, args.GetReceiverOperand());
Label do_call, slow_call, slow_load;
Label check_number_dictionary, check_name, lookup_monomorphic_cache;
@@ -1302,7 +1302,8 @@ void KeyedCallIC::GenerateNonStrictArguments(MacroAssembler* masm,
// rsp[(argc + 1) * 8] : argument 0 = receiver
// -----------------------------------
Label slow, notin;
- __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
+ StackArgumentsAccessor args(rsp, argc);
+ __ movq(rdx, args.GetReceiverOperand());
Operand mapped_location = GenerateMappedArgumentsLookup(
masm, rdx, rcx, rbx, rax, r8, &notin, &slow);
__ movq(rdi, mapped_location);
@@ -1331,7 +1332,7 @@ void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
Code::Flags flags = Code::ComputeFlags(
Code::STUB, MONOMORPHIC, Code::kNoExtraICState,
Code::NORMAL, Code::LOAD_IC);
- Isolate::Current()->stub_cache()->GenerateProbe(
+ masm->isolate()->stub_cache()->GenerateProbe(
masm, flags, rax, rcx, rbx, rdx);
GenerateMiss(masm);
@@ -1452,8 +1453,8 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
Code::Flags flags = Code::ComputeFlags(
Code::STUB, MONOMORPHIC, strict_mode,
Code::NORMAL, Code::STORE_IC);
- Isolate::Current()->stub_cache()->GenerateProbe(masm, flags, rdx, rcx, rbx,
- no_reg);
+ masm->isolate()->stub_cache()->GenerateProbe(
+ masm, flags, rdx, rcx, rbx, no_reg);
// Cache miss: Jump to runtime.
GenerateMiss(masm);
diff --git a/deps/v8/src/x64/lithium-codegen-x64.cc b/deps/v8/src/x64/lithium-codegen-x64.cc
index 4fbcbcdfc..9dca6b3e2 100644
--- a/deps/v8/src/x64/lithium-codegen-x64.cc
+++ b/deps/v8/src/x64/lithium-codegen-x64.cc
@@ -32,6 +32,7 @@
#include "x64/lithium-codegen-x64.h"
#include "code-stubs.h"
#include "stub-cache.h"
+#include "hydrogen-osr.h"
namespace v8 {
namespace internal {
@@ -39,7 +40,7 @@ namespace internal {
// When invoking builtins, we need to record the safepoint in the middle of
// the invoke instruction sequence generated by the macro assembler.
-class SafepointGenerator : public CallWrapper {
+class SafepointGenerator V8_FINAL : public CallWrapper {
public:
SafepointGenerator(LCodeGen* codegen,
LPointerMap* pointers,
@@ -47,13 +48,13 @@ class SafepointGenerator : public CallWrapper {
: codegen_(codegen),
pointers_(pointers),
deopt_mode_(mode) { }
- virtual ~SafepointGenerator() { }
+ virtual ~SafepointGenerator() {}
- virtual void BeforeCall(int call_size) const {
+ virtual void BeforeCall(int call_size) const V8_OVERRIDE {
codegen_->EnsureSpaceForLazyDeopt(Deoptimizer::patch_size() - call_size);
}
- virtual void AfterCall() const {
+ virtual void AfterCall() const V8_OVERRIDE {
codegen_->RecordSafepoint(pointers_, deopt_mode_);
}
@@ -257,6 +258,21 @@ bool LCodeGen::GeneratePrologue() {
}
+void LCodeGen::GenerateOsrPrologue() {
+ // Generate the OSR entry prologue at the first unknown OSR value, or if there
+ // are none, at the OSR entrypoint instruction.
+ if (osr_pc_offset_ >= 0) return;
+
+ osr_pc_offset_ = masm()->pc_offset();
+
+ // Adjust the frame size, subsuming the unoptimized frame into the
+ // optimized frame.
+ int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
+ ASSERT(slots >= 0);
+ __ subq(rsp, Immediate(slots * kPointerSize));
+}
+
+
bool LCodeGen::GenerateBody() {
ASSERT(is_generating());
bool emit_instructions = true;
@@ -357,6 +373,7 @@ bool LCodeGen::GenerateDeferredCode() {
}
code->Generate();
if (NeedsDeferredFrame()) {
+ __ bind(code->done());
Comment(";;; Destroy frame");
ASSERT(frame_is_built_);
frame_is_built_ = false;
@@ -450,7 +467,7 @@ ExternalReference LCodeGen::ToExternalReference(LConstantOperand* op) const {
Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
HConstant* constant = chunk_->LookupConstant(op);
ASSERT(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
- return constant->handle();
+ return constant->handle(isolate());
}
@@ -582,7 +599,7 @@ void LCodeGen::AddToTranslation(LEnvironment* environment,
translation->StoreDoubleRegister(reg);
} else if (op->IsConstantOperand()) {
HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
- int src_index = DefineDeoptimizationLiteral(constant->handle());
+ int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
translation->StoreLiteral(src_index);
} else {
UNREACHABLE();
@@ -979,8 +996,7 @@ void LCodeGen::DoCallStub(LCallStub* instr) {
void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
- // Record the address of the first unknown OSR value as the place to enter.
- if (osr_pc_offset_ == -1) osr_pc_offset_ = masm()->pc_offset();
+ GenerateOsrPrologue();
}
@@ -1299,7 +1315,11 @@ void LCodeGen::DoMulI(LMulI* instr) {
LOperand* right = instr->right();
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ movl(kScratchRegister, left);
+ if (instr->hydrogen_value()->representation().IsSmi()) {
+ __ movq(kScratchRegister, left);
+ } else {
+ __ movl(kScratchRegister, left);
+ }
}
bool can_overflow =
@@ -1347,14 +1367,14 @@ void LCodeGen::DoMulI(LMulI* instr) {
}
} else if (right->IsStackSlot()) {
if (instr->hydrogen_value()->representation().IsSmi()) {
- __ SmiToInteger32(left, left);
+ __ SmiToInteger64(left, left);
__ imul(left, ToOperand(right));
} else {
__ imull(left, ToOperand(right));
}
} else {
if (instr->hydrogen_value()->representation().IsSmi()) {
- __ SmiToInteger32(left, left);
+ __ SmiToInteger64(left, left);
__ imul(left, ToRegister(right));
} else {
__ imull(left, ToRegister(right));
@@ -1368,9 +1388,15 @@ void LCodeGen::DoMulI(LMulI* instr) {
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
// Bail out if the result is supposed to be negative zero.
Label done;
- __ testl(left, left);
+ if (instr->hydrogen_value()->representation().IsSmi()) {
+ __ testq(left, left);
+ } else {
+ __ testl(left, left);
+ }
__ j(not_zero, &done, Label::kNear);
if (right->IsConstantOperand()) {
+ // Constant can't be represented as Smi due to immediate size limit.
+ ASSERT(!instr->hydrogen_value()->representation().IsSmi());
if (ToInteger32(LConstantOperand::cast(right)) < 0) {
DeoptimizeIf(no_condition, instr->environment());
} else if (ToInteger32(LConstantOperand::cast(right)) == 0) {
@@ -1378,11 +1404,19 @@ void LCodeGen::DoMulI(LMulI* instr) {
DeoptimizeIf(less, instr->environment());
}
} else if (right->IsStackSlot()) {
- __ orl(kScratchRegister, ToOperand(right));
+ if (instr->hydrogen_value()->representation().IsSmi()) {
+ __ or_(kScratchRegister, ToOperand(right));
+ } else {
+ __ orl(kScratchRegister, ToOperand(right));
+ }
DeoptimizeIf(sign, instr->environment());
} else {
// Test the non-zero operand for negative sign.
- __ orl(kScratchRegister, ToRegister(right));
+ if (instr->hydrogen_value()->representation().IsSmi()) {
+ __ or_(kScratchRegister, ToRegister(right));
+ } else {
+ __ orl(kScratchRegister, ToRegister(right));
+ }
DeoptimizeIf(sign, instr->environment());
}
__ bind(&done);
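The 64-bit variants above (movq, SmiToInteger64, or_, testq) are needed because x64 smis at this revision carry their 32-bit payload in the upper half of the word, so a smi-representation product fills all 64 bits. A sketch of the untag-once multiply trick (names illustrative):

    // tagged == value << 32, so untagging one operand suffices:
    int64_t left_value = tagged_left >> 32;          // SmiToInteger64
    int64_t product    = left_value * tagged_right;  // == (vl * vr) << 32
    // The product is already a correctly tagged smi. Its low 32 bits are
    // always zero, which is why the zero test must be testq, not testl.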
@@ -1580,7 +1614,7 @@ void LCodeGen::DoConstantE(LConstantE* instr) {
void LCodeGen::DoConstantT(LConstantT* instr) {
- Handle<Object> value = instr->value();
+ Handle<Object> value = instr->value(isolate());
AllowDeferredHandleDereference smi_check;
__ LoadObject(ToRegister(instr->result()), value);
}
@@ -2467,15 +2501,15 @@ void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
- class DeferredInstanceOfKnownGlobal: public LDeferredCode {
+ class DeferredInstanceOfKnownGlobal V8_FINAL : public LDeferredCode {
public:
DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
LInstanceOfKnownGlobal* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() {
+ virtual void Generate() V8_OVERRIDE {
codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
}
- virtual LInstruction* instr() { return instr_; }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
Label* map_check() { return &map_check_; }
private:
LInstanceOfKnownGlobal* instr_;
@@ -3401,14 +3435,14 @@ void LCodeGen::EmitSmiMathAbs(LMathAbs* instr) {
void LCodeGen::DoMathAbs(LMathAbs* instr) {
// Class for deferred case.
- class DeferredMathAbsTaggedHeapNumber: public LDeferredCode {
+ class DeferredMathAbsTaggedHeapNumber V8_FINAL : public LDeferredCode {
public:
DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() {
+ virtual void Generate() V8_OVERRIDE {
codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
}
- virtual LInstruction* instr() { return instr_; }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
LMathAbs* instr_;
};
@@ -3633,90 +3667,64 @@ void LCodeGen::DoPower(LPower* instr) {
void LCodeGen::DoRandom(LRandom* instr) {
- class DeferredDoRandom: public LDeferredCode {
- public:
- DeferredDoRandom(LCodeGen* codegen, LRandom* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredRandom(instr_); }
- virtual LInstruction* instr() { return instr_; }
- private:
- LRandom* instr_;
- };
-
- DeferredDoRandom* deferred = new(zone()) DeferredDoRandom(this, instr);
-
- // Having marked this instruction as a call we can use any
- // registers.
- ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
-
- // Choose the right register for the first argument depending on
- // calling convention.
-#ifdef _WIN64
- ASSERT(ToRegister(instr->global_object()).is(rcx));
- Register global_object = rcx;
-#else
- ASSERT(ToRegister(instr->global_object()).is(rdi));
- Register global_object = rdi;
-#endif
-
+ // Assert that register size is twice the size of each seed.
static const int kSeedSize = sizeof(uint32_t);
STATIC_ASSERT(kPointerSize == 2 * kSeedSize);
- __ movq(global_object,
- FieldOperand(global_object, GlobalObject::kNativeContextOffset));
+ // Load native context
+ Register global_object = ToRegister(instr->global_object());
+ Register native_context = global_object;
+ __ movq(native_context, FieldOperand(
+ global_object, GlobalObject::kNativeContextOffset));
+
+ // Load state (FixedArray of the native context's random seeds)
static const int kRandomSeedOffset =
FixedArray::kHeaderSize + Context::RANDOM_SEED_INDEX * kPointerSize;
- __ movq(rbx, FieldOperand(global_object, kRandomSeedOffset));
- // rbx: FixedArray of the native context's random seeds
+ Register state = native_context;
+ __ movq(state, FieldOperand(native_context, kRandomSeedOffset));
// Load state[0].
- __ movl(rax, FieldOperand(rbx, ByteArray::kHeaderSize));
- // If state[0] == 0, call runtime to initialize seeds.
- __ testl(rax, rax);
- __ j(zero, deferred->entry());
+ Register state0 = ToRegister(instr->scratch());
+ __ movl(state0, FieldOperand(state, ByteArray::kHeaderSize));
// Load state[1].
- __ movl(rcx, FieldOperand(rbx, ByteArray::kHeaderSize + kSeedSize));
+ Register state1 = ToRegister(instr->scratch2());
+ __ movl(state1, FieldOperand(state, ByteArray::kHeaderSize + kSeedSize));
// state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16)
- // Only operate on the lower 32 bit of rax.
- __ movzxwl(rdx, rax);
- __ imull(rdx, rdx, Immediate(18273));
- __ shrl(rax, Immediate(16));
- __ addl(rax, rdx);
+ Register scratch3 = ToRegister(instr->scratch3());
+ __ movzxwl(scratch3, state0);
+ __ imull(scratch3, scratch3, Immediate(18273));
+ __ shrl(state0, Immediate(16));
+ __ addl(state0, scratch3);
// Save state[0].
- __ movl(FieldOperand(rbx, ByteArray::kHeaderSize), rax);
+ __ movl(FieldOperand(state, ByteArray::kHeaderSize), state0);
// state[1] = 36969 * (state[1] & 0xFFFF) + (state[1] >> 16)
- __ movzxwl(rdx, rcx);
- __ imull(rdx, rdx, Immediate(36969));
- __ shrl(rcx, Immediate(16));
- __ addl(rcx, rdx);
+ __ movzxwl(scratch3, state1);
+ __ imull(scratch3, scratch3, Immediate(36969));
+ __ shrl(state1, Immediate(16));
+ __ addl(state1, scratch3);
// Save state[1].
- __ movl(FieldOperand(rbx, ByteArray::kHeaderSize + kSeedSize), rcx);
+ __ movl(FieldOperand(state, ByteArray::kHeaderSize + kSeedSize), state1);
// Random bit pattern = (state[0] << 14) + (state[1] & 0x3FFFF)
- __ shll(rax, Immediate(14));
- __ andl(rcx, Immediate(0x3FFFF));
- __ addl(rax, rcx);
+ Register random = state0;
+ __ shll(random, Immediate(14));
+ __ andl(state1, Immediate(0x3FFFF));
+ __ addl(random, state1);
- __ bind(deferred->exit());
  // Convert the 32 random bits in the 'random' register to 0.(32 random bits)
  // in a double by computing:
  // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20).
- __ movq(rcx, V8_INT64_C(0x4130000000000000),
+ XMMRegister result = ToDoubleRegister(instr->result());
+ // We use xmm0 as fixed scratch register here.
+ XMMRegister scratch4 = xmm0;
+ __ movq(scratch3, V8_INT64_C(0x4130000000000000),
RelocInfo::NONE64); // 1.0 x 2^20 as double
- __ movq(xmm2, rcx);
- __ movd(xmm1, rax);
- __ xorps(xmm1, xmm2);
- __ subsd(xmm1, xmm2);
-}
-
-
-void LCodeGen::DoDeferredRandom(LRandom* instr) {
- __ PrepareCallCFunction(1);
- __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- // Return value is in rax.
+ __ movq(scratch4, scratch3);
+ __ movd(result, random);
+ __ xorps(result, scratch4);
+ __ subsd(result, scratch4);
}
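The inlined fast path above is the same multiply-with-carry style generator the runtime call used to run, followed by a divide-free bit trick that turns 32 random bits into a double in [0, 1). A standalone C++ sketch of the arithmetic (illustrative, not code from this patch):

    #include <cstdint>
    #include <cstring>

    double NextRandom(uint32_t* state0, uint32_t* state1) {
      *state0 = 18273u * (*state0 & 0xFFFF) + (*state0 >> 16);
      *state1 = 36969u * (*state1 & 0xFFFF) + (*state1 >> 16);
      uint32_t bits = (*state0 << 14) + (*state1 & 0x3FFFF);
      // Placing the 32 random bits in the low mantissa of 1.0 x 2^20 yields
      // 2^20 + bits * 2^-32; subtracting 2^20 leaves bits * 2^-32 in [0, 1).
      uint64_t pattern = UINT64_C(0x4130000000000000) | bits;
      double result;
      std::memcpy(&result, &pattern, sizeof(result));
      return result - 1048576.0;  // 1.0 x 2^20
    }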
@@ -3906,6 +3914,14 @@ void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
}
+void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
+ Register function = ToRegister(instr->function());
+ Register code_object = ToRegister(instr->code_object());
+ __ lea(code_object, FieldOperand(code_object, Code::kHeaderSize));
+ __ movq(FieldOperand(function, JSFunction::kCodeEntryOffset), code_object);
+}
+
+
void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
Register result = ToRegister(instr->result());
Register base = ToRegister(instr->base_object());
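For context, DoStoreCodeEntry above stores a raw instruction address rather than a tagged pointer. Illustrative pointer arithmetic (comments only, not code from this patch):

    // FieldOperand(obj, offset) addresses [obj + offset - kHeapObjectTag],
    // so the lea yields the first instruction past the Code object header:
    //   entry = code_object - kHeapObjectTag + Code::kHeaderSize;
    // and the movq writes that raw address into the function's
    // JSFunction::kCodeEntryOffset field.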
@@ -4323,12 +4339,14 @@ void LCodeGen::DoStringAdd(LStringAdd* instr) {
void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
- class DeferredStringCharCodeAt: public LDeferredCode {
+ class DeferredStringCharCodeAt V8_FINAL : public LDeferredCode {
public:
DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
- virtual LInstruction* instr() { return instr_; }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredStringCharCodeAt(instr_);
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
LStringCharCodeAt* instr_;
};
@@ -4375,12 +4393,14 @@ void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
- class DeferredStringCharFromCode: public LDeferredCode {
+ class DeferredStringCharFromCode V8_FINAL : public LDeferredCode {
public:
DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); }
- virtual LInstruction* instr() { return instr_; }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredStringCharFromCode(instr_);
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
LStringCharFromCode* instr_;
};
@@ -4469,14 +4489,14 @@ void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
- class DeferredNumberTagU: public LDeferredCode {
+ class DeferredNumberTagU V8_FINAL : public LDeferredCode {
public:
DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() {
+ virtual void Generate() V8_OVERRIDE {
codegen()->DoDeferredNumberTagU(instr_);
}
- virtual LInstruction* instr() { return instr_; }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
LNumberTagU* instr_;
};
@@ -4532,12 +4552,14 @@ void LCodeGen::DoDeferredNumberTagU(LNumberTagU* instr) {
void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
- class DeferredNumberTagD: public LDeferredCode {
+ class DeferredNumberTagD V8_FINAL : public LDeferredCode {
public:
DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
- virtual LInstruction* instr() { return instr_; }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredNumberTagD(instr_);
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
LNumberTagD* instr_;
};
@@ -4651,60 +4673,47 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
}
-void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
- Label done, heap_number;
+void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, Label* done) {
+ Label heap_number;
Register input_reg = ToRegister(instr->value());
- // Heap number map check.
- __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
- Heap::kHeapNumberMapRootIndex);
if (instr->truncating()) {
+ // Heap number map check.
+ __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
+ Heap::kHeapNumberMapRootIndex);
__ j(equal, &heap_number, Label::kNear);
// Check for undefined. Undefined is converted to zero for truncating
// conversions.
__ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
DeoptimizeIf(not_equal, instr->environment());
__ Set(input_reg, 0);
- __ jmp(&done, Label::kNear);
+ __ jmp(done);
__ bind(&heap_number);
-
- __ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
- __ cvttsd2siq(input_reg, xmm0);
- __ Set(kScratchRegister, V8_UINT64_C(0x8000000000000000));
- __ cmpq(input_reg, kScratchRegister);
- DeoptimizeIf(equal, instr->environment());
+ __ TruncateHeapNumberToI(input_reg, input_reg);
} else {
- // Deoptimize if we don't have a heap number.
- DeoptimizeIf(not_equal, instr->environment());
-
+ Label bailout;
XMMRegister xmm_temp = ToDoubleRegister(instr->temp());
- __ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
- __ cvttsd2si(input_reg, xmm0);
- __ cvtlsi2sd(xmm_temp, input_reg);
- __ ucomisd(xmm0, xmm_temp);
- DeoptimizeIf(not_equal, instr->environment());
- DeoptimizeIf(parity_even, instr->environment()); // NaN.
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ testl(input_reg, input_reg);
- __ j(not_zero, &done);
- __ movmskpd(input_reg, xmm0);
- __ andl(input_reg, Immediate(1));
- DeoptimizeIf(not_zero, instr->environment());
- }
+ __ TaggedToI(input_reg, input_reg, xmm_temp,
+ instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear);
+
+ __ jmp(done);
+ __ bind(&bailout);
+ DeoptimizeIf(no_condition, instr->environment());
}
- __ bind(&done);
}
void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
- class DeferredTaggedToI: public LDeferredCode {
+ class DeferredTaggedToI V8_FINAL : public LDeferredCode {
public:
DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
- virtual LInstruction* instr() { return instr_; }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredTaggedToI(instr_, done());
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
LTaggedToI* instr_;
};
@@ -4752,34 +4761,16 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
Register result_reg = ToRegister(result);
if (instr->truncating()) {
- // Performs a truncating conversion of a floating point number as used by
- // the JS bitwise operations.
- __ cvttsd2siq(result_reg, input_reg);
- __ movq(kScratchRegister,
- V8_INT64_C(0x8000000000000000),
- RelocInfo::NONE64);
- __ cmpq(result_reg, kScratchRegister);
- DeoptimizeIf(equal, instr->environment());
+ __ TruncateDoubleToI(result_reg, input_reg);
} else {
- __ cvttsd2si(result_reg, input_reg);
- __ cvtlsi2sd(xmm0, result_reg);
- __ ucomisd(xmm0, input_reg);
- DeoptimizeIf(not_equal, instr->environment());
- DeoptimizeIf(parity_even, instr->environment()); // NaN.
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- Label done;
- // The integer converted back is equal to the original. We
- // only have to test if we got -0 as an input.
- __ testl(result_reg, result_reg);
- __ j(not_zero, &done, Label::kNear);
- __ movmskpd(result_reg, input_reg);
- // Bit 0 contains the sign of the double in input_reg.
- // If input was positive, we are ok and return 0, otherwise
- // deoptimize.
- __ andl(result_reg, Immediate(1));
- DeoptimizeIf(not_zero, instr->environment());
- __ bind(&done);
- }
+ Label bailout, done;
+ __ DoubleToI(result_reg, input_reg, xmm0,
+ instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear);
+
+ __ jmp(&done, Label::kNear);
+ __ bind(&bailout);
+ DeoptimizeIf(no_condition, instr->environment());
+ __ bind(&done);
}
}
@@ -4789,31 +4780,19 @@ void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
ASSERT(input->IsDoubleRegister());
LOperand* result = instr->result();
ASSERT(result->IsRegister());
- CpuFeatureScope scope(masm(), SSE2);
XMMRegister input_reg = ToDoubleRegister(input);
Register result_reg = ToRegister(result);
- Label done;
- __ cvttsd2si(result_reg, input_reg);
- __ cvtlsi2sd(xmm0, result_reg);
- __ ucomisd(xmm0, input_reg);
- DeoptimizeIf(not_equal, instr->environment());
- DeoptimizeIf(parity_even, instr->environment()); // NaN.
+ Label bailout, done;
+ __ DoubleToI(result_reg, input_reg, xmm0,
+ instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear);
+
+ __ jmp(&done, Label::kNear);
+ __ bind(&bailout);
+ DeoptimizeIf(no_condition, instr->environment());
+ __ bind(&done);
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- // The integer converted back is equal to the original. We
- // only have to test if we got -0 as an input.
- __ testl(result_reg, result_reg);
- __ j(not_zero, &done, Label::kNear);
- __ movmskpd(result_reg, input_reg);
- // Bit 0 contains the sign of the double in input_reg.
- // If input was positive, we are ok and return 0, otherwise
- // deoptimize.
- __ andl(result_reg, Immediate(1));
- DeoptimizeIf(not_zero, instr->environment());
- __ bind(&done);
- }
__ Integer32ToSmi(result_reg, result_reg);
DeoptimizeIf(overflow, instr->environment());
}
@@ -4881,10 +4860,10 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
}
-void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
+void LCodeGen::DoCheckValue(LCheckValue* instr) {
Register reg = ToRegister(instr->value());
- Handle<JSFunction> target = instr->hydrogen()->target();
- __ CmpHeapObject(reg, target);
+ Handle<HeapObject> object = instr->hydrogen()->object();
+ __ CmpHeapObject(reg, object);
DeoptimizeIf(not_equal, instr->environment());
}
@@ -4901,17 +4880,17 @@ void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
- class DeferredCheckMaps: public LDeferredCode {
+ class DeferredCheckMaps V8_FINAL : public LDeferredCode {
public:
DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
: LDeferredCode(codegen), instr_(instr), object_(object) {
SetExit(check_maps());
}
- virtual void Generate() {
+ virtual void Generate() V8_OVERRIDE {
codegen()->DoDeferredInstanceMigration(instr_, object_);
}
Label* check_maps() { return &check_maps_; }
- virtual LInstruction* instr() { return instr_; }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
LCheckMaps* instr_;
Label check_maps_;
@@ -5001,12 +4980,14 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
void LCodeGen::DoAllocate(LAllocate* instr) {
- class DeferredAllocate: public LDeferredCode {
+ class DeferredAllocate V8_FINAL : public LDeferredCode {
public:
DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredAllocate(instr_); }
- virtual LInstruction* instr() { return instr_; }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredAllocate(instr_);
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
LAllocate* instr_;
};
@@ -5157,7 +5138,7 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
if (!pretenure && instr->hydrogen()->has_no_literals()) {
FastNewClosureStub stub(instr->hydrogen()->language_mode(),
instr->hydrogen()->is_generator());
- __ Push(instr->hydrogen()->shared_info());
+ __ Move(rbx, instr->hydrogen()->shared_info());
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
} else {
__ push(rsi);
@@ -5362,12 +5343,14 @@ void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
void LCodeGen::DoStackCheck(LStackCheck* instr) {
- class DeferredStackCheck: public LDeferredCode {
+ class DeferredStackCheck V8_FINAL : public LDeferredCode {
public:
DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
- virtual LInstruction* instr() { return instr_; }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredStackCheck(instr_);
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
LStackCheck* instr_;
};
@@ -5381,8 +5364,9 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
Label done;
__ CompareRoot(rsp, Heap::kStackLimitRootIndex);
__ j(above_equal, &done, Label::kNear);
- StackCheckStub stub;
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+ CallCode(isolate()->builtins()->StackCheck(),
+ RelocInfo::CODE_TARGET,
+ instr);
EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
last_lazy_deopt_pc_ = masm()->pc_offset();
__ bind(&done);
@@ -5418,9 +5402,7 @@ void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
ASSERT(!environment->HasBeenRegistered());
RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
- // Normally we record the first unknown OSR value as the entrypoint to the OSR
- // code, but if there were none, record the entrypoint here.
- if (osr_pc_offset_ == -1) osr_pc_offset_ = masm()->pc_offset();
+ GenerateOsrPrologue();
}
diff --git a/deps/v8/src/x64/lithium-codegen-x64.h b/deps/v8/src/x64/lithium-codegen-x64.h
index a74ec7982..f99464501 100644
--- a/deps/v8/src/x64/lithium-codegen-x64.h
+++ b/deps/v8/src/x64/lithium-codegen-x64.h
@@ -44,7 +44,7 @@ namespace internal {
class LDeferredCode;
class SafepointGenerator;
-class LCodeGen BASE_EMBEDDED {
+class LCodeGen V8_FINAL BASE_EMBEDDED {
public:
LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
: zone_(info->zone()),
@@ -123,10 +123,9 @@ class LCodeGen BASE_EMBEDDED {
// Deferred code support.
void DoDeferredNumberTagD(LNumberTagD* instr);
void DoDeferredNumberTagU(LNumberTagU* instr);
- void DoDeferredTaggedToI(LTaggedToI* instr);
+ void DoDeferredTaggedToI(LTaggedToI* instr, Label* done);
void DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr);
void DoDeferredStackCheck(LStackCheck* instr);
- void DoDeferredRandom(LRandom* instr);
void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
void DoDeferredAllocate(LAllocate* instr);
@@ -191,6 +190,9 @@ class LCodeGen BASE_EMBEDDED {
bool GenerateJumpTable();
bool GenerateSafepointTable();
+ // Generates the custom OSR entrypoint and sets the osr_pc_offset.
+ void GenerateOsrPrologue();
+
enum SafepointMode {
RECORD_SIMPLE_SAFEPOINT,
RECORD_SAFEPOINT_WITH_REGISTERS
@@ -384,7 +386,7 @@ class LCodeGen BASE_EMBEDDED {
int old_position_;
- class PushSafepointRegistersScope BASE_EMBEDDED {
+ class PushSafepointRegistersScope V8_FINAL BASE_EMBEDDED {
public:
explicit PushSafepointRegistersScope(LCodeGen* codegen)
: codegen_(codegen) {
@@ -420,13 +422,14 @@ class LDeferredCode: public ZoneObject {
codegen->AddDeferredCode(this);
}
- virtual ~LDeferredCode() { }
+ virtual ~LDeferredCode() {}
virtual void Generate() = 0;
virtual LInstruction* instr() = 0;
void SetExit(Label* exit) { external_exit_ = exit; }
Label* entry() { return &entry_; }
Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }
+ Label* done() { return codegen_->NeedsDeferredFrame() ? &done_ : exit(); }
int instruction_index() const { return instruction_index_; }
protected:
@@ -437,6 +440,7 @@ class LDeferredCode: public ZoneObject {
LCodeGen* codegen_;
Label entry_;
Label exit_;
+ Label done_;
Label* external_exit_;
int instruction_index_;
};
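The new done_ label pairs with the `__ bind(code->done())` added to GenerateDeferredCode earlier in this patch: deferred code that jumps to done() rather than exit() still passes through the deferred-frame teardown when a frame was built. The generated layout, sketched as comments:

    // entry():
    //   build deferred frame (if NeedsDeferredFrame())
    //   ...code->Generate() body, which may jump to done()...
    // done():   // bound just before teardown; == exit() when no frame exists
    //   destroy deferred frame
    // exit():
    //   jump back into the main instruction stream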
diff --git a/deps/v8/src/x64/lithium-gap-resolver-x64.h b/deps/v8/src/x64/lithium-gap-resolver-x64.h
index d82845592..f218455b6 100644
--- a/deps/v8/src/x64/lithium-gap-resolver-x64.h
+++ b/deps/v8/src/x64/lithium-gap-resolver-x64.h
@@ -38,7 +38,7 @@ namespace internal {
class LCodeGen;
class LGapResolver;
-class LGapResolver BASE_EMBEDDED {
+class LGapResolver V8_FINAL BASE_EMBEDDED {
public:
explicit LGapResolver(LCodeGen* owner);
diff --git a/deps/v8/src/x64/lithium-x64.cc b/deps/v8/src/x64/lithium-x64.cc
index ce5d50c11..d9daaacca 100644
--- a/deps/v8/src/x64/lithium-x64.cc
+++ b/deps/v8/src/x64/lithium-x64.cc
@@ -32,6 +32,7 @@
#include "lithium-allocator-inl.h"
#include "x64/lithium-x64.h"
#include "x64/lithium-codegen-x64.h"
+#include "hydrogen-osr.h"
namespace v8 {
namespace internal {
@@ -263,6 +264,14 @@ void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
}
+void LStoreCodeEntry::PrintDataTo(StringStream* stream) {
+ stream->Add(" = ");
+ function()->PrintTo(stream);
+ stream->Add(".code_entry = ");
+ code_object()->PrintTo(stream);
+}
+
+
void LInnerAllocatedObject::PrintDataTo(StringStream* stream) {
stream->Add(" = ");
base_object()->PrintTo(stream);
@@ -431,6 +440,15 @@ LPlatformChunk* LChunkBuilder::Build() {
chunk_ = new(zone()) LPlatformChunk(info(), graph());
LPhase phase("L_Building chunk", chunk_);
status_ = BUILDING;
+
+ // If compiling for OSR, reserve space for the unoptimized frame,
+ // which will be subsumed into this frame.
+ if (graph()->has_osr()) {
+ for (int i = graph()->osr()->UnoptimizedFrameSlots(); i > 0; i--) {
+ chunk_->GetNextSpillIndex(false);
+ }
+ }
+
const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
for (int i = 0; i < blocks->length(); i++) {
HBasicBlock* next = NULL;
@@ -734,12 +752,7 @@ LInstruction* LChunkBuilder::DoShift(Token::Value op,
if (FLAG_opt_safe_uint32_operations) {
does_deopt = !instr->CheckFlag(HInstruction::kUint32);
} else {
- for (HUseIterator it(instr->uses()); !it.Done(); it.Advance()) {
- if (!it.value()->CheckFlag(HValue::kTruncatingToInt32)) {
- does_deopt = true;
- break;
- }
- }
+ does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToInt32);
}
}
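Judging from the loop it replaces, HValue::CheckUsesForFlag returns true only when every use carries the flag; a sketch (exact signature may differ):

    bool HValue::CheckUsesForFlag(Flag f) {
      for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
        if (!it.value()->CheckFlag(f)) return false;  // this use may deopt
      }
      return true;  // all uses truncate to int32; the shift cannot deopt
    }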
@@ -1088,6 +1101,14 @@ LInstruction* LChunkBuilder::DoPushArgument(HPushArgument* instr) {
}
+LInstruction* LChunkBuilder::DoStoreCodeEntry(
+ HStoreCodeEntry* store_code_entry) {
+ LOperand* function = UseRegister(store_code_entry->function());
+ LOperand* code_object = UseTempRegister(store_code_entry->code_object());
+ return new(zone()) LStoreCodeEntry(function, code_object);
+}
+
+
LInstruction* LChunkBuilder::DoInnerAllocatedObject(
HInnerAllocatedObject* inner_object) {
LOperand* base_object = UseRegisterAtStart(inner_object->base_object());
@@ -1592,9 +1613,13 @@ LInstruction* LChunkBuilder::DoPower(HPower* instr) {
LInstruction* LChunkBuilder::DoRandom(HRandom* instr) {
ASSERT(instr->representation().IsDouble());
ASSERT(instr->global_object()->representation().IsTagged());
- LOperand* global_object = UseFixed(instr->global_object(), arg_reg_1);
- LRandom* result = new(zone()) LRandom(global_object);
- return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
+ LOperand* global_object = UseTempRegister(instr->global_object());
+ LOperand* scratch = TempRegister();
+ LOperand* scratch2 = TempRegister();
+ LOperand* scratch3 = TempRegister();
+ LRandom* result = new(zone()) LRandom(
+ global_object, scratch, scratch2, scratch3);
+ return DefineFixedDouble(result, xmm1);
}
@@ -1825,8 +1850,9 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
return AssignEnvironment(DefineSameAsFirst(new(zone()) LCheckSmi(value)));
} else {
ASSERT(to.IsInteger32());
- LOperand* value = UseRegister(instr->value());
- if (instr->value()->type().IsSmi()) {
+ HValue* val = instr->value();
+ LOperand* value = UseRegister(val);
+ if (val->type().IsSmi() || val->representation().IsSmi()) {
return DefineSameAsFirst(new(zone()) LSmiUntag(value, false));
} else {
bool truncating = instr->CanTruncateToInt32();
@@ -1921,9 +1947,9 @@ LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
}
-LInstruction* LChunkBuilder::DoCheckFunction(HCheckFunction* instr) {
+LInstruction* LChunkBuilder::DoCheckValue(HCheckValue* instr) {
LOperand* value = UseRegisterAtStart(instr->value());
- return AssignEnvironment(new(zone()) LCheckFunction(value));
+ return AssignEnvironment(new(zone()) LCheckValue(value));
}
@@ -2347,10 +2373,18 @@ LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
- int spill_index = chunk()->GetNextSpillIndex(false); // Not double-width.
- if (spill_index > LUnallocated::kMaxFixedSlotIndex) {
- Abort(kTooManySpillSlotsNeededForOSR);
- spill_index = 0;
+ // Use an index that corresponds to the location in the unoptimized frame,
+ // which the optimized frame will subsume.
+ int env_index = instr->index();
+ int spill_index = 0;
+ if (instr->environment()->is_parameter_index(env_index)) {
+ spill_index = chunk()->GetParameterStackSlot(env_index);
+ } else {
+ spill_index = env_index - instr->environment()->first_local_index();
+ if (spill_index > LUnallocated::kMaxFixedSlotIndex) {
+ Abort(kTooManySpillSlotsNeededForOSR);
+ spill_index = 0;
+ }
}
return DefineAsSpilled(new(zone()) LUnknownOSRValue, spill_index);
}
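A worked example of the new spill-index mapping, with hypothetical indices:

    // Say parameters occupy env indices 0..2 and first_local_index() == 3.
    // An unknown OSR value at env_index 5 is local #2 and gets spill_index
    // 5 - 3 == 2 -- the very slot it occupies in the unoptimized frame that
    // the optimized frame subsumes, so no copying is needed on OSR entry.
    // Parameters map through chunk()->GetParameterStackSlot(env_index).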
@@ -2372,6 +2406,8 @@ LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) {
LInstruction* LChunkBuilder::DoCapturedObject(HCapturedObject* instr) {
+ instr->ReplayEnvironment(current_block_->last_environment());
+
// There are no real uses of a captured object.
return NULL;
}
@@ -2418,20 +2454,7 @@ LInstruction* LChunkBuilder::DoIsConstructCallAndBranch(
LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
- HEnvironment* env = current_block_->last_environment();
- ASSERT(env != NULL);
-
- env->set_ast_id(instr->ast_id());
-
- env->Drop(instr->pop_count());
- for (int i = instr->values()->length() - 1; i >= 0; --i) {
- HValue* value = instr->values()->at(i);
- if (instr->HasAssignedIndexAt(i)) {
- env->Bind(instr->GetAssignedIndexAt(i), value);
- } else {
- env->Push(value);
- }
- }
+ instr->ReplayEnvironment(current_block_->last_environment());
// If there is an instruction pending deoptimization environment create a
// lazy bailout instruction to capture the environment.
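DoSimulate and DoCapturedObject now share this logic on the hydrogen side; paraphrasing the code removed above, ReplayEnvironment amounts to:

    void HSimulate::ReplayEnvironment(HEnvironment* env) {
      ASSERT(env != NULL);
      env->set_ast_id(ast_id());
      env->Drop(pop_count());
      for (int i = values()->length() - 1; i >= 0; --i) {
        HValue* value = values()->at(i);
        if (HasAssignedIndexAt(i)) {
          env->Bind(GetAssignedIndexAt(i), value);
        } else {
          env->Push(value);
        }
      }
    }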
diff --git a/deps/v8/src/x64/lithium-x64.h b/deps/v8/src/x64/lithium-x64.h
index 77bebe64b..b3d08c8a4 100644
--- a/deps/v8/src/x64/lithium-x64.h
+++ b/deps/v8/src/x64/lithium-x64.h
@@ -62,12 +62,12 @@ class LCodeGen;
V(CallNewArray) \
V(CallRuntime) \
V(CallStub) \
- V(CheckFunction) \
V(CheckInstanceType) \
V(CheckMaps) \
V(CheckMapValue) \
V(CheckNonSmi) \
V(CheckSmi) \
+ V(CheckValue) \
V(ClampDToUint8) \
V(ClampIToUint8) \
V(ClampTToUint8) \
@@ -160,6 +160,7 @@ class LCodeGen;
V(SmiTag) \
V(SmiUntag) \
V(StackCheck) \
+ V(StoreCodeEntry) \
V(StoreContextSlot) \
V(StoreGlobalCell) \
V(StoreGlobalGeneric) \
@@ -186,13 +187,17 @@ class LCodeGen;
V(WrapReceiver)
-#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
- virtual Opcode opcode() const { return LInstruction::k##type; } \
- virtual void CompileToNative(LCodeGen* generator); \
- virtual const char* Mnemonic() const { return mnemonic; } \
- static L##type* cast(LInstruction* instr) { \
- ASSERT(instr->Is##type()); \
- return reinterpret_cast<L##type*>(instr); \
+#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
+ virtual Opcode opcode() const V8_FINAL V8_OVERRIDE { \
+ return LInstruction::k##type; \
+ } \
+ virtual void CompileToNative(LCodeGen* generator) V8_FINAL V8_OVERRIDE; \
+ virtual const char* Mnemonic() const V8_FINAL V8_OVERRIDE { \
+ return mnemonic; \
+ } \
+ static L##type* cast(LInstruction* instr) { \
+ ASSERT(instr->Is##type()); \
+ return reinterpret_cast<L##type*>(instr); \
}
@@ -202,7 +207,7 @@ class LCodeGen;
}
-class LInstruction: public ZoneObject {
+class LInstruction : public ZoneObject {
public:
LInstruction()
: environment_(NULL),
@@ -211,7 +216,7 @@ class LInstruction: public ZoneObject {
set_position(RelocInfo::kNoPosition);
}
- virtual ~LInstruction() { }
+ virtual ~LInstruction() {}
virtual void CompileToNative(LCodeGen* generator) = 0;
virtual const char* Mnemonic() const = 0;
@@ -310,11 +315,13 @@ class LInstruction: public ZoneObject {
// I = number of input operands.
// T = number of temporary operands.
template<int R, int I, int T>
-class LTemplateInstruction: public LInstruction {
+class LTemplateInstruction : public LInstruction {
public:
// Allow 0 or 1 output operands.
STATIC_ASSERT(R == 0 || R == 1);
- virtual bool HasResult() const { return R != 0 && result() != NULL; }
+ virtual bool HasResult() const V8_FINAL V8_OVERRIDE {
+ return R != 0 && result() != NULL;
+ }
void set_result(LOperand* operand) { results_[0] = operand; }
LOperand* result() const { return results_[0]; }
@@ -325,15 +332,15 @@ class LTemplateInstruction: public LInstruction {
private:
// Iterator support.
- virtual int InputCount() { return I; }
- virtual LOperand* InputAt(int i) { return inputs_[i]; }
+ virtual int InputCount() V8_FINAL V8_OVERRIDE { return I; }
+ virtual LOperand* InputAt(int i) V8_FINAL V8_OVERRIDE { return inputs_[i]; }
- virtual int TempCount() { return T; }
- virtual LOperand* TempAt(int i) { return temps_[i]; }
+ virtual int TempCount() V8_FINAL V8_OVERRIDE { return T; }
+ virtual LOperand* TempAt(int i) V8_FINAL V8_OVERRIDE { return temps_[i]; }
};
-class LGap: public LTemplateInstruction<0, 0, 0> {
+class LGap : public LTemplateInstruction<0, 0, 0> {
public:
explicit LGap(HBasicBlock* block)
: block_(block) {
@@ -344,8 +351,8 @@ class LGap: public LTemplateInstruction<0, 0, 0> {
}
// Can't use the DECLARE-macro here because of sub-classes.
- virtual bool IsGap() const { return true; }
- virtual void PrintDataTo(StringStream* stream);
+ virtual bool IsGap() const V8_FINAL V8_OVERRIDE { return true; }
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
static LGap* cast(LInstruction* instr) {
ASSERT(instr->IsGap());
return reinterpret_cast<LGap*>(instr);
@@ -382,11 +389,11 @@ class LGap: public LTemplateInstruction<0, 0, 0> {
};
-class LInstructionGap: public LGap {
+class LInstructionGap V8_FINAL : public LGap {
public:
explicit LInstructionGap(HBasicBlock* block) : LGap(block) { }
- virtual bool HasInterestingComment(LCodeGen* gen) const {
+ virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
return !IsRedundant();
}
@@ -394,14 +401,14 @@ class LInstructionGap: public LGap {
};
-class LGoto: public LTemplateInstruction<0, 0, 0> {
+class LGoto V8_FINAL : public LTemplateInstruction<0, 0, 0> {
public:
explicit LGoto(int block_id) : block_id_(block_id) { }
- virtual bool HasInterestingComment(LCodeGen* gen) const;
+ virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
- virtual void PrintDataTo(StringStream* stream);
- virtual bool IsControl() const { return true; }
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual bool IsControl() const V8_OVERRIDE { return true; }
int block_id() const { return block_id_; }
@@ -410,7 +417,7 @@ class LGoto: public LTemplateInstruction<0, 0, 0> {
};
-class LLazyBailout: public LTemplateInstruction<0, 0, 0> {
+class LLazyBailout V8_FINAL : public LTemplateInstruction<0, 0, 0> {
public:
LLazyBailout() : gap_instructions_size_(0) { }
@@ -426,7 +433,7 @@ class LLazyBailout: public LTemplateInstruction<0, 0, 0> {
};
-class LDummyUse: public LTemplateInstruction<1, 1, 0> {
+class LDummyUse V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LDummyUse(LOperand* value) {
inputs_[0] = value;
@@ -435,22 +442,24 @@ class LDummyUse: public LTemplateInstruction<1, 1, 0> {
};
-class LDeoptimize: public LTemplateInstruction<0, 0, 0> {
+class LDeoptimize V8_FINAL : public LTemplateInstruction<0, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
DECLARE_HYDROGEN_ACCESSOR(Deoptimize)
};
-class LLabel: public LGap {
+class LLabel V8_FINAL : public LGap {
public:
explicit LLabel(HBasicBlock* block)
: LGap(block), replacement_(NULL) { }
- virtual bool HasInterestingComment(LCodeGen* gen) const { return false; }
+ virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+ return false;
+ }
DECLARE_CONCRETE_INSTRUCTION(Label, "label")
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
int block_id() const { return block()->block_id(); }
bool is_loop_header() const { return block()->IsLoopHeader(); }
@@ -466,14 +475,16 @@ class LLabel: public LGap {
};
-class LParameter: public LTemplateInstruction<1, 0, 0> {
+class LParameter V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
- virtual bool HasInterestingComment(LCodeGen* gen) const { return false; }
+ virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+ return false;
+ }
DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter")
};
-class LCallStub: public LTemplateInstruction<1, 0, 0> {
+class LCallStub V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(CallStub, "call-stub")
DECLARE_HYDROGEN_ACCESSOR(CallStub)
@@ -484,19 +495,21 @@ class LCallStub: public LTemplateInstruction<1, 0, 0> {
};
-class LUnknownOSRValue: public LTemplateInstruction<1, 0, 0> {
+class LUnknownOSRValue V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
- virtual bool HasInterestingComment(LCodeGen* gen) const { return false; }
+ virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+ return false;
+ }
DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown-osr-value")
};
template<int I, int T>
-class LControlInstruction: public LTemplateInstruction<0, I, T> {
+class LControlInstruction : public LTemplateInstruction<0, I, T> {
public:
LControlInstruction() : false_label_(NULL), true_label_(NULL) { }
- virtual bool IsControl() const { return true; }
+ virtual bool IsControl() const V8_FINAL V8_OVERRIDE { return true; }
int SuccessorCount() { return hydrogen()->SuccessorCount(); }
HBasicBlock* SuccessorAt(int i) { return hydrogen()->SuccessorAt(i); }
@@ -535,7 +548,7 @@ class LControlInstruction: public LTemplateInstruction<0, I, T> {
};
-class LWrapReceiver: public LTemplateInstruction<1, 2, 0> {
+class LWrapReceiver V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LWrapReceiver(LOperand* receiver, LOperand* function) {
inputs_[0] = receiver;
@@ -549,7 +562,7 @@ class LWrapReceiver: public LTemplateInstruction<1, 2, 0> {
};
-class LApplyArguments: public LTemplateInstruction<1, 4, 0> {
+class LApplyArguments V8_FINAL : public LTemplateInstruction<1, 4, 0> {
public:
LApplyArguments(LOperand* function,
LOperand* receiver,
@@ -570,7 +583,7 @@ class LApplyArguments: public LTemplateInstruction<1, 4, 0> {
};
-class LAccessArgumentsAt: public LTemplateInstruction<1, 3, 0> {
+class LAccessArgumentsAt V8_FINAL : public LTemplateInstruction<1, 3, 0> {
public:
LAccessArgumentsAt(LOperand* arguments, LOperand* length, LOperand* index) {
inputs_[0] = arguments;
@@ -584,11 +597,11 @@ class LAccessArgumentsAt: public LTemplateInstruction<1, 3, 0> {
DECLARE_CONCRETE_INSTRUCTION(AccessArgumentsAt, "access-arguments-at")
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LArgumentsLength: public LTemplateInstruction<1, 1, 0> {
+class LArgumentsLength V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LArgumentsLength(LOperand* elements) {
inputs_[0] = elements;
@@ -600,14 +613,14 @@ class LArgumentsLength: public LTemplateInstruction<1, 1, 0> {
};
-class LArgumentsElements: public LTemplateInstruction<1, 0, 0> {
+class LArgumentsElements V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments-elements")
DECLARE_HYDROGEN_ACCESSOR(ArgumentsElements)
};
-class LModI: public LTemplateInstruction<1, 2, 1> {
+class LModI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LModI(LOperand* left, LOperand* right, LOperand* temp) {
inputs_[0] = left;
@@ -624,7 +637,7 @@ class LModI: public LTemplateInstruction<1, 2, 1> {
};
-class LDivI: public LTemplateInstruction<1, 2, 1> {
+class LDivI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LDivI(LOperand* left, LOperand* right, LOperand* temp) {
inputs_[0] = left;
@@ -643,7 +656,7 @@ class LDivI: public LTemplateInstruction<1, 2, 1> {
};
-class LMathFloorOfDiv: public LTemplateInstruction<1, 2, 1> {
+class LMathFloorOfDiv V8_FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LMathFloorOfDiv(LOperand* left,
LOperand* right,
@@ -662,7 +675,7 @@ class LMathFloorOfDiv: public LTemplateInstruction<1, 2, 1> {
};
-class LMulI: public LTemplateInstruction<1, 2, 0> {
+class LMulI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LMulI(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -677,7 +690,7 @@ class LMulI: public LTemplateInstruction<1, 2, 0> {
};
-class LCompareNumericAndBranch: public LControlInstruction<2, 0> {
+class LCompareNumericAndBranch V8_FINAL : public LControlInstruction<2, 0> {
public:
LCompareNumericAndBranch(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -696,11 +709,11 @@ class LCompareNumericAndBranch: public LControlInstruction<2, 0> {
return hydrogen()->representation().IsDouble();
}
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LMathFloor: public LTemplateInstruction<1, 1, 0> {
+class LMathFloor V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LMathFloor(LOperand* value) {
inputs_[0] = value;
@@ -713,7 +726,7 @@ class LMathFloor: public LTemplateInstruction<1, 1, 0> {
};
-class LMathRound: public LTemplateInstruction<1, 1, 0> {
+class LMathRound V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LMathRound(LOperand* value) {
inputs_[0] = value;
@@ -726,7 +739,7 @@ class LMathRound: public LTemplateInstruction<1, 1, 0> {
};
-class LMathAbs: public LTemplateInstruction<1, 1, 0> {
+class LMathAbs V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LMathAbs(LOperand* value) {
inputs_[0] = value;
@@ -739,7 +752,7 @@ class LMathAbs: public LTemplateInstruction<1, 1, 0> {
};
-class LMathLog: public LTemplateInstruction<1, 1, 0> {
+class LMathLog V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LMathLog(LOperand* value) {
inputs_[0] = value;
@@ -751,7 +764,7 @@ class LMathLog: public LTemplateInstruction<1, 1, 0> {
};
-class LMathSin: public LTemplateInstruction<1, 1, 0> {
+class LMathSin V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LMathSin(LOperand* value) {
inputs_[0] = value;
@@ -763,7 +776,7 @@ class LMathSin: public LTemplateInstruction<1, 1, 0> {
};
-class LMathCos: public LTemplateInstruction<1, 1, 0> {
+class LMathCos V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LMathCos(LOperand* value) {
inputs_[0] = value;
@@ -775,7 +788,7 @@ class LMathCos: public LTemplateInstruction<1, 1, 0> {
};
-class LMathTan: public LTemplateInstruction<1, 1, 0> {
+class LMathTan V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LMathTan(LOperand* value) {
inputs_[0] = value;
@@ -787,7 +800,7 @@ class LMathTan: public LTemplateInstruction<1, 1, 0> {
};
-class LMathExp: public LTemplateInstruction<1, 1, 2> {
+class LMathExp V8_FINAL : public LTemplateInstruction<1, 1, 2> {
public:
LMathExp(LOperand* value, LOperand* temp1, LOperand* temp2) {
inputs_[0] = value;
@@ -804,7 +817,7 @@ class LMathExp: public LTemplateInstruction<1, 1, 2> {
};
-class LMathSqrt: public LTemplateInstruction<1, 1, 0> {
+class LMathSqrt V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LMathSqrt(LOperand* value) {
inputs_[0] = value;
@@ -816,7 +829,7 @@ class LMathSqrt: public LTemplateInstruction<1, 1, 0> {
};
-class LMathPowHalf: public LTemplateInstruction<1, 1, 0> {
+class LMathPowHalf V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LMathPowHalf(LOperand* value) {
inputs_[0] = value;
@@ -828,7 +841,7 @@ class LMathPowHalf: public LTemplateInstruction<1, 1, 0> {
};
-class LCmpObjectEqAndBranch: public LControlInstruction<2, 0> {
+class LCmpObjectEqAndBranch V8_FINAL : public LControlInstruction<2, 0> {
public:
LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -842,7 +855,7 @@ class LCmpObjectEqAndBranch: public LControlInstruction<2, 0> {
};
-class LCmpHoleAndBranch: public LControlInstruction<1, 0> {
+class LCmpHoleAndBranch V8_FINAL : public LControlInstruction<1, 0> {
public:
explicit LCmpHoleAndBranch(LOperand* object) {
inputs_[0] = object;
@@ -855,7 +868,7 @@ class LCmpHoleAndBranch: public LControlInstruction<1, 0> {
};
-class LIsObjectAndBranch: public LControlInstruction<1, 0> {
+class LIsObjectAndBranch V8_FINAL : public LControlInstruction<1, 0> {
public:
explicit LIsObjectAndBranch(LOperand* value) {
inputs_[0] = value;
@@ -866,11 +879,11 @@ class LIsObjectAndBranch: public LControlInstruction<1, 0> {
DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsObjectAndBranch)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LIsNumberAndBranch: public LControlInstruction<1, 0> {
+class LIsNumberAndBranch V8_FINAL : public LControlInstruction<1, 0> {
public:
explicit LIsNumberAndBranch(LOperand* value) {
inputs_[0] = value;
@@ -883,7 +896,7 @@ class LIsNumberAndBranch: public LControlInstruction<1, 0> {
};
-class LIsStringAndBranch: public LControlInstruction<1, 1> {
+class LIsStringAndBranch V8_FINAL : public LControlInstruction<1, 1> {
public:
explicit LIsStringAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -896,11 +909,11 @@ class LIsStringAndBranch: public LControlInstruction<1, 1> {
DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch, "is-string-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsStringAndBranch)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LIsSmiAndBranch: public LControlInstruction<1, 0> {
+class LIsSmiAndBranch V8_FINAL : public LControlInstruction<1, 0> {
public:
explicit LIsSmiAndBranch(LOperand* value) {
inputs_[0] = value;
@@ -911,11 +924,11 @@ class LIsSmiAndBranch: public LControlInstruction<1, 0> {
DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsSmiAndBranch)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LIsUndetectableAndBranch: public LControlInstruction<1, 1> {
+class LIsUndetectableAndBranch V8_FINAL : public LControlInstruction<1, 1> {
public:
explicit LIsUndetectableAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -929,11 +942,11 @@ class LIsUndetectableAndBranch: public LControlInstruction<1, 1> {
"is-undetectable-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsUndetectableAndBranch)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LStringCompareAndBranch: public LControlInstruction<2, 0> {
+class LStringCompareAndBranch V8_FINAL : public LControlInstruction<2, 0> {
public:
explicit LStringCompareAndBranch(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -947,13 +960,13 @@ class LStringCompareAndBranch: public LControlInstruction<2, 0> {
"string-compare-and-branch")
DECLARE_HYDROGEN_ACCESSOR(StringCompareAndBranch)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
Token::Value op() const { return hydrogen()->token(); }
};
-class LHasInstanceTypeAndBranch: public LControlInstruction<1, 0> {
+class LHasInstanceTypeAndBranch V8_FINAL : public LControlInstruction<1, 0> {
public:
explicit LHasInstanceTypeAndBranch(LOperand* value) {
inputs_[0] = value;
@@ -965,11 +978,11 @@ class LHasInstanceTypeAndBranch: public LControlInstruction<1, 0> {
"has-instance-type-and-branch")
DECLARE_HYDROGEN_ACCESSOR(HasInstanceTypeAndBranch)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LGetCachedArrayIndex: public LTemplateInstruction<1, 1, 0> {
+class LGetCachedArrayIndex V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LGetCachedArrayIndex(LOperand* value) {
inputs_[0] = value;
@@ -982,7 +995,8 @@ class LGetCachedArrayIndex: public LTemplateInstruction<1, 1, 0> {
};
-class LHasCachedArrayIndexAndBranch: public LControlInstruction<1, 0> {
+class LHasCachedArrayIndexAndBranch V8_FINAL
+ : public LControlInstruction<1, 0> {
public:
explicit LHasCachedArrayIndexAndBranch(LOperand* value) {
inputs_[0] = value;
@@ -994,11 +1008,11 @@ class LHasCachedArrayIndexAndBranch: public LControlInstruction<1, 0> {
"has-cached-array-index-and-branch")
DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndexAndBranch)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LClassOfTestAndBranch: public LControlInstruction<1, 2> {
+class LClassOfTestAndBranch V8_FINAL : public LControlInstruction<1, 2> {
public:
LClassOfTestAndBranch(LOperand* value, LOperand* temp, LOperand* temp2) {
inputs_[0] = value;
@@ -1014,11 +1028,11 @@ class LClassOfTestAndBranch: public LControlInstruction<1, 2> {
"class-of-test-and-branch")
DECLARE_HYDROGEN_ACCESSOR(ClassOfTestAndBranch)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LCmpT: public LTemplateInstruction<1, 2, 0> {
+class LCmpT V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LCmpT(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -1035,7 +1049,7 @@ class LCmpT: public LTemplateInstruction<1, 2, 0> {
};
-class LInstanceOf: public LTemplateInstruction<1, 2, 0> {
+class LInstanceOf V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LInstanceOf(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -1049,7 +1063,7 @@ class LInstanceOf: public LTemplateInstruction<1, 2, 0> {
};
-class LInstanceOfKnownGlobal: public LTemplateInstruction<1, 1, 1> {
+class LInstanceOfKnownGlobal V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LInstanceOfKnownGlobal(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -1067,7 +1081,8 @@ class LInstanceOfKnownGlobal: public LTemplateInstruction<1, 1, 1> {
LEnvironment* GetDeferredLazyDeoptimizationEnvironment() {
return lazy_deopt_env_;
}
- virtual void SetDeferredLazyDeoptimizationEnvironment(LEnvironment* env) {
+ virtual void SetDeferredLazyDeoptimizationEnvironment(
+ LEnvironment* env) V8_OVERRIDE {
lazy_deopt_env_ = env;
}
@@ -1076,7 +1091,7 @@ class LInstanceOfKnownGlobal: public LTemplateInstruction<1, 1, 1> {
};
-class LInstanceSize: public LTemplateInstruction<1, 1, 0> {
+class LInstanceSize V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LInstanceSize(LOperand* object) {
inputs_[0] = object;
@@ -1089,7 +1104,7 @@ class LInstanceSize: public LTemplateInstruction<1, 1, 0> {
};
-class LBoundsCheck: public LTemplateInstruction<0, 2, 0> {
+class LBoundsCheck V8_FINAL : public LTemplateInstruction<0, 2, 0> {
public:
LBoundsCheck(LOperand* index, LOperand* length) {
inputs_[0] = index;
@@ -1104,7 +1119,7 @@ class LBoundsCheck: public LTemplateInstruction<0, 2, 0> {
};
-class LBitI: public LTemplateInstruction<1, 2, 0> {
+class LBitI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LBitI(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -1121,7 +1136,7 @@ class LBitI: public LTemplateInstruction<1, 2, 0> {
};
-class LShiftI: public LTemplateInstruction<1, 2, 0> {
+class LShiftI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LShiftI(Token::Value op, LOperand* left, LOperand* right, bool can_deopt)
: op_(op), can_deopt_(can_deopt) {
@@ -1142,7 +1157,7 @@ class LShiftI: public LTemplateInstruction<1, 2, 0> {
};
-class LSubI: public LTemplateInstruction<1, 2, 0> {
+class LSubI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LSubI(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -1157,7 +1172,7 @@ class LSubI: public LTemplateInstruction<1, 2, 0> {
};
-class LConstantI: public LTemplateInstruction<1, 0, 0> {
+class LConstantI V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ConstantI, "constant-i")
DECLARE_HYDROGEN_ACCESSOR(Constant)
@@ -1166,7 +1181,7 @@ class LConstantI: public LTemplateInstruction<1, 0, 0> {
};
-class LConstantS: public LTemplateInstruction<1, 0, 0> {
+class LConstantS V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ConstantS, "constant-s")
DECLARE_HYDROGEN_ACCESSOR(Constant)
@@ -1175,7 +1190,7 @@ class LConstantS: public LTemplateInstruction<1, 0, 0> {
};
-class LConstantD: public LTemplateInstruction<1, 0, 1> {
+class LConstantD V8_FINAL : public LTemplateInstruction<1, 0, 1> {
public:
explicit LConstantD(LOperand* temp) {
temps_[0] = temp;
@@ -1190,7 +1205,7 @@ class LConstantD: public LTemplateInstruction<1, 0, 1> {
};
-class LConstantE: public LTemplateInstruction<1, 0, 0> {
+class LConstantE V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ConstantE, "constant-e")
DECLARE_HYDROGEN_ACCESSOR(Constant)
@@ -1201,16 +1216,18 @@ class LConstantE: public LTemplateInstruction<1, 0, 0> {
};
-class LConstantT: public LTemplateInstruction<1, 0, 0> {
+class LConstantT V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ConstantT, "constant-t")
DECLARE_HYDROGEN_ACCESSOR(Constant)
- Handle<Object> value() const { return hydrogen()->handle(); }
+ Handle<Object> value(Isolate* isolate) const {
+ return hydrogen()->handle(isolate);
+ }
};
-class LBranch: public LControlInstruction<1, 0> {
+class LBranch V8_FINAL : public LControlInstruction<1, 0> {
public:
explicit LBranch(LOperand* value) {
inputs_[0] = value;
@@ -1221,17 +1238,17 @@ class LBranch: public LControlInstruction<1, 0> {
DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
DECLARE_HYDROGEN_ACCESSOR(Branch)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LDebugBreak: public LTemplateInstruction<0, 0, 0> {
+class LDebugBreak V8_FINAL : public LTemplateInstruction<0, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(DebugBreak, "break")
};
-class LCmpMapAndBranch: public LControlInstruction<1, 0> {
+class LCmpMapAndBranch V8_FINAL : public LControlInstruction<1, 0> {
public:
explicit LCmpMapAndBranch(LOperand* value) {
inputs_[0] = value;
@@ -1246,7 +1263,7 @@ class LCmpMapAndBranch: public LControlInstruction<1, 0> {
};
-class LMapEnumLength: public LTemplateInstruction<1, 1, 0> {
+class LMapEnumLength V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LMapEnumLength(LOperand* value) {
inputs_[0] = value;
@@ -1258,7 +1275,7 @@ class LMapEnumLength: public LTemplateInstruction<1, 1, 0> {
};
-class LElementsKind: public LTemplateInstruction<1, 1, 0> {
+class LElementsKind V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LElementsKind(LOperand* value) {
inputs_[0] = value;
@@ -1271,7 +1288,7 @@ class LElementsKind: public LTemplateInstruction<1, 1, 0> {
};
-class LValueOf: public LTemplateInstruction<1, 1, 0> {
+class LValueOf V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LValueOf(LOperand* value) {
inputs_[0] = value;
@@ -1284,7 +1301,7 @@ class LValueOf: public LTemplateInstruction<1, 1, 0> {
};
-class LDateField: public LTemplateInstruction<1, 1, 0> {
+class LDateField V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
LDateField(LOperand* date, Smi* index) : index_(index) {
inputs_[0] = date;
@@ -1301,7 +1318,7 @@ class LDateField: public LTemplateInstruction<1, 1, 0> {
};
-class LSeqStringSetChar: public LTemplateInstruction<1, 3, 0> {
+class LSeqStringSetChar V8_FINAL : public LTemplateInstruction<1, 3, 0> {
public:
LSeqStringSetChar(String::Encoding encoding,
LOperand* string,
@@ -1325,7 +1342,7 @@ class LSeqStringSetChar: public LTemplateInstruction<1, 3, 0> {
};
-class LThrow: public LTemplateInstruction<0, 1, 0> {
+class LThrow V8_FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LThrow(LOperand* value) {
inputs_[0] = value;
@@ -1337,7 +1354,7 @@ class LThrow: public LTemplateInstruction<0, 1, 0> {
};
-class LAddI: public LTemplateInstruction<1, 2, 0> {
+class LAddI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LAddI(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -1357,7 +1374,7 @@ class LAddI: public LTemplateInstruction<1, 2, 0> {
};
-class LMathMinMax: public LTemplateInstruction<1, 2, 0> {
+class LMathMinMax V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LMathMinMax(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -1372,7 +1389,7 @@ class LMathMinMax: public LTemplateInstruction<1, 2, 0> {
};
-class LPower: public LTemplateInstruction<1, 2, 0> {
+class LPower V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LPower(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -1387,20 +1404,29 @@ class LPower: public LTemplateInstruction<1, 2, 0> {
};
-class LRandom: public LTemplateInstruction<1, 1, 0> {
+class LRandom V8_FINAL : public LTemplateInstruction<1, 1, 3> {
public:
- explicit LRandom(LOperand* global_object) {
+ LRandom(LOperand* global_object,
+ LOperand* scratch,
+ LOperand* scratch2,
+ LOperand* scratch3) {
inputs_[0] = global_object;
+ temps_[0] = scratch;
+ temps_[1] = scratch2;
+ temps_[2] = scratch3;
}
LOperand* global_object() { return inputs_[0]; }
+ LOperand* scratch() const { return temps_[0]; }
+ LOperand* scratch2() const { return temps_[1]; }
+ LOperand* scratch3() const { return temps_[2]; }
DECLARE_CONCRETE_INSTRUCTION(Random, "random")
DECLARE_HYDROGEN_ACCESSOR(Random)
};
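
[note] The three new temps line up with Math.random() moving from a runtime call to inline generated code. A plausible shape for the inlined generator, assuming it follows the two-seed multiply-with-carry scheme V8's runtime used (a sketch, not the emitted code):

    #include <cstdint>

    // Two 16-bit multiply-with-carry streams combined into one 32-bit result;
    // keeping hi, lo and the combination live at once is consistent with the
    // scratch registers the instruction now reserves.
    uint32_t Mwc1616(uint32_t* hi, uint32_t* lo) {
      *hi = 36969 * (*hi & 0xFFFF) + (*hi >> 16);
      *lo = 18273 * (*lo & 0xFFFF) + (*lo >> 16);
      return (*hi << 16) + (*lo & 0xFFFF);
    }
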
-class LArithmeticD: public LTemplateInstruction<1, 2, 0> {
+class LArithmeticD V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LArithmeticD(Token::Value op, LOperand* left, LOperand* right)
: op_(op) {
@@ -1412,16 +1438,18 @@ class LArithmeticD: public LTemplateInstruction<1, 2, 0> {
LOperand* left() { return inputs_[0]; }
LOperand* right() { return inputs_[1]; }
- virtual Opcode opcode() const { return LInstruction::kArithmeticD; }
- virtual void CompileToNative(LCodeGen* generator);
- virtual const char* Mnemonic() const;
+ virtual Opcode opcode() const V8_OVERRIDE {
+ return LInstruction::kArithmeticD;
+ }
+ virtual void CompileToNative(LCodeGen* generator) V8_OVERRIDE;
+ virtual const char* Mnemonic() const V8_OVERRIDE;
private:
Token::Value op_;
};
-class LArithmeticT: public LTemplateInstruction<1, 2, 0> {
+class LArithmeticT V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LArithmeticT(Token::Value op, LOperand* left, LOperand* right)
: op_(op) {
@@ -1433,16 +1461,18 @@ class LArithmeticT: public LTemplateInstruction<1, 2, 0> {
LOperand* left() { return inputs_[0]; }
LOperand* right() { return inputs_[1]; }
- virtual Opcode opcode() const { return LInstruction::kArithmeticT; }
- virtual void CompileToNative(LCodeGen* generator);
- virtual const char* Mnemonic() const;
+ virtual Opcode opcode() const V8_OVERRIDE {
+ return LInstruction::kArithmeticT;
+ }
+ virtual void CompileToNative(LCodeGen* generator) V8_OVERRIDE;
+ virtual const char* Mnemonic() const V8_OVERRIDE;
private:
Token::Value op_;
};
-class LReturn: public LTemplateInstruction<0, 2, 0> {
+class LReturn V8_FINAL : public LTemplateInstruction<0, 2, 0> {
public:
explicit LReturn(LOperand* value, LOperand* parameter_count) {
inputs_[0] = value;
@@ -1465,7 +1495,7 @@ class LReturn: public LTemplateInstruction<0, 2, 0> {
};
-class LLoadNamedField: public LTemplateInstruction<1, 1, 0> {
+class LLoadNamedField V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadNamedField(LOperand* object) {
inputs_[0] = object;
@@ -1478,7 +1508,7 @@ class LLoadNamedField: public LTemplateInstruction<1, 1, 0> {
};
-class LLoadNamedGeneric: public LTemplateInstruction<1, 1, 0> {
+class LLoadNamedGeneric V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadNamedGeneric(LOperand* object) {
inputs_[0] = object;
@@ -1492,7 +1522,7 @@ class LLoadNamedGeneric: public LTemplateInstruction<1, 1, 0> {
};
-class LLoadFunctionPrototype: public LTemplateInstruction<1, 1, 0> {
+class LLoadFunctionPrototype V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadFunctionPrototype(LOperand* function) {
inputs_[0] = function;
@@ -1505,7 +1535,8 @@ class LLoadFunctionPrototype: public LTemplateInstruction<1, 1, 0> {
};
-class LLoadExternalArrayPointer: public LTemplateInstruction<1, 1, 0> {
+class LLoadExternalArrayPointer V8_FINAL
+ : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadExternalArrayPointer(LOperand* object) {
inputs_[0] = object;
@@ -1518,7 +1549,7 @@ class LLoadExternalArrayPointer: public LTemplateInstruction<1, 1, 0> {
};
-class LLoadKeyed: public LTemplateInstruction<1, 2, 0> {
+class LLoadKeyed V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LLoadKeyed(LOperand* elements, LOperand* key) {
inputs_[0] = elements;
@@ -1533,7 +1564,7 @@ class LLoadKeyed: public LTemplateInstruction<1, 2, 0> {
}
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
uint32_t additional_index() const { return hydrogen()->index_offset(); }
ElementsKind elements_kind() const {
return hydrogen()->elements_kind();
@@ -1541,7 +1572,7 @@ class LLoadKeyed: public LTemplateInstruction<1, 2, 0> {
};
-class LLoadKeyedGeneric: public LTemplateInstruction<1, 2, 0> {
+class LLoadKeyedGeneric V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LLoadKeyedGeneric(LOperand* obj, LOperand* key) {
inputs_[0] = obj;
@@ -1555,14 +1586,14 @@ class LLoadKeyedGeneric: public LTemplateInstruction<1, 2, 0> {
};
-class LLoadGlobalCell: public LTemplateInstruction<1, 0, 0> {
+class LLoadGlobalCell V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(LoadGlobalCell, "load-global-cell")
DECLARE_HYDROGEN_ACCESSOR(LoadGlobalCell)
};
-class LLoadGlobalGeneric: public LTemplateInstruction<1, 1, 0> {
+class LLoadGlobalGeneric V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadGlobalGeneric(LOperand* global_object) {
inputs_[0] = global_object;
@@ -1577,7 +1608,7 @@ class LLoadGlobalGeneric: public LTemplateInstruction<1, 1, 0> {
};
-class LStoreGlobalCell: public LTemplateInstruction<0, 1, 1> {
+class LStoreGlobalCell V8_FINAL : public LTemplateInstruction<0, 1, 1> {
public:
explicit LStoreGlobalCell(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -1592,7 +1623,7 @@ class LStoreGlobalCell: public LTemplateInstruction<0, 1, 1> {
};
-class LStoreGlobalGeneric: public LTemplateInstruction<0, 2, 0> {
+class LStoreGlobalGeneric V8_FINAL : public LTemplateInstruction<0, 2, 0> {
public:
explicit LStoreGlobalGeneric(LOperand* global_object,
LOperand* value) {
@@ -1611,7 +1642,7 @@ class LStoreGlobalGeneric: public LTemplateInstruction<0, 2, 0> {
};
-class LLoadContextSlot: public LTemplateInstruction<1, 1, 0> {
+class LLoadContextSlot V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadContextSlot(LOperand* context) {
inputs_[0] = context;
@@ -1624,11 +1655,11 @@ class LLoadContextSlot: public LTemplateInstruction<1, 1, 0> {
int slot_index() { return hydrogen()->slot_index(); }
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LStoreContextSlot: public LTemplateInstruction<0, 2, 1> {
+class LStoreContextSlot V8_FINAL : public LTemplateInstruction<0, 2, 1> {
public:
LStoreContextSlot(LOperand* context, LOperand* value, LOperand* temp) {
inputs_[0] = context;
@@ -1645,11 +1676,11 @@ class LStoreContextSlot: public LTemplateInstruction<0, 2, 1> {
int slot_index() { return hydrogen()->slot_index(); }
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LPushArgument: public LTemplateInstruction<0, 1, 0> {
+class LPushArgument V8_FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LPushArgument(LOperand* value) {
inputs_[0] = value;
@@ -1661,7 +1692,7 @@ class LPushArgument: public LTemplateInstruction<0, 1, 0> {
};
-class LDrop: public LTemplateInstruction<0, 0, 0> {
+class LDrop V8_FINAL : public LTemplateInstruction<0, 0, 0> {
public:
explicit LDrop(int count) : count_(count) { }
@@ -1674,7 +1705,24 @@ class LDrop: public LTemplateInstruction<0, 0, 0> {
};
-class LInnerAllocatedObject: public LTemplateInstruction<1, 1, 0> {
+class LStoreCodeEntry V8_FINAL : public LTemplateInstruction<0, 1, 1> {
+ public:
+ LStoreCodeEntry(LOperand* function, LOperand* code_object) {
+ inputs_[0] = function;
+ temps_[0] = code_object;
+ }
+
+ LOperand* function() { return inputs_[0]; }
+ LOperand* code_object() { return temps_[0]; }
+
+  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreCodeEntry, "store-code-entry")
+ DECLARE_HYDROGEN_ACCESSOR(StoreCodeEntry)
+};
+
+
+class LInnerAllocatedObject V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LInnerAllocatedObject(LOperand* base_object) {
inputs_[0] = base_object;
@@ -1683,28 +1731,28 @@ class LInnerAllocatedObject: public LTemplateInstruction<1, 1, 0> {
LOperand* base_object() { return inputs_[0]; }
int offset() { return hydrogen()->offset(); }
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(InnerAllocatedObject, "sub-allocated-object")
DECLARE_HYDROGEN_ACCESSOR(InnerAllocatedObject)
};
-class LThisFunction: public LTemplateInstruction<1, 0, 0> {
+class LThisFunction V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function")
DECLARE_HYDROGEN_ACCESSOR(ThisFunction)
};
-class LContext: public LTemplateInstruction<1, 0, 0> {
+class LContext V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(Context, "context")
DECLARE_HYDROGEN_ACCESSOR(Context)
};
-class LOuterContext: public LTemplateInstruction<1, 1, 0> {
+class LOuterContext V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LOuterContext(LOperand* context) {
inputs_[0] = context;
@@ -1716,20 +1764,20 @@ class LOuterContext: public LTemplateInstruction<1, 1, 0> {
};
-class LDeclareGlobals: public LTemplateInstruction<0, 0, 0> {
+class LDeclareGlobals V8_FINAL : public LTemplateInstruction<0, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(DeclareGlobals, "declare-globals")
DECLARE_HYDROGEN_ACCESSOR(DeclareGlobals)
};
-class LGlobalObject: public LTemplateInstruction<1, 0, 0> {
+class LGlobalObject V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(GlobalObject, "global-object")
};
-class LGlobalReceiver: public LTemplateInstruction<1, 1, 0> {
+class LGlobalReceiver V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LGlobalReceiver(LOperand* global_object) {
inputs_[0] = global_object;
@@ -1741,7 +1789,7 @@ class LGlobalReceiver: public LTemplateInstruction<1, 1, 0> {
};
-class LCallConstantFunction: public LTemplateInstruction<1, 0, 0> {
+class LCallConstantFunction V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(CallConstantFunction, "call-constant-function")
DECLARE_HYDROGEN_ACCESSOR(CallConstantFunction)
@@ -1753,7 +1801,7 @@ class LCallConstantFunction: public LTemplateInstruction<1, 0, 0> {
};
-class LInvokeFunction: public LTemplateInstruction<1, 1, 0> {
+class LInvokeFunction V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LInvokeFunction(LOperand* function) {
inputs_[0] = function;
@@ -1764,13 +1812,13 @@ class LInvokeFunction: public LTemplateInstruction<1, 1, 0> {
DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallKeyed: public LTemplateInstruction<1, 1, 0> {
+class LCallKeyed V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LCallKeyed(LOperand* key) {
inputs_[0] = key;
@@ -1781,25 +1829,25 @@ class LCallKeyed: public LTemplateInstruction<1, 1, 0> {
LOperand* key() { return inputs_[0]; }
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallNamed: public LTemplateInstruction<1, 0, 0> {
+class LCallNamed V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(CallNamed, "call-named")
DECLARE_HYDROGEN_ACCESSOR(CallNamed)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
Handle<String> name() const { return hydrogen()->name(); }
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallFunction: public LTemplateInstruction<1, 1, 0> {
+class LCallFunction V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LCallFunction(LOperand* function) {
inputs_[0] = function;
@@ -1813,30 +1861,30 @@ class LCallFunction: public LTemplateInstruction<1, 1, 0> {
};
-class LCallGlobal: public LTemplateInstruction<1, 0, 0> {
+class LCallGlobal V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(CallGlobal, "call-global")
DECLARE_HYDROGEN_ACCESSOR(CallGlobal)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
  Handle<String> name() const { return hydrogen()->name(); }
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallKnownGlobal: public LTemplateInstruction<1, 0, 0> {
+class LCallKnownGlobal V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(CallKnownGlobal, "call-known-global")
DECLARE_HYDROGEN_ACCESSOR(CallKnownGlobal)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallNew: public LTemplateInstruction<1, 1, 0> {
+class LCallNew V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LCallNew(LOperand* constructor) {
inputs_[0] = constructor;
@@ -1847,13 +1895,13 @@ class LCallNew: public LTemplateInstruction<1, 1, 0> {
DECLARE_CONCRETE_INSTRUCTION(CallNew, "call-new")
DECLARE_HYDROGEN_ACCESSOR(CallNew)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallNewArray: public LTemplateInstruction<1, 1, 0> {
+class LCallNewArray V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LCallNewArray(LOperand* constructor) {
inputs_[0] = constructor;
@@ -1864,13 +1912,13 @@ class LCallNewArray: public LTemplateInstruction<1, 1, 0> {
DECLARE_CONCRETE_INSTRUCTION(CallNewArray, "call-new-array")
DECLARE_HYDROGEN_ACCESSOR(CallNewArray)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallRuntime: public LTemplateInstruction<1, 0, 0> {
+class LCallRuntime V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
@@ -1880,7 +1928,7 @@ class LCallRuntime: public LTemplateInstruction<1, 0, 0> {
};
-class LInteger32ToDouble: public LTemplateInstruction<1, 1, 0> {
+class LInteger32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LInteger32ToDouble(LOperand* value) {
inputs_[0] = value;
@@ -1892,7 +1940,7 @@ class LInteger32ToDouble: public LTemplateInstruction<1, 1, 0> {
};
-class LInteger32ToSmi: public LTemplateInstruction<1, 1, 0> {
+class LInteger32ToSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LInteger32ToSmi(LOperand* value) {
inputs_[0] = value;
@@ -1905,7 +1953,7 @@ class LInteger32ToSmi: public LTemplateInstruction<1, 1, 0> {
};
-class LUint32ToDouble: public LTemplateInstruction<1, 1, 1> {
+class LUint32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
explicit LUint32ToDouble(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -1919,7 +1967,7 @@ class LUint32ToDouble: public LTemplateInstruction<1, 1, 1> {
};
-class LNumberTagI: public LTemplateInstruction<1, 1, 0> {
+class LNumberTagI V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LNumberTagI(LOperand* value) {
inputs_[0] = value;
@@ -1931,7 +1979,7 @@ class LNumberTagI: public LTemplateInstruction<1, 1, 0> {
};
-class LNumberTagU: public LTemplateInstruction<1, 1, 1> {
+class LNumberTagU V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
explicit LNumberTagU(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -1945,7 +1993,7 @@ class LNumberTagU: public LTemplateInstruction<1, 1, 1> {
};
-class LNumberTagD: public LTemplateInstruction<1, 1, 1> {
+class LNumberTagD V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
explicit LNumberTagD(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -1961,7 +2009,7 @@ class LNumberTagD: public LTemplateInstruction<1, 1, 1> {
// Sometimes truncating conversion from a tagged value to an int32.
-class LDoubleToI: public LTemplateInstruction<1, 1, 0> {
+class LDoubleToI V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LDoubleToI(LOperand* value) {
inputs_[0] = value;
@@ -1976,7 +2024,7 @@ class LDoubleToI: public LTemplateInstruction<1, 1, 0> {
};
-class LDoubleToSmi: public LTemplateInstruction<1, 1, 0> {
+class LDoubleToSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LDoubleToSmi(LOperand* value) {
inputs_[0] = value;
@@ -1990,7 +2038,7 @@ class LDoubleToSmi: public LTemplateInstruction<1, 1, 0> {
// Truncating conversion from a tagged value to an int32.
-class LTaggedToI: public LTemplateInstruction<1, 1, 1> {
+class LTaggedToI V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LTaggedToI(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -2007,7 +2055,7 @@ class LTaggedToI: public LTemplateInstruction<1, 1, 1> {
};
-class LSmiTag: public LTemplateInstruction<1, 1, 0> {
+class LSmiTag V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LSmiTag(LOperand* value) {
inputs_[0] = value;
@@ -2019,7 +2067,7 @@ class LSmiTag: public LTemplateInstruction<1, 1, 0> {
};
-class LNumberUntagD: public LTemplateInstruction<1, 1, 0> {
+class LNumberUntagD V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LNumberUntagD(LOperand* value) {
inputs_[0] = value;
@@ -2032,7 +2080,7 @@ class LNumberUntagD: public LTemplateInstruction<1, 1, 0> {
};
-class LSmiUntag: public LTemplateInstruction<1, 1, 0> {
+class LSmiUntag V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
LSmiUntag(LOperand* value, bool needs_check)
: needs_check_(needs_check) {
@@ -2049,7 +2097,7 @@ class LSmiUntag: public LTemplateInstruction<1, 1, 0> {
};
-class LStoreNamedField: public LTemplateInstruction<0, 2, 1> {
+class LStoreNamedField V8_FINAL : public LTemplateInstruction<0, 2, 1> {
public:
LStoreNamedField(LOperand* object, LOperand* value, LOperand* temp) {
inputs_[0] = object;
@@ -2064,7 +2112,7 @@ class LStoreNamedField: public LTemplateInstruction<0, 2, 1> {
DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
Handle<Map> transition() const { return hydrogen()->transition_map(); }
Representation representation() const {
@@ -2073,7 +2121,7 @@ class LStoreNamedField: public LTemplateInstruction<0, 2, 1> {
};
-class LStoreNamedGeneric: public LTemplateInstruction<0, 2, 0> {
+class LStoreNamedGeneric V8_FINAL : public LTemplateInstruction<0, 2, 0> {
public:
LStoreNamedGeneric(LOperand* object, LOperand* value) {
inputs_[0] = object;
@@ -2086,14 +2134,14 @@ class LStoreNamedGeneric: public LTemplateInstruction<0, 2, 0> {
DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
Handle<Object> name() const { return hydrogen()->name(); }
StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
};
-class LStoreKeyed: public LTemplateInstruction<0, 3, 0> {
+class LStoreKeyed V8_FINAL : public LTemplateInstruction<0, 3, 0> {
public:
LStoreKeyed(LOperand* object, LOperand* key, LOperand* value) {
inputs_[0] = object;
@@ -2110,13 +2158,13 @@ class LStoreKeyed: public LTemplateInstruction<0, 3, 0> {
DECLARE_CONCRETE_INSTRUCTION(StoreKeyed, "store-keyed")
DECLARE_HYDROGEN_ACCESSOR(StoreKeyed)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); }
uint32_t additional_index() const { return hydrogen()->index_offset(); }
};
-class LStoreKeyedGeneric: public LTemplateInstruction<0, 3, 0> {
+class LStoreKeyedGeneric V8_FINAL : public LTemplateInstruction<0, 3, 0> {
public:
LStoreKeyedGeneric(LOperand* object, LOperand* key, LOperand* value) {
inputs_[0] = object;
@@ -2131,13 +2179,13 @@ class LStoreKeyedGeneric: public LTemplateInstruction<0, 3, 0> {
DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
};
-class LTransitionElementsKind: public LTemplateInstruction<0, 1, 2> {
+class LTransitionElementsKind V8_FINAL : public LTemplateInstruction<0, 1, 2> {
public:
LTransitionElementsKind(LOperand* object,
LOperand* new_map_temp,
@@ -2155,7 +2203,7 @@ class LTransitionElementsKind: public LTemplateInstruction<0, 1, 2> {
"transition-elements-kind")
DECLARE_HYDROGEN_ACCESSOR(TransitionElementsKind)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
Handle<Map> original_map() { return hydrogen()->original_map(); }
Handle<Map> transitioned_map() { return hydrogen()->transitioned_map(); }
@@ -2164,7 +2212,7 @@ class LTransitionElementsKind: public LTemplateInstruction<0, 1, 2> {
};
-class LTrapAllocationMemento : public LTemplateInstruction<0, 1, 1> {
+class LTrapAllocationMemento V8_FINAL : public LTemplateInstruction<0, 1, 1> {
public:
LTrapAllocationMemento(LOperand* object,
LOperand* temp) {
@@ -2180,7 +2228,7 @@ class LTrapAllocationMemento : public LTemplateInstruction<0, 1, 1> {
};
-class LStringAdd: public LTemplateInstruction<1, 2, 0> {
+class LStringAdd V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LStringAdd(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -2195,7 +2243,7 @@ class LStringAdd: public LTemplateInstruction<1, 2, 0> {
};
-class LStringCharCodeAt: public LTemplateInstruction<1, 2, 0> {
+class LStringCharCodeAt V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LStringCharCodeAt(LOperand* string, LOperand* index) {
inputs_[0] = string;
@@ -2210,7 +2258,7 @@ class LStringCharCodeAt: public LTemplateInstruction<1, 2, 0> {
};
-class LStringCharFromCode: public LTemplateInstruction<1, 1, 0> {
+class LStringCharFromCode V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LStringCharFromCode(LOperand* char_code) {
inputs_[0] = char_code;
@@ -2223,20 +2271,20 @@ class LStringCharFromCode: public LTemplateInstruction<1, 1, 0> {
};
-class LCheckFunction: public LTemplateInstruction<0, 1, 0> {
+class LCheckValue V8_FINAL : public LTemplateInstruction<0, 1, 0> {
public:
- explicit LCheckFunction(LOperand* value) {
+ explicit LCheckValue(LOperand* value) {
inputs_[0] = value;
}
LOperand* value() { return inputs_[0]; }
- DECLARE_CONCRETE_INSTRUCTION(CheckFunction, "check-function")
- DECLARE_HYDROGEN_ACCESSOR(CheckFunction)
+ DECLARE_CONCRETE_INSTRUCTION(CheckValue, "check-value")
+ DECLARE_HYDROGEN_ACCESSOR(CheckValue)
};
-class LCheckInstanceType: public LTemplateInstruction<0, 1, 0> {
+class LCheckInstanceType V8_FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LCheckInstanceType(LOperand* value) {
inputs_[0] = value;
@@ -2249,7 +2297,7 @@ class LCheckInstanceType: public LTemplateInstruction<0, 1, 0> {
};
-class LCheckMaps: public LTemplateInstruction<0, 1, 0> {
+class LCheckMaps V8_FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LCheckMaps(LOperand* value) {
inputs_[0] = value;
@@ -2262,7 +2310,7 @@ class LCheckMaps: public LTemplateInstruction<0, 1, 0> {
};
-class LCheckSmi: public LTemplateInstruction<1, 1, 0> {
+class LCheckSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LCheckSmi(LOperand* value) {
inputs_[0] = value;
@@ -2274,7 +2322,7 @@ class LCheckSmi: public LTemplateInstruction<1, 1, 0> {
};
-class LClampDToUint8: public LTemplateInstruction<1, 1, 0> {
+class LClampDToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LClampDToUint8(LOperand* unclamped) {
inputs_[0] = unclamped;
@@ -2286,7 +2334,7 @@ class LClampDToUint8: public LTemplateInstruction<1, 1, 0> {
};
-class LClampIToUint8: public LTemplateInstruction<1, 1, 0> {
+class LClampIToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LClampIToUint8(LOperand* unclamped) {
inputs_[0] = unclamped;
@@ -2298,7 +2346,7 @@ class LClampIToUint8: public LTemplateInstruction<1, 1, 0> {
};
-class LClampTToUint8: public LTemplateInstruction<1, 1, 1> {
+class LClampTToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LClampTToUint8(LOperand* unclamped,
LOperand* temp_xmm) {
@@ -2313,7 +2361,7 @@ class LClampTToUint8: public LTemplateInstruction<1, 1, 1> {
};
-class LCheckNonSmi: public LTemplateInstruction<0, 1, 0> {
+class LCheckNonSmi V8_FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LCheckNonSmi(LOperand* value) {
inputs_[0] = value;
@@ -2326,7 +2374,7 @@ class LCheckNonSmi: public LTemplateInstruction<0, 1, 0> {
};
-class LAllocate: public LTemplateInstruction<1, 1, 1> {
+class LAllocate V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LAllocate(LOperand* size, LOperand* temp) {
inputs_[0] = size;
@@ -2341,21 +2389,21 @@ class LAllocate: public LTemplateInstruction<1, 1, 1> {
};
-class LRegExpLiteral: public LTemplateInstruction<1, 0, 0> {
+class LRegExpLiteral V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(RegExpLiteral, "regexp-literal")
DECLARE_HYDROGEN_ACCESSOR(RegExpLiteral)
};
-class LFunctionLiteral: public LTemplateInstruction<1, 0, 0> {
+class LFunctionLiteral V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(FunctionLiteral, "function-literal")
DECLARE_HYDROGEN_ACCESSOR(FunctionLiteral)
};
-class LToFastProperties: public LTemplateInstruction<1, 1, 0> {
+class LToFastProperties V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LToFastProperties(LOperand* value) {
inputs_[0] = value;
@@ -2368,7 +2416,7 @@ class LToFastProperties: public LTemplateInstruction<1, 1, 0> {
};
-class LTypeof: public LTemplateInstruction<1, 1, 0> {
+class LTypeof V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LTypeof(LOperand* value) {
inputs_[0] = value;
@@ -2380,7 +2428,7 @@ class LTypeof: public LTemplateInstruction<1, 1, 0> {
};
-class LTypeofIsAndBranch: public LControlInstruction<1, 0> {
+class LTypeofIsAndBranch V8_FINAL : public LControlInstruction<1, 0> {
public:
explicit LTypeofIsAndBranch(LOperand* value) {
inputs_[0] = value;
@@ -2393,11 +2441,11 @@ class LTypeofIsAndBranch: public LControlInstruction<1, 0> {
Handle<String> type_literal() { return hydrogen()->type_literal(); }
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LIsConstructCallAndBranch: public LControlInstruction<0, 1> {
+class LIsConstructCallAndBranch V8_FINAL : public LControlInstruction<0, 1> {
public:
explicit LIsConstructCallAndBranch(LOperand* temp) {
temps_[0] = temp;
@@ -2411,16 +2459,18 @@ class LIsConstructCallAndBranch: public LControlInstruction<0, 1> {
};
-class LOsrEntry: public LTemplateInstruction<0, 0, 0> {
+class LOsrEntry V8_FINAL : public LTemplateInstruction<0, 0, 0> {
public:
LOsrEntry() {}
- virtual bool HasInterestingComment(LCodeGen* gen) const { return false; }
+ virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+ return false;
+ }
DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry")
};
-class LStackCheck: public LTemplateInstruction<0, 0, 0> {
+class LStackCheck V8_FINAL : public LTemplateInstruction<0, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(StackCheck, "stack-check")
DECLARE_HYDROGEN_ACCESSOR(StackCheck)
@@ -2432,7 +2482,7 @@ class LStackCheck: public LTemplateInstruction<0, 0, 0> {
};
-class LForInPrepareMap: public LTemplateInstruction<1, 1, 0> {
+class LForInPrepareMap V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LForInPrepareMap(LOperand* object) {
inputs_[0] = object;
@@ -2444,7 +2494,7 @@ class LForInPrepareMap: public LTemplateInstruction<1, 1, 0> {
};
-class LForInCacheArray: public LTemplateInstruction<1, 1, 0> {
+class LForInCacheArray V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LForInCacheArray(LOperand* map) {
inputs_[0] = map;
@@ -2460,7 +2510,7 @@ class LForInCacheArray: public LTemplateInstruction<1, 1, 0> {
};
-class LCheckMapValue: public LTemplateInstruction<0, 2, 0> {
+class LCheckMapValue V8_FINAL : public LTemplateInstruction<0, 2, 0> {
public:
LCheckMapValue(LOperand* value, LOperand* map) {
inputs_[0] = value;
@@ -2474,7 +2524,7 @@ class LCheckMapValue: public LTemplateInstruction<0, 2, 0> {
};
-class LLoadFieldByIndex: public LTemplateInstruction<1, 2, 0> {
+class LLoadFieldByIndex V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LLoadFieldByIndex(LOperand* object, LOperand* index) {
inputs_[0] = object;
@@ -2489,7 +2539,7 @@ class LLoadFieldByIndex: public LTemplateInstruction<1, 2, 0> {
class LChunkBuilder;
-class LPlatformChunk: public LChunk {
+class LPlatformChunk V8_FINAL : public LChunk {
public:
LPlatformChunk(CompilationInfo* info, HGraph* graph)
: LChunk(info, graph) { }
@@ -2499,7 +2549,7 @@ class LPlatformChunk: public LChunk {
};
-class LChunkBuilder BASE_EMBEDDED {
+class LChunkBuilder V8_FINAL BASE_EMBEDDED {
public:
LChunkBuilder(CompilationInfo* info, HGraph* graph, LAllocator* allocator)
: chunk_(NULL),
diff --git a/deps/v8/src/x64/macro-assembler-x64.cc b/deps/v8/src/x64/macro-assembler-x64.cc
index 74e3fcc33..69abc5454 100644
--- a/deps/v8/src/x64/macro-assembler-x64.cc
+++ b/deps/v8/src/x64/macro-assembler-x64.cc
@@ -285,16 +285,17 @@ void MacroAssembler::InNewSpace(Register object,
cmpq(scratch, kScratchRegister);
j(cc, branch, distance);
} else {
- ASSERT(is_int32(static_cast<int64_t>(HEAP->NewSpaceMask())));
+ ASSERT(is_int32(static_cast<int64_t>(isolate()->heap()->NewSpaceMask())));
intptr_t new_space_start =
- reinterpret_cast<intptr_t>(HEAP->NewSpaceStart());
+ reinterpret_cast<intptr_t>(isolate()->heap()->NewSpaceStart());
movq(kScratchRegister, -new_space_start, RelocInfo::NONE64);
if (scratch.is(object)) {
addq(scratch, kScratchRegister);
} else {
lea(scratch, Operand(object, kScratchRegister, times_1, 0));
}
- and_(scratch, Immediate(static_cast<int32_t>(HEAP->NewSpaceMask())));
+ and_(scratch,
+ Immediate(static_cast<int32_t>(isolate()->heap()->NewSpaceMask())));
j(cc, branch, distance);
}
}
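
[note] InNewSpace now reaches the heap through the owning isolate instead of the process-global HEAP macro, part of this upgrade's removal of Isolate::Current() lookups. The masked comparison it emits boils down to a base-plus-mask test, roughly:

    #include <cstdint>

    // Sketch of the predicate (mask semantics assumed: it keeps the address
    // bits that identify the space and discards the offset within it).
    bool InNewSpace(uintptr_t addr, uintptr_t start, uintptr_t mask) {
      return ((addr - start) & mask) == 0;
    }
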
@@ -524,7 +525,13 @@ void MacroAssembler::Abort(BailoutReason reason) {
RecordComment("Abort message: ");
RecordComment(msg);
}
+
+ if (FLAG_trap_on_abort) {
+ int3();
+ return;
+ }
#endif
+
push(rax);
movq(kScratchRegister, p0, RelocInfo::NONE64);
push(kScratchRegister);
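
[note] The new trap_on_abort branch replaces the full abort sequence with a one-byte int3 breakpoint, stopping in an attached debugger at the faulting site instead of materializing the bailout reason and calling into the runtime. A loose C++ analogue of the two flavours (__builtin_trap is the GCC/Clang spelling; the names here are illustrative):

    #include <cstdio>
    #include <cstdlib>

    void AbortUnderDebugger() { __builtin_trap(); }  // ~int3: fault into the debugger
    void AbortDefault(const char* reason) {          // default path: report, then die
      std::fprintf(stderr, "abort: %s\n", reason);
      std::abort();
    }
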
@@ -679,22 +686,8 @@ static int Offset(ExternalReference ref0, ExternalReference ref1) {
}
-void MacroAssembler::PrepareCallApiFunction(int arg_stack_space,
- bool returns_handle) {
-#if defined(_WIN64) && !defined(__MINGW64__)
- if (!returns_handle) {
- EnterApiExitFrame(arg_stack_space);
- return;
- }
- // We need to prepare a slot for result handle on stack and put
- // a pointer to it into 1st arg register.
- EnterApiExitFrame(arg_stack_space + 1);
-
- // rcx must be used to pass the pointer to the return value slot.
- lea(rcx, StackSpaceOperand(arg_stack_space));
-#else
+void MacroAssembler::PrepareCallApiFunction(int arg_stack_space) {
EnterApiExitFrame(arg_stack_space);
-#endif
}
@@ -702,7 +695,6 @@ void MacroAssembler::CallApiFunctionAndReturn(Address function_address,
Address thunk_address,
Register thunk_last_arg,
int stack_space,
- bool returns_handle,
int return_value_offset) {
Label prologue;
Label promote_scheduled_exception;
@@ -775,23 +767,6 @@ void MacroAssembler::CallApiFunctionAndReturn(Address function_address,
PopSafepointRegisters();
}
- // Can skip the result check for new-style callbacks
- // TODO(dcarney): may need to pass this information down
- // as some function_addresses might not have been registered
- if (returns_handle) {
- Label empty_result;
-#if defined(_WIN64) && !defined(__MINGW64__)
- // rax keeps a pointer to v8::Handle, unpack it.
- movq(rax, Operand(rax, 0));
-#endif
- // Check if the result handle holds 0.
- testq(rax, rax);
- j(zero, &empty_result);
- // It was non-zero. Dereference to get the result value.
- movq(rax, Operand(rax, 0));
- jmp(&prologue);
- bind(&empty_result);
- }
// Load the value from ReturnValue
movq(rax, Operand(rbp, return_value_offset * kPointerSize));
bind(&prologue);
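
[note] The deleted block unpacked a v8::Handle returned by old-style callbacks. With that callback style gone, every callback returns void and the result is always read from the ReturnValue slot, so returns_handle and the Win64 register shuffling disappear. For context, a new-style callback of this era looks like:

    #include <v8.h>

    // Returns void; the result travels through the ReturnValue slot that the
    // stub reads back at return_value_offset.
    void NameGetter(v8::Local<v8::String> property,
                    const v8::PropertyCallbackInfo<v8::Value>& info) {
      info.GetReturnValue().Set(v8::Integer::New(42));
    }
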
@@ -984,7 +959,10 @@ void MacroAssembler::Set(const Operand& dst, int64_t x) {
}
-bool MacroAssembler::IsUnsafeInt(const int x) {
+// ----------------------------------------------------------------------------
+// Smi tagging, untagging and tag detection.
+
+bool MacroAssembler::IsUnsafeInt(const int32_t x) {
static const int kMaxBits = 17;
return !is_intn(x, kMaxBits);
}
@@ -992,7 +970,7 @@ bool MacroAssembler::IsUnsafeInt(const int x) {
void MacroAssembler::SafeMove(Register dst, Smi* src) {
ASSERT(!dst.is(kScratchRegister));
- ASSERT(kSmiValueSize == 32); // JIT cookie can be converted to Smi.
+ ASSERT(SmiValuesAre32Bits()); // JIT cookie can be converted to Smi.
if (IsUnsafeInt(src->value()) && jit_cookie() != 0) {
Move(dst, Smi::FromInt(src->value() ^ jit_cookie()));
Move(kScratchRegister, Smi::FromInt(jit_cookie()));
@@ -1004,7 +982,7 @@ void MacroAssembler::SafeMove(Register dst, Smi* src) {
void MacroAssembler::SafePush(Smi* src) {
- ASSERT(kSmiValueSize == 32); // JIT cookie can be converted to Smi.
+ ASSERT(SmiValuesAre32Bits()); // JIT cookie can be converted to Smi.
if (IsUnsafeInt(src->value()) && jit_cookie() != 0) {
Push(Smi::FromInt(src->value() ^ jit_cookie()));
Move(kScratchRegister, Smi::FromInt(jit_cookie()));
@@ -1015,9 +993,6 @@ void MacroAssembler::SafePush(Smi* src) {
}
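
[note] IsUnsafeInt/SafeMove/SafePush implement constant blinding against JIT spraying: any sufficiently wide untrusted constant is XOR-ed with a per-assembler secret cookie before being embedded, then XOR-ed back at run time. In miniature:

    #include <cstdint>

    // The chosen constant never appears verbatim in executable memory.
    int32_t EmitBlinded(int32_t x, int32_t cookie) {
      int32_t blinded = x ^ cookie;  // what lands in the instruction stream
      return blinded ^ cookie;       // what the generated code computes back
    }
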
-// ----------------------------------------------------------------------------
-// Smi tagging, untagging and tag detection.
-
Register MacroAssembler::GetSmiConstant(Smi* source) {
int value = source->value();
if (value == 0) {
@@ -2222,6 +2197,49 @@ void MacroAssembler::AddSmiField(Register dst, const Operand& src) {
}
+void MacroAssembler::Push(Smi* source) {
+ intptr_t smi = reinterpret_cast<intptr_t>(source);
+ if (is_int32(smi)) {
+ push(Immediate(static_cast<int32_t>(smi)));
+ } else {
+ Register constant = GetSmiConstant(source);
+ push(constant);
+ }
+}
+
+
+void MacroAssembler::PushInt64AsTwoSmis(Register src, Register scratch) {
+ movq(scratch, src);
+ // High bits.
+ shr(src, Immediate(64 - kSmiShift));
+ shl(src, Immediate(kSmiShift));
+ push(src);
+ // Low bits.
+ shl(scratch, Immediate(kSmiShift));
+ push(scratch);
+}
+
+
+void MacroAssembler::PopInt64AsTwoSmis(Register dst, Register scratch) {
+ pop(scratch);
+ // Low bits.
+ shr(scratch, Immediate(kSmiShift));
+ pop(dst);
+ shr(dst, Immediate(kSmiShift));
+ // High bits.
+ shl(dst, Immediate(64 - kSmiShift));
+ or_(dst, scratch);
+}
+
+
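
[note] The two-smis trick relies on 64-bit smis keeping their 32-bit payload in the upper word: a word whose low half is zero is a valid smi, so the GC walks over both halves without treating them as pointers. The bit manipulation, assuming kSmiShift == 32:

    #include <cstdint>

    uint64_t HighHalfAsSmi(uint64_t v) { return (v >> 32) << 32; }  // payload: high bits
    uint64_t LowHalfAsSmi(uint64_t v)  { return v << 32; }          // payload: low bits
    uint64_t Reassemble(uint64_t hi_smi, uint64_t lo_smi) {
      return hi_smi | (lo_smi >> 32);  // mirrors PopInt64AsTwoSmis' shifts and or_
    }
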
+void MacroAssembler::Test(const Operand& src, Smi* source) {
+ testl(Operand(src, kIntSize), Immediate(source->value()));
+}
+
+
+// ----------------------------------------------------------------------------
+
+
void MacroAssembler::JumpIfNotString(Register object,
Register object_map,
Label* not_string,
@@ -2461,17 +2479,6 @@ void MacroAssembler::LoadGlobalCell(Register dst, Handle<Cell> cell) {
}
-void MacroAssembler::Push(Smi* source) {
- intptr_t smi = reinterpret_cast<intptr_t>(source);
- if (is_int32(smi)) {
- push(Immediate(static_cast<int32_t>(smi)));
- } else {
- Register constant = GetSmiConstant(source);
- push(constant);
- }
-}
-
-
void MacroAssembler::Drop(int stack_elements) {
if (stack_elements > 0) {
addq(rsp, Immediate(stack_elements * kPointerSize));
@@ -2479,11 +2486,6 @@ void MacroAssembler::Drop(int stack_elements) {
}
-void MacroAssembler::Test(const Operand& src, Smi* source) {
- testl(Operand(src, kIntSize), Immediate(source->value()));
-}
-
-
void MacroAssembler::TestBit(const Operand& src, int bits) {
int byte_offset = bits / kBitsPerByte;
int bit_in_byte = bits & (kBitsPerByte - 1);
@@ -2991,6 +2993,117 @@ void MacroAssembler::LoadUint32(XMMRegister dst,
}
+void MacroAssembler::SlowTruncateToI(Register result_reg,
+ Register input_reg,
+ int offset) {
+ DoubleToIStub stub(input_reg, result_reg, offset, true);
+ call(stub.GetCode(isolate()), RelocInfo::CODE_TARGET);
+}
+
+
+void MacroAssembler::TruncateHeapNumberToI(Register result_reg,
+ Register input_reg) {
+ Label done;
+ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
+ cvttsd2siq(result_reg, xmm0);
+ Set(kScratchRegister, V8_UINT64_C(0x8000000000000000));
+ cmpq(result_reg, kScratchRegister);
+ j(not_equal, &done, Label::kNear);
+
+ // Slow case.
+ if (input_reg.is(result_reg)) {
+ subq(rsp, Immediate(kDoubleSize));
+ movsd(MemOperand(rsp, 0), xmm0);
+ SlowTruncateToI(result_reg, rsp, 0);
+ addq(rsp, Immediate(kDoubleSize));
+ } else {
+ SlowTruncateToI(result_reg, input_reg);
+ }
+
+ bind(&done);
+}
+
+
+void MacroAssembler::TruncateDoubleToI(Register result_reg,
+ XMMRegister input_reg) {
+ Label done;
+ cvttsd2siq(result_reg, input_reg);
+ movq(kScratchRegister,
+ V8_INT64_C(0x8000000000000000),
+ RelocInfo::NONE64);
+ cmpq(result_reg, kScratchRegister);
+ j(not_equal, &done, Label::kNear);
+
+ subq(rsp, Immediate(kDoubleSize));
+ movsd(MemOperand(rsp, 0), input_reg);
+ SlowTruncateToI(result_reg, rsp, 0);
+ addq(rsp, Immediate(kDoubleSize));
+
+ bind(&done);
+}
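
[note] Both truncation helpers lean on cvttsd2siq reporting overflow and NaN with a single sentinel value, so one compare against 0x8000000000000000 routes every awkward input to the DoubleToIStub slow path:

    #include <cstdint>

    bool NeedsSlowPath(int64_t truncated) {
      return truncated == static_cast<int64_t>(UINT64_C(0x8000000000000000));
    }
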
+
+
+void MacroAssembler::DoubleToI(Register result_reg,
+ XMMRegister input_reg,
+ XMMRegister scratch,
+ MinusZeroMode minus_zero_mode,
+ Label* conversion_failed,
+ Label::Distance dst) {
+ cvttsd2si(result_reg, input_reg);
+ cvtlsi2sd(xmm0, result_reg);
+ ucomisd(xmm0, input_reg);
+ j(not_equal, conversion_failed, dst);
+ j(parity_even, conversion_failed, dst); // NaN.
+ if (minus_zero_mode == FAIL_ON_MINUS_ZERO) {
+ Label done;
+ // The integer converted back is equal to the original. We
+ // only have to test if we got -0 as an input.
+ testl(result_reg, result_reg);
+ j(not_zero, &done, Label::kNear);
+ movmskpd(result_reg, input_reg);
+ // Bit 0 contains the sign of the double in input_reg.
+ // If input was positive, we are ok and return 0, otherwise
+ // jump to conversion_failed.
+ andl(result_reg, Immediate(1));
+ j(not_zero, conversion_failed, dst);
+ bind(&done);
+ }
+}
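
[note] Under FAIL_ON_MINUS_ZERO the only ambiguous case after the round-trip compare is a result of 0, since -0.0 == 0.0; movmskpd pulls out the sign bit to tell them apart. Equivalently:

    #include <cstdint>
    #include <cstring>

    bool IsMinusZero(double d) {
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof bits);
      return bits == (UINT64_C(1) << 63);  // only the sign bit set
    }
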
+
+
+void MacroAssembler::TaggedToI(Register result_reg,
+ Register input_reg,
+ XMMRegister temp,
+ MinusZeroMode minus_zero_mode,
+ Label* lost_precision,
+ Label::Distance dst) {
+ Label done;
+ ASSERT(!temp.is(xmm0));
+
+ // Heap number map check.
+ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
+ Heap::kHeapNumberMapRootIndex);
+ j(not_equal, lost_precision, dst);
+
+ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
+ cvttsd2si(result_reg, xmm0);
+ cvtlsi2sd(temp, result_reg);
+ ucomisd(xmm0, temp);
+ RecordComment("Deferred TaggedToI: lost precision");
+ j(not_equal, lost_precision, dst);
+ RecordComment("Deferred TaggedToI: NaN");
+ j(parity_even, lost_precision, dst); // NaN.
+ if (minus_zero_mode == FAIL_ON_MINUS_ZERO) {
+ testl(result_reg, result_reg);
+ j(not_zero, &done, Label::kNear);
+ movmskpd(result_reg, xmm0);
+ andl(result_reg, Immediate(1));
+ j(not_zero, lost_precision, dst);
+ }
+ bind(&done);
+}
+
+
void MacroAssembler::LoadInstanceDescriptors(Register map,
Register descriptors) {
movq(descriptors, FieldOperand(map, Map::kDescriptorsOffset));
diff --git a/deps/v8/src/x64/macro-assembler-x64.h b/deps/v8/src/x64/macro-assembler-x64.h
index 61abc206e..09c8a800c 100644
--- a/deps/v8/src/x64/macro-assembler-x64.h
+++ b/deps/v8/src/x64/macro-assembler-x64.h
@@ -375,6 +375,11 @@ class MacroAssembler: public Assembler {
// ---------------------------------------------------------------------------
// Smi tagging, untagging and operations on tagged smis.
+ // Support for constant splitting.
+ bool IsUnsafeInt(const int32_t x);
+ void SafeMove(Register dst, Smi* src);
+ void SafePush(Smi* src);
+
void InitializeSmiConstantRegister() {
movq(kSmiConstantRegister,
reinterpret_cast<uint64_t>(Smi::FromInt(kSmiConstantRegisterValue)),
@@ -720,6 +725,14 @@ class MacroAssembler: public Assembler {
}
void Push(Smi* smi);
+
+ // Save away a 64-bit integer on the stack as two 32-bit integers
+ // masquerading as smis so that the garbage collector skips visiting them.
+ void PushInt64AsTwoSmis(Register src, Register scratch = kScratchRegister);
+ // Reconstruct a 64-bit integer from two 32-bit integers masquerading as
+ // smis on the top of stack.
+ void PopInt64AsTwoSmis(Register dst, Register scratch = kScratchRegister);
+
void Test(const Operand& dst, Smi* source);
@@ -774,11 +787,6 @@ class MacroAssembler: public Assembler {
// Move if the registers are not identical.
void Move(Register target, Register source);
- // Support for constant splitting.
- bool IsUnsafeInt(const int x);
- void SafeMove(Register dst, Smi* src);
- void SafePush(Smi* src);
-
// Bit-field support.
void TestBit(const Operand& dst, int bit_index);
@@ -967,6 +975,20 @@ class MacroAssembler: public Assembler {
XMMRegister temp_xmm_reg,
Register result_reg);
+ void SlowTruncateToI(Register result_reg, Register input_reg,
+ int offset = HeapNumber::kValueOffset - kHeapObjectTag);
+
+ void TruncateHeapNumberToI(Register result_reg, Register input_reg);
+ void TruncateDoubleToI(Register result_reg, XMMRegister input_reg);
+
+ void DoubleToI(Register result_reg, XMMRegister input_reg,
+ XMMRegister scratch, MinusZeroMode minus_zero_mode,
+ Label* conversion_failed, Label::Distance dst = Label::kFar);
+
+ void TaggedToI(Register result_reg, Register input_reg, XMMRegister temp,
+ MinusZeroMode minus_zero_mode, Label* lost_precision,
+ Label::Distance dst = Label::kFar);
+
void LoadUint32(XMMRegister dst, Register src, XMMRegister scratch);
void LoadInstanceDescriptors(Register map, Register descriptors);
@@ -1242,7 +1264,7 @@ class MacroAssembler: public Assembler {
  // rcx (rcx must be preserved until CallApiFunctionAndReturn). Saves
// context (rsi). Clobbers rax. Allocates arg_stack_space * kPointerSize
// inside the exit frame (not GCed) accessible via StackSpaceOperand.
- void PrepareCallApiFunction(int arg_stack_space, bool returns_handle);
+ void PrepareCallApiFunction(int arg_stack_space);
// Calls an API function. Allocates HandleScope, extracts returned value
// from handle and propagates exceptions. Clobbers r14, r15, rbx and
@@ -1252,7 +1274,6 @@ class MacroAssembler: public Assembler {
Address thunk_address,
Register thunk_last_arg,
int stack_space,
- bool returns_handle,
int return_value_offset_from_rbp);
// Before calling a C-function from generated code, align arguments on stack.
diff --git a/deps/v8/src/x64/regexp-macro-assembler-x64.cc b/deps/v8/src/x64/regexp-macro-assembler-x64.cc
index dcd317c66..ca834e277 100644
--- a/deps/v8/src/x64/regexp-macro-assembler-x64.cc
+++ b/deps/v8/src/x64/regexp-macro-assembler-x64.cc
@@ -761,7 +761,7 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
// position registers.
__ movq(Operand(rbp, kInputStartMinusOne), rax);
-#ifdef WIN32
+#if V8_OS_WIN
// Ensure that we have written to each stack page, in order. Skipping a page
// on Windows can cause segmentation faults. Assuming page size is 4k.
const int kPageSize = 4096;
@@ -771,7 +771,7 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
i += kRegistersPerPage) {
__ movq(register_location(i), rax); // One write every page.
}
-#endif // WIN32
+#endif // V8_OS_WIN
// Initialize code object pointer.
__ Move(code_object_pointer(), masm_.CodeObject());
@@ -998,7 +998,7 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
CodeDesc code_desc;
masm_.GetCode(&code_desc);
- Isolate* isolate = ISOLATE;
+ Isolate* isolate = this->isolate();
Handle<Code> code = isolate->factory()->NewCode(
code_desc, Code::ComputeFlags(Code::REGEXP),
masm_.CodeObject());
@@ -1188,7 +1188,6 @@ int RegExpMacroAssemblerX64::CheckStackGuardState(Address* return_address,
Code* re_code,
Address re_frame) {
Isolate* isolate = frame_entry<Isolate*>(re_frame, kIsolate);
- ASSERT(isolate == Isolate::Current());
if (isolate->stack_guard()->IsStackOverflow()) {
isolate->StackOverflow();
return EXCEPTION;
diff --git a/deps/v8/src/x64/stub-cache-x64.cc b/deps/v8/src/x64/stub-cache-x64.cc
index 34a557bd1..95276d530 100644
--- a/deps/v8/src/x64/stub-cache-x64.cc
+++ b/deps/v8/src/x64/stub-cache-x64.cc
@@ -29,6 +29,7 @@
#if V8_TARGET_ARCH_X64
+#include "arguments.h"
#include "ic-inl.h"
#include "codegen.h"
#include "stub-cache.h"
@@ -366,6 +367,11 @@ static void PushInterceptorArguments(MacroAssembler* masm,
Register holder,
Register name,
Handle<JSObject> holder_obj) {
+ STATIC_ASSERT(StubCache::kInterceptorArgsNameIndex == 0);
+ STATIC_ASSERT(StubCache::kInterceptorArgsInfoIndex == 1);
+ STATIC_ASSERT(StubCache::kInterceptorArgsThisIndex == 2);
+ STATIC_ASSERT(StubCache::kInterceptorArgsHolderIndex == 3);
+ STATIC_ASSERT(StubCache::kInterceptorArgsLength == 4);
__ push(name);
Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor());
ASSERT(!masm->isolate()->heap()->InNewSpace(*interceptor));
@@ -373,8 +379,6 @@ static void PushInterceptorArguments(MacroAssembler* masm,
__ push(kScratchRegister);
__ push(receiver);
__ push(holder);
- __ push(FieldOperand(kScratchRegister, InterceptorInfo::kDataOffset));
- __ PushAddress(ExternalReference::isolate_address(masm->isolate()));
}
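
[note] The STATIC_ASSERTs pin the push order to the indices the runtime uses to unpack the block, which is why the data and isolate pushes could be dropped and the argument count shrinks from 6 to 4 at the call sites below. Mirrored as plain constants:

    // Push order is the unpacking contract between stub and runtime.
    enum InterceptorArgs {
      kNameIndex = 0, kInfoIndex = 1, kThisIndex = 2, kHolderIndex = 3,
      kArgsLength = 4
    };
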
@@ -389,7 +393,7 @@ static void CompileCallLoadPropertyWithInterceptor(
ExternalReference ref =
ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorOnly),
masm->isolate());
- __ Set(rax, 6);
+ __ Set(rax, StubCache::kInterceptorArgsLength);
__ LoadAddress(rbx, ref);
CEntryStub stub(1);
@@ -414,8 +418,10 @@ static void ReserveSpaceForFastApiCall(MacroAssembler* masm, Register scratch) {
__ subq(rsp, Immediate(kFastApiCallArguments * kPointerSize));
__ movq(StackOperandForReturnAddress(0), scratch);
__ Move(scratch, Smi::FromInt(0));
- for (int i = 1; i <= kFastApiCallArguments; i++) {
- __ movq(Operand(rsp, i * kPointerSize), scratch);
+ StackArgumentsAccessor args(rsp, kFastApiCallArguments,
+ ARGUMENTS_DONT_CONTAIN_RECEIVER);
+ for (int i = 0; i < kFastApiCallArguments; i++) {
+ __ movq(args.GetArgumentOperand(i), scratch);
}
}
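
[note] StackArgumentsAccessor replaces the hand-written (argc + n) * kPointerSize offsets throughout this file. A toy model, with offsets inferred from the one-for-one replacements in this patch (an illustration, not the class's actual code):

    #include <cstdint>

    struct StackArgs {
      intptr_t rsp; int argc;
      static const int kPointerSize = 8;
      // Receiver sits above argc arguments, which sit above the return address.
      intptr_t Receiver() const { return rsp + (argc + 1) * kPointerSize; }
      intptr_t Argument(int i) const {  // i == 0 denotes the receiver slot
        return rsp + (argc + 1 - i) * kPointerSize;
      }
    };
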
@@ -464,23 +470,26 @@ static void GenerateFastApiCall(MacroAssembler* masm,
__ LoadHeapObject(rdi, function);
__ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+ int api_call_argc = argc + kFastApiCallArguments;
+ StackArgumentsAccessor args(rsp, api_call_argc);
+
// Pass the additional arguments.
- __ movq(Operand(rsp, 2 * kPointerSize), rdi);
+ __ movq(args.GetArgumentOperand(api_call_argc - 1), rdi);
Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
Handle<Object> call_data(api_call_info->data(), masm->isolate());
if (masm->isolate()->heap()->InNewSpace(*call_data)) {
__ Move(rcx, api_call_info);
__ movq(rbx, FieldOperand(rcx, CallHandlerInfo::kDataOffset));
- __ movq(Operand(rsp, 3 * kPointerSize), rbx);
+ __ movq(args.GetArgumentOperand(api_call_argc - 2), rbx);
} else {
- __ Move(Operand(rsp, 3 * kPointerSize), call_data);
+ __ Move(args.GetArgumentOperand(api_call_argc - 2), call_data);
}
__ movq(kScratchRegister,
ExternalReference::isolate_address(masm->isolate()));
- __ movq(Operand(rsp, 4 * kPointerSize), kScratchRegister);
+ __ movq(args.GetArgumentOperand(api_call_argc - 3), kScratchRegister);
__ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
- __ movq(Operand(rsp, 5 * kPointerSize), kScratchRegister);
- __ movq(Operand(rsp, 6 * kPointerSize), kScratchRegister);
+ __ movq(args.GetArgumentOperand(api_call_argc - 4), kScratchRegister);
+ __ movq(args.GetArgumentOperand(api_call_argc - 5), kScratchRegister);
// Prepare arguments.
STATIC_ASSERT(kFastApiCallArguments == 6);
@@ -488,16 +497,10 @@ static void GenerateFastApiCall(MacroAssembler* masm,
// Function address is a foreign pointer outside V8's heap.
Address function_address = v8::ToCData<Address>(api_call_info->callback());
- bool returns_handle =
- !CallbackTable::ReturnsVoid(masm->isolate(), function_address);
-#if defined(__MINGW64__)
+#if defined(__MINGW64__) || defined(_WIN64)
Register arguments_arg = rcx;
Register callback_arg = rdx;
-#elif defined(_WIN64)
- // Win64 uses first register--rcx--for returned value.
- Register arguments_arg = returns_handle ? rdx : rcx;
- Register callback_arg = returns_handle ? r8 : rdx;
#else
Register arguments_arg = rdi;
Register callback_arg = rsi;
@@ -507,7 +510,7 @@ static void GenerateFastApiCall(MacroAssembler* masm,
// it's not controlled by GC.
const int kApiStackSpace = 4;
- __ PrepareCallApiFunction(kApiStackSpace, returns_handle);
+ __ PrepareCallApiFunction(kApiStackSpace);
__ movq(StackSpaceOperand(0), rbx); // v8::Arguments::implicit_args_.
__ addq(rbx, Immediate(argc * kPointerSize));
@@ -519,19 +522,49 @@ static void GenerateFastApiCall(MacroAssembler* masm,
// v8::InvocationCallback's argument.
__ lea(arguments_arg, StackSpaceOperand(0));
- Address thunk_address = returns_handle
- ? FUNCTION_ADDR(&InvokeInvocationCallback)
- : FUNCTION_ADDR(&InvokeFunctionCallback);
+ Address thunk_address = FUNCTION_ADDR(&InvokeFunctionCallback);
__ CallApiFunctionAndReturn(function_address,
thunk_address,
callback_arg,
- argc + kFastApiCallArguments + 1,
- returns_handle,
+ api_call_argc + 1,
kFastApiCallArguments + 1);
}
+// Generate call to api function.
+static void GenerateFastApiCall(MacroAssembler* masm,
+ const CallOptimization& optimization,
+ Register receiver,
+ Register scratch,
+ int argc,
+ Register* values) {
+ ASSERT(optimization.is_simple_api_call());
+ ASSERT(!receiver.is(scratch));
+
+ const int stack_space = kFastApiCallArguments + argc + 1;
+ // Copy return value.
+ __ movq(scratch, Operand(rsp, 0));
+ // Assign stack space for the call arguments.
+ __ subq(rsp, Immediate(stack_space * kPointerSize));
+ // Move the return address on top of the stack.
+ __ movq(Operand(rsp, 0), scratch);
+  // Write holder (the receiver, for these simple calls) to the stack frame.
+ __ movq(Operand(rsp, 1 * kPointerSize), receiver);
+ // Write receiver to stack frame.
+ int index = stack_space;
+ __ movq(Operand(rsp, index-- * kPointerSize), receiver);
+ // Write the arguments to stack frame.
+ for (int i = 0; i < argc; i++) {
+ ASSERT(!receiver.is(values[i]));
+ ASSERT(!scratch.is(values[i]));
+ __ movq(Operand(rsp, index-- * kPointerSize), values[i]);
+ }
+
+ GenerateFastApiCall(masm, optimization, argc);
+}
+
+
class CallInterceptorCompiler BASE_EMBEDDED {
public:
CallInterceptorCompiler(StubCompiler* stub_compiler,
@@ -690,7 +723,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
__ CallExternalReference(
ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForCall),
masm->isolate()),
- 6);
+ StubCache::kInterceptorArgsLength);
// Restore the name_ register.
__ pop(name_);
@@ -1075,7 +1108,7 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
int depth = 0;
if (save_at_depth == depth) {
- __ movq(Operand(rsp, kPointerSize), object_reg);
+ __ movq(Operand(rsp, kPCOnStackSize), object_reg);
}
// Check the maps in the prototype chain.
@@ -1135,7 +1168,7 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
}
if (save_at_depth == depth) {
- __ movq(Operand(rsp, kPointerSize), reg);
+ __ movq(Operand(rsp, kPCOnStackSize), reg);
}
// Go to the next object in the prototype chain.
@@ -1195,7 +1228,7 @@ Register BaseLoadStubCompiler::CallbackHandlerFrontend(
Handle<JSObject> holder,
Handle<Name> name,
Label* success,
- Handle<ExecutableAccessorInfo> callback) {
+ Handle<Object> callback) {
Label miss;
Register reg = HandlerFrontendHeader(object, object_reg, holder, name, &miss);
@@ -1280,42 +1313,50 @@ void BaseLoadStubCompiler::GenerateLoadField(Register reg,
void BaseLoadStubCompiler::GenerateLoadCallback(
+ const CallOptimization& call_optimization) {
+ GenerateFastApiCall(
+ masm(), call_optimization, receiver(), scratch3(), 0, NULL);
+}
+
+
+void BaseLoadStubCompiler::GenerateLoadCallback(
Register reg,
Handle<ExecutableAccessorInfo> callback) {
// Insert additional parameters into the stack frame above return address.
ASSERT(!scratch4().is(reg));
__ PopReturnAddressTo(scratch4());
+ STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 0);
+ STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == -1);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == -2);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == -3);
+ STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == -4);
+ STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == -5);
__ push(receiver()); // receiver
- __ push(reg); // holder
if (heap()->InNewSpace(callback->data())) {
- __ Move(scratch1(), callback);
- __ push(FieldOperand(scratch1(),
+ ASSERT(!scratch2().is(reg));
+ __ Move(scratch2(), callback);
+ __ push(FieldOperand(scratch2(),
ExecutableAccessorInfo::kDataOffset)); // data
} else {
__ Push(Handle<Object>(callback->data(), isolate()));
}
+ ASSERT(!kScratchRegister.is(reg));
__ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
__ push(kScratchRegister); // return value
__ push(kScratchRegister); // return value default
__ PushAddress(ExternalReference::isolate_address(isolate()));
+ __ push(reg); // holder
__ push(name()); // name
// Save a pointer to where we pushed the arguments pointer. This will be
// passed as the const ExecutableAccessorInfo& to the C++ callback.
Address getter_address = v8::ToCData<Address>(callback->getter());
- bool returns_handle =
- !CallbackTable::ReturnsVoid(isolate(), getter_address);
-#if defined(__MINGW64__)
+#if defined(__MINGW64__) || defined(_WIN64)
Register getter_arg = r8;
Register accessor_info_arg = rdx;
Register name_arg = rcx;
-#elif defined(_WIN64)
- // Win64 uses first register--rcx--for returned value.
- Register getter_arg = returns_handle ? r9 : r8;
- Register accessor_info_arg = returns_handle ? r8 : rdx;
- Register name_arg = returns_handle ? rdx : rcx;
#else
Register getter_arg = rdx;
Register accessor_info_arg = rsi;
@@ -1332,7 +1373,7 @@ void BaseLoadStubCompiler::GenerateLoadCallback(
// Allocate v8::AccessorInfo in non-GCed stack space.
const int kArgStackSpace = 1;
- __ PrepareCallApiFunction(kArgStackSpace, returns_handle);
+ __ PrepareCallApiFunction(kArgStackSpace);
STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 6);
__ lea(rax, Operand(name_arg, 6 * kPointerSize));
@@ -1343,16 +1384,13 @@ void BaseLoadStubCompiler::GenerateLoadCallback(
// could be used to pass arguments.
__ lea(accessor_info_arg, StackSpaceOperand(0));
- Address thunk_address = returns_handle
- ? FUNCTION_ADDR(&InvokeAccessorGetter)
- : FUNCTION_ADDR(&InvokeAccessorGetterCallback);
+ Address thunk_address = FUNCTION_ADDR(&InvokeAccessorGetterCallback);
__ CallApiFunctionAndReturn(getter_address,
thunk_address,
getter_arg,
kStackSpace,
- returns_handle,
- 5);
+ 6);
}
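
[note] The reordering (holder pushed last, next to name) follows the PropertyCallbackArguments layout the STATIC_ASSERTs spell out: indices are relative to the "this" slot, with each more negative index one slot closer to the pushed name, which also shifts the return-value offset passed to CallApiFunctionAndReturn from 5 to 6. As constants:

    enum PropertyCallbackLayout {
      kThisIndex = 0, kDataIndex = -1, kReturnValueIndex = -2,
      kReturnValueDefaultIndex = -3, kIsolateIndex = -4, kHolderIndex = -5
    };
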
@@ -1451,7 +1489,7 @@ void BaseLoadStubCompiler::GenerateLoadInterceptor(
ExternalReference ref = ExternalReference(
IC_Utility(IC::kLoadPropertyWithInterceptorForLoad), isolate());
- __ TailCallExternalReference(ref, 6, 1);
+ __ TailCallExternalReference(ref, StubCache::kInterceptorArgsLength, 1);
}
}
@@ -1470,11 +1508,8 @@ void CallStubCompiler::GenerateGlobalReceiverCheck(Handle<JSObject> object,
Label* miss) {
ASSERT(holder->IsGlobalObject());
- // Get the number of arguments.
- const int argc = arguments().immediate();
-
- // Get the receiver from the stack.
- __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
+ StackArgumentsAccessor args(rsp, arguments());
+ __ movq(rdx, args.GetReceiverOperand());
// Check that the maps haven't changed.
@@ -1538,9 +1573,8 @@ Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object,
GenerateNameCheck(name, &miss);
- // Get the receiver from the stack.
- const int argc = arguments().immediate();
- __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
+ StackArgumentsAccessor args(rsp, arguments());
+ __ movq(rdx, args.GetReceiverOperand());
// Check that the receiver isn't a smi.
__ JumpIfSmi(rdx, &miss);
@@ -1561,7 +1595,7 @@ Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object,
// necessary.
if (object->IsGlobalObject()) {
__ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalReceiverOffset));
- __ movq(Operand(rsp, (argc + 1) * kPointerSize), rdx);
+ __ movq(args.GetReceiverOperand(), rdx);
}
// Invoke the function.
@@ -1591,11 +1625,11 @@ Handle<Code> CallStubCompiler::CompileArrayCodeCall(
// Check that function is still array
const int argc = arguments().immediate();
+ StackArgumentsAccessor args(rsp, argc);
GenerateNameCheck(name, &miss);
if (cell.is_null()) {
- // Get the receiver from the stack.
- __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
+ __ movq(rdx, args.GetReceiverOperand());
// Check that the receiver isn't a smi.
__ JumpIfSmi(rdx, &miss);
@@ -1647,9 +1681,9 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
Label miss;
GenerateNameCheck(name, &miss);
- // Get the receiver from the stack.
const int argc = arguments().immediate();
- __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
+ StackArgumentsAccessor args(rsp, argc);
+ __ movq(rdx, args.GetReceiverOperand());
// Check that the receiver isn't a smi.
__ JumpIfSmi(rdx, &miss);
@@ -1688,7 +1722,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
__ j(greater, &attempt_to_grow_elements);
// Check if value is a smi.
- __ movq(rcx, Operand(rsp, argc * kPointerSize));
+ __ movq(rcx, args.GetArgumentOperand(1));
__ JumpIfNotSmi(rcx, &with_write_barrier);
// Save new length.
@@ -1723,7 +1757,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
__ cmpl(rax, rcx);
__ j(greater, &call_builtin);
- __ movq(rcx, Operand(rsp, argc * kPointerSize));
+ __ movq(rcx, args.GetArgumentOperand(1));
__ StoreNumberToDoubleElements(
rcx, rdi, rax, xmm0, &call_builtin, argc * kDoubleSize);
@@ -1800,7 +1834,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
__ jmp(&call_builtin);
}
- __ movq(rbx, Operand(rsp, argc * kPointerSize));
+ __ movq(rbx, args.GetArgumentOperand(1));
// Growing elements that are SMI-only requires special handling in case
// the new element is non-Smi. For now, delegate to the builtin.
Label no_fast_elements_check;
@@ -1849,7 +1883,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
__ RecordWrite(rdi, rdx, rbx, kDontSaveFPRegs, OMIT_REMEMBERED_SET);
// Restore receiver to rdx as finish sequence assumes it's here.
- __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
+ __ movq(rdx, args.GetReceiverOperand());
// Increment element's and array's sizes.
__ SmiAddConstant(FieldOperand(rdi, FixedArray::kLengthOffset),
@@ -1898,9 +1932,9 @@ Handle<Code> CallStubCompiler::CompileArrayPopCall(
Label miss, return_undefined, call_builtin;
GenerateNameCheck(name, &miss);
- // Get the receiver from the stack.
const int argc = arguments().immediate();
- __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
+ StackArgumentsAccessor args(rsp, argc);
+ __ movq(rdx, args.GetReceiverOperand());
// Check that the receiver isn't a smi.
__ JumpIfSmi(rdx, &miss);
@@ -1978,6 +2012,7 @@ Handle<Code> CallStubCompiler::CompileStringCharCodeAtCall(
if (!object->IsString() || !cell.is_null()) return Handle<Code>::null();
const int argc = arguments().immediate();
+ StackArgumentsAccessor args(rsp, argc);
Label miss;
Label name_miss;
@@ -2003,9 +2038,9 @@ Handle<Code> CallStubCompiler::CompileStringCharCodeAtCall(
Register receiver = rbx;
Register index = rdi;
Register result = rax;
- __ movq(receiver, Operand(rsp, (argc + 1) * kPointerSize));
+ __ movq(receiver, args.GetReceiverOperand());
if (argc > 0) {
- __ movq(index, Operand(rsp, (argc - 0) * kPointerSize));
+ __ movq(index, args.GetArgumentOperand(1));
} else {
__ LoadRoot(index, Heap::kUndefinedValueRootIndex);
}
@@ -2059,6 +2094,8 @@ Handle<Code> CallStubCompiler::CompileStringCharAtCall(
if (!object->IsString() || !cell.is_null()) return Handle<Code>::null();
const int argc = arguments().immediate();
+ StackArgumentsAccessor args(rsp, argc);
+
Label miss;
Label name_miss;
Label index_out_of_range;
@@ -2084,9 +2121,9 @@ Handle<Code> CallStubCompiler::CompileStringCharAtCall(
Register index = rdi;
Register scratch = rdx;
Register result = rax;
- __ movq(receiver, Operand(rsp, (argc + 1) * kPointerSize));
+ __ movq(receiver, args.GetReceiverOperand());
if (argc > 0) {
- __ movq(index, Operand(rsp, (argc - 0) * kPointerSize));
+ __ movq(index, args.GetArgumentOperand(1));
} else {
__ LoadRoot(index, Heap::kUndefinedValueRootIndex);
}
@@ -2139,13 +2176,14 @@ Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
// If the object is not a JSObject or we got an unexpected number of
// arguments, bail out to the regular call.
const int argc = arguments().immediate();
+ StackArgumentsAccessor args(rsp, argc);
if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
Label miss;
GenerateNameCheck(name, &miss);
if (cell.is_null()) {
- __ movq(rdx, Operand(rsp, 2 * kPointerSize));
+ __ movq(rdx, args.GetArgumentOperand(argc - 1));
__ JumpIfSmi(rdx, &miss);
CheckPrototypes(Handle<JSObject>::cast(object), rdx, holder, rbx, rax, rdi,
name, &miss);
@@ -2158,7 +2196,7 @@ Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
// Load the char code argument.
Register code = rbx;
- __ movq(code, Operand(rsp, 1 * kPointerSize));
+ __ movq(code, args.GetArgumentOperand(argc));
// Check the code is a smi.
Label slow;
@@ -2200,8 +2238,123 @@ Handle<Code> CallStubCompiler::CompileMathFloorCall(
Handle<JSFunction> function,
Handle<String> name,
Code::StubType type) {
- // TODO(872): implement this.
- return Handle<Code>::null();
+ // ----------- S t a t e -------------
+ // -- rcx : name
+ // -- rsp[0] : return address
+ // -- rsp[(argc - n) * 8] : arg[n] (zero-based)
+ // -- ...
+ // -- rsp[(argc + 1) * 8] : receiver
+ // -----------------------------------
+ const int argc = arguments().immediate();
+
+ // If the object is not a JSObject or we got an unexpected number of
+ // arguments, bail out to the regular call.
+ if (!object->IsJSObject() || argc != 1) {
+ return Handle<Code>::null();
+ }
+
+ Label miss;
+ GenerateNameCheck(name, &miss);
+
+ if (cell.is_null()) {
+ __ movq(rdx, Operand(rsp, 2 * kPointerSize));
+
+ STATIC_ASSERT(kSmiTag == 0);
+ __ JumpIfSmi(rdx, &miss);
+
+ CheckPrototypes(Handle<JSObject>::cast(object), rdx, holder, rbx, rax, rdi,
+ name, &miss);
+ } else {
+ ASSERT(cell->value() == *function);
+ GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
+ &miss);
+ GenerateLoadFunctionFromCell(cell, function, &miss);
+ }
+
+ // Load the (only) argument into rax.
+ __ movq(rax, Operand(rsp, 1 * kPointerSize));
+
+ // Check if the argument is a smi.
+ Label smi;
+ STATIC_ASSERT(kSmiTag == 0);
+ __ JumpIfSmi(rax, &smi);
+
+ // Check if the argument is a heap number and load its value into xmm0.
+ Label slow;
+ __ CheckMap(rax, factory()->heap_number_map(), &slow, DONT_DO_SMI_CHECK);
+ __ movsd(xmm0, FieldOperand(rax, HeapNumber::kValueOffset));
+
+ // Check that the argument is strictly positive; this also rules out NaN.
+ __ xorpd(xmm1, xmm1);
+ __ ucomisd(xmm0, xmm1);
+ __ j(below_equal, &slow);
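+ // cvttsd2si truncates toward zero, which matches floor only for
+ // non-negative inputs; zero (possibly -0), negatives and NaN all take
+ // the slow path.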
+
+ // Do a truncating conversion.
+ __ cvttsd2si(rax, xmm0);
+
+ // Check for 0x80000000, which signals a failed conversion.
+ Label conversion_failure;
+ __ cmpl(rax, Immediate(0x80000000));
+ __ j(equal, &conversion_failure);
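+ // cvttsd2si stores the "integer indefinite" value 0x80000000 when the
+ // input is NaN or out of int32 range; since the input is known positive,
+ // seeing it here can only mean the conversion failed.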
+
+ // Smi tag and return.
+ __ Integer32ToSmi(rax, rax);
+ __ bind(&smi);
+ __ ret(2 * kPointerSize);
+
+ // Check if the argument is < 2^kMantissaBits.
+ Label already_round;
+ __ bind(&conversion_failure);
+ int64_t kTwoMantissaBits = V8_INT64_C(0x4330000000000000);
+ __ movq(rbx, kTwoMantissaBits, RelocInfo::NONE64);
+ __ movq(xmm1, rbx);
+ __ ucomisd(xmm0, xmm1);
+ __ j(above_equal, &already_round);
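+ // A double with magnitude >= 2^52 has no fractional bits (the mantissa
+ // is 52 bits wide), so such values are already integral.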
+
+ // Save a copy of the argument.
+ __ movaps(xmm2, xmm0);
+
+ // Compute (argument + 2^kMantissaBits) - 2^kMantissaBits.
+ __ addsd(xmm0, xmm1);
+ __ subsd(xmm0, xmm1);
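+ // The add/sub pair rounds to an integer using the current SSE rounding
+ // mode (round-to-nearest by default), so the result may be one too large;
+ // the mask computed below corrects that to round-down.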
+
+ // Compare the argument and the tentative result to get the right mask:
+ // if xmm2 < xmm0:
+ // xmm2 = 1...1
+ // else:
+ // xmm2 = 0...0
+ __ cmpltsd(xmm2, xmm0);
+
+ // Subtract 1 if the argument was less than the tentative result.
+ int64_t kOne = V8_INT64_C(0x3ff0000000000000);
+ __ movq(rbx, kOne, RelocInfo::NONE64);
+ __ movq(xmm1, rbx);
+ __ andpd(xmm1, xmm2);
+ __ subsd(xmm0, xmm1);
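+ // 0x3ff0000000000000 is the IEEE-754 bit pattern of 1.0; ANDed with the
+ // all-ones/all-zeros mask it yields 1.0 or +0.0, so the subtraction
+ // adjusts the result only when needed.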
+
+ // Return a new heap number.
+ __ AllocateHeapNumber(rax, rbx, &slow);
+ __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);
+ __ ret(2 * kPointerSize);
+
+ // Return the argument (when it's an already round heap number).
+ __ bind(&already_round);
+ __ movq(rax, Operand(rsp, 1 * kPointerSize));
+ __ ret(2 * kPointerSize);
+
+ // Tail call the full function. We do not have to patch the receiver
+ // because the function makes no use of it.
+ __ bind(&slow);
+ ParameterCount expected(function);
+ __ InvokeFunction(function, expected, arguments(),
+ JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
+
+ __ bind(&miss);
+ // rcx: function name.
+ GenerateMissBranch();
+
+ // Return the generated code.
+ return GetCode(type, name);
}
@@ -2223,13 +2376,14 @@ Handle<Code> CallStubCompiler::CompileMathAbsCall(
// If the object is not a JSObject or we got an unexpected number of
// arguments, bail out to the regular call.
const int argc = arguments().immediate();
+ StackArgumentsAccessor args(rsp, argc);
if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
Label miss;
GenerateNameCheck(name, &miss);
if (cell.is_null()) {
- __ movq(rdx, Operand(rsp, 2 * kPointerSize));
+ __ movq(rdx, args.GetArgumentOperand(argc - 1));
__ JumpIfSmi(rdx, &miss);
CheckPrototypes(Handle<JSObject>::cast(object), rdx, holder, rbx, rax, rdi,
name, &miss);
@@ -2240,7 +2394,7 @@ Handle<Code> CallStubCompiler::CompileMathAbsCall(
GenerateLoadFunctionFromCell(cell, function, &miss);
}
// Load the (only) argument into rax.
- __ movq(rax, Operand(rsp, 1 * kPointerSize));
+ __ movq(rax, args.GetArgumentOperand(argc));
// Check if the argument is a smi.
Label not_smi;
@@ -2330,9 +2484,9 @@ Handle<Code> CallStubCompiler::CompileFastApiCall(
Label miss, miss_before_stack_reserved;
GenerateNameCheck(name, &miss_before_stack_reserved);
- // Get the receiver from the stack.
const int argc = arguments().immediate();
- __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
+ StackArgumentsAccessor args(rsp, argc);
+ __ movq(rdx, args.GetReceiverOperand());
// Check that the receiver isn't a smi.
__ JumpIfSmi(rdx, &miss_before_stack_reserved);
@@ -2384,9 +2538,8 @@ void CallStubCompiler::CompileHandlerFrontend(Handle<Object> object,
Label miss;
GenerateNameCheck(name, &miss);
- // Get the receiver from the stack.
- const int argc = arguments().immediate();
- __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
+ StackArgumentsAccessor args(rsp, arguments());
+ __ movq(rdx, args.GetReceiverOperand());
// Check that the receiver isn't a smi.
if (check != NUMBER_CHECK) {
@@ -2410,7 +2563,7 @@ void CallStubCompiler::CompileHandlerFrontend(Handle<Object> object,
// necessary.
if (object->IsGlobalObject()) {
__ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalReceiverOffset));
- __ movq(Operand(rsp, (argc + 1) * kPointerSize), rdx);
+ __ movq(args.GetReceiverOperand(), rdx);
}
break;
@@ -2530,21 +2683,20 @@ Handle<Code> CallStubCompiler::CompileCallInterceptor(Handle<JSObject> object,
Label miss;
GenerateNameCheck(name, &miss);
- // Get the number of arguments.
- const int argc = arguments().immediate();
LookupResult lookup(isolate());
LookupPostInterceptor(holder, name, &lookup);
// Get the receiver from the stack.
- __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
+ StackArgumentsAccessor args(rsp, arguments());
+ __ movq(rdx, args.GetReceiverOperand());
CallInterceptorCompiler compiler(this, arguments(), rcx, extra_state_);
compiler.Compile(masm(), object, holder, name, &lookup, rdx, rbx, rdi, rax,
&miss);
// Restore receiver.
- __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
+ __ movq(rdx, args.GetReceiverOperand());
// Check that the function really is a function.
__ JumpIfSmi(rax, &miss);
@@ -2555,7 +2707,7 @@ Handle<Code> CallStubCompiler::CompileCallInterceptor(Handle<JSObject> object,
// necessary.
if (object->IsGlobalObject()) {
__ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalReceiverOffset));
- __ movq(Operand(rsp, (argc + 1) * kPointerSize), rdx);
+ __ movq(args.GetReceiverOperand(), rdx);
}
// Invoke the function.
@@ -2602,15 +2754,14 @@ Handle<Code> CallStubCompiler::CompileCallGlobal(
Label miss;
GenerateNameCheck(name, &miss);
- // Get the number of arguments.
- const int argc = arguments().immediate();
+ StackArgumentsAccessor args(rsp, arguments());
GenerateGlobalReceiverCheck(object, holder, name, &miss);
GenerateLoadFunctionFromCell(cell, function, &miss);
// Patch the receiver on the stack with the global proxy.
if (object->IsGlobalObject()) {
__ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalReceiverOffset));
- __ movq(Operand(rsp, (argc + 1) * kPointerSize), rdx);
+ __ movq(args.GetReceiverOperand(), rdx);
}
// Set up the context (function already in rdi).
@@ -2666,6 +2817,24 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback(
}
+Handle<Code> StoreStubCompiler::CompileStoreCallback(
+ Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<Name> name,
+ const CallOptimization& call_optimization) {
+ Label success;
+ HandlerFrontend(object, receiver(), holder, name, &success);
+ __ bind(&success);
+
+ Register values[] = { value() };
+ GenerateFastApiCall(
+ masm(), call_optimization, receiver(), scratch3(), 1, values);
+
+ // Return the generated code.
+ return GetCode(kind(), Code::CALLBACKS, name);
+}
+
+
#undef __
#define __ ACCESS_MASM(masm)
@@ -2733,48 +2902,6 @@ Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
}
-Handle<Code> StoreStubCompiler::CompileStoreGlobal(
- Handle<GlobalObject> object,
- Handle<PropertyCell> cell,
- Handle<Name> name) {
- Label miss;
-
- // Check that the map of the global has not changed.
- __ Cmp(FieldOperand(receiver(), HeapObject::kMapOffset),
- Handle<Map>(object->map()));
- __ j(not_equal, &miss);
-
- // Compute the cell operand to use.
- __ Move(scratch1(), cell);
- Operand cell_operand =
- FieldOperand(scratch1(), PropertyCell::kValueOffset);
-
- // Check that the value in the cell is not the hole. If it is, this
- // cell could have been deleted and reintroducing the global needs
- // to update the property details in the property dictionary of the
- // global object. We bail out to the runtime system to do that.
- __ CompareRoot(cell_operand, Heap::kTheHoleValueRootIndex);
- __ j(equal, &miss);
-
- // Store the value in the cell.
- __ movq(cell_operand, value());
- // Cells are always rescanned, so no write barrier here.
-
- // Return the value (register rax).
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->named_store_global_inline(), 1);
- __ ret(0);
-
- // Handle store cache miss.
- __ bind(&miss);
- __ IncrementCounter(counters->named_store_global_inline_miss(), 1);
- TailCallBuiltin(masm(), MissBuiltin(kind()));
-
- // Return the generated code.
- return GetICCode(kind(), Code::NORMAL, name);
-}
-
-
Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic(
MapHandleList* receiver_maps,
CodeHandleList* handler_stubs,
@@ -3037,484 +3164,6 @@ void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
}
-static void GenerateSmiKeyCheck(MacroAssembler* masm,
- Register key,
- Register scratch,
- XMMRegister xmm_scratch0,
- XMMRegister xmm_scratch1,
- Label* fail) {
- // Check that key is a smi or a heap number containing a smi and branch
- // if the check fails.
- Label key_ok;
- __ JumpIfSmi(key, &key_ok);
- __ CheckMap(key,
- masm->isolate()->factory()->heap_number_map(),
- fail,
- DONT_DO_SMI_CHECK);
- __ movsd(xmm_scratch0, FieldOperand(key, HeapNumber::kValueOffset));
- __ cvttsd2si(scratch, xmm_scratch0);
- __ cvtlsi2sd(xmm_scratch1, scratch);
- __ ucomisd(xmm_scratch1, xmm_scratch0);
- __ j(not_equal, fail);
- __ j(parity_even, fail); // NaN.
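- // Round-trip check: truncate to int32 and convert back; any difference,
- // or an unordered compare (NaN sets the parity flag), means the key is
- // not exactly representable as a smi.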
- __ Integer32ToSmi(key, scratch);
- __ bind(&key_ok);
-}
-
-
-void KeyedStoreStubCompiler::GenerateStoreExternalArray(
- MacroAssembler* masm,
- ElementsKind elements_kind) {
- // ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- Label slow, miss_force_generic;
-
- // This stub is meant to be tail-jumped to; the receiver must already
- // have been verified by the caller not to be a smi.
-
- // Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, rcx, rbx, xmm0, xmm1, &miss_force_generic);
-
- // Check that the index is in range.
- __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
- __ SmiToInteger32(rdi, rcx); // Untag the index.
- __ cmpq(rcx, FieldOperand(rbx, ExternalArray::kLengthOffset));
- // Unsigned comparison catches both negative and too-large values.
- __ j(above_equal, &miss_force_generic);
-
- // Handle both smis and HeapNumbers in the fast path. Go to the
- // runtime for all other kinds of values.
- // rax: value
- // rcx: key (a smi)
- // rdx: receiver (a JSObject)
- // rbx: elements array
- // rdi: untagged key
- Label check_heap_number;
- if (elements_kind == EXTERNAL_PIXEL_ELEMENTS) {
- // Float to pixel conversion is only implemented in the runtime for now.
- __ JumpIfNotSmi(rax, &slow);
- } else {
- __ JumpIfNotSmi(rax, &check_heap_number, Label::kNear);
- }
- // No more branches to slow case on this path. Key and receiver not needed.
- __ SmiToInteger32(rdx, rax);
- __ movq(rbx, FieldOperand(rbx, ExternalArray::kExternalPointerOffset));
- // rbx: base pointer of external storage
- switch (elements_kind) {
- case EXTERNAL_PIXEL_ELEMENTS:
- { // Clamp the value to [0..255].
- Label done;
- __ testl(rdx, Immediate(0xFFFFFF00));
- __ j(zero, &done, Label::kNear);
- __ setcc(negative, rdx); // 1 if negative, 0 if positive.
- __ decb(rdx); // 0 if negative, 255 if positive.
- __ bind(&done);
- }
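- // Branch-free clamp: setcc yields 1 for negative inputs, 0 otherwise,
- // and decb maps that to 0x00 (clamp to 0) or 0xFF (clamp to 255);
- // in-range values skipped this via the test above.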
- __ movb(Operand(rbx, rdi, times_1, 0), rdx);
- break;
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- __ movb(Operand(rbx, rdi, times_1, 0), rdx);
- break;
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- __ movw(Operand(rbx, rdi, times_2, 0), rdx);
- break;
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- __ movl(Operand(rbx, rdi, times_4, 0), rdx);
- break;
- case EXTERNAL_FLOAT_ELEMENTS:
- // Need to perform int-to-float conversion.
- __ cvtlsi2ss(xmm0, rdx);
- __ movss(Operand(rbx, rdi, times_4, 0), xmm0);
- break;
- case EXTERNAL_DOUBLE_ELEMENTS:
- // Need to perform int-to-float conversion.
- __ cvtlsi2sd(xmm0, rdx);
- __ movsd(Operand(rbx, rdi, times_8, 0), xmm0);
- break;
- case FAST_ELEMENTS:
- case FAST_SMI_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- UNREACHABLE();
- break;
- }
- __ ret(0);
-
- // TODO(danno): handle heap number -> pixel array conversion
- if (elements_kind != EXTERNAL_PIXEL_ELEMENTS) {
- __ bind(&check_heap_number);
- // rax: value
- // rcx: key (a smi)
- // rdx: receiver (a JSObject)
- // rbx: elements array
- // rdi: untagged key
- __ CmpObjectType(rax, HEAP_NUMBER_TYPE, kScratchRegister);
- __ j(not_equal, &slow);
- // No more branches to slow case on this path.
-
- // The WebGL specification leaves the behavior of storing NaN and
- // +/-Infinity into integer arrays basically undefined. For more
- // reproducible behavior, convert these to zero.
- __ movsd(xmm0, FieldOperand(rax, HeapNumber::kValueOffset));
- __ movq(rbx, FieldOperand(rbx, ExternalArray::kExternalPointerOffset));
- // rdi: untagged index
- // rbx: base pointer of external storage
- // top of FPU stack: value
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
- __ cvtsd2ss(xmm0, xmm0);
- __ movss(Operand(rbx, rdi, times_4, 0), xmm0);
- __ ret(0);
- } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
- __ movsd(Operand(rbx, rdi, times_8, 0), xmm0);
- __ ret(0);
- } else {
- // Perform float-to-int conversion with truncation (round-to-zero)
- // behavior.
- // Fast path: use machine instruction to convert to int64. If that
- // fails (out-of-range), go into the runtime.
- __ cvttsd2siq(r8, xmm0);
- __ Set(kScratchRegister, V8_UINT64_C(0x8000000000000000));
- __ cmpq(r8, kScratchRegister);
- __ j(equal, &slow);
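- // cvttsd2siq yields 0x8000000000000000 for NaN and out-of-range
- // inputs, so comparing against that sentinel catches every failed
- // conversion.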
-
- // rdx: value (converted to an untagged integer)
- // rdi: untagged index
- // rbx: base pointer of external storage
- switch (elements_kind) {
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- __ movb(Operand(rbx, rdi, times_1, 0), r8);
- break;
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- __ movw(Operand(rbx, rdi, times_2, 0), r8);
- break;
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- __ movl(Operand(rbx, rdi, times_4, 0), r8);
- break;
- case EXTERNAL_PIXEL_ELEMENTS:
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_SMI_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- UNREACHABLE();
- break;
- }
- __ ret(0);
- }
- }
-
- // Slow case: call runtime.
- __ bind(&slow);
-
- // ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- TailCallBuiltin(masm, Builtins::kKeyedStoreIC_Slow);
-
- // Miss case: call runtime.
- __ bind(&miss_force_generic);
-
- // ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- TailCallBuiltin(masm, Builtins::kKeyedStoreIC_MissForceGeneric);
-}
-
-
-void KeyedStoreStubCompiler::GenerateStoreFastElement(
- MacroAssembler* masm,
- bool is_js_array,
- ElementsKind elements_kind,
- KeyedAccessStoreMode store_mode) {
- // ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- Label miss_force_generic, transition_elements_kind, finish_store, grow;
- Label check_capacity, slow;
-
- // This stub is meant to be tail-jumped to; the receiver must already
- // have been verified by the caller not to be a smi.
-
- // Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, rcx, rbx, xmm0, xmm1, &miss_force_generic);
-
- if (IsFastSmiElementsKind(elements_kind)) {
- __ JumpIfNotSmi(rax, &transition_elements_kind);
- }
-
- // Get the elements array and make sure it is a fast element array, not a
- // copy-on-write (COW) array.
- __ movq(rdi, FieldOperand(rdx, JSObject::kElementsOffset));
- // Check that the key is within bounds.
- if (is_js_array) {
- __ SmiCompare(rcx, FieldOperand(rdx, JSArray::kLengthOffset));
- if (IsGrowStoreMode(store_mode)) {
- __ j(above_equal, &grow);
- } else {
- __ j(above_equal, &miss_force_generic);
- }
- } else {
- __ SmiCompare(rcx, FieldOperand(rdi, FixedArray::kLengthOffset));
- __ j(above_equal, &miss_force_generic);
- }
-
- __ CompareRoot(FieldOperand(rdi, HeapObject::kMapOffset),
- Heap::kFixedArrayMapRootIndex);
- __ j(not_equal, &miss_force_generic);
-
- __ bind(&finish_store);
- if (IsFastSmiElementsKind(elements_kind)) {
- __ SmiToInteger32(rcx, rcx);
- __ movq(FieldOperand(rdi, rcx, times_pointer_size, FixedArray::kHeaderSize),
- rax);
- } else {
- // Do the store and update the write barrier.
- ASSERT(IsFastObjectElementsKind(elements_kind));
- __ SmiToInteger32(rcx, rcx);
- __ lea(rcx,
- FieldOperand(rdi, rcx, times_pointer_size, FixedArray::kHeaderSize));
- __ movq(Operand(rcx, 0), rax);
- // Make sure to preserve the value in register rax.
- __ movq(rbx, rax);
- __ RecordWrite(rdi, rcx, rbx, kDontSaveFPRegs);
- }
-
- // Done.
- __ ret(0);
-
- // Handle store cache miss.
- __ bind(&miss_force_generic);
- TailCallBuiltin(masm, Builtins::kKeyedStoreIC_MissForceGeneric);
-
- __ bind(&transition_elements_kind);
- TailCallBuiltin(masm, Builtins::kKeyedStoreIC_Miss);
-
- if (is_js_array && IsGrowStoreMode(store_mode)) {
- // Grow the array by a single element if possible.
- __ bind(&grow);
-
- // Make sure the array is growing by only a single element; anything else
- // must be handled by the runtime. Flags are already set by the previous
- // compare.
- __ j(not_equal, &miss_force_generic);
-
- // Check for the empty array, and preallocate a small backing store if
- // possible.
- __ movq(rdi, FieldOperand(rdx, JSObject::kElementsOffset));
- __ CompareRoot(rdi, Heap::kEmptyFixedArrayRootIndex);
- __ j(not_equal, &check_capacity);
-
- int size = FixedArray::SizeFor(JSArray::kPreallocatedArrayElements);
- __ Allocate(size, rdi, rbx, r8, &slow, TAG_OBJECT);
-
- // rax: value
- // rcx: key
- // rdx: receiver
- // rdi: elements
- // Make sure that the backing store can hold additional elements.
- __ Move(FieldOperand(rdi, JSObject::kMapOffset),
- masm->isolate()->factory()->fixed_array_map());
- __ Move(FieldOperand(rdi, FixedArray::kLengthOffset),
- Smi::FromInt(JSArray::kPreallocatedArrayElements));
- __ LoadRoot(rbx, Heap::kTheHoleValueRootIndex);
- for (int i = 1; i < JSArray::kPreallocatedArrayElements; ++i) {
- __ movq(FieldOperand(rdi, FixedArray::SizeFor(i)), rbx);
- }
-
- // Store the element at index zero.
- __ movq(FieldOperand(rdi, FixedArray::SizeFor(0)), rax);
-
- // Install the new backing store in the JSArray.
- __ movq(FieldOperand(rdx, JSObject::kElementsOffset), rdi);
- __ RecordWriteField(rdx, JSObject::kElementsOffset, rdi, rbx,
- kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-
- // Increment the length of the array.
- __ Move(FieldOperand(rdx, JSArray::kLengthOffset), Smi::FromInt(1));
- __ ret(0);
-
- __ bind(&check_capacity);
- // Check for COW elements; in general they are not handled by this stub.
- __ CompareRoot(FieldOperand(rdi, HeapObject::kMapOffset),
- Heap::kFixedCOWArrayMapRootIndex);
- __ j(equal, &miss_force_generic);
-
- // rax: value
- // rcx: key
- // rdx: receiver
- // rdi: elements
- // Make sure that the backing store can hold additional elements.
- __ cmpq(rcx, FieldOperand(rdi, FixedArray::kLengthOffset));
- __ j(above_equal, &slow);
-
- // Grow the array and finish the store.
- __ SmiAddConstant(FieldOperand(rdx, JSArray::kLengthOffset),
- Smi::FromInt(1));
- __ jmp(&finish_store);
-
- __ bind(&slow);
- TailCallBuiltin(masm, Builtins::kKeyedStoreIC_Slow);
- }
-}
-
-
-void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
- MacroAssembler* masm,
- bool is_js_array,
- KeyedAccessStoreMode store_mode) {
- // ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- Label miss_force_generic, transition_elements_kind, finish_store;
- Label grow, slow, check_capacity, restore_key_transition_elements_kind;
-
- // This stub is meant to be tail-jumped to; the receiver must already
- // have been verified by the caller not to be a smi.
-
- // Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, rcx, rbx, xmm0, xmm1, &miss_force_generic);
-
- // Get the elements array.
- __ movq(rdi, FieldOperand(rdx, JSObject::kElementsOffset));
- __ AssertFastElements(rdi);
-
- // Check that the key is within bounds.
- if (is_js_array) {
- __ SmiCompare(rcx, FieldOperand(rdx, JSArray::kLengthOffset));
- if (IsGrowStoreMode(store_mode)) {
- __ j(above_equal, &grow);
- } else {
- __ j(above_equal, &miss_force_generic);
- }
- } else {
- __ SmiCompare(rcx, FieldOperand(rdi, FixedDoubleArray::kLengthOffset));
- __ j(above_equal, &miss_force_generic);
- }
-
- // Handle smi values specially
- __ bind(&finish_store);
- __ SmiToInteger32(rcx, rcx);
- __ StoreNumberToDoubleElements(rax, rdi, rcx, xmm0,
- &restore_key_transition_elements_kind);
- __ ret(0);
-
- // Handle store cache miss, replacing the ic with the generic stub.
- __ bind(&miss_force_generic);
- TailCallBuiltin(masm, Builtins::kKeyedStoreIC_MissForceGeneric);
-
- __ bind(&restore_key_transition_elements_kind);
- // Restore smi-tagging of rcx.
- __ Integer32ToSmi(rcx, rcx);
- __ bind(&transition_elements_kind);
- TailCallBuiltin(masm, Builtins::kKeyedStoreIC_Miss);
-
- if (is_js_array && IsGrowStoreMode(store_mode)) {
- // Grow the array by a single element if possible.
- __ bind(&grow);
-
- // Make sure the array is growing by only a single element; anything else
- // must be handled by the runtime. Flags are already set by the previous
- // compare.
- __ j(not_equal, &miss_force_generic);
-
- // Transition on values that can't be stored in a FixedDoubleArray.
- Label value_is_smi;
- __ JumpIfSmi(rax, &value_is_smi);
- __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
- Heap::kHeapNumberMapRootIndex);
- __ j(not_equal, &transition_elements_kind);
- __ bind(&value_is_smi);
-
- // Check for the empty array, and preallocate a small backing store if
- // possible.
- __ movq(rdi, FieldOperand(rdx, JSObject::kElementsOffset));
- __ CompareRoot(rdi, Heap::kEmptyFixedArrayRootIndex);
- __ j(not_equal, &check_capacity);
-
- int size = FixedDoubleArray::SizeFor(JSArray::kPreallocatedArrayElements);
- __ Allocate(size, rdi, rbx, r8, &slow, TAG_OBJECT);
-
- // rax: value
- // rcx: key
- // rdx: receiver
- // rdi: elements
- // Initialize the new FixedDoubleArray. Leave elements uninitialized for
- // efficiency; they are guaranteed to be initialized before use.
- __ Move(FieldOperand(rdi, JSObject::kMapOffset),
- masm->isolate()->factory()->fixed_double_array_map());
- __ Move(FieldOperand(rdi, FixedDoubleArray::kLengthOffset),
- Smi::FromInt(JSArray::kPreallocatedArrayElements));
-
- // Increment the length of the array.
- __ SmiToInteger32(rcx, rcx);
- __ StoreNumberToDoubleElements(rax, rdi, rcx, xmm0,
- &restore_key_transition_elements_kind);
-
- __ movq(r8, BitCast<int64_t, uint64_t>(kHoleNanInt64), RelocInfo::NONE64);
- for (int i = 1; i < JSArray::kPreallocatedArrayElements; i++) {
- __ movq(FieldOperand(rdi, FixedDoubleArray::OffsetOfElementAt(i)), r8);
- }
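- // kHoleNanInt64 is the NaN bit pattern that marks missing entries in a
- // FixedDoubleArray; slots 1..N-1 start out as holes, while slot 0
- // already holds the stored value.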
-
- // Install the new backing store in the JSArray.
- __ movq(FieldOperand(rdx, JSObject::kElementsOffset), rdi);
- __ RecordWriteField(rdx, JSObject::kElementsOffset, rdi, rbx,
- kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-
- // Increment the length of the array.
- __ Move(FieldOperand(rdx, JSArray::kLengthOffset), Smi::FromInt(1));
- __ movq(rdi, FieldOperand(rdx, JSObject::kElementsOffset));
- __ ret(0);
-
- __ bind(&check_capacity);
- // rax: value
- // rcx: key
- // rdx: receiver
- // rdi: elements
- // Make sure that the backing store can hold additional elements.
- __ cmpq(rcx, FieldOperand(rdi, FixedDoubleArray::kLengthOffset));
- __ j(above_equal, &slow);
-
- // Grow the array and finish the store.
- __ SmiAddConstant(FieldOperand(rdx, JSArray::kLengthOffset),
- Smi::FromInt(1));
- __ jmp(&finish_store);
-
- __ bind(&slow);
- TailCallBuiltin(masm, Builtins::kKeyedStoreIC_Slow);
- }
-}
-
-
#undef __
} } // namespace v8::internal